1 /* tc-aarch64.c -- Assemble for the AArch64 ISA 2 3 Copyright (C) 2009-2020 Free Software Foundation, Inc. 4 Contributed by ARM Ltd. 5 6 This file is part of GAS. 7 8 GAS is free software; you can redistribute it and/or modify 9 it under the terms of the GNU General Public License as published by 10 the Free Software Foundation; either version 3 of the license, or 11 (at your option) any later version. 12 13 GAS is distributed in the hope that it will be useful, 14 but WITHOUT ANY WARRANTY; without even the implied warranty of 15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 GNU General Public License for more details. 17 18 You should have received a copy of the GNU General Public License 19 along with this program; see the file COPYING3. If not, 20 see <http://www.gnu.org/licenses/>. */ 21 22 #include "as.h" 23 #include <limits.h> 24 #include <stdarg.h> 25 #include "bfd_stdint.h" 26 #define NO_RELOC 0 27 #include "safe-ctype.h" 28 #include "subsegs.h" 29 #include "obstack.h" 30 31 #ifdef OBJ_ELF 32 #include "elf/aarch64.h" 33 #include "dw2gencfi.h" 34 #endif 35 36 #include "dwarf2dbg.h" 37 38 /* Types of processor to assemble for. */ 39 #ifndef CPU_DEFAULT 40 #define CPU_DEFAULT AARCH64_ARCH_V8 41 #endif 42 43 #define streq(a, b) (strcmp (a, b) == 0) 44 45 #define END_OF_INSN '\0' 46 47 static aarch64_feature_set cpu_variant; 48 49 /* Variables that we set while parsing command-line options. Once all 50 options have been read we re-process these values to set the real 51 assembly flags. */ 52 static const aarch64_feature_set *mcpu_cpu_opt = NULL; 53 static const aarch64_feature_set *march_cpu_opt = NULL; 54 55 /* Constants for known architecture features. */ 56 static const aarch64_feature_set cpu_default = CPU_DEFAULT; 57 58 /* Currently active instruction sequence. 
*/
static aarch64_instr_sequence *insn_sequence = NULL;

#ifdef OBJ_ELF
/* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
static symbolS *GOT_symbol;

/* Which ABI to use.  */
enum aarch64_abi_type
{
  AARCH64_ABI_NONE = 0,
  AARCH64_ABI_LP64 = 1,
  AARCH64_ABI_ILP32 = 2
};

#ifndef DEFAULT_ARCH
#define DEFAULT_ARCH "aarch64"
#endif

/* DEFAULT_ARCH is initialized in gas/configure.tgt.  */
static const char *default_arch = DEFAULT_ARCH;

/* AArch64 ABI for the output file.  */
static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;

/* When non-zero, program to a 32-bit model, in which the C data types
   int, long and all pointer types are 32-bit objects (ILP32); or to a
   64-bit model, in which the C int type is 32-bits but the C long type
   and all pointer types are 64-bit objects (LP64).  */
#define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
#endif

/* Element type of a vector register or vector element operand.  */
enum vector_el_type
{
  NT_invtype = -1,
  NT_b,
  NT_h,
  NT_s,
  NT_d,
  NT_q,
  /* SVE predication suffixes: "/z" (zeroing) and "/m" (merging); see
     parse_predication_for_operand.  */
  NT_zero,
  NT_merge
};

/* Bits for DEFINED field in vector_type_el.  */
#define NTA_HASTYPE     1
#define NTA_HASINDEX    2
#define NTA_HASVARWIDTH 4

/* Parsed shape/index information for a vector register operand.  */
struct vector_type_el
{
  enum vector_el_type type;
  /* Mask of NTA_* bits recording which of the fields below were given.  */
  unsigned char defined;
  /* Number of elements (e.g. the 4 in "v0.4s"); 0 when not specified.  */
  unsigned width;
  /* Element index, for forms such as "v0.s[2]".  */
  int64_t index;
};

#define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001

/* Relocation/fixup information for the instruction being assembled.  */
struct reloc
{
  bfd_reloc_code_real_type type;
  expressionS exp;
  int pc_rel;
  /* The operand the relocation applies to.  */
  enum aarch64_opnd opnd;
  /* FIXUP_F_* flags.  */
  uint32_t flags;
  unsigned need_libopcodes_p : 1;
};

/* An instruction in the process of being assembled.  */
struct aarch64_instruction
{
  /* libopcodes structure for instruction intermediate representation.  */
  aarch64_inst base;
  /* Record assembly errors found during the parsing.  */
  struct
    {
      enum aarch64_operand_error_kind kind;
      const char *error;
    } parsing_error;
  /* The condition that appears in the assembly line.
*/
  int cond;
  /* Relocation information (including the GAS internal fixup).  */
  struct reloc reloc;
  /* Need to generate an immediate in the literal pool.  */
  unsigned gen_lit_pool : 1;
};

typedef struct aarch64_instruction aarch64_instruction;

/* The single instruction currently being parsed and encoded.  */
static aarch64_instruction inst;

static bfd_boolean parse_operands (char *, const aarch64_opcode *);
static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);

#ifdef OBJ_ELF
/* For ELF output the instruction-sequence state is kept per section.  */
#  define now_instr_sequence seg_info \
		(now_seg)->tc_segment_info_data.insn_sequence
#else
static struct aarch64_instr_sequence now_instr_sequence;
#endif

/* Diagnostics inline function utilities.

   These are lightweight utilities which should only be called by parse_operands
   and other parsers.  GAS processes each assembly line by parsing it against
   instruction template(s), in the case of multiple templates (for the same
   mnemonic name), those templates are tried one by one until one succeeds or
   all fail.  An assembly line may fail a few templates before being
   successfully parsed; an error saved here in most cases is not a user error
   but an error indicating the current template is not the right template.
   Therefore it is very important that errors can be saved at a low cost during
   the parsing; we don't want to slow down the whole parsing by recording
   non-user errors in detail.

   Remember that the objective is to help GAS pick up the most appropriate
   error message in the case of multiple templates, e.g. FMOV which has 8
   templates.
*/ 175 176 static inline void 177 clear_error (void) 178 { 179 inst.parsing_error.kind = AARCH64_OPDE_NIL; 180 inst.parsing_error.error = NULL; 181 } 182 183 static inline bfd_boolean 184 error_p (void) 185 { 186 return inst.parsing_error.kind != AARCH64_OPDE_NIL; 187 } 188 189 static inline const char * 190 get_error_message (void) 191 { 192 return inst.parsing_error.error; 193 } 194 195 static inline enum aarch64_operand_error_kind 196 get_error_kind (void) 197 { 198 return inst.parsing_error.kind; 199 } 200 201 static inline void 202 set_error (enum aarch64_operand_error_kind kind, const char *error) 203 { 204 inst.parsing_error.kind = kind; 205 inst.parsing_error.error = error; 206 } 207 208 static inline void 209 set_recoverable_error (const char *error) 210 { 211 set_error (AARCH64_OPDE_RECOVERABLE, error); 212 } 213 214 /* Use the DESC field of the corresponding aarch64_operand entry to compose 215 the error message. */ 216 static inline void 217 set_default_error (void) 218 { 219 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL); 220 } 221 222 static inline void 223 set_syntax_error (const char *error) 224 { 225 set_error (AARCH64_OPDE_SYNTAX_ERROR, error); 226 } 227 228 static inline void 229 set_first_syntax_error (const char *error) 230 { 231 if (! error_p ()) 232 set_error (AARCH64_OPDE_SYNTAX_ERROR, error); 233 } 234 235 static inline void 236 set_fatal_syntax_error (const char *error) 237 { 238 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error); 239 } 240 241 /* Return value for certain parsers when the parsing fails; those parsers 242 return the information of the parsed result, e.g. register number, on 243 success. */ 244 #define PARSE_FAIL -1 245 246 /* This is an invalid condition code that means no conditional field is 247 present. 
*/ 248 #define COND_ALWAYS 0x10 249 250 typedef struct 251 { 252 const char *template; 253 unsigned long value; 254 } asm_barrier_opt; 255 256 typedef struct 257 { 258 const char *template; 259 uint32_t value; 260 } asm_nzcv; 261 262 struct reloc_entry 263 { 264 char *name; 265 bfd_reloc_code_real_type reloc; 266 }; 267 268 /* Macros to define the register types and masks for the purpose 269 of parsing. */ 270 271 #undef AARCH64_REG_TYPES 272 #define AARCH64_REG_TYPES \ 273 BASIC_REG_TYPE(R_32) /* w[0-30] */ \ 274 BASIC_REG_TYPE(R_64) /* x[0-30] */ \ 275 BASIC_REG_TYPE(SP_32) /* wsp */ \ 276 BASIC_REG_TYPE(SP_64) /* sp */ \ 277 BASIC_REG_TYPE(Z_32) /* wzr */ \ 278 BASIC_REG_TYPE(Z_64) /* xzr */ \ 279 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\ 280 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \ 281 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \ 282 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \ 283 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \ 284 BASIC_REG_TYPE(VN) /* v[0-31] */ \ 285 BASIC_REG_TYPE(ZN) /* z[0-31] */ \ 286 BASIC_REG_TYPE(PN) /* p[0-15] */ \ 287 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \ 288 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \ 289 /* Typecheck: same, plus SVE registers. */ \ 290 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \ 291 | REG_TYPE(ZN)) \ 292 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \ 293 MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64) \ 294 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \ 295 /* Typecheck: same, plus SVE registers. */ \ 296 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \ 297 | REG_TYPE(Z_32) | REG_TYPE(Z_64) \ 298 | REG_TYPE(ZN)) \ 299 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \ 300 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \ 301 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \ 302 /* Typecheck: any int (inc {W}SP inc [WX]ZR). 
*/ \ 303 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \ 304 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \ 305 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \ 306 /* Typecheck: any [BHSDQ]P FP. */ \ 307 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \ 308 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \ 309 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \ 310 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \ 311 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \ 312 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \ 313 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \ 314 /* Typecheck: as above, but also Zn, Pn, and {W}SP. This should only \ 315 be used for SVE instructions, since Zn and Pn are valid symbols \ 316 in other contexts. */ \ 317 MULTI_REG_TYPE(R_Z_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \ 318 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \ 319 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \ 320 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \ 321 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \ 322 | REG_TYPE(ZN) | REG_TYPE(PN)) \ 323 /* Any integer register; used for error messages only. */ \ 324 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \ 325 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \ 326 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \ 327 /* Pseudo type to mark the end of the enumerator sequence. */ \ 328 BASIC_REG_TYPE(MAX) 329 330 #undef BASIC_REG_TYPE 331 #define BASIC_REG_TYPE(T) REG_TYPE_##T, 332 #undef MULTI_REG_TYPE 333 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T) 334 335 /* Register type enumerators. */ 336 typedef enum aarch64_reg_type_ 337 { 338 /* A list of REG_TYPE_*. */ 339 AARCH64_REG_TYPES 340 } aarch64_reg_type; 341 342 #undef BASIC_REG_TYPE 343 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T, 344 #undef REG_TYPE 345 #define REG_TYPE(T) (1 << REG_TYPE_##T) 346 #undef MULTI_REG_TYPE 347 #define MULTI_REG_TYPE(T,V) V, 348 349 /* Structure for a hash table entry for a register. 
*/ 350 typedef struct 351 { 352 const char *name; 353 unsigned char number; 354 ENUM_BITFIELD (aarch64_reg_type_) type : 8; 355 unsigned char builtin; 356 } reg_entry; 357 358 /* Values indexed by aarch64_reg_type to assist the type checking. */ 359 static const unsigned reg_type_masks[] = 360 { 361 AARCH64_REG_TYPES 362 }; 363 364 #undef BASIC_REG_TYPE 365 #undef REG_TYPE 366 #undef MULTI_REG_TYPE 367 #undef AARCH64_REG_TYPES 368 369 /* Diagnostics used when we don't get a register of the expected type. 370 Note: this has to synchronized with aarch64_reg_type definitions 371 above. */ 372 static const char * 373 get_reg_expected_msg (aarch64_reg_type reg_type) 374 { 375 const char *msg; 376 377 switch (reg_type) 378 { 379 case REG_TYPE_R_32: 380 msg = N_("integer 32-bit register expected"); 381 break; 382 case REG_TYPE_R_64: 383 msg = N_("integer 64-bit register expected"); 384 break; 385 case REG_TYPE_R_N: 386 msg = N_("integer register expected"); 387 break; 388 case REG_TYPE_R64_SP: 389 msg = N_("64-bit integer or SP register expected"); 390 break; 391 case REG_TYPE_SVE_BASE: 392 msg = N_("base register expected"); 393 break; 394 case REG_TYPE_R_Z: 395 msg = N_("integer or zero register expected"); 396 break; 397 case REG_TYPE_SVE_OFFSET: 398 msg = N_("offset register expected"); 399 break; 400 case REG_TYPE_R_SP: 401 msg = N_("integer or SP register expected"); 402 break; 403 case REG_TYPE_R_Z_SP: 404 msg = N_("integer, zero or SP register expected"); 405 break; 406 case REG_TYPE_FP_B: 407 msg = N_("8-bit SIMD scalar register expected"); 408 break; 409 case REG_TYPE_FP_H: 410 msg = N_("16-bit SIMD scalar or floating-point half precision " 411 "register expected"); 412 break; 413 case REG_TYPE_FP_S: 414 msg = N_("32-bit SIMD scalar or floating-point single precision " 415 "register expected"); 416 break; 417 case REG_TYPE_FP_D: 418 msg = N_("64-bit SIMD scalar or floating-point double precision " 419 "register expected"); 420 break; 421 case REG_TYPE_FP_Q: 422 
msg = N_("128-bit SIMD scalar or floating-point quad precision " 423 "register expected"); 424 break; 425 case REG_TYPE_R_Z_BHSDQ_V: 426 case REG_TYPE_R_Z_SP_BHSDQ_VZP: 427 msg = N_("register expected"); 428 break; 429 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */ 430 msg = N_("SIMD scalar or floating-point register expected"); 431 break; 432 case REG_TYPE_VN: /* any V reg */ 433 msg = N_("vector register expected"); 434 break; 435 case REG_TYPE_ZN: 436 msg = N_("SVE vector register expected"); 437 break; 438 case REG_TYPE_PN: 439 msg = N_("SVE predicate register expected"); 440 break; 441 default: 442 as_fatal (_("invalid register type %d"), reg_type); 443 } 444 return msg; 445 } 446 447 /* Some well known registers that we refer to directly elsewhere. */ 448 #define REG_SP 31 449 #define REG_ZR 31 450 451 /* Instructions take 4 bytes in the object file. */ 452 #define INSN_SIZE 4 453 454 static struct hash_control *aarch64_ops_hsh; 455 static struct hash_control *aarch64_cond_hsh; 456 static struct hash_control *aarch64_shift_hsh; 457 static struct hash_control *aarch64_sys_regs_hsh; 458 static struct hash_control *aarch64_pstatefield_hsh; 459 static struct hash_control *aarch64_sys_regs_ic_hsh; 460 static struct hash_control *aarch64_sys_regs_dc_hsh; 461 static struct hash_control *aarch64_sys_regs_at_hsh; 462 static struct hash_control *aarch64_sys_regs_tlbi_hsh; 463 static struct hash_control *aarch64_sys_regs_sr_hsh; 464 static struct hash_control *aarch64_reg_hsh; 465 static struct hash_control *aarch64_barrier_opt_hsh; 466 static struct hash_control *aarch64_nzcv_hsh; 467 static struct hash_control *aarch64_pldop_hsh; 468 static struct hash_control *aarch64_hint_opt_hsh; 469 470 /* Stuff needed to resolve the label ambiguity 471 As: 472 ... 473 label: <insn> 474 may differ from: 475 ... 476 label: 477 <insn> */ 478 479 static symbolS *last_label_seen; 480 481 /* Literal pool structure. Held on a per-section 482 and per-sub-section basis. 
*/

#define MAX_LITERAL_POOL_SIZE 1024

/* One entry in a literal pool.  */
typedef struct literal_expression
{
  expressionS exp;
  /* If exp.op == O_big then this bignum holds a copy of the global bignum value.  */
  LITTLENUM_TYPE * bignum;
} literal_expression;

/* A literal pool; pools are kept per (section, sub-section) pair.  */
typedef struct literal_pool
{
  /* Pending literals; next_free_entry indexes the first unused slot.  */
  literal_expression literals[MAX_LITERAL_POOL_SIZE];
  unsigned int next_free_entry;
  /* Identifier of this pool.  */
  unsigned int id;
  /* Symbol marking the pool's location.  */
  symbolS *symbol;
  segT section;
  subsegT sub_section;
  /* NOTE(review): presumably the size in bytes of each entry — confirm
     against the pool-emitting code.  */
  int size;
  /* Next pool in the list headed by list_of_pools.  */
  struct literal_pool *next;
} literal_pool;

/* Pointer to a linked list of literal pools.  */
static literal_pool *list_of_pools = NULL;

/* Pure syntax.  */

/* This array holds the chars that always start a comment.  If the
   pre-processor is disabled, these aren't very useful.  */
const char comment_chars[] = "";

/* This array holds the chars that only start a comment at the beginning of
   a line.  If the line seems to have the form '# 123 filename'
   .line and .file directives will appear in the pre-processed output.  */
/* Note that input_file.c hand checks for '#' at the beginning of the
   first line of the input file.  This is because the compiler outputs
   #NO_APP at the beginning of its output.  */
/* Also note that comments like this one will always work.  */
const char line_comment_chars[] = "#";

const char line_separator_chars[] = ";";

/* Chars that can be used to separate mant
   from exp in floating point numbers.  */
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant.  */
/* As in 0f12.456  */
/* or    0d1.2345e12  */

const char FLT_CHARS[] = "rRsSfFdDxXeEpPhH";

/* Prefix character that indicates the start of an immediate value.  */
#define is_immediate_prefix(C) ((C) == '#')

/* Separator character handling.
*/ 538 539 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0) 540 541 static inline bfd_boolean 542 skip_past_char (char **str, char c) 543 { 544 if (**str == c) 545 { 546 (*str)++; 547 return TRUE; 548 } 549 else 550 return FALSE; 551 } 552 553 #define skip_past_comma(str) skip_past_char (str, ',') 554 555 /* Arithmetic expressions (possibly involving symbols). */ 556 557 static bfd_boolean in_my_get_expression_p = FALSE; 558 559 /* Third argument to my_get_expression. */ 560 #define GE_NO_PREFIX 0 561 #define GE_OPT_PREFIX 1 562 563 /* Return TRUE if the string pointed by *STR is successfully parsed 564 as an valid expression; *EP will be filled with the information of 565 such an expression. Otherwise return FALSE. */ 566 567 static bfd_boolean 568 my_get_expression (expressionS * ep, char **str, int prefix_mode, 569 int reject_absent) 570 { 571 char *save_in; 572 segT seg; 573 int prefix_present_p = 0; 574 575 switch (prefix_mode) 576 { 577 case GE_NO_PREFIX: 578 break; 579 case GE_OPT_PREFIX: 580 if (is_immediate_prefix (**str)) 581 { 582 (*str)++; 583 prefix_present_p = 1; 584 } 585 break; 586 default: 587 abort (); 588 } 589 590 memset (ep, 0, sizeof (expressionS)); 591 592 save_in = input_line_pointer; 593 input_line_pointer = *str; 594 in_my_get_expression_p = TRUE; 595 seg = expression (ep); 596 in_my_get_expression_p = FALSE; 597 598 if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent)) 599 { 600 /* We found a bad expression in md_operand(). */ 601 *str = input_line_pointer; 602 input_line_pointer = save_in; 603 if (prefix_present_p && ! 
error_p ()) 604 set_fatal_syntax_error (_("bad expression")); 605 else 606 set_first_syntax_error (_("bad expression")); 607 return FALSE; 608 } 609 610 #ifdef OBJ_AOUT 611 if (seg != absolute_section 612 && seg != text_section 613 && seg != data_section 614 && seg != bss_section && seg != undefined_section) 615 { 616 set_syntax_error (_("bad segment")); 617 *str = input_line_pointer; 618 input_line_pointer = save_in; 619 return FALSE; 620 } 621 #else 622 (void) seg; 623 #endif 624 625 *str = input_line_pointer; 626 input_line_pointer = save_in; 627 return TRUE; 628 } 629 630 /* Turn a string in input_line_pointer into a floating point constant 631 of type TYPE, and store the appropriate bytes in *LITP. The number 632 of LITTLENUMS emitted is stored in *SIZEP. An error message is 633 returned, or NULL on OK. */ 634 635 const char * 636 md_atof (int type, char *litP, int *sizeP) 637 { 638 /* If this is a bfloat16 type, then parse it slightly differently - 639 as it does not follow the IEEE standard exactly. */ 640 if (type == 'b') 641 { 642 char * t; 643 LITTLENUM_TYPE words[MAX_LITTLENUMS]; 644 FLONUM_TYPE generic_float; 645 646 t = atof_ieee_detail (input_line_pointer, 1, 8, words, &generic_float); 647 648 if (t) 649 input_line_pointer = t; 650 else 651 return _("invalid floating point number"); 652 653 switch (generic_float.sign) 654 { 655 /* Is +Inf. */ 656 case 'P': 657 words[0] = 0x7f80; 658 break; 659 660 /* Is -Inf. */ 661 case 'N': 662 words[0] = 0xff80; 663 break; 664 665 /* Is NaN. */ 666 /* bfloat16 has two types of NaN - quiet and signalling. 667 Quiet NaN has bit[6] == 1 && faction != 0, whereas 668 signalling Nan's have bit[0] == 0 && fraction != 0. 669 Chose this specific encoding as it is the same form 670 as used by other IEEE 754 encodings in GAS. 
*/
	case 0:
	  words[0] = 0x7fff;
	  break;

	default:
	  break;
	}

      /* A bfloat16 constant occupies exactly one 16-bit littlenum.  */
      *sizeP = 2;

      md_number_to_chars (litP, (valueT) words[0], sizeof (LITTLENUM_TYPE));

      return NULL;
    }

  /* Every other type follows IEEE 754 and is handled generically.  */
  return ieee_md_atof (type, litP, sizeP, target_big_endian);
}

/* We handle all bad expressions here, so that we can report the faulty
   instruction in the error message.  */
void
md_operand (expressionS * exp)
{
  if (in_my_get_expression_p)
    exp->X_op = O_illegal;
}

/* Immediate values.  */

/* Errors may be set multiple times during parsing or bit encoding
   (particularly in the Neon bits), but usually the earliest error which is set
   will be the most meaningful.  Avoid overwriting it with later (cascading)
   errors by calling this function.  */

static void
first_error (const char *error)
{
  if (! error_p ())
    set_syntax_error (error);
}

/* Similar to first_error, but this function accepts formatted error
   message.  */
static void
first_error_fmt (const char *format, ...)
{
  va_list args;
  enum
  { size = 100 };
  /* N.B. this single buffer will not cause error messages for different
     instructions to pollute each other; this is because at the end of
     processing of each assembly line, error message if any will be
     collected by as_bad.  */
  static char buffer[size];

  if (! error_p ())
    {
      int ret ATTRIBUTE_UNUSED;
      va_start (args, format);
      ret = vsnprintf (buffer, size, format, args);
      /* The formatted message is expected to fit in the buffer; vsnprintf
	 always NUL-terminates within SIZE bytes.  */
      know (ret <= size - 1 && ret >= 0);
      va_end (args);
      set_syntax_error (buffer);
    }
}

/* Register parsing.  */

/* Generic register parser which is called by other specialized
   register parsers.
   CCP points to what should be the beginning of a register name.
   If it is indeed a valid register name, advance CCP over it and
   return the reg_entry structure; otherwise return NULL.
744 It does not issue diagnostics. */ 745 746 static reg_entry * 747 parse_reg (char **ccp) 748 { 749 char *start = *ccp; 750 char *p; 751 reg_entry *reg; 752 753 #ifdef REGISTER_PREFIX 754 if (*start != REGISTER_PREFIX) 755 return NULL; 756 start++; 757 #endif 758 759 p = start; 760 if (!ISALPHA (*p) || !is_name_beginner (*p)) 761 return NULL; 762 763 do 764 p++; 765 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_'); 766 767 reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start); 768 769 if (!reg) 770 return NULL; 771 772 *ccp = p; 773 return reg; 774 } 775 776 /* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise 777 return FALSE. */ 778 static bfd_boolean 779 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type) 780 { 781 return (reg_type_masks[type] & (1 << reg->type)) != 0; 782 } 783 784 /* Try to parse a base or offset register. Allow SVE base and offset 785 registers if REG_TYPE includes SVE registers. Return the register 786 entry on success, setting *QUALIFIER to the register qualifier. 787 Return null otherwise. 788 789 Note that this function does not issue any diagnostics. 
*/

static const reg_entry *
aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
			aarch64_opnd_qualifier_t *qualifier)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);

  if (reg == NULL)
    return NULL;

  switch (reg->type)
    {
    /* 32-bit forms (Wn, WSP, WZR) take the W qualifier.  */
    case REG_TYPE_R_32:
    case REG_TYPE_SP_32:
    case REG_TYPE_Z_32:
      *qualifier = AARCH64_OPND_QLF_W;
      break;

    /* 64-bit forms (Xn, SP, XZR) take the X qualifier.  */
    case REG_TYPE_R_64:
    case REG_TYPE_SP_64:
    case REG_TYPE_Z_64:
      *qualifier = AARCH64_OPND_QLF_X;
      break;

    case REG_TYPE_ZN:
      /* SVE Z registers are accepted only if REG_TYPE allows them, and
	 they must carry an explicit ".s" or ".d" size suffix.  */
      if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
	  || str[0] != '.')
	return NULL;
      switch (TOLOWER (str[1]))
	{
	case 's':
	  *qualifier = AARCH64_OPND_QLF_S_S;
	  break;
	case 'd':
	  *qualifier = AARCH64_OPND_QLF_S_D;
	  break;
	default:
	  return NULL;
	}
      /* Step past the two-character size suffix.  */
      str += 2;
      break;

    default:
      return NULL;
    }

  *ccp = str;

  return reg;
}

/* Try to parse a base or offset register.  Return the register entry
   on success, setting *QUALIFIER to the register qualifier.  Return null
   otherwise.

   Note that this function does not issue any diagnostics.  */

static const reg_entry *
aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
{
  return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
}

/* Parse the qualifier of a vector register or vector element of type
   REG_TYPE.  Fill in *PARSED_TYPE and return TRUE if the parsing
   succeeds; otherwise return FALSE.

   Accept only one occurrence of:
   4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
   b h s d q */
static bfd_boolean
parse_vector_type_for_operand (aarch64_reg_type reg_type,
			       struct vector_type_el *parsed_type, char **str)
{
  char *ptr = *str;
  unsigned width;
  unsigned element_size;
  enum vector_el_type type;

  /* skip '.'
*/ 871 gas_assert (*ptr == '.'); 872 ptr++; 873 874 if (reg_type == REG_TYPE_ZN || reg_type == REG_TYPE_PN || !ISDIGIT (*ptr)) 875 { 876 width = 0; 877 goto elt_size; 878 } 879 width = strtoul (ptr, &ptr, 10); 880 if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16) 881 { 882 first_error_fmt (_("bad size %d in vector width specifier"), width); 883 return FALSE; 884 } 885 886 elt_size: 887 switch (TOLOWER (*ptr)) 888 { 889 case 'b': 890 type = NT_b; 891 element_size = 8; 892 break; 893 case 'h': 894 type = NT_h; 895 element_size = 16; 896 break; 897 case 's': 898 type = NT_s; 899 element_size = 32; 900 break; 901 case 'd': 902 type = NT_d; 903 element_size = 64; 904 break; 905 case 'q': 906 if (reg_type == REG_TYPE_ZN || width == 1) 907 { 908 type = NT_q; 909 element_size = 128; 910 break; 911 } 912 /* fall through. */ 913 default: 914 if (*ptr != '\0') 915 first_error_fmt (_("unexpected character `%c' in element size"), *ptr); 916 else 917 first_error (_("missing element size")); 918 return FALSE; 919 } 920 if (width != 0 && width * element_size != 64 921 && width * element_size != 128 922 && !(width == 2 && element_size == 16) 923 && !(width == 4 && element_size == 8)) 924 { 925 first_error_fmt (_ 926 ("invalid element size %d and vector size combination %c"), 927 width, *ptr); 928 return FALSE; 929 } 930 ptr++; 931 932 parsed_type->type = type; 933 parsed_type->width = width; 934 935 *str = ptr; 936 937 return TRUE; 938 } 939 940 /* *STR contains an SVE zero/merge predication suffix. Parse it into 941 *PARSED_TYPE and point *STR at the end of the suffix. */ 942 943 static bfd_boolean 944 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str) 945 { 946 char *ptr = *str; 947 948 /* Skip '/'. 
*/ 949 gas_assert (*ptr == '/'); 950 ptr++; 951 switch (TOLOWER (*ptr)) 952 { 953 case 'z': 954 parsed_type->type = NT_zero; 955 break; 956 case 'm': 957 parsed_type->type = NT_merge; 958 break; 959 default: 960 if (*ptr != '\0' && *ptr != ',') 961 first_error_fmt (_("unexpected character `%c' in predication type"), 962 *ptr); 963 else 964 first_error (_("missing predication type")); 965 return FALSE; 966 } 967 parsed_type->width = 0; 968 *str = ptr + 1; 969 return TRUE; 970 } 971 972 /* Parse a register of the type TYPE. 973 974 Return PARSE_FAIL if the string pointed by *CCP is not a valid register 975 name or the parsed register is not of TYPE. 976 977 Otherwise return the register number, and optionally fill in the actual 978 type of the register in *RTYPE when multiple alternatives were given, and 979 return the register shape and element index information in *TYPEINFO. 980 981 IN_REG_LIST should be set with TRUE if the caller is parsing a register 982 list. */ 983 984 static int 985 parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype, 986 struct vector_type_el *typeinfo, bfd_boolean in_reg_list) 987 { 988 char *str = *ccp; 989 const reg_entry *reg = parse_reg (&str); 990 struct vector_type_el atype; 991 struct vector_type_el parsetype; 992 bfd_boolean is_typed_vecreg = FALSE; 993 994 atype.defined = 0; 995 atype.type = NT_invtype; 996 atype.width = -1; 997 atype.index = 0; 998 999 if (reg == NULL) 1000 { 1001 if (typeinfo) 1002 *typeinfo = atype; 1003 set_default_error (); 1004 return PARSE_FAIL; 1005 } 1006 1007 if (! aarch64_check_reg_type (reg, type)) 1008 { 1009 DEBUG_TRACE ("reg type check failed"); 1010 set_default_error (); 1011 return PARSE_FAIL; 1012 } 1013 type = reg->type; 1014 1015 if ((type == REG_TYPE_VN || type == REG_TYPE_ZN || type == REG_TYPE_PN) 1016 && (*str == '.' 
|| (type == REG_TYPE_PN && *str == '/'))) 1017 { 1018 if (*str == '.') 1019 { 1020 if (!parse_vector_type_for_operand (type, &parsetype, &str)) 1021 return PARSE_FAIL; 1022 } 1023 else 1024 { 1025 if (!parse_predication_for_operand (&parsetype, &str)) 1026 return PARSE_FAIL; 1027 } 1028 1029 /* Register if of the form Vn.[bhsdq]. */ 1030 is_typed_vecreg = TRUE; 1031 1032 if (type == REG_TYPE_ZN || type == REG_TYPE_PN) 1033 { 1034 /* The width is always variable; we don't allow an integer width 1035 to be specified. */ 1036 gas_assert (parsetype.width == 0); 1037 atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE; 1038 } 1039 else if (parsetype.width == 0) 1040 /* Expect index. In the new scheme we cannot have 1041 Vn.[bhsdq] represent a scalar. Therefore any 1042 Vn.[bhsdq] should have an index following it. 1043 Except in reglists of course. */ 1044 atype.defined |= NTA_HASINDEX; 1045 else 1046 atype.defined |= NTA_HASTYPE; 1047 1048 atype.type = parsetype.type; 1049 atype.width = parsetype.width; 1050 } 1051 1052 if (skip_past_char (&str, '[')) 1053 { 1054 expressionS exp; 1055 1056 /* Reject Sn[index] syntax. */ 1057 if (!is_typed_vecreg) 1058 { 1059 first_error (_("this type of register can't be indexed")); 1060 return PARSE_FAIL; 1061 } 1062 1063 if (in_reg_list) 1064 { 1065 first_error (_("index not allowed inside register list")); 1066 return PARSE_FAIL; 1067 } 1068 1069 atype.defined |= NTA_HASINDEX; 1070 1071 my_get_expression (&exp, &str, GE_NO_PREFIX, 1); 1072 1073 if (exp.X_op != O_constant) 1074 { 1075 first_error (_("constant expression required")); 1076 return PARSE_FAIL; 1077 } 1078 1079 if (! skip_past_char (&str, ']')) 1080 return PARSE_FAIL; 1081 1082 atype.index = exp.X_add_number; 1083 } 1084 else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0) 1085 { 1086 /* Indexed vector register expected. */ 1087 first_error (_("indexed vector register expected")); 1088 return PARSE_FAIL; 1089 } 1090 1091 /* A vector reg Vn should be typed or indexed. 
*/
  if (type == REG_TYPE_VN && atype.defined == 0)
    {
      /* NOTE(review): the error is recorded but parsing continues and the
	 register number is still returned — presumably the caller inspects
	 the pending error state; confirm against callers.  */
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}

/* Parse register.

   Return the register number on success; return PARSE_FAIL otherwise.

   If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
   the register (e.g. NEON double or quad reg when either has been requested).

   If this is a NEON vector register with additional type information, fill
   in the struct pointed to by VECTYPE (if non-NULL).

   This parser does not handle register list.  */

static int
aarch64_reg_parse (char **ccp, aarch64_reg_type type,
		   aarch64_reg_type *rtype, struct vector_type_el *vectype)
{
  struct vector_type_el atype;
  char *str = *ccp;
  int reg = parse_typed_reg (&str, type, rtype, &atype,
			     /*in_reg_list= */ FALSE);

  if (reg == PARSE_FAIL)
    return PARSE_FAIL;

  if (vectype)
    *vectype = atype;

  *ccp = str;

  return reg;
}

/* Return TRUE iff the two vector shape/index descriptions agree in
   every field.  */
static inline bfd_boolean
eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
{
  return
    e1.type == e2.type
    && e1.defined == e2.defined
    && e1.width == e2.width && e1.index == e2.index;
}

/* This function parses a list of vector registers of type TYPE.
   On success, it returns the parsed register list information in the
   following encoded format:

   bit   18-22   |   13-17   |   7-11    |    2-6    |   0-1
       4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg

   The information of the register shape and/or index is returned in
   *VECTYPE.

   It returns PARSE_FAIL if the register list is invalid.

   The list contains one to four registers.
   Each register can be one of:
     <Vt>.<T>[<index>]
     <Vt>.<T>
   All <T> should be identical.
   All <index> should be identical.
   There are restrictions on <Vt> numbers which are checked later
   (by reg_list_valid_p).  */

static int
parse_vector_reg_list (char **ccp, aarch64_reg_type type,
		       struct vector_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;
  struct vector_type_el typeinfo, typeinfo_first;
  int val, val_range;
  int in_range;
  int ret_val;
  int i;
  bfd_boolean error = FALSE;
  bfd_boolean expect_index = FALSE;

  if (*str != '{')
    {
      set_syntax_error (_("expecting {"));
      return PARSE_FAIL;
    }
  str++;

  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.index = 0;
  ret_val = 0;
  val = -1;
  val_range = -1;
  in_range = 0;
  do
    {
      if (in_range)
	{
	  str++;		/* skip over '-' */
	  /* Remember the low bound of the "Vm - Vn" range just seen.  */
	  val_range = val;
	}
      val = parse_typed_reg (&str, type, NULL, &typeinfo,
			     /*in_reg_list= */ TRUE);
      if (val == PARSE_FAIL)
	{
	  set_first_syntax_error (_("invalid vector register in list"));
	  error = TRUE;
	  continue;
	}
      /* reject [bhsd]n */
      if (type == REG_TYPE_VN && typeinfo.defined == 0)
	{
	  set_first_syntax_error (_("invalid scalar register in list"));
	  error = TRUE;
	  continue;
	}

      if (typeinfo.defined & NTA_HASINDEX)
	expect_index = TRUE;

      if (in_range)
	{
	  if (val < val_range)
	    {
	      set_first_syntax_error
		(_("invalid range in vector register list"));
	      error = TRUE;
	    }
	  val_range++;
	}
      else
	{
	  val_range = val;
	  /* All registers in the list must share the first one's shape
	     and index information.  */
	  if (nb_regs == 0)
	    typeinfo_first = typeinfo;
	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
	    {
	      set_first_syntax_error
		(_("type mismatch in vector register list"));
	      error = TRUE;
	    }
	}
      if (! error)
	/* Pack each register number into the 5-bit field described in
	   the function comment above.  */
	for (i = val_range; i <= val; i++)
	  {
	    ret_val |= i << (5 * nb_regs);
	    nb_regs++;
	  }
      in_range = 0;
    }
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = TRUE;
    }
  str++;

  skip_whitespace (str);

  if (expect_index)
    {
      if (skip_past_char (&str, '['))
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
	  if (exp.X_op != O_constant)
	    {
	      set_first_syntax_error (_("constant expression required."));
	      error = TRUE;
	    }
	  if (! skip_past_char (&str, ']'))
	    error = TRUE;
	  else
	    typeinfo_first.index = exp.X_add_number;
	}
      else
	{
	  set_first_syntax_error (_("expected index"));
	  error = TRUE;
	}
    }

  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = TRUE;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = TRUE;
    }

  /* *CCP is advanced even on error so callers can resynchronize.  */
  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}

/* Directives: register aliases.  */

/* Record STR as an alias for register NUMBER of kind TYPE in the global
   register hash table.  Return the new entry, or NULL if STR already
   names a register (a warning is issued unless it is an identical
   redefinition).  */

static reg_entry *
insert_reg_alias (char *str, int number, aarch64_reg_type type)
{
  reg_entry *new;
  const char *name;

  if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
    {
      if (new->builtin)
	as_warn (_("ignoring attempt to redefine built-in register '%s'"),
		 str);

      /* Only warn about a redefinition if it's not defined as the
	 same register.
	 */
      else if (new->number != number || new->type != type)
	as_warn (_("ignoring redefinition of register alias '%s'"), str);

      return NULL;
    }

  name = xstrdup (str);
  new = XNEW (reg_entry);

  new->name = name;
  new->number = number;
  new->type = type;
  new->builtin = FALSE;

  if (hash_insert (aarch64_reg_hsh, name, (void *) new))
    abort ();

  return new;
}

/* Look for the .req directive.  This is of the form:

	new_register_name .req existing_register_name

   If we find one, or if it looks sufficiently like one that we want to
   handle any error here, return TRUE.  Otherwise return FALSE.  */

static bfd_boolean
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (strncmp (oldname, " .req ", 6) != 0)
    return FALSE;

  oldname += 6;
  if (*oldname == '\0')
    return FALSE;

  old = hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      /* TRUE: the line looked like a .req, so it has been consumed.  */
      return TRUE;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.
     */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only insert the case-folded variant if it differs from the
	 name as written.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return TRUE;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  /* insert_reg_alias copies the name, so NBUF is always ours to free.  */
  free (nbuf);
  return TRUE;
}

/* Should never be called, as .req goes between the alias and the
   register name, not at the beginning of the line.  */
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}

/* The .unreq directive deletes an alias which was previously defined
   by .req.
For example: 1436 1437 my_alias .req r11 1438 .unreq my_alias */ 1439 1440 static void 1441 s_unreq (int a ATTRIBUTE_UNUSED) 1442 { 1443 char *name; 1444 char saved_char; 1445 1446 name = input_line_pointer; 1447 1448 while (*input_line_pointer != 0 1449 && *input_line_pointer != ' ' && *input_line_pointer != '\n') 1450 ++input_line_pointer; 1451 1452 saved_char = *input_line_pointer; 1453 *input_line_pointer = 0; 1454 1455 if (!*name) 1456 as_bad (_("invalid syntax for .unreq directive")); 1457 else 1458 { 1459 reg_entry *reg = hash_find (aarch64_reg_hsh, name); 1460 1461 if (!reg) 1462 as_bad (_("unknown register alias '%s'"), name); 1463 else if (reg->builtin) 1464 as_warn (_("ignoring attempt to undefine built-in register '%s'"), 1465 name); 1466 else 1467 { 1468 char *p; 1469 char *nbuf; 1470 1471 hash_delete (aarch64_reg_hsh, name, FALSE); 1472 free ((char *) reg->name); 1473 free (reg); 1474 1475 /* Also locate the all upper case and all lower case versions. 1476 Do not complain if we cannot find one or the other as it 1477 was probably deleted above. */ 1478 1479 nbuf = strdup (name); 1480 for (p = nbuf; *p; p++) 1481 *p = TOUPPER (*p); 1482 reg = hash_find (aarch64_reg_hsh, nbuf); 1483 if (reg) 1484 { 1485 hash_delete (aarch64_reg_hsh, nbuf, FALSE); 1486 free ((char *) reg->name); 1487 free (reg); 1488 } 1489 1490 for (p = nbuf; *p; p++) 1491 *p = TOLOWER (*p); 1492 reg = hash_find (aarch64_reg_hsh, nbuf); 1493 if (reg) 1494 { 1495 hash_delete (aarch64_reg_hsh, nbuf, FALSE); 1496 free ((char *) reg->name); 1497 free (reg); 1498 } 1499 1500 free (nbuf); 1501 } 1502 } 1503 1504 *input_line_pointer = saved_char; 1505 demand_empty_rest_of_line (); 1506 } 1507 1508 /* Directives: Instruction set selection. */ 1509 1510 #ifdef OBJ_ELF 1511 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF 1512 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05). 
1513 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag), 1514 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */ 1515 1516 /* Create a new mapping symbol for the transition to STATE. */ 1517 1518 static void 1519 make_mapping_symbol (enum mstate state, valueT value, fragS * frag) 1520 { 1521 symbolS *symbolP; 1522 const char *symname; 1523 int type; 1524 1525 switch (state) 1526 { 1527 case MAP_DATA: 1528 symname = "$d"; 1529 type = BSF_NO_FLAGS; 1530 break; 1531 case MAP_INSN: 1532 symname = "$x"; 1533 type = BSF_NO_FLAGS; 1534 break; 1535 default: 1536 abort (); 1537 } 1538 1539 symbolP = symbol_new (symname, now_seg, value, frag); 1540 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL; 1541 1542 /* Save the mapping symbols for future reference. Also check that 1543 we do not place two mapping symbols at the same offset within a 1544 frag. We'll handle overlap between frags in 1545 check_mapping_symbols. 1546 1547 If .fill or other data filling directive generates zero sized data, 1548 the mapping symbol for the following code will have the same value 1549 as the one generated for the data filling directive. In this case, 1550 we replace the old symbol with the new one at the same address. 
*/ 1551 if (value == 0) 1552 { 1553 if (frag->tc_frag_data.first_map != NULL) 1554 { 1555 know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0); 1556 symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP, 1557 &symbol_lastP); 1558 } 1559 frag->tc_frag_data.first_map = symbolP; 1560 } 1561 if (frag->tc_frag_data.last_map != NULL) 1562 { 1563 know (S_GET_VALUE (frag->tc_frag_data.last_map) <= 1564 S_GET_VALUE (symbolP)); 1565 if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP)) 1566 symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, 1567 &symbol_lastP); 1568 } 1569 frag->tc_frag_data.last_map = symbolP; 1570 } 1571 1572 /* We must sometimes convert a region marked as code to data during 1573 code alignment, if an odd number of bytes have to be padded. The 1574 code mapping symbol is pushed to an aligned address. */ 1575 1576 static void 1577 insert_data_mapping_symbol (enum mstate state, 1578 valueT value, fragS * frag, offsetT bytes) 1579 { 1580 /* If there was already a mapping symbol, remove it. */ 1581 if (frag->tc_frag_data.last_map != NULL 1582 && S_GET_VALUE (frag->tc_frag_data.last_map) == 1583 frag->fr_address + value) 1584 { 1585 symbolS *symp = frag->tc_frag_data.last_map; 1586 1587 if (value == 0) 1588 { 1589 know (frag->tc_frag_data.first_map == symp); 1590 frag->tc_frag_data.first_map = NULL; 1591 } 1592 frag->tc_frag_data.last_map = NULL; 1593 symbol_remove (symp, &symbol_rootP, &symbol_lastP); 1594 } 1595 1596 make_mapping_symbol (MAP_DATA, value, frag); 1597 make_mapping_symbol (state, value + bytes, frag); 1598 } 1599 1600 static void mapping_state_2 (enum mstate state, int max_chars); 1601 1602 /* Set the mapping state to STATE. Only call this when about to 1603 emit some STATE bytes to the file. 
   */

void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* Only add the symbol if the offset is > 0:
	 if we're at the first frag, check it's size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
	|| (frag_now_fix () > 0);

      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  mapping_state_2 (state, 0);
}

/* Same as mapping_state, but MAX_CHARS bytes have already been
   allocated.  Put the mapping symbol that far back.  */

static void
mapping_state_2 (enum mstate state, int max_chars)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (!SEG_NORMAL (now_seg))
    return;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  seg_info (now_seg)->tc_segment_info_data.mapstate = state;
  make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
}
#else
#define mapping_state(x)	/* nothing */
#define mapping_state_2(x, y)	/* nothing */
#endif

/* Directives: sectioning and alignment.  */

static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();
  mapping_state (MAP_DATA);
}

/* Handle the .even directive: align to a 2-byte boundary.  */

static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}

/* Directives: Literal pools.  */

/* Return the literal pool of entry size SIZE for the current
   (sub)section, or NULL if none exists yet.  */

static literal_pool *
find_literal_pool (int size)
{
  literal_pool *pool;

  for (pool = list_of_pools; pool != NULL; pool = pool->next)
    {
      if (pool->section == now_seg
	  && pool->sub_section == now_subseg && pool->size == size)
	break;
    }

  return pool;
}

/* Like find_literal_pool, but create (and register) the pool if it
   does not exist.  Also (re)creates the pool's anchor symbol when the
   pool is new or has been emptied by a previous .ltorg.  */

static literal_pool *
find_or_make_literal_pool (int size)
{
  /* Next literal pool ID number.  */
  static unsigned int latest_pool_num = 1;
  literal_pool *pool;

  pool = find_literal_pool (size);

  if (pool == NULL)
    {
      /* Create a new pool.  */
      pool = XNEW (literal_pool);
      if (!pool)
	return NULL;

      /* Currently we always put the literal pool in the current text
	 section.  If we were generating "small" model code where we
	 knew that all code and initialised data was within 1MB then
	 we could output literals to mergeable, read-only data
	 sections.
	 */

      pool->next_free_entry = 0;
      pool->section = now_seg;
      pool->sub_section = now_subseg;
      pool->size = size;
      pool->next = list_of_pools;
      pool->symbol = NULL;

      /* Add it to the list.  */
      list_of_pools = pool;
    }

  /* New pools, and emptied pools, will have a NULL symbol.  */
  if (pool->symbol == NULL)
    {
      pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
				    (valueT) 0, &zero_address_frag);
      pool->id = latest_pool_num++;
    }

  /* Done.  */
  return pool;
}

/* Add the literal of size SIZE in *EXP to the relevant literal pool.
   Return TRUE on success, otherwise return FALSE.  */
static bfd_boolean
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      /* Deduplicate identical constants ...  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))
	break;

      /* ... and identical symbol references.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  set_syntax_error (_("literal pool overflow"));
	  return FALSE;
	}

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	{
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
						  exp->X_add_number);
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	}
      else
	pool->literals[entry].bignum = NULL;
    }

  /* Rewrite *EXP in place as a reference into the pool; the caller's
     instruction will then address the pooled literal.  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return TRUE;
}

/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.  */

static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char *preserved_copy_of_name;

  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.
   */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}

/* Dump all pending literal pools (.ltorg / .pool directive): for each
   pool size (4 and 8 bytes here, and 16 for align==4), align, emit the
   pool anchor symbol and then every pooled literal, then mark the pool
   empty for reuse.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      /* Align pool as you have word accesses.
         Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      /* \002 is an unprintable character so the name cannot clash with
	 user symbols.  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword  */

	  if (exp->X_op == O_big)
	    {
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}

#ifdef OBJ_ELF
/* Forward declarations for functions below, in the MD interface
   section.  */
static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
static struct reloc_table_entry * find_reloc_table_entry (char **);

/* Directives: Data.  */
/* N.B. the support for relocation suffix in this directive needs to be
   implemented properly.  */

static void
s_aarch64_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  mapping_state (MAP_DATA);
  do
    {
      struct reloc_table_entry *reloc;

      expression (&exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  skip_past_char (&input_line_pointer, '#');
	  if (skip_past_char (&input_line_pointer, ':'))
	    {
	      /* A ":reloc:" suffix was given; recognised suffixes are
		 reported as unimplemented, unknown ones as errors.  */
	      reloc = find_reloc_table_entry (&input_line_pointer);
	      if (reloc == NULL)
		as_bad (_("unrecognized relocation suffix"));
	      else
		as_bad (_("unimplemented relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else
	    emit_expr (&exp, (unsigned int) nbytes);
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}

/* Mark symbol that it follows a variant PCS convention.
   */

static void
s_variant_pcs (int ignored ATTRIBUTE_UNUSED)
{
  char *name;
  char c;
  symbolS *sym;
  asymbol *bfdsym;
  elf_symbol_type *elfsym;

  c = get_symbol_name (&name);
  if (!*name)
    as_bad (_("Missing symbol name in directive"));
  sym = symbol_find_or_make (name);
  restore_line_pointer (c);
  demand_empty_rest_of_line ();
  /* Record the marking in the ELF symbol's st_other field.  */
  bfdsym = symbol_get_bfdsym (sym);
  elfsym = elf_symbol_from (bfd_asymbol_bfd (bfdsym), bfdsym);
  gas_assert (elfsym);
  elfsym->internal_elf_sym.st_other |= STO_AARCH64_VARIANT_PCS;
}
#endif /* OBJ_ELF */

/* Output a 32-bit word, but mark as an instruction.  */

static void
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

#ifdef OBJ_ELF
  mapping_state (MAP_INSN);
#endif

  do
    {
      expression (&exp);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("constant expression required"));
	  ignore_rest_of_line ();
	  return;
	}

      /* Instruction encodings are always little-endian; byte-swap on a
	 big-endian target.  */
      if (target_big_endian)
	{
	  unsigned int val = exp.X_add_number;
	  exp.X_add_number = SWAP_32 (val);
	}
      emit_expr (&exp, 4);
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}

/* Mark the current CFI FDE as using the B pointer-authentication key.  */

static void
s_aarch64_cfi_b_key_frame (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  struct fde_entry *fde = frchain_now->frch_cfi_data->cur_fde_data;
  fde->pauth_key = AARCH64_PAUTH_KEY_B;
}

#ifdef OBJ_ELF
/* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction.  */

static void
s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  expression (&exp);
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_ADD);

  demand_empty_rest_of_line ();
}

/* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction.  */

static void
s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* Make sure there is enough room in this frag for the following
     blr.  This trick only works if the blr follows immediately after
     the .tlsdesc directive.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_CALL);

  demand_empty_rest_of_line ();
}

/* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction.
   */

static void
s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  expression (&exp);
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_LDR);

  demand_empty_rest_of_line ();
}
#endif	/* OBJ_ELF */

static void s_aarch64_arch (int);
static void s_aarch64_cpu (int);
static void s_aarch64_arch_extension (int);

/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line.  */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  {"bss", s_bss, 0},
  {"even", s_even, 0},
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  {"inst", s_aarch64_inst, 0},
  {"cfi_b_key_frame", s_aarch64_cfi_b_key_frame, 0},
#ifdef OBJ_ELF
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  {"word", s_aarch64_elf_cons, 4},
  {"long", s_aarch64_elf_cons, 4},
  {"xword", s_aarch64_elf_cons, 8},
  {"dword", s_aarch64_elf_cons, 8},
  {"variant_pcs", s_variant_pcs, 0},
#endif
  {"float16", float_cons, 'h'},
  {"bfloat16", float_cons, 'b'},
  {0, 0, 0}
};


/* Check whether STR points to a register name followed by a comma or the
   end of line; REG_TYPE indicates which register types are checked
   against.  Return TRUE if STR is such a register name; otherwise return
   FALSE.
   The function does not intend to produce any diagnostics, but since
   the register parser aarch64_reg_parse, which is called by this function,
   does produce diagnostics, we call clear_error to clear any diagnostics
   that may be generated by aarch64_reg_parse.
   Also, the function returns FALSE directly if there is any user error
   present at the function entry.  This prevents the existing diagnostics
   state from being spoiled.
   The function currently serves parse_constant_immediate and
   parse_big_immediate only.  */
static bfd_boolean
reg_name_p (char *str, aarch64_reg_type reg_type)
{
  int reg;

  /* Prevent the diagnostics state from being spoiled.  */
  if (error_p ())
    return FALSE;

  /* Note: STR is a local copy, so the caller's pointer is not moved.  */
  reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);

  /* Clear the parsing error that may be set by the reg parser.  */
  clear_error ();

  if (reg == PARSE_FAIL)
    return FALSE;

  skip_whitespace (str);
  if (*str == ',' || is_end_of_line[(unsigned int) *str])
    return TRUE;

  return FALSE;
}

/* Parser functions used exclusively in instruction operands.  */

/* Parse an immediate expression which may not be constant.

   To prevent the expression parser from pushing a register name
   into the symbol table as an undefined symbol, firstly a check is
   done to find out whether STR is a register of type REG_TYPE followed
   by a comma or the end of line.  Return FALSE if STR is such a string.
   */

static bfd_boolean
parse_immediate_expression (char **str, expressionS *exp,
			    aarch64_reg_type reg_type)
{
  if (reg_name_p (*str, reg_type))
    {
      set_recoverable_error (_("immediate operand required"));
      return FALSE;
    }

  my_get_expression (exp, str, GE_OPT_PREFIX, 1);

  if (exp->X_op == O_absent)
    {
      set_fatal_syntax_error (_("missing immediate expression"));
      return FALSE;
    }

  return TRUE;
}

/* Constant immediate-value read function for use in insn parsing.
   STR points to the beginning of the immediate (with the optional
   leading #); *VAL receives the value.  REG_TYPE says which register
   names should be treated as registers rather than as symbolic immediates.

   Return TRUE on success; otherwise return FALSE.  */

static bfd_boolean
parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
{
  expressionS exp;

  if (! parse_immediate_expression (str, &exp, reg_type))
    return FALSE;

  if (exp.X_op != O_constant)
    {
      set_syntax_error (_("constant expression required"));
      return FALSE;
    }

  *val = exp.X_add_number;
  return TRUE;
}

/* Compress a 32-bit IEEE single-precision bit pattern IMM into the
   8-bit AArch64 FP immediate encoding: sign bit plus the seven bits
   below the replicated exponent bits.  Assumes IMM is representable
   (see aarch64_imm_float_p).  */

static uint32_t
encode_imm_float_bits (uint32_t imm)
{
  return ((imm >> 19) & 0x7f)	/* b[25:19] -> b[6:0].  */
    | ((imm >> (31 - 7)) & 0x80);	/* b[31] -> b[7].  */
}

/* Return TRUE if the single-precision floating-point value encoded in IMM
   can be expressed in the AArch64 8-bit signed floating-point format with
   3-bit exponent and normalized 4 bits of precision; in other words, the
   floating-point value must be expressable as
     (+/-) n / 16 * power (2, r)
   where n and r are integers such that 16 <= n <=31 and -3 <= r <= 4.  */

static bfd_boolean
aarch64_imm_float_p (uint32_t imm)
{
  /* If a single-precision floating-point value has the following bit
     pattern, it can be expressed in the AArch64 8-bit floating-point
     format:

     3 32222222 2221111111111
     1 09876543 21098765432109876543210
     n Eeeeeexx xxxx0000000000000000000

     where n, e and each x are either 0 or 1 independently, with
     E == ~ e.  */

  uint32_t pattern;

  /* Prepare the pattern for 'Eeeeee'.  */
  if (((imm >> 30) & 0x1) == 0)
    pattern = 0x3e000000;
  else
    pattern = 0x40000000;

  return (imm & 0x7ffff) == 0		/* lower 19 bits are 0.  */
    && ((imm & 0x7e000000) == pattern);	/* bits 25 - 29 == ~ bit 30.  */
}

/* Return TRUE if the IEEE double value encoded in IMM can be expressed
   as an IEEE float without any loss of precision.  Store the value in
   *FPWORD if so.  */

static bfd_boolean
can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
{
  /* If a double-precision floating-point value has the following bit
     pattern, it can be expressed in a float:

     6 66655555555 5544 44444444 33333333 33222222 22221111 111111
     3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
     n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000

       -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
	 if Eeee_eeee != 1111_1111

     where n, e, s and S are either 0 or 1 independently and where ~ is the
     inverse of E.  */

  uint32_t pattern;
  uint32_t high32 = imm >> 32;
  uint32_t low32 = imm;

  /* Lower 29 bits need to be 0s.  */
  if ((imm & 0x1fffffff) != 0)
    return FALSE;

  /* Prepare the pattern for 'Eeeeeeeee'.  */
  if (((high32 >> 30) & 0x1) == 0)
    pattern = 0x38000000;
  else
    pattern = 0x40000000;

  /* Check E~~~.
*/ 2329 if ((high32 & 0x78000000) != pattern) 2330 return FALSE; 2331 2332 /* Check Eeee_eeee != 1111_1111. */ 2333 if ((high32 & 0x7ff00000) == 0x47f00000) 2334 return FALSE; 2335 2336 *fpword = ((high32 & 0xc0000000) /* 1 n bit and 1 E bit. */ 2337 | ((high32 << 3) & 0x3ffffff8) /* 7 e and 20 s bits. */ 2338 | (low32 >> 29)); /* 3 S bits. */ 2339 return TRUE; 2340 } 2341 2342 /* Return true if we should treat OPERAND as a double-precision 2343 floating-point operand rather than a single-precision one. */ 2344 static bfd_boolean 2345 double_precision_operand_p (const aarch64_opnd_info *operand) 2346 { 2347 /* Check for unsuffixed SVE registers, which are allowed 2348 for LDR and STR but not in instructions that require an 2349 immediate. We get better error messages if we arbitrarily 2350 pick one size, parse the immediate normally, and then 2351 report the match failure in the normal way. */ 2352 return (operand->qualifier == AARCH64_OPND_QLF_NIL 2353 || aarch64_get_qualifier_esize (operand->qualifier) == 8); 2354 } 2355 2356 /* Parse a floating-point immediate. Return TRUE on success and return the 2357 value in *IMMED in the format of IEEE754 single-precision encoding. 2358 *CCP points to the start of the string; DP_P is TRUE when the immediate 2359 is expected to be in double-precision (N.B. this only matters when 2360 hexadecimal representation is involved). REG_TYPE says which register 2361 names should be treated as registers rather than as symbolic immediates. 2362 2363 This routine accepts any IEEE float; it is up to the callers to reject 2364 invalid ones. 
   */

static bfd_boolean
parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p,
			 aarch64_reg_type reg_type)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int64_t val = 0;
  unsigned fpword = 0;
  bfd_boolean hex_p = FALSE;

  skip_past_char (&str, '#');

  fpnum = str;
  skip_whitespace (fpnum);

  if (strncmp (fpnum, "0x", 2) == 0)
    {
      /* Support the hexadecimal representation of the IEEE754 encoding.
	 Double-precision is expected when DP_P is TRUE, otherwise the
	 representation should be in single-precision.  */
      if (! parse_constant_immediate (&str, &val, reg_type))
	goto invalid_fp;

      if (dp_p)
	{
	  /* Narrow the double encoding to a float; reject if any
	     precision would be lost.  */
	  if (!can_convert_double_to_float (val, &fpword))
	    goto invalid_fp;
	}
      else if ((uint64_t) val > 0xffffffff)
	goto invalid_fp;
      else
	fpword = val;

      hex_p = TRUE;
    }
  else if (reg_name_p (str, reg_type))
    {
      /* A register name where an immediate is wanted: recoverable so the
	 caller can try other operand forms.  */
      set_recoverable_error (_("immediate operand required"));
      return FALSE;
    }

  if (! hex_p)
    {
      int i;

      /* Decimal notation: let the generic IEEE reader encode it.  */
      if ((str = atof_ieee (str, 's', words)) == NULL)
	goto invalid_fp;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}
    }

  *immed = fpword;
  *ccp = str;
  return TRUE;

 invalid_fp:
  set_fatal_syntax_error (_("invalid floating-point constant"));
  return FALSE;
}

/* Less-generic immediate-value read function with the possibility of loading
   a big (64-bit) immediate, as required by AdvSIMD Modified immediate
   instructions.
2435 2436 To prevent the expression parser from pushing a register name into the 2437 symbol table as an undefined symbol, a check is firstly done to find 2438 out whether STR is a register of type REG_TYPE followed by a comma or 2439 the end of line. Return FALSE if STR is such a register. */ 2440 2441 static bfd_boolean 2442 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type) 2443 { 2444 char *ptr = *str; 2445 2446 if (reg_name_p (ptr, reg_type)) 2447 { 2448 set_syntax_error (_("immediate operand required")); 2449 return FALSE; 2450 } 2451 2452 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1); 2453 2454 if (inst.reloc.exp.X_op == O_constant) 2455 *imm = inst.reloc.exp.X_add_number; 2456 2457 *str = ptr; 2458 2459 return TRUE; 2460 } 2461 2462 /* Set operand IDX of the *INSTR that needs a GAS internal fixup. 2463 if NEED_LIBOPCODES is non-zero, the fixup will need 2464 assistance from the libopcodes. */ 2465 2466 static inline void 2467 aarch64_set_gas_internal_fixup (struct reloc *reloc, 2468 const aarch64_opnd_info *operand, 2469 int need_libopcodes_p) 2470 { 2471 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP; 2472 reloc->opnd = operand->type; 2473 if (need_libopcodes_p) 2474 reloc->need_libopcodes_p = 1; 2475 }; 2476 2477 /* Return TRUE if the instruction needs to be fixed up later internally by 2478 the GAS; otherwise return FALSE. */ 2479 2480 static inline bfd_boolean 2481 aarch64_gas_internal_fixup_p (void) 2482 { 2483 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP; 2484 } 2485 2486 /* Assign the immediate value to the relevant field in *OPERAND if 2487 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND 2488 needs an internal fixup in a later stage. 2489 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or 2490 IMM.VALUE that may get assigned with the constant. 
   */
static inline void
assign_imm_if_const_or_fixup_later (struct reloc *reloc,
				    aarch64_opnd_info *operand,
				    int addr_off_p,
				    int need_libopcodes_p,
				    int skip_p)
{
  if (reloc->exp.X_op == O_constant)
    {
      if (addr_off_p)
	operand->addr.offset.imm = reloc->exp.X_add_number;
      else
	operand->imm.value = reloc->exp.X_add_number;
      /* Fully resolved: no relocation will be emitted.  */
      reloc->type = BFD_RELOC_UNUSED;
    }
  else
    {
      aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
      /* Tell libopcodes to ignore this operand or not.  This is helpful
	 when one of the operands needs to be fixed up later but we need
	 libopcodes to check the other operands.  */
      operand->skip = skip_p;
    }
}

/* Relocation modifiers.  Each entry in the table contains the textual
   name for the relocation which may be placed before a symbol used as
   a load/store offset, or add immediate.  It must be surrounded by a
   leading and trailing colon, for example:

	ldr	x0, [x1, #:rello:varsym]
	add	x0, x1, #:rello:varsym  */

struct reloc_table_entry
{
  const char *name;		/* Modifier text, without the colons.  */
  int pc_rel;			/* Non-zero if the relocation is PC-relative.  */
  /* The relocation to use for each instruction class that can carry this
     modifier; 0 where the modifier is not permitted on that class.  */
  bfd_reloc_code_real_type adr_type;
  bfd_reloc_code_real_type adrp_type;
  bfd_reloc_code_real_type movw_type;
  bfd_reloc_code_real_type add_type;
  bfd_reloc_code_real_type ldst_type;
  bfd_reloc_code_real_type ld_literal_type;
};

static struct reloc_table_entry reloc_table[] = {
  /* Low 12 bits of absolute address: ADD/i and LDR/STR */
  {"lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_ADD_LO12,
   BFD_RELOC_AARCH64_LDST_LO12,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP */
  {"pg_hi21", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_PCREL,
   0,
   0,
   0,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP, no check */
  {"pg_hi21_nc", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
   0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of unsigned address/value: MOVZ */
  {"abs_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed address/value: MOVN/Z */
  {"abs_g0_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_S,
   0,
   0,
   0},

  /* Less significant bits 0-15 of address/value: MOVK, no check */
  {"abs_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of unsigned address/value: MOVZ */
  {"abs_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed address/value: MOVN/Z */
  {"abs_g1_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_S,
   0,
   0,
   0},

  /* Less significant bits 16-31 of address/value: MOVK, no check */
  {"abs_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 32-47 of unsigned address/value: MOVZ */
  {"abs_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed address/value: MOVN/Z */
  {"abs_g2_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_S,
   0,
   0,
   0},

  /* Less significant bits 32-47 of address/value: MOVK, no check */
  {"abs_g2_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_NC,
   0,
   0,
   0},

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
  {"abs_g3", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G3,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed/unsigned
     address/value: MOVZ */
  {"prel_g0", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
  {"prel_g0_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
  {"prel_g1", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
  {"prel_g1_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
  {"prel_g2", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G2,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
  {"prel_g2_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
   0,
   0,
   0},

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
  {"prel_g3", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G3,
   0,
   0,
   0},

  /* Get to the page containing GOT entry for a symbol.  */
  {"got", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_GOT_PAGE,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_GOT_LD_PREL19},

  /* 12 bit offset into the page containing GOT entry for that symbol.  */
  {"got_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
   0},

  /* 0-15 bits of address/value: MOVK, no check.  */
  {"gotoff_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"gotoff_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
   0,
   0,
   0},

  /* 15 bit offset into the page containing GOT entry for that symbol.  */
  {"gotoff_lo15", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsgd", 0,
   BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsgd_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
   0,
   0},

  /* Lower 16 bits address/value: MOVK.  */
  {"tlsgd_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tlsgd_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsdesc", 0,
   BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsdesc_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
   BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
   0},

  /* Get to the page containing GOT TLS entry for a symbol.
     The same as GD, we allocate two consecutive GOT slots
     for module index and module offset, the only difference
     with GD is the module offset should be initialized to
     zero without any outstanding runtime relocation.  */
  {"tlsldm", 0,
   BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsldm_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
   0,
   0},

  /* 12 bit offset into the module TLS base address.  */
  {"dtprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
   0},

  /* Same as dtprel_lo12, no overflow check.  */
  {"dtprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
   0},

  /* bits[23:12] of offset to the module TLS base address.  */
  {"dtprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
   0,
   0},

  /* bits[15:0] of offset to the module TLS base address.  */
  {"dtprel_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0.  */
  {"dtprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
   0,
   0,
   0},

  /* bits[31:16] of offset to the module TLS base address.  */
  {"dtprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1.  */
  {"dtprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
   0,
   0,
   0},

  /* bits[47:32] of offset to the module TLS base address.  */
  {"dtprel_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
   0,
   0,
   0},

  /* Lower 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
   0,
   0,
   0},

  /* Higher 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel", 0,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"gottprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
   0},

  /* Most significant bits 32-47 of address/value: MOVZ.  */
  {"tprel_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ, no check.  */
  {"tprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ.  */
  {"tprel_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ, no check.  */
  {"tprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
   0,
   0,
   0},

  /* 15bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo15", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
   0},

  /* 14bit offset from got entry to base address of GOT table.
*/ 3055 {"gotpage_lo14", 0, 3056 0, 3057 0, 3058 0, 3059 0, 3060 BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14, 3061 0}, 3062 }; 3063 3064 /* Given the address of a pointer pointing to the textual name of a 3065 relocation as may appear in assembler source, attempt to find its 3066 details in reloc_table. The pointer will be updated to the character 3067 after the trailing colon. On failure, NULL will be returned; 3068 otherwise return the reloc_table_entry. */ 3069 3070 static struct reloc_table_entry * 3071 find_reloc_table_entry (char **str) 3072 { 3073 unsigned int i; 3074 for (i = 0; i < ARRAY_SIZE (reloc_table); i++) 3075 { 3076 int length = strlen (reloc_table[i].name); 3077 3078 if (strncasecmp (reloc_table[i].name, *str, length) == 0 3079 && (*str)[length] == ':') 3080 { 3081 *str += (length + 1); 3082 return &reloc_table[i]; 3083 } 3084 } 3085 3086 return NULL; 3087 } 3088 3089 /* Mode argument to parse_shift and parser_shifter_operand. */ 3090 enum parse_shift_mode 3091 { 3092 SHIFTED_NONE, /* no shifter allowed */ 3093 SHIFTED_ARITH_IMM, /* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or 3094 "#imm{,lsl #n}" */ 3095 SHIFTED_LOGIC_IMM, /* "rn{,lsl|lsr|asl|asr|ror #n}" or 3096 "#imm" */ 3097 SHIFTED_LSL, /* bare "lsl #n" */ 3098 SHIFTED_MUL, /* bare "mul #n" */ 3099 SHIFTED_LSL_MSL, /* "lsl|msl #n" */ 3100 SHIFTED_MUL_VL, /* "mul vl" */ 3101 SHIFTED_REG_OFFSET /* [su]xtw|sxtx {#n} or lsl #n */ 3102 }; 3103 3104 /* Parse a <shift> operator on an AArch64 data processing instruction. 3105 Return TRUE on success; otherwise return FALSE. 
*/ 3106 static bfd_boolean 3107 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode) 3108 { 3109 const struct aarch64_name_value_pair *shift_op; 3110 enum aarch64_modifier_kind kind; 3111 expressionS exp; 3112 int exp_has_prefix; 3113 char *s = *str; 3114 char *p = s; 3115 3116 for (p = *str; ISALPHA (*p); p++) 3117 ; 3118 3119 if (p == *str) 3120 { 3121 set_syntax_error (_("shift expression expected")); 3122 return FALSE; 3123 } 3124 3125 shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str); 3126 3127 if (shift_op == NULL) 3128 { 3129 set_syntax_error (_("shift operator expected")); 3130 return FALSE; 3131 } 3132 3133 kind = aarch64_get_operand_modifier (shift_op); 3134 3135 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL) 3136 { 3137 set_syntax_error (_("invalid use of 'MSL'")); 3138 return FALSE; 3139 } 3140 3141 if (kind == AARCH64_MOD_MUL 3142 && mode != SHIFTED_MUL 3143 && mode != SHIFTED_MUL_VL) 3144 { 3145 set_syntax_error (_("invalid use of 'MUL'")); 3146 return FALSE; 3147 } 3148 3149 switch (mode) 3150 { 3151 case SHIFTED_LOGIC_IMM: 3152 if (aarch64_extend_operator_p (kind)) 3153 { 3154 set_syntax_error (_("extending shift is not permitted")); 3155 return FALSE; 3156 } 3157 break; 3158 3159 case SHIFTED_ARITH_IMM: 3160 if (kind == AARCH64_MOD_ROR) 3161 { 3162 set_syntax_error (_("'ROR' shift is not permitted")); 3163 return FALSE; 3164 } 3165 break; 3166 3167 case SHIFTED_LSL: 3168 if (kind != AARCH64_MOD_LSL) 3169 { 3170 set_syntax_error (_("only 'LSL' shift is permitted")); 3171 return FALSE; 3172 } 3173 break; 3174 3175 case SHIFTED_MUL: 3176 if (kind != AARCH64_MOD_MUL) 3177 { 3178 set_syntax_error (_("only 'MUL' is permitted")); 3179 return FALSE; 3180 } 3181 break; 3182 3183 case SHIFTED_MUL_VL: 3184 /* "MUL VL" consists of two separate tokens. Require the first 3185 token to be "MUL" and look for a following "VL". 
*/ 3186 if (kind == AARCH64_MOD_MUL) 3187 { 3188 skip_whitespace (p); 3189 if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2])) 3190 { 3191 p += 2; 3192 kind = AARCH64_MOD_MUL_VL; 3193 break; 3194 } 3195 } 3196 set_syntax_error (_("only 'MUL VL' is permitted")); 3197 return FALSE; 3198 3199 case SHIFTED_REG_OFFSET: 3200 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL 3201 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX) 3202 { 3203 set_fatal_syntax_error 3204 (_("invalid shift for the register offset addressing mode")); 3205 return FALSE; 3206 } 3207 break; 3208 3209 case SHIFTED_LSL_MSL: 3210 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL) 3211 { 3212 set_syntax_error (_("invalid shift operator")); 3213 return FALSE; 3214 } 3215 break; 3216 3217 default: 3218 abort (); 3219 } 3220 3221 /* Whitespace can appear here if the next thing is a bare digit. */ 3222 skip_whitespace (p); 3223 3224 /* Parse shift amount. */ 3225 exp_has_prefix = 0; 3226 if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL) 3227 exp.X_op = O_absent; 3228 else 3229 { 3230 if (is_immediate_prefix (*p)) 3231 { 3232 p++; 3233 exp_has_prefix = 1; 3234 } 3235 my_get_expression (&exp, &p, GE_NO_PREFIX, 0); 3236 } 3237 if (kind == AARCH64_MOD_MUL_VL) 3238 /* For consistency, give MUL VL the same shift amount as an implicit 3239 MUL #1. */ 3240 operand->shifter.amount = 1; 3241 else if (exp.X_op == O_absent) 3242 { 3243 if (!aarch64_extend_operator_p (kind) || exp_has_prefix) 3244 { 3245 set_syntax_error (_("missing shift amount")); 3246 return FALSE; 3247 } 3248 operand->shifter.amount = 0; 3249 } 3250 else if (exp.X_op != O_constant) 3251 { 3252 set_syntax_error (_("constant shift amount required")); 3253 return FALSE; 3254 } 3255 /* For parsing purposes, MUL #n has no inherent range. The range 3256 depends on the operand and will be checked by operand-specific 3257 routines. 
*/ 3258 else if (kind != AARCH64_MOD_MUL 3259 && (exp.X_add_number < 0 || exp.X_add_number > 63)) 3260 { 3261 set_fatal_syntax_error (_("shift amount out of range 0 to 63")); 3262 return FALSE; 3263 } 3264 else 3265 { 3266 operand->shifter.amount = exp.X_add_number; 3267 operand->shifter.amount_present = 1; 3268 } 3269 3270 operand->shifter.operator_present = 1; 3271 operand->shifter.kind = kind; 3272 3273 *str = p; 3274 return TRUE; 3275 } 3276 3277 /* Parse a <shifter_operand> for a data processing instruction: 3278 3279 #<immediate> 3280 #<immediate>, LSL #imm 3281 3282 Validation of immediate operands is deferred to md_apply_fix. 3283 3284 Return TRUE on success; otherwise return FALSE. */ 3285 3286 static bfd_boolean 3287 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand, 3288 enum parse_shift_mode mode) 3289 { 3290 char *p; 3291 3292 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM) 3293 return FALSE; 3294 3295 p = *str; 3296 3297 /* Accept an immediate expression. */ 3298 if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1)) 3299 return FALSE; 3300 3301 /* Accept optional LSL for arithmetic immediate values. */ 3302 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p)) 3303 if (! parse_shift (&p, operand, SHIFTED_LSL)) 3304 return FALSE; 3305 3306 /* Not accept any shifter for logical immediate values. */ 3307 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p) 3308 && parse_shift (&p, operand, mode)) 3309 { 3310 set_syntax_error (_("unexpected shift operator")); 3311 return FALSE; 3312 } 3313 3314 *str = p; 3315 return TRUE; 3316 } 3317 3318 /* Parse a <shifter_operand> for a data processing instruction: 3319 3320 <Rm> 3321 <Rm>, <shift> 3322 #<immediate> 3323 #<immediate>, LSL #imm 3324 3325 where <shift> is handled by parse_shift above, and the last two 3326 cases are handled by the function above. 3327 3328 Validation of immediate operands is deferred to md_apply_fix. 

   Return TRUE on success; otherwise return FALSE.  */

static bfd_boolean
parse_shifter_operand (char **str, aarch64_opnd_info *operand,
		       enum parse_shift_mode mode)
{
  const reg_entry *reg;
  aarch64_opnd_qualifier_t qualifier;
  enum aarch64_operand_class opd_class
    = aarch64_get_operand_class (operand->type);

  reg = aarch64_reg_parse_32_64 (str, &qualifier);
  if (reg)
    {
      if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
	{
	  set_syntax_error (_("unexpected register in the immediate operand"));
	  return FALSE;
	}

      if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
	{
	  set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
	  return FALSE;
	}

      operand->reg.regno = reg->number;
      operand->qualifier = qualifier;

      /* Accept optional shift operation on register.  */
      if (! skip_past_comma (str))
	return TRUE;

      if (! parse_shift (str, operand, mode))
	return FALSE;

      return TRUE;
    }
  else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
    {
      set_syntax_error
	(_("integer register expected in the extended/shifted operand "
	   "register"));
      return FALSE;
    }

  /* We have a shifted immediate variable.  */
  return parse_shifter_operand_imm (str, operand, mode);
}

/* Parse a <shifter_operand>, allowing the immediate to be introduced by
   an optional :rello: relocation modifier (recorded in inst.reloc).

   Return TRUE on success; return FALSE otherwise.  */

static bfd_boolean
parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
			     enum parse_shift_mode mode)
{
  char *p = *str;

  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a :rello: relocation
     modifier.  If we don't, punt the whole lot to
     parse_shifter_operand.  */

  if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
    {
      struct reloc_table_entry *entry;

      if (p[0] == '#')
	p += 2;
      else
	p++;
      *str = p;

      /* Try to parse a relocation.  Anything else is an error.  */
      if (!(entry = find_reloc_table_entry (str)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return FALSE;
	}

      if (entry->add_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return FALSE;
	}

      /* Save str before we decompose it.  */
      p = *str;

      /* Next, we parse the expression.  */
      if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
	return FALSE;

      /* Record the relocation type (use the ADD variant here).  */
      inst.reloc.type = entry->add_type;
      inst.reloc.pc_rel = entry->pc_rel;

      /* If str is empty, we've reached the end, stop here.  */
      if (**str == '\0')
	return TRUE;

      /* Otherwise, we have a shifted reloc modifier, so rewind to
	 recover the variable name and continue parsing for the shifter.  */
      *str = p;
      return parse_shifter_operand_imm (str, operand, mode);
    }

  return parse_shifter_operand (str, operand, mode);
}

/* Parse all forms of an address expression.  Information is written
   to *OPERAND and/or inst.reloc.

   The A64 instruction set has the following addressing modes:

   Offset
     [base]			// in SIMD ld/st structure
     [base{,#0}]		// in ld/st exclusive
     [base{,#imm}]
     [base,Xm{,LSL #imm}]
     [base,Xm,SXTX {#imm}]
     [base,Wm,(S|U)XTW {#imm}]
   Pre-indexed
     [base]!			// in ldraa/ldrab exclusive
     [base,#imm]!
3456 Post-indexed 3457 [base],#imm 3458 [base],Xm // in SIMD ld/st structure 3459 PC-relative (literal) 3460 label 3461 SVE: 3462 [base,#imm,MUL VL] 3463 [base,Zm.D{,LSL #imm}] 3464 [base,Zm.S,(S|U)XTW {#imm}] 3465 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements 3466 [Zn.S,#imm] 3467 [Zn.D,#imm] 3468 [Zn.S{, Xm}] 3469 [Zn.S,Zm.S{,LSL #imm}] // in ADR 3470 [Zn.D,Zm.D{,LSL #imm}] // in ADR 3471 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR 3472 3473 (As a convenience, the notation "=immediate" is permitted in conjunction 3474 with the pc-relative literal load instructions to automatically place an 3475 immediate value or symbolic address in a nearby literal pool and generate 3476 a hidden label which references it.) 3477 3478 Upon a successful parsing, the address structure in *OPERAND will be 3479 filled in the following way: 3480 3481 .base_regno = <base> 3482 .offset.is_reg // 1 if the offset is a register 3483 .offset.imm = <imm> 3484 .offset.regno = <Rm> 3485 3486 For different addressing modes defined in the A64 ISA: 3487 3488 Offset 3489 .pcrel=0; .preind=1; .postind=0; .writeback=0 3490 Pre-indexed 3491 .pcrel=0; .preind=1; .postind=0; .writeback=1 3492 Post-indexed 3493 .pcrel=0; .preind=0; .postind=1; .writeback=1 3494 PC-relative (literal) 3495 .pcrel=1; .preind=1; .postind=0; .writeback=0 3496 3497 The shift/extension information, if any, will be stored in .shifter. 3498 The base and offset qualifiers will be stored in *BASE_QUALIFIER and 3499 *OFFSET_QUALIFIER respectively, with NIL being used if there's no 3500 corresponding register. 3501 3502 BASE_TYPE says which types of base register should be accepted and 3503 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE 3504 is the type of shifter that is allowed for immediate offsets, 3505 or SHIFTED_NONE if none. 3506 3507 In all other respects, it is the caller's responsibility to check 3508 for addressing modes not supported by the instruction, and to set 3509 inst.reloc.type. 
*/

static bfd_boolean
parse_address_main (char **str, aarch64_opnd_info *operand,
		    aarch64_opnd_qualifier_t *base_qualifier,
		    aarch64_opnd_qualifier_t *offset_qualifier,
		    aarch64_reg_type base_type, aarch64_reg_type offset_type,
		    enum parse_shift_mode imm_shift_mode)
{
  char *p = *str;
  const reg_entry *reg;
  expressionS *exp = &inst.reloc.exp;

  *base_qualifier = AARCH64_OPND_QLF_NIL;
  *offset_qualifier = AARCH64_OPND_QLF_NIL;
  if (! skip_past_char (&p, '['))
    {
      /* No '[': this is the =immediate or label (PC-relative) form.  */
      operand->addr.pcrel = 1;
      operand->addr.preind = 1;

      /* #:<reloc_op>:<symbol>  */
      skip_past_char (&p, '#');
      if (skip_past_char (&p, ':'))
	{
	  bfd_reloc_code_real_type ty;
	  struct reloc_table_entry *entry;

	  /* Try to parse a relocation modifier.  Anything else is
	     an error.  */
	  entry = find_reloc_table_entry (&p);
	  if (! entry)
	    {
	      set_syntax_error (_("unknown relocation modifier"));
	      return FALSE;
	    }

	  /* ADR takes the adr-style reloc; everything else here is a
	     PC-relative literal load.  */
	  switch (operand->type)
	    {
	    case AARCH64_OPND_ADDR_PCREL21:
	      /* adr */
	      ty = entry->adr_type;
	      break;

	    default:
	      ty = entry->ld_literal_type;
	      break;
	    }

	  if (ty == 0)
	    {
	      set_syntax_error
		(_("this relocation modifier is not allowed on this "
		   "instruction"));
	      return FALSE;
	    }

	  /* #:<reloc_op>:  */
	  if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
	    {
	      set_syntax_error (_("invalid relocation expression"));
	      return FALSE;
	    }

	  /* #:<reloc_op>:<expr>  */
	  /* Record the relocation type.  */
	  inst.reloc.type = ty;
	  inst.reloc.pc_rel = entry->pc_rel;
	}
      else
	{

	  if (skip_past_char (&p, '='))
	    /* =immediate; need to generate the literal in the literal pool. */
	    inst.gen_lit_pool = 1;

	  if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
	    {
	      set_syntax_error (_("invalid address"));
	      return FALSE;
	    }
	}

      *str = p;
      return TRUE;
    }

  /* [ */

  /* Parse the base register; BASE_TYPE restricts which register classes
     are acceptable (e.g. X-or-SP, or SVE Z registers).  */
  reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
  if (!reg || !aarch64_check_reg_type (reg, base_type))
    {
      set_syntax_error (_(get_reg_expected_msg (base_type)));
      return FALSE;
    }
  operand->addr.base_regno = reg->number;

  /* [Xn */
  if (skip_past_comma (&p))
    {
      /* [Xn, */
      operand->addr.preind = 1;

      reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
      if (reg)
	{
	  if (!aarch64_check_reg_type (reg, offset_type))
	    {
	      set_syntax_error (_(get_reg_expected_msg (offset_type)));
	      return FALSE;
	    }

	  /* [Xn,Rm */
	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	  /* Shifted index.  */
	  if (skip_past_comma (&p))
	    {
	      /* [Xn,Rm, */
	      if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
		/* Use the diagnostics set in parse_shift, so not set new
		   error message here.  */
		return FALSE;
	    }
	  /* We only accept:
	     [base,Xm]  # For vector plus scalar SVE2 indexing.
	     [base,Xm{,LSL #imm}]
	     [base,Xm,SXTX {#imm}]
	     [base,Wm,(S|U)XTW {#imm}]  */
	  if (operand->shifter.kind == AARCH64_MOD_NONE
	      || operand->shifter.kind == AARCH64_MOD_LSL
	      || operand->shifter.kind == AARCH64_MOD_SXTX)
	    {
	      /* LSL/SXTX (or no shifter) require a 64-bit offset
		 register.  */
	      if (*offset_qualifier == AARCH64_OPND_QLF_W)
		{
		  set_syntax_error (_("invalid use of 32-bit register offset"));
		  return FALSE;
		}
	      /* The SVE2 [Zn.S, Xm] form is the one sanctioned mismatch
		 between base and offset element sizes.  */
	      if (aarch64_get_qualifier_esize (*base_qualifier)
		  != aarch64_get_qualifier_esize (*offset_qualifier)
		  && (operand->type != AARCH64_OPND_SVE_ADDR_ZX
		      || *base_qualifier != AARCH64_OPND_QLF_S_S
		      || *offset_qualifier != AARCH64_OPND_QLF_X))
		{
		  set_syntax_error (_("offset has different size from base"));
		  return FALSE;
		}
	    }
	  else if (*offset_qualifier == AARCH64_OPND_QLF_X)
	    {
	      /* (S|U)XTW implies a 32-bit offset register.  */
	      set_syntax_error (_("invalid use of 64-bit register offset"));
	      return FALSE;
	    }
	}
      else
	{
	  /* [Xn,#:<reloc_op>:<symbol> */
	  skip_past_char (&p, '#');
	  if (skip_past_char (&p, ':'))
	    {
	      struct reloc_table_entry *entry;

	      /* Try to parse a relocation modifier.  Anything else is
		 an error.  */
	      if (!(entry = find_reloc_table_entry (&p)))
		{
		  set_syntax_error (_("unknown relocation modifier"));
		  return FALSE;
		}

	      if (entry->ldst_type == 0)
		{
		  set_syntax_error
		    (_("this relocation modifier is not allowed on this "
		       "instruction"));
		  return FALSE;
		}

	      /* [Xn,#:<reloc_op>: */
	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
		{
		  set_syntax_error (_("invalid relocation expression"));
		  return FALSE;
		}

	      /* [Xn,#:<reloc_op>:<expr> */
	      /* Record the load/store relocation type.  */
	      inst.reloc.type = entry->ldst_type;
	      inst.reloc.pc_rel = entry->pc_rel;
	    }
	  else
	    {
	      /* Plain immediate offset.  */
	      if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
		{
		  set_syntax_error (_("invalid expression in the address"));
		  return FALSE;
		}
	      /* [Xn,<expr> */
	      if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
		/* [Xn,<expr>,<shifter> */
		if (! parse_shift (&p, operand, imm_shift_mode))
		  return FALSE;
	    }
	}
    }

  if (! skip_past_char (&p, ']'))
    {
      set_syntax_error (_("']' expected"));
      return FALSE;
    }

  if (skip_past_char (&p, '!'))
    {
      if (operand->addr.preind && operand->addr.offset.is_reg)
	{
	  set_syntax_error (_("register offset not allowed in pre-indexed "
			      "addressing mode"));
	  return FALSE;
	}
      /* [Xn]! */
      operand->addr.writeback = 1;
    }
  else if (skip_past_comma (&p))
    {
      /* [Xn], */
      operand->addr.postind = 1;
      operand->addr.writeback = 1;

      if (operand->addr.preind)
	{
	  set_syntax_error (_("cannot combine pre- and post-indexing"));
	  return FALSE;
	}

      /* Post-index register offsets are always 64-bit GPRs.  */
      reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
      if (reg)
	{
	  /* [Xn],Xm */
	  if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
	    {
	      set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
	      return FALSE;
	    }

	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	}
      else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
	{
	  /* [Xn],#expr */
	  set_syntax_error (_("invalid expression in the address"));
	  return FALSE;
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}; only accept [Rn]! as a shorthand for [Rn,#0]! for ldraa and
     ldrab, accept [Rn] as a shorthand for [Rn,#0].
     For SVE2 vector plus scalar offsets, allow [Zn.<T>] as shorthand for
     [Zn.<T>, xzr].  */
  if (operand->addr.preind == 0 && operand->addr.postind == 0)
    {
      if (operand->addr.writeback)
	{
	  if (operand->type == AARCH64_OPND_ADDR_SIMM10)
	    {
	      /* Accept [Rn]! as a shorthand for [Rn,#0]!   */
	      operand->addr.offset.is_reg = 0;
	      operand->addr.offset.imm = 0;
	      operand->addr.preind = 1;
	    }
	  else
	    {
	      /* Reject [Rn]!   */
	      set_syntax_error (_("missing offset in the pre-indexed address"));
	      return FALSE;
	    }
	}
      else
	{
	  operand->addr.preind = 1;
	  if (operand->type == AARCH64_OPND_SVE_ADDR_ZX)
	    {
	      /* [Zn.<T>] is shorthand for [Zn.<T>, xzr].  */
	      operand->addr.offset.is_reg = 1;
	      operand->addr.offset.regno = REG_ZR;
	      *offset_qualifier = AARCH64_OPND_QLF_X;
	    }
	  else
	    {
	      /* [Rn] is shorthand for [Rn,#0].  */
	      inst.reloc.exp.X_op = O_constant;
	      inst.reloc.exp.X_add_number = 0;
	    }
	}
    }

  *str = p;
  return TRUE;
}

/* Parse a base AArch64 address (as opposed to an SVE one).  Return TRUE
   on success.  */
static bfd_boolean
parse_address (char **str, aarch64_opnd_info *operand)
{
  aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
  return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
			     REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
}

/* Parse an address in which SVE vector registers and MUL VL are allowed.
   The arguments have the same meaning as for parse_address_main.
   Return TRUE on success.  */
static bfd_boolean
parse_sve_address (char **str, aarch64_opnd_info *operand,
		   aarch64_opnd_qualifier_t *base_qualifier,
		   aarch64_opnd_qualifier_t *offset_qualifier)
{
  return parse_address_main (str, operand, base_qualifier, offset_qualifier,
			     REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
			     SHIFTED_MUL_VL);
}

/* Parse an operand for a MOVZ, MOVN or MOVK instruction.
   Return TRUE on success; otherwise return FALSE.
*/
static bfd_boolean
parse_half (char **str, int *internal_fixup_p)
{
  char *p = *str;

  /* The immediate may optionally be prefixed by '#'.  */
  skip_past_char (&p, '#');

  gas_assert (internal_fixup_p);
  /* *INTERNAL_FIXUP_P is set when no explicit relocation modifier is
     given, i.e. the fixup must be resolved by the assembler itself.  */
  *internal_fixup_p = 0;

  if (*p == ':')
    {
      struct reloc_table_entry *entry;

      /* Try to parse a relocation.  Anything else is an error.  */
      ++p;
      if (!(entry = find_reloc_table_entry (&p)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return FALSE;
	}

      if (entry->movw_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return FALSE;
	}

      inst.reloc.type = entry->movw_type;
    }
  else
    *internal_fixup_p = 1;

  if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
    return FALSE;

  *str = p;
  return TRUE;
}

/* Parse an operand for an ADRP instruction:
     ADRP <Xd>, <label>
   Return TRUE on success; otherwise return FALSE.  */

static bfd_boolean
parse_adrp (char **str)
{
  char *p;

  p = *str;
  if (*p == ':')
    {
      struct reloc_table_entry *entry;

      /* Try to parse a relocation.  Anything else is an error.  */
      ++p;
      if (!(entry = find_reloc_table_entry (&p)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return FALSE;
	}

      if (entry->adrp_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return FALSE;
	}

      inst.reloc.type = entry->adrp_type;
    }
  else
    /* No modifier: default to the plain page-relative relocation.  */
    inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;

  /* ADRP is always PC-relative.  */
  inst.reloc.pc_rel = 1;

  if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
    return FALSE;

  *str = p;
  return TRUE;
}

/* Miscellaneous. */

/* Parse a symbolic operand such as "pow2" at *STR.
ARRAY is an array
   of SIZE tokens in which index I gives the token for field value I,
   or is null if field value I is invalid.  REG_TYPE says which register
   names should be treated as registers rather than as symbolic immediates.

   Return true on success, moving *STR past the operand and storing the
   field value in *VAL.  */

static int
parse_enum_string (char **str, int64_t *val, const char *const *array,
		   size_t size, aarch64_reg_type reg_type)
{
  expressionS exp;
  char *p, *q;
  size_t i;

  /* Match C-like tokens.  */
  p = q = *str;
  while (ISALNUM (*q))
    q++;

  /* First try a case-insensitive exact match against the token names.  */
  for (i = 0; i < size; ++i)
    if (array[i]
	&& strncasecmp (array[i], p, q - p) == 0
	&& array[i][q - p] == 0)
      {
	*val = i;
	*str = q;
	return TRUE;
      }

  /* Fall back to a numeric immediate, which must be in [0, SIZE).  */
  if (!parse_immediate_expression (&p, &exp, reg_type))
    return FALSE;

  if (exp.X_op == O_constant
      && (uint64_t) exp.X_add_number < size)
    {
      *val = exp.X_add_number;
      *str = p;
      return TRUE;
    }

  /* Use the default error for this operand.  */
  return FALSE;
}

/* Parse an option for a preload instruction.  Returns the encoding for the
   option, or PARSE_FAIL.  */

static int
parse_pldop (char **str)
{
  char *p, *q;
  const struct aarch64_name_value_pair *o;

  /* Collect the alphanumeric option token and look it up.  */
  p = q = *str;
  while (ISALNUM (*q))
    q++;

  o = hash_find_n (aarch64_pldop_hsh, p, q - p);
  if (!o)
    return PARSE_FAIL;

  *str = q;
  return o->value;
}

/* Parse an option for a barrier instruction.  Returns the encoding for the
   option, or PARSE_FAIL.
*/

static int
parse_barrier (char **str)
{
  char *p, *q;
  const asm_barrier_opt *o;

  /* Collect the alphabetic option token and look it up.  */
  p = q = *str;
  while (ISALPHA (*q))
    q++;

  o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
  if (!o)
    return PARSE_FAIL;

  *str = q;
  return o->value;
}

/* Parse an operand for a PSB barrier.  Set *HINT_OPT to the hint-option record
   return 0 if successful.  Otherwise return PARSE_FAIL.  */

static int
parse_barrier_psb (char **str,
		   const struct aarch64_name_value_pair ** hint_opt)
{
  char *p, *q;
  const struct aarch64_name_value_pair *o;

  p = q = *str;
  while (ISALPHA (*q))
    q++;

  o = hash_find_n (aarch64_hint_opt_hsh, p, q - p);
  if (!o)
    {
      set_fatal_syntax_error
	( _("unknown or missing option to PSB"));
      return PARSE_FAIL;
    }

  /* 0x11 is the hint encoding of CSYNC.  */
  if (o->value != 0x11)
    {
      /* PSB only accepts option name 'CSYNC'.  */
      set_syntax_error
	(_("the specified option is not accepted for PSB"));
      return PARSE_FAIL;
    }

  *str = q;
  *hint_opt = o;
  return 0;
}

/* Parse an operand for BTI.  Set *HINT_OPT to the hint-option record
   return 0 if successful.  Otherwise return PARSE_FAIL.  */

static int
parse_bti_operand (char **str,
		   const struct aarch64_name_value_pair ** hint_opt)
{
  char *p, *q;
  const struct aarch64_name_value_pair *o;

  p = q = *str;
  while (ISALPHA (*q))
    q++;

  o = hash_find_n (aarch64_hint_opt_hsh, p, q - p);
  if (!o)
    {
      set_fatal_syntax_error
	( _("unknown option to BTI"));
      return PARSE_FAIL;
    }

  switch (o->value)
    {
    /* Valid BTI operands.
*/
    case HINT_OPD_C:
    case HINT_OPD_J:
    case HINT_OPD_JC:
      break;

    default:
      /* The name hashed to a hint option, but not one BTI accepts.  */
      set_syntax_error
	(_("unknown option to BTI"));
      return PARSE_FAIL;
    }

  *str = q;
  *hint_opt = o;
  return 0;
}

/* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
   Returns the encoding for the option, or PARSE_FAIL.

   If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
   implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.

   If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
   field, otherwise as a system register.
*/

static int
parse_sys_reg (char **str, struct hash_control *sys_regs,
	       int imple_defined_p, int pstatefield_p,
	       uint32_t* flags)
{
  char *p, *q;
  char buf[32];
  const aarch64_sys_reg *o;
  int value;

  /* Copy a lower-cased version of the name into BUF for lookup, leaving
     room for the terminating NUL.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + 31)
      *p++ = TOLOWER (*q);
  *p = '\0';
  /* Assert that BUF is large enough; the copy above silently truncates,
     so a longer name would trip this assertion rather than mis-parse.  */
  gas_assert (p - buf == q - *str);

  o = hash_find (sys_regs, buf);
  if (!o)
    {
      if (!imple_defined_p)
	return PARSE_FAIL;
      else
	{
	  /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>.
*/
	  unsigned int op0, op1, cn, cm, op2;

	  /* Accept only well-formed, in-range generic names.  */
	  if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
	      != 5)
	    return PARSE_FAIL;
	  if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
	    return PARSE_FAIL;
	  /* Pack the fields into the 16-bit system-register encoding.  */
	  value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
	  if (flags)
	    *flags = 0;
	}
    }
  else
    {
      /* Known name: diagnose uses not supported by the selected processor
	 (as errors) or deprecated names (as warnings), but still return
	 the encoding so assembly can continue.  */
      if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support PSTATE field "
		  "name '%s'"), buf);
      if (!pstatefield_p && !aarch64_sys_reg_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support system register "
		  "name '%s'"), buf);
      if (aarch64_sys_reg_deprecated_p (o))
	as_warn (_("system register name '%s' is deprecated and may be "
		   "removed in a future release"), buf);
      value = o->value;
      if (flags)
	*flags = o->flags;
    }

  *str = q;
  return value;
}

/* Parse a system reg for ic/dc/at/tlbi instructions.  Returns the table entry
   for the option, or NULL.  */

static const aarch64_sys_ins_reg *
parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
{
  char *p, *q;
  char buf[32];
  const aarch64_sys_ins_reg *o;

  /* Copy a lower-cased version of the name into BUF for lookup.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + 31)
      *p++ = TOLOWER (*q);
  *p = '\0';

  o = hash_find (sys_ins_regs, buf);
  if (!o)
    return NULL;

  if (!aarch64_sys_ins_reg_supported_p (cpu_variant, o))
    as_bad (_("selected processor does not support system register "
	      "name '%s'"), buf);

  *str = q;
  return o;
}

/* Helper macros for parse_operands; each consumes input from the local
   STR and jumps to the shared `failure' label on error.  */

#define po_char_or_fail(chr) do {				\
    if (! skip_past_char (&str, chr))				\
      goto failure;						\
} while (0)

#define po_reg_or_fail(regtype) do {				\
    val = aarch64_reg_parse (&str, regtype, &rtype, NULL);	\
    if (val == PARSE_FAIL)					\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
  } while (0)

#define po_int_reg_or_fail(reg_type) do {			\
    reg = aarch64_reg_parse_32_64 (&str, &qualifier);		\
    if (!reg || !aarch64_check_reg_type (reg, reg_type))	\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
    info->reg.regno = reg->number;				\
    info->qualifier = qualifier;				\
  } while (0)

#define po_imm_nc_or_fail() do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
  } while (0)

#define po_imm_or_fail(min, max) do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
    if (val < min || val > max)					\
      {								\
	set_fatal_syntax_error (_("immediate value out of range "\
				  #min " to "#max));		\
	goto failure;						\
      }								\
  } while (0)

#define po_enum_or_fail(array) do {				\
    if (!parse_enum_string (&str, &val, array,			\
			    ARRAY_SIZE (array), imm_reg_type))	\
      goto failure;						\
  } while (0)

#define po_misc_or_fail(expr) do {				\
    if (!expr)							\
      goto failure;						\
  } while (0)

/* encode the 12-bit imm field of Add/sub immediate */
static inline uint32_t
encode_addsub_imm (uint32_t imm)
{
  return imm << 10;
}

/* encode the shift amount field of Add/sub immediate */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t cnt)
{
  return cnt << 22;
}


/* encode the imm field of Adr instruction */
static inline uint32_t
encode_adr_imm (uint32_t imm)
{
  return (((imm & 0x3) << 29)	/*  [1:0] -> [30:29] */
	  | ((imm & (0x7ffff << 2)) << 3));	/* [20:2] -> [23:5]  */
}

/* encode
the immediate field of Move wide immediate */
static inline uint32_t
encode_movw_imm (uint32_t imm)
{
  return imm << 5;
}

/* encode the 26-bit offset of unconditional branch */
static inline uint32_t
encode_branch_ofs_26 (uint32_t ofs)
{
  return ofs & ((1 << 26) - 1);
}

/* encode the 19-bit offset of conditional branch and compare & branch */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t ofs)
{
  return (ofs & ((1 << 19) - 1)) << 5;
}

/* encode the 19-bit offset of ld literal */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t ofs)
{
  return (ofs & ((1 << 19) - 1)) << 5;
}

/* Encode the 14-bit offset of test & branch.  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t ofs)
{
  return (ofs & ((1 << 14) - 1)) << 5;
}

/* Encode the 16-bit imm field of svc/hvc/smc.  */
static inline uint32_t
encode_svc_imm (uint32_t imm)
{
  return imm << 5;
}

/* Reencode add(s) to sub(s), or sub(s) to add(s).  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  /* Bit 30 distinguishes ADD(S) from SUB(S); flip it.  */
  return opcode ^ (1 << 30);
}

static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  /* Set bit 30 to turn MOVN/MOVZ-family encoding into MOVZ.  */
  return opcode | (1 << 30);
}

static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  /* Clear bit 30 to turn MOVN/MOVZ-family encoding into MOVN.  */
  return opcode & ~(1 << 30);
}

/* Overall per-instruction processing.	*/

/* We need to be able to fix up arbitrary expressions in some statements.
   This is so that we can handle symbols that are an arbitrary distance from
   the pc.  The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
   which returns part of an address in a form which will be valid for
   a data instruction.
We do this by pushing the expression into a symbol
   in the expr_section, and creating a fix for that.  */

static fixS *
fix_new_aarch64 (fragS * frag,
		 int where,
		 short int size, expressionS * exp, int pc_rel, int reloc)
{
  fixS *new_fix;

  switch (exp->X_op)
    {
    /* Simple expressions can be fixed up directly.  */
    case O_constant:
    case O_symbol:
    case O_add:
    case O_subtract:
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
      break;

    default:
      /* Anything more complex is wrapped in an expression symbol so the
	 fix machinery can resolve it later.  */
      new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
			 pc_rel, reloc);
      break;
    }
  return new_fix;
}

/* Diagnostics on operands errors.  */

/* By default, output verbose error message.
   Disable the verbose error message by -mno-verbose-error.  */
static int verbose_error_p = 1;

#ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging.  */
const char* operand_mismatch_kind_names[] =
{
  "AARCH64_OPDE_NIL",
  "AARCH64_OPDE_RECOVERABLE",
  "AARCH64_OPDE_SYNTAX_ERROR",
  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
  "AARCH64_OPDE_INVALID_VARIANT",
  "AARCH64_OPDE_OUT_OF_RANGE",
  "AARCH64_OPDE_UNALIGNED",
  "AARCH64_OPDE_REG_LIST",
  "AARCH64_OPDE_OTHER_ERROR",
};
#endif /* DEBUG_AARCH64 */

/* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.

   When multiple errors of different kinds are found in the same assembly
   line, only the error of the highest severity will be picked up for
   issuing the diagnostics.
*/

static inline bfd_boolean
operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
				 enum aarch64_operand_error_kind rhs)
{
  /* The comparison below relies on the enum values being ordered by
     increasing severity; assert that ordering here.  */
  gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
  gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
  gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
  gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
  return lhs > rhs;
}

/* Helper routine to get the mnemonic name from the assembly instruction
   line; should only be called for the diagnosis purpose, as there is
   string copy operation involved, which may affect the runtime
   performance if used elsewhere.  */

static const char*
get_mnemonic_name (const char *str)
{
  static char mnemonic[32];
  char *ptr;

  /* Get the first 31 bytes and assume that the full name is included.  */
  strncpy (mnemonic, str, 31);
  mnemonic[31] = '\0';

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
    ;

  *ptr = '\0';

  /* Append '...' to the truncated long name.  */
  if (ptr - mnemonic == 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}

/* Reset INSTRUCTION to a clean state ready for re-parsing, with the
   relocation type marked as "none yet".  */
static void
reset_aarch64_instruction (aarch64_instruction *instruction)
{
  memset (instruction, '\0', sizeof (aarch64_instruction));
  instruction->reloc.type = BFD_RELOC_UNUSED;
}

/* Data structures storing one user error in the assembly code related to
   operands.
*/

struct operand_error_record
{
  const aarch64_opcode *opcode;		/* Opcode template this error is for.  */
  aarch64_operand_error detail;		/* The error itself.  */
  struct operand_error_record *next;	/* Singly-linked list link.  */
};

typedef struct operand_error_record operand_error_record;

struct operand_errors
{
  operand_error_record *head;
  operand_error_record *tail;
};

typedef struct operand_errors operand_errors;

/* Top-level data structure reporting user errors for the current line of
   the assembly code.
   The way md_assemble works is that all opcodes sharing the same mnemonic
   name are iterated to find a match to the assembly line.  In this data
   structure, each of the such opcodes will have one operand_error_record
   allocated and inserted.  In other words, excessive errors related with
   a single opcode are disregarded.  */
operand_errors operand_error_report;

/* Free record nodes.  */
static operand_error_record *free_opnd_error_record_nodes = NULL;

/* Initialize the data structure that stores the operand mismatch
   information on assembling one line of the assembly code.  */
static void
init_operand_error_report (void)
{
  if (operand_error_report.head != NULL)
    {
      gas_assert (operand_error_report.tail != NULL);
      /* Move the whole list onto the free list in one splice rather than
	 freeing the nodes, so they can be reused for the next line.  */
      operand_error_report.tail->next = free_opnd_error_record_nodes;
      free_opnd_error_record_nodes = operand_error_report.head;
      operand_error_report.head = NULL;
      operand_error_report.tail = NULL;
      return;
    }
  gas_assert (operand_error_report.tail == NULL);
}

/* Return TRUE if some operand error has been recorded during the
   parsing of the current assembly line using the opcode *OPCODE;
   otherwise return FALSE.
*/
static inline bfd_boolean
opcode_has_operand_error_p (const aarch64_opcode *opcode)
{
  /* Records are inserted at the head, so only the head needs checking.  */
  operand_error_record *record = operand_error_report.head;
  return record && record->opcode == opcode;
}

/* Add the error record *NEW_RECORD to operand_error_report.  The record's
   OPCODE field is initialized with OPCODE.
   N.B. only one record for each opcode, i.e. the maximum of one error is
   recorded for each instruction template.  */

static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record.  */
      if (free_opnd_error_record_nodes == NULL)
	{
	  record = XNEW (operand_error_record);
	}
      else
	{
	  record = free_opnd_error_record_nodes;
	  free_opnd_error_record_nodes = record->next;
	}
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
	operand_error_report.tail = record;
    }
  else if (record->detail.kind != AARCH64_OPDE_NIL
	   && record->detail.index <= new_record->detail.index
	   && operand_error_higher_severity_p (record->detail.kind,
					       new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
	 single opcode, only record the error of the leftmost operand and
	 only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
		   " the existing error %s on operand %d",
		   operand_mismatch_kind_names[new_record->detail.kind],
		   new_record->detail.index,
		   operand_mismatch_kind_names[record->detail.kind],
		   record->detail.index);
      return;
    }

  record->detail = new_record->detail;
}

/* Convenience wrapper: build an on-stack record from OPCODE and
   *ERROR_INFO and add it to the report.  */
static inline void
record_operand_error_info (const aarch64_opcode *opcode,
			   aarch64_operand_error *error_info)
{
  operand_error_record record;
  record.opcode = opcode;
  record.detail = *error_info;
  add_operand_error_record (&record);
}

/* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
   error message *ERROR, for operand IDX (count from 0).  */

static void
record_operand_error (const aarch64_opcode *opcode, int idx,
		      enum aarch64_operand_error_kind kind,
		      const char* error)
{
  aarch64_operand_error info;
  memset(&info, 0, sizeof (info));
  info.index = idx;
  info.kind = kind;
  info.error = error;
  info.non_fatal = FALSE;
  record_operand_error_info (opcode, &info);
}

/* As record_operand_error, but also attach the three EXTRA_DATA values
   (e.g. range bounds) to the recorded error.  */
static void
record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
				enum aarch64_operand_error_kind kind,
				const char* error, const int *extra_data)
{
  aarch64_operand_error info;
  info.index = idx;
  info.kind = kind;
  info.error = error;
  info.data[0] = extra_data[0];
  info.data[1] = extra_data[1];
  info.data[2] = extra_data[2];
  info.non_fatal = FALSE;
  record_operand_error_info (opcode, &info);
}

/* Record an out-of-range error for operand IDX, with the permitted range
   [LOWER_BOUND, UPPER_BOUND] attached as data.  */
static void
record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
				   const char* error, int lower_bound,
				   int upper_bound)
{
  int data[3] = {lower_bound, upper_bound, 0};
  record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
				  error, data);
}

/* Remove the operand error record for *OPCODE.  */
static void ATTRIBUTE_UNUSED
remove_operand_error_record (const aarch64_opcode *opcode)
{
  if (opcode_has_operand_error_p (opcode))
    {
      /* The matching record, if any, is always at the head of the list.  */
      operand_error_record* record = operand_error_report.head;
      gas_assert (record != NULL && operand_error_report.tail != NULL);
      operand_error_report.head = record->next;
      record->next = free_opnd_error_record_nodes;
      free_opnd_error_record_nodes = record;
      if (operand_error_report.head == NULL)
	{
	  gas_assert (operand_error_report.tail == record);
	  operand_error_report.tail = NULL;
	}
    }
}

/* Given the instruction in *INSTR, return the index of the best matched
   qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.

   Return -1 if there is no qualifier sequence; return the first match
   if there are multiple matches found.  */

static int
find_best_match (const aarch64_inst *instr,
		 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
{
  int i, num_opnds, max_num_matched, idx;

  num_opnds = aarch64_num_of_operands (instr->opcode);
  if (num_opnds == 0)
    {
      DEBUG_TRACE ("no operand");
      return -1;
    }

  max_num_matched = 0;
  idx = 0;

  /* For each pattern.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j, num_matched;
      const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;

      /* Most opcodes have far fewer patterns in the list; an empty
	 sequence marks the end.  */
      if (empty_qualifier_sequence_p (qualifiers))
	{
	  DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
	  break;
	}

      /* Count how many operand qualifiers agree with this pattern.  */
      for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
	if (*qualifiers == instr->operands[j].qualifier)
	  ++num_matched;

      if (num_matched > max_num_matched)
	{
	  max_num_matched = num_matched;
	  idx = i;
	}
    }

  DEBUG_TRACE ("return with %d", idx);
  return idx;
}

/* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
   corresponding operands in *INSTR.  */

static inline void
assign_qualifier_sequence (aarch64_inst *instr,
			   const aarch64_opnd_qualifier_t *qualifiers)
{
  int i = 0;
  int num_opnds = aarch64_num_of_operands (instr->opcode);
  gas_assert (num_opnds);
  for (i = 0; i < num_opnds; ++i, ++qualifiers)
    instr->operands[i].qualifier = *qualifiers;
}

/* Print operands for the diagnosis purpose.  */

static void
print_operands (char *buf, const aarch64_opcode *opcode,
		const aarch64_opnd_info *opnds)
{
  int i;

  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      char str[128];

      /* We regard the opcode operand info more, however we also look into
	 the inst->operands to support the disassembling of the optional
	 operand.
	 The two operand code should be the same in all cases, apart from
	 when the operand can be optional.  */
      if (opcode->operands[i] == AARCH64_OPND_NIL
	  || opnds[i].type == AARCH64_OPND_NIL)
	break;

      /* Generate the operand string in STR.  */
      aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
			     NULL);

      /* Delimiter.  */
      if (str[0] != '\0')
	strcat (buf, i == 0 ? " " : ", ");

      /* Append the operand string.  */
      strcat (buf, str);
    }
}

/* Send to stderr a string as information.
 */

static void
output_info (const char *format, ...)
{
  const char *file;
  unsigned int line;
  va_list args;

  /* Prefix with the current "file:line: " so the note lines up with the
     as_bad/as_warn diagnostics it accompanies.  */
  file = as_where (&line);
  if (file)
    {
      if (line != 0)
	fprintf (stderr, "%s:%u: ", file, line);
      else
	fprintf (stderr, "%s: ", file);
    }
  fprintf (stderr, _("Info: "));
  va_start (args, format);
  vfprintf (stderr, format, args);
  va_end (args);
  (void) putc ('\n', stderr);
}

/* Output one operand error record.  STR is the assembly line being
   diagnosed; RECORD carries the error detail and the opcode template
   it was collected against.  */

static void
output_operand_error_record (const operand_error_record *record, char *str)
{
  const aarch64_operand_error *detail = &record->detail;
  int idx = detail->index;
  const aarch64_opcode *opcode = record->opcode;
  enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
				: AARCH64_OPND_NIL);

  /* Non-fatal records are reported as warnings, everything else is an
     error.  */
  typedef void (*handler_t)(const char *format, ...);
  handler_t handler = detail->non_fatal ? as_warn : as_bad;

  switch (detail->kind)
    {
    case AARCH64_OPDE_NIL:
      gas_assert (0);
      break;
    case AARCH64_OPDE_SYNTAX_ERROR:
    case AARCH64_OPDE_RECOVERABLE:
    case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
    case AARCH64_OPDE_OTHER_ERROR:
      /* Use the prepared error message if there is, otherwise use the
	 operand description string to describe the error.  */
      if (detail->error != NULL)
	{
	  if (idx < 0)
	    handler (_("%s -- `%s'"), detail->error, str);
	  else
	    handler (_("%s at operand %d -- `%s'"),
		     detail->error, idx + 1, str);
	}
      else
	{
	  gas_assert (idx >= 0);
	  handler (_("operand %d must be %s -- `%s'"), idx + 1,
		   aarch64_get_operand_desc (opd_code), str);
	}
      break;

    case AARCH64_OPDE_INVALID_VARIANT:
      handler (_("operand mismatch -- `%s'"), str);
      if (verbose_error_p)
	{
	  /* We will try to correct the erroneous instruction and also provide
	     more information e.g. all other valid variants.

	     The string representation of the corrected instruction and other
	     valid variants are generated by

	     1) obtaining the intermediate representation of the erroneous
	     instruction;
	     2) manipulating the IR, e.g. replacing the operand qualifier;
	     3) printing out the instruction by calling the printer functions
	     shared with the disassembler.

	     The limitation of this method is that the exact input assembly
	     line cannot be accurately reproduced in some cases, for example an
	     optional operand present in the actual assembly line will be
	     omitted in the output; likewise for the optional syntax rules,
	     e.g. the # before the immediate.  Another limitation is that the
	     assembly symbols and relocation operations in the assembly line
	     currently cannot be printed out in the error report.  Last but not
	     least, when there is other error(s) co-exist with this error, the
	     'corrected' instruction may be still incorrect, e.g. given
	       'ldnp h0,h1,[x0,#6]!'
	     this diagnosis will provide the version:
	       'ldnp s0,s1,[x0,#6]!'
	     which is still not right.  */
	  size_t len = strlen (get_mnemonic_name (str));
	  int i, qlf_idx;
	  bfd_boolean result;
	  char buf[2048];
	  aarch64_inst *inst_base = &inst.base;
	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;

	  /* Init inst.  */
	  reset_aarch64_instruction (&inst);
	  inst_base->opcode = opcode;

	  /* Reset the error report so that there is no side effect on the
	     following operand parsing.  */
	  init_operand_error_report ();

	  /* Fill inst.  */
	  result = parse_operands (str + len, opcode)
	    && programmer_friendly_fixup (&inst);
	  gas_assert (result);
	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
					  NULL, NULL, insn_sequence);
	  /* The encode is expected to fail here; we only need the IR.  */
	  gas_assert (!result);

	  /* Find the most matched qualifier sequence.  */
	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
	  gas_assert (qlf_idx > -1);

	  /* Assign the qualifiers.  */
	  assign_qualifier_sequence (inst_base,
				     opcode->qualifiers_list[qlf_idx]);

	  /* Print the hint.  */
	  output_info (_(" did you mean this?"));
	  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
	  print_operands (buf, opcode, inst_base->operands);
	  output_info (_(" %s"), buf);

	  /* Print out other variant(s) if there is any.  */
	  if (qlf_idx != 0 ||
	      !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
	    output_info (_(" other valid variant(s):"));

	  /* For each pattern.  */
	  qualifiers_list = opcode->qualifiers_list;
	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
	    {
	      /* Most opcodes has much fewer patterns in the list.
		 First NIL qualifier indicates the end in the list.  */
	      if (empty_qualifier_sequence_p (*qualifiers_list))
		break;

	      if (i != qlf_idx)
		{
		  /* Mnemonics name.  */
		  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));

		  /* Assign the qualifiers.  */
		  assign_qualifier_sequence (inst_base, *qualifiers_list);

		  /* Print instruction.  */
		  print_operands (buf, opcode, inst_base->operands);

		  output_info (_(" %s"), buf);
		}
	    }
	}
      break;

    case AARCH64_OPDE_UNTIED_OPERAND:
      handler (_("operand %d must be the same register as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_OUT_OF_RANGE:
      if (detail->data[0] != detail->data[1])
	handler (_("%s out of range %d to %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0], detail->data[1], idx + 1, str);
      else
	handler (_("%s must be %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_REG_LIST:
      if (detail->data[0] == 1)
	handler (_("invalid number of registers in the list; "
		   "only 1 register is expected at operand %d -- `%s'"),
		 idx + 1, str);
      else
	handler (_("invalid number of registers in the list; "
		   "%d registers are expected at operand %d -- `%s'"),
		 detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_UNALIGNED:
      handler (_("immediate value must be a multiple of "
		 "%d at operand %d -- `%s'"),
	       detail->data[0], idx + 1, str);
      break;

    default:
      gas_assert (0);
      break;
    }
}

/* Process and output the error message about the operand mismatching.

   When this function is called, the operand error information had
   been collected for an assembly line and there will be multiple
   errors in the case of multiple instruction templates; output the
   error message that most closely describes the problem.

   The errors to be printed can be filtered on printing all errors
   or only non-fatal errors.  This distinction has to be made because
   the error buffer may already be filled with fatal errors we don't want to
   print due to the different instruction templates.  */

static void
output_operand_error_report (char *str, bfd_boolean non_fatal_only)
{
  int largest_error_pos;
  const char *msg = NULL;
  enum aarch64_operand_error_kind kind;
  operand_error_record *curr;
  operand_error_record *head = operand_error_report.head;
  operand_error_record *record = NULL;

  /* No error to report.  */
  if (head == NULL)
    return;

  gas_assert (head != NULL && operand_error_report.tail != NULL);

  /* Only one error.  */
  if (head == operand_error_report.tail)
    {
      /* If the only error is a non-fatal one and we don't want to print it,
	 just exit.  */
      if (!non_fatal_only || head->detail.non_fatal)
	{
	  DEBUG_TRACE ("single opcode entry with error kind: %s",
		       operand_mismatch_kind_names[head->detail.kind]);
	  output_operand_error_record (head, str);
	}
      return;
    }

  /* Find the error kind of the highest severity.  */
  DEBUG_TRACE ("multiple opcode entries with error kind");
  kind = AARCH64_OPDE_NIL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
      DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
      if (operand_error_higher_severity_p (curr->detail.kind, kind)
	  && (!non_fatal_only || (non_fatal_only && curr->detail.non_fatal)))
	kind = curr->detail.kind;
    }

  gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);

  /* Pick up one of errors of KIND to report.  */
  largest_error_pos = -2; /* Index can be -1 which means unknown index.  */
  for (curr = head; curr != NULL; curr = curr->next)
    {
      /* If we don't want to print non-fatal errors then don't consider them
	 at all.  */
      if (curr->detail.kind != kind
	  || (non_fatal_only && !curr->detail.non_fatal))
	continue;
      /* If there are multiple errors, pick up the one with the highest
	 mismatching operand index.  In the case of multiple errors with
	 the equally highest operand index, pick up the first one or the
	 first one with non-NULL error message.  */
      if (curr->detail.index > largest_error_pos
	  || (curr->detail.index == largest_error_pos && msg == NULL
	      && curr->detail.error != NULL))
	{
	  largest_error_pos = curr->detail.index;
	  record = curr;
	  msg = record->detail.error;
	}
    }

  /* The way errors are collected in the back-end is a bit non-intuitive.  But
     essentially, because each operand template is tried recursively you may
     always have errors collected from the previous tried OPND.  These are
     usually skipped if there is one successful match.  However now with the
     non-fatal errors we have to ignore those previously collected hard errors
     when we're only interested in printing the non-fatal ones.  This condition
     prevents us from printing errors that are not appropriate, since we did
     match a condition, but it also has warnings that it wants to print.  */
  if (non_fatal_only && !record)
    return;

  gas_assert (largest_error_pos != -2 && record != NULL);
  DEBUG_TRACE ("Pick up error kind %s to report",
	       operand_mismatch_kind_names[record->detail.kind]);

  /* Output.  */
  output_operand_error_record (record, str);
}

/* Write an AARCH64 instruction to buf - always little-endian.  */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *where = (unsigned char *) buf;
  where[0] = insn;
  where[1] = insn >> 8;
  where[2] = insn >> 16;
  where[3] = insn >> 24;
}

/* Read a 32-bit instruction back from BUF; inverse of put_aarch64_insn,
   always little-endian.  */
static uint32_t
get_aarch64_insn (char *buf)
{
  unsigned char *where = (unsigned char *) buf;
  uint32_t result;
  result = ((where[0] | (where[1] << 8) | (where[2] << 16)
	     | ((uint32_t) where[3] << 24)));
  return result;
}

/* Emit the assembled instruction in inst.base.value into the current frag
   and create a fixup for any pending relocation.  NEW_INST, when non-NULL,
   is attached to the fixup via tc_fix_data for later use.  */
static void
output_inst (struct aarch64_inst *new_inst)
{
  char *to = NULL;

  to = frag_more (INSN_SIZE);

  frag_now->tc_frag_data.recorded = 1;

  put_aarch64_insn (to, inst.base.value);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
				    INSN_SIZE, &inst.reloc.exp,
				    inst.reloc.pc_rel,
				    inst.reloc.type);
      DEBUG_TRACE ("Prepared relocation fix up");
      /* Don't check the addend value against the instruction size,
	 that's the job of our code in md_apply_fix().  */
      fixp->fx_no_overflow = 1;
      if (new_inst != NULL)
	fixp->tc_fix_data.inst = new_inst;
      if (aarch64_gas_internal_fixup_p ())
	{
	  gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
	  fixp->tc_fix_data.opnd = inst.reloc.opnd;
	  fixp->fx_addnumber = inst.reloc.flags;
	}
    }

  dwarf2_emit_insn (INSN_SIZE);
}

/* Link together opcodes of the same name.  */

struct templates
{
  aarch64_opcode *opcode;
  struct templates *next;
};

typedef struct templates templates;

/* Find the opcode template chain for the LEN-byte mnemonic at START in
   the aarch64_ops_hsh hash table; NULL if there is none.  */
static templates *
lookup_mnemonic (const char *start, int len)
{
  templates *templ = NULL;

  templ = hash_find_n (aarch64_ops_hsh, start, len);
  return templ;
}

/* Subroutine of md_assemble, responsible for looking up the primary
   opcode from the mnemonic the user wrote.  STR points to the
   beginning of the mnemonic.  */

static templates *
opcode_lookup (char **str)
{
  char *end, *base, *dot;
  const aarch64_cond *cond;
  char condname[16];
  int len;

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  dot = 0;
  for (base = end = *str; is_part_of_name(*end); end++)
    if (*end == '.' && !dot)
      dot = end;

  if (end == base || dot == base)
    return 0;

  inst.cond = COND_ALWAYS;

  /* Handle a possible condition.  */
  if (dot)
    {
      cond = hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
      if (cond)
	{
	  inst.cond = cond->value;
	  *str = end;
	}
      else
	{
	  /* Not a recognised condition; leave *STR at the dot so the
	     caller can diagnose.  */
	  *str = dot;
	  return 0;
	}
      len = dot - base;
    }
  else
    {
      *str = end;
      len = end - base;
    }

  if (inst.cond == COND_ALWAYS)
    {
      /* Look for unaffixed mnemonic.  */
      return lookup_mnemonic (base, len);
    }
  else if (len <= 13)
    {
      /* append ".c" to mnemonic if conditional */
      memcpy (condname, base, len);
      memcpy (condname + len, ".c", 2);
      base = condname;
      len += 2;
      return lookup_mnemonic (base, len);
    }

  return NULL;
}

/* Internal helper routine converting a vector_type_el structure *VECTYPE
   to a corresponding operand qualifier.  */

static inline aarch64_opnd_qualifier_t
vectype_to_qualifier (const struct vector_type_el *vectype)
{
  /* Element size in bytes indexed by vector_el_type.  */
  const unsigned char ele_size[5]
    = {1, 2, 4, 8, 16};
  const unsigned int ele_base [5] =
    {
      AARCH64_OPND_QLF_V_4B,
      AARCH64_OPND_QLF_V_2H,
      AARCH64_OPND_QLF_V_2S,
      AARCH64_OPND_QLF_V_1D,
      AARCH64_OPND_QLF_V_1Q
    };

  if (!vectype->defined || vectype->type == NT_invtype)
    goto vectype_conversion_fail;

  /* SVE predicate suffixes /z and /m.  */
  if (vectype->type == NT_zero)
    return AARCH64_OPND_QLF_P_Z;
  if (vectype->type == NT_merge)
    return AARCH64_OPND_QLF_P_M;

  gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);

  if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
    {
      /* Special case S_4B.  */
      if (vectype->type == NT_b && vectype->width == 4)
	return AARCH64_OPND_QLF_S_4B;

      /* Special case S_2H.  */
      if (vectype->type == NT_h && vectype->width == 2)
	return AARCH64_OPND_QLF_S_2H;

      /* Vector element register.  */
      return AARCH64_OPND_QLF_S_B + vectype->type;
    }
  else
    {
      /* Vector register.  */
      int reg_size = ele_size[vectype->type] * vectype->width;
      unsigned offset;
      unsigned shift;
      if (reg_size != 16 && reg_size != 8 && reg_size != 4)
	goto vectype_conversion_fail;

      /* The conversion is by calculating the offset from the base operand
	 qualifier for the vector type.  The operand qualifiers are regular
	 enough that the offset can established by shifting the vector width
	 by a vector-type dependent amount.  */
      shift = 0;
      if (vectype->type == NT_b)
	shift = 3;
      else if (vectype->type == NT_h || vectype->type == NT_s)
	shift = 2;
      else if (vectype->type >= NT_d)
	shift = 1;
      else
	gas_assert (0);

      offset = ele_base [vectype->type] + (vectype->width >> shift);
      gas_assert (AARCH64_OPND_QLF_V_4B <= offset
		  && offset <= AARCH64_OPND_QLF_V_1Q);
      return offset;
    }

 vectype_conversion_fail:
  first_error (_("bad vector arrangement type"));
  return AARCH64_OPND_QLF_NIL;
}

/* Process an optional operand that is found omitted from the assembly line.
   Fill *OPERAND for such an operand of type TYPE.  OPCODE points to the
   instruction's opcode entry while IDX is the index of this omitted operand.
   */

static void
process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
			 int idx, aarch64_opnd_info *operand)
{
  aarch64_insn default_value = get_optional_operand_default_value (opcode);
  gas_assert (optional_operand_p (opcode, idx));
  gas_assert (!operand->present);

  /* The interpretation of DEFAULT_VALUE depends on the operand class:
     a register number, a lane register number, an immediate, or an index
     into an option table.  */
  switch (type)
    {
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rt_SP:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
    case AARCH64_OPND_Rm_SP:
    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
    case AARCH64_OPND_Va:
    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      operand->reg.regno = default_value;
      break;

    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
    case AARCH64_OPND_Em16:
    case AARCH64_OPND_SM3_IMM2:
      operand->reglane.regno = default_value;
      break;

    case AARCH64_OPND_IDX:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_FBITS:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_IMM_2:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM7:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_SVE_PATTERN:
    case AARCH64_OPND_SVE_PRFOP:
      operand->imm.value = default_value;
      break;

    case AARCH64_OPND_SVE_PATTERN_SCALED:
      /* An omitted "pattern, MUL #n" defaults to a multiplier of 1.  */
      operand->imm.value = default_value;
      operand->shifter.kind = AARCH64_MOD_MUL;
      operand->shifter.amount = 1;
      break;

    case AARCH64_OPND_EXCEPTION:
      inst.reloc.type = BFD_RELOC_UNUSED;
      break;

    case AARCH64_OPND_BARRIER_ISB:
      operand->barrier = aarch64_barrier_options + default_value;
      break;

    case AARCH64_OPND_BTI_TARGET:
      operand->hint_option = aarch64_hint_options + default_value;
      break;

    default:
      break;
    }
}

/* Process the relocation type for move wide instructions.
   Return TRUE on success; otherwise return FALSE.  */

static bfd_boolean
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  /* Signed (_S) and PC-relative (_PREL) groups make no sense with MOVK,
     which only replaces 16 bits and cannot be conditionally negated.  */
  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_MOVW_PREL_G0:
      case BFD_RELOC_AARCH64_MOVW_PREL_G1:
      case BFD_RELOC_AARCH64_MOVW_PREL_G2:
      case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return FALSE;
      default:
	break;
      }

  /* Map the relocation group (G0..G3) to the implied LSL amount of the
     move-wide instruction.  */
  switch (inst.reloc.type)
    {
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return FALSE;
	}
      shift = 32;
      break;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return FALSE;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
	 are supported in GAS.  */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser.  */
      return TRUE;
    }
  inst.base.operands[1].shifter.amount = shift;
  return TRUE;
}

/* A primitive log calculator.  */

static inline unsigned int
get_logsz (unsigned int size)
{
  /* log2 of 1,2,4,8,16; -1 entries flag non-power-of-two sizes.  */
  const unsigned char ls[16] =
    {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
  /* NOTE(review): SIZE == 0 would index ls[-1]; callers appear to pass
     element sizes 1..16 only -- confirm.  */
  if (size > 16)
    {
      gas_assert (0);
      return -1;
    }
  gas_assert (ls[size - 1] != (unsigned char)-1);
  return ls[size - 1];
}

/* Determine and return the real reloc type code for an instruction
   with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12.
 */

static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type (void)
{
  unsigned logsz;
  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;

  /* Rows are indexed by pseudo reloc type (offset from
     BFD_RELOC_AARCH64_LDST_LO12), columns by log2 of the transfer size.  */
  const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
    {
      BFD_RELOC_AARCH64_LDST8_LO12,
      BFD_RELOC_AARCH64_LDST16_LO12,
      BFD_RELOC_AARCH64_LDST32_LO12,
      BFD_RELOC_AARCH64_LDST64_LO12,
      BFD_RELOC_AARCH64_LDST128_LO12
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    }
  };

  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);

  /* If operand 1 carries no qualifier yet, infer it from operand 0's.  */
  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
    opd1_qlf =
      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
				      1, opd0_qlf, 0);
  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);

  logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
  /* The TLS rows above have no 128-bit column.  */
  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
    gas_assert (logsz <= 3);
  else
    gas_assert (logsz <= 4);

  /* In reloc.c, these pseudo relocation types should be defined in similar
     order as above reloc_ldst_lo12 array.  Because the array index calculation
     below relies on this.  */
  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}

/* Check whether a register list REGINFO is valid.  The registers must be
   numbered in increasing order (modulo 32), in increments of one or two.

   If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
   increments of two.

   Return FALSE if such a register list is invalid, otherwise return TRUE.  */

static bfd_boolean
reg_list_valid_p (uint32_t reginfo, int accept_alternate)
{
  uint32_t i, nb_regs, prev_regno, incr;

  /* REGINFO packs (count - 1) in the low 2 bits, then 5 bits per
     register number, lowest-numbered register first.  */
  nb_regs = 1 + (reginfo & 0x3);
  reginfo >>= 2;
  prev_regno = reginfo & 0x1f;
  incr = accept_alternate ? 2 : 1;

  for (i = 1; i < nb_regs; ++i)
    {
      uint32_t curr_regno;
      reginfo >>= 5;
      curr_regno = reginfo & 0x1f;
      /* Modulo-32 wrap-around (e.g. v31 followed by v0) is allowed.  */
      if (curr_regno != ((prev_regno + incr) & 0x1f))
	return FALSE;
      prev_regno = curr_regno;
    }

  return TRUE;
}

/* Generic instruction operand parser.  This does no encoding and no
   semantic validation; it merely squirrels values away in the inst
   structure.
Returns TRUE or FALSE depending on whether the 5553 specified grammar matched. */ 5554 5555 static bfd_boolean 5556 parse_operands (char *str, const aarch64_opcode *opcode) 5557 { 5558 int i; 5559 char *backtrack_pos = 0; 5560 const enum aarch64_opnd *operands = opcode->operands; 5561 aarch64_reg_type imm_reg_type; 5562 5563 clear_error (); 5564 skip_whitespace (str); 5565 5566 if (AARCH64_CPU_HAS_FEATURE (AARCH64_FEATURE_SVE, *opcode->avariant)) 5567 imm_reg_type = REG_TYPE_R_Z_SP_BHSDQ_VZP; 5568 else 5569 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V; 5570 5571 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++) 5572 { 5573 int64_t val; 5574 const reg_entry *reg; 5575 int comma_skipped_p = 0; 5576 aarch64_reg_type rtype; 5577 struct vector_type_el vectype; 5578 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier; 5579 aarch64_opnd_info *info = &inst.base.operands[i]; 5580 aarch64_reg_type reg_type; 5581 5582 DEBUG_TRACE ("parse operand %d", i); 5583 5584 /* Assign the operand code. */ 5585 info->type = operands[i]; 5586 5587 if (optional_operand_p (opcode, i)) 5588 { 5589 /* Remember where we are in case we need to backtrack. */ 5590 gas_assert (!backtrack_pos); 5591 backtrack_pos = str; 5592 } 5593 5594 /* Expect comma between operands; the backtrack mechanism will take 5595 care of cases of omitted optional operand. */ 5596 if (i > 0 && ! 
skip_past_char (&str, ',')) 5597 { 5598 set_syntax_error (_("comma expected between operands")); 5599 goto failure; 5600 } 5601 else 5602 comma_skipped_p = 1; 5603 5604 switch (operands[i]) 5605 { 5606 case AARCH64_OPND_Rd: 5607 case AARCH64_OPND_Rn: 5608 case AARCH64_OPND_Rm: 5609 case AARCH64_OPND_Rt: 5610 case AARCH64_OPND_Rt2: 5611 case AARCH64_OPND_Rs: 5612 case AARCH64_OPND_Ra: 5613 case AARCH64_OPND_Rt_SYS: 5614 case AARCH64_OPND_PAIRREG: 5615 case AARCH64_OPND_SVE_Rm: 5616 po_int_reg_or_fail (REG_TYPE_R_Z); 5617 break; 5618 5619 case AARCH64_OPND_Rd_SP: 5620 case AARCH64_OPND_Rn_SP: 5621 case AARCH64_OPND_Rt_SP: 5622 case AARCH64_OPND_SVE_Rn_SP: 5623 case AARCH64_OPND_Rm_SP: 5624 po_int_reg_or_fail (REG_TYPE_R_SP); 5625 break; 5626 5627 case AARCH64_OPND_Rm_EXT: 5628 case AARCH64_OPND_Rm_SFT: 5629 po_misc_or_fail (parse_shifter_operand 5630 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT 5631 ? SHIFTED_ARITH_IMM 5632 : SHIFTED_LOGIC_IMM))); 5633 if (!info->shifter.operator_present) 5634 { 5635 /* Default to LSL if not present. Libopcodes prefers shifter 5636 kind to be explicit. */ 5637 gas_assert (info->shifter.kind == AARCH64_MOD_NONE); 5638 info->shifter.kind = AARCH64_MOD_LSL; 5639 /* For Rm_EXT, libopcodes will carry out further check on whether 5640 or not stack pointer is used in the instruction (Recall that 5641 "the extend operator is not optional unless at least one of 5642 "Rd" or "Rn" is '11111' (i.e. WSP)"). 
*/ 5643 } 5644 break; 5645 5646 case AARCH64_OPND_Fd: 5647 case AARCH64_OPND_Fn: 5648 case AARCH64_OPND_Fm: 5649 case AARCH64_OPND_Fa: 5650 case AARCH64_OPND_Ft: 5651 case AARCH64_OPND_Ft2: 5652 case AARCH64_OPND_Sd: 5653 case AARCH64_OPND_Sn: 5654 case AARCH64_OPND_Sm: 5655 case AARCH64_OPND_SVE_VZn: 5656 case AARCH64_OPND_SVE_Vd: 5657 case AARCH64_OPND_SVE_Vm: 5658 case AARCH64_OPND_SVE_Vn: 5659 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL); 5660 if (val == PARSE_FAIL) 5661 { 5662 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ))); 5663 goto failure; 5664 } 5665 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q); 5666 5667 info->reg.regno = val; 5668 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B); 5669 break; 5670 5671 case AARCH64_OPND_SVE_Pd: 5672 case AARCH64_OPND_SVE_Pg3: 5673 case AARCH64_OPND_SVE_Pg4_5: 5674 case AARCH64_OPND_SVE_Pg4_10: 5675 case AARCH64_OPND_SVE_Pg4_16: 5676 case AARCH64_OPND_SVE_Pm: 5677 case AARCH64_OPND_SVE_Pn: 5678 case AARCH64_OPND_SVE_Pt: 5679 reg_type = REG_TYPE_PN; 5680 goto vector_reg; 5681 5682 case AARCH64_OPND_SVE_Za_5: 5683 case AARCH64_OPND_SVE_Za_16: 5684 case AARCH64_OPND_SVE_Zd: 5685 case AARCH64_OPND_SVE_Zm_5: 5686 case AARCH64_OPND_SVE_Zm_16: 5687 case AARCH64_OPND_SVE_Zn: 5688 case AARCH64_OPND_SVE_Zt: 5689 reg_type = REG_TYPE_ZN; 5690 goto vector_reg; 5691 5692 case AARCH64_OPND_Va: 5693 case AARCH64_OPND_Vd: 5694 case AARCH64_OPND_Vn: 5695 case AARCH64_OPND_Vm: 5696 reg_type = REG_TYPE_VN; 5697 vector_reg: 5698 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype); 5699 if (val == PARSE_FAIL) 5700 { 5701 first_error (_(get_reg_expected_msg (reg_type))); 5702 goto failure; 5703 } 5704 if (vectype.defined & NTA_HASINDEX) 5705 goto failure; 5706 5707 info->reg.regno = val; 5708 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN) 5709 && vectype.type == NT_invtype) 5710 /* Unqualified Pn and Zn registers are allowed in certain 5711 contexts. 
Rely on F_STRICT qualifier checking to catch 5712 invalid uses. */ 5713 info->qualifier = AARCH64_OPND_QLF_NIL; 5714 else 5715 { 5716 info->qualifier = vectype_to_qualifier (&vectype); 5717 if (info->qualifier == AARCH64_OPND_QLF_NIL) 5718 goto failure; 5719 } 5720 break; 5721 5722 case AARCH64_OPND_VdD1: 5723 case AARCH64_OPND_VnD1: 5724 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype); 5725 if (val == PARSE_FAIL) 5726 { 5727 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN))); 5728 goto failure; 5729 } 5730 if (vectype.type != NT_d || vectype.index != 1) 5731 { 5732 set_fatal_syntax_error 5733 (_("the top half of a 128-bit FP/SIMD register is expected")); 5734 goto failure; 5735 } 5736 info->reg.regno = val; 5737 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register 5738 here; it is correct for the purpose of encoding/decoding since 5739 only the register number is explicitly encoded in the related 5740 instructions, although this appears a bit hacky. 
*/ 5741 info->qualifier = AARCH64_OPND_QLF_S_D; 5742 break; 5743 5744 case AARCH64_OPND_SVE_Zm3_INDEX: 5745 case AARCH64_OPND_SVE_Zm3_22_INDEX: 5746 case AARCH64_OPND_SVE_Zm3_11_INDEX: 5747 case AARCH64_OPND_SVE_Zm4_11_INDEX: 5748 case AARCH64_OPND_SVE_Zm4_INDEX: 5749 case AARCH64_OPND_SVE_Zn_INDEX: 5750 reg_type = REG_TYPE_ZN; 5751 goto vector_reg_index; 5752 5753 case AARCH64_OPND_Ed: 5754 case AARCH64_OPND_En: 5755 case AARCH64_OPND_Em: 5756 case AARCH64_OPND_Em16: 5757 case AARCH64_OPND_SM3_IMM2: 5758 reg_type = REG_TYPE_VN; 5759 vector_reg_index: 5760 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype); 5761 if (val == PARSE_FAIL) 5762 { 5763 first_error (_(get_reg_expected_msg (reg_type))); 5764 goto failure; 5765 } 5766 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX)) 5767 goto failure; 5768 5769 info->reglane.regno = val; 5770 info->reglane.index = vectype.index; 5771 info->qualifier = vectype_to_qualifier (&vectype); 5772 if (info->qualifier == AARCH64_OPND_QLF_NIL) 5773 goto failure; 5774 break; 5775 5776 case AARCH64_OPND_SVE_ZnxN: 5777 case AARCH64_OPND_SVE_ZtxN: 5778 reg_type = REG_TYPE_ZN; 5779 goto vector_reg_list; 5780 5781 case AARCH64_OPND_LVn: 5782 case AARCH64_OPND_LVt: 5783 case AARCH64_OPND_LVt_AL: 5784 case AARCH64_OPND_LEt: 5785 reg_type = REG_TYPE_VN; 5786 vector_reg_list: 5787 if (reg_type == REG_TYPE_ZN 5788 && get_opcode_dependent_value (opcode) == 1 5789 && *str != '{') 5790 { 5791 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype); 5792 if (val == PARSE_FAIL) 5793 { 5794 first_error (_(get_reg_expected_msg (reg_type))); 5795 goto failure; 5796 } 5797 info->reglist.first_regno = val; 5798 info->reglist.num_regs = 1; 5799 } 5800 else 5801 { 5802 val = parse_vector_reg_list (&str, reg_type, &vectype); 5803 if (val == PARSE_FAIL) 5804 goto failure; 5805 5806 if (! 
reg_list_valid_p (val, /* accept_alternate */ 0)) 5807 { 5808 set_fatal_syntax_error (_("invalid register list")); 5809 goto failure; 5810 } 5811 5812 if (vectype.width != 0 && *str != ',') 5813 { 5814 set_fatal_syntax_error 5815 (_("expected element type rather than vector type")); 5816 goto failure; 5817 } 5818 5819 info->reglist.first_regno = (val >> 2) & 0x1f; 5820 info->reglist.num_regs = (val & 0x3) + 1; 5821 } 5822 if (operands[i] == AARCH64_OPND_LEt) 5823 { 5824 if (!(vectype.defined & NTA_HASINDEX)) 5825 goto failure; 5826 info->reglist.has_index = 1; 5827 info->reglist.index = vectype.index; 5828 } 5829 else 5830 { 5831 if (vectype.defined & NTA_HASINDEX) 5832 goto failure; 5833 if (!(vectype.defined & NTA_HASTYPE)) 5834 { 5835 if (reg_type == REG_TYPE_ZN) 5836 set_fatal_syntax_error (_("missing type suffix")); 5837 goto failure; 5838 } 5839 } 5840 info->qualifier = vectype_to_qualifier (&vectype); 5841 if (info->qualifier == AARCH64_OPND_QLF_NIL) 5842 goto failure; 5843 break; 5844 5845 case AARCH64_OPND_CRn: 5846 case AARCH64_OPND_CRm: 5847 { 5848 char prefix = *(str++); 5849 if (prefix != 'c' && prefix != 'C') 5850 goto failure; 5851 5852 po_imm_nc_or_fail (); 5853 if (val > 15) 5854 { 5855 set_fatal_syntax_error (_(N_ ("C0 - C15 expected"))); 5856 goto failure; 5857 } 5858 info->qualifier = AARCH64_OPND_QLF_CR; 5859 info->imm.value = val; 5860 break; 5861 } 5862 5863 case AARCH64_OPND_SHLL_IMM: 5864 case AARCH64_OPND_IMM_VLSR: 5865 po_imm_or_fail (1, 64); 5866 info->imm.value = val; 5867 break; 5868 5869 case AARCH64_OPND_CCMP_IMM: 5870 case AARCH64_OPND_SIMM5: 5871 case AARCH64_OPND_FBITS: 5872 case AARCH64_OPND_TME_UIMM16: 5873 case AARCH64_OPND_UIMM4: 5874 case AARCH64_OPND_UIMM4_ADDG: 5875 case AARCH64_OPND_UIMM10: 5876 case AARCH64_OPND_UIMM3_OP1: 5877 case AARCH64_OPND_UIMM3_OP2: 5878 case AARCH64_OPND_IMM_VLSL: 5879 case AARCH64_OPND_IMM: 5880 case AARCH64_OPND_IMM_2: 5881 case AARCH64_OPND_WIDTH: 5882 case AARCH64_OPND_SVE_INV_LIMM: 5883 case 
AARCH64_OPND_SVE_LIMM: 5884 case AARCH64_OPND_SVE_LIMM_MOV: 5885 case AARCH64_OPND_SVE_SHLIMM_PRED: 5886 case AARCH64_OPND_SVE_SHLIMM_UNPRED: 5887 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22: 5888 case AARCH64_OPND_SVE_SHRIMM_PRED: 5889 case AARCH64_OPND_SVE_SHRIMM_UNPRED: 5890 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22: 5891 case AARCH64_OPND_SVE_SIMM5: 5892 case AARCH64_OPND_SVE_SIMM5B: 5893 case AARCH64_OPND_SVE_SIMM6: 5894 case AARCH64_OPND_SVE_SIMM8: 5895 case AARCH64_OPND_SVE_UIMM3: 5896 case AARCH64_OPND_SVE_UIMM7: 5897 case AARCH64_OPND_SVE_UIMM8: 5898 case AARCH64_OPND_SVE_UIMM8_53: 5899 case AARCH64_OPND_IMM_ROT1: 5900 case AARCH64_OPND_IMM_ROT2: 5901 case AARCH64_OPND_IMM_ROT3: 5902 case AARCH64_OPND_SVE_IMM_ROT1: 5903 case AARCH64_OPND_SVE_IMM_ROT2: 5904 case AARCH64_OPND_SVE_IMM_ROT3: 5905 po_imm_nc_or_fail (); 5906 info->imm.value = val; 5907 break; 5908 5909 case AARCH64_OPND_SVE_AIMM: 5910 case AARCH64_OPND_SVE_ASIMM: 5911 po_imm_nc_or_fail (); 5912 info->imm.value = val; 5913 skip_whitespace (str); 5914 if (skip_past_comma (&str)) 5915 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL)); 5916 else 5917 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL; 5918 break; 5919 5920 case AARCH64_OPND_SVE_PATTERN: 5921 po_enum_or_fail (aarch64_sve_pattern_array); 5922 info->imm.value = val; 5923 break; 5924 5925 case AARCH64_OPND_SVE_PATTERN_SCALED: 5926 po_enum_or_fail (aarch64_sve_pattern_array); 5927 info->imm.value = val; 5928 if (skip_past_comma (&str) 5929 && !parse_shift (&str, info, SHIFTED_MUL)) 5930 goto failure; 5931 if (!info->shifter.operator_present) 5932 { 5933 gas_assert (info->shifter.kind == AARCH64_MOD_NONE); 5934 info->shifter.kind = AARCH64_MOD_MUL; 5935 info->shifter.amount = 1; 5936 } 5937 break; 5938 5939 case AARCH64_OPND_SVE_PRFOP: 5940 po_enum_or_fail (aarch64_sve_prfop_array); 5941 info->imm.value = val; 5942 break; 5943 5944 case AARCH64_OPND_UIMM7: 5945 po_imm_or_fail (0, 127); 5946 info->imm.value = val; 5947 break; 5948 5949 
case AARCH64_OPND_IDX: 5950 case AARCH64_OPND_MASK: 5951 case AARCH64_OPND_BIT_NUM: 5952 case AARCH64_OPND_IMMR: 5953 case AARCH64_OPND_IMMS: 5954 po_imm_or_fail (0, 63); 5955 info->imm.value = val; 5956 break; 5957 5958 case AARCH64_OPND_IMM0: 5959 po_imm_nc_or_fail (); 5960 if (val != 0) 5961 { 5962 set_fatal_syntax_error (_("immediate zero expected")); 5963 goto failure; 5964 } 5965 info->imm.value = 0; 5966 break; 5967 5968 case AARCH64_OPND_FPIMM0: 5969 { 5970 int qfloat; 5971 bfd_boolean res1 = FALSE, res2 = FALSE; 5972 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected, 5973 it is probably not worth the effort to support it. */ 5974 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE, 5975 imm_reg_type)) 5976 && (error_p () 5977 || !(res2 = parse_constant_immediate (&str, &val, 5978 imm_reg_type)))) 5979 goto failure; 5980 if ((res1 && qfloat == 0) || (res2 && val == 0)) 5981 { 5982 info->imm.value = 0; 5983 info->imm.is_fp = 1; 5984 break; 5985 } 5986 set_fatal_syntax_error (_("immediate zero expected")); 5987 goto failure; 5988 } 5989 5990 case AARCH64_OPND_IMM_MOV: 5991 { 5992 char *saved = str; 5993 if (reg_name_p (str, REG_TYPE_R_Z_SP) || 5994 reg_name_p (str, REG_TYPE_VN)) 5995 goto failure; 5996 str = saved; 5997 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str, 5998 GE_OPT_PREFIX, 1)); 5999 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn 6000 later. fix_mov_imm_insn will try to determine a machine 6001 instruction (MOVZ, MOVN or ORR) for it and will issue an error 6002 message if the immediate cannot be moved by a single 6003 instruction. */ 6004 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1); 6005 inst.base.operands[i].skip = 1; 6006 } 6007 break; 6008 6009 case AARCH64_OPND_SIMD_IMM: 6010 case AARCH64_OPND_SIMD_IMM_SFT: 6011 if (! 
parse_big_immediate (&str, &val, imm_reg_type)) 6012 goto failure; 6013 assign_imm_if_const_or_fixup_later (&inst.reloc, info, 6014 /* addr_off_p */ 0, 6015 /* need_libopcodes_p */ 1, 6016 /* skip_p */ 1); 6017 /* Parse shift. 6018 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any 6019 shift, we don't check it here; we leave the checking to 6020 the libopcodes (operand_general_constraint_met_p). By 6021 doing this, we achieve better diagnostics. */ 6022 if (skip_past_comma (&str) 6023 && ! parse_shift (&str, info, SHIFTED_LSL_MSL)) 6024 goto failure; 6025 if (!info->shifter.operator_present 6026 && info->type == AARCH64_OPND_SIMD_IMM_SFT) 6027 { 6028 /* Default to LSL if not present. Libopcodes prefers shifter 6029 kind to be explicit. */ 6030 gas_assert (info->shifter.kind == AARCH64_MOD_NONE); 6031 info->shifter.kind = AARCH64_MOD_LSL; 6032 } 6033 break; 6034 6035 case AARCH64_OPND_FPIMM: 6036 case AARCH64_OPND_SIMD_FPIMM: 6037 case AARCH64_OPND_SVE_FPIMM8: 6038 { 6039 int qfloat; 6040 bfd_boolean dp_p; 6041 6042 dp_p = double_precision_operand_p (&inst.base.operands[0]); 6043 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type) 6044 || !aarch64_imm_float_p (qfloat)) 6045 { 6046 if (!error_p ()) 6047 set_fatal_syntax_error (_("invalid floating-point" 6048 " constant")); 6049 goto failure; 6050 } 6051 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat); 6052 inst.base.operands[i].imm.is_fp = 1; 6053 } 6054 break; 6055 6056 case AARCH64_OPND_SVE_I1_HALF_ONE: 6057 case AARCH64_OPND_SVE_I1_HALF_TWO: 6058 case AARCH64_OPND_SVE_I1_ZERO_ONE: 6059 { 6060 int qfloat; 6061 bfd_boolean dp_p; 6062 6063 dp_p = double_precision_operand_p (&inst.base.operands[0]); 6064 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)) 6065 { 6066 if (!error_p ()) 6067 set_fatal_syntax_error (_("invalid floating-point" 6068 " constant")); 6069 goto failure; 6070 } 6071 inst.base.operands[i].imm.value = qfloat; 6072 inst.base.operands[i].imm.is_fp = 
1; 6073 } 6074 break; 6075 6076 case AARCH64_OPND_LIMM: 6077 po_misc_or_fail (parse_shifter_operand (&str, info, 6078 SHIFTED_LOGIC_IMM)); 6079 if (info->shifter.operator_present) 6080 { 6081 set_fatal_syntax_error 6082 (_("shift not allowed for bitmask immediate")); 6083 goto failure; 6084 } 6085 assign_imm_if_const_or_fixup_later (&inst.reloc, info, 6086 /* addr_off_p */ 0, 6087 /* need_libopcodes_p */ 1, 6088 /* skip_p */ 1); 6089 break; 6090 6091 case AARCH64_OPND_AIMM: 6092 if (opcode->op == OP_ADD) 6093 /* ADD may have relocation types. */ 6094 po_misc_or_fail (parse_shifter_operand_reloc (&str, info, 6095 SHIFTED_ARITH_IMM)); 6096 else 6097 po_misc_or_fail (parse_shifter_operand (&str, info, 6098 SHIFTED_ARITH_IMM)); 6099 switch (inst.reloc.type) 6100 { 6101 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12: 6102 info->shifter.amount = 12; 6103 break; 6104 case BFD_RELOC_UNUSED: 6105 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0); 6106 if (info->shifter.kind != AARCH64_MOD_NONE) 6107 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT; 6108 inst.reloc.pc_rel = 0; 6109 break; 6110 default: 6111 break; 6112 } 6113 info->imm.value = 0; 6114 if (!info->shifter.operator_present) 6115 { 6116 /* Default to LSL if not present. Libopcodes prefers shifter 6117 kind to be explicit. */ 6118 gas_assert (info->shifter.kind == AARCH64_MOD_NONE); 6119 info->shifter.kind = AARCH64_MOD_LSL; 6120 } 6121 break; 6122 6123 case AARCH64_OPND_HALF: 6124 { 6125 /* #<imm16> or relocation. */ 6126 int internal_fixup_p; 6127 po_misc_or_fail (parse_half (&str, &internal_fixup_p)); 6128 if (internal_fixup_p) 6129 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0); 6130 skip_whitespace (str); 6131 if (skip_past_comma (&str)) 6132 { 6133 /* {, LSL #<shift>} */ 6134 if (! 
aarch64_gas_internal_fixup_p ()) 6135 { 6136 set_fatal_syntax_error (_("can't mix relocation modifier " 6137 "with explicit shift")); 6138 goto failure; 6139 } 6140 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL)); 6141 } 6142 else 6143 inst.base.operands[i].shifter.amount = 0; 6144 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL; 6145 inst.base.operands[i].imm.value = 0; 6146 if (! process_movw_reloc_info ()) 6147 goto failure; 6148 } 6149 break; 6150 6151 case AARCH64_OPND_EXCEPTION: 6152 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp, 6153 imm_reg_type)); 6154 assign_imm_if_const_or_fixup_later (&inst.reloc, info, 6155 /* addr_off_p */ 0, 6156 /* need_libopcodes_p */ 0, 6157 /* skip_p */ 1); 6158 break; 6159 6160 case AARCH64_OPND_NZCV: 6161 { 6162 const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4); 6163 if (nzcv != NULL) 6164 { 6165 str += 4; 6166 info->imm.value = nzcv->value; 6167 break; 6168 } 6169 po_imm_or_fail (0, 15); 6170 info->imm.value = val; 6171 } 6172 break; 6173 6174 case AARCH64_OPND_COND: 6175 case AARCH64_OPND_COND1: 6176 { 6177 char *start = str; 6178 do 6179 str++; 6180 while (ISALPHA (*str)); 6181 info->cond = hash_find_n (aarch64_cond_hsh, start, str - start); 6182 if (info->cond == NULL) 6183 { 6184 set_syntax_error (_("invalid condition")); 6185 goto failure; 6186 } 6187 else if (operands[i] == AARCH64_OPND_COND1 6188 && (info->cond->value & 0xe) == 0xe) 6189 { 6190 /* Do not allow AL or NV. */ 6191 set_default_error (); 6192 goto failure; 6193 } 6194 } 6195 break; 6196 6197 case AARCH64_OPND_ADDR_ADRP: 6198 po_misc_or_fail (parse_adrp (&str)); 6199 /* Clear the value as operand needs to be relocated. 
*/ 6200 info->imm.value = 0; 6201 break; 6202 6203 case AARCH64_OPND_ADDR_PCREL14: 6204 case AARCH64_OPND_ADDR_PCREL19: 6205 case AARCH64_OPND_ADDR_PCREL21: 6206 case AARCH64_OPND_ADDR_PCREL26: 6207 po_misc_or_fail (parse_address (&str, info)); 6208 if (!info->addr.pcrel) 6209 { 6210 set_syntax_error (_("invalid pc-relative address")); 6211 goto failure; 6212 } 6213 if (inst.gen_lit_pool 6214 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT)) 6215 { 6216 /* Only permit "=value" in the literal load instructions. 6217 The literal will be generated by programmer_friendly_fixup. */ 6218 set_syntax_error (_("invalid use of \"=immediate\"")); 6219 goto failure; 6220 } 6221 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str)) 6222 { 6223 set_syntax_error (_("unrecognized relocation suffix")); 6224 goto failure; 6225 } 6226 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool) 6227 { 6228 info->imm.value = inst.reloc.exp.X_add_number; 6229 inst.reloc.type = BFD_RELOC_UNUSED; 6230 } 6231 else 6232 { 6233 info->imm.value = 0; 6234 if (inst.reloc.type == BFD_RELOC_UNUSED) 6235 switch (opcode->iclass) 6236 { 6237 case compbranch: 6238 case condbranch: 6239 /* e.g. CBZ or B.COND */ 6240 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19); 6241 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19; 6242 break; 6243 case testbranch: 6244 /* e.g. TBZ */ 6245 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14); 6246 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14; 6247 break; 6248 case branch_imm: 6249 /* e.g. B or BL */ 6250 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26); 6251 inst.reloc.type = 6252 (opcode->op == OP_BL) ? 
BFD_RELOC_AARCH64_CALL26 6253 : BFD_RELOC_AARCH64_JUMP26; 6254 break; 6255 case loadlit: 6256 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19); 6257 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL; 6258 break; 6259 case pcreladdr: 6260 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21); 6261 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL; 6262 break; 6263 default: 6264 gas_assert (0); 6265 abort (); 6266 } 6267 inst.reloc.pc_rel = 1; 6268 } 6269 break; 6270 6271 case AARCH64_OPND_ADDR_SIMPLE: 6272 case AARCH64_OPND_SIMD_ADDR_SIMPLE: 6273 { 6274 /* [<Xn|SP>{, #<simm>}] */ 6275 char *start = str; 6276 /* First use the normal address-parsing routines, to get 6277 the usual syntax errors. */ 6278 po_misc_or_fail (parse_address (&str, info)); 6279 if (info->addr.pcrel || info->addr.offset.is_reg 6280 || !info->addr.preind || info->addr.postind 6281 || info->addr.writeback) 6282 { 6283 set_syntax_error (_("invalid addressing mode")); 6284 goto failure; 6285 } 6286 6287 /* Then retry, matching the specific syntax of these addresses. */ 6288 str = start; 6289 po_char_or_fail ('['); 6290 po_reg_or_fail (REG_TYPE_R64_SP); 6291 /* Accept optional ", #0". */ 6292 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE 6293 && skip_past_char (&str, ',')) 6294 { 6295 skip_past_char (&str, '#'); 6296 if (! skip_past_char (&str, '0')) 6297 { 6298 set_fatal_syntax_error 6299 (_("the optional immediate offset can only be 0")); 6300 goto failure; 6301 } 6302 } 6303 po_char_or_fail (']'); 6304 break; 6305 } 6306 6307 case AARCH64_OPND_ADDR_REGOFF: 6308 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */ 6309 po_misc_or_fail (parse_address (&str, info)); 6310 regoff_addr: 6311 if (info->addr.pcrel || !info->addr.offset.is_reg 6312 || !info->addr.preind || info->addr.postind 6313 || info->addr.writeback) 6314 { 6315 set_syntax_error (_("invalid addressing mode")); 6316 goto failure; 6317 } 6318 if (!info->shifter.operator_present) 6319 { 6320 /* Default to LSL if not present. 
Libopcodes prefers shifter 6321 kind to be explicit. */ 6322 gas_assert (info->shifter.kind == AARCH64_MOD_NONE); 6323 info->shifter.kind = AARCH64_MOD_LSL; 6324 } 6325 /* Qualifier to be deduced by libopcodes. */ 6326 break; 6327 6328 case AARCH64_OPND_ADDR_SIMM7: 6329 po_misc_or_fail (parse_address (&str, info)); 6330 if (info->addr.pcrel || info->addr.offset.is_reg 6331 || (!info->addr.preind && !info->addr.postind)) 6332 { 6333 set_syntax_error (_("invalid addressing mode")); 6334 goto failure; 6335 } 6336 if (inst.reloc.type != BFD_RELOC_UNUSED) 6337 { 6338 set_syntax_error (_("relocation not allowed")); 6339 goto failure; 6340 } 6341 assign_imm_if_const_or_fixup_later (&inst.reloc, info, 6342 /* addr_off_p */ 1, 6343 /* need_libopcodes_p */ 1, 6344 /* skip_p */ 0); 6345 break; 6346 6347 case AARCH64_OPND_ADDR_SIMM9: 6348 case AARCH64_OPND_ADDR_SIMM9_2: 6349 case AARCH64_OPND_ADDR_SIMM11: 6350 case AARCH64_OPND_ADDR_SIMM13: 6351 po_misc_or_fail (parse_address (&str, info)); 6352 if (info->addr.pcrel || info->addr.offset.is_reg 6353 || (!info->addr.preind && !info->addr.postind) 6354 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2 6355 && info->addr.writeback)) 6356 { 6357 set_syntax_error (_("invalid addressing mode")); 6358 goto failure; 6359 } 6360 if (inst.reloc.type != BFD_RELOC_UNUSED) 6361 { 6362 set_syntax_error (_("relocation not allowed")); 6363 goto failure; 6364 } 6365 assign_imm_if_const_or_fixup_later (&inst.reloc, info, 6366 /* addr_off_p */ 1, 6367 /* need_libopcodes_p */ 1, 6368 /* skip_p */ 0); 6369 break; 6370 6371 case AARCH64_OPND_ADDR_SIMM10: 6372 case AARCH64_OPND_ADDR_OFFSET: 6373 po_misc_or_fail (parse_address (&str, info)); 6374 if (info->addr.pcrel || info->addr.offset.is_reg 6375 || !info->addr.preind || info->addr.postind) 6376 { 6377 set_syntax_error (_("invalid addressing mode")); 6378 goto failure; 6379 } 6380 if (inst.reloc.type != BFD_RELOC_UNUSED) 6381 { 6382 set_syntax_error (_("relocation not allowed")); 6383 goto failure; 6384 
} 6385 assign_imm_if_const_or_fixup_later (&inst.reloc, info, 6386 /* addr_off_p */ 1, 6387 /* need_libopcodes_p */ 1, 6388 /* skip_p */ 0); 6389 break; 6390 6391 case AARCH64_OPND_ADDR_UIMM12: 6392 po_misc_or_fail (parse_address (&str, info)); 6393 if (info->addr.pcrel || info->addr.offset.is_reg 6394 || !info->addr.preind || info->addr.writeback) 6395 { 6396 set_syntax_error (_("invalid addressing mode")); 6397 goto failure; 6398 } 6399 if (inst.reloc.type == BFD_RELOC_UNUSED) 6400 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1); 6401 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12 6402 || (inst.reloc.type 6403 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12) 6404 || (inst.reloc.type 6405 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC) 6406 || (inst.reloc.type 6407 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12) 6408 || (inst.reloc.type 6409 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)) 6410 inst.reloc.type = ldst_lo12_determine_real_reloc_type (); 6411 /* Leave qualifier to be determined by libopcodes. */ 6412 break; 6413 6414 case AARCH64_OPND_SIMD_ADDR_POST: 6415 /* [<Xn|SP>], <Xm|#<amount>> */ 6416 po_misc_or_fail (parse_address (&str, info)); 6417 if (!info->addr.postind || !info->addr.writeback) 6418 { 6419 set_syntax_error (_("invalid addressing mode")); 6420 goto failure; 6421 } 6422 if (!info->addr.offset.is_reg) 6423 { 6424 if (inst.reloc.exp.X_op == O_constant) 6425 info->addr.offset.imm = inst.reloc.exp.X_add_number; 6426 else 6427 { 6428 set_fatal_syntax_error 6429 (_("writeback value must be an immediate constant")); 6430 goto failure; 6431 } 6432 } 6433 /* No qualifier. 
*/ 6434 break; 6435 6436 case AARCH64_OPND_SVE_ADDR_RI_S4x16: 6437 case AARCH64_OPND_SVE_ADDR_RI_S4x32: 6438 case AARCH64_OPND_SVE_ADDR_RI_S4xVL: 6439 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL: 6440 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL: 6441 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL: 6442 case AARCH64_OPND_SVE_ADDR_RI_S6xVL: 6443 case AARCH64_OPND_SVE_ADDR_RI_S9xVL: 6444 case AARCH64_OPND_SVE_ADDR_RI_U6: 6445 case AARCH64_OPND_SVE_ADDR_RI_U6x2: 6446 case AARCH64_OPND_SVE_ADDR_RI_U6x4: 6447 case AARCH64_OPND_SVE_ADDR_RI_U6x8: 6448 /* [X<n>{, #imm, MUL VL}] 6449 [X<n>{, #imm}] 6450 but recognizing SVE registers. */ 6451 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier, 6452 &offset_qualifier)); 6453 if (base_qualifier != AARCH64_OPND_QLF_X) 6454 { 6455 set_syntax_error (_("invalid addressing mode")); 6456 goto failure; 6457 } 6458 sve_regimm: 6459 if (info->addr.pcrel || info->addr.offset.is_reg 6460 || !info->addr.preind || info->addr.writeback) 6461 { 6462 set_syntax_error (_("invalid addressing mode")); 6463 goto failure; 6464 } 6465 if (inst.reloc.type != BFD_RELOC_UNUSED 6466 || inst.reloc.exp.X_op != O_constant) 6467 { 6468 /* Make sure this has priority over 6469 "invalid addressing mode". */ 6470 set_fatal_syntax_error (_("constant offset required")); 6471 goto failure; 6472 } 6473 info->addr.offset.imm = inst.reloc.exp.X_add_number; 6474 break; 6475 6476 case AARCH64_OPND_SVE_ADDR_R: 6477 /* [<Xn|SP>{, <R><m>}] 6478 but recognizing SVE registers. 
*/ 6479 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier, 6480 &offset_qualifier)); 6481 if (offset_qualifier == AARCH64_OPND_QLF_NIL) 6482 { 6483 offset_qualifier = AARCH64_OPND_QLF_X; 6484 info->addr.offset.is_reg = 1; 6485 info->addr.offset.regno = 31; 6486 } 6487 else if (base_qualifier != AARCH64_OPND_QLF_X 6488 || offset_qualifier != AARCH64_OPND_QLF_X) 6489 { 6490 set_syntax_error (_("invalid addressing mode")); 6491 goto failure; 6492 } 6493 goto regoff_addr; 6494 6495 case AARCH64_OPND_SVE_ADDR_RR: 6496 case AARCH64_OPND_SVE_ADDR_RR_LSL1: 6497 case AARCH64_OPND_SVE_ADDR_RR_LSL2: 6498 case AARCH64_OPND_SVE_ADDR_RR_LSL3: 6499 case AARCH64_OPND_SVE_ADDR_RX: 6500 case AARCH64_OPND_SVE_ADDR_RX_LSL1: 6501 case AARCH64_OPND_SVE_ADDR_RX_LSL2: 6502 case AARCH64_OPND_SVE_ADDR_RX_LSL3: 6503 /* [<Xn|SP>, <R><m>{, lsl #<amount>}] 6504 but recognizing SVE registers. */ 6505 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier, 6506 &offset_qualifier)); 6507 if (base_qualifier != AARCH64_OPND_QLF_X 6508 || offset_qualifier != AARCH64_OPND_QLF_X) 6509 { 6510 set_syntax_error (_("invalid addressing mode")); 6511 goto failure; 6512 } 6513 goto regoff_addr; 6514 6515 case AARCH64_OPND_SVE_ADDR_RZ: 6516 case AARCH64_OPND_SVE_ADDR_RZ_LSL1: 6517 case AARCH64_OPND_SVE_ADDR_RZ_LSL2: 6518 case AARCH64_OPND_SVE_ADDR_RZ_LSL3: 6519 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14: 6520 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22: 6521 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14: 6522 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22: 6523 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14: 6524 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22: 6525 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14: 6526 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22: 6527 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}] 6528 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */ 6529 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier, 6530 &offset_qualifier)); 6531 if (base_qualifier != AARCH64_OPND_QLF_X 6532 || (offset_qualifier != AARCH64_OPND_QLF_S_S 6533 && 
offset_qualifier != AARCH64_OPND_QLF_S_D)) 6534 { 6535 set_syntax_error (_("invalid addressing mode")); 6536 goto failure; 6537 } 6538 info->qualifier = offset_qualifier; 6539 goto regoff_addr; 6540 6541 case AARCH64_OPND_SVE_ADDR_ZX: 6542 /* [Zn.<T>{, <Xm>}]. */ 6543 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier, 6544 &offset_qualifier)); 6545 /* Things to check: 6546 base_qualifier either S_S or S_D 6547 offset_qualifier must be X 6548 */ 6549 if ((base_qualifier != AARCH64_OPND_QLF_S_S 6550 && base_qualifier != AARCH64_OPND_QLF_S_D) 6551 || offset_qualifier != AARCH64_OPND_QLF_X) 6552 { 6553 set_syntax_error (_("invalid addressing mode")); 6554 goto failure; 6555 } 6556 info->qualifier = base_qualifier; 6557 if (!info->addr.offset.is_reg || info->addr.pcrel 6558 || !info->addr.preind || info->addr.writeback 6559 || info->shifter.operator_present != 0) 6560 { 6561 set_syntax_error (_("invalid addressing mode")); 6562 goto failure; 6563 } 6564 info->shifter.kind = AARCH64_MOD_LSL; 6565 break; 6566 6567 6568 case AARCH64_OPND_SVE_ADDR_ZI_U5: 6569 case AARCH64_OPND_SVE_ADDR_ZI_U5x2: 6570 case AARCH64_OPND_SVE_ADDR_ZI_U5x4: 6571 case AARCH64_OPND_SVE_ADDR_ZI_U5x8: 6572 /* [Z<n>.<T>{, #imm}] */ 6573 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier, 6574 &offset_qualifier)); 6575 if (base_qualifier != AARCH64_OPND_QLF_S_S 6576 && base_qualifier != AARCH64_OPND_QLF_S_D) 6577 { 6578 set_syntax_error (_("invalid addressing mode")); 6579 goto failure; 6580 } 6581 info->qualifier = base_qualifier; 6582 goto sve_regimm; 6583 6584 case AARCH64_OPND_SVE_ADDR_ZZ_LSL: 6585 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW: 6586 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW: 6587 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}] 6588 [Z<n>.D, Z<m>.D, <extend> {#<amount>}] 6589 6590 We don't reject: 6591 6592 [Z<n>.S, Z<m>.S, <extend> {#<amount>}] 6593 6594 here since we get better error messages by leaving it to 6595 the qualifier checking routines. 
*/ 6596 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier, 6597 &offset_qualifier)); 6598 if ((base_qualifier != AARCH64_OPND_QLF_S_S 6599 && base_qualifier != AARCH64_OPND_QLF_S_D) 6600 || offset_qualifier != base_qualifier) 6601 { 6602 set_syntax_error (_("invalid addressing mode")); 6603 goto failure; 6604 } 6605 info->qualifier = base_qualifier; 6606 goto regoff_addr; 6607 6608 case AARCH64_OPND_SYSREG: 6609 { 6610 uint32_t sysreg_flags; 6611 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0, 6612 &sysreg_flags)) == PARSE_FAIL) 6613 { 6614 set_syntax_error (_("unknown or missing system register name")); 6615 goto failure; 6616 } 6617 inst.base.operands[i].sysreg.value = val; 6618 inst.base.operands[i].sysreg.flags = sysreg_flags; 6619 break; 6620 } 6621 6622 case AARCH64_OPND_PSTATEFIELD: 6623 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1, NULL)) 6624 == PARSE_FAIL) 6625 { 6626 set_syntax_error (_("unknown or missing PSTATE field name")); 6627 goto failure; 6628 } 6629 inst.base.operands[i].pstatefield = val; 6630 break; 6631 6632 case AARCH64_OPND_SYSREG_IC: 6633 inst.base.operands[i].sysins_op = 6634 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh); 6635 goto sys_reg_ins; 6636 6637 case AARCH64_OPND_SYSREG_DC: 6638 inst.base.operands[i].sysins_op = 6639 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh); 6640 goto sys_reg_ins; 6641 6642 case AARCH64_OPND_SYSREG_AT: 6643 inst.base.operands[i].sysins_op = 6644 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh); 6645 goto sys_reg_ins; 6646 6647 case AARCH64_OPND_SYSREG_SR: 6648 inst.base.operands[i].sysins_op = 6649 parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh); 6650 goto sys_reg_ins; 6651 6652 case AARCH64_OPND_SYSREG_TLBI: 6653 inst.base.operands[i].sysins_op = 6654 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh); 6655 sys_reg_ins: 6656 if (inst.base.operands[i].sysins_op == NULL) 6657 { 6658 set_fatal_syntax_error ( _("unknown or missing operation name")); 6659 
goto failure; 6660 } 6661 break; 6662 6663 case AARCH64_OPND_BARRIER: 6664 case AARCH64_OPND_BARRIER_ISB: 6665 val = parse_barrier (&str); 6666 if (val != PARSE_FAIL 6667 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf) 6668 { 6669 /* ISB only accepts options name 'sy'. */ 6670 set_syntax_error 6671 (_("the specified option is not accepted in ISB")); 6672 /* Turn off backtrack as this optional operand is present. */ 6673 backtrack_pos = 0; 6674 goto failure; 6675 } 6676 /* This is an extension to accept a 0..15 immediate. */ 6677 if (val == PARSE_FAIL) 6678 po_imm_or_fail (0, 15); 6679 info->barrier = aarch64_barrier_options + val; 6680 break; 6681 6682 case AARCH64_OPND_PRFOP: 6683 val = parse_pldop (&str); 6684 /* This is an extension to accept a 0..31 immediate. */ 6685 if (val == PARSE_FAIL) 6686 po_imm_or_fail (0, 31); 6687 inst.base.operands[i].prfop = aarch64_prfops + val; 6688 break; 6689 6690 case AARCH64_OPND_BARRIER_PSB: 6691 val = parse_barrier_psb (&str, &(info->hint_option)); 6692 if (val == PARSE_FAIL) 6693 goto failure; 6694 break; 6695 6696 case AARCH64_OPND_BTI_TARGET: 6697 val = parse_bti_operand (&str, &(info->hint_option)); 6698 if (val == PARSE_FAIL) 6699 goto failure; 6700 break; 6701 6702 default: 6703 as_fatal (_("unhandled operand code %d"), operands[i]); 6704 } 6705 6706 /* If we get here, this operand was successfully parsed. */ 6707 inst.base.operands[i].present = 1; 6708 continue; 6709 6710 failure: 6711 /* The parse routine should already have set the error, but in case 6712 not, set a default one here. */ 6713 if (! error_p ()) 6714 set_default_error (); 6715 6716 if (! backtrack_pos) 6717 goto parse_operands_return; 6718 6719 { 6720 /* We reach here because this operand is marked as optional, and 6721 either no operand was supplied or the operand was supplied but it 6722 was syntactically incorrect. In the latter case we report an 6723 error. 
In the former case we perform a few more checks before 6724 dropping through to the code to insert the default operand. */ 6725 6726 char *tmp = backtrack_pos; 6727 char endchar = END_OF_INSN; 6728 6729 if (i != (aarch64_num_of_operands (opcode) - 1)) 6730 endchar = ','; 6731 skip_past_char (&tmp, ','); 6732 6733 if (*tmp != endchar) 6734 /* The user has supplied an operand in the wrong format. */ 6735 goto parse_operands_return; 6736 6737 /* Make sure there is not a comma before the optional operand. 6738 For example the fifth operand of 'sys' is optional: 6739 6740 sys #0,c0,c0,#0, <--- wrong 6741 sys #0,c0,c0,#0 <--- correct. */ 6742 if (comma_skipped_p && i && endchar == END_OF_INSN) 6743 { 6744 set_fatal_syntax_error 6745 (_("unexpected comma before the omitted optional operand")); 6746 goto parse_operands_return; 6747 } 6748 } 6749 6750 /* Reaching here means we are dealing with an optional operand that is 6751 omitted from the assembly line. */ 6752 gas_assert (optional_operand_p (opcode, i)); 6753 info->present = 0; 6754 process_omitted_operand (operands[i], opcode, i, info); 6755 6756 /* Try again, skipping the optional operand at backtrack_pos. */ 6757 str = backtrack_pos; 6758 backtrack_pos = 0; 6759 6760 /* Clear any error record after the omitted optional operand has been 6761 successfully handled. */ 6762 clear_error (); 6763 } 6764 6765 /* Check if we have parsed all the operands. */ 6766 if (*str != '\0' && ! error_p ()) 6767 { 6768 /* Set I to the index of the last present operand; this is 6769 for the purpose of diagnostics. 
*/
      for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
	;
      set_fatal_syntax_error
	(_("unexpected characters following instruction"));
    }

 parse_operands_return:

  if (error_p ())
    {
      DEBUG_TRACE ("parsing FAIL: %s - %s",
		   operand_mismatch_kind_names[get_error_kind ()],
		   get_error_message ());
      /* Record the operand error properly; this is useful when there
	 are multiple instruction templates for a mnemonic name, so that
	 later on, we can select the error that most closely describes
	 the problem.  */
      record_operand_error (opcode, i, get_error_kind (),
			    get_error_message ());
      return FALSE;
    }
  else
    {
      DEBUG_TRACE ("parsing SUCCESS");
      return TRUE;
    }
}

/* It does some fix-up to provide some programmer friendly feature while
   keeping the libopcodes happy, i.e. libopcodes only accepts
   the preferred architectural syntax.
   Return FALSE if there is any failure; otherwise return TRUE.

   INSTR is the instruction parsed so far; on success its operand
   qualifiers may have been rewritten in place to the preferred form.  */

static bfd_boolean
programmer_friendly_fixup (aarch64_instruction *instr)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  enum aarch64_op op = opcode->op;
  aarch64_opnd_info *operands = base->operands;

  DEBUG_TRACE ("enter");

  switch (opcode->iclass)
    {
    case testbranch:
      /* TBNZ Xn|Wn, #uimm6, label
	 Test and Branch Not Zero: conditionally jumps to label if bit number
	 uimm6 in register Xn is not zero.  The bit number implies the width of
	 the register, which may be written and should be disassembled as Wn if
	 uimm is less than 32.  */
      if (operands[0].qualifier == AARCH64_OPND_QLF_W)
	{
	  /* A W register restricts the bit number to 0..31.  */
	  if (operands[1].imm.value >= 32)
	    {
	      record_operand_out_of_range_error (opcode, 1, _("immediate value"),
						 0, 31);
	      return FALSE;
	    }
	  /* The instruction itself is always the X form; Wn is only a
	     disassembly convenience.  */
	  operands[0].qualifier = AARCH64_OPND_QLF_X;
	}
      break;
    case loadlit:
      /* LDR Wt, label | =value
	 As a convenience assemblers will typically permit the notation
	 "=value" in conjunction with the pc-relative literal load instructions
	 to automatically place an immediate value or symbolic address in a
	 nearby literal pool and generate a hidden label which references it.
	 ISREG has been set to 0 in the case of =value.  */
      if (instr->gen_lit_pool
	  && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
	{
	  int size = aarch64_get_qualifier_esize (operands[0].qualifier);
	  /* LDRSW loads a 32-bit literal regardless of the Xt qualifier.  */
	  if (op == OP_LDRSW_LIT)
	    size = 4;
	  if (instr->reloc.exp.X_op != O_constant
	      && instr->reloc.exp.X_op != O_big
	      && instr->reloc.exp.X_op != O_symbol)
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_FATAL_SYNTAX_ERROR,
				    _("constant expression expected"));
	      return FALSE;
	    }
	  if (! add_to_lit_pool (&instr->reloc.exp, size))
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_OTHER_ERROR,
				    _("literal pool insertion failed"));
	      return FALSE;
	    }
	}
      break;
    case log_shift:
    case bitfield:
      /* UXT[BHW] Wd, Wn
	 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
	 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
	 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
	 A programmer-friendly assembler should accept a destination Xd in
	 place of Wd, however that is not the preferred form for disassembly.
	 */
      if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
	  && operands[1].qualifier == AARCH64_OPND_QLF_W
	  && operands[0].qualifier == AARCH64_OPND_QLF_X)
	operands[0].qualifier = AARCH64_OPND_QLF_W;
      break;

    case addsub_ext:
      {
	/* In the 64-bit form, the final register operand is written as Wm
	   for all but the (possibly omitted) UXTX/LSL and SXTX
	   operators.
	   As a programmer-friendly assembler, we accept e.g.
	   ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
	   ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}.  */
	int idx = aarch64_operand_index (opcode->operands,
					 AARCH64_OPND_Rm_EXT);
	gas_assert (idx == 1 || idx == 2);
	if (operands[0].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].shifter.kind != AARCH64_MOD_LSL
	    && operands[idx].shifter.kind != AARCH64_MOD_UXTX
	    && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
	  operands[idx].qualifier = AARCH64_OPND_QLF_W;
      }
      break;

    default:
      break;
    }

  DEBUG_TRACE ("exit with SUCCESS");
  return TRUE;
}

/* Check for loads and stores that will cause unpredictable behavior.
   INSTR is the fully-parsed instruction; STR is the source line, used
   only for diagnostics.  Emits as_warn but never rejects the insn.  */

static void
warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  const aarch64_opnd_info *opnds = base->operands;
  switch (opcode->iclass)
    {
    case ldst_pos:
    case ldst_imm9:
    case ldst_imm10:
    case ldst_unscaled:
    case ldst_unpriv:
      /* Loading/storing the base register is unpredictable if writeback.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && opnds[0].reg.regno == opnds[1].addr.base_regno
	  && opnds[1].addr.base_regno != REG_SP
	  /* Exempt STG/STZG/ST2G/STZ2G.  */
	  && !(opnds[1].type == AARCH64_OPND_ADDR_SIMM13)
	  && opnds[1].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      break;

    case ldstpair_off:
    case ldstnapair_offs:
    case ldstpair_indexed:
      /* Loading/storing the base register is unpredictable if writeback.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (opnds[0].reg.regno == opnds[2].addr.base_regno
	     || opnds[1].reg.regno == opnds[2].addr.base_regno)
	  && opnds[2].addr.base_regno != REG_SP
	  /* Exempt STGP.  */
	  && !(opnds[2].type == AARCH64_OPND_ADDR_SIMM11)
	  && opnds[2].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      /* Load operations must load different registers.
	 NOTE(review): bit 22 appears to be the load/store selector here --
	 confirm against the opcode table before relying on it elsewhere.  */
      if ((opcode->opcode & (1 << 22))
	  && opnds[0].reg.regno == opnds[1].reg.regno)
	as_warn (_("unpredictable load of register pair -- `%s'"), str);
      break;

    case ldstexcl:
      /* It is unpredictable if the destination and status registers are the
	 same.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (aarch64_get_operand_class (opnds[1].type)
	      == AARCH64_OPND_CLASS_INT_REG)
	  && (opnds[0].reg.regno == opnds[1].reg.regno
	      || opnds[0].reg.regno == opnds[2].reg.regno))
	as_warn (_("unpredictable: identical transfer and status registers"
		   " --`%s'"),
		 str);

      break;

    default:
      break;
    }
}

/* If an SVE MOVPRFX-style sequence is still open, warn and reset it.
   Called when something other than the expected consuming instruction
   terminates the sequence (e.g. end of section).  */

static void
force_automatic_sequence_close (void)
{
  if (now_instr_sequence.instr)
    {
      as_warn (_("previous `%s' sequence has not been closed"),
	       now_instr_sequence.instr->opcode->name);
      init_insn_sequence (NULL, &now_instr_sequence);
    }
}

/* A wrapper function to interface with libopcodes on encoding and
   record the error message if there is any.
   Return TRUE on success; otherwise return FALSE.  */

static bfd_boolean
do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
	   aarch64_insn *code)
{
  aarch64_operand_error error_info;
  memset (&error_info, '\0', sizeof (error_info));
  error_info.kind = AARCH64_OPDE_NIL;
  if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
      && !error_info.non_fatal)
    return TRUE;

  /* Encoding failed, or succeeded with a non-fatal diagnostic; either
     way libopcodes must have filled in an error.  */
  gas_assert (error_info.kind != AARCH64_OPDE_NIL);
  record_operand_error_info (opcode, &error_info);
  /* A non-fatal error still produces a usable encoding.  */
  return error_info.non_fatal;
}

#ifdef DEBUG_AARCH64
/* Dump the operand list of OPCODE for debugging, preferring the operand
   name and falling back to its description when the name is empty.  */
static inline void
dump_opcode_operands (const aarch64_opcode *opcode)
{
  int i = 0;
  while (opcode->operands[i] != AARCH64_OPND_NIL)
    {
      aarch64_verbose ("\t\t opnd%d: %s", i,
		       aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
		       ? aarch64_get_operand_name (opcode->operands[i])
		       : aarch64_get_operand_desc (opcode->operands[i]));
      ++i;
    }
}
#endif /* DEBUG_AARCH64 */

/* This is the guts of the machine-dependent assembler.  STR points to a
   machine dependent instruction.  This function is supposed to emit
   the frags/bytes it assembles to.  */

void
md_assemble (char *str)
{
  char *p = str;
  templates *template;
  aarch64_opcode *opcode;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Update the current insn_sequence from the segment.  */
  insn_sequence = &seg_info (now_seg)->tc_segment_info_data.insn_sequence;

  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  /* P is advanced past the mnemonic by opcode_lookup.  */
  template = opcode_lookup (&p);
  if (!template)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg directive.  */
      if (!create_register_alias (str, p))
	as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
		str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  /* The condition was parsed with the mnemonic; preserve it across the
     per-template reset of the global instruction state.  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name.  */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond.  */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional.  */
	  gas_assert (0);
	  continue;
	}

      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU.  */
	  if (!opcode->avariant
	      || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
	         store the instruction information for the future fix-up.  */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      copy = XNEW (struct aarch64_inst);
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }

	  /* Issue non-fatal messages if any.  */
	  output_operand_error_report (str, TRUE);
	  return;
	}

      /* This template failed; retry with the next one for the mnemonic,
	 restoring a clean instruction state first.  */
      template = template->next;
      if (template != NULL)
	{
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any.  */
  output_operand_error_report (str, FALSE);
}

/* Various frobbings of labels and their addresses.
*/ 7160 7161 void 7162 aarch64_start_line_hook (void) 7163 { 7164 last_label_seen = NULL; 7165 } 7166 7167 void 7168 aarch64_frob_label (symbolS * sym) 7169 { 7170 last_label_seen = sym; 7171 7172 dwarf2_emit_label (sym); 7173 } 7174 7175 void 7176 aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED) 7177 { 7178 /* Check to see if we have a block to close. */ 7179 force_automatic_sequence_close (); 7180 } 7181 7182 int 7183 aarch64_data_in_code (void) 7184 { 7185 if (!strncmp (input_line_pointer + 1, "data:", 5)) 7186 { 7187 *input_line_pointer = '/'; 7188 input_line_pointer += 5; 7189 *input_line_pointer = 0; 7190 return 1; 7191 } 7192 7193 return 0; 7194 } 7195 7196 char * 7197 aarch64_canonicalize_symbol_name (char *name) 7198 { 7199 int len; 7200 7201 if ((len = strlen (name)) > 5 && streq (name + len - 5, "/data")) 7202 *(name + len - 5) = 0; 7203 7204 return name; 7205 } 7206 7207 /* Table of all register names defined by default. The user can 7208 define additional names with .req. Note that all register names 7209 should appear in both upper and lowercase variants. Some registers 7210 also have mixed-case names. 
*/

/* REGDEF: a canonical register name (is_canonical = TRUE);
   REGDEF_ALIAS: a secondary name for the same number (FALSE);
   REGNUM/REGSET*: expand numbered runs of a register family.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
#define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, FALSE}
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
#define REGSET16(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
#define REGSET31(p,t) \
  REGSET16(p, t), \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
#define REGSET(p,t) \
  REGSET31(p,t), REGNUM(p,31,t)

/* These go into aarch64_reg_hsh hash-table.  */
static const reg_entry reg_names[] = {
  /* Integer registers.  x31/w31 are deliberately excluded: register 31
     is only addressable via the sp/wsp and xzr/wzr entries below.  */
  REGSET31 (x, R_64), REGSET31 (X, R_64),
  REGSET31 (w, R_32), REGSET31 (W, R_32),

  REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
  REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
  REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
  REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
  REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
  REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),

  REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
  REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),

  /* Floating-point single precision registers.  */
  REGSET (s, FP_S), REGSET (S, FP_S),

  /* Floating-point double precision registers.  */
  REGSET (d, FP_D), REGSET (D, FP_D),

  /* Floating-point half precision registers.  */
  REGSET (h, FP_H), REGSET (H, FP_H),

  /* Floating-point byte precision registers.  */
  REGSET (b, FP_B), REGSET (B, FP_B),

  /* Floating-point quad precision registers.  */
  REGSET (q, FP_Q), REGSET (Q, FP_Q),

  /* FP/SIMD registers.  */
  REGSET (v, VN), REGSET (V, VN),

  /* SVE vector registers.  */
  REGSET (z, ZN), REGSET (Z, ZN),

  /* SVE predicate registers.  Only p0-p15 exist, hence REGSET16.  */
  REGSET16 (p, PN), REGSET16 (P, PN)
};

#undef REGDEF
#undef REGDEF_ALIAS
#undef REGNUM
#undef REGSET16
#undef REGSET31
#undef REGSET

/* Temporary single-letter flag values used only to build the NZCV
   table below; each name encodes which of the N/Z/C/V bits are set.  */
#define N 1
#define n 0
#define Z 1
#define z 0
#define C 1
#define c 0
#define V 1
#define v 0
#define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
static const asm_nzcv nzcv_names[] = {
  {"nzcv", B (n, z, c, v)},
  {"nzcV", B (n, z, c, V)},
  {"nzCv", B (n, z, C, v)},
  {"nzCV", B (n, z, C, V)},
  {"nZcv", B (n, Z, c, v)},
  {"nZcV", B (n, Z, c, V)},
  {"nZCv", B (n, Z, C, v)},
  {"nZCV", B (n, Z, C, V)},
  {"Nzcv", B (N, z, c, v)},
  {"NzcV", B (N, z, c, V)},
  {"NzCv", B (N, z, C, v)},
  {"NzCV", B (N, z, C, V)},
  {"NZcv", B (N, Z, c, v)},
  {"NZcV", B (N, Z, c, V)},
  {"NZCv", B (N, Z, C, v)},
  {"NZCV", B (N, Z, C, V)}
};

#undef N
#undef n
#undef Z
#undef z
#undef C
#undef c
#undef V
#undef v
#undef B

/* MD interface: bits in the object file.  */

/* Turn an integer of n bytes (in val) into a stream of bytes appropriate
   for use in the a.out file, and stores them in the array pointed to by buf.
   This knows about the endian-ness of the target machine and does
   THE RIGHT THING, whatever it is.  Possible values for n are 1 (byte)
   2 (short) and 4 (long)  Floating numbers are put out as a series of
   LITTLENUMS (shorts, here at least).
*/

void
md_number_to_chars (char *buf, valueT val, int n)
{
  if (target_big_endian)
    number_to_chars_bigendian (buf, val, n);
  else
    number_to_chars_littleendian (buf, val, n);
}

/* MD interface: Sections.  */

/* Estimate the size of a frag before relaxing.  Assume everything fits in
   4 bytes.  */

int
md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
{
  fragp->fr_var = 4;
  return 4;
}

/* Round up a section size to the appropriate boundary.  */

valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
  /* No extra alignment is imposed here.  */
  return size;
}

/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment.

   Here we fill the frag with the appropriate info for padding the
   output stream.  The resulting frag will consist of a fixed (fr_fix)
   and of a repeating (fr_var) part.

   The fixed content is always emitted before the repeating content and
   these two parts are used as follows in constructing the output:
   - the fixed part will be used to align to a valid instruction word
     boundary, in case that we start at a misaligned address; as no
     executable instruction can live at the misaligned location, we
     simply fill with zeros;
   - the variable part will be used to cover the remaining padding and
     we fill using the AArch64 NOP instruction.

   Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
   enough storage space for up to 3 bytes for padding the back to a valid
   instruction alignment and exactly 4 bytes to store the NOP pattern.  */

void
aarch64_handle_align (fragS * fragP)
{
  /* NOP = d503201f */
  /* AArch64 instructions are always little-endian.  */
  static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };

  int bytes, fix, noop_size;
  char *p;

  if (fragP->fr_type != rs_align_code)
    return;

  /* Total padding needed between this frag's fixed part and the next
     frag's start address.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;

#ifdef OBJ_ELF
  gas_assert (fragP->tc_frag_data.recorded);
#endif

  noop_size = sizeof (aarch64_noop);

  /* Bytes needed to reach a 4-byte instruction boundary; zero-filled
     and covered by a data mapping symbol on ELF.  */
  fix = bytes & (noop_size - 1);
  if (fix)
    {
#ifdef OBJ_ELF
      insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      fragP->fr_fix += fix;
    }

  /* The repeating part is a single NOP pattern.  */
  if (noop_size)
    memcpy (p, aarch64_noop, noop_size);
  fragP->fr_var = noop_size;
}

/* Perform target specific initialisation of a frag.
   Note - despite the name this initialisation is not done when the frag
   is created, but only when its type is assigned.  A frag can be created
   and used a long time before its type is set, so beware of assuming that
   this initialisation is performed first.  */

#ifndef OBJ_ELF
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
}

#else /* OBJ_ELF is defined.  */
void
aarch64_init_frag (fragS * fragP, int max_chars)
{
  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  if (!fragP->tc_frag_data.recorded)
    fragP->tc_frag_data.recorded = 1;

  /* PR 21809: Do not set a mapping state for debug sections
     - it just confuses other tools.  */
  if (bfd_section_flags (now_seg) & SEC_DEBUGGING)
    return;

  switch (fragP->fr_type)
    {
    case rs_align_test:
    case rs_fill:
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align:
      /* PR 20364: We can get alignment frags in code sections,
	 so do not just assume that we should use the MAP_DATA state.  */
      mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
      break;
    case rs_align_code:
      mapping_state_2 (MAP_INSN, max_chars);
      break;
    default:
      break;
    }
}

/* Initialize the DWARF-2 unwind information for this procedure.  */

void
tc_aarch64_frame_initial_instructions (void)
{
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
#endif /* OBJ_ELF */

/* Convert REGNAME to a DWARF-2 register number.  Integer registers map
   to their own number; FP/SIMD registers map to number + 64.  Returns -1
   for names that have no DWARF number.  */

int
tc_aarch64_regname_to_dw2regnum (char *regname)
{
  const reg_entry *reg = parse_reg (&regname);
  if (reg == NULL)
    return -1;

  switch (reg->type)
    {
    case REG_TYPE_SP_32:
    case REG_TYPE_SP_64:
    case REG_TYPE_R_32:
    case REG_TYPE_R_64:
      return reg->number;

    case REG_TYPE_FP_B:
    case REG_TYPE_FP_H:
    case REG_TYPE_FP_S:
    case REG_TYPE_FP_D:
    case REG_TYPE_FP_Q:
      return reg->number + 64;

    default:
      break;
    }
  return -1;
}

/* Implement DWARF2_ADDR_SIZE.  */

int
aarch64_dwarf2_addr_size (void)
{
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
  /* ILP32 uses 32-bit addresses even though the arch is 64-bit.  */
  if (ilp32_p)
    return 4;
#endif
  return bfd_arch_bits_per_address (stdoutput) / 8;
}

/* MD interface: Symbol and relocation handling.  */

/* Return the address within the segment that a PC-relative fixup is
   relative to.  For AArch64 PC-relative fixups applied to instructions
   are generally relative to the location plus AARCH64_PCREL_OFFSET bytes.
*/ 7513 7514 long 7515 md_pcrel_from_section (fixS * fixP, segT seg) 7516 { 7517 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address; 7518 7519 /* If this is pc-relative and we are going to emit a relocation 7520 then we just want to put out any pipeline compensation that the linker 7521 will need. Otherwise we want to use the calculated base. */ 7522 if (fixP->fx_pcrel 7523 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg) 7524 || aarch64_force_relocation (fixP))) 7525 base = 0; 7526 7527 /* AArch64 should be consistent for all pc-relative relocations. */ 7528 return base + AARCH64_PCREL_OFFSET; 7529 } 7530 7531 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE. 7532 Otherwise we have no need to default values of symbols. */ 7533 7534 symbolS * 7535 md_undefined_symbol (char *name ATTRIBUTE_UNUSED) 7536 { 7537 #ifdef OBJ_ELF 7538 if (name[0] == '_' && name[1] == 'G' 7539 && streq (name, GLOBAL_OFFSET_TABLE_NAME)) 7540 { 7541 if (!GOT_symbol) 7542 { 7543 if (symbol_find (name)) 7544 as_bad (_("GOT already in the symbol table")); 7545 7546 GOT_symbol = symbol_new (name, undefined_section, 7547 (valueT) 0, &zero_address_frag); 7548 } 7549 7550 return GOT_symbol; 7551 } 7552 #endif 7553 7554 return 0; 7555 } 7556 7557 /* Return non-zero if the indicated VALUE has overflowed the maximum 7558 range expressible by a unsigned number with the indicated number of 7559 BITS. */ 7560 7561 static bfd_boolean 7562 unsigned_overflow (valueT value, unsigned bits) 7563 { 7564 valueT lim; 7565 if (bits >= sizeof (valueT) * 8) 7566 return FALSE; 7567 lim = (valueT) 1 << bits; 7568 return (value >= lim); 7569 } 7570 7571 7572 /* Return non-zero if the indicated VALUE has overflowed the maximum 7573 range expressible by an signed number with the indicated number of 7574 BITS. 
*/

static bfd_boolean
signed_overflow (offsetT value, unsigned bits)
{
  offsetT lim;
  /* A full-width (or wider) field can never overflow.  */
  if (bits >= sizeof (offsetT) * 8)
    return FALSE;
  lim = (offsetT) 1 << (bits - 1);
  return (value < -lim || value >= lim);
}

/* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
   unsigned immediate offset load/store instruction, try to encode it as
   an unscaled, 9-bit, signed immediate offset load/store instruction.
   Return TRUE if it is successful; otherwise return FALSE.

   As a programmer-friendly assembler, LDUR/STUR instructions can be generated
   in response to the standard LDR/STR mnemonics when the immediate offset is
   unambiguous, i.e. when it is negative or unaligned.  */

static bfd_boolean
try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
{
  int idx;
  enum aarch64_op new_op;
  const aarch64_opcode *new_opcode;

  gas_assert (instr->opcode->iclass == ldst_pos);

  /* Map each scaled-offset opcode to its unscaled counterpart; OP_NIL
     for opcodes with no such counterpart.  */
  switch (instr->opcode->op)
    {
    case OP_LDRB_POS:new_op = OP_LDURB; break;
    case OP_STRB_POS: new_op = OP_STURB; break;
    case OP_LDRSB_POS: new_op = OP_LDURSB; break;
    case OP_LDRH_POS: new_op = OP_LDURH; break;
    case OP_STRH_POS: new_op = OP_STURH; break;
    case OP_LDRSH_POS: new_op = OP_LDURSH; break;
    case OP_LDR_POS: new_op = OP_LDUR; break;
    case OP_STR_POS: new_op = OP_STUR; break;
    case OP_LDRF_POS: new_op = OP_LDURV; break;
    case OP_STRF_POS: new_op = OP_STURV; break;
    case OP_LDRSW_POS: new_op = OP_LDURSW; break;
    case OP_PRFM_POS: new_op = OP_PRFUM; break;
    default: new_op = OP_NIL; break;
    }

  if (new_op == OP_NIL)
    return FALSE;

  new_opcode = aarch64_get_opcode (new_op);
  gas_assert (new_opcode != NULL);

  /* NOTE(review): the message transposes the direction; the conversion
     performed here is STRB/LDRB -> STURB/LDURB.  */
  DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
	       instr->opcode->op, new_opcode->op);

  aarch64_replace_opcode (instr, new_opcode);

  /* Clear up the ADDR_SIMM9's qualifier; otherwise the
     qualifier matching may fail because the out-of-date qualifier will
     prevent the operand being updated with a new and correct qualifier.  */
  idx = aarch64_operand_index (instr->opcode->operands,
			       AARCH64_OPND_ADDR_SIMM9);
  gas_assert (idx == 1);
  instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;

  DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");

  if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
			      insn_sequence))
    return FALSE;

  return TRUE;
}

/* Called by fix_insn to fix a MOV immediate alias instruction.

   Operand for a generic move immediate instruction, which is an alias
   instruction that generates a single MOVZ, MOVN or ORR instruction to loads
   a 32-bit/64-bit immediate value into general register.  An assembler error
   shall result if the immediate cannot be created by a single one of these
   instructions.  If there is a choice, then to ensure reversability an
   assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR.  */

static void
fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
{
  const aarch64_opcode *opcode;

  /* Need to check if the destination is SP/ZR.  The check has to be done
     before any aarch64_replace_opcode.  */
  int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
  int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);

  instr->operands[1].imm.value = value;
  instr->operands[1].skip = 0;

  if (try_mov_wide_p)
    {
      /* Try the MOVZ alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
      /* Try the MOVN alias.  (OP_MOV_IMM_WIDEN is the wide-negated,
	 i.e. MOVN-backed, form of the MOV alias -- MOVK is never
	 involved here, matching the function header above.)  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  if (try_mov_bitmask_p)
    {
      /* Try the ORR alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  as_bad_where (fixP->fx_file, fixP->fx_line,
		_("immediate cannot be moved by a single instruction"));
}

/* An instruction operand which is immediate related may have symbol used
   in the assembly, e.g.

     mov     w0, u32
     .set    u32,    0x00ffff00

   At the time when the assembly instruction is parsed, a referenced symbol,
   like 'u32' in the above example may not have been seen; a fixS is created
   in such a case and is handled here after symbols have been resolved.
   Instruction is fixed up with VALUE using the information in *FIXP plus
   extra information in FLAGS.

   This function is called by md_apply_fix to fix up instructions that need
   a fix-up described above but does not involve any linker-time relocation.
*/

static void
fix_insn (fixS *fixP, uint32_t flags, offsetT value)
{
  int idx;
  uint32_t insn;
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
  aarch64_inst *new_inst = fixP->tc_fix_data.inst;

  if (new_inst)
    {
      /* Now the instruction is about to be fixed-up, so the operand that
	 was previously marked as 'ignored' needs to be unmarked in order
	 to get the encoding done properly.  */
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].skip = 0;
    }

  gas_assert (opnd != AARCH64_OPND_NIL);

  switch (opnd)
    {
    case AARCH64_OPND_EXCEPTION:
      /* 16-bit immediate of SVC/HVC/SMC etc.; patched directly into the
	 already-emitted instruction word.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_svc_imm (value);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_AIMM:
      /* ADD or SUB with immediate.
	 NOTE this assumes we come here with a add/sub shifted reg encoding
		  3  322|2222|2  2  2 21111 111111
		  1  098|7654|3  2  1 09876 543210 98765 43210
	 0b000000 sf 000|1011|shift 0 Rm    imm6   Rn    Rd    ADD
	 2b000000 sf 010|1011|shift 0 Rm    imm6   Rn    Rd    ADDS
	 4b000000 sf 100|1011|shift 0 Rm    imm6   Rn    Rd    SUB
	 6b000000 sf 110|1011|shift 0 Rm    imm6   Rn    Rd    SUBS
	 ->
		  3  322|2222|2 2  221111111111
		  1  098|7654|3 2  109876543210 98765 43210
	 11000000 sf 001|0001|shift imm12      Rn    Rd    ADD
	 31000000 sf 011|0001|shift imm12      Rn    Rd    ADDS
	 51000000 sf 101|0001|shift imm12      Rn    Rd    SUB
	 71000000 sf 111|0001|shift imm12      Rn    Rd    SUBS
	 Fields sf Rn Rd are already set.  */
      insn = get_aarch64_insn (buf);
      if (value < 0)
	{
	  /* Add <-> sub.  */
	  insn = reencode_addsub_switch_add_sub (insn);
	  value = -value;
	}

      if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
	  && unsigned_overflow (value, 12))
	{
	  /* Try to shift the value by 12 to make it fit.  Only possible
	     when the low 12 bits are zero and the shifted value still
	     fits in 12 bits.  */
	  if (((value >> 12) << 12) == value
	      && ! unsigned_overflow (value, 12 + 12))
	    {
	      value >>= 12;
	      insn |= encode_addsub_imm_shift_amount (1);
	    }
	}

      if (unsigned_overflow (value, 12))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));

      insn |= encode_addsub_imm (value);

      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
    case AARCH64_OPND_LIMM:
      /* Bit mask immediate.  Re-encode the whole instruction through
	 libopcodes since logical-immediate encoding is non-trivial.  */
      gas_assert (new_inst != NULL);
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].imm.value = value;
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	put_aarch64_insn (buf, new_inst->value);
      else
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate"));
      break;

    case AARCH64_OPND_HALF:
      /* 16-bit unsigned immediate.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_movw_imm (value & 0xffff);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_IMM_MOV:
      /* Operand for a generic move immediate instruction, which is
	 an alias instruction that generates a single MOVZ, MOVN or ORR
	 instruction to loads a 32-bit/64-bit immediate value into general
	 register.  An assembler error shall result if the immediate cannot be
	 created by a single one of these instructions.  If there is a choice,
	 then to ensure reversability an assembler must prefer a MOVZ to MOVN,
	 and MOVZ or MOVN to ORR.  */
      gas_assert (new_inst != NULL);
      fix_mov_imm_insn (fixP, buf, new_inst, value);
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
    case AARCH64_OPND_ADDR_SIMM10:
    case AARCH64_OPND_ADDR_UIMM12:
    case AARCH64_OPND_ADDR_SIMM11:
    case AARCH64_OPND_ADDR_SIMM13:
      /* Immediate offset in an address.  */
      insn = get_aarch64_insn (buf);

      gas_assert (new_inst != NULL && new_inst->value == insn);
      gas_assert (new_inst->opcode->operands[1] == opnd
		  || new_inst->opcode->operands[2] == opnd);

      /* Get the index of the address operand.  */
      if (new_inst->opcode->operands[1] == opnd)
	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
	idx = 1;
      else
	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
	idx = 2;

      /* Update the resolved offset value.  */
      new_inst->operands[idx].addr.offset.imm = value;

      /* Encode/fix-up.  */
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}
      else if (new_inst->opcode->iclass == ldst_pos
	       && try_to_encode_as_unscaled_ldst (new_inst))
	{
	  /* Fall back to the programmer-friendly LDUR/STUR form when the
	     scaled-offset encoding rejects the offset.  */
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}

      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("immediate offset out of range"));
      break;

    default:
      gas_assert (0);
      as_fatal (_("unhandled operand code %d"), opnd);
    }
}

/* Apply a fixup (fixP) to segment data, once it has been determined
   by our caller that we have all the info we need to fix it up.

   Parameter valP is the pointer to the value of the bits.
*/ 7892 7893 void 7894 md_apply_fix (fixS * fixP, valueT * valP, segT seg) 7895 { 7896 offsetT value = *valP; 7897 uint32_t insn; 7898 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal; 7899 int scale; 7900 unsigned flags = fixP->fx_addnumber; 7901 7902 DEBUG_TRACE ("\n\n"); 7903 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~"); 7904 DEBUG_TRACE ("Enter md_apply_fix"); 7905 7906 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED); 7907 7908 /* Note whether this will delete the relocation. */ 7909 7910 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel) 7911 fixP->fx_done = 1; 7912 7913 /* Process the relocations. */ 7914 switch (fixP->fx_r_type) 7915 { 7916 case BFD_RELOC_NONE: 7917 /* This will need to go in the object file. */ 7918 fixP->fx_done = 0; 7919 break; 7920 7921 case BFD_RELOC_8: 7922 case BFD_RELOC_8_PCREL: 7923 if (fixP->fx_done || !seg->use_rela_p) 7924 md_number_to_chars (buf, value, 1); 7925 break; 7926 7927 case BFD_RELOC_16: 7928 case BFD_RELOC_16_PCREL: 7929 if (fixP->fx_done || !seg->use_rela_p) 7930 md_number_to_chars (buf, value, 2); 7931 break; 7932 7933 case BFD_RELOC_32: 7934 case BFD_RELOC_32_PCREL: 7935 if (fixP->fx_done || !seg->use_rela_p) 7936 md_number_to_chars (buf, value, 4); 7937 break; 7938 7939 case BFD_RELOC_64: 7940 case BFD_RELOC_64_PCREL: 7941 if (fixP->fx_done || !seg->use_rela_p) 7942 md_number_to_chars (buf, value, 8); 7943 break; 7944 7945 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP: 7946 /* We claim that these fixups have been processed here, even if 7947 in fact we generate an error because we do not have a reloc 7948 for them, so tc_gen_reloc() will reject them. 
*/ 7949 fixP->fx_done = 1; 7950 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy)) 7951 { 7952 as_bad_where (fixP->fx_file, fixP->fx_line, 7953 _("undefined symbol %s used as an immediate value"), 7954 S_GET_NAME (fixP->fx_addsy)); 7955 goto apply_fix_return; 7956 } 7957 fix_insn (fixP, flags, value); 7958 break; 7959 7960 case BFD_RELOC_AARCH64_LD_LO19_PCREL: 7961 if (fixP->fx_done || !seg->use_rela_p) 7962 { 7963 if (value & 3) 7964 as_bad_where (fixP->fx_file, fixP->fx_line, 7965 _("pc-relative load offset not word aligned")); 7966 if (signed_overflow (value, 21)) 7967 as_bad_where (fixP->fx_file, fixP->fx_line, 7968 _("pc-relative load offset out of range")); 7969 insn = get_aarch64_insn (buf); 7970 insn |= encode_ld_lit_ofs_19 (value >> 2); 7971 put_aarch64_insn (buf, insn); 7972 } 7973 break; 7974 7975 case BFD_RELOC_AARCH64_ADR_LO21_PCREL: 7976 if (fixP->fx_done || !seg->use_rela_p) 7977 { 7978 if (signed_overflow (value, 21)) 7979 as_bad_where (fixP->fx_file, fixP->fx_line, 7980 _("pc-relative address offset out of range")); 7981 insn = get_aarch64_insn (buf); 7982 insn |= encode_adr_imm (value); 7983 put_aarch64_insn (buf, insn); 7984 } 7985 break; 7986 7987 case BFD_RELOC_AARCH64_BRANCH19: 7988 if (fixP->fx_done || !seg->use_rela_p) 7989 { 7990 if (value & 3) 7991 as_bad_where (fixP->fx_file, fixP->fx_line, 7992 _("conditional branch target not word aligned")); 7993 if (signed_overflow (value, 21)) 7994 as_bad_where (fixP->fx_file, fixP->fx_line, 7995 _("conditional branch out of range")); 7996 insn = get_aarch64_insn (buf); 7997 insn |= encode_cond_branch_ofs_19 (value >> 2); 7998 put_aarch64_insn (buf, insn); 7999 } 8000 break; 8001 8002 case BFD_RELOC_AARCH64_TSTBR14: 8003 if (fixP->fx_done || !seg->use_rela_p) 8004 { 8005 if (value & 3) 8006 as_bad_where (fixP->fx_file, fixP->fx_line, 8007 _("conditional branch target not word aligned")); 8008 if (signed_overflow (value, 16)) 8009 as_bad_where (fixP->fx_file, fixP->fx_line, 8010 _("conditional 
branch out of range")); 8011 insn = get_aarch64_insn (buf); 8012 insn |= encode_tst_branch_ofs_14 (value >> 2); 8013 put_aarch64_insn (buf, insn); 8014 } 8015 break; 8016 8017 case BFD_RELOC_AARCH64_CALL26: 8018 case BFD_RELOC_AARCH64_JUMP26: 8019 if (fixP->fx_done || !seg->use_rela_p) 8020 { 8021 if (value & 3) 8022 as_bad_where (fixP->fx_file, fixP->fx_line, 8023 _("branch target not word aligned")); 8024 if (signed_overflow (value, 28)) 8025 as_bad_where (fixP->fx_file, fixP->fx_line, 8026 _("branch out of range")); 8027 insn = get_aarch64_insn (buf); 8028 insn |= encode_branch_ofs_26 (value >> 2); 8029 put_aarch64_insn (buf, insn); 8030 } 8031 break; 8032 8033 case BFD_RELOC_AARCH64_MOVW_G0: 8034 case BFD_RELOC_AARCH64_MOVW_G0_NC: 8035 case BFD_RELOC_AARCH64_MOVW_G0_S: 8036 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC: 8037 case BFD_RELOC_AARCH64_MOVW_PREL_G0: 8038 case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC: 8039 scale = 0; 8040 goto movw_common; 8041 case BFD_RELOC_AARCH64_MOVW_G1: 8042 case BFD_RELOC_AARCH64_MOVW_G1_NC: 8043 case BFD_RELOC_AARCH64_MOVW_G1_S: 8044 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1: 8045 case BFD_RELOC_AARCH64_MOVW_PREL_G1: 8046 case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC: 8047 scale = 16; 8048 goto movw_common; 8049 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC: 8050 scale = 0; 8051 S_SET_THREAD_LOCAL (fixP->fx_addsy); 8052 /* Should always be exported to object file, see 8053 aarch64_force_relocation(). */ 8054 gas_assert (!fixP->fx_done); 8055 gas_assert (seg->use_rela_p); 8056 goto movw_common; 8057 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1: 8058 scale = 16; 8059 S_SET_THREAD_LOCAL (fixP->fx_addsy); 8060 /* Should always be exported to object file, see 8061 aarch64_force_relocation(). 
*/ 8062 gas_assert (!fixP->fx_done); 8063 gas_assert (seg->use_rela_p); 8064 goto movw_common; 8065 case BFD_RELOC_AARCH64_MOVW_G2: 8066 case BFD_RELOC_AARCH64_MOVW_G2_NC: 8067 case BFD_RELOC_AARCH64_MOVW_G2_S: 8068 case BFD_RELOC_AARCH64_MOVW_PREL_G2: 8069 case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC: 8070 scale = 32; 8071 goto movw_common; 8072 case BFD_RELOC_AARCH64_MOVW_G3: 8073 case BFD_RELOC_AARCH64_MOVW_PREL_G3: 8074 scale = 48; 8075 movw_common: 8076 if (fixP->fx_done || !seg->use_rela_p) 8077 { 8078 insn = get_aarch64_insn (buf); 8079 8080 if (!fixP->fx_done) 8081 { 8082 /* REL signed addend must fit in 16 bits */ 8083 if (signed_overflow (value, 16)) 8084 as_bad_where (fixP->fx_file, fixP->fx_line, 8085 _("offset out of range")); 8086 } 8087 else 8088 { 8089 /* Check for overflow and scale. */ 8090 switch (fixP->fx_r_type) 8091 { 8092 case BFD_RELOC_AARCH64_MOVW_G0: 8093 case BFD_RELOC_AARCH64_MOVW_G1: 8094 case BFD_RELOC_AARCH64_MOVW_G2: 8095 case BFD_RELOC_AARCH64_MOVW_G3: 8096 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1: 8097 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1: 8098 if (unsigned_overflow (value, scale + 16)) 8099 as_bad_where (fixP->fx_file, fixP->fx_line, 8100 _("unsigned value out of range")); 8101 break; 8102 case BFD_RELOC_AARCH64_MOVW_G0_S: 8103 case BFD_RELOC_AARCH64_MOVW_G1_S: 8104 case BFD_RELOC_AARCH64_MOVW_G2_S: 8105 case BFD_RELOC_AARCH64_MOVW_PREL_G0: 8106 case BFD_RELOC_AARCH64_MOVW_PREL_G1: 8107 case BFD_RELOC_AARCH64_MOVW_PREL_G2: 8108 /* NOTE: We can only come here with movz or movn. */ 8109 if (signed_overflow (value, scale + 16)) 8110 as_bad_where (fixP->fx_file, fixP->fx_line, 8111 _("signed value out of range")); 8112 if (value < 0) 8113 { 8114 /* Force use of MOVN. */ 8115 value = ~value; 8116 insn = reencode_movzn_to_movn (insn); 8117 } 8118 else 8119 { 8120 /* Force use of MOVZ. */ 8121 insn = reencode_movzn_to_movz (insn); 8122 } 8123 break; 8124 default: 8125 /* Unchecked relocations. 
*/ 8126 break; 8127 } 8128 value >>= scale; 8129 } 8130 8131 /* Insert value into MOVN/MOVZ/MOVK instruction. */ 8132 insn |= encode_movw_imm (value & 0xffff); 8133 8134 put_aarch64_insn (buf, insn); 8135 } 8136 break; 8137 8138 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC: 8139 fixP->fx_r_type = (ilp32_p 8140 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC 8141 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC); 8142 S_SET_THREAD_LOCAL (fixP->fx_addsy); 8143 /* Should always be exported to object file, see 8144 aarch64_force_relocation(). */ 8145 gas_assert (!fixP->fx_done); 8146 gas_assert (seg->use_rela_p); 8147 break; 8148 8149 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC: 8150 fixP->fx_r_type = (ilp32_p 8151 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC 8152 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12); 8153 S_SET_THREAD_LOCAL (fixP->fx_addsy); 8154 /* Should always be exported to object file, see 8155 aarch64_force_relocation(). */ 8156 gas_assert (!fixP->fx_done); 8157 gas_assert (seg->use_rela_p); 8158 break; 8159 8160 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12: 8161 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21: 8162 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21: 8163 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC: 8164 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12: 8165 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19: 8166 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC: 8167 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21: 8168 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21: 8169 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC: 8170 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1: 8171 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21: 8172 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC: 8173 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: 8174 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19: 8175 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC: 8176 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1: 8177 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12: 8178 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12: 8179 case 
BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC: 8180 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC: 8181 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21: 8182 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21: 8183 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12: 8184 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC: 8185 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12: 8186 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC: 8187 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12: 8188 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC: 8189 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12: 8190 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC: 8191 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0: 8192 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC: 8193 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1: 8194 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC: 8195 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2: 8196 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12: 8197 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC: 8198 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12: 8199 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC: 8200 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12: 8201 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC: 8202 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12: 8203 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC: 8204 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12: 8205 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12: 8206 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC: 8207 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0: 8208 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC: 8209 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1: 8210 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC: 8211 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2: 8212 S_SET_THREAD_LOCAL (fixP->fx_addsy); 8213 /* Should always be exported to object file, see 8214 aarch64_force_relocation(). 
*/ 8215 gas_assert (!fixP->fx_done); 8216 gas_assert (seg->use_rela_p); 8217 break; 8218 8219 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC: 8220 /* Should always be exported to object file, see 8221 aarch64_force_relocation(). */ 8222 fixP->fx_r_type = (ilp32_p 8223 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC 8224 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC); 8225 gas_assert (!fixP->fx_done); 8226 gas_assert (seg->use_rela_p); 8227 break; 8228 8229 case BFD_RELOC_AARCH64_ADD_LO12: 8230 case BFD_RELOC_AARCH64_ADR_GOT_PAGE: 8231 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL: 8232 case BFD_RELOC_AARCH64_ADR_HI21_PCREL: 8233 case BFD_RELOC_AARCH64_GOT_LD_PREL19: 8234 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC: 8235 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14: 8236 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15: 8237 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15: 8238 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC: 8239 case BFD_RELOC_AARCH64_LDST128_LO12: 8240 case BFD_RELOC_AARCH64_LDST16_LO12: 8241 case BFD_RELOC_AARCH64_LDST32_LO12: 8242 case BFD_RELOC_AARCH64_LDST64_LO12: 8243 case BFD_RELOC_AARCH64_LDST8_LO12: 8244 /* Should always be exported to object file, see 8245 aarch64_force_relocation(). */ 8246 gas_assert (!fixP->fx_done); 8247 gas_assert (seg->use_rela_p); 8248 break; 8249 8250 case BFD_RELOC_AARCH64_TLSDESC_ADD: 8251 case BFD_RELOC_AARCH64_TLSDESC_CALL: 8252 case BFD_RELOC_AARCH64_TLSDESC_LDR: 8253 break; 8254 8255 case BFD_RELOC_UNUSED: 8256 /* An error will already have been reported. */ 8257 break; 8258 8259 default: 8260 as_bad_where (fixP->fx_file, fixP->fx_line, 8261 _("unexpected %s fixup"), 8262 bfd_get_reloc_code_name (fixP->fx_r_type)); 8263 break; 8264 } 8265 8266 apply_fix_return: 8267 /* Free the allocated the struct aarch64_inst. 8268 N.B. currently there are very limited number of fix-up types actually use 8269 this field, so the impact on the performance should be minimal . 
*/ 8270 if (fixP->tc_fix_data.inst != NULL) 8271 free (fixP->tc_fix_data.inst); 8272 8273 return; 8274 } 8275 8276 /* Translate internal representation of relocation info to BFD target 8277 format. */ 8278 8279 arelent * 8280 tc_gen_reloc (asection * section, fixS * fixp) 8281 { 8282 arelent *reloc; 8283 bfd_reloc_code_real_type code; 8284 8285 reloc = XNEW (arelent); 8286 8287 reloc->sym_ptr_ptr = XNEW (asymbol *); 8288 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy); 8289 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where; 8290 8291 if (fixp->fx_pcrel) 8292 { 8293 if (section->use_rela_p) 8294 fixp->fx_offset -= md_pcrel_from_section (fixp, section); 8295 else 8296 fixp->fx_offset = reloc->address; 8297 } 8298 reloc->addend = fixp->fx_offset; 8299 8300 code = fixp->fx_r_type; 8301 switch (code) 8302 { 8303 case BFD_RELOC_16: 8304 if (fixp->fx_pcrel) 8305 code = BFD_RELOC_16_PCREL; 8306 break; 8307 8308 case BFD_RELOC_32: 8309 if (fixp->fx_pcrel) 8310 code = BFD_RELOC_32_PCREL; 8311 break; 8312 8313 case BFD_RELOC_64: 8314 if (fixp->fx_pcrel) 8315 code = BFD_RELOC_64_PCREL; 8316 break; 8317 8318 default: 8319 break; 8320 } 8321 8322 reloc->howto = bfd_reloc_type_lookup (stdoutput, code); 8323 if (reloc->howto == NULL) 8324 { 8325 as_bad_where (fixp->fx_file, fixp->fx_line, 8326 _ 8327 ("cannot represent %s relocation in this object file format"), 8328 bfd_get_reloc_code_name (code)); 8329 return NULL; 8330 } 8331 8332 return reloc; 8333 } 8334 8335 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */ 8336 8337 void 8338 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp) 8339 { 8340 bfd_reloc_code_real_type type; 8341 int pcrel = 0; 8342 8343 /* Pick a reloc. 8344 FIXME: @@ Should look at CPU word size. 
   */
  switch (size)
    {
    case 1:
      type = BFD_RELOC_8;
      break;
    case 2:
      type = BFD_RELOC_16;
      break;
    case 4:
      type = BFD_RELOC_32;
      break;
    case 8:
      type = BFD_RELOC_64;
      break;
    default:
      as_bad (_("cannot do %u-byte relocation"), size);
      type = BFD_RELOC_UNUSED;
      break;
    }

  fix_new_exp (frag, where, (int) size, exp, pcrel, type);
}

/* Decide whether a fixup against a defined local symbol must still be
   emitted as a relocation.  Returns non-zero to force the reloc.  */

int
aarch64_force_relocation (struct fix *fixp)
{
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
	 even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
	 ilp32_p.  */
      return 0;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Always leave these relocations for the linker.  */
      return 1;

    default:
      break;
    }

  return generic_force_reloc (fixp);
}

#ifdef OBJ_ELF

/* Implement md_after_parse_args.  This is the earliest time we need to decide
   ABI.  If no -mabi specified, the ABI will be decided by target triplet.  */

void
aarch64_after_parse_args (void)
{
  if (aarch64_abi != AARCH64_ABI_NONE)
    return;

  /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32.  */
  /* NOTE(review): the "+ 7" hard-codes strlen ("aarch64"); this relies on
     configure.tgt always producing "aarch64[:32]" here -- confirm if other
     arch spellings can reach this code.  */
  if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
    aarch64_abi = AARCH64_ABI_ILP32;
  else
    aarch64_abi = AARCH64_ABI_LP64;
}

/* Return the BFD target name for the configured endianness and ABI.  */

const char *
elf64_aarch64_target_format (void)
{
#ifdef TE_CLOUDABI
  /* FIXME: What to do for ilp32_p ?  */
  if (target_big_endian)
    return "elf64-bigaarch64-cloudabi";
  else
    return "elf64-littleaarch64-cloudabi";
#else
  if (target_big_endian)
    return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
  else
    return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
#endif
}

/* Hook called per symbol at write time; defers to the generic ELF code.  */

void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  elf_frob_symbol (symp, puntp);
}
#endif

/* MD interface: Finalization.  */

/* A good place to do this, although this was probably not intended
   for this kind of use.  We need to dump the literal pool before
   references are made to a null symbol pointer.
*/ 8511 8512 void 8513 aarch64_cleanup (void) 8514 { 8515 literal_pool *pool; 8516 8517 for (pool = list_of_pools; pool; pool = pool->next) 8518 { 8519 /* Put it at the end of the relevant section. */ 8520 subseg_set (pool->section, pool->sub_section); 8521 s_ltorg (0); 8522 } 8523 } 8524 8525 #ifdef OBJ_ELF 8526 /* Remove any excess mapping symbols generated for alignment frags in 8527 SEC. We may have created a mapping symbol before a zero byte 8528 alignment; remove it if there's a mapping symbol after the 8529 alignment. */ 8530 static void 8531 check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec, 8532 void *dummy ATTRIBUTE_UNUSED) 8533 { 8534 segment_info_type *seginfo = seg_info (sec); 8535 fragS *fragp; 8536 8537 if (seginfo == NULL || seginfo->frchainP == NULL) 8538 return; 8539 8540 for (fragp = seginfo->frchainP->frch_root; 8541 fragp != NULL; fragp = fragp->fr_next) 8542 { 8543 symbolS *sym = fragp->tc_frag_data.last_map; 8544 fragS *next = fragp->fr_next; 8545 8546 /* Variable-sized frags have been converted to fixed size by 8547 this point. But if this was variable-sized to start with, 8548 there will be a fixed-size frag after it. So don't handle 8549 next == NULL. */ 8550 if (sym == NULL || next == NULL) 8551 continue; 8552 8553 if (S_GET_VALUE (sym) < next->fr_address) 8554 /* Not at the end of this frag. */ 8555 continue; 8556 know (S_GET_VALUE (sym) == next->fr_address); 8557 8558 do 8559 { 8560 if (next->tc_frag_data.first_map != NULL) 8561 { 8562 /* Next frag starts with a mapping symbol. Discard this 8563 one. */ 8564 symbol_remove (sym, &symbol_rootP, &symbol_lastP); 8565 break; 8566 } 8567 8568 if (next->fr_next == NULL) 8569 { 8570 /* This mapping symbol is at the end of the section. Discard 8571 it. */ 8572 know (next->fr_fix == 0 && next->fr_var == 0); 8573 symbol_remove (sym, &symbol_rootP, &symbol_lastP); 8574 break; 8575 } 8576 8577 /* As long as we have empty frags without any mapping symbols, 8578 keep looking. 
*/ 8579 /* If the next frag is non-empty and does not start with a 8580 mapping symbol, then this mapping symbol is required. */ 8581 if (next->fr_address != next->fr_next->fr_address) 8582 break; 8583 8584 next = next->fr_next; 8585 } 8586 while (next != NULL); 8587 } 8588 } 8589 #endif 8590 8591 /* Adjust the symbol table. */ 8592 8593 void 8594 aarch64_adjust_symtab (void) 8595 { 8596 #ifdef OBJ_ELF 8597 /* Remove any overlapping mapping symbols generated by alignment frags. */ 8598 bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0); 8599 /* Now do generic ELF adjustments. */ 8600 elf_adjust_symtab (); 8601 #endif 8602 } 8603 8604 static void 8605 checked_hash_insert (struct hash_control *table, const char *key, void *value) 8606 { 8607 const char *hash_err; 8608 8609 hash_err = hash_insert (table, key, value); 8610 if (hash_err) 8611 printf ("Internal Error: Can't hash %s\n", key); 8612 } 8613 8614 static void 8615 fill_instruction_hash_table (void) 8616 { 8617 aarch64_opcode *opcode = aarch64_opcode_table; 8618 8619 while (opcode->name != NULL) 8620 { 8621 templates *templ, *new_templ; 8622 templ = hash_find (aarch64_ops_hsh, opcode->name); 8623 8624 new_templ = XNEW (templates); 8625 new_templ->opcode = opcode; 8626 new_templ->next = NULL; 8627 8628 if (!templ) 8629 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ); 8630 else 8631 { 8632 new_templ->next = templ->next; 8633 templ->next = new_templ; 8634 } 8635 ++opcode; 8636 } 8637 } 8638 8639 static inline void 8640 convert_to_upper (char *dst, const char *src, size_t num) 8641 { 8642 unsigned int i; 8643 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src) 8644 *dst = TOUPPER (*src); 8645 *dst = '\0'; 8646 } 8647 8648 /* Assume STR point to a lower-case string, allocate, convert and return 8649 the corresponding upper-case string. 
   */
static inline const char*
get_upper_str (const char *str)
{
  char *ret;
  size_t len = strlen (str);
  /* NOTE: the returned buffer is never freed; entries registered in the
     hash tables below live for the lifetime of the assembler.  */
  ret = XNEWVEC (char, len + 1);
  convert_to_upper (ret, str, len);
  return ret;
}

/* MD interface: Initialization.  */

/* Build all operand/register/system-register lookup tables and select the
   CPU variant from the command-line options.  Called once at start-up.  */

void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  if ((aarch64_ops_hsh = hash_new ()) == NULL
      || (aarch64_cond_hsh = hash_new ()) == NULL
      || (aarch64_shift_hsh = hash_new ()) == NULL
      || (aarch64_sys_regs_hsh = hash_new ()) == NULL
      || (aarch64_pstatefield_hsh = hash_new ()) == NULL
      || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
      || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
      || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
      || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
      || (aarch64_sys_regs_sr_hsh = hash_new ()) == NULL
      || (aarch64_reg_hsh = hash_new ()) == NULL
      || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
      || (aarch64_nzcv_hsh = hash_new ()) == NULL
      || (aarch64_pldop_hsh = hash_new ()) == NULL
      || (aarch64_hint_opt_hsh = hash_new ()) == NULL)
    as_fatal (_("virtual memory exhausted"));

  fill_instruction_hash_table ();

  for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
    checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
			 (void *) (aarch64_sys_regs + i));

  for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
    checked_hash_insert (aarch64_pstatefield_hsh,
			 aarch64_pstatefields[i].name,
			 (void *) (aarch64_pstatefields + i));

  for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
    checked_hash_insert (aarch64_sys_regs_ic_hsh,
			 aarch64_sys_regs_ic[i].name,
			 (void *) (aarch64_sys_regs_ic + i));

  for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
    checked_hash_insert (aarch64_sys_regs_dc_hsh,
			 aarch64_sys_regs_dc[i].name,
			 (void *) (aarch64_sys_regs_dc + i));

  for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
    checked_hash_insert (aarch64_sys_regs_at_hsh,
			 aarch64_sys_regs_at[i].name,
			 (void *) (aarch64_sys_regs_at + i));

  for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
    checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
			 aarch64_sys_regs_tlbi[i].name,
			 (void *) (aarch64_sys_regs_tlbi + i));

  for (i = 0; aarch64_sys_regs_sr[i].name != NULL; i++)
    checked_hash_insert (aarch64_sys_regs_sr_hsh,
			 aarch64_sys_regs_sr[i].name,
			 (void *) (aarch64_sys_regs_sr + i));

  for (i = 0; i < ARRAY_SIZE (reg_names); i++)
    checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
			 (void *) (reg_names + i));

  for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
    checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
			 (void *) (nzcv_names + i));

  for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
    {
      const char *name = aarch64_operand_modifiers[i].name;
      checked_hash_insert (aarch64_shift_hsh, name,
			   (void *) (aarch64_operand_modifiers + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
			   (void *) (aarch64_operand_modifiers + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
    {
      unsigned int j;
      /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
	 the same condition code.  */
      for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
	{
	  const char *name = aarch64_conds[i].names[j];
	  if (name == NULL)
	    break;
	  checked_hash_insert (aarch64_cond_hsh, name,
			       (void *) (aarch64_conds + i));
	  /* Also hash the name in the upper case.  */
	  checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
			       (void *) (aarch64_conds + i));
	}
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
    {
      const char *name = aarch64_barrier_options[i].name;
      /* Skip xx00 - the unallocated values of option.  */
      if ((i & 0x3) == 0)
	continue;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
    {
      const char* name = aarch64_prfops[i].name;
      /* Skip the unallocated hint encodings.  */
      if (name == NULL)
	continue;
      checked_hash_insert (aarch64_pldop_hsh, name,
			   (void *) (aarch64_prfops + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
			   (void *) (aarch64_prfops + i));
    }

  for (i = 0; aarch64_hint_options[i].name != NULL; i++)
    {
      const char* name = aarch64_hint_options[i].name;
      const char* upper_name = get_upper_str(name);

      checked_hash_insert (aarch64_hint_opt_hsh, name,
			   (void *) (aarch64_hint_options + i));

      /* Also hash the name in the upper case if not the same.  */
      if (strcmp (name, upper_name) != 0)
	checked_hash_insert (aarch64_hint_opt_hsh, upper_name,
			     (void *) (aarch64_hint_options + i));
    }

  /* Set the cpu variant based on the command-line options.  */
  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = &cpu_default;

  cpu_variant = *mcpu_cpu_opt;

  /* Record the CPU type.  */
  mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}

/* Command line processing.  */

const char *md_shortopts = "m:";

#ifdef AARCH64_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

struct option md_longopts[] = {
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);

struct aarch64_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};

static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};

struct aarch64_cpu_option_table
{
  const char *name;
  const aarch64_feature_set value;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.
*/ 8868 const char *canonical_name; 8869 }; 8870 8871 /* This list should, at a minimum, contain all the cpu names 8872 recognized by GCC. */ 8873 static const struct aarch64_cpu_option_table aarch64_cpus[] = { 8874 {"all", AARCH64_ANY, NULL}, 8875 {"cortex-a34", AARCH64_FEATURE (AARCH64_ARCH_V8, 8876 AARCH64_FEATURE_CRC), "Cortex-A34"}, 8877 {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8, 8878 AARCH64_FEATURE_CRC), "Cortex-A35"}, 8879 {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8, 8880 AARCH64_FEATURE_CRC), "Cortex-A53"}, 8881 {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8, 8882 AARCH64_FEATURE_CRC), "Cortex-A57"}, 8883 {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8, 8884 AARCH64_FEATURE_CRC), "Cortex-A72"}, 8885 {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8, 8886 AARCH64_FEATURE_CRC), "Cortex-A73"}, 8887 {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2, 8888 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD), 8889 "Cortex-A55"}, 8890 {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2, 8891 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD), 8892 "Cortex-A75"}, 8893 {"cortex-a76", AARCH64_FEATURE (AARCH64_ARCH_V8_2, 8894 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD), 8895 "Cortex-A76"}, 8896 {"cortex-a76ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2, 8897 AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC 8898 | AARCH64_FEATURE_DOTPROD 8899 | AARCH64_FEATURE_SSBS), 8900 "Cortex-A76AE"}, 8901 {"cortex-a77", AARCH64_FEATURE (AARCH64_ARCH_V8_2, 8902 AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC 8903 | AARCH64_FEATURE_DOTPROD 8904 | AARCH64_FEATURE_SSBS), 8905 "Cortex-A77"}, 8906 {"cortex-a65", AARCH64_FEATURE (AARCH64_ARCH_V8_2, 8907 AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC 8908 | AARCH64_FEATURE_DOTPROD 8909 | AARCH64_FEATURE_SSBS), 8910 "Cortex-A65"}, 8911 {"cortex-a65ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2, 8912 AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC 8913 | AARCH64_FEATURE_DOTPROD 8914 | 
AARCH64_FEATURE_SSBS), 8915 "Cortex-A65AE"}, 8916 {"ares", AARCH64_FEATURE (AARCH64_ARCH_V8_2, 8917 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 8918 | AARCH64_FEATURE_DOTPROD 8919 | AARCH64_FEATURE_PROFILE), 8920 "Ares"}, 8921 {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8, 8922 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO), 8923 "Samsung Exynos M1"}, 8924 {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8, 8925 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO 8926 | AARCH64_FEATURE_RDMA), 8927 "Qualcomm Falkor"}, 8928 {"neoverse-e1", AARCH64_FEATURE (AARCH64_ARCH_V8_2, 8929 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 8930 | AARCH64_FEATURE_DOTPROD 8931 | AARCH64_FEATURE_SSBS), 8932 "Neoverse E1"}, 8933 {"neoverse-n1", AARCH64_FEATURE (AARCH64_ARCH_V8_2, 8934 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 8935 | AARCH64_FEATURE_DOTPROD 8936 | AARCH64_FEATURE_PROFILE), 8937 "Neoverse N1"}, 8938 {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8, 8939 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO 8940 | AARCH64_FEATURE_RDMA), 8941 "Qualcomm QDF24XX"}, 8942 {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_4, 8943 AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE), 8944 "Qualcomm Saphira"}, 8945 {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8, 8946 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO), 8947 "Cavium ThunderX"}, 8948 {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1, 8949 AARCH64_FEATURE_CRYPTO), 8950 "Broadcom Vulcan"}, 8951 /* The 'xgene-1' name is an older name for 'xgene1', which was used 8952 in earlier releases and is superseded by 'xgene1' in all 8953 tools. 
*/ 8954 {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"}, 8955 {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"}, 8956 {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8, 8957 AARCH64_FEATURE_CRC), "APM X-Gene 2"}, 8958 {"generic", AARCH64_ARCH_V8, NULL}, 8959 8960 {NULL, AARCH64_ARCH_NONE, NULL} 8961 }; 8962 8963 struct aarch64_arch_option_table 8964 { 8965 const char *name; 8966 const aarch64_feature_set value; 8967 }; 8968 8969 /* This list should, at a minimum, contain all the architecture names 8970 recognized by GCC. */ 8971 static const struct aarch64_arch_option_table aarch64_archs[] = { 8972 {"all", AARCH64_ANY}, 8973 {"armv8-a", AARCH64_ARCH_V8}, 8974 {"armv8.1-a", AARCH64_ARCH_V8_1}, 8975 {"armv8.2-a", AARCH64_ARCH_V8_2}, 8976 {"armv8.3-a", AARCH64_ARCH_V8_3}, 8977 {"armv8.4-a", AARCH64_ARCH_V8_4}, 8978 {"armv8.5-a", AARCH64_ARCH_V8_5}, 8979 {"armv8.6-a", AARCH64_ARCH_V8_6}, 8980 {NULL, AARCH64_ARCH_NONE} 8981 }; 8982 8983 /* ISA extensions. */ 8984 struct aarch64_option_cpu_value_table 8985 { 8986 const char *name; 8987 const aarch64_feature_set value; 8988 const aarch64_feature_set require; /* Feature dependencies. 
*/ 8989 }; 8990 8991 static const struct aarch64_option_cpu_value_table aarch64_features[] = { 8992 {"crc", AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0), 8993 AARCH64_ARCH_NONE}, 8994 {"crypto", AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0), 8995 AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)}, 8996 {"fp", AARCH64_FEATURE (AARCH64_FEATURE_FP, 0), 8997 AARCH64_ARCH_NONE}, 8998 {"lse", AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0), 8999 AARCH64_ARCH_NONE}, 9000 {"simd", AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0), 9001 AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)}, 9002 {"pan", AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0), 9003 AARCH64_ARCH_NONE}, 9004 {"lor", AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0), 9005 AARCH64_ARCH_NONE}, 9006 {"ras", AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0), 9007 AARCH64_ARCH_NONE}, 9008 {"rdma", AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0), 9009 AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)}, 9010 {"fp16", AARCH64_FEATURE (AARCH64_FEATURE_F16, 0), 9011 AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)}, 9012 {"fp16fml", AARCH64_FEATURE (AARCH64_FEATURE_F16_FML, 0), 9013 AARCH64_FEATURE (AARCH64_FEATURE_FP 9014 | AARCH64_FEATURE_F16, 0)}, 9015 {"profile", AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0), 9016 AARCH64_ARCH_NONE}, 9017 {"sve", AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0), 9018 AARCH64_FEATURE (AARCH64_FEATURE_F16 9019 | AARCH64_FEATURE_SIMD 9020 | AARCH64_FEATURE_COMPNUM, 0)}, 9021 {"tme", AARCH64_FEATURE (AARCH64_FEATURE_TME, 0), 9022 AARCH64_ARCH_NONE}, 9023 {"compnum", AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0), 9024 AARCH64_FEATURE (AARCH64_FEATURE_F16 9025 | AARCH64_FEATURE_SIMD, 0)}, 9026 {"rcpc", AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0), 9027 AARCH64_ARCH_NONE}, 9028 {"dotprod", AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0), 9029 AARCH64_ARCH_NONE}, 9030 {"sha2", AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0), 9031 AARCH64_ARCH_NONE}, 9032 {"sb", AARCH64_FEATURE (AARCH64_FEATURE_SB, 0), 9033 AARCH64_ARCH_NONE}, 9034 {"predres", AARCH64_FEATURE (AARCH64_FEATURE_PREDRES, 0), 9035 
AARCH64_ARCH_NONE}, 9036 {"aes", AARCH64_FEATURE (AARCH64_FEATURE_AES, 0), 9037 AARCH64_ARCH_NONE}, 9038 {"sm4", AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0), 9039 AARCH64_ARCH_NONE}, 9040 {"sha3", AARCH64_FEATURE (AARCH64_FEATURE_SHA3, 0), 9041 AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0)}, 9042 {"rng", AARCH64_FEATURE (AARCH64_FEATURE_RNG, 0), 9043 AARCH64_ARCH_NONE}, 9044 {"ssbs", AARCH64_FEATURE (AARCH64_FEATURE_SSBS, 0), 9045 AARCH64_ARCH_NONE}, 9046 {"memtag", AARCH64_FEATURE (AARCH64_FEATURE_MEMTAG, 0), 9047 AARCH64_ARCH_NONE}, 9048 {"sve2", AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0), 9049 AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)}, 9050 {"sve2-sm4", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SM4, 0), 9051 AARCH64_FEATURE (AARCH64_FEATURE_SVE2 9052 | AARCH64_FEATURE_SM4, 0)}, 9053 {"sve2-aes", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_AES, 0), 9054 AARCH64_FEATURE (AARCH64_FEATURE_SVE2 9055 | AARCH64_FEATURE_AES, 0)}, 9056 {"sve2-sha3", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SHA3, 0), 9057 AARCH64_FEATURE (AARCH64_FEATURE_SVE2 9058 | AARCH64_FEATURE_SHA3, 0)}, 9059 {"sve2-bitperm", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_BITPERM, 0), 9060 AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0)}, 9061 {"bf16", AARCH64_FEATURE (AARCH64_FEATURE_BFLOAT16, 0), 9062 AARCH64_ARCH_NONE}, 9063 {"i8mm", AARCH64_FEATURE (AARCH64_FEATURE_I8MM, 0), 9064 AARCH64_ARCH_NONE}, 9065 {"f32mm", AARCH64_FEATURE (AARCH64_FEATURE_F32MM, 0), 9066 AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)}, 9067 {"f64mm", AARCH64_FEATURE (AARCH64_FEATURE_F64MM, 0), 9068 AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)}, 9069 {NULL, AARCH64_ARCH_NONE, AARCH64_ARCH_NONE}, 9070 }; 9071 9072 struct aarch64_long_option_table 9073 { 9074 const char *option; /* Substring to match. */ 9075 const char *help; /* Help information. */ 9076 int (*func) (const char *subopt); /* Function to decode sub-option. */ 9077 char *deprecated; /* If non-null, print this message. */ 9078 }; 9079 9080 /* Transitive closure of features depending on set. 
*/ 9081 static aarch64_feature_set 9082 aarch64_feature_disable_set (aarch64_feature_set set) 9083 { 9084 const struct aarch64_option_cpu_value_table *opt; 9085 aarch64_feature_set prev = 0; 9086 9087 while (prev != set) { 9088 prev = set; 9089 for (opt = aarch64_features; opt->name != NULL; opt++) 9090 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set)) 9091 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value); 9092 } 9093 return set; 9094 } 9095 9096 /* Transitive closure of dependencies of set. */ 9097 static aarch64_feature_set 9098 aarch64_feature_enable_set (aarch64_feature_set set) 9099 { 9100 const struct aarch64_option_cpu_value_table *opt; 9101 aarch64_feature_set prev = 0; 9102 9103 while (prev != set) { 9104 prev = set; 9105 for (opt = aarch64_features; opt->name != NULL; opt++) 9106 if (AARCH64_CPU_HAS_FEATURE (set, opt->value)) 9107 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require); 9108 } 9109 return set; 9110 } 9111 9112 static int 9113 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p, 9114 bfd_boolean ext_only) 9115 { 9116 /* We insist on extensions being added before being removed. We achieve 9117 this by using the ADDING_VALUE variable to indicate whether we are 9118 adding an extension (1) or removing it (0) and only allowing it to 9119 change in the order -1 -> 1 -> 0. */ 9120 int adding_value = -1; 9121 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set); 9122 9123 /* Copy the feature set, so that we can modify it. 
*/ 9124 *ext_set = **opt_p; 9125 *opt_p = ext_set; 9126 9127 while (str != NULL && *str != 0) 9128 { 9129 const struct aarch64_option_cpu_value_table *opt; 9130 const char *ext = NULL; 9131 int optlen; 9132 9133 if (!ext_only) 9134 { 9135 if (*str != '+') 9136 { 9137 as_bad (_("invalid architectural extension")); 9138 return 0; 9139 } 9140 9141 ext = strchr (++str, '+'); 9142 } 9143 9144 if (ext != NULL) 9145 optlen = ext - str; 9146 else 9147 optlen = strlen (str); 9148 9149 if (optlen >= 2 && strncmp (str, "no", 2) == 0) 9150 { 9151 if (adding_value != 0) 9152 adding_value = 0; 9153 optlen -= 2; 9154 str += 2; 9155 } 9156 else if (optlen > 0) 9157 { 9158 if (adding_value == -1) 9159 adding_value = 1; 9160 else if (adding_value != 1) 9161 { 9162 as_bad (_("must specify extensions to add before specifying " 9163 "those to remove")); 9164 return FALSE; 9165 } 9166 } 9167 9168 if (optlen == 0) 9169 { 9170 as_bad (_("missing architectural extension")); 9171 return 0; 9172 } 9173 9174 gas_assert (adding_value != -1); 9175 9176 for (opt = aarch64_features; opt->name != NULL; opt++) 9177 if (strncmp (opt->name, str, optlen) == 0) 9178 { 9179 aarch64_feature_set set; 9180 9181 /* Add or remove the extension. 
*/ 9182 if (adding_value) 9183 { 9184 set = aarch64_feature_enable_set (opt->value); 9185 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set); 9186 } 9187 else 9188 { 9189 set = aarch64_feature_disable_set (opt->value); 9190 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set); 9191 } 9192 break; 9193 } 9194 9195 if (opt->name == NULL) 9196 { 9197 as_bad (_("unknown architectural extension `%s'"), str); 9198 return 0; 9199 } 9200 9201 str = ext; 9202 }; 9203 9204 return 1; 9205 } 9206 9207 static int 9208 aarch64_parse_cpu (const char *str) 9209 { 9210 const struct aarch64_cpu_option_table *opt; 9211 const char *ext = strchr (str, '+'); 9212 size_t optlen; 9213 9214 if (ext != NULL) 9215 optlen = ext - str; 9216 else 9217 optlen = strlen (str); 9218 9219 if (optlen == 0) 9220 { 9221 as_bad (_("missing cpu name `%s'"), str); 9222 return 0; 9223 } 9224 9225 for (opt = aarch64_cpus; opt->name != NULL; opt++) 9226 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0) 9227 { 9228 mcpu_cpu_opt = &opt->value; 9229 if (ext != NULL) 9230 return aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE); 9231 9232 return 1; 9233 } 9234 9235 as_bad (_("unknown cpu `%s'"), str); 9236 return 0; 9237 } 9238 9239 static int 9240 aarch64_parse_arch (const char *str) 9241 { 9242 const struct aarch64_arch_option_table *opt; 9243 const char *ext = strchr (str, '+'); 9244 size_t optlen; 9245 9246 if (ext != NULL) 9247 optlen = ext - str; 9248 else 9249 optlen = strlen (str); 9250 9251 if (optlen == 0) 9252 { 9253 as_bad (_("missing architecture name `%s'"), str); 9254 return 0; 9255 } 9256 9257 for (opt = aarch64_archs; opt->name != NULL; opt++) 9258 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0) 9259 { 9260 march_cpu_opt = &opt->value; 9261 if (ext != NULL) 9262 return aarch64_parse_features (ext, &march_cpu_opt, FALSE); 9263 9264 return 1; 9265 } 9266 9267 as_bad (_("unknown architecture `%s'\n"), str); 9268 return 0; 9269 } 9270 9271 /* ABIs. 
*/
struct aarch64_option_abi_value_table
{
  const char *name;
  enum aarch64_abi_type value;
};

/* Recognized -mabi= values.  */
static const struct aarch64_option_abi_value_table aarch64_abis[] = {
  {"ilp32", AARCH64_ABI_ILP32},
  {"lp64", AARCH64_ABI_LP64},
};

/* Parse the -mabi= argument STR and set aarch64_abi accordingly.
   Return 1 on success, 0 on failure.  */

static int
aarch64_parse_abi (const char *str)
{
  unsigned int i;

  if (str[0] == '\0')
    {
      as_bad (_("missing abi name `%s'"), str);
      return 0;
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
    if (strcmp (str, aarch64_abis[i].name) == 0)
      {
	aarch64_abi = aarch64_abis[i].value;
	return 1;
      }

  as_bad (_("unknown abi `%s'\n"), str);
  return 0;
}

/* Options of the form -m<opt>=<value>, dispatched to a sub-parser.  */
static struct aarch64_long_option_table aarch64_long_opts[] = {
#ifdef OBJ_ELF
  {"mabi=", N_("<abi name>\t  specify for ABI <abi name>"),
   aarch64_parse_abi, NULL},
#endif /* OBJ_ELF */
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   aarch64_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   aarch64_parse_arch, NULL},
  {NULL, NULL, 0, NULL}
};

/* GAS machine-dependent option hook.  C is the short option letter (or an
   OPTION_* code from md_longopts) and ARG its argument, if any.  Return
   non-zero if the option was recognized.  */

int
md_parse_option (int c, const char *arg)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      /* First try the simple flag-setting options.  */
      for (opt = aarch64_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser.  The "- 1" accounts for the
		 option's first character already being consumed as C.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}

/* GAS hook: print the target-specific part of --help output to FP.  */

void
md_show_usage (FILE * fp)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  fprintf (fp, _(" AArch64-specific assembler options:\n"));

  for (opt = aarch64_opts; opt->option != NULL; opt++)
    if (opt->help != NULL)
      fprintf (fp, "  -%-23s%s\n", opt->option, _(opt->help));

  for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
    if (lopt->help != NULL)
      fprintf (fp, "  -%s%s\n", lopt->option, _(lopt->help));

#ifdef OPTION_EB
  fprintf (fp, _("\
  -EB                     assemble code for a big-endian cpu\n"));
#endif

#ifdef OPTION_EL
  fprintf (fp, _("\
  -EL                     assemble code for a little-endian cpu\n"));
#endif
}

/* Parse a .cpu directive.
*/ 9413 9414 static void 9415 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED) 9416 { 9417 const struct aarch64_cpu_option_table *opt; 9418 char saved_char; 9419 char *name; 9420 char *ext; 9421 size_t optlen; 9422 9423 name = input_line_pointer; 9424 while (*input_line_pointer && !ISSPACE (*input_line_pointer)) 9425 input_line_pointer++; 9426 saved_char = *input_line_pointer; 9427 *input_line_pointer = 0; 9428 9429 ext = strchr (name, '+'); 9430 9431 if (ext != NULL) 9432 optlen = ext - name; 9433 else 9434 optlen = strlen (name); 9435 9436 /* Skip the first "all" entry. */ 9437 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++) 9438 if (strlen (opt->name) == optlen 9439 && strncmp (name, opt->name, optlen) == 0) 9440 { 9441 mcpu_cpu_opt = &opt->value; 9442 if (ext != NULL) 9443 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE)) 9444 return; 9445 9446 cpu_variant = *mcpu_cpu_opt; 9447 9448 *input_line_pointer = saved_char; 9449 demand_empty_rest_of_line (); 9450 return; 9451 } 9452 as_bad (_("unknown cpu `%s'"), name); 9453 *input_line_pointer = saved_char; 9454 ignore_rest_of_line (); 9455 } 9456 9457 9458 /* Parse a .arch directive. */ 9459 9460 static void 9461 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED) 9462 { 9463 const struct aarch64_arch_option_table *opt; 9464 char saved_char; 9465 char *name; 9466 char *ext; 9467 size_t optlen; 9468 9469 name = input_line_pointer; 9470 while (*input_line_pointer && !ISSPACE (*input_line_pointer)) 9471 input_line_pointer++; 9472 saved_char = *input_line_pointer; 9473 *input_line_pointer = 0; 9474 9475 ext = strchr (name, '+'); 9476 9477 if (ext != NULL) 9478 optlen = ext - name; 9479 else 9480 optlen = strlen (name); 9481 9482 /* Skip the first "all" entry. 
*/ 9483 for (opt = aarch64_archs + 1; opt->name != NULL; opt++) 9484 if (strlen (opt->name) == optlen 9485 && strncmp (name, opt->name, optlen) == 0) 9486 { 9487 mcpu_cpu_opt = &opt->value; 9488 if (ext != NULL) 9489 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE)) 9490 return; 9491 9492 cpu_variant = *mcpu_cpu_opt; 9493 9494 *input_line_pointer = saved_char; 9495 demand_empty_rest_of_line (); 9496 return; 9497 } 9498 9499 as_bad (_("unknown architecture `%s'\n"), name); 9500 *input_line_pointer = saved_char; 9501 ignore_rest_of_line (); 9502 } 9503 9504 /* Parse a .arch_extension directive. */ 9505 9506 static void 9507 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED) 9508 { 9509 char saved_char; 9510 char *ext = input_line_pointer;; 9511 9512 while (*input_line_pointer && !ISSPACE (*input_line_pointer)) 9513 input_line_pointer++; 9514 saved_char = *input_line_pointer; 9515 *input_line_pointer = 0; 9516 9517 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, TRUE)) 9518 return; 9519 9520 cpu_variant = *mcpu_cpu_opt; 9521 9522 *input_line_pointer = saved_char; 9523 demand_empty_rest_of_line (); 9524 } 9525 9526 /* Copy symbol information. */ 9527 9528 void 9529 aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src) 9530 { 9531 AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src); 9532 } 9533 9534 #ifdef OBJ_ELF 9535 /* Same as elf_copy_symbol_attributes, but without copying st_other. 9536 This is needed so AArch64 specific st_other values can be independently 9537 specified for an IFUNC resolver (that is called by the dynamic linker) 9538 and the symbol it resolves (aliased to the resolver). In particular, 9539 if a function symbol has special st_other value set via directives, 9540 then attaching an IFUNC resolver to that symbol should not override 9541 the st_other setting. 
Requiring the directive on the IFUNC resolver 9542 symbol would be unexpected and problematic in C code, where the two 9543 symbols appear as two independent function declarations. */ 9544 9545 void 9546 aarch64_elf_copy_symbol_attributes (symbolS *dest, symbolS *src) 9547 { 9548 struct elf_obj_sy *srcelf = symbol_get_obj (src); 9549 struct elf_obj_sy *destelf = symbol_get_obj (dest); 9550 if (srcelf->size) 9551 { 9552 if (destelf->size == NULL) 9553 destelf->size = XNEW (expressionS); 9554 *destelf->size = *srcelf->size; 9555 } 9556 else 9557 { 9558 if (destelf->size != NULL) 9559 free (destelf->size); 9560 destelf->size = NULL; 9561 } 9562 S_SET_SIZE (dest, S_GET_SIZE (src)); 9563 } 9564 #endif 9565