1 /* aarch64-opc.c -- AArch64 opcode support. 2 Copyright 2009, 2010, 2011, 2012, 2013 Free Software Foundation, Inc. 3 Contributed by ARM Ltd. 4 5 This file is part of the GNU opcodes library. 6 7 This library is free software; you can redistribute it and/or modify 8 it under the terms of the GNU General Public License as published by 9 the Free Software Foundation; either version 3, or (at your option) 10 any later version. 11 12 It is distributed in the hope that it will be useful, but WITHOUT 13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY 14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public 15 License for more details. 16 17 You should have received a copy of the GNU General Public License 18 along with this program; see the file COPYING3. If not, 19 see <http://www.gnu.org/licenses/>. */ 20 21 #include "sysdep.h" 22 #include <assert.h> 23 #include <stdlib.h> 24 #include <stdio.h> 25 #include <stdint.h> 26 #include <stdarg.h> 27 #include <inttypes.h> 28 29 #include "opintl.h" 30 31 #include "aarch64-opc.h" 32 33 #ifdef DEBUG_AARCH64 34 int debug_dump = FALSE; 35 #endif /* DEBUG_AARCH64 */ 36 37 /* Helper functions to determine which operand to be used to encode/decode 38 the size:Q fields for AdvSIMD instructions. */ 39 40 static inline bfd_boolean 41 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier) 42 { 43 return ((qualifier >= AARCH64_OPND_QLF_V_8B 44 && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE 45 : FALSE); 46 } 47 48 static inline bfd_boolean 49 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier) 50 { 51 return ((qualifier >= AARCH64_OPND_QLF_S_B 52 && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE 53 : FALSE); 54 } 55 56 enum data_pattern 57 { 58 DP_UNKNOWN, 59 DP_VECTOR_3SAME, 60 DP_VECTOR_LONG, 61 DP_VECTOR_WIDE, 62 DP_VECTOR_ACROSS_LANES, 63 }; 64 65 static const char significant_operand_index [] = 66 { 67 0, /* DP_UNKNOWN, by default using operand 0. */ 68 0, /* DP_VECTOR_3SAME */ 69 1, /* DP_VECTOR_LONG */ 70 2, /* DP_VECTOR_WIDE */ 71 1, /* DP_VECTOR_ACROSS_LANES */ 72 }; 73 74 /* Given a sequence of qualifiers in QUALIFIERS, determine and return 75 the data pattern. 76 N.B. QUALIFIERS is a possible sequence of qualifiers each of which 77 corresponds to one of a sequence of operands. */ 78 79 static enum data_pattern 80 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers) 81 { 82 if (vector_qualifier_p (qualifiers[0]) == TRUE) 83 { 84 /* e.g. v.4s, v.4s, v.4s 85 or v.4h, v.4h, v.h[3]. */ 86 if (qualifiers[0] == qualifiers[1] 87 && vector_qualifier_p (qualifiers[2]) == TRUE 88 && (aarch64_get_qualifier_esize (qualifiers[0]) 89 == aarch64_get_qualifier_esize (qualifiers[1])) 90 && (aarch64_get_qualifier_esize (qualifiers[0]) 91 == aarch64_get_qualifier_esize (qualifiers[2]))) 92 return DP_VECTOR_3SAME; 93 /* e.g. v.8h, v.8b, v.8b. 94 or v.4s, v.4h, v.h[2]. 95 or v.8h, v.16b. */ 96 if (vector_qualifier_p (qualifiers[1]) == TRUE 97 && aarch64_get_qualifier_esize (qualifiers[0]) != 0 98 && (aarch64_get_qualifier_esize (qualifiers[0]) 99 == aarch64_get_qualifier_esize (qualifiers[1]) << 1)) 100 return DP_VECTOR_LONG; 101 /* e.g. v.8h, v.8h, v.8b. 
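            or v.2d, v.2d, v.2s (e.g. the widening SADDW/UADDW forms, where the
            first two operands share the double-width element size).  For this
            DP_VECTOR_WIDE pattern significant_operand_index above selects
            operand 2 for the size:Q encoding.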
*/ 102 if (qualifiers[0] == qualifiers[1] 103 && vector_qualifier_p (qualifiers[2]) == TRUE 104 && aarch64_get_qualifier_esize (qualifiers[0]) != 0 105 && (aarch64_get_qualifier_esize (qualifiers[0]) 106 == aarch64_get_qualifier_esize (qualifiers[2]) << 1) 107 && (aarch64_get_qualifier_esize (qualifiers[0]) 108 == aarch64_get_qualifier_esize (qualifiers[1]))) 109 return DP_VECTOR_WIDE; 110 } 111 else if (fp_qualifier_p (qualifiers[0]) == TRUE) 112 { 113 /* e.g. SADDLV <V><d>, <Vn>.<T>. */ 114 if (vector_qualifier_p (qualifiers[1]) == TRUE 115 && qualifiers[2] == AARCH64_OPND_QLF_NIL) 116 return DP_VECTOR_ACROSS_LANES; 117 } 118 119 return DP_UNKNOWN; 120 } 121 122 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in 123 the AdvSIMD instructions. */ 124 /* N.B. it is possible to do some optimization that doesn't call 125 get_data_pattern each time when we need to select an operand. We can 126 either buffer the caculated the result or statically generate the data, 127 however, it is not obvious that the optimization will bring significant 128 benefit. */ 129 130 int 131 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode) 132 { 133 return 134 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])]; 135 } 136 137 const aarch64_field fields[] = 138 { 139 { 0, 0 }, /* NIL. */ 140 { 0, 4 }, /* cond2: condition in truly conditional-executed inst. */ 141 { 0, 4 }, /* nzcv: flag bit specifier, encoded in the "nzcv" field. */ 142 { 5, 5 }, /* defgh: d:e:f:g:h bits in AdvSIMD modified immediate. */ 143 { 16, 3 }, /* abc: a:b:c bits in AdvSIMD modified immediate. */ 144 { 5, 19 }, /* imm19: e.g. in CBZ. */ 145 { 5, 19 }, /* immhi: e.g. in ADRP. */ 146 { 29, 2 }, /* immlo: e.g. in ADRP. */ 147 { 22, 2 }, /* size: in most AdvSIMD and floating-point instructions. */ 148 { 10, 2 }, /* vldst_size: size field in the AdvSIMD load/store inst. */ 149 { 29, 1 }, /* op: in AdvSIMD modified immediate instructions. */ 150 { 30, 1 }, /* Q: in most AdvSIMD instructions. */ 151 { 0, 5 }, /* Rt: in load/store instructions. */ 152 { 0, 5 }, /* Rd: in many integer instructions. */ 153 { 5, 5 }, /* Rn: in many integer instructions. */ 154 { 10, 5 }, /* Rt2: in load/store pair instructions. */ 155 { 10, 5 }, /* Ra: in fp instructions. */ 156 { 5, 3 }, /* op2: in the system instructions. */ 157 { 8, 4 }, /* CRm: in the system instructions. */ 158 { 12, 4 }, /* CRn: in the system instructions. */ 159 { 16, 3 }, /* op1: in the system instructions. */ 160 { 19, 2 }, /* op0: in the system instructions. */ 161 { 10, 3 }, /* imm3: in add/sub extended reg instructions. */ 162 { 12, 4 }, /* cond: condition flags as a source operand. */ 163 { 12, 4 }, /* opcode: in advsimd load/store instructions. */ 164 { 12, 4 }, /* cmode: in advsimd modified immediate instructions. */ 165 { 13, 3 }, /* asisdlso_opcode: opcode in advsimd ld/st single element. */ 166 { 13, 2 }, /* len: in advsimd tbl/tbx instructions. */ 167 { 16, 5 }, /* Rm: in ld/st reg offset and some integer inst. */ 168 { 16, 5 }, /* Rs: in load/store exclusive instructions. */ 169 { 13, 3 }, /* option: in ld/st reg offset + add/sub extended reg inst. */ 170 { 12, 1 }, /* S: in load/store reg offset instructions. */ 171 { 21, 2 }, /* hw: in move wide constant instructions. */ 172 { 22, 2 }, /* opc: in load/store reg offset instructions. */ 173 { 23, 1 }, /* opc1: in load/store reg offset instructions. */ 174 { 22, 2 }, /* shift: in add/sub reg/imm shifted instructions. 
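           (As with every entry in this table, { lsb, width } selects
           instruction bits [lsb+width-1 : lsb]; this field, for instance,
           lives in bits [23:22] of the instruction word.)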
*/ 175 { 22, 2 }, /* type: floating point type field in fp data inst. */ 176 { 30, 2 }, /* ldst_size: size field in ld/st reg offset inst. */ 177 { 10, 6 }, /* imm6: in add/sub reg shifted instructions. */ 178 { 11, 4 }, /* imm4: in advsimd ext and advsimd ins instructions. */ 179 { 16, 5 }, /* imm5: in conditional compare (immediate) instructions. */ 180 { 15, 7 }, /* imm7: in load/store pair pre/post index instructions. */ 181 { 13, 8 }, /* imm8: in floating-point scalar move immediate inst. */ 182 { 12, 9 }, /* imm9: in load/store pre/post index instructions. */ 183 { 10, 12 }, /* imm12: in ld/st unsigned imm or add/sub shifted inst. */ 184 { 5, 14 }, /* imm14: in test bit and branch instructions. */ 185 { 5, 16 }, /* imm16: in exception instructions. */ 186 { 0, 26 }, /* imm26: in unconditional branch instructions. */ 187 { 10, 6 }, /* imms: in bitfield and logical immediate instructions. */ 188 { 16, 6 }, /* immr: in bitfield and logical immediate instructions. */ 189 { 16, 3 }, /* immb: in advsimd shift by immediate instructions. */ 190 { 19, 4 }, /* immh: in advsimd shift by immediate instructions. */ 191 { 22, 1 }, /* N: in logical (immediate) instructions. */ 192 { 11, 1 }, /* index: in ld/st inst deciding the pre/post-index. */ 193 { 24, 1 }, /* index2: in ld/st pair inst deciding the pre/post-index. */ 194 { 31, 1 }, /* sf: in integer data processing instructions. */ 195 { 11, 1 }, /* H: in advsimd scalar x indexed element instructions. */ 196 { 21, 1 }, /* L: in advsimd scalar x indexed element instructions. */ 197 { 20, 1 }, /* M: in advsimd scalar x indexed element instructions. */ 198 { 31, 1 }, /* b5: in the test bit and branch instructions. */ 199 { 19, 5 }, /* b40: in the test bit and branch instructions. */ 200 { 10, 6 }, /* scale: in the fixed-point scalar to fp converting inst. */ 201 }; 202 203 enum aarch64_operand_class 204 aarch64_get_operand_class (enum aarch64_opnd type) 205 { 206 return aarch64_operands[type].op_class; 207 } 208 209 const char * 210 aarch64_get_operand_name (enum aarch64_opnd type) 211 { 212 return aarch64_operands[type].name; 213 } 214 215 /* Get operand description string. 216 This is usually for the diagnosis purpose. */ 217 const char * 218 aarch64_get_operand_desc (enum aarch64_opnd type) 219 { 220 return aarch64_operands[type].desc; 221 } 222 223 /* Table of all conditional affixes. */ 224 const aarch64_cond aarch64_conds[16] = 225 { 226 {{"eq"}, 0x0}, 227 {{"ne"}, 0x1}, 228 {{"cs", "hs"}, 0x2}, 229 {{"cc", "lo", "ul"}, 0x3}, 230 {{"mi"}, 0x4}, 231 {{"pl"}, 0x5}, 232 {{"vs"}, 0x6}, 233 {{"vc"}, 0x7}, 234 {{"hi"}, 0x8}, 235 {{"ls"}, 0x9}, 236 {{"ge"}, 0xa}, 237 {{"lt"}, 0xb}, 238 {{"gt"}, 0xc}, 239 {{"le"}, 0xd}, 240 {{"al"}, 0xe}, 241 {{"nv"}, 0xf}, 242 }; 243 244 const aarch64_cond * 245 get_cond_from_value (aarch64_insn value) 246 { 247 assert (value < 16); 248 return &aarch64_conds[(unsigned int) value]; 249 } 250 251 const aarch64_cond * 252 get_inverted_cond (const aarch64_cond *cond) 253 { 254 return &aarch64_conds[cond->value ^ 0x1]; 255 } 256 257 /* Table describing the operand extension/shifting operators; indexed by 258 enum aarch64_modifier_kind. 259 260 The value column provides the most common values for encoding modifiers, 261 which enables table-driven encoding/decoding for the modifiers. 
*/ 262 const struct aarch64_name_value_pair aarch64_operand_modifiers [] = 263 { 264 {"none", 0x0}, 265 {"msl", 0x0}, 266 {"ror", 0x3}, 267 {"asr", 0x2}, 268 {"lsr", 0x1}, 269 {"lsl", 0x0}, 270 {"uxtb", 0x0}, 271 {"uxth", 0x1}, 272 {"uxtw", 0x2}, 273 {"uxtx", 0x3}, 274 {"sxtb", 0x4}, 275 {"sxth", 0x5}, 276 {"sxtw", 0x6}, 277 {"sxtx", 0x7}, 278 {NULL, 0}, 279 }; 280 281 enum aarch64_modifier_kind 282 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc) 283 { 284 return desc - aarch64_operand_modifiers; 285 } 286 287 aarch64_insn 288 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind) 289 { 290 return aarch64_operand_modifiers[kind].value; 291 } 292 293 enum aarch64_modifier_kind 294 aarch64_get_operand_modifier_from_value (aarch64_insn value, 295 bfd_boolean extend_p) 296 { 297 if (extend_p == TRUE) 298 return AARCH64_MOD_UXTB + value; 299 else 300 return AARCH64_MOD_LSL - value; 301 } 302 303 bfd_boolean 304 aarch64_extend_operator_p (enum aarch64_modifier_kind kind) 305 { 306 return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX) 307 ? TRUE : FALSE; 308 } 309 310 static inline bfd_boolean 311 aarch64_shift_operator_p (enum aarch64_modifier_kind kind) 312 { 313 return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL) 314 ? TRUE : FALSE; 315 } 316 317 const struct aarch64_name_value_pair aarch64_barrier_options[16] = 318 { 319 { "#0x00", 0x0 }, 320 { "oshld", 0x1 }, 321 { "oshst", 0x2 }, 322 { "osh", 0x3 }, 323 { "#0x04", 0x4 }, 324 { "nshld", 0x5 }, 325 { "nshst", 0x6 }, 326 { "nsh", 0x7 }, 327 { "#0x08", 0x8 }, 328 { "ishld", 0x9 }, 329 { "ishst", 0xa }, 330 { "ish", 0xb }, 331 { "#0x0c", 0xc }, 332 { "ld", 0xd }, 333 { "st", 0xe }, 334 { "sy", 0xf }, 335 }; 336 337 /* op -> op: load = 0 instruction = 1 store = 2 338 l -> level: 1-3 339 t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1 */ 340 #define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t)) 341 const struct aarch64_name_value_pair aarch64_prfops[32] = 342 { 343 { "pldl1keep", B(0, 1, 0) }, 344 { "pldl1strm", B(0, 1, 1) }, 345 { "pldl2keep", B(0, 2, 0) }, 346 { "pldl2strm", B(0, 2, 1) }, 347 { "pldl3keep", B(0, 3, 0) }, 348 { "pldl3strm", B(0, 3, 1) }, 349 { NULL, 0x06 }, 350 { NULL, 0x07 }, 351 { "plil1keep", B(1, 1, 0) }, 352 { "plil1strm", B(1, 1, 1) }, 353 { "plil2keep", B(1, 2, 0) }, 354 { "plil2strm", B(1, 2, 1) }, 355 { "plil3keep", B(1, 3, 0) }, 356 { "plil3strm", B(1, 3, 1) }, 357 { NULL, 0x0e }, 358 { NULL, 0x0f }, 359 { "pstl1keep", B(2, 1, 0) }, 360 { "pstl1strm", B(2, 1, 1) }, 361 { "pstl2keep", B(2, 2, 0) }, 362 { "pstl2strm", B(2, 2, 1) }, 363 { "pstl3keep", B(2, 3, 0) }, 364 { "pstl3strm", B(2, 3, 1) }, 365 { NULL, 0x16 }, 366 { NULL, 0x17 }, 367 { NULL, 0x18 }, 368 { NULL, 0x19 }, 369 { NULL, 0x1a }, 370 { NULL, 0x1b }, 371 { NULL, 0x1c }, 372 { NULL, 0x1d }, 373 { NULL, 0x1e }, 374 { NULL, 0x1f }, 375 }; 376 #undef B 377 378 /* Utilities on value constraint. */ 379 380 static inline int 381 value_in_range_p (int64_t value, int low, int high) 382 { 383 return (value >= low && value <= high) ? 1 : 0; 384 } 385 386 static inline int 387 value_aligned_p (int64_t value, int align) 388 { 389 return ((value & (align - 1)) == 0) ? 1 : 0; 390 } 391 392 /* A signed value fits in a field. 
*/ 393 static inline int 394 value_fit_signed_field_p (int64_t value, unsigned width) 395 { 396 assert (width < 32); 397 if (width < sizeof (value) * 8) 398 { 399 int64_t lim = (int64_t)1 << (width - 1); 400 if (value >= -lim && value < lim) 401 return 1; 402 } 403 return 0; 404 } 405 406 /* An unsigned value fits in a field. */ 407 static inline int 408 value_fit_unsigned_field_p (int64_t value, unsigned width) 409 { 410 assert (width < 32); 411 if (width < sizeof (value) * 8) 412 { 413 int64_t lim = (int64_t)1 << width; 414 if (value >= 0 && value < lim) 415 return 1; 416 } 417 return 0; 418 } 419 420 /* Return 1 if OPERAND is SP or WSP. */ 421 int 422 aarch64_stack_pointer_p (const aarch64_opnd_info *operand) 423 { 424 return ((aarch64_get_operand_class (operand->type) 425 == AARCH64_OPND_CLASS_INT_REG) 426 && operand_maybe_stack_pointer (aarch64_operands + operand->type) 427 && operand->reg.regno == 31); 428 } 429 430 /* Return 1 if OPERAND is XZR or WZP. */ 431 int 432 aarch64_zero_register_p (const aarch64_opnd_info *operand) 433 { 434 return ((aarch64_get_operand_class (operand->type) 435 == AARCH64_OPND_CLASS_INT_REG) 436 && !operand_maybe_stack_pointer (aarch64_operands + operand->type) 437 && operand->reg.regno == 31); 438 } 439 440 /* Return true if the operand *OPERAND that has the operand code 441 OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also 442 qualified by the qualifier TARGET. */ 443 444 static inline int 445 operand_also_qualified_p (const struct aarch64_opnd_info *operand, 446 aarch64_opnd_qualifier_t target) 447 { 448 switch (operand->qualifier) 449 { 450 case AARCH64_OPND_QLF_W: 451 if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand)) 452 return 1; 453 break; 454 case AARCH64_OPND_QLF_X: 455 if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand)) 456 return 1; 457 break; 458 case AARCH64_OPND_QLF_WSP: 459 if (target == AARCH64_OPND_QLF_W 460 && operand_maybe_stack_pointer (aarch64_operands + operand->type)) 461 return 1; 462 break; 463 case AARCH64_OPND_QLF_SP: 464 if (target == AARCH64_OPND_QLF_X 465 && operand_maybe_stack_pointer (aarch64_operands + operand->type)) 466 return 1; 467 break; 468 default: 469 break; 470 } 471 472 return 0; 473 } 474 475 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF 476 for operand KNOWN_IDX, return the expected qualifier for operand IDX. 477 478 Return NIL if more than one expected qualifiers are found. */ 479 480 aarch64_opnd_qualifier_t 481 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list, 482 int idx, 483 const aarch64_opnd_qualifier_t known_qlf, 484 int known_idx) 485 { 486 int i, saved_i; 487 488 /* Special case. 489 490 When the known qualifier is NIL, we have to assume that there is only 491 one qualifier sequence in the *QSEQ_LIST and return the corresponding 492 qualifier directly. One scenario is that for instruction 493 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>] 494 which has only one possible valid qualifier sequence 495 NIL, S_D 496 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can 497 determine the correct relocation type (i.e. LDST64_LO12) for PRFM. 498 499 Because the qualifier NIL has dual roles in the qualifier sequence: 500 it can mean no qualifier for the operand, or the qualifer sequence is 501 not in use (when all qualifiers in the sequence are NILs), we have to 502 handle this special case here. 
*/ 503 if (known_qlf == AARCH64_OPND_NIL) 504 { 505 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL); 506 return qseq_list[0][idx]; 507 } 508 509 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i) 510 { 511 if (qseq_list[i][known_idx] == known_qlf) 512 { 513 if (saved_i != -1) 514 /* More than one sequences are found to have KNOWN_QLF at 515 KNOWN_IDX. */ 516 return AARCH64_OPND_NIL; 517 saved_i = i; 518 } 519 } 520 521 return qseq_list[saved_i][idx]; 522 } 523 524 enum operand_qualifier_kind 525 { 526 OQK_NIL, 527 OQK_OPD_VARIANT, 528 OQK_VALUE_IN_RANGE, 529 OQK_MISC, 530 }; 531 532 /* Operand qualifier description. */ 533 struct operand_qualifier_data 534 { 535 /* The usage of the three data fields depends on the qualifier kind. */ 536 int data0; 537 int data1; 538 int data2; 539 /* Description. */ 540 const char *desc; 541 /* Kind. */ 542 enum operand_qualifier_kind kind; 543 }; 544 545 /* Indexed by the operand qualifier enumerators. */ 546 struct operand_qualifier_data aarch64_opnd_qualifiers[] = 547 { 548 {0, 0, 0, "NIL", OQK_NIL}, 549 550 /* Operand variant qualifiers. 551 First 3 fields: 552 element size, number of elements and common value for encoding. */ 553 554 {4, 1, 0x0, "w", OQK_OPD_VARIANT}, 555 {8, 1, 0x1, "x", OQK_OPD_VARIANT}, 556 {4, 1, 0x0, "wsp", OQK_OPD_VARIANT}, 557 {8, 1, 0x1, "sp", OQK_OPD_VARIANT}, 558 559 {1, 1, 0x0, "b", OQK_OPD_VARIANT}, 560 {2, 1, 0x1, "h", OQK_OPD_VARIANT}, 561 {4, 1, 0x2, "s", OQK_OPD_VARIANT}, 562 {8, 1, 0x3, "d", OQK_OPD_VARIANT}, 563 {16, 1, 0x4, "q", OQK_OPD_VARIANT}, 564 565 {1, 8, 0x0, "8b", OQK_OPD_VARIANT}, 566 {1, 16, 0x1, "16b", OQK_OPD_VARIANT}, 567 {2, 4, 0x2, "4h", OQK_OPD_VARIANT}, 568 {2, 8, 0x3, "8h", OQK_OPD_VARIANT}, 569 {4, 2, 0x4, "2s", OQK_OPD_VARIANT}, 570 {4, 4, 0x5, "4s", OQK_OPD_VARIANT}, 571 {8, 1, 0x6, "1d", OQK_OPD_VARIANT}, 572 {8, 2, 0x7, "2d", OQK_OPD_VARIANT}, 573 {16, 1, 0x8, "1q", OQK_OPD_VARIANT}, 574 575 /* Qualifiers constraining the value range. 576 First 3 fields: 577 Lower bound, higher bound, unused. */ 578 579 {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE}, 580 {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE}, 581 {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE}, 582 {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE}, 583 {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE}, 584 {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE}, 585 586 /* Qualifiers for miscellaneous purpose. 587 First 3 fields: 588 unused, unused and unused. */ 589 590 {0, 0, 0, "lsl", 0}, 591 {0, 0, 0, "msl", 0}, 592 593 {0, 0, 0, "retrieving", 0}, 594 }; 595 596 static inline bfd_boolean 597 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier) 598 { 599 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT) 600 ? TRUE : FALSE; 601 } 602 603 static inline bfd_boolean 604 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier) 605 { 606 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE) 607 ? TRUE : FALSE; 608 } 609 610 const char* 611 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier) 612 { 613 return aarch64_opnd_qualifiers[qualifier].desc; 614 } 615 616 /* Given an operand qualifier, return the expected data element size 617 of a qualified operand. 
*/ 618 unsigned char 619 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier) 620 { 621 assert (operand_variant_qualifier_p (qualifier) == TRUE); 622 return aarch64_opnd_qualifiers[qualifier].data0; 623 } 624 625 unsigned char 626 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier) 627 { 628 assert (operand_variant_qualifier_p (qualifier) == TRUE); 629 return aarch64_opnd_qualifiers[qualifier].data1; 630 } 631 632 aarch64_insn 633 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier) 634 { 635 assert (operand_variant_qualifier_p (qualifier) == TRUE); 636 return aarch64_opnd_qualifiers[qualifier].data2; 637 } 638 639 static int 640 get_lower_bound (aarch64_opnd_qualifier_t qualifier) 641 { 642 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE); 643 return aarch64_opnd_qualifiers[qualifier].data0; 644 } 645 646 static int 647 get_upper_bound (aarch64_opnd_qualifier_t qualifier) 648 { 649 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE); 650 return aarch64_opnd_qualifiers[qualifier].data1; 651 } 652 653 #ifdef DEBUG_AARCH64 654 void 655 aarch64_verbose (const char *str, ...) 656 { 657 va_list ap; 658 va_start (ap, str); 659 printf ("#### "); 660 vprintf (str, ap); 661 printf ("\n"); 662 va_end (ap); 663 } 664 665 static inline void 666 dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier) 667 { 668 int i; 669 printf ("#### \t"); 670 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier) 671 printf ("%s,", aarch64_get_qualifier_name (*qualifier)); 672 printf ("\n"); 673 } 674 675 static void 676 dump_match_qualifiers (const struct aarch64_opnd_info *opnd, 677 const aarch64_opnd_qualifier_t *qualifier) 678 { 679 int i; 680 aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM]; 681 682 aarch64_verbose ("dump_match_qualifiers:"); 683 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i) 684 curr[i] = opnd[i].qualifier; 685 dump_qualifier_sequence (curr); 686 aarch64_verbose ("against"); 687 dump_qualifier_sequence (qualifier); 688 } 689 #endif /* DEBUG_AARCH64 */ 690 691 /* TODO improve this, we can have an extra field at the runtime to 692 store the number of operands rather than calculating it every time. */ 693 694 int 695 aarch64_num_of_operands (const aarch64_opcode *opcode) 696 { 697 int i = 0; 698 const enum aarch64_opnd *opnds = opcode->operands; 699 while (opnds[i++] != AARCH64_OPND_NIL) 700 ; 701 --i; 702 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM); 703 return i; 704 } 705 706 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST. 707 If succeeds, fill the found sequence in *RET, return 1; otherwise return 0. 708 709 N.B. on the entry, it is very likely that only some operands in *INST 710 have had their qualifiers been established. 711 712 If STOP_AT is not -1, the function will only try to match 713 the qualifier sequence for operands before and including the operand 714 of index STOP_AT; and on success *RET will only be filled with the first 715 (STOP_AT+1) qualifiers. 716 717 A couple examples of the matching algorithm: 718 719 X,W,NIL should match 720 X,W,NIL 721 722 NIL,NIL should match 723 X ,NIL 724 725 Apart from serving the main encoding routine, this can also be called 726 during or after the operand decoding. 
*/ 727 728 int 729 aarch64_find_best_match (const aarch64_inst *inst, 730 const aarch64_opnd_qualifier_seq_t *qualifiers_list, 731 int stop_at, aarch64_opnd_qualifier_t *ret) 732 { 733 int found = 0; 734 int i, num_opnds; 735 const aarch64_opnd_qualifier_t *qualifiers; 736 737 num_opnds = aarch64_num_of_operands (inst->opcode); 738 if (num_opnds == 0) 739 { 740 DEBUG_TRACE ("SUCCEED: no operand"); 741 return 1; 742 } 743 744 if (stop_at < 0 || stop_at >= num_opnds) 745 stop_at = num_opnds - 1; 746 747 /* For each pattern. */ 748 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list) 749 { 750 int j; 751 qualifiers = *qualifiers_list; 752 753 /* Start as positive. */ 754 found = 1; 755 756 DEBUG_TRACE ("%d", i); 757 #ifdef DEBUG_AARCH64 758 if (debug_dump) 759 dump_match_qualifiers (inst->operands, qualifiers); 760 #endif 761 762 /* Most opcodes has much fewer patterns in the list. 763 First NIL qualifier indicates the end in the list. */ 764 if (empty_qualifier_sequence_p (qualifiers) == TRUE) 765 { 766 DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list"); 767 if (i) 768 found = 0; 769 break; 770 } 771 772 for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers) 773 { 774 if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL) 775 { 776 /* Either the operand does not have qualifier, or the qualifier 777 for the operand needs to be deduced from the qualifier 778 sequence. 779 In the latter case, any constraint checking related with 780 the obtained qualifier should be done later in 781 operand_general_constraint_met_p. */ 782 continue; 783 } 784 else if (*qualifiers != inst->operands[j].qualifier) 785 { 786 /* Unless the target qualifier can also qualify the operand 787 (which has already had a non-nil qualifier), non-equal 788 qualifiers are generally un-matched. */ 789 if (operand_also_qualified_p (inst->operands + j, *qualifiers)) 790 continue; 791 else 792 { 793 found = 0; 794 break; 795 } 796 } 797 else 798 continue; /* Equal qualifiers are certainly matched. */ 799 } 800 801 /* Qualifiers established. */ 802 if (found == 1) 803 break; 804 } 805 806 if (found == 1) 807 { 808 /* Fill the result in *RET. */ 809 int j; 810 qualifiers = *qualifiers_list; 811 812 DEBUG_TRACE ("complete qualifiers using list %d", i); 813 #ifdef DEBUG_AARCH64 814 if (debug_dump) 815 dump_qualifier_sequence (qualifiers); 816 #endif 817 818 for (j = 0; j <= stop_at; ++j, ++qualifiers) 819 ret[j] = *qualifiers; 820 for (; j < AARCH64_MAX_OPND_NUM; ++j) 821 ret[j] = AARCH64_OPND_QLF_NIL; 822 823 DEBUG_TRACE ("SUCCESS"); 824 return 1; 825 } 826 827 DEBUG_TRACE ("FAIL"); 828 return 0; 829 } 830 831 /* Operand qualifier matching and resolving. 832 833 Return 1 if the operand qualifier(s) in *INST match one of the qualifier 834 sequences in INST->OPCODE->qualifiers_list; otherwise return 0. 835 836 if UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching 837 succeeds. */ 838 839 static int 840 match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p) 841 { 842 int i; 843 aarch64_opnd_qualifier_seq_t qualifiers; 844 845 if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1, 846 qualifiers)) 847 { 848 DEBUG_TRACE ("matching FAIL"); 849 return 0; 850 } 851 852 /* Update the qualifiers. 
*/ 853 if (update_p == TRUE) 854 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i) 855 { 856 if (inst->opcode->operands[i] == AARCH64_OPND_NIL) 857 break; 858 DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i], 859 "update %s with %s for operand %d", 860 aarch64_get_qualifier_name (inst->operands[i].qualifier), 861 aarch64_get_qualifier_name (qualifiers[i]), i); 862 inst->operands[i].qualifier = qualifiers[i]; 863 } 864 865 DEBUG_TRACE ("matching SUCCESS"); 866 return 1; 867 } 868 869 /* Return TRUE if VALUE is a wide constant that can be moved into a general 870 register by MOVZ. 871 872 IS32 indicates whether value is a 32-bit immediate or not. 873 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift 874 amount will be returned in *SHIFT_AMOUNT. */ 875 876 bfd_boolean 877 aarch64_wide_constant_p (int64_t value, int is32, unsigned int *shift_amount) 878 { 879 int amount; 880 881 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value); 882 883 if (is32) 884 { 885 /* Allow all zeros or all ones in top 32-bits, so that 886 32-bit constant expressions like ~0x80000000 are 887 permitted. */ 888 uint64_t ext = value; 889 if (ext >> 32 != 0 && ext >> 32 != (uint64_t) 0xffffffff) 890 /* Immediate out of range. */ 891 return FALSE; 892 value &= (int64_t) 0xffffffff; 893 } 894 895 /* first, try movz then movn */ 896 amount = -1; 897 if ((value & ((int64_t) 0xffff << 0)) == value) 898 amount = 0; 899 else if ((value & ((int64_t) 0xffff << 16)) == value) 900 amount = 16; 901 else if (!is32 && (value & ((int64_t) 0xffff << 32)) == value) 902 amount = 32; 903 else if (!is32 && (value & ((int64_t) 0xffff << 48)) == value) 904 amount = 48; 905 906 if (amount == -1) 907 { 908 DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value); 909 return FALSE; 910 } 911 912 if (shift_amount != NULL) 913 *shift_amount = amount; 914 915 DEBUG_TRACE ("exit TRUE with amount %d", amount); 916 917 return TRUE; 918 } 919 920 /* Build the accepted values for immediate logical SIMD instructions. 921 922 The standard encodings of the immediate value are: 923 N imms immr SIMD size R S 924 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss) 925 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss) 926 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss) 927 0 110sss 000rrr 8 UInt(rrr) UInt(sss) 928 0 1110ss 0000rr 4 UInt(rr) UInt(ss) 929 0 11110s 00000r 2 UInt(r) UInt(s) 930 where all-ones value of S is reserved. 931 932 Let's call E the SIMD size. 933 934 The immediate value is: S+1 bits '1' rotated to the right by R. 935 936 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334 937 (remember S != E - 1). 
*/ 938 939 #define TOTAL_IMM_NB 5334 940 941 typedef struct 942 { 943 uint64_t imm; 944 aarch64_insn encoding; 945 } simd_imm_encoding; 946 947 static simd_imm_encoding simd_immediates[TOTAL_IMM_NB]; 948 949 static int 950 simd_imm_encoding_cmp(const void *i1, const void *i2) 951 { 952 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1; 953 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2; 954 955 if (imm1->imm < imm2->imm) 956 return -1; 957 if (imm1->imm > imm2->imm) 958 return +1; 959 return 0; 960 } 961 962 /* immediate bitfield standard encoding 963 imm13<12> imm13<5:0> imm13<11:6> SIMD size R S 964 1 ssssss rrrrrr 64 rrrrrr ssssss 965 0 0sssss 0rrrrr 32 rrrrr sssss 966 0 10ssss 00rrrr 16 rrrr ssss 967 0 110sss 000rrr 8 rrr sss 968 0 1110ss 0000rr 4 rr ss 969 0 11110s 00000r 2 r s */ 970 static inline int 971 encode_immediate_bitfield (int is64, uint32_t s, uint32_t r) 972 { 973 return (is64 << 12) | (r << 6) | s; 974 } 975 976 static void 977 build_immediate_table (void) 978 { 979 uint32_t log_e, e, s, r, s_mask; 980 uint64_t mask, imm; 981 int nb_imms; 982 int is64; 983 984 nb_imms = 0; 985 for (log_e = 1; log_e <= 6; log_e++) 986 { 987 /* Get element size. */ 988 e = 1u << log_e; 989 if (log_e == 6) 990 { 991 is64 = 1; 992 mask = 0xffffffffffffffffull; 993 s_mask = 0; 994 } 995 else 996 { 997 is64 = 0; 998 mask = (1ull << e) - 1; 999 /* log_e s_mask 1000 1 ((1 << 4) - 1) << 2 = 111100 1001 2 ((1 << 3) - 1) << 3 = 111000 1002 3 ((1 << 2) - 1) << 4 = 110000 1003 4 ((1 << 1) - 1) << 5 = 100000 1004 5 ((1 << 0) - 1) << 6 = 000000 */ 1005 s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1); 1006 } 1007 for (s = 0; s < e - 1; s++) 1008 for (r = 0; r < e; r++) 1009 { 1010 /* s+1 consecutive bits to 1 (s < 63) */ 1011 imm = (1ull << (s + 1)) - 1; 1012 /* rotate right by r */ 1013 if (r != 0) 1014 imm = (imm >> r) | ((imm << (e - r)) & mask); 1015 /* replicate the constant depending on SIMD size */ 1016 switch (log_e) 1017 { 1018 case 1: imm = (imm << 2) | imm; 1019 case 2: imm = (imm << 4) | imm; 1020 case 3: imm = (imm << 8) | imm; 1021 case 4: imm = (imm << 16) | imm; 1022 case 5: imm = (imm << 32) | imm; 1023 case 6: break; 1024 default: abort (); 1025 } 1026 simd_immediates[nb_imms].imm = imm; 1027 simd_immediates[nb_imms].encoding = 1028 encode_immediate_bitfield(is64, s | s_mask, r); 1029 nb_imms++; 1030 } 1031 } 1032 assert (nb_imms == TOTAL_IMM_NB); 1033 qsort(simd_immediates, nb_imms, 1034 sizeof(simd_immediates[0]), simd_imm_encoding_cmp); 1035 } 1036 1037 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can 1038 be accepted by logical (immediate) instructions 1039 e.g. ORR <Xd|SP>, <Xn>, #<imm>. 1040 1041 IS32 indicates whether or not VALUE is a 32-bit immediate. 1042 If ENCODING is not NULL, on the return of TRUE, the standard encoding for 1043 VALUE will be returned in *ENCODING. */ 1044 1045 bfd_boolean 1046 aarch64_logical_immediate_p (uint64_t value, int is32, aarch64_insn *encoding) 1047 { 1048 simd_imm_encoding imm_enc; 1049 const simd_imm_encoding *imm_encoding; 1050 static bfd_boolean initialized = FALSE; 1051 1052 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), is32: %d", value, 1053 value, is32); 1054 1055 if (initialized == FALSE) 1056 { 1057 build_immediate_table (); 1058 initialized = TRUE; 1059 } 1060 1061 if (is32) 1062 { 1063 /* Allow all zeros or all ones in top 32-bits, so that 1064 constant expressions like ~1 are permitted. 
*/ 1065 if (value >> 32 != 0 && value >> 32 != 0xffffffff) 1066 return FALSE; 1067 1068 /* Replicate the 32 lower bits to the 32 upper bits. */ 1069 value &= 0xffffffff; 1070 value |= value << 32; 1071 } 1072 1073 imm_enc.imm = value; 1074 imm_encoding = (const simd_imm_encoding *) 1075 bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB, 1076 sizeof(simd_immediates[0]), simd_imm_encoding_cmp); 1077 if (imm_encoding == NULL) 1078 { 1079 DEBUG_TRACE ("exit with FALSE"); 1080 return FALSE; 1081 } 1082 if (encoding != NULL) 1083 *encoding = imm_encoding->encoding; 1084 DEBUG_TRACE ("exit with TRUE"); 1085 return TRUE; 1086 } 1087 1088 /* If 64-bit immediate IMM is in the format of 1089 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh", 1090 where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer 1091 of value "abcdefgh". Otherwise return -1. */ 1092 int 1093 aarch64_shrink_expanded_imm8 (uint64_t imm) 1094 { 1095 int i, ret; 1096 uint32_t byte; 1097 1098 ret = 0; 1099 for (i = 0; i < 8; i++) 1100 { 1101 byte = (imm >> (8 * i)) & 0xff; 1102 if (byte == 0xff) 1103 ret |= 1 << i; 1104 else if (byte != 0x00) 1105 return -1; 1106 } 1107 return ret; 1108 } 1109 1110 /* Utility inline functions for operand_general_constraint_met_p. */ 1111 1112 static inline void 1113 set_error (aarch64_operand_error *mismatch_detail, 1114 enum aarch64_operand_error_kind kind, int idx, 1115 const char* error) 1116 { 1117 if (mismatch_detail == NULL) 1118 return; 1119 mismatch_detail->kind = kind; 1120 mismatch_detail->index = idx; 1121 mismatch_detail->error = error; 1122 } 1123 1124 static inline void 1125 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx, 1126 const char* error) 1127 { 1128 if (mismatch_detail == NULL) 1129 return; 1130 set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error); 1131 } 1132 1133 static inline void 1134 set_out_of_range_error (aarch64_operand_error *mismatch_detail, 1135 int idx, int lower_bound, int upper_bound, 1136 const char* error) 1137 { 1138 if (mismatch_detail == NULL) 1139 return; 1140 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error); 1141 mismatch_detail->data[0] = lower_bound; 1142 mismatch_detail->data[1] = upper_bound; 1143 } 1144 1145 static inline void 1146 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail, 1147 int idx, int lower_bound, int upper_bound) 1148 { 1149 if (mismatch_detail == NULL) 1150 return; 1151 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound, 1152 _("immediate value")); 1153 } 1154 1155 static inline void 1156 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail, 1157 int idx, int lower_bound, int upper_bound) 1158 { 1159 if (mismatch_detail == NULL) 1160 return; 1161 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound, 1162 _("immediate offset")); 1163 } 1164 1165 static inline void 1166 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail, 1167 int idx, int lower_bound, int upper_bound) 1168 { 1169 if (mismatch_detail == NULL) 1170 return; 1171 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound, 1172 _("register number")); 1173 } 1174 1175 static inline void 1176 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail, 1177 int idx, int lower_bound, int upper_bound) 1178 { 1179 if (mismatch_detail == NULL) 1180 return; 1181 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound, 1182 _("register element index")); 1183 } 1184 1185 
static inline void 1186 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail, 1187 int idx, int lower_bound, int upper_bound) 1188 { 1189 if (mismatch_detail == NULL) 1190 return; 1191 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound, 1192 _("shift amount")); 1193 } 1194 1195 static inline void 1196 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx, 1197 int alignment) 1198 { 1199 if (mismatch_detail == NULL) 1200 return; 1201 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL); 1202 mismatch_detail->data[0] = alignment; 1203 } 1204 1205 static inline void 1206 set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx, 1207 int expected_num) 1208 { 1209 if (mismatch_detail == NULL) 1210 return; 1211 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL); 1212 mismatch_detail->data[0] = expected_num; 1213 } 1214 1215 static inline void 1216 set_other_error (aarch64_operand_error *mismatch_detail, int idx, 1217 const char* error) 1218 { 1219 if (mismatch_detail == NULL) 1220 return; 1221 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error); 1222 } 1223 1224 /* General constraint checking based on operand code. 1225 1226 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE 1227 as the IDXth operand of opcode OPCODE. Otherwise return 0. 1228 1229 This function has to be called after the qualifiers for all operands 1230 have been resolved. 1231 1232 Mismatching error message is returned in *MISMATCH_DETAIL upon request, 1233 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation 1234 of error message during the disassembling where error message is not 1235 wanted. We avoid the dynamic construction of strings of error messages 1236 here (i.e. in libopcodes), as it is costly and complicated; instead, we 1237 use a combination of error code, static string and some integer data to 1238 represent an error. */ 1239 1240 static int 1241 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx, 1242 enum aarch64_opnd type, 1243 const aarch64_opcode *opcode, 1244 aarch64_operand_error *mismatch_detail) 1245 { 1246 unsigned num; 1247 unsigned char size; 1248 int64_t imm; 1249 const aarch64_opnd_info *opnd = opnds + idx; 1250 aarch64_opnd_qualifier_t qualifier = opnd->qualifier; 1251 1252 assert (opcode->operands[idx] == opnd->type && opnd->type == type); 1253 1254 switch (aarch64_operands[type].op_class) 1255 { 1256 case AARCH64_OPND_CLASS_INT_REG: 1257 /* <Xt> may be optional in some IC and TLBI instructions. */ 1258 if (type == AARCH64_OPND_Rt_SYS) 1259 { 1260 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type) 1261 == AARCH64_OPND_CLASS_SYSTEM)); 1262 if (opnds[1].present && !opnds[0].sysins_op->has_xt) 1263 { 1264 set_other_error (mismatch_detail, idx, _("extraneous register")); 1265 return 0; 1266 } 1267 if (!opnds[1].present && opnds[0].sysins_op->has_xt) 1268 { 1269 set_other_error (mismatch_detail, idx, _("missing register")); 1270 return 0; 1271 } 1272 } 1273 switch (qualifier) 1274 { 1275 case AARCH64_OPND_QLF_WSP: 1276 case AARCH64_OPND_QLF_SP: 1277 if (!aarch64_stack_pointer_p (opnd)) 1278 { 1279 set_other_error (mismatch_detail, idx, 1280 _("stack pointer register expected")); 1281 return 0; 1282 } 1283 break; 1284 default: 1285 break; 1286 } 1287 break; 1288 1289 case AARCH64_OPND_CLASS_COND: 1290 if (type == AARCH64_OPND_COND1 1291 && (opnds[idx].cond->value & 0xe) == 0xe) 1292 { 1293 /* Not allow AL or NV. 
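	     (i.e. cond field values 0xe and 0xf, named 'al' and 'nv' in
	     aarch64_conds above).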
*/ 1294 set_syntax_error (mismatch_detail, idx, NULL); 1295 } 1296 break; 1297 1298 case AARCH64_OPND_CLASS_ADDRESS: 1299 /* Check writeback. */ 1300 switch (opcode->iclass) 1301 { 1302 case ldst_pos: 1303 case ldst_unscaled: 1304 case ldstnapair_offs: 1305 case ldstpair_off: 1306 case ldst_unpriv: 1307 if (opnd->addr.writeback == 1) 1308 { 1309 set_syntax_error (mismatch_detail, idx, 1310 _("unexpected address writeback")); 1311 return 0; 1312 } 1313 break; 1314 case ldst_imm9: 1315 case ldstpair_indexed: 1316 case asisdlsep: 1317 case asisdlsop: 1318 if (opnd->addr.writeback == 0) 1319 { 1320 set_syntax_error (mismatch_detail, idx, 1321 _("address writeback expected")); 1322 return 0; 1323 } 1324 break; 1325 default: 1326 assert (opnd->addr.writeback == 0); 1327 break; 1328 } 1329 switch (type) 1330 { 1331 case AARCH64_OPND_ADDR_SIMM7: 1332 /* Scaled signed 7 bits immediate offset. */ 1333 /* Get the size of the data element that is accessed, which may be 1334 different from that of the source register size, 1335 e.g. in strb/ldrb. */ 1336 size = aarch64_get_qualifier_esize (opnd->qualifier); 1337 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size)) 1338 { 1339 set_offset_out_of_range_error (mismatch_detail, idx, 1340 -64 * size, 63 * size); 1341 return 0; 1342 } 1343 if (!value_aligned_p (opnd->addr.offset.imm, size)) 1344 { 1345 set_unaligned_error (mismatch_detail, idx, size); 1346 return 0; 1347 } 1348 break; 1349 case AARCH64_OPND_ADDR_SIMM9: 1350 /* Unscaled signed 9 bits immediate offset. */ 1351 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255)) 1352 { 1353 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255); 1354 return 0; 1355 } 1356 break; 1357 1358 case AARCH64_OPND_ADDR_SIMM9_2: 1359 /* Unscaled signed 9 bits immediate offset, which has to be negative 1360 or unaligned. */ 1361 size = aarch64_get_qualifier_esize (qualifier); 1362 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255) 1363 && !value_aligned_p (opnd->addr.offset.imm, size)) 1364 || value_in_range_p (opnd->addr.offset.imm, -256, -1)) 1365 return 1; 1366 set_other_error (mismatch_detail, idx, 1367 _("negative or unaligned offset expected")); 1368 return 0; 1369 1370 case AARCH64_OPND_SIMD_ADDR_POST: 1371 /* AdvSIMD load/store multiple structures, post-index. */ 1372 assert (idx == 1); 1373 if (opnd->addr.offset.is_reg) 1374 { 1375 if (value_in_range_p (opnd->addr.offset.regno, 0, 30)) 1376 return 1; 1377 else 1378 { 1379 set_other_error (mismatch_detail, idx, 1380 _("invalid register offset")); 1381 return 0; 1382 } 1383 } 1384 else 1385 { 1386 const aarch64_opnd_info *prev = &opnds[idx-1]; 1387 unsigned num_bytes; /* total number of bytes transferred. */ 1388 /* The opcode dependent area stores the number of elements in 1389 each structure to be loaded/stored. */ 1390 int is_ld1r = get_opcode_dependent_value (opcode) == 1; 1391 if (opcode->operands[0] == AARCH64_OPND_LVt_AL) 1392 /* Special handling of loading single structure to all lane. */ 1393 num_bytes = (is_ld1r ? 
1 : prev->reglist.num_regs) 1394 * aarch64_get_qualifier_esize (prev->qualifier); 1395 else 1396 num_bytes = prev->reglist.num_regs 1397 * aarch64_get_qualifier_esize (prev->qualifier) 1398 * aarch64_get_qualifier_nelem (prev->qualifier); 1399 if ((int) num_bytes != opnd->addr.offset.imm) 1400 { 1401 set_other_error (mismatch_detail, idx, 1402 _("invalid post-increment amount")); 1403 return 0; 1404 } 1405 } 1406 break; 1407 1408 case AARCH64_OPND_ADDR_REGOFF: 1409 /* Get the size of the data element that is accessed, which may be 1410 different from that of the source register size, 1411 e.g. in strb/ldrb. */ 1412 size = aarch64_get_qualifier_esize (opnd->qualifier); 1413 /* It is either no shift or shift by the binary logarithm of SIZE. */ 1414 if (opnd->shifter.amount != 0 1415 && opnd->shifter.amount != (int)get_logsz (size)) 1416 { 1417 set_other_error (mismatch_detail, idx, 1418 _("invalid shift amount")); 1419 return 0; 1420 } 1421 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending 1422 operators. */ 1423 switch (opnd->shifter.kind) 1424 { 1425 case AARCH64_MOD_UXTW: 1426 case AARCH64_MOD_LSL: 1427 case AARCH64_MOD_SXTW: 1428 case AARCH64_MOD_SXTX: break; 1429 default: 1430 set_other_error (mismatch_detail, idx, 1431 _("invalid extend/shift operator")); 1432 return 0; 1433 } 1434 break; 1435 1436 case AARCH64_OPND_ADDR_UIMM12: 1437 imm = opnd->addr.offset.imm; 1438 /* Get the size of the data element that is accessed, which may be 1439 different from that of the source register size, 1440 e.g. in strb/ldrb. */ 1441 size = aarch64_get_qualifier_esize (qualifier); 1442 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size)) 1443 { 1444 set_offset_out_of_range_error (mismatch_detail, idx, 1445 0, 4095 * size); 1446 return 0; 1447 } 1448 if (!value_aligned_p (opnd->addr.offset.imm, size)) 1449 { 1450 set_unaligned_error (mismatch_detail, idx, size); 1451 return 0; 1452 } 1453 break; 1454 1455 case AARCH64_OPND_ADDR_PCREL14: 1456 case AARCH64_OPND_ADDR_PCREL19: 1457 case AARCH64_OPND_ADDR_PCREL21: 1458 case AARCH64_OPND_ADDR_PCREL26: 1459 imm = opnd->imm.value; 1460 if (operand_need_shift_by_two (get_operand_from_code (type))) 1461 { 1462 /* The offset value in a PC-relative branch instruction is alway 1463 4-byte aligned and is encoded without the lowest 2 bits. */ 1464 if (!value_aligned_p (imm, 4)) 1465 { 1466 set_unaligned_error (mismatch_detail, idx, 4); 1467 return 0; 1468 } 1469 /* Right shift by 2 so that we can carry out the following check 1470 canonically. */ 1471 imm >>= 2; 1472 } 1473 size = get_operand_fields_width (get_operand_from_code (type)); 1474 if (!value_fit_signed_field_p (imm, size)) 1475 { 1476 set_other_error (mismatch_detail, idx, 1477 _("immediate out of range")); 1478 return 0; 1479 } 1480 break; 1481 1482 default: 1483 break; 1484 } 1485 break; 1486 1487 case AARCH64_OPND_CLASS_SIMD_REGLIST: 1488 /* The opcode dependent area stores the number of elements in 1489 each structure to be loaded/stored. */ 1490 num = get_opcode_dependent_value (opcode); 1491 switch (type) 1492 { 1493 case AARCH64_OPND_LVt: 1494 assert (num >= 1 && num <= 4); 1495 /* Unless LD1/ST1, the number of registers should be equal to that 1496 of the structure elements. 
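	     E.g. LD3 requires a register list of exactly 3 registers, whereas
	     LD1/ST1 (num == 1) accept lists of 1 to 4 registers and skip this
	     check.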
*/ 1497 if (num != 1 && opnd->reglist.num_regs != num) 1498 { 1499 set_reg_list_error (mismatch_detail, idx, num); 1500 return 0; 1501 } 1502 break; 1503 case AARCH64_OPND_LVt_AL: 1504 case AARCH64_OPND_LEt: 1505 assert (num >= 1 && num <= 4); 1506 /* The number of registers should be equal to that of the structure 1507 elements. */ 1508 if (opnd->reglist.num_regs != num) 1509 { 1510 set_reg_list_error (mismatch_detail, idx, num); 1511 return 0; 1512 } 1513 break; 1514 default: 1515 break; 1516 } 1517 break; 1518 1519 case AARCH64_OPND_CLASS_IMMEDIATE: 1520 /* Constraint check on immediate operand. */ 1521 imm = opnd->imm.value; 1522 /* E.g. imm_0_31 constrains value to be 0..31. */ 1523 if (qualifier_value_in_range_constraint_p (qualifier) 1524 && !value_in_range_p (imm, get_lower_bound (qualifier), 1525 get_upper_bound (qualifier))) 1526 { 1527 set_imm_out_of_range_error (mismatch_detail, idx, 1528 get_lower_bound (qualifier), 1529 get_upper_bound (qualifier)); 1530 return 0; 1531 } 1532 1533 switch (type) 1534 { 1535 case AARCH64_OPND_AIMM: 1536 if (opnd->shifter.kind != AARCH64_MOD_LSL) 1537 { 1538 set_other_error (mismatch_detail, idx, 1539 _("invalid shift operator")); 1540 return 0; 1541 } 1542 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12) 1543 { 1544 set_other_error (mismatch_detail, idx, 1545 _("shift amount expected to be 0 or 12")); 1546 return 0; 1547 } 1548 if (!value_fit_unsigned_field_p (opnd->imm.value, 12)) 1549 { 1550 set_other_error (mismatch_detail, idx, 1551 _("immediate out of range")); 1552 return 0; 1553 } 1554 break; 1555 1556 case AARCH64_OPND_HALF: 1557 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd); 1558 if (opnd->shifter.kind != AARCH64_MOD_LSL) 1559 { 1560 set_other_error (mismatch_detail, idx, 1561 _("invalid shift operator")); 1562 return 0; 1563 } 1564 size = aarch64_get_qualifier_esize (opnds[0].qualifier); 1565 if (!value_aligned_p (opnd->shifter.amount, 16)) 1566 { 1567 set_other_error (mismatch_detail, idx, 1568 _("shift amount should be a multiple of 16")); 1569 return 0; 1570 } 1571 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16)) 1572 { 1573 set_sft_amount_out_of_range_error (mismatch_detail, idx, 1574 0, size * 8 - 16); 1575 return 0; 1576 } 1577 if (opnd->imm.value < 0) 1578 { 1579 set_other_error (mismatch_detail, idx, 1580 _("negative immediate value not allowed")); 1581 return 0; 1582 } 1583 if (!value_fit_unsigned_field_p (opnd->imm.value, 16)) 1584 { 1585 set_other_error (mismatch_detail, idx, 1586 _("immediate out of range")); 1587 return 0; 1588 } 1589 break; 1590 1591 case AARCH64_OPND_IMM_MOV: 1592 { 1593 int is32 = aarch64_get_qualifier_esize (opnds[0].qualifier) == 4; 1594 imm = opnd->imm.value; 1595 assert (idx == 1); 1596 switch (opcode->op) 1597 { 1598 case OP_MOV_IMM_WIDEN: 1599 imm = ~imm; 1600 /* Fall through... 
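		 (for the MOVN-backed MOV alias the bitwise-inverted value
		 must itself be a wide constant).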
*/ 1601 case OP_MOV_IMM_WIDE: 1602 if (!aarch64_wide_constant_p (imm, is32, NULL)) 1603 { 1604 set_other_error (mismatch_detail, idx, 1605 _("immediate out of range")); 1606 return 0; 1607 } 1608 break; 1609 case OP_MOV_IMM_LOG: 1610 if (!aarch64_logical_immediate_p (imm, is32, NULL)) 1611 { 1612 set_other_error (mismatch_detail, idx, 1613 _("immediate out of range")); 1614 return 0; 1615 } 1616 break; 1617 default: 1618 assert (0); 1619 return 0; 1620 } 1621 } 1622 break; 1623 1624 case AARCH64_OPND_NZCV: 1625 case AARCH64_OPND_CCMP_IMM: 1626 case AARCH64_OPND_EXCEPTION: 1627 case AARCH64_OPND_UIMM4: 1628 case AARCH64_OPND_UIMM7: 1629 case AARCH64_OPND_UIMM3_OP1: 1630 case AARCH64_OPND_UIMM3_OP2: 1631 size = get_operand_fields_width (get_operand_from_code (type)); 1632 assert (size < 32); 1633 if (!value_fit_unsigned_field_p (opnd->imm.value, size)) 1634 { 1635 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1636 (1 << size) - 1); 1637 return 0; 1638 } 1639 break; 1640 1641 case AARCH64_OPND_WIDTH: 1642 assert (idx == 3 && opnds[idx-1].type == AARCH64_OPND_IMM 1643 && opnds[0].type == AARCH64_OPND_Rd); 1644 size = get_upper_bound (qualifier); 1645 if (opnd->imm.value + opnds[idx-1].imm.value > size) 1646 /* lsb+width <= reg.size */ 1647 { 1648 set_imm_out_of_range_error (mismatch_detail, idx, 1, 1649 size - opnds[idx-1].imm.value); 1650 return 0; 1651 } 1652 break; 1653 1654 case AARCH64_OPND_LIMM: 1655 { 1656 int is32 = opnds[0].qualifier == AARCH64_OPND_QLF_W; 1657 uint64_t uimm = opnd->imm.value; 1658 if (opcode->op == OP_BIC) 1659 uimm = ~uimm; 1660 if (aarch64_logical_immediate_p (uimm, is32, NULL) == FALSE) 1661 { 1662 set_other_error (mismatch_detail, idx, 1663 _("immediate out of range")); 1664 return 0; 1665 } 1666 } 1667 break; 1668 1669 case AARCH64_OPND_IMM0: 1670 case AARCH64_OPND_FPIMM0: 1671 if (opnd->imm.value != 0) 1672 { 1673 set_other_error (mismatch_detail, idx, 1674 _("immediate zero expected")); 1675 return 0; 1676 } 1677 break; 1678 1679 case AARCH64_OPND_SHLL_IMM: 1680 assert (idx == 2); 1681 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier); 1682 if (opnd->imm.value != size) 1683 { 1684 set_other_error (mismatch_detail, idx, 1685 _("invalid shift amount")); 1686 return 0; 1687 } 1688 break; 1689 1690 case AARCH64_OPND_IMM_VLSL: 1691 size = aarch64_get_qualifier_esize (qualifier); 1692 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1)) 1693 { 1694 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1695 size * 8 - 1); 1696 return 0; 1697 } 1698 break; 1699 1700 case AARCH64_OPND_IMM_VLSR: 1701 size = aarch64_get_qualifier_esize (qualifier); 1702 if (!value_in_range_p (opnd->imm.value, 1, size * 8)) 1703 { 1704 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8); 1705 return 0; 1706 } 1707 break; 1708 1709 case AARCH64_OPND_SIMD_IMM: 1710 case AARCH64_OPND_SIMD_IMM_SFT: 1711 /* Qualifier check. 
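	     The AdvSIMD modified-immediate forms come in LSL, MSL and no-shift
	     variants; e.g. MOVI Vd.4S, #imm8, LSL #amount uses the LSL
	     qualifier, while the 64-bit byte-mask form of MOVI takes no shift
	     at all.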
*/ 1712 switch (qualifier) 1713 { 1714 case AARCH64_OPND_QLF_LSL: 1715 if (opnd->shifter.kind != AARCH64_MOD_LSL) 1716 { 1717 set_other_error (mismatch_detail, idx, 1718 _("invalid shift operator")); 1719 return 0; 1720 } 1721 break; 1722 case AARCH64_OPND_QLF_MSL: 1723 if (opnd->shifter.kind != AARCH64_MOD_MSL) 1724 { 1725 set_other_error (mismatch_detail, idx, 1726 _("invalid shift operator")); 1727 return 0; 1728 } 1729 break; 1730 case AARCH64_OPND_QLF_NIL: 1731 if (opnd->shifter.kind != AARCH64_MOD_NONE) 1732 { 1733 set_other_error (mismatch_detail, idx, 1734 _("shift is not permitted")); 1735 return 0; 1736 } 1737 break; 1738 default: 1739 assert (0); 1740 return 0; 1741 } 1742 /* Is the immediate valid? */ 1743 assert (idx == 1); 1744 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8) 1745 { 1746 /* uimm8 or simm8 */ 1747 if (!value_in_range_p (opnd->imm.value, -128, 255)) 1748 { 1749 set_imm_out_of_range_error (mismatch_detail, idx, -128, 255); 1750 return 0; 1751 } 1752 } 1753 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0) 1754 { 1755 /* uimm64 is not 1756 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee 1757 ffffffffgggggggghhhhhhhh'. */ 1758 set_other_error (mismatch_detail, idx, 1759 _("invalid value for immediate")); 1760 return 0; 1761 } 1762 /* Is the shift amount valid? */ 1763 switch (opnd->shifter.kind) 1764 { 1765 case AARCH64_MOD_LSL: 1766 size = aarch64_get_qualifier_esize (opnds[0].qualifier); 1767 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8)) 1768 { 1769 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 1770 (size - 1) * 8); 1771 return 0; 1772 } 1773 if (!value_aligned_p (opnd->shifter.amount, 8)) 1774 { 1775 set_unaligned_error (mismatch_detail, idx, 8); 1776 return 0; 1777 } 1778 break; 1779 case AARCH64_MOD_MSL: 1780 /* Only 8 and 16 are valid shift amount. */ 1781 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16) 1782 { 1783 set_other_error (mismatch_detail, idx, 1784 _("shift amount expected to be 0 or 16")); 1785 return 0; 1786 } 1787 break; 1788 default: 1789 if (opnd->shifter.kind != AARCH64_MOD_NONE) 1790 { 1791 set_other_error (mismatch_detail, idx, 1792 _("invalid shift operator")); 1793 return 0; 1794 } 1795 break; 1796 } 1797 break; 1798 1799 case AARCH64_OPND_FPIMM: 1800 case AARCH64_OPND_SIMD_FPIMM: 1801 if (opnd->imm.is_fp == 0) 1802 { 1803 set_other_error (mismatch_detail, idx, 1804 _("floating-point immediate expected")); 1805 return 0; 1806 } 1807 /* The value is expected to be an 8-bit floating-point constant with 1808 sign, 3-bit exponent and normalized 4 bits of precision, encoded 1809 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the 1810 instruction). */ 1811 if (!value_in_range_p (opnd->imm.value, 0, 255)) 1812 { 1813 set_other_error (mismatch_detail, idx, 1814 _("immediate out of range")); 1815 return 0; 1816 } 1817 if (opnd->shifter.kind != AARCH64_MOD_NONE) 1818 { 1819 set_other_error (mismatch_detail, idx, 1820 _("invalid shift operator")); 1821 return 0; 1822 } 1823 break; 1824 1825 default: 1826 break; 1827 } 1828 break; 1829 1830 case AARCH64_OPND_CLASS_CP_REG: 1831 /* Cn or Cm: 4-bit opcode field named for historical reasons. 1832 valid range: C0 - C15. 
*/ 1833 if (opnd->reg.regno > 15) 1834 { 1835 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15); 1836 return 0; 1837 } 1838 break; 1839 1840 case AARCH64_OPND_CLASS_SYSTEM: 1841 switch (type) 1842 { 1843 case AARCH64_OPND_PSTATEFIELD: 1844 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4); 1845 /* MSR SPSel, #uimm4 1846 Uses uimm4 as a control value to select the stack pointer: if 1847 bit 0 is set it selects the current exception level's stack 1848 pointer, if bit 0 is clear it selects shared EL0 stack pointer. 1849 Bits 1 to 3 of uimm4 are reserved and should be zero. */ 1850 if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1) 1851 { 1852 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1); 1853 return 0; 1854 } 1855 break; 1856 default: 1857 break; 1858 } 1859 break; 1860 1861 case AARCH64_OPND_CLASS_SIMD_ELEMENT: 1862 /* Get the upper bound for the element index. */ 1863 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1; 1864 /* Index out-of-range. */ 1865 if (!value_in_range_p (opnd->reglane.index, 0, num)) 1866 { 1867 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num); 1868 return 0; 1869 } 1870 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>]. 1871 <Vm> Is the vector register (V0-V31) or (V0-V15), whose 1872 number is encoded in "size:M:Rm": 1873 size <Vm> 1874 00 RESERVED 1875 01 0:Rm 1876 10 M:Rm 1877 11 RESERVED */ 1878 if (type == AARCH64_OPND_Em && qualifier == AARCH64_OPND_QLF_S_H 1879 && !value_in_range_p (opnd->reglane.regno, 0, 15)) 1880 { 1881 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15); 1882 return 0; 1883 } 1884 break; 1885 1886 case AARCH64_OPND_CLASS_MODIFIED_REG: 1887 assert (idx == 1 || idx == 2); 1888 switch (type) 1889 { 1890 case AARCH64_OPND_Rm_EXT: 1891 if (aarch64_extend_operator_p (opnd->shifter.kind) == FALSE 1892 && opnd->shifter.kind != AARCH64_MOD_LSL) 1893 { 1894 set_other_error (mismatch_detail, idx, 1895 _("extend operator expected")); 1896 return 0; 1897 } 1898 /* It is not optional unless at least one of "Rd" or "Rn" is '11111' 1899 (i.e. SP), in which case it defaults to LSL. The LSL alias is 1900 only valid when "Rd" or "Rn" is '11111', and is preferred in that 1901 case. */ 1902 if (!aarch64_stack_pointer_p (opnds + 0) 1903 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1))) 1904 { 1905 if (!opnd->shifter.operator_present) 1906 { 1907 set_other_error (mismatch_detail, idx, 1908 _("missing extend operator")); 1909 return 0; 1910 } 1911 else if (opnd->shifter.kind == AARCH64_MOD_LSL) 1912 { 1913 set_other_error (mismatch_detail, idx, 1914 _("'LSL' operator not allowed")); 1915 return 0; 1916 } 1917 } 1918 assert (opnd->shifter.operator_present /* Default to LSL. */ 1919 || opnd->shifter.kind == AARCH64_MOD_LSL); 1920 if (!value_in_range_p (opnd->shifter.amount, 0, 4)) 1921 { 1922 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4); 1923 return 0; 1924 } 1925 /* In the 64-bit form, the final register operand is written as Wm 1926 for all but the (possibly omitted) UXTX/LSL and SXTX 1927 operators. 1928 N.B. GAS allows X register to be used with any operator as a 1929 programming convenience. 
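	     For example, ADD X0, SP, X1 may omit the operator altogether (it
	     defaults to LSL #0), whereas with a non-SP Rd/Rn an extend such as
	     UXTW or SXTX must be written explicitly and a plain LSL is
	     rejected by the check above.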
*/
1930 if (qualifier == AARCH64_OPND_QLF_X
1931 && opnd->shifter.kind != AARCH64_MOD_LSL
1932 && opnd->shifter.kind != AARCH64_MOD_UXTX
1933 && opnd->shifter.kind != AARCH64_MOD_SXTX)
1934 {
1935 set_other_error (mismatch_detail, idx, _("W register expected"));
1936 return 0;
1937 }
1938 break;
1939
1940 case AARCH64_OPND_Rm_SFT:
1941 /* ROR is not available to the shifted register operand in
1942 arithmetic instructions. */
1943 if (aarch64_shift_operator_p (opnd->shifter.kind) == FALSE)
1944 {
1945 set_other_error (mismatch_detail, idx,
1946 _("shift operator expected"));
1947 return 0;
1948 }
1949 if (opnd->shifter.kind == AARCH64_MOD_ROR
1950 && opcode->iclass != log_shift)
1951 {
1952 set_other_error (mismatch_detail, idx,
1953 _("'ROR' operator not allowed"));
1954 return 0;
1955 }
1956 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
1957 if (!value_in_range_p (opnd->shifter.amount, 0, num))
1958 {
1959 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
1960 return 0;
1961 }
1962 break;
1963
1964 default:
1965 break;
1966 }
1967 break;
1968
1969 default:
1970 break;
1971 }
1972
1973 return 1;
1974 }
1975
1976 /* Main entrypoint for the operand constraint checking.
1977
1978 Return 1 if operands of *INST meet the constraint applied by the operand
1979 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
1980 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
1981 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
1982 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
1983 error kind when it is notified that an instruction does not pass the check).
1984
1985 Un-determined operand qualifiers may get established during the process. */
1986
1987 int
1988 aarch64_match_operands_constraint (aarch64_inst *inst,
1989 aarch64_operand_error *mismatch_detail)
1990 {
1991 int i;
1992
1993 DEBUG_TRACE ("enter");
1994
1995 /* Match operands' qualifier.
1996 *INST has already had qualifiers established for some, if not all, of
1997 its operands; we need to find out whether these established
1998 qualifiers match one of the qualifier sequences in
1999 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
2000 with the corresponding qualifier in such a sequence.
2001 Only basic operand constraint checking is done here; the more thorough
2002 constraint checking will be carried out by operand_general_constraint_met_p,
2003 which has to be called after this in order to get all of the operands'
2004 qualifiers established. */
2005 if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
2006 {
2007 DEBUG_TRACE ("FAIL on operand qualifier matching");
2008 if (mismatch_detail)
2009 {
2010 /* Return an error type to indicate that it is the qualifier
2011 matching failure; we don't care about which operand as there
2012 is enough information in the opcode table to reproduce it. */
2013 mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
2014 mismatch_detail->index = -1;
2015 mismatch_detail->error = NULL;
2016 }
2017 return 0;
2018 }
2019
2020 /* Match operands' constraint.
*/ 2021 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i) 2022 { 2023 enum aarch64_opnd type = inst->opcode->operands[i]; 2024 if (type == AARCH64_OPND_NIL) 2025 break; 2026 if (inst->operands[i].skip) 2027 { 2028 DEBUG_TRACE ("skip the incomplete operand %d", i); 2029 continue; 2030 } 2031 if (operand_general_constraint_met_p (inst->operands, i, type, 2032 inst->opcode, mismatch_detail) == 0) 2033 { 2034 DEBUG_TRACE ("FAIL on operand %d", i); 2035 return 0; 2036 } 2037 } 2038 2039 DEBUG_TRACE ("PASS"); 2040 2041 return 1; 2042 } 2043 2044 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE. 2045 Also updates the TYPE of each INST->OPERANDS with the corresponding 2046 value of OPCODE->OPERANDS. 2047 2048 Note that some operand qualifiers may need to be manually cleared by 2049 the caller before it further calls the aarch64_opcode_encode; by 2050 doing this, it helps the qualifier matching facilities work 2051 properly. */ 2052 2053 const aarch64_opcode* 2054 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode) 2055 { 2056 int i; 2057 const aarch64_opcode *old = inst->opcode; 2058 2059 inst->opcode = opcode; 2060 2061 /* Update the operand types. */ 2062 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i) 2063 { 2064 inst->operands[i].type = opcode->operands[i]; 2065 if (opcode->operands[i] == AARCH64_OPND_NIL) 2066 break; 2067 } 2068 2069 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name); 2070 2071 return old; 2072 } 2073 2074 int 2075 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand) 2076 { 2077 int i; 2078 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i) 2079 if (operands[i] == operand) 2080 return i; 2081 else if (operands[i] == AARCH64_OPND_NIL) 2082 break; 2083 return -1; 2084 } 2085 2086 /* [0][0] 32-bit integer regs with sp Wn 2087 [0][1] 64-bit integer regs with sp Xn sf=1 2088 [1][0] 32-bit integer regs with #0 Wn 2089 [1][1] 64-bit integer regs with #0 Xn sf=1 */ 2090 static const char *int_reg[2][2][32] = { 2091 #define R32 "w" 2092 #define R64 "x" 2093 { { R32 "0", R32 "1", R32 "2", R32 "3", R32 "4", R32 "5", R32 "6", R32 "7", 2094 R32 "8", R32 "9", R32 "10", R32 "11", R32 "12", R32 "13", R32 "14", R32 "15", 2095 R32 "16", R32 "17", R32 "18", R32 "19", R32 "20", R32 "21", R32 "22", R32 "23", 2096 R32 "24", R32 "25", R32 "26", R32 "27", R32 "28", R32 "29", R32 "30", "wsp" }, 2097 { R64 "0", R64 "1", R64 "2", R64 "3", R64 "4", R64 "5", R64 "6", R64 "7", 2098 R64 "8", R64 "9", R64 "10", R64 "11", R64 "12", R64 "13", R64 "14", R64 "15", 2099 R64 "16", R64 "17", R64 "18", R64 "19", R64 "20", R64 "21", R64 "22", R64 "23", 2100 R64 "24", R64 "25", R64 "26", R64 "27", R64 "28", R64 "29", R64 "30", "sp" } }, 2101 { { R32 "0", R32 "1", R32 "2", R32 "3", R32 "4", R32 "5", R32 "6", R32 "7", 2102 R32 "8", R32 "9", R32 "10", R32 "11", R32 "12", R32 "13", R32 "14", R32 "15", 2103 R32 "16", R32 "17", R32 "18", R32 "19", R32 "20", R32 "21", R32 "22", R32 "23", 2104 R32 "24", R32 "25", R32 "26", R32 "27", R32 "28", R32 "29", R32 "30", R32 "zr" }, 2105 { R64 "0", R64 "1", R64 "2", R64 "3", R64 "4", R64 "5", R64 "6", R64 "7", 2106 R64 "8", R64 "9", R64 "10", R64 "11", R64 "12", R64 "13", R64 "14", R64 "15", 2107 R64 "16", R64 "17", R64 "18", R64 "19", R64 "20", R64 "21", R64 "22", R64 "23", 2108 R64 "24", R64 "25", R64 "26", R64 "27", R64 "28", R64 "29", R64 "30", R64 "zr" } } 2109 #undef R64 2110 #undef R32 2111 }; 2112 2113 /* Return the integer register name. 2114 if SP_REG_P is not 0, R31 is an SP reg, other R31 is the zero reg. 
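For example, register number 31 with an X qualifier is printed as "sp" when
SP_REG_P is non-zero and as "xzr" otherwise; all other register numbers print
the same either way.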
*/ 2115 2116 static inline const char * 2117 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p) 2118 { 2119 const int has_zr = sp_reg_p ? 0 : 1; 2120 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1; 2121 return int_reg[has_zr][is_64][regno]; 2122 } 2123 2124 /* Like get_int_reg_name, but IS_64 is always 1. */ 2125 2126 static inline const char * 2127 get_64bit_int_reg_name (int regno, int sp_reg_p) 2128 { 2129 const int has_zr = sp_reg_p ? 0 : 1; 2130 return int_reg[has_zr][1][regno]; 2131 } 2132 2133 /* Types for expanding an encoded 8-bit value to a floating-point value. */ 2134 2135 typedef union 2136 { 2137 uint64_t i; 2138 double d; 2139 } double_conv_t; 2140 2141 typedef union 2142 { 2143 uint32_t i; 2144 float f; 2145 } single_conv_t; 2146 2147 /* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and 2148 normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8 2149 (depending on the type of the instruction). IMM8 will be expanded to a 2150 single-precision floating-point value (IS_DP == 0) or a double-precision 2151 floating-point value (IS_DP == 1). The expanded value is returned. */ 2152 2153 static uint64_t 2154 expand_fp_imm (int is_dp, uint32_t imm8) 2155 { 2156 uint64_t imm; 2157 uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4; 2158 2159 imm8_7 = (imm8 >> 7) & 0x01; /* imm8<7> */ 2160 imm8_6_0 = imm8 & 0x7f; /* imm8<6:0> */ 2161 imm8_6 = imm8_6_0 >> 6; /* imm8<6> */ 2162 imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2) 2163 | (imm8_6 << 1) | imm8_6; /* Replicate(imm8<6>,4) */ 2164 if (is_dp) 2165 { 2166 imm = (imm8_7 << (63-32)) /* imm8<7> */ 2167 | ((imm8_6 ^ 1) << (62-32)) /* NOT(imm8<6) */ 2168 | (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32)) 2169 | (imm8_6 << (56-32)) | (imm8_6 << (55-32)) /* Replicate(imm8<6>,7) */ 2170 | (imm8_6_0 << (48-32)); /* imm8<6>:imm8<5:0> */ 2171 imm <<= 32; 2172 } 2173 else 2174 { 2175 imm = (imm8_7 << 31) /* imm8<7> */ 2176 | ((imm8_6 ^ 1) << 30) /* NOT(imm8<6>) */ 2177 | (imm8_6_repl4 << 26) /* Replicate(imm8<6>,4) */ 2178 | (imm8_6_0 << 19); /* imm8<6>:imm8<5:0> */ 2179 } 2180 2181 return imm; 2182 } 2183 2184 /* Produce the string representation of the register list operand *OPND 2185 in the buffer pointed by BUF of size SIZE. */ 2186 static void 2187 print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd) 2188 { 2189 const int num_regs = opnd->reglist.num_regs; 2190 const int first_reg = opnd->reglist.first_regno; 2191 const int last_reg = (first_reg + num_regs - 1) & 0x1f; 2192 const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier); 2193 char tb[8]; /* Temporary buffer. */ 2194 2195 assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index); 2196 assert (num_regs >= 1 && num_regs <= 4); 2197 2198 /* Prepare the index if any. */ 2199 if (opnd->reglist.has_index) 2200 snprintf (tb, 8, "[%d]", opnd->reglist.index); 2201 else 2202 tb[0] = '\0'; 2203 2204 /* The hyphenated form is preferred for disassembly if there are 2205 more than two registers in the list, and the register numbers 2206 are monotonically increasing in increments of one. 
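For example (illustrative registers), a three-register list starting at v4
with the 8B qualifier is printed as {v4.8b-v6.8b}, whereas a list that wraps
around the register file, e.g. one starting at v31, falls back to the
comma-separated form {v31.8b, v0.8b, v1.8b}.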
*/
2207 if (num_regs > 2 && last_reg > first_reg)
2208 snprintf (buf, size, "{v%d.%s-v%d.%s}%s", first_reg, qlf_name,
2209 last_reg, qlf_name, tb);
2210 else
2211 {
2212 const int reg0 = first_reg;
2213 const int reg1 = (first_reg + 1) & 0x1f;
2214 const int reg2 = (first_reg + 2) & 0x1f;
2215 const int reg3 = (first_reg + 3) & 0x1f;
2216
2217 switch (num_regs)
2218 {
2219 case 1:
2220 snprintf (buf, size, "{v%d.%s}%s", reg0, qlf_name, tb);
2221 break;
2222 case 2:
2223 snprintf (buf, size, "{v%d.%s, v%d.%s}%s", reg0, qlf_name,
2224 reg1, qlf_name, tb);
2225 break;
2226 case 3:
2227 snprintf (buf, size, "{v%d.%s, v%d.%s, v%d.%s}%s", reg0, qlf_name,
2228 reg1, qlf_name, reg2, qlf_name, tb);
2229 break;
2230 case 4:
2231 snprintf (buf, size, "{v%d.%s, v%d.%s, v%d.%s, v%d.%s}%s",
2232 reg0, qlf_name, reg1, qlf_name, reg2, qlf_name,
2233 reg3, qlf_name, tb);
2234 break;
2235 }
2236 }
2237 }
2238
2239 /* Produce the string representation of the register offset address operand
2240 *OPND in the buffer pointed by BUF of size SIZE. */
2241 static void
2242 print_register_offset_address (char *buf, size_t size,
2243 const aarch64_opnd_info *opnd)
2244 {
2245 const size_t tblen = 16;
2246 char tb[tblen]; /* Temporary buffer. */
2247 bfd_boolean lsl_p = FALSE; /* Is LSL shift operator? */
2248 bfd_boolean wm_p = FALSE; /* Should Rm be Wm? */
2249 bfd_boolean print_extend_p = TRUE;
2250 bfd_boolean print_amount_p = TRUE;
2251 const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
2252
2253 switch (opnd->shifter.kind)
2254 {
2255 case AARCH64_MOD_UXTW: wm_p = TRUE; break;
2256 case AARCH64_MOD_LSL : lsl_p = TRUE; break;
2257 case AARCH64_MOD_SXTW: wm_p = TRUE; break;
2258 case AARCH64_MOD_SXTX: break;
2259 default: assert (0);
2260 }
2261
2262 if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
2263 || !opnd->shifter.amount_present))
2264 {
2265 /* Don't print the shift/extend amount when the amount is zero and
2266 it is not the special case of an 8-bit load/store instruction. */
2267 print_amount_p = FALSE;
2268 /* Likewise, no need to print the shift operator LSL in such a
2269 situation. */
2270 if (lsl_p)
2271 print_extend_p = FALSE;
2272 }
2273
2274 /* Prepare for the extend/shift. */
2275 if (print_extend_p)
2276 {
2277 if (print_amount_p)
2278 snprintf (tb, tblen, ",%s #%d", shift_name, opnd->shifter.amount);
2279 else
2280 snprintf (tb, tblen, ",%s", shift_name);
2281 }
2282 else
2283 tb[0] = '\0';
2284
2285 snprintf (buf, size, "[%s,%c%d%s]",
2286 get_64bit_int_reg_name (opnd->addr.base_regno, 1),
2287 wm_p ? 'w' : 'x', opnd->addr.offset.regno, tb);
2288 }
2289
2290 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
2291 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
2292 PC, PCREL_P and ADDRESS are used to pass in and return information about
2293 the PC-relative address calculation, where the PC value is passed in
2294 PC. If the operand is PC-relative, *PCREL_P (if PCREL_P non-NULL)
2295 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
2296 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
2297
2298 The function serves both the disassembler and the assembler diagnostics
2299 issuer, which is the reason why it lives in this file.
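For example (illustrative operand), when printing the label operand of a B or
BL instruction the immediate already holds the byte offset of the branch
target, so *ADDRESS is set to PC (adjusted by AARCH64_PCREL_OFFSET) plus that
offset and *PCREL_P is set to 1.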
*/
2300
2301 void
2302 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
2303 const aarch64_opcode *opcode,
2304 const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
2305 bfd_vma *address)
2306 {
2307 int i;
2308 const char *name = NULL;
2309 const aarch64_opnd_info *opnd = opnds + idx;
2310 enum aarch64_modifier_kind kind;
2311 uint64_t addr;
2312
2313 buf[0] = '\0';
2314 if (pcrel_p)
2315 *pcrel_p = 0;
2316
2317 switch (opnd->type)
2318 {
2319 case AARCH64_OPND_Rd:
2320 case AARCH64_OPND_Rn:
2321 case AARCH64_OPND_Rm:
2322 case AARCH64_OPND_Rt:
2323 case AARCH64_OPND_Rt2:
2324 case AARCH64_OPND_Rs:
2325 case AARCH64_OPND_Ra:
2326 case AARCH64_OPND_Rt_SYS:
2327 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
2328 the <ic_op>, therefore we use opnd->present to override the
2329 generic optional-ness information. */
2330 if (opnd->type == AARCH64_OPND_Rt_SYS && !opnd->present)
2331 break;
2332 /* Omit the operand, e.g. RET. */
2333 if (optional_operand_p (opcode, idx)
2334 && opnd->reg.regno == get_optional_operand_default_value (opcode))
2335 break;
2336 assert (opnd->qualifier == AARCH64_OPND_QLF_W
2337 || opnd->qualifier == AARCH64_OPND_QLF_X);
2338 snprintf (buf, size, "%s",
2339 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
2340 break;
2341
2342 case AARCH64_OPND_Rd_SP:
2343 case AARCH64_OPND_Rn_SP:
2344 assert (opnd->qualifier == AARCH64_OPND_QLF_W
2345 || opnd->qualifier == AARCH64_OPND_QLF_WSP
2346 || opnd->qualifier == AARCH64_OPND_QLF_X
2347 || opnd->qualifier == AARCH64_OPND_QLF_SP);
2348 snprintf (buf, size, "%s",
2349 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
2350 break;
2351
2352 case AARCH64_OPND_Rm_EXT:
2353 kind = opnd->shifter.kind;
2354 assert (idx == 1 || idx == 2);
2355 if ((aarch64_stack_pointer_p (opnds)
2356 || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
2357 && ((opnd->qualifier == AARCH64_OPND_QLF_W
2358 && opnds[0].qualifier == AARCH64_OPND_QLF_W
2359 && kind == AARCH64_MOD_UXTW)
2360 || (opnd->qualifier == AARCH64_OPND_QLF_X
2361 && kind == AARCH64_MOD_UXTX)))
2362 {
2363 /* 'LSL' is the preferred form in this case. */
2364 kind = AARCH64_MOD_LSL;
2365 if (opnd->shifter.amount == 0)
2366 {
2367 /* Shifter omitted.
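For example (illustrative operands), add sp, x1, x2, uxtx #0 is shown in its
preferred form add sp, x1, x2.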
*/ 2368 snprintf (buf, size, "%s", 2369 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)); 2370 break; 2371 } 2372 } 2373 if (opnd->shifter.amount) 2374 snprintf (buf, size, "%s, %s #%d", 2375 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0), 2376 aarch64_operand_modifiers[kind].name, 2377 opnd->shifter.amount); 2378 else 2379 snprintf (buf, size, "%s, %s", 2380 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0), 2381 aarch64_operand_modifiers[kind].name); 2382 break; 2383 2384 case AARCH64_OPND_Rm_SFT: 2385 assert (opnd->qualifier == AARCH64_OPND_QLF_W 2386 || opnd->qualifier == AARCH64_OPND_QLF_X); 2387 if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL) 2388 snprintf (buf, size, "%s", 2389 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)); 2390 else 2391 snprintf (buf, size, "%s, %s #%d", 2392 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0), 2393 aarch64_operand_modifiers[opnd->shifter.kind].name, 2394 opnd->shifter.amount); 2395 break; 2396 2397 case AARCH64_OPND_Fd: 2398 case AARCH64_OPND_Fn: 2399 case AARCH64_OPND_Fm: 2400 case AARCH64_OPND_Fa: 2401 case AARCH64_OPND_Ft: 2402 case AARCH64_OPND_Ft2: 2403 case AARCH64_OPND_Sd: 2404 case AARCH64_OPND_Sn: 2405 case AARCH64_OPND_Sm: 2406 snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier), 2407 opnd->reg.regno); 2408 break; 2409 2410 case AARCH64_OPND_Vd: 2411 case AARCH64_OPND_Vn: 2412 case AARCH64_OPND_Vm: 2413 snprintf (buf, size, "v%d.%s", opnd->reg.regno, 2414 aarch64_get_qualifier_name (opnd->qualifier)); 2415 break; 2416 2417 case AARCH64_OPND_Ed: 2418 case AARCH64_OPND_En: 2419 case AARCH64_OPND_Em: 2420 snprintf (buf, size, "v%d.%s[%d]", opnd->reglane.regno, 2421 aarch64_get_qualifier_name (opnd->qualifier), 2422 opnd->reglane.index); 2423 break; 2424 2425 case AARCH64_OPND_VdD1: 2426 case AARCH64_OPND_VnD1: 2427 snprintf (buf, size, "v%d.d[1]", opnd->reg.regno); 2428 break; 2429 2430 case AARCH64_OPND_LVn: 2431 case AARCH64_OPND_LVt: 2432 case AARCH64_OPND_LVt_AL: 2433 case AARCH64_OPND_LEt: 2434 print_register_list (buf, size, opnd); 2435 break; 2436 2437 case AARCH64_OPND_Cn: 2438 case AARCH64_OPND_Cm: 2439 snprintf (buf, size, "C%d", opnd->reg.regno); 2440 break; 2441 2442 case AARCH64_OPND_IDX: 2443 case AARCH64_OPND_IMM: 2444 case AARCH64_OPND_WIDTH: 2445 case AARCH64_OPND_UIMM3_OP1: 2446 case AARCH64_OPND_UIMM3_OP2: 2447 case AARCH64_OPND_BIT_NUM: 2448 case AARCH64_OPND_IMM_VLSL: 2449 case AARCH64_OPND_IMM_VLSR: 2450 case AARCH64_OPND_SHLL_IMM: 2451 case AARCH64_OPND_IMM0: 2452 case AARCH64_OPND_IMMR: 2453 case AARCH64_OPND_IMMS: 2454 case AARCH64_OPND_FBITS: 2455 snprintf (buf, size, "#%" PRIi64, opnd->imm.value); 2456 break; 2457 2458 case AARCH64_OPND_IMM_MOV: 2459 switch (aarch64_get_qualifier_esize (opnds[0].qualifier)) 2460 { 2461 case 4: /* e.g. MOV Wd, #<imm32>. */ 2462 { 2463 int imm32 = opnd->imm.value; 2464 snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32); 2465 } 2466 break; 2467 case 8: /* e.g. MOV Xd, #<imm64>. 
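The immediate is printed in hex with its decimal value appended in a line
comment, e.g. (illustrative value) "#0x2a	// #42".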
*/
2468 snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
2469 opnd->imm.value, opnd->imm.value);
2470 break;
2471 default: assert (0);
2472 }
2473 break;
2474
2475 case AARCH64_OPND_FPIMM0:
2476 snprintf (buf, size, "#0.0");
2477 break;
2478
2479 case AARCH64_OPND_LIMM:
2480 case AARCH64_OPND_AIMM:
2481 case AARCH64_OPND_HALF:
2482 if (opnd->shifter.amount)
2483 snprintf (buf, size, "#0x%" PRIx64 ", lsl #%d", opnd->imm.value,
2484 opnd->shifter.amount);
2485 else
2486 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
2487 break;
2488
2489 case AARCH64_OPND_SIMD_IMM:
2490 case AARCH64_OPND_SIMD_IMM_SFT:
2491 if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
2492 || opnd->shifter.kind == AARCH64_MOD_NONE)
2493 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
2494 else
2495 snprintf (buf, size, "#0x%" PRIx64 ", %s #%d", opnd->imm.value,
2496 aarch64_operand_modifiers[opnd->shifter.kind].name,
2497 opnd->shifter.amount);
2498 break;
2499
2500 case AARCH64_OPND_FPIMM:
2501 case AARCH64_OPND_SIMD_FPIMM:
2502 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
2503 {
2504 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
2505 {
2506 single_conv_t c;
2507 c.i = expand_fp_imm (0, opnd->imm.value);
2508 snprintf (buf, size, "#%.18e", c.f);
2509 }
2510 break;
2511 case 8: /* e.g. FMOV <Dd>, #<imm>. */
2512 {
2513 double_conv_t c;
2514 c.i = expand_fp_imm (1, opnd->imm.value);
2515 snprintf (buf, size, "#%.18e", c.d);
2516 }
2517 break;
2518 default: assert (0);
2519 }
2520 break;
2521
2522 case AARCH64_OPND_CCMP_IMM:
2523 case AARCH64_OPND_NZCV:
2524 case AARCH64_OPND_EXCEPTION:
2525 case AARCH64_OPND_UIMM4:
2526 case AARCH64_OPND_UIMM7:
2527 if (optional_operand_p (opcode, idx) == TRUE
2528 && (opnd->imm.value ==
2529 (int64_t) get_optional_operand_default_value (opcode)))
2530 /* Omit the operand, e.g. DCPS1. */
2531 break;
2532 snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
2533 break;
2534
2535 case AARCH64_OPND_COND:
2536 case AARCH64_OPND_COND1:
2537 snprintf (buf, size, "%s", opnd->cond->names[0]);
2538 break;
2539
2540 case AARCH64_OPND_ADDR_ADRP:
2541 addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
2542 + opnd->imm.value;
2543 if (pcrel_p)
2544 *pcrel_p = 1;
2545 if (address)
2546 *address = addr;
2547 /* This is not necessary during the disassembling, as print_address_func
2548 in the disassemble_info will take care of the printing. But some
2549 other callers may still be interested in getting the string in *BUF,
2550 so here we do snprintf regardless. */
2551 snprintf (buf, size, "#0x%" PRIx64, addr);
2552 break;
2553
2554 case AARCH64_OPND_ADDR_PCREL14:
2555 case AARCH64_OPND_ADDR_PCREL19:
2556 case AARCH64_OPND_ADDR_PCREL21:
2557 case AARCH64_OPND_ADDR_PCREL26:
2558 addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
2559 if (pcrel_p)
2560 *pcrel_p = 1;
2561 if (address)
2562 *address = addr;
2563 /* This is not necessary during the disassembling, as print_address_func
2564 in the disassemble_info will take care of the printing. But some
2565 other callers may still be interested in getting the string in *BUF,
2566 so here we do snprintf regardless.
*/ 2567 snprintf (buf, size, "#0x%" PRIx64, addr); 2568 break; 2569 2570 case AARCH64_OPND_ADDR_SIMPLE: 2571 case AARCH64_OPND_SIMD_ADDR_SIMPLE: 2572 case AARCH64_OPND_SIMD_ADDR_POST: 2573 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1); 2574 if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST) 2575 { 2576 if (opnd->addr.offset.is_reg) 2577 snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno); 2578 else 2579 snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm); 2580 } 2581 else 2582 snprintf (buf, size, "[%s]", name); 2583 break; 2584 2585 case AARCH64_OPND_ADDR_REGOFF: 2586 print_register_offset_address (buf, size, opnd); 2587 break; 2588 2589 case AARCH64_OPND_ADDR_SIMM7: 2590 case AARCH64_OPND_ADDR_SIMM9: 2591 case AARCH64_OPND_ADDR_SIMM9_2: 2592 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1); 2593 if (opnd->addr.writeback) 2594 { 2595 if (opnd->addr.preind) 2596 snprintf (buf, size, "[%s,#%d]!", name, opnd->addr.offset.imm); 2597 else 2598 snprintf (buf, size, "[%s],#%d", name, opnd->addr.offset.imm); 2599 } 2600 else 2601 { 2602 if (opnd->addr.offset.imm) 2603 snprintf (buf, size, "[%s,#%d]", name, opnd->addr.offset.imm); 2604 else 2605 snprintf (buf, size, "[%s]", name); 2606 } 2607 break; 2608 2609 case AARCH64_OPND_ADDR_UIMM12: 2610 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1); 2611 if (opnd->addr.offset.imm) 2612 snprintf (buf, size, "[%s,#%d]", name, opnd->addr.offset.imm); 2613 else 2614 snprintf (buf, size, "[%s]", name); 2615 break; 2616 2617 case AARCH64_OPND_SYSREG: 2618 for (i = 0; aarch64_sys_regs[i].name; ++i) 2619 if (aarch64_sys_regs[i].value == opnd->sysreg 2620 && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs[i])) 2621 break; 2622 if (aarch64_sys_regs[i].name) 2623 snprintf (buf, size, "%s", aarch64_sys_regs[i].name); 2624 else 2625 { 2626 /* Implementation defined system register. */ 2627 unsigned int value = opnd->sysreg; 2628 snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3, 2629 (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf, 2630 value & 0x7); 2631 } 2632 break; 2633 2634 case AARCH64_OPND_PSTATEFIELD: 2635 for (i = 0; aarch64_pstatefields[i].name; ++i) 2636 if (aarch64_pstatefields[i].value == opnd->pstatefield) 2637 break; 2638 assert (aarch64_pstatefields[i].name); 2639 snprintf (buf, size, "%s", aarch64_pstatefields[i].name); 2640 break; 2641 2642 case AARCH64_OPND_SYSREG_AT: 2643 case AARCH64_OPND_SYSREG_DC: 2644 case AARCH64_OPND_SYSREG_IC: 2645 case AARCH64_OPND_SYSREG_TLBI: 2646 snprintf (buf, size, "%s", opnd->sysins_op->template); 2647 break; 2648 2649 case AARCH64_OPND_BARRIER: 2650 snprintf (buf, size, "%s", opnd->barrier->name); 2651 break; 2652 2653 case AARCH64_OPND_BARRIER_ISB: 2654 /* Operand can be omitted, e.g. in DCPS1. */ 2655 if (! 
optional_operand_p (opcode, idx) 2656 || (opnd->barrier->value 2657 != get_optional_operand_default_value (opcode))) 2658 snprintf (buf, size, "#0x%x", opnd->barrier->value); 2659 break; 2660 2661 case AARCH64_OPND_PRFOP: 2662 if (opnd->prfop->name != NULL) 2663 snprintf (buf, size, "%s", opnd->prfop->name); 2664 else 2665 snprintf (buf, size, "#0x%02x", opnd->prfop->value); 2666 break; 2667 2668 default: 2669 assert (0); 2670 } 2671 } 2672 2673 #define CPENC(op0,op1,crn,crm,op2) \ 2674 ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5) 2675 /* for 3.9.3 Instructions for Accessing Special Purpose Registers */ 2676 #define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2)) 2677 /* for 3.9.10 System Instructions */ 2678 #define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2)) 2679 2680 #define C0 0 2681 #define C1 1 2682 #define C2 2 2683 #define C3 3 2684 #define C4 4 2685 #define C5 5 2686 #define C6 6 2687 #define C7 7 2688 #define C8 8 2689 #define C9 9 2690 #define C10 10 2691 #define C11 11 2692 #define C12 12 2693 #define C13 13 2694 #define C14 14 2695 #define C15 15 2696 2697 #ifdef F_DEPRECATED 2698 #undef F_DEPRECATED 2699 #endif 2700 #define F_DEPRECATED 0x1 /* Deprecated system register. */ 2701 2702 /* TODO there are two more issues need to be resolved 2703 1. handle read-only and write-only system registers 2704 2. handle cpu-implementation-defined system registers. */ 2705 const aarch64_sys_reg aarch64_sys_regs [] = 2706 { 2707 { "spsr_el1", CPEN_(0,C0,0), 0 }, /* = spsr_svc */ 2708 { "elr_el1", CPEN_(0,C0,1), 0 }, 2709 { "sp_el0", CPEN_(0,C1,0), 0 }, 2710 { "spsel", CPEN_(0,C2,0), 0 }, 2711 { "daif", CPEN_(3,C2,1), 0 }, 2712 { "currentel", CPEN_(0,C2,2), 0 }, /* RO */ 2713 { "nzcv", CPEN_(3,C2,0), 0 }, 2714 { "fpcr", CPEN_(3,C4,0), 0 }, 2715 { "fpsr", CPEN_(3,C4,1), 0 }, 2716 { "dspsr_el0", CPEN_(3,C5,0), 0 }, 2717 { "dlr_el0", CPEN_(3,C5,1), 0 }, 2718 { "spsr_el2", CPEN_(4,C0,0), 0 }, /* = spsr_hyp */ 2719 { "elr_el2", CPEN_(4,C0,1), 0 }, 2720 { "sp_el1", CPEN_(4,C1,0), 0 }, 2721 { "spsr_irq", CPEN_(4,C3,0), 0 }, 2722 { "spsr_abt", CPEN_(4,C3,1), 0 }, 2723 { "spsr_und", CPEN_(4,C3,2), 0 }, 2724 { "spsr_fiq", CPEN_(4,C3,3), 0 }, 2725 { "spsr_el3", CPEN_(6,C0,0), 0 }, 2726 { "elr_el3", CPEN_(6,C0,1), 0 }, 2727 { "sp_el2", CPEN_(6,C1,0), 0 }, 2728 { "spsr_svc", CPEN_(0,C0,0), F_DEPRECATED }, /* = spsr_el1 */ 2729 { "spsr_hyp", CPEN_(4,C0,0), F_DEPRECATED }, /* = spsr_el2 */ 2730 { "midr_el1", CPENC(3,0,C0,C0,0), 0 }, /* RO */ 2731 { "ctr_el0", CPENC(3,3,C0,C0,1), 0 }, /* RO */ 2732 { "mpidr_el1", CPENC(3,0,C0,C0,5), 0 }, /* RO */ 2733 { "revidr_el1", CPENC(3,0,C0,C0,6), 0 }, /* RO */ 2734 { "aidr_el1", CPENC(3,1,C0,C0,7), 0 }, /* RO */ 2735 { "dczid_el0", CPENC(3,3,C0,C0,7), 0 }, /* RO */ 2736 { "id_dfr0_el1", CPENC(3,0,C0,C1,2), 0 }, /* RO */ 2737 { "id_pfr0_el1", CPENC(3,0,C0,C1,0), 0 }, /* RO */ 2738 { "id_pfr1_el1", CPENC(3,0,C0,C1,1), 0 }, /* RO */ 2739 { "id_afr0_el1", CPENC(3,0,C0,C1,3), 0 }, /* RO */ 2740 { "id_mmfr0_el1", CPENC(3,0,C0,C1,4), 0 }, /* RO */ 2741 { "id_mmfr1_el1", CPENC(3,0,C0,C1,5), 0 }, /* RO */ 2742 { "id_mmfr2_el1", CPENC(3,0,C0,C1,6), 0 }, /* RO */ 2743 { "id_mmfr3_el1", CPENC(3,0,C0,C1,7), 0 }, /* RO */ 2744 { "id_isar0_el1", CPENC(3,0,C0,C2,0), 0 }, /* RO */ 2745 { "id_isar1_el1", CPENC(3,0,C0,C2,1), 0 }, /* RO */ 2746 { "id_isar2_el1", CPENC(3,0,C0,C2,2), 0 }, /* RO */ 2747 { "id_isar3_el1", CPENC(3,0,C0,C2,3), 0 }, /* RO */ 2748 { "id_isar4_el1", CPENC(3,0,C0,C2,4), 0 }, /* RO */ 2749 { "id_isar5_el1", 
CPENC(3,0,C0,C2,5), 0 }, /* RO */ 2750 { "mvfr0_el1", CPENC(3,0,C0,C3,0), 0 }, /* RO */ 2751 { "mvfr1_el1", CPENC(3,0,C0,C3,1), 0 }, /* RO */ 2752 { "mvfr2_el1", CPENC(3,0,C0,C3,2), 0 }, /* RO */ 2753 { "ccsidr_el1", CPENC(3,1,C0,C0,0), 0 }, /* RO */ 2754 { "id_aa64pfr0_el1", CPENC(3,0,C0,C4,0), 0 }, /* RO */ 2755 { "id_aa64pfr1_el1", CPENC(3,0,C0,C4,1), 0 }, /* RO */ 2756 { "id_aa64dfr0_el1", CPENC(3,0,C0,C5,0), 0 }, /* RO */ 2757 { "id_aa64dfr1_el1", CPENC(3,0,C0,C5,1), 0 }, /* RO */ 2758 { "id_aa64isar0_el1", CPENC(3,0,C0,C6,0), 0 }, /* RO */ 2759 { "id_aa64isar1_el1", CPENC(3,0,C0,C6,1), 0 }, /* RO */ 2760 { "id_aa64mmfr0_el1", CPENC(3,0,C0,C7,0), 0 }, /* RO */ 2761 { "id_aa64mmfr1_el1", CPENC(3,0,C0,C7,1), 0 }, /* RO */ 2762 { "id_aa64afr0_el1", CPENC(3,0,C0,C5,4), 0 }, /* RO */ 2763 { "id_aa64afr1_el1", CPENC(3,0,C0,C5,5), 0 }, /* RO */ 2764 { "clidr_el1", CPENC(3,1,C0,C0,1), 0 }, /* RO */ 2765 { "csselr_el1", CPENC(3,2,C0,C0,0), 0 }, /* RO */ 2766 { "vpidr_el2", CPENC(3,4,C0,C0,0), 0 }, 2767 { "vmpidr_el2", CPENC(3,4,C0,C0,5), 0 }, 2768 { "sctlr_el1", CPENC(3,0,C1,C0,0), 0 }, 2769 { "sctlr_el2", CPENC(3,4,C1,C0,0), 0 }, 2770 { "sctlr_el3", CPENC(3,6,C1,C0,0), 0 }, 2771 { "actlr_el1", CPENC(3,0,C1,C0,1), 0 }, 2772 { "actlr_el2", CPENC(3,4,C1,C0,1), 0 }, 2773 { "actlr_el3", CPENC(3,6,C1,C0,1), 0 }, 2774 { "cpacr_el1", CPENC(3,0,C1,C0,2), 0 }, 2775 { "cptr_el2", CPENC(3,4,C1,C1,2), 0 }, 2776 { "cptr_el3", CPENC(3,6,C1,C1,2), 0 }, 2777 { "scr_el3", CPENC(3,6,C1,C1,0), 0 }, 2778 { "hcr_el2", CPENC(3,4,C1,C1,0), 0 }, 2779 { "mdcr_el2", CPENC(3,4,C1,C1,1), 0 }, 2780 { "mdcr_el3", CPENC(3,6,C1,C3,1), 0 }, 2781 { "hstr_el2", CPENC(3,4,C1,C1,3), 0 }, 2782 { "hacr_el2", CPENC(3,4,C1,C1,7), 0 }, 2783 { "ttbr0_el1", CPENC(3,0,C2,C0,0), 0 }, 2784 { "ttbr1_el1", CPENC(3,0,C2,C0,1), 0 }, 2785 { "ttbr0_el2", CPENC(3,4,C2,C0,0), 0 }, 2786 { "ttbr0_el3", CPENC(3,6,C2,C0,0), 0 }, 2787 { "vttbr_el2", CPENC(3,4,C2,C1,0), 0 }, 2788 { "tcr_el1", CPENC(3,0,C2,C0,2), 0 }, 2789 { "tcr_el2", CPENC(3,4,C2,C0,2), 0 }, 2790 { "tcr_el3", CPENC(3,6,C2,C0,2), 0 }, 2791 { "vtcr_el2", CPENC(3,4,C2,C1,2), 0 }, 2792 { "afsr0_el1", CPENC(3,0,C5,C1,0), 0 }, 2793 { "afsr1_el1", CPENC(3,0,C5,C1,1), 0 }, 2794 { "afsr0_el2", CPENC(3,4,C5,C1,0), 0 }, 2795 { "afsr1_el2", CPENC(3,4,C5,C1,1), 0 }, 2796 { "afsr0_el3", CPENC(3,6,C5,C1,0), 0 }, 2797 { "afsr1_el3", CPENC(3,6,C5,C1,1), 0 }, 2798 { "esr_el1", CPENC(3,0,C5,C2,0), 0 }, 2799 { "esr_el2", CPENC(3,4,C5,C2,0), 0 }, 2800 { "esr_el3", CPENC(3,6,C5,C2,0), 0 }, 2801 { "fpexc32_el2", CPENC(3,4,C5,C3,0), 0 }, 2802 { "far_el1", CPENC(3,0,C6,C0,0), 0 }, 2803 { "far_el2", CPENC(3,4,C6,C0,0), 0 }, 2804 { "far_el3", CPENC(3,6,C6,C0,0), 0 }, 2805 { "hpfar_el2", CPENC(3,4,C6,C0,4), 0 }, 2806 { "par_el1", CPENC(3,0,C7,C4,0), 0 }, 2807 { "mair_el1", CPENC(3,0,C10,C2,0), 0 }, 2808 { "mair_el2", CPENC(3,4,C10,C2,0), 0 }, 2809 { "mair_el3", CPENC(3,6,C10,C2,0), 0 }, 2810 { "amair_el1", CPENC(3,0,C10,C3,0), 0 }, 2811 { "amair_el2", CPENC(3,4,C10,C3,0), 0 }, 2812 { "amair_el3", CPENC(3,6,C10,C3,0), 0 }, 2813 { "vbar_el1", CPENC(3,0,C12,C0,0), 0 }, 2814 { "vbar_el2", CPENC(3,4,C12,C0,0), 0 }, 2815 { "vbar_el3", CPENC(3,6,C12,C0,0), 0 }, 2816 { "rvbar_el1", CPENC(3,0,C12,C0,1), 0 }, /* RO */ 2817 { "rvbar_el2", CPENC(3,4,C12,C0,1), 0 }, /* RO */ 2818 { "rvbar_el3", CPENC(3,6,C12,C0,1), 0 }, /* RO */ 2819 { "rmr_el1", CPENC(3,0,C12,C0,2), 0 }, 2820 { "rmr_el2", CPENC(3,4,C12,C0,2), 0 }, 2821 { "rmr_el3", CPENC(3,6,C12,C0,2), 0 }, 2822 { "isr_el1", CPENC(3,0,C12,C1,0), 0 }, /* RO */ 2823 { 
"contextidr_el1", CPENC(3,0,C13,C0,1), 0 }, 2824 { "tpidr_el0", CPENC(3,3,C13,C0,2), 0 }, 2825 { "tpidrro_el0", CPENC(3,3,C13,C0,3), 0 }, /* RO */ 2826 { "tpidr_el1", CPENC(3,0,C13,C0,4), 0 }, 2827 { "tpidr_el2", CPENC(3,4,C13,C0,2), 0 }, 2828 { "tpidr_el3", CPENC(3,6,C13,C0,2), 0 }, 2829 { "teecr32_el1", CPENC(2,2,C0, C0,0), 0 }, /* See section 3.9.7.1 */ 2830 { "cntfrq_el0", CPENC(3,3,C14,C0,0), 0 }, /* RO */ 2831 { "cntpct_el0", CPENC(3,3,C14,C0,1), 0 }, /* RO */ 2832 { "cntvct_el0", CPENC(3,3,C14,C0,2), 0 }, /* RO */ 2833 { "cntvoff_el2", CPENC(3,4,C14,C0,3), 0 }, 2834 { "cntkctl_el1", CPENC(3,0,C14,C1,0), 0 }, 2835 { "cnthctl_el2", CPENC(3,4,C14,C1,0), 0 }, 2836 { "cntp_tval_el0", CPENC(3,3,C14,C2,0), 0 }, 2837 { "cntp_ctl_el0", CPENC(3,3,C14,C2,1), 0 }, 2838 { "cntp_cval_el0", CPENC(3,3,C14,C2,2), 0 }, 2839 { "cntv_tval_el0", CPENC(3,3,C14,C3,0), 0 }, 2840 { "cntv_ctl_el0", CPENC(3,3,C14,C3,1), 0 }, 2841 { "cntv_cval_el0", CPENC(3,3,C14,C3,2), 0 }, 2842 { "cnthp_tval_el2", CPENC(3,4,C14,C2,0), 0 }, 2843 { "cnthp_ctl_el2", CPENC(3,4,C14,C2,1), 0 }, 2844 { "cnthp_cval_el2", CPENC(3,4,C14,C2,2), 0 }, 2845 { "cntps_tval_el1", CPENC(3,7,C14,C2,0), 0 }, 2846 { "cntps_ctl_el1", CPENC(3,7,C14,C2,1), 0 }, 2847 { "cntps_cval_el1", CPENC(3,7,C14,C2,2), 0 }, 2848 { "dacr32_el2", CPENC(3,4,C3,C0,0), 0 }, 2849 { "ifsr32_el2", CPENC(3,4,C5,C0,1), 0 }, 2850 { "teehbr32_el1", CPENC(2,2,C1,C0,0), 0 }, 2851 { "sder32_el3", CPENC(3,6,C1,C1,1), 0 }, 2852 { "mdscr_el1", CPENC(2,0,C0, C2, 2), 0 }, 2853 { "mdccsr_el0", CPENC(2,3,C0, C1, 0), 0 }, /* r */ 2854 { "mdccint_el1", CPENC(2,0,C0, C2, 0), 0 }, 2855 { "dbgdtr_el0", CPENC(2,3,C0, C4, 0), 0 }, 2856 { "dbgdtrrx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* r */ 2857 { "dbgdtrtx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* w */ 2858 { "osdtrrx_el1", CPENC(2,0,C0, C0, 2), 0 }, /* r */ 2859 { "osdtrtx_el1", CPENC(2,0,C0, C3, 2), 0 }, /* w */ 2860 { "oseccr_el1", CPENC(2,0,C0, C6, 2), 0 }, 2861 { "dbgvcr32_el2", CPENC(2,4,C0, C7, 0), 0 }, 2862 { "dbgbvr0_el1", CPENC(2,0,C0, C0, 4), 0 }, 2863 { "dbgbvr1_el1", CPENC(2,0,C0, C1, 4), 0 }, 2864 { "dbgbvr2_el1", CPENC(2,0,C0, C2, 4), 0 }, 2865 { "dbgbvr3_el1", CPENC(2,0,C0, C3, 4), 0 }, 2866 { "dbgbvr4_el1", CPENC(2,0,C0, C4, 4), 0 }, 2867 { "dbgbvr5_el1", CPENC(2,0,C0, C5, 4), 0 }, 2868 { "dbgbvr6_el1", CPENC(2,0,C0, C6, 4), 0 }, 2869 { "dbgbvr7_el1", CPENC(2,0,C0, C7, 4), 0 }, 2870 { "dbgbvr8_el1", CPENC(2,0,C0, C8, 4), 0 }, 2871 { "dbgbvr9_el1", CPENC(2,0,C0, C9, 4), 0 }, 2872 { "dbgbvr10_el1", CPENC(2,0,C0, C10,4), 0 }, 2873 { "dbgbvr11_el1", CPENC(2,0,C0, C11,4), 0 }, 2874 { "dbgbvr12_el1", CPENC(2,0,C0, C12,4), 0 }, 2875 { "dbgbvr13_el1", CPENC(2,0,C0, C13,4), 0 }, 2876 { "dbgbvr14_el1", CPENC(2,0,C0, C14,4), 0 }, 2877 { "dbgbvr15_el1", CPENC(2,0,C0, C15,4), 0 }, 2878 { "dbgbcr0_el1", CPENC(2,0,C0, C0, 5), 0 }, 2879 { "dbgbcr1_el1", CPENC(2,0,C0, C1, 5), 0 }, 2880 { "dbgbcr2_el1", CPENC(2,0,C0, C2, 5), 0 }, 2881 { "dbgbcr3_el1", CPENC(2,0,C0, C3, 5), 0 }, 2882 { "dbgbcr4_el1", CPENC(2,0,C0, C4, 5), 0 }, 2883 { "dbgbcr5_el1", CPENC(2,0,C0, C5, 5), 0 }, 2884 { "dbgbcr6_el1", CPENC(2,0,C0, C6, 5), 0 }, 2885 { "dbgbcr7_el1", CPENC(2,0,C0, C7, 5), 0 }, 2886 { "dbgbcr8_el1", CPENC(2,0,C0, C8, 5), 0 }, 2887 { "dbgbcr9_el1", CPENC(2,0,C0, C9, 5), 0 }, 2888 { "dbgbcr10_el1", CPENC(2,0,C0, C10,5), 0 }, 2889 { "dbgbcr11_el1", CPENC(2,0,C0, C11,5), 0 }, 2890 { "dbgbcr12_el1", CPENC(2,0,C0, C12,5), 0 }, 2891 { "dbgbcr13_el1", CPENC(2,0,C0, C13,5), 0 }, 2892 { "dbgbcr14_el1", CPENC(2,0,C0, C14,5), 0 }, 2893 { "dbgbcr15_el1", CPENC(2,0,C0, 
C15,5), 0 }, 2894 { "dbgwvr0_el1", CPENC(2,0,C0, C0, 6), 0 }, 2895 { "dbgwvr1_el1", CPENC(2,0,C0, C1, 6), 0 }, 2896 { "dbgwvr2_el1", CPENC(2,0,C0, C2, 6), 0 }, 2897 { "dbgwvr3_el1", CPENC(2,0,C0, C3, 6), 0 }, 2898 { "dbgwvr4_el1", CPENC(2,0,C0, C4, 6), 0 }, 2899 { "dbgwvr5_el1", CPENC(2,0,C0, C5, 6), 0 }, 2900 { "dbgwvr6_el1", CPENC(2,0,C0, C6, 6), 0 }, 2901 { "dbgwvr7_el1", CPENC(2,0,C0, C7, 6), 0 }, 2902 { "dbgwvr8_el1", CPENC(2,0,C0, C8, 6), 0 }, 2903 { "dbgwvr9_el1", CPENC(2,0,C0, C9, 6), 0 }, 2904 { "dbgwvr10_el1", CPENC(2,0,C0, C10,6), 0 }, 2905 { "dbgwvr11_el1", CPENC(2,0,C0, C11,6), 0 }, 2906 { "dbgwvr12_el1", CPENC(2,0,C0, C12,6), 0 }, 2907 { "dbgwvr13_el1", CPENC(2,0,C0, C13,6), 0 }, 2908 { "dbgwvr14_el1", CPENC(2,0,C0, C14,6), 0 }, 2909 { "dbgwvr15_el1", CPENC(2,0,C0, C15,6), 0 }, 2910 { "dbgwcr0_el1", CPENC(2,0,C0, C0, 7), 0 }, 2911 { "dbgwcr1_el1", CPENC(2,0,C0, C1, 7), 0 }, 2912 { "dbgwcr2_el1", CPENC(2,0,C0, C2, 7), 0 }, 2913 { "dbgwcr3_el1", CPENC(2,0,C0, C3, 7), 0 }, 2914 { "dbgwcr4_el1", CPENC(2,0,C0, C4, 7), 0 }, 2915 { "dbgwcr5_el1", CPENC(2,0,C0, C5, 7), 0 }, 2916 { "dbgwcr6_el1", CPENC(2,0,C0, C6, 7), 0 }, 2917 { "dbgwcr7_el1", CPENC(2,0,C0, C7, 7), 0 }, 2918 { "dbgwcr8_el1", CPENC(2,0,C0, C8, 7), 0 }, 2919 { "dbgwcr9_el1", CPENC(2,0,C0, C9, 7), 0 }, 2920 { "dbgwcr10_el1", CPENC(2,0,C0, C10,7), 0 }, 2921 { "dbgwcr11_el1", CPENC(2,0,C0, C11,7), 0 }, 2922 { "dbgwcr12_el1", CPENC(2,0,C0, C12,7), 0 }, 2923 { "dbgwcr13_el1", CPENC(2,0,C0, C13,7), 0 }, 2924 { "dbgwcr14_el1", CPENC(2,0,C0, C14,7), 0 }, 2925 { "dbgwcr15_el1", CPENC(2,0,C0, C15,7), 0 }, 2926 { "mdrar_el1", CPENC(2,0,C1, C0, 0), 0 }, /* r */ 2927 { "oslar_el1", CPENC(2,0,C1, C0, 4), 0 }, /* w */ 2928 { "oslsr_el1", CPENC(2,0,C1, C1, 4), 0 }, /* r */ 2929 { "osdlr_el1", CPENC(2,0,C1, C3, 4), 0 }, 2930 { "dbgprcr_el1", CPENC(2,0,C1, C4, 4), 0 }, 2931 { "dbgclaimset_el1", CPENC(2,0,C7, C8, 6), 0 }, 2932 { "dbgclaimclr_el1", CPENC(2,0,C7, C9, 6), 0 }, 2933 { "dbgauthstatus_el1", CPENC(2,0,C7, C14,6), 0 }, /* r */ 2934 2935 { "pmcr_el0", CPENC(3,3,C9,C12, 0), 0 }, 2936 { "pmcntenset_el0", CPENC(3,3,C9,C12, 1), 0 }, 2937 { "pmcntenclr_el0", CPENC(3,3,C9,C12, 2), 0 }, 2938 { "pmovsclr_el0", CPENC(3,3,C9,C12, 3), 0 }, 2939 { "pmswinc_el0", CPENC(3,3,C9,C12, 4), 0 }, /* w */ 2940 { "pmselr_el0", CPENC(3,3,C9,C12, 5), 0 }, 2941 { "pmceid0_el0", CPENC(3,3,C9,C12, 6), 0 }, /* r */ 2942 { "pmceid1_el0", CPENC(3,3,C9,C12, 7), 0 }, /* r */ 2943 { "pmccntr_el0", CPENC(3,3,C9,C13, 0), 0 }, 2944 { "pmxevtyper_el0", CPENC(3,3,C9,C13, 1), 0 }, 2945 { "pmxevcntr_el0", CPENC(3,3,C9,C13, 2), 0 }, 2946 { "pmuserenr_el0", CPENC(3,3,C9,C14, 0), 0 }, 2947 { "pmintenset_el1", CPENC(3,0,C9,C14, 1), 0 }, 2948 { "pmintenclr_el1", CPENC(3,0,C9,C14, 2), 0 }, 2949 { "pmovsset_el0", CPENC(3,3,C9,C14, 3), 0 }, 2950 { "pmevcntr0_el0", CPENC(3,3,C14,C8, 0), 0 }, 2951 { "pmevcntr1_el0", CPENC(3,3,C14,C8, 1), 0 }, 2952 { "pmevcntr2_el0", CPENC(3,3,C14,C8, 2), 0 }, 2953 { "pmevcntr3_el0", CPENC(3,3,C14,C8, 3), 0 }, 2954 { "pmevcntr4_el0", CPENC(3,3,C14,C8, 4), 0 }, 2955 { "pmevcntr5_el0", CPENC(3,3,C14,C8, 5), 0 }, 2956 { "pmevcntr6_el0", CPENC(3,3,C14,C8, 6), 0 }, 2957 { "pmevcntr7_el0", CPENC(3,3,C14,C8, 7), 0 }, 2958 { "pmevcntr8_el0", CPENC(3,3,C14,C9, 0), 0 }, 2959 { "pmevcntr9_el0", CPENC(3,3,C14,C9, 1), 0 }, 2960 { "pmevcntr10_el0", CPENC(3,3,C14,C9, 2), 0 }, 2961 { "pmevcntr11_el0", CPENC(3,3,C14,C9, 3), 0 }, 2962 { "pmevcntr12_el0", CPENC(3,3,C14,C9, 4), 0 }, 2963 { "pmevcntr13_el0", CPENC(3,3,C14,C9, 5), 0 }, 2964 { "pmevcntr14_el0", 
CPENC(3,3,C14,C9, 6), 0 }, 2965 { "pmevcntr15_el0", CPENC(3,3,C14,C9, 7), 0 }, 2966 { "pmevcntr16_el0", CPENC(3,3,C14,C10,0), 0 }, 2967 { "pmevcntr17_el0", CPENC(3,3,C14,C10,1), 0 }, 2968 { "pmevcntr18_el0", CPENC(3,3,C14,C10,2), 0 }, 2969 { "pmevcntr19_el0", CPENC(3,3,C14,C10,3), 0 }, 2970 { "pmevcntr20_el0", CPENC(3,3,C14,C10,4), 0 }, 2971 { "pmevcntr21_el0", CPENC(3,3,C14,C10,5), 0 }, 2972 { "pmevcntr22_el0", CPENC(3,3,C14,C10,6), 0 }, 2973 { "pmevcntr23_el0", CPENC(3,3,C14,C10,7), 0 }, 2974 { "pmevcntr24_el0", CPENC(3,3,C14,C11,0), 0 }, 2975 { "pmevcntr25_el0", CPENC(3,3,C14,C11,1), 0 }, 2976 { "pmevcntr26_el0", CPENC(3,3,C14,C11,2), 0 }, 2977 { "pmevcntr27_el0", CPENC(3,3,C14,C11,3), 0 }, 2978 { "pmevcntr28_el0", CPENC(3,3,C14,C11,4), 0 }, 2979 { "pmevcntr29_el0", CPENC(3,3,C14,C11,5), 0 }, 2980 { "pmevcntr30_el0", CPENC(3,3,C14,C11,6), 0 }, 2981 { "pmevtyper0_el0", CPENC(3,3,C14,C12,0), 0 }, 2982 { "pmevtyper1_el0", CPENC(3,3,C14,C12,1), 0 }, 2983 { "pmevtyper2_el0", CPENC(3,3,C14,C12,2), 0 }, 2984 { "pmevtyper3_el0", CPENC(3,3,C14,C12,3), 0 }, 2985 { "pmevtyper4_el0", CPENC(3,3,C14,C12,4), 0 }, 2986 { "pmevtyper5_el0", CPENC(3,3,C14,C12,5), 0 }, 2987 { "pmevtyper6_el0", CPENC(3,3,C14,C12,6), 0 }, 2988 { "pmevtyper7_el0", CPENC(3,3,C14,C12,7), 0 }, 2989 { "pmevtyper8_el0", CPENC(3,3,C14,C13,0), 0 }, 2990 { "pmevtyper9_el0", CPENC(3,3,C14,C13,1), 0 }, 2991 { "pmevtyper10_el0", CPENC(3,3,C14,C13,2), 0 }, 2992 { "pmevtyper11_el0", CPENC(3,3,C14,C13,3), 0 }, 2993 { "pmevtyper12_el0", CPENC(3,3,C14,C13,4), 0 }, 2994 { "pmevtyper13_el0", CPENC(3,3,C14,C13,5), 0 }, 2995 { "pmevtyper14_el0", CPENC(3,3,C14,C13,6), 0 }, 2996 { "pmevtyper15_el0", CPENC(3,3,C14,C13,7), 0 }, 2997 { "pmevtyper16_el0", CPENC(3,3,C14,C14,0), 0 }, 2998 { "pmevtyper17_el0", CPENC(3,3,C14,C14,1), 0 }, 2999 { "pmevtyper18_el0", CPENC(3,3,C14,C14,2), 0 }, 3000 { "pmevtyper19_el0", CPENC(3,3,C14,C14,3), 0 }, 3001 { "pmevtyper20_el0", CPENC(3,3,C14,C14,4), 0 }, 3002 { "pmevtyper21_el0", CPENC(3,3,C14,C14,5), 0 }, 3003 { "pmevtyper22_el0", CPENC(3,3,C14,C14,6), 0 }, 3004 { "pmevtyper23_el0", CPENC(3,3,C14,C14,7), 0 }, 3005 { "pmevtyper24_el0", CPENC(3,3,C14,C15,0), 0 }, 3006 { "pmevtyper25_el0", CPENC(3,3,C14,C15,1), 0 }, 3007 { "pmevtyper26_el0", CPENC(3,3,C14,C15,2), 0 }, 3008 { "pmevtyper27_el0", CPENC(3,3,C14,C15,3), 0 }, 3009 { "pmevtyper28_el0", CPENC(3,3,C14,C15,4), 0 }, 3010 { "pmevtyper29_el0", CPENC(3,3,C14,C15,5), 0 }, 3011 { "pmevtyper30_el0", CPENC(3,3,C14,C15,6), 0 }, 3012 { "pmccfiltr_el0", CPENC(3,3,C14,C15,7), 0 }, 3013 { 0, CPENC(0,0,0,0,0), 0 }, 3014 }; 3015 3016 bfd_boolean 3017 aarch64_sys_reg_deprecated_p (const aarch64_sys_reg *reg) 3018 { 3019 return (reg->flags & F_DEPRECATED) != 0; 3020 } 3021 3022 const aarch64_sys_reg aarch64_pstatefields [] = 3023 { 3024 { "spsel", 0x05, 0 }, 3025 { "daifset", 0x1e, 0 }, 3026 { "daifclr", 0x1f, 0 }, 3027 { 0, CPENC(0,0,0,0,0), 0 }, 3028 }; 3029 3030 const aarch64_sys_ins_reg aarch64_sys_regs_ic[] = 3031 { 3032 { "ialluis", CPENS(0,C7,C1,0), 0 }, 3033 { "iallu", CPENS(0,C7,C5,0), 0 }, 3034 { "ivau", CPENS(3,C7,C5,1), 1 }, 3035 { 0, CPENS(0,0,0,0), 0 } 3036 }; 3037 3038 const aarch64_sys_ins_reg aarch64_sys_regs_dc[] = 3039 { 3040 { "zva", CPENS(3,C7,C4,1), 1 }, 3041 { "ivac", CPENS(0,C7,C6,1), 1 }, 3042 { "isw", CPENS(0,C7,C6,2), 1 }, 3043 { "cvac", CPENS(3,C7,C10,1), 1 }, 3044 { "csw", CPENS(0,C7,C10,2), 1 }, 3045 { "cvau", CPENS(3,C7,C11,1), 1 }, 3046 { "civac", CPENS(3,C7,C14,1), 1 }, 3047 { "cisw", CPENS(0,C7,C14,2), 1 }, 3048 { 0, CPENS(0,0,0,0), 0 } 3049 }; 
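/* A worked example of the encoding used above (values taken from the "civac"
   entry in the DC table): CPENS(3,C7,C14,1) is CPENC(1,3,C7,C14,1), i.e.
   ((1 << 19) | (3 << 16) | (7 << 12) | (14 << 8) | (1 << 5)) >> 5 = 0x5bf1.
   After the final shift the value packs op0:op1:CRn:CRm:op2 into bits 15:14,
   13:11, 10:7, 6:3 and 2:0, which matches the field extraction done for
   implementation-defined system registers in aarch64_print_operand.  */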
3050 3051 const aarch64_sys_ins_reg aarch64_sys_regs_at[] = 3052 { 3053 { "s1e1r", CPENS(0,C7,C8,0), 1 }, 3054 { "s1e1w", CPENS(0,C7,C8,1), 1 }, 3055 { "s1e0r", CPENS(0,C7,C8,2), 1 }, 3056 { "s1e0w", CPENS(0,C7,C8,3), 1 }, 3057 { "s12e1r", CPENS(4,C7,C8,4), 1 }, 3058 { "s12e1w", CPENS(4,C7,C8,5), 1 }, 3059 { "s12e0r", CPENS(4,C7,C8,6), 1 }, 3060 { "s12e0w", CPENS(4,C7,C8,7), 1 }, 3061 { "s1e2r", CPENS(4,C7,C8,0), 1 }, 3062 { "s1e2w", CPENS(4,C7,C8,1), 1 }, 3063 { "s1e3r", CPENS(6,C7,C8,0), 1 }, 3064 { "s1e3w", CPENS(6,C7,C8,1), 1 }, 3065 { 0, CPENS(0,0,0,0), 0 } 3066 }; 3067 3068 const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] = 3069 { 3070 { "vmalle1", CPENS(0,C8,C7,0), 0 }, 3071 { "vae1", CPENS(0,C8,C7,1), 1 }, 3072 { "aside1", CPENS(0,C8,C7,2), 1 }, 3073 { "vaae1", CPENS(0,C8,C7,3), 1 }, 3074 { "vmalle1is", CPENS(0,C8,C3,0), 0 }, 3075 { "vae1is", CPENS(0,C8,C3,1), 1 }, 3076 { "aside1is", CPENS(0,C8,C3,2), 1 }, 3077 { "vaae1is", CPENS(0,C8,C3,3), 1 }, 3078 { "ipas2e1is", CPENS(4,C8,C0,1), 1 }, 3079 { "ipas2le1is",CPENS(4,C8,C0,5), 1 }, 3080 { "ipas2e1", CPENS(4,C8,C4,1), 1 }, 3081 { "ipas2le1", CPENS(4,C8,C4,5), 1 }, 3082 { "vae2", CPENS(4,C8,C7,1), 1 }, 3083 { "vae2is", CPENS(4,C8,C3,1), 1 }, 3084 { "vmalls12e1",CPENS(4,C8,C7,6), 0 }, 3085 { "vmalls12e1is",CPENS(4,C8,C3,6), 0 }, 3086 { "vae3", CPENS(6,C8,C7,1), 1 }, 3087 { "vae3is", CPENS(6,C8,C3,1), 1 }, 3088 { "alle2", CPENS(4,C8,C7,0), 0 }, 3089 { "alle2is", CPENS(4,C8,C3,0), 0 }, 3090 { "alle1", CPENS(4,C8,C7,4), 0 }, 3091 { "alle1is", CPENS(4,C8,C3,4), 0 }, 3092 { "alle3", CPENS(6,C8,C7,0), 0 }, 3093 { "alle3is", CPENS(6,C8,C3,0), 0 }, 3094 { "vale1is", CPENS(0,C8,C3,5), 1 }, 3095 { "vale2is", CPENS(4,C8,C3,5), 1 }, 3096 { "vale3is", CPENS(6,C8,C3,5), 1 }, 3097 { "vaale1is", CPENS(0,C8,C3,7), 1 }, 3098 { "vale1", CPENS(0,C8,C7,5), 1 }, 3099 { "vale2", CPENS(4,C8,C7,5), 1 }, 3100 { "vale3", CPENS(6,C8,C7,5), 1 }, 3101 { "vaale1", CPENS(0,C8,C7,7), 1 }, 3102 { 0, CPENS(0,0,0,0), 0 } 3103 }; 3104 3105 #undef C0 3106 #undef C1 3107 #undef C2 3108 #undef C3 3109 #undef C4 3110 #undef C5 3111 #undef C6 3112 #undef C7 3113 #undef C8 3114 #undef C9 3115 #undef C10 3116 #undef C11 3117 #undef C12 3118 #undef C13 3119 #undef C14 3120 #undef C15 3121 3122 /* Include the opcode description table as well as the operand description 3123 table. */ 3124 #include "aarch64-tbl.h" 3125
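#ifdef DEBUG_AARCH64
/* Illustrative sketch only (the function name below is hypothetical and
   ATTRIBUTE_UNUSED is assumed to be available from the usual binutils
   headers): it shows how expand_fp_imm maps the 8-bit encoding documented
   above to an IEEE value, e.g. imm8 0x70 expands to the single-precision
   bit pattern 0x3f800000, i.e. 1.0.  */

static void ATTRIBUTE_UNUSED
debug_check_expand_fp_imm (void)
{
  single_conv_t c;

  /* imm8 = 0x70: sign 0, imm8<6:4> = 111, imm8<3:0> = 0000 -> 1.0f.  */
  c.i = (uint32_t) expand_fp_imm (0 /* is_dp */, 0x70);
  assert (c.i == 0x3f800000);
  assert (c.f == 1.0f);
}
#endif /* DEBUG_AARCH64 */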