/* aarch64-asm.c -- AArch64 assembler support.
   Copyright (C) 2012-2022 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of the GNU opcodes library.

   This library is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   It is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING3.  If not,
   see <http://www.gnu.org/licenses/>.  */

#include "sysdep.h"
#include <stdarg.h>
#include "libiberty.h"
#include "aarch64-asm.h"
#include "opintl.h"

/* Utilities.  */

/* The unnamed arguments consist of the number of fields and information about
   these fields where the VALUE will be inserted into CODE.  MASK can be zero
   or the base mask of the opcode.

   N.B. the fields are required to be in such an order that the least
   significant field for VALUE comes first, e.g. the <index> in
     SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
   is encoded in H:L:M in some cases; there the fields H:L:M should be passed
   in the order of M, L, H.  */

static inline void
insert_fields (aarch64_insn *code, aarch64_insn value, aarch64_insn mask, ...)
{
  uint32_t num;
  const aarch64_field *field;
  enum aarch64_field_kind kind;
  va_list va;

  va_start (va, mask);
  num = va_arg (va, uint32_t);
  assert (num <= 5);
  while (num--)
    {
      kind = va_arg (va, enum aarch64_field_kind);
      field = &fields[kind];
      insert_field (kind, code, value, mask);
      value >>= field->width;
    }
  va_end (va);
}

/* Insert a raw field value VALUE into all fields in SELF->fields.
   The least significant bit goes in the final field.  */

static void
insert_all_fields (const aarch64_operand *self, aarch64_insn *code,
                   aarch64_insn value)
{
  unsigned int i;
  enum aarch64_field_kind kind;

  for (i = ARRAY_SIZE (self->fields); i-- > 0; )
    if (self->fields[i] != FLD_NIL)
      {
        kind = self->fields[i];
        insert_field (kind, code, value, 0);
        value >>= fields[kind].width;
      }
}

/* Operand inserters.  */

/* Insert nothing.  */
bool
aarch64_ins_none (const aarch64_operand *self ATTRIBUTE_UNUSED,
                  const aarch64_opnd_info *info ATTRIBUTE_UNUSED,
                  aarch64_insn *code ATTRIBUTE_UNUSED,
                  const aarch64_inst *inst ATTRIBUTE_UNUSED,
                  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  return true;
}

/* Insert register number.  */
bool
aarch64_ins_regno (const aarch64_operand *self, const aarch64_opnd_info *info,
                   aarch64_insn *code,
                   const aarch64_inst *inst ATTRIBUTE_UNUSED,
                   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  insert_field (self->fields[0], code, info->reg.regno, 0);
  return true;
}
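
/* Worked example (illustrative comment only, not used by the code): to
   encode lane index 5 (binary 101) in the H:L:M fields with the utility
   above, the fields are passed least significant first:

     insert_fields (code, 5, 0, 3, FLD_M, FLD_L, FLD_H);

   which writes M = 1, shifts VALUE right by M's width, writes L = 0 and
   finally H = 1, so the instruction holds H:L:M = 101.  */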

/* Insert register number, index and/or other data for SIMD register element
   operand, e.g. the last source operand in
     SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].  */
bool
aarch64_ins_reglane (const aarch64_operand *self, const aarch64_opnd_info *info,
                     aarch64_insn *code, const aarch64_inst *inst,
                     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* regno */
  insert_field (self->fields[0], code, info->reglane.regno, inst->opcode->mask);
  /* index and/or type */
  if (inst->opcode->iclass == asisdone || inst->opcode->iclass == asimdins)
    {
      int pos = info->qualifier - AARCH64_OPND_QLF_S_B;
      if (info->type == AARCH64_OPND_En
          && inst->opcode->operands[0] == AARCH64_OPND_Ed)
        {
          /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>].  */
          assert (info->idx == 1);	/* Vn */
          aarch64_insn value = info->reglane.index << pos;
          insert_field (FLD_imm4, code, value, 0);
        }
      else
        {
          /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
             imm5<3:0>	<V>
             0000	RESERVED
             xxx1	B
             xx10	H
             x100	S
             1000	D  */
          aarch64_insn value = ((info->reglane.index << 1) | 1) << pos;
          insert_field (FLD_imm5, code, value, 0);
        }
    }
  else if (inst->opcode->iclass == dotproduct)
    {
      unsigned reglane_index = info->reglane.index;
      switch (info->qualifier)
        {
        case AARCH64_OPND_QLF_S_4B:
        case AARCH64_OPND_QLF_S_2H:
          /* L:H */
          assert (reglane_index < 4);
          insert_fields (code, reglane_index, 0, 2, FLD_L, FLD_H);
          break;
        default:
          return false;
        }
    }
  else if (inst->opcode->iclass == cryptosm3)
    {
      /* index for e.g. SM3TT2A <Vd>.4S, <Vn>.4S, <Vm>.S[<imm2>].  */
      unsigned reglane_index = info->reglane.index;
      assert (reglane_index < 4);
      insert_field (FLD_SM3_imm2, code, reglane_index, 0);
    }
  else
    {
      /* index for e.g. the scalar form
           SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
         or the vector form
           SQDMLAL <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].  */
      unsigned reglane_index = info->reglane.index;

      if (inst->opcode->op == OP_FCMLA_ELEM)
        /* Complex operand takes two elements.  */
        reglane_index *= 2;

      switch (info->qualifier)
        {
        case AARCH64_OPND_QLF_S_H:
          /* H:L:M */
          assert (reglane_index < 8);
          insert_fields (code, reglane_index, 0, 3, FLD_M, FLD_L, FLD_H);
          break;
        case AARCH64_OPND_QLF_S_S:
          /* H:L */
          assert (reglane_index < 4);
          insert_fields (code, reglane_index, 0, 2, FLD_L, FLD_H);
          break;
        case AARCH64_OPND_QLF_S_D:
          /* H */
          assert (reglane_index < 2);
          insert_field (FLD_H, code, reglane_index, 0);
          break;
        default:
          return false;
        }
    }
  return true;
}

/* Insert regno and len field of a register list operand, e.g. Vn in TBL.  */
bool
aarch64_ins_reglist (const aarch64_operand *self, const aarch64_opnd_info *info,
                     aarch64_insn *code,
                     const aarch64_inst *inst ATTRIBUTE_UNUSED,
                     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* R */
  insert_field (self->fields[0], code, info->reglist.first_regno, 0);
  /* len */
  insert_field (FLD_len, code, info->reglist.num_regs - 1, 0);
  return true;
}
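
/* Worked example (illustrative only): for DUP <V><d>, <Vn>.S[3] in
   aarch64_ins_reglane above, the S qualifier gives pos = 2, so
   imm5 = ((3 << 1) | 1) << 2 = 0b11100: the low bits "100" select the
   S element size and imm5<4:3> carry the index 3, matching the imm5
   table in that function.  */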

/* Insert Rt and opcode fields for a register list operand, e.g. Vt
   in AdvSIMD load/store instructions.  */
bool
aarch64_ins_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
                          const aarch64_opnd_info *info, aarch64_insn *code,
                          const aarch64_inst *inst,
                          aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn value = 0;
  /* Number of elements in each structure to be loaded/stored.  */
  unsigned num = get_opcode_dependent_value (inst->opcode);

  /* Rt */
  insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
  /* opcode */
  switch (num)
    {
    case 1:
      switch (info->reglist.num_regs)
        {
        case 1: value = 0x7; break;
        case 2: value = 0xa; break;
        case 3: value = 0x6; break;
        case 4: value = 0x2; break;
        default: return false;
        }
      break;
    case 2:
      value = info->reglist.num_regs == 4 ? 0x3 : 0x8;
      break;
    case 3:
      value = 0x4;
      break;
    case 4:
      value = 0x0;
      break;
    default:
      return false;
    }
  insert_field (FLD_opcode, code, value, 0);

  return true;
}

/* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD load
   single structure to all lanes instructions.  */
bool
aarch64_ins_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
                            const aarch64_opnd_info *info, aarch64_insn *code,
                            const aarch64_inst *inst,
                            aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn value;
  /* The opcode dependent area stores the number of elements in
     each structure to be loaded/stored.  */
  int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;

  /* Rt */
  insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
  /* S */
  value = (aarch64_insn) 0;
  if (is_ld1r && info->reglist.num_regs == 2)
    /* OP_LD1R does not have an alternating variant, but has a
       "two consecutive" variant instead.  */
    value = (aarch64_insn) 1;
  insert_field (FLD_S, code, value, 0);

  return true;
}

/* Insert Q, opcode<2:1>, S, size and Rt fields for a register element list
   operand, e.g. Vt in AdvSIMD load/store single element instructions.  */
bool
aarch64_ins_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
                           const aarch64_opnd_info *info, aarch64_insn *code,
                           const aarch64_inst *inst ATTRIBUTE_UNUSED,
                           aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_field field = {0, 0};
  aarch64_insn QSsize = 0;	/* fields Q:S:size.  */
  aarch64_insn opcodeh2 = 0;	/* opcode<2:1> */

  assert (info->reglist.has_index);

  /* Rt */
  insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
  /* Encode the index, opcode<2:1> and size.  */
  switch (info->qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      /* Index encoded in "Q:S:size".  */
      QSsize = info->reglist.index;
      opcodeh2 = 0x0;
      break;
    case AARCH64_OPND_QLF_S_H:
      /* Index encoded in "Q:S:size<1>".  */
      QSsize = info->reglist.index << 1;
      opcodeh2 = 0x1;
      break;
    case AARCH64_OPND_QLF_S_S:
      /* Index encoded in "Q:S".  */
      QSsize = info->reglist.index << 2;
      opcodeh2 = 0x2;
      break;
    case AARCH64_OPND_QLF_S_D:
      /* Index encoded in "Q".  */
      QSsize = info->reglist.index << 3 | 0x1;
      opcodeh2 = 0x2;
      break;
    default:
      return false;
    }
  insert_fields (code, QSsize, 0, 3, FLD_vldst_size, FLD_S, FLD_Q);
  gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
  insert_field_2 (&field, code, opcodeh2, 0);

  return true;
}
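
/* Worked example (illustrative only): for LD1 {<Vt>.H}[5] the index 5 is
   encoded by aarch64_ins_ldst_elemlist above as QSsize = 5 << 1 = 0b1010,
   so Q = 1, S = 0 and size = 0b10; the index therefore lives in
   "Q:S:size<1>" with size<0> left clear, as the H case requires.  */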

/* Insert fields immh:immb and/or Q for e.g. the shift immediate in
     SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
   or
     SSHR <V><d>, <V><n>, #<shift>.  */
bool
aarch64_ins_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
                               const aarch64_opnd_info *info,
                               aarch64_insn *code, const aarch64_inst *inst,
                               aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  unsigned val = aarch64_get_qualifier_standard_value (info->qualifier);
  aarch64_insn Q, imm;

  if (inst->opcode->iclass == asimdshf)
    {
      /* Q
         immh	Q	<T>
         0000	x	SEE AdvSIMD modified immediate
         0001	0	8B
         0001	1	16B
         001x	0	4H
         001x	1	8H
         01xx	0	2S
         01xx	1	4S
         1xxx	0	RESERVED
         1xxx	1	2D  */
      Q = (val & 0x1) ? 1 : 0;
      insert_field (FLD_Q, code, Q, inst->opcode->mask);
      val >>= 1;
    }

  assert (info->type == AARCH64_OPND_IMM_VLSR
          || info->type == AARCH64_OPND_IMM_VLSL);

  if (info->type == AARCH64_OPND_IMM_VLSR)
    /* immh:immb
       immh	<shift>
       0000	SEE AdvSIMD modified immediate
       0001	(16-UInt(immh:immb))
       001x	(32-UInt(immh:immb))
       01xx	(64-UInt(immh:immb))
       1xxx	(128-UInt(immh:immb))  */
    imm = (16 << (unsigned) val) - info->imm.value;
  else
    /* immh:immb
       immh	<shift>
       0000	SEE AdvSIMD modified immediate
       0001	(UInt(immh:immb)-8)
       001x	(UInt(immh:immb)-16)
       01xx	(UInt(immh:immb)-32)
       1xxx	(UInt(immh:immb)-64)  */
    imm = info->imm.value + (8 << (unsigned) val);
  insert_fields (code, imm, 0, 2, FLD_immb, FLD_immh);

  return true;
}

/* Insert fields for e.g. the immediate operands in
     BFM <Wd>, <Wn>, #<immr>, #<imms>.  */
bool
aarch64_ins_imm (const aarch64_operand *self, const aarch64_opnd_info *info,
                 aarch64_insn *code,
                 const aarch64_inst *inst ATTRIBUTE_UNUSED,
                 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int64_t imm;

  imm = info->imm.value;
  if (operand_need_shift_by_two (self))
    imm >>= 2;
  if (operand_need_shift_by_four (self))
    imm >>= 4;
  insert_all_fields (self, code, imm);
  return true;
}

/* Insert immediate and its shift amount for e.g. the last operand in
     MOVZ <Wd>, #<imm16>{, LSL #<shift>}.  */
bool
aarch64_ins_imm_half (const aarch64_operand *self, const aarch64_opnd_info *info,
                      aarch64_insn *code, const aarch64_inst *inst,
                      aarch64_operand_error *errors)
{
  /* imm16 */
  aarch64_ins_imm (self, info, code, inst, errors);
  /* hw */
  insert_field (FLD_hw, code, info->shifter.amount >> 4, 0);
  return true;
}
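
/* Worked example (illustrative only): SSHR <Vd>.4H, <Vn>.4H, #4 has the
   qualifier standard value 0b010, so aarch64_ins_advsimd_imm_shift above
   emits Q = 0 and, after the shift, val = 1; the encoded immediate is
   immh:immb = (16 << 1) - 4 = 28, i.e. immh = 0b0011 (an "001x" 16-bit
   element pattern) and immb = 0b100.  */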

/* Insert cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand in
     MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}.  */
bool
aarch64_ins_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
                                  const aarch64_opnd_info *info,
                                  aarch64_insn *code,
                                  const aarch64_inst *inst,
                                  aarch64_operand_error *errors
                                  ATTRIBUTE_UNUSED)
{
  enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
  uint64_t imm = info->imm.value;
  enum aarch64_modifier_kind kind = info->shifter.kind;
  int amount = info->shifter.amount;
  aarch64_field field = {0, 0};

  /* a:b:c:d:e:f:g:h */
  if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
    {
      /* Either MOVI <Dd>, #<imm>
         or     MOVI <Vd>.2D, #<imm>.
         <imm> is a 64-bit immediate
         "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
         encoded in "a:b:c:d:e:f:g:h".  */
      imm = aarch64_shrink_expanded_imm8 (imm);
      assert ((int) imm >= 0);
    }
  insert_fields (code, imm, 0, 2, FLD_defgh, FLD_abc);

  if (kind == AARCH64_MOD_NONE)
    return true;

  /* shift amount partially in cmode */
  assert (kind == AARCH64_MOD_LSL || kind == AARCH64_MOD_MSL);
  if (kind == AARCH64_MOD_LSL)
    {
      /* AARCH64_MOD_LSL: shift zeros.  */
      int esize = aarch64_get_qualifier_esize (opnd0_qualifier);
      assert (esize == 4 || esize == 2 || esize == 1);
      /* For 8-bit move immediate, the optional LSL #0 does not require
         encoding.  */
      if (esize == 1)
        return true;
      amount >>= 3;
      if (esize == 4)
        gen_sub_field (FLD_cmode, 1, 2, &field);	/* per word */
      else
        gen_sub_field (FLD_cmode, 1, 1, &field);	/* per halfword */
    }
  else
    {
      /* AARCH64_MOD_MSL: shift ones.  */
      amount >>= 4;
      gen_sub_field (FLD_cmode, 0, 1, &field);		/* per word */
    }
  insert_field_2 (&field, code, amount, 0);

  return true;
}

/* Insert fields for an 8-bit floating-point immediate.  */
bool
aarch64_ins_fpimm (const aarch64_operand *self, const aarch64_opnd_info *info,
                   aarch64_insn *code,
                   const aarch64_inst *inst ATTRIBUTE_UNUSED,
                   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  insert_all_fields (self, code, info->imm.value);
  return true;
}

/* Insert 1-bit rotation immediate (#90 or #270).  */
bool
aarch64_ins_imm_rotate1 (const aarch64_operand *self,
                         const aarch64_opnd_info *info,
                         aarch64_insn *code, const aarch64_inst *inst,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  uint64_t rot = (info->imm.value - 90) / 180;
  assert (rot < 2U);
  insert_field (self->fields[0], code, rot, inst->opcode->mask);
  return true;
}

/* Insert 2-bit rotation immediate (#0, #90, #180 or #270).  */
bool
aarch64_ins_imm_rotate2 (const aarch64_operand *self,
                         const aarch64_opnd_info *info,
                         aarch64_insn *code, const aarch64_inst *inst,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  uint64_t rot = info->imm.value / 90;
  assert (rot < 4U);
  insert_field (self->fields[0], code, rot, inst->opcode->mask);
  return true;
}

/* Insert #<fbits> for the immediate operand in fp fix-point instructions,
   e.g. SCVTF <Dd>, <Wn>, #<fbits>.  */
bool
aarch64_ins_fbits (const aarch64_operand *self, const aarch64_opnd_info *info,
                   aarch64_insn *code,
                   const aarch64_inst *inst ATTRIBUTE_UNUSED,
                   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  insert_field (self->fields[0], code, 64 - info->imm.value, 0);
  return true;
}

/* Insert arithmetic immediate for e.g. the last operand in
     SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}.  */
bool
aarch64_ins_aimm (const aarch64_operand *self, const aarch64_opnd_info *info,
                  aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
                  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* shift */
  aarch64_insn value = info->shifter.amount ? 1 : 0;
  insert_field (self->fields[0], code, value, 0);
  /* imm12 (unsigned) */
  insert_field (self->fields[1], code, info->imm.value, 0);
  return true;
}
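
/* Worked example (illustrative only): MOVI <Vd>.4S, #0xab, LSL #16 stores
   0xab in "a:b:c:d:e:f:g:h" and aarch64_ins_advsimd_imm_modified above
   encodes the shift as amount >> 3 = 2 in the two-bit cmode<2:1>
   sub-field, giving cmode = 0b0100 for the per-word LSL #16 form.  */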

/* Common routine shared by aarch64_ins{,_inv}_limm.  INVERT_P says whether
   the operand should be inverted before encoding.  */
static bool
aarch64_ins_limm_1 (const aarch64_operand *self,
                    const aarch64_opnd_info *info, aarch64_insn *code,
                    const aarch64_inst *inst, bool invert_p,
                    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  bool res;
  aarch64_insn value;
  uint64_t imm = info->imm.value;
  int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);

  if (invert_p)
    imm = ~imm;
  /* The constraint check should guarantee that this will work.  */
  res = aarch64_logical_immediate_p (imm, esize, &value);
  if (res)
    insert_fields (code, value, 0, 3, self->fields[2], self->fields[1],
                   self->fields[0]);
  return res;
}

/* Insert logical/bitmask immediate for e.g. the last operand in
     ORR <Wd|WSP>, <Wn>, #<imm>.  */
bool
aarch64_ins_limm (const aarch64_operand *self, const aarch64_opnd_info *info,
                  aarch64_insn *code, const aarch64_inst *inst,
                  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  return aarch64_ins_limm_1 (self, info, code, inst,
                             inst->opcode->op == OP_BIC, errors);
}

/* Insert a logical/bitmask immediate for the BIC alias of AND (etc.).  */
bool
aarch64_ins_inv_limm (const aarch64_operand *self,
                      const aarch64_opnd_info *info, aarch64_insn *code,
                      const aarch64_inst *inst,
                      aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  return aarch64_ins_limm_1 (self, info, code, inst, true, errors);
}

/* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
   or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>.  */
bool
aarch64_ins_ft (const aarch64_operand *self, const aarch64_opnd_info *info,
                aarch64_insn *code, const aarch64_inst *inst,
                aarch64_operand_error *errors)
{
  aarch64_insn value = 0;

  assert (info->idx == 0);

  /* Rt */
  aarch64_ins_regno (self, info, code, inst, errors);
  if (inst->opcode->iclass == ldstpair_indexed
      || inst->opcode->iclass == ldstnapair_offs
      || inst->opcode->iclass == ldstpair_off
      || inst->opcode->iclass == loadlit)
    {
      /* size */
      switch (info->qualifier)
        {
        case AARCH64_OPND_QLF_S_S: value = 0; break;
        case AARCH64_OPND_QLF_S_D: value = 1; break;
        case AARCH64_OPND_QLF_S_Q: value = 2; break;
        default: return false;
        }
      insert_field (FLD_ldst_size, code, value, 0);
    }
  else
    {
      /* opc[1]:size */
      value = aarch64_get_qualifier_standard_value (info->qualifier);
      insert_fields (code, value, 0, 2, FLD_ldst_size, FLD_opc1);
    }

  return true;
}

/* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}].  */
bool
aarch64_ins_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
                         const aarch64_opnd_info *info, aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  return true;
}
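
/* Worked example (illustrative only, assuming the usual N, immr, imms
   order in self->fields): ORR <Xd>, <Xn>, #0xff is a 64-bit bitmask
   immediate of eight consecutive ones, for which
   aarch64_logical_immediate_p produces N = 1, immr = 0, imms = 0b000111;
   aarch64_ins_limm_1 above then writes imms first as the least
   significant slice of that packed value.  */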

/* Encode the address operand for e.g.
     STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
bool
aarch64_ins_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
                         const aarch64_opnd_info *info, aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn S;
  enum aarch64_modifier_kind kind = info->shifter.kind;

  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  /* Rm */
  insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
  /* option */
  if (kind == AARCH64_MOD_LSL)
    kind = AARCH64_MOD_UXTX;	/* Trick to enable the table-driven lookup.  */
  insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
  /* S */
  if (info->qualifier != AARCH64_OPND_QLF_S_B)
    S = info->shifter.amount != 0;
  else
    /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]:
         S	<amount>
         0	[absent]
         1	#0
       Must be #0 if <extend> is explicitly LSL.  */
    S = info->shifter.operator_present && info->shifter.amount_present;
  insert_field (FLD_S, code, S, 0);

  return true;
}

/* Encode the address operand for e.g.
     STLUR <Xt>, [<Xn|SP>{, <amount>}].  */
bool
aarch64_ins_addr_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
                         const aarch64_opnd_info *info, aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* Rn */
  insert_field (self->fields[0], code, info->addr.base_regno, 0);

  /* simm9 */
  int imm = info->addr.offset.imm;
  insert_field (self->fields[1], code, imm, 0);

  /* writeback */
  if (info->addr.writeback)
    {
      assert (info->addr.preind == 1 && info->addr.postind == 0);
      insert_field (self->fields[2], code, 1, 0);
    }
  return true;
}

/* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!.  */
bool
aarch64_ins_addr_simm (const aarch64_operand *self,
                       const aarch64_opnd_info *info,
                       aarch64_insn *code,
                       const aarch64_inst *inst ATTRIBUTE_UNUSED,
                       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int imm;

  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  /* simm (imm9 or imm7) */
  imm = info->addr.offset.imm;
  if (self->fields[0] == FLD_imm7
      || info->qualifier == AARCH64_OPND_QLF_imm_tag)
    /* Scaled immediate in ld/st pair instructions.  */
    imm >>= get_logsz (aarch64_get_qualifier_esize (info->qualifier));
  insert_field (self->fields[0], code, imm, 0);
  /* pre/post-index */
  if (info->addr.writeback)
    {
      assert (inst->opcode->iclass != ldst_unscaled
              && inst->opcode->iclass != ldstnapair_offs
              && inst->opcode->iclass != ldstpair_off
              && inst->opcode->iclass != ldst_unpriv);
      assert (info->addr.preind != info->addr.postind);
      if (info->addr.preind)
        insert_field (self->fields[1], code, 1, 0);
    }

  return true;
}
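
/* Worked example (illustrative only): LDP <Xt1>, <Xt2>, [<Xn|SP>, #16]
   uses a scaled 7-bit immediate; the X qualifier has an 8-byte element
   size, so aarch64_ins_addr_simm above stores
   16 >> get_logsz (8) = 16 >> 3 = 2 in the imm7 field.  */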

/* Encode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}].  */
bool
aarch64_ins_addr_simm10 (const aarch64_operand *self,
                         const aarch64_opnd_info *info,
                         aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int imm;

  /* Rn */
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  /* simm10 */
  imm = info->addr.offset.imm >> 3;
  insert_field (self->fields[1], code, imm >> 9, 0);
  insert_field (self->fields[2], code, imm, 0);
  /* writeback */
  if (info->addr.writeback)
    {
      assert (info->addr.preind == 1 && info->addr.postind == 0);
      insert_field (self->fields[3], code, 1, 0);
    }
  return true;
}

/* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}].  */
bool
aarch64_ins_addr_uimm12 (const aarch64_operand *self,
                         const aarch64_opnd_info *info,
                         aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));

  /* Rn */
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  /* uimm12 */
  insert_field (self->fields[1], code, info->addr.offset.imm >> shift, 0);
  return true;
}

/* Encode the address operand for e.g.
     LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>.  */
bool
aarch64_ins_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
                            const aarch64_opnd_info *info, aarch64_insn *code,
                            const aarch64_inst *inst ATTRIBUTE_UNUSED,
                            aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  /* Rm | #<amount> */
  if (info->addr.offset.is_reg)
    insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
  else
    insert_field (FLD_Rm, code, 0x1f, 0);
  return true;
}

/* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>.  */
bool
aarch64_ins_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
                  const aarch64_opnd_info *info, aarch64_insn *code,
                  const aarch64_inst *inst ATTRIBUTE_UNUSED,
                  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* cond */
  insert_field (FLD_cond, code, info->cond->value, 0);
  return true;
}
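
/* Worked example (illustrative only): LDRAA <Xt>, [<Xn|SP>, #-8] scales
   the offset in aarch64_ins_addr_simm10 above to imm = -8 >> 3 = -1; the
   sign bit imm >> 9 goes into the single-bit field and the low nine bits
   (0x1ff here) into the 9-bit field, reassembling the signed simm10.  */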

/* Encode the system register operand for e.g. MRS <Xt>, <systemreg>.  */
bool
aarch64_ins_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
                    const aarch64_opnd_info *info, aarch64_insn *code,
                    const aarch64_inst *inst,
                    aarch64_operand_error *detail)
{
  /* If this is a system instruction, check whether there are any
     restrictions on which registers it can use.  */
  if (inst->opcode->iclass == ic_system)
    {
      uint64_t opcode_flags
        = inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE);
      uint32_t sysreg_flags
        = info->sysreg.flags & (F_REG_READ | F_REG_WRITE);

      /* Check whether the register is read-only, then whether it is
         write-only; if it is both or unspecified, we don't care.  */
      if (opcode_flags == F_SYS_READ
          && sysreg_flags
          && sysreg_flags != F_REG_READ)
        {
          detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
          detail->error = _("specified register cannot be read from");
          detail->index = info->idx;
          detail->non_fatal = true;
        }
      else if (opcode_flags == F_SYS_WRITE
               && sysreg_flags
               && sysreg_flags != F_REG_WRITE)
        {
          detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
          detail->error = _("specified register cannot be written to");
          detail->index = info->idx;
          detail->non_fatal = true;
        }
    }
  /* op0:op1:CRn:CRm:op2 */
  insert_fields (code, info->sysreg.value, inst->opcode->mask, 5,
                 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1, FLD_op0);
  return true;
}

/* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>.  */
bool
aarch64_ins_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
                         const aarch64_opnd_info *info, aarch64_insn *code,
                         const aarch64_inst *inst,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* op1:op2 */
  insert_fields (code, info->pstatefield, inst->opcode->mask, 2,
                 FLD_op2, FLD_op1);

  /* Extra CRm mask.  */
  if (info->sysreg.flags & F_REG_IN_CRM)
    insert_field (FLD_CRm, code, PSTATE_DECODE_CRM (info->sysreg.flags), 0);
  return true;
}

/* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>.  */
bool
aarch64_ins_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
                       const aarch64_opnd_info *info, aarch64_insn *code,
                       const aarch64_inst *inst,
                       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* op1:CRn:CRm:op2 */
  insert_fields (code, info->sysins_op->value, inst->opcode->mask, 4,
                 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1);
  return true;
}

/* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>.  */

bool
aarch64_ins_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
                     const aarch64_opnd_info *info, aarch64_insn *code,
                     const aarch64_inst *inst ATTRIBUTE_UNUSED,
                     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* CRm */
  insert_field (FLD_CRm, code, info->barrier->value, 0);
  return true;
}

/* Encode the memory barrier option operand for DSB <option>nXS|#<imm>.  */

bool
aarch64_ins_barrier_dsb_nxs (const aarch64_operand *self ATTRIBUTE_UNUSED,
                             const aarch64_opnd_info *info, aarch64_insn *code,
                             const aarch64_inst *inst ATTRIBUTE_UNUSED,
                             aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* For the DSB nXS barrier variant the option is a 5-bit unsigned
     immediate, encoded in CRm<3:2>.  */
  aarch64_insn value = (info->barrier->value >> 2) - 4;
  insert_field (FLD_CRm_dsb_nxs, code, value, 0);
  return true;
}

/* Encode the prefetch operation option operand for e.g.
     PRFM <prfop>, [<Xn|SP>{, #<pimm>}].  */

bool
aarch64_ins_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
                   const aarch64_opnd_info *info, aarch64_insn *code,
                   const aarch64_inst *inst ATTRIBUTE_UNUSED,
                   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* prfop in Rt */
  insert_field (FLD_Rt, code, info->prfop->value, 0);
  return true;
}
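
/* Worked example (illustrative only): the NZCV register is op0 = 3,
   op1 = 3, CRn = 4, CRm = 2, op2 = 0, i.e. the packed value 0xda10;
   aarch64_ins_sysreg above writes op2 first, then CRm, CRn, op1 and op0
   as successively more significant slices of that value.  */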

/* Encode the hint number for instructions that alias HINT but take an
   operand.  */

bool
aarch64_ins_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
                  const aarch64_opnd_info *info, aarch64_insn *code,
                  const aarch64_inst *inst ATTRIBUTE_UNUSED,
                  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* CRm:op2.  */
  insert_fields (code, info->hint_option->value, 0, 2, FLD_op2, FLD_CRm);
  return true;
}

/* Encode the extended register operand for e.g.
     STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
bool
aarch64_ins_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
                          const aarch64_opnd_info *info, aarch64_insn *code,
                          const aarch64_inst *inst ATTRIBUTE_UNUSED,
                          aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  enum aarch64_modifier_kind kind;

  /* Rm */
  insert_field (FLD_Rm, code, info->reg.regno, 0);
  /* option */
  kind = info->shifter.kind;
  if (kind == AARCH64_MOD_LSL)
    kind = info->qualifier == AARCH64_OPND_QLF_W
      ? AARCH64_MOD_UXTW : AARCH64_MOD_UXTX;
  insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
  /* imm3 */
  insert_field (FLD_imm3, code, info->shifter.amount, 0);

  return true;
}

/* Encode the shifted register operand for e.g.
     SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}.  */
bool
aarch64_ins_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
                         const aarch64_opnd_info *info, aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* Rm */
  insert_field (FLD_Rm, code, info->reg.regno, 0);
  /* shift */
  insert_field (FLD_shift, code,
                aarch64_get_operand_modifier_value (info->shifter.kind), 0);
  /* imm6 */
  insert_field (FLD_imm6, code, info->shifter.amount, 0);

  return true;
}

/* Encode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
   where <simm4> is a 4-bit signed value and where <factor> is 1 plus
   SELF's operand-dependent value.  fields[0] specifies the field that
   holds <base>.  <simm4> is encoded in the SVE_imm4 field.  */
bool
aarch64_ins_sve_addr_ri_s4xvl (const aarch64_operand *self,
                               const aarch64_opnd_info *info,
                               aarch64_insn *code,
                               const aarch64_inst *inst ATTRIBUTE_UNUSED,
                               aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int factor = 1 + get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
  return true;
}

/* Encode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
   where <simm6> is a 6-bit signed value and where <factor> is 1 plus
   SELF's operand-dependent value.  fields[0] specifies the field that
   holds <base>.  <simm6> is encoded in the SVE_imm6 field.  */
bool
aarch64_ins_sve_addr_ri_s6xvl (const aarch64_operand *self,
                               const aarch64_opnd_info *info,
                               aarch64_insn *code,
                               const aarch64_inst *inst ATTRIBUTE_UNUSED,
                               aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int factor = 1 + get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
  return true;
}
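
/* Worked example (illustrative only): for a MUL VL form whose
   operand-dependent value is 1 (so factor = 2, e.g. a two-register
   structure access), an offset of #-16, MUL VL is stored by
   aarch64_ins_sve_addr_ri_s4xvl above as -16 / 2 = -8 in the signed
   4-bit SVE_imm4 field.  */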

/* Encode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
   where <simm9> is a 9-bit signed value and where <factor> is 1 plus
   SELF's operand-dependent value.  fields[0] specifies the field that
   holds <base>.  <simm9> is encoded in the concatenation of the SVE_imm6
   and imm3 fields, with imm3 being the less-significant part.  */
bool
aarch64_ins_sve_addr_ri_s9xvl (const aarch64_operand *self,
                               const aarch64_opnd_info *info,
                               aarch64_insn *code,
                               const aarch64_inst *inst ATTRIBUTE_UNUSED,
                               aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int factor = 1 + get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_fields (code, info->addr.offset.imm / factor, 0,
                 2, FLD_imm3, FLD_SVE_imm6);
  return true;
}

/* Encode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
   is a 4-bit signed number and where <shift> is SELF's operand-dependent
   value.  fields[0] specifies the base register field.  */
bool
aarch64_ins_sve_addr_ri_s4 (const aarch64_operand *self,
                            const aarch64_opnd_info *info, aarch64_insn *code,
                            const aarch64_inst *inst ATTRIBUTE_UNUSED,
                            aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int factor = 1 << get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
  return true;
}

/* Encode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
   is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
   value.  fields[0] specifies the base register field.  */
bool
aarch64_ins_sve_addr_ri_u6 (const aarch64_operand *self,
                            const aarch64_opnd_info *info, aarch64_insn *code,
                            const aarch64_inst *inst ATTRIBUTE_UNUSED,
                            aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int factor = 1 << get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
  return true;
}

/* Encode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
   is SELF's operand-dependent value.  fields[0] specifies the base
   register field and fields[1] specifies the offset register field.  */
bool
aarch64_ins_sve_addr_rr_lsl (const aarch64_operand *self,
                             const aarch64_opnd_info *info, aarch64_insn *code,
                             const aarch64_inst *inst ATTRIBUTE_UNUSED,
                             aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (self->fields[1], code, info->addr.offset.regno, 0);
  return true;
}
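
/* Worked example (illustrative only) for aarch64_ins_sve_addr_ri_s9xvl
   above: an offset of #-5 (factor 1) is the 9-bit value 0b111111011;
   imm3 receives the low three bits 0b011 and SVE_imm6 the high six
   bits 0b111111.  */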

/* Encode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
   <shift> is SELF's operand-dependent value.  fields[0] specifies the
   base register field, fields[1] specifies the offset register field and
   fields[2] is a single-bit field that selects SXTW over UXTW.  */
bool
aarch64_ins_sve_addr_rz_xtw (const aarch64_operand *self,
                             const aarch64_opnd_info *info, aarch64_insn *code,
                             const aarch64_inst *inst ATTRIBUTE_UNUSED,
                             aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (self->fields[1], code, info->addr.offset.regno, 0);
  if (info->shifter.kind == AARCH64_MOD_UXTW)
    insert_field (self->fields[2], code, 0, 0);
  else
    insert_field (self->fields[2], code, 1, 0);
  return true;
}

/* Encode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
   5-bit unsigned number and where <shift> is SELF's operand-dependent value.
   fields[0] specifies the base register field.  */
bool
aarch64_ins_sve_addr_zi_u5 (const aarch64_operand *self,
                            const aarch64_opnd_info *info, aarch64_insn *code,
                            const aarch64_inst *inst ATTRIBUTE_UNUSED,
                            aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int factor = 1 << get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (FLD_imm5, code, info->addr.offset.imm / factor, 0);
  return true;
}

/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
   where <modifier> is fixed by the instruction and where <msz> is a
   2-bit unsigned number.  fields[0] specifies the base register field
   and fields[1] specifies the offset register field.  */
static bool
aarch64_ins_sve_addr_zz (const aarch64_operand *self,
                         const aarch64_opnd_info *info, aarch64_insn *code,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (self->fields[1], code, info->addr.offset.regno, 0);
  insert_field (FLD_SVE_msz, code, info->shifter.amount, 0);
  return true;
}

/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.  */
bool
aarch64_ins_sve_addr_zz_lsl (const aarch64_operand *self,
                             const aarch64_opnd_info *info, aarch64_insn *code,
                             const aarch64_inst *inst ATTRIBUTE_UNUSED,
                             aarch64_operand_error *errors)
{
  return aarch64_ins_sve_addr_zz (self, info, code, errors);
}

/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.  */
bool
aarch64_ins_sve_addr_zz_sxtw (const aarch64_operand *self,
                              const aarch64_opnd_info *info,
                              aarch64_insn *code,
                              const aarch64_inst *inst ATTRIBUTE_UNUSED,
                              aarch64_operand_error *errors)
{
  return aarch64_ins_sve_addr_zz (self, info, code, errors);
}

/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.  */
bool
aarch64_ins_sve_addr_zz_uxtw (const aarch64_operand *self,
                              const aarch64_opnd_info *info,
                              aarch64_insn *code,
                              const aarch64_inst *inst ATTRIBUTE_UNUSED,
                              aarch64_operand_error *errors)
{
  return aarch64_ins_sve_addr_zz (self, info, code, errors);
}
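
/* Worked example (illustrative only): in a [Z<n>.<T>, Z<m>.<T>, LSL #<msz>]
   address with LSL #3, the shift amount 3 is written straight into the
   2-bit SVE_msz field by the common aarch64_ins_sve_addr_zz routine
   above.  */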

/* Encode an SVE ADD/SUB immediate.  */
bool
aarch64_ins_sve_aimm (const aarch64_operand *self,
                      const aarch64_opnd_info *info, aarch64_insn *code,
                      const aarch64_inst *inst ATTRIBUTE_UNUSED,
                      aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  if (info->shifter.amount == 8)
    insert_all_fields (self, code, (info->imm.value & 0xff) | 256);
  else if (info->imm.value != 0 && (info->imm.value & 0xff) == 0)
    insert_all_fields (self, code, ((info->imm.value / 256) & 0xff) | 256);
  else
    insert_all_fields (self, code, info->imm.value & 0xff);
  return true;
}

/* Encode an SVE CPY/DUP immediate.  */
bool
aarch64_ins_sve_asimm (const aarch64_operand *self,
                       const aarch64_opnd_info *info, aarch64_insn *code,
                       const aarch64_inst *inst,
                       aarch64_operand_error *errors)
{
  return aarch64_ins_sve_aimm (self, info, code, inst, errors);
}

/* Encode Zn[MM], where MM has a 7-bit triangular encoding.  The fields
   array specifies which field to use for Zn.  MM is encoded in the
   concatenation of imm5 and SVE_tszh, with imm5 being the less
   significant part.  */
bool
aarch64_ins_sve_index (const aarch64_operand *self,
                       const aarch64_opnd_info *info, aarch64_insn *code,
                       const aarch64_inst *inst ATTRIBUTE_UNUSED,
                       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  unsigned int esize = aarch64_get_qualifier_esize (info->qualifier);
  insert_field (self->fields[0], code, info->reglane.regno, 0);
  insert_fields (code, (info->reglane.index * 2 + 1) * esize, 0,
                 2, FLD_imm5, FLD_SVE_tszh);
  return true;
}

/* Encode a logical/bitmask immediate for the MOV alias of SVE DUPM.  */
bool
aarch64_ins_sve_limm_mov (const aarch64_operand *self,
                          const aarch64_opnd_info *info, aarch64_insn *code,
                          const aarch64_inst *inst,
                          aarch64_operand_error *errors)
{
  return aarch64_ins_limm (self, info, code, inst, errors);
}

/* Encode Zn[MM], where Zn occupies the least-significant part of the field
   and where MM occupies the most-significant part.  The operand-dependent
   value specifies the number of bits in Zn.  */
bool
aarch64_ins_sve_quad_index (const aarch64_operand *self,
                            const aarch64_opnd_info *info, aarch64_insn *code,
                            const aarch64_inst *inst ATTRIBUTE_UNUSED,
                            aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  unsigned int reg_bits = get_operand_specific_data (self);
  assert (info->reglane.regno < (1U << reg_bits));
  unsigned int val = (info->reglane.index << reg_bits) + info->reglane.regno;
  insert_all_fields (self, code, val);
  return true;
}

/* Encode {Zn.<T> - Zm.<T>}.  The fields array specifies which field
   to use for Zn.  */
bool
aarch64_ins_sve_reglist (const aarch64_operand *self,
                         const aarch64_opnd_info *info, aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  insert_field (self->fields[0], code, info->reglist.first_regno, 0);
  return true;
}
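
/* Worked example (illustrative only): for Z<n>.S the element size is 4,
   so index 1 is encoded by aarch64_ins_sve_index above as
   (1 * 2 + 1) * 4 = 0b01100, giving imm5 = 0b01100 and SVE_tszh = 0:
   the trailing "100" selects the S size and the bits above it hold the
   index.  */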

/* Encode <pattern>{, MUL #<amount>}.  The fields array specifies which
   fields to use for <pattern>.  <amount> - 1 is encoded in the SVE_imm4
   field.  */
bool
aarch64_ins_sve_scale (const aarch64_operand *self,
                       const aarch64_opnd_info *info, aarch64_insn *code,
                       const aarch64_inst *inst ATTRIBUTE_UNUSED,
                       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  insert_all_fields (self, code, info->imm.value);
  insert_field (FLD_SVE_imm4, code, info->shifter.amount - 1, 0);
  return true;
}

/* Encode an SVE shift left immediate.  */
bool
aarch64_ins_sve_shlimm (const aarch64_operand *self,
                        const aarch64_opnd_info *info, aarch64_insn *code,
                        const aarch64_inst *inst,
                        aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  const aarch64_opnd_info *prev_operand;
  unsigned int esize;

  assert (info->idx > 0);
  prev_operand = &inst->operands[info->idx - 1];
  esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
  insert_all_fields (self, code, 8 * esize + info->imm.value);
  return true;
}

/* Encode an SVE shift right immediate.  */
bool
aarch64_ins_sve_shrimm (const aarch64_operand *self,
                        const aarch64_opnd_info *info, aarch64_insn *code,
                        const aarch64_inst *inst,
                        aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  const aarch64_opnd_info *prev_operand;
  unsigned int esize;

  unsigned int opnd_backshift = get_operand_specific_data (self);
  assert (info->idx >= (int) opnd_backshift);
  prev_operand = &inst->operands[info->idx - opnd_backshift];
  esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
  insert_all_fields (self, code, 16 * esize - info->imm.value);
  return true;
}

/* Encode a single-bit immediate that selects between #0.5 and #1.0.
   The fields array specifies which field to use.  */
bool
aarch64_ins_sve_float_half_one (const aarch64_operand *self,
                                const aarch64_opnd_info *info,
                                aarch64_insn *code,
                                const aarch64_inst *inst ATTRIBUTE_UNUSED,
                                aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  if (info->imm.value == 0x3f000000)
    insert_field (self->fields[0], code, 0, 0);
  else
    insert_field (self->fields[0], code, 1, 0);
  return true;
}

/* Encode a single-bit immediate that selects between #0.5 and #2.0.
   The fields array specifies which field to use.  */
bool
aarch64_ins_sve_float_half_two (const aarch64_operand *self,
                                const aarch64_opnd_info *info,
                                aarch64_insn *code,
                                const aarch64_inst *inst ATTRIBUTE_UNUSED,
                                aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  if (info->imm.value == 0x3f000000)
    insert_field (self->fields[0], code, 0, 0);
  else
    insert_field (self->fields[0], code, 1, 0);
  return true;
}

/* Encode a single-bit immediate that selects between #0.0 and #1.0.
   The fields array specifies which field to use.  */
bool
aarch64_ins_sve_float_zero_one (const aarch64_operand *self,
                                const aarch64_opnd_info *info,
                                aarch64_insn *code,
                                const aarch64_inst *inst ATTRIBUTE_UNUSED,
                                aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  if (info->imm.value == 0)
    insert_field (self->fields[0], code, 0, 0);
  else
    insert_field (self->fields[0], code, 1, 0);
  return true;
}
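
/* Worked example (illustrative only): an SVE right shift of a .H operand
   by #3 is encoded by aarch64_ins_sve_shrimm above as the raw value
   16 * 2 - 3 = 29 spread across self->fields, so a decoder can recover
   the shift amount as 32 - 29 = 3.  */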

/* Encode, in an SME instruction such as MOVA, the ZA tile vector register
   number, vector indicator, vector selector and immediate.  */
bool
aarch64_ins_sme_za_hv_tiles (const aarch64_operand *self,
                             const aarch64_opnd_info *info,
                             aarch64_insn *code,
                             const aarch64_inst *inst ATTRIBUTE_UNUSED,
                             aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int fld_size;
  int fld_q;
  int fld_v = info->za_tile_vector.v;
  int fld_rv = info->za_tile_vector.index.regno - 12;
  int fld_zan_imm = info->za_tile_vector.index.imm;
  int regno = info->za_tile_vector.regno;

  switch (info->qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      fld_size = 0;
      fld_q = 0;
      break;
    case AARCH64_OPND_QLF_S_H:
      fld_size = 1;
      fld_q = 0;
      fld_zan_imm |= regno << 3;
      break;
    case AARCH64_OPND_QLF_S_S:
      fld_size = 2;
      fld_q = 0;
      fld_zan_imm |= regno << 2;
      break;
    case AARCH64_OPND_QLF_S_D:
      fld_size = 3;
      fld_q = 0;
      fld_zan_imm |= regno << 1;
      break;
    case AARCH64_OPND_QLF_S_Q:
      fld_size = 3;
      fld_q = 1;
      fld_zan_imm = regno;
      break;
    default:
      return false;
    }

  insert_field (self->fields[0], code, fld_size, 0);
  insert_field (self->fields[1], code, fld_q, 0);
  insert_field (self->fields[2], code, fld_v, 0);
  insert_field (self->fields[3], code, fld_rv, 0);
  insert_field (self->fields[4], code, fld_zan_imm, 0);

  return true;
}

/* Encode, in the SME ZERO instruction, a list of up to eight 64-bit
   element tile names separated by commas, in the "imm8" field.

   For programmer convenience an assembler must also accept the names of
   32-bit, 16-bit and 8-bit element tiles, which are converted into the
   corresponding set of 64-bit element tiles.  */
bool
aarch64_ins_sme_za_list (const aarch64_operand *self,
                         const aarch64_opnd_info *info,
                         aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int fld_mask = info->imm.value;
  insert_field (self->fields[0], code, fld_mask, 0);
  return true;
}

/* Encode the ZA array operand, i.e. the vector select register (one of
   W12-W15, stored as its number minus 12) and the immediate.  */
bool
aarch64_ins_sme_za_array (const aarch64_operand *self,
                          const aarch64_opnd_info *info,
                          aarch64_insn *code,
                          const aarch64_inst *inst ATTRIBUTE_UNUSED,
                          aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int regno = info->za_tile_vector.index.regno - 12;
  int imm = info->za_tile_vector.index.imm;
  insert_field (self->fields[0], code, regno, 0);
  insert_field (self->fields[1], code, imm, 0);
  return true;
}

/* Encode an SME address [X<n>, #<imm4>, MUL VL].  */
bool
aarch64_ins_sme_addr_ri_u4xvl (const aarch64_operand *self,
                               const aarch64_opnd_info *info,
                               aarch64_insn *code,
                               const aarch64_inst *inst ATTRIBUTE_UNUSED,
                               aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int regno = info->addr.base_regno;
  int imm = info->addr.offset.imm;
  insert_field (self->fields[0], code, regno, 0);
  insert_field (self->fields[1], code, imm, 0);
  return true;
}
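
/* Worked example (illustrative only): MOVA for a .S tile packs the tile
   number into the top of the immediate in aarch64_ins_sme_za_hv_tiles
   above, so tile ZA3 with vector immediate 1 gives
   fld_zan_imm = 1 | (3 << 2) = 0b1101, while the W12-W15 select register
   is stored as its number minus 12.  */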

/* Encode the SMSTART and SMSTOP {SM | ZA} mode.  */
bool
aarch64_ins_sme_sm_za (const aarch64_operand *self,
                       const aarch64_opnd_info *info,
                       aarch64_insn *code,
                       const aarch64_inst *inst ATTRIBUTE_UNUSED,
                       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn fld_crm;
  /* Set CRm[3:1] bits; the parser stores the mode as the character
     's' or 'z' in the register number field.  */
  if (info->reg.regno == 's')
    fld_crm = 0x02;	/* SVCRSM.  */
  else if (info->reg.regno == 'z')
    fld_crm = 0x04;	/* SVCRZA.  */
  else
    return false;

  insert_field (self->fields[0], code, fld_crm, 0);
  return true;
}

/* Encode the source scalable predicate register (Pn), the name of the
   index base register W12-W15 (Rm), and the optional element index,
   defaulting to 0, in the range 0 to one less than the number of vector
   elements in a 128-bit vector register, encoded in "i1:tszh:tszl".  */
bool
aarch64_ins_sme_pred_reg_with_index (const aarch64_operand *self,
                                     const aarch64_opnd_info *info,
                                     aarch64_insn *code,
                                     const aarch64_inst *inst ATTRIBUTE_UNUSED,
                                     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int fld_pn = info->za_tile_vector.regno;
  int fld_rm = info->za_tile_vector.index.regno - 12;
  int imm = info->za_tile_vector.index.imm;
  int fld_i1, fld_tszh, fld_tszl;

  insert_field (self->fields[0], code, fld_rm, 0);
  insert_field (self->fields[1], code, fld_pn, 0);

  /* Optional element index, defaulting to 0, in the range 0 to one less than
     the number of vector elements in a 128-bit vector register, encoded in
     "i1:tszh:tszl".

       i1	tszh	tszl	<T>
       0	0	000	RESERVED
       x	x	xx1	B
       x	x	x10	H
       x	x	100	S
       x	1	000	D  */
  switch (info->qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      /* <imm> is a 4-bit value.  */
      fld_i1 = (imm >> 3) & 0x1;
      fld_tszh = (imm >> 2) & 0x1;
      fld_tszl = ((imm << 1) | 0x1) & 0x7;
      break;
    case AARCH64_OPND_QLF_S_H:
      /* <imm> is a 3-bit value.  */
      fld_i1 = (imm >> 2) & 0x1;
      fld_tszh = (imm >> 1) & 0x1;
      fld_tszl = ((imm << 2) | 0x2) & 0x7;
      break;
    case AARCH64_OPND_QLF_S_S:
      /* <imm> is a 2-bit value.  */
      fld_i1 = (imm >> 1) & 0x1;
      fld_tszh = imm & 0x1;
      fld_tszl = 0x4;
      break;
    case AARCH64_OPND_QLF_S_D:
      /* <imm> is a 1-bit value.  */
      fld_i1 = imm & 0x1;
      fld_tszh = 0x1;
      fld_tszl = 0x0;
      break;
    default:
      return false;
    }

  insert_field (self->fields[2], code, fld_i1, 0);
  insert_field (self->fields[3], code, fld_tszh, 0);
  insert_field (self->fields[4], code, fld_tszl, 0);
  return true;
}

/* Insert X0-X30.  Register 31 is unallocated.  */
bool
aarch64_ins_x0_to_x30 (const aarch64_operand *self,
                       const aarch64_opnd_info *info,
                       aarch64_insn *code,
                       const aarch64_inst *inst ATTRIBUTE_UNUSED,
                       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  assert (info->reg.regno <= 30);
  insert_field (self->fields[0], code, info->reg.regno, 0);
  return true;
}
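
/* Worked example (illustrative only): for the .H form in
   aarch64_ins_sme_pred_reg_with_index above, the 3-bit element index 5
   (0b101) splits into i1 = 1, tszh = 0 and tszl = 0b110: tszl's trailing
   "10" marks the H element size and the remaining bits i1:tszh:tszl<2>
   carry the index.  */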

/* Miscellaneous encoding functions.  */

/* Encode size[0], i.e. bit 22, for
     e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */

static void
encode_asimd_fcvt (aarch64_inst *inst)
{
  aarch64_insn value;
  aarch64_field field = {0, 0};
  enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_NIL;

  switch (inst->opcode->op)
    {
    case OP_FCVTN:
    case OP_FCVTN2:
      /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */
      qualifier = inst->operands[1].qualifier;
      break;
    case OP_FCVTL:
    case OP_FCVTL2:
      /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>.  */
      qualifier = inst->operands[0].qualifier;
      break;
    default:
      return;
    }
  assert (qualifier == AARCH64_OPND_QLF_V_4S
          || qualifier == AARCH64_OPND_QLF_V_2D);
  value = (qualifier == AARCH64_OPND_QLF_V_4S) ? 0 : 1;
  gen_sub_field (FLD_size, 0, 1, &field);
  insert_field_2 (&field, &inst->value, value, 0);
}

/* Encode size[0], i.e. bit 22, for
     e.g. FCVTXN <Vb><d>, <Va><n>.  */

static void
encode_asisd_fcvtxn (aarch64_inst *inst)
{
  aarch64_insn val = 1;
  aarch64_field field = {0, 0};
  assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_S_S);
  gen_sub_field (FLD_size, 0, 1, &field);
  insert_field_2 (&field, &inst->value, val, 0);
}

/* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>.  */
static void
encode_fcvt (aarch64_inst *inst)
{
  aarch64_insn val;
  const aarch64_field field = {15, 2};

  /* opc dstsize */
  switch (inst->operands[0].qualifier)
    {
    case AARCH64_OPND_QLF_S_S: val = 0; break;
    case AARCH64_OPND_QLF_S_D: val = 1; break;
    case AARCH64_OPND_QLF_S_H: val = 3; break;
    default: abort ();
    }
  insert_field_2 (&field, &inst->value, val, 0);
}

/* Return the index in qualifiers_list that INST is using.  Should only
   be called once the qualifiers are known to be valid.  */

static int
aarch64_get_variant (struct aarch64_inst *inst)
{
  int i, nops, variant;

  nops = aarch64_num_of_operands (inst->opcode);
  for (variant = 0; variant < AARCH64_MAX_QLF_SEQ_NUM; ++variant)
    {
      for (i = 0; i < nops; ++i)
        if (inst->opcode->qualifiers_list[variant][i]
            != inst->operands[i].qualifier)
          break;
      if (i == nops)
        return variant;
    }
  abort ();
}
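
/* Worked example (illustrative only): FCVT <Dd>, <Sn> selects on the
   destination size, so encode_fcvt above writes opc = 1 (double) into
   the two-bit field at bit 15; FCVT <Hd>, <Sn> would write opc = 3.  */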

/* Do miscellaneous encodings that are not common enough to be driven by
   flags.  */

static void
do_misc_encoding (aarch64_inst *inst)
{
  unsigned int value;

  switch (inst->opcode->op)
    {
    case OP_FCVT:
      encode_fcvt (inst);
      break;
    case OP_FCVTN:
    case OP_FCVTN2:
    case OP_FCVTL:
    case OP_FCVTL2:
      encode_asimd_fcvt (inst);
      break;
    case OP_FCVTXN_S:
      encode_asisd_fcvtxn (inst);
      break;
    case OP_MOV_P_P:
    case OP_MOVS_P_P:
      /* Copy Pn to Pm and Pg.  */
      value = extract_field (FLD_SVE_Pn, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      insert_field (FLD_SVE_Pg4_10, &inst->value, value, 0);
      break;
    case OP_MOV_Z_P_Z:
      /* Copy Zd to Zm.  */
      value = extract_field (FLD_SVE_Zd, inst->value, 0);
      insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
      break;
    case OP_MOV_Z_V:
      /* Fill in the zero immediate.  */
      insert_fields (&inst->value, 1 << aarch64_get_variant (inst), 0,
                     2, FLD_imm5, FLD_SVE_tszh);
      break;
    case OP_MOV_Z_Z:
      /* Copy Zn to Zm.  */
      value = extract_field (FLD_SVE_Zn, inst->value, 0);
      insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
      break;
    case OP_MOV_Z_Zi:
      break;
    case OP_MOVM_P_P_P:
      /* Copy Pd to Pm.  */
      value = extract_field (FLD_SVE_Pd, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    case OP_MOVZS_P_P_P:
    case OP_MOVZ_P_P_P:
      /* Copy Pn to Pm.  */
      value = extract_field (FLD_SVE_Pn, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    case OP_NOTS_P_P_P_Z:
    case OP_NOT_P_P_P_Z:
      /* Copy Pg to Pm.  */
      value = extract_field (FLD_SVE_Pg4_10, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    default:
      break;
    }
}

/* Encode the 'size' and 'Q' field for e.g. SHADD.  */
static void
encode_sizeq (aarch64_inst *inst)
{
  aarch64_insn sizeq;
  enum aarch64_field_kind kind;
  int idx;

  /* Get the index of the operand whose information we are going to use
     to encode the size and Q fields.
     This is deduced from the possible valid qualifier lists.  */
  idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
  DEBUG_TRACE ("idx: %d; qualifier: %s", idx,
               aarch64_get_qualifier_name (inst->operands[idx].qualifier));
  sizeq = aarch64_get_qualifier_standard_value (inst->operands[idx].qualifier);
  /* Q */
  insert_field (FLD_Q, &inst->value, sizeq & 0x1, inst->opcode->mask);
  /* size */
  if (inst->opcode->iclass == asisdlse
      || inst->opcode->iclass == asisdlsep
      || inst->opcode->iclass == asisdlso
      || inst->opcode->iclass == asisdlsop)
    kind = FLD_vldst_size;
  else
    kind = FLD_size;
  insert_field (kind, &inst->value, (sizeq >> 1) & 0x3, inst->opcode->mask);
}
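
/* Worked example (illustrative only): for a .4S arrangement the
   qualifier's standard value is 0b101, so encode_sizeq above emits
   Q = 1 and size = 0b10.  */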
/* Opcodes whose fields are shared by multiple operands are flagged
   accordingly.  In this function, we detect such flags and use the
   information in one of the related operands to do the encoding; the
   chosen operand is the one that carries enough information to
   determine the encoding.  */

static void
do_special_encoding (struct aarch64_inst *inst)
{
  int idx;
  aarch64_insn value = 0;

  DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst->value);

  /* Condition for truly conditionally executed instructions, e.g. b.cond.  */
  if (inst->opcode->flags & F_COND)
    {
      insert_field (FLD_cond2, &inst->value, inst->cond->value, 0);
    }
  if (inst->opcode->flags & F_SF)
    {
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
               || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
        ? 1 : 0;
      insert_field (FLD_sf, &inst->value, value, 0);
      if (inst->opcode->flags & F_N)
        insert_field (FLD_N, &inst->value, value, inst->opcode->mask);
    }
  if (inst->opcode->flags & F_LSE_SZ)
    {
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
               || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
        ? 1 : 0;
      insert_field (FLD_lse_sz, &inst->value, value, 0);
    }
  if (inst->opcode->flags & F_SIZEQ)
    encode_sizeq (inst);
  if (inst->opcode->flags & F_FPTYPE)
    {
      idx = select_operand_for_fptype_field_coding (inst->opcode);
      switch (inst->operands[idx].qualifier)
        {
        case AARCH64_OPND_QLF_S_S: value = 0; break;
        case AARCH64_OPND_QLF_S_D: value = 1; break;
        case AARCH64_OPND_QLF_S_H: value = 3; break;
        default: return;
        }
      insert_field (FLD_type, &inst->value, value, 0);
    }
  if (inst->opcode->flags & F_SSIZE)
    {
      enum aarch64_opnd_qualifier qualifier;
      idx = select_operand_for_scalar_size_field_coding (inst->opcode);
      qualifier = inst->operands[idx].qualifier;
      assert (qualifier >= AARCH64_OPND_QLF_S_B
              && qualifier <= AARCH64_OPND_QLF_S_Q);
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_size, &inst->value, value, inst->opcode->mask);
    }
  if (inst->opcode->flags & F_T)
    {
      int num;  /* Number of consecutive '0's on the right side of imm5<3:0>.  */
      aarch64_field field = {0, 0};
      enum aarch64_opnd_qualifier qualifier;

      idx = 0;
      qualifier = inst->operands[idx].qualifier;
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
              == AARCH64_OPND_CLASS_SIMD_REG
              && qualifier >= AARCH64_OPND_QLF_V_8B
              && qualifier <= AARCH64_OPND_QLF_V_2D);
      /* imm5<3:0>  q  <t>
         0000       x  reserved
         xxx1       0  8b
         xxx1       1  16b
         xx10       0  4h
         xx10       1  8h
         x100       0  2s
         x100       1  4s
         1000       0  reserved
         1000       1  2d  */
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_Q, &inst->value, value & 0x1, inst->opcode->mask);
      num = (int) value >> 1;
      assert (num >= 0 && num <= 3);
      gen_sub_field (FLD_imm5, 0, num + 1, &field);
      insert_field_2 (&field, &inst->value, 1 << num, inst->opcode->mask);
    }
  if (inst->opcode->flags & F_GPRSIZE_IN_Q)
    {
      /* Use Rt to encode in the case of e.g.
         STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}].  */
      enum aarch64_opnd_qualifier qualifier;
      idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
      if (idx == -1)
        /* Otherwise use the result operand, which has to be an integer
           register.  */
        idx = 0;
      assert (idx == 0 || idx == 1);
      assert (aarch64_get_operand_class (inst->opcode->operands[idx])
              == AARCH64_OPND_CLASS_INT_REG);
      qualifier = inst->operands[idx].qualifier;
      insert_field (FLD_Q, &inst->value,
                    aarch64_get_qualifier_standard_value (qualifier), 0);
    }
  if (inst->opcode->flags & F_LDS_SIZE)
    {
      /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
      enum aarch64_opnd_qualifier qualifier;
      aarch64_field field = {0, 0};
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
              == AARCH64_OPND_CLASS_INT_REG);
      gen_sub_field (FLD_opc, 0, 1, &field);
      qualifier = inst->operands[0].qualifier;
      insert_field_2 (&field, &inst->value,
                      1 - aarch64_get_qualifier_standard_value (qualifier), 0);
    }
  /* Miscellaneous encoding as the last step.  */
  if (inst->opcode->flags & F_MISC)
    do_misc_encoding (inst);

  DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst->value);
}
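/* Worked example for the F_T case above (illustrative): a .2S
   arrangement has standard value 0b100, so Q is 0, num is 2 and the
   sub-field imm5<2:0> receives 1 << 2, matching the "x100 0 2s" row
   of the table.  */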
/* Some instructions (including all SVE ones) use the instruction class
   to describe how a qualifiers_list index is represented in the
   instruction encoding.  If INST is such an instruction, encode the
   chosen qualifier variant.  */

static void
aarch64_encode_variant_using_iclass (struct aarch64_inst *inst)
{
  int variant = 0;
  switch (inst->opcode->iclass)
    {
    case sve_cpy:
      insert_fields (&inst->value, aarch64_get_variant (inst),
                     0, 2, FLD_SVE_M_14, FLD_size);
      break;

    case sve_index:
    case sve_shift_pred:
    case sve_shift_unpred:
    case sve_shift_tsz_hsd:
    case sve_shift_tsz_bhsd:
      /* For indices and shift amounts, the variant is encoded as
         part of the immediate.  */
      break;

    case sve_limm:
      /* For sve_limm, the .B, .H, and .S forms are just a convenience
         and depend on the immediate.  They don't have a separate
         encoding.  */
      break;

    case sve_misc:
      /* sve_misc instructions have only a single variant.  */
      break;

    case sve_movprfx:
      insert_fields (&inst->value, aarch64_get_variant (inst),
                     0, 2, FLD_SVE_M_16, FLD_size);
      break;

    case sve_pred_zm:
      insert_field (FLD_SVE_M_4, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_bhs:
    case sve_size_bhsd:
      insert_field (FLD_size, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_hsd:
      /* MOD 3 for `OP_SVE_Vv_HSD`.  */
      insert_field (FLD_size, &inst->value,
                    aarch64_get_variant (inst) % 3 + 1, 0);
      break;

    case sve_size_bh:
    case sve_size_sd:
      insert_field (FLD_SVE_sz, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_sd2:
      insert_field (FLD_SVE_sz2, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_hsd2:
      insert_field (FLD_SVE_size, &inst->value,
                    aarch64_get_variant (inst) + 1, 0);
      break;

    case sve_size_tsz_bhs:
      insert_fields (&inst->value,
                     (1 << aarch64_get_variant (inst)),
                     0, 2, FLD_SVE_tszl_19, FLD_SVE_sz);
      break;

    case sve_size_13:
      variant = aarch64_get_variant (inst) + 1;
      if (variant == 2)
        variant = 3;
      insert_field (FLD_size, &inst->value, variant, 0);
      break;

    default:
      break;
    }
}

/* Converters converting an alias opcode instruction to its real form.  */

/* ROR <Wd>, <Ws>, #<shift>
   is equivalent to:
   EXTR <Wd>, <Ws>, <Ws>, #<shift>.  */
static void
convert_ror_to_extr (aarch64_inst *inst)
{
  copy_operand_info (inst, 3, 2);
  copy_operand_info (inst, 2, 1);
}

/* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
   is equivalent to:
   USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0
   (and likewise SXTL<Q> to SSHLL<Q>).  */
static void
convert_xtl_to_shll (aarch64_inst *inst)
{
  inst->operands[2].qualifier = inst->operands[1].qualifier;
  inst->operands[2].imm.value = 0;
}

/* Convert
     LSR <Xd>, <Xn>, #<shift>
   to
     UBFM <Xd>, <Xn>, #<shift>, #63
   (ASR is converted to SBFM in the same way; the 32-bit forms use #31).  */
static void
convert_sr_to_bfm (aarch64_inst *inst)
{
  inst->operands[3].imm.value =
    inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
}

/* Convert MOV to ORR.  */
static void
convert_mov_to_orr (aarch64_inst *inst)
{
  /* MOV <Vd>.<T>, <Vn>.<T>
     is equivalent to:
     ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>.  */
  copy_operand_info (inst, 2, 1);
}
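/* For instance (illustrative), MOV <Vd>.8B, <Vn>.8B is assembled as
   ORR <Vd>.8B, <Vn>.8B, <Vn>.8B: the copy_operand_info call above
   duplicates operand 1 (<Vn>) into the third operand slot.  */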
/* When <imms> >= <immr>, the instruction written:
     SBFX <Xd>, <Xn>, #<lsb>, #<width>
   is equivalent to:
     SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1).  */

static void
convert_bfx_to_bfm (aarch64_inst *inst)
{
  int64_t lsb, width;

  /* Convert the operand.  */
  lsb = inst->operands[2].imm.value;
  width = inst->operands[3].imm.value;
  inst->operands[2].imm.value = lsb;
  inst->operands[3].imm.value = lsb + width - 1;
}

/* When <imms> < <immr>, the instruction written:
     SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
   is equivalent to:
     SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1).  */

static void
convert_bfi_to_bfm (aarch64_inst *inst)
{
  int64_t lsb, width;

  /* Convert the operand.  */
  lsb = inst->operands[2].imm.value;
  width = inst->operands[3].imm.value;
  if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
    {
      inst->operands[2].imm.value = (32 - lsb) & 0x1f;
      inst->operands[3].imm.value = width - 1;
    }
  else
    {
      inst->operands[2].imm.value = (64 - lsb) & 0x3f;
      inst->operands[3].imm.value = width - 1;
    }
}

/* The instruction written:
     BFC <Xd>, #<lsb>, #<width>
   is equivalent to:
     BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1).  */

static void
convert_bfc_to_bfm (aarch64_inst *inst)
{
  int64_t lsb, width;

  /* Insert XZR.  */
  copy_operand_info (inst, 3, 2);
  copy_operand_info (inst, 2, 1);
  copy_operand_info (inst, 1, 0);
  inst->operands[1].reg.regno = 0x1f;

  /* Convert the immediate operand.  */
  lsb = inst->operands[2].imm.value;
  width = inst->operands[3].imm.value;
  if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
    {
      inst->operands[2].imm.value = (32 - lsb) & 0x1f;
      inst->operands[3].imm.value = width - 1;
    }
  else
    {
      inst->operands[2].imm.value = (64 - lsb) & 0x3f;
      inst->operands[3].imm.value = width - 1;
    }
}

/* The instruction written:
     LSL <Xd>, <Xn>, #<shift>
   is equivalent to:
     UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>).  */

static void
convert_lsl_to_ubfm (aarch64_inst *inst)
{
  int64_t shift = inst->operands[2].imm.value;

  if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
    {
      inst->operands[2].imm.value = (32 - shift) & 0x1f;
      inst->operands[3].imm.value = 31 - shift;
    }
  else
    {
      inst->operands[2].imm.value = (64 - shift) & 0x3f;
      inst->operands[3].imm.value = 63 - shift;
    }
}
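/* Worked examples for the bitfield converters above (illustrative):
     SBFX  X0, X1, #4, #8  ->  SBFM X0, X1, #4, #11   (4 + 8 - 1)
     SBFIZ X0, X1, #4, #8  ->  SBFM X0, X1, #60, #7   ((64 - 4) & 0x3f)
     LSL   W0, W1, #3      ->  UBFM W0, W1, #29, #28  ((32 - 3) & 0x1f,
                                                       31 - 3).  */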
/* CINC <Wd>, <Wn>, <cond>
   is equivalent to:
   CSINC <Wd>, <Wn>, <Wn>, invert(<cond>).  */

static void
convert_to_csel (aarch64_inst *inst)
{
  copy_operand_info (inst, 3, 2);
  copy_operand_info (inst, 2, 1);
  inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
}

/* CSET <Wd>, <cond>
   is equivalent to:
   CSINC <Wd>, WZR, WZR, invert(<cond>).  */

static void
convert_cset_to_csinc (aarch64_inst *inst)
{
  copy_operand_info (inst, 3, 1);
  copy_operand_info (inst, 2, 0);
  copy_operand_info (inst, 1, 0);
  inst->operands[1].reg.regno = 0x1f;
  inst->operands[2].reg.regno = 0x1f;
  inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
}

/* MOV <Wd>, #<imm>
   is equivalent to:
   MOVZ <Wd>, #<imm16>, LSL #<shift>
   (or MOVN, when it is the bitwise inverse of the immediate that fits).  */

static void
convert_mov_to_movewide (aarch64_inst *inst)
{
  int is32;
  uint32_t shift_amount;
  uint64_t value = ~(uint64_t) 0;

  switch (inst->opcode->op)
    {
    case OP_MOV_IMM_WIDE:
      value = inst->operands[1].imm.value;
      break;
    case OP_MOV_IMM_WIDEN:
      value = ~inst->operands[1].imm.value;
      break;
    default:
      return;
    }
  inst->operands[1].type = AARCH64_OPND_HALF;
  is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
  if (! aarch64_wide_constant_p (value, is32, &shift_amount))
    /* The constraint check should have guaranteed this wouldn't happen.  */
    return;
  value >>= shift_amount;
  value &= 0xffff;
  inst->operands[1].imm.value = value;
  inst->operands[1].shifter.kind = AARCH64_MOD_LSL;
  inst->operands[1].shifter.amount = shift_amount;
}

/* MOV <Wd>, #<imm>
   is equivalent to:
   ORR <Wd>, WZR, #<imm>.  */

static void
convert_mov_to_movebitmask (aarch64_inst *inst)
{
  copy_operand_info (inst, 2, 1);
  inst->operands[1].reg.regno = 0x1f;
  inst->operands[1].skip = 0;
}
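/* Illustrative examples for the MOV converters above (not part of the
   original source):
     MOV X0, #0x20000             ->  MOVZ X0, #0x2, LSL #16
     MOV X0, #0xf0f0f0f0f0f0f0f0  ->  ORR  X0, XZR, #0xf0f0f0f0f0f0f0f0
   the latter taking the logical (bitmask) immediate form because the
   value is not expressible as a shifted 16-bit immediate.  */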
/* Some alias opcodes are assembled by being converted to their real form.  */

static void
convert_to_real (aarch64_inst *inst, const aarch64_opcode *real)
{
  const aarch64_opcode *alias = inst->opcode;

  if ((alias->flags & F_CONV) == 0)
    goto convert_to_real_return;

  switch (alias->op)
    {
    case OP_ASR_IMM:
    case OP_LSR_IMM:
      convert_sr_to_bfm (inst);
      break;
    case OP_LSL_IMM:
      convert_lsl_to_ubfm (inst);
      break;
    case OP_CINC:
    case OP_CINV:
    case OP_CNEG:
      convert_to_csel (inst);
      break;
    case OP_CSET:
    case OP_CSETM:
      convert_cset_to_csinc (inst);
      break;
    case OP_UBFX:
    case OP_BFXIL:
    case OP_SBFX:
      convert_bfx_to_bfm (inst);
      break;
    case OP_SBFIZ:
    case OP_BFI:
    case OP_UBFIZ:
      convert_bfi_to_bfm (inst);
      break;
    case OP_BFC:
      convert_bfc_to_bfm (inst);
      break;
    case OP_MOV_V:
      convert_mov_to_orr (inst);
      break;
    case OP_MOV_IMM_WIDE:
    case OP_MOV_IMM_WIDEN:
      convert_mov_to_movewide (inst);
      break;
    case OP_MOV_IMM_LOG:
      convert_mov_to_movebitmask (inst);
      break;
    case OP_ROR_IMM:
      convert_ror_to_extr (inst);
      break;
    case OP_SXTL:
    case OP_SXTL2:
    case OP_UXTL:
    case OP_UXTL2:
      convert_xtl_to_shll (inst);
      break;
    default:
      break;
    }

 convert_to_real_return:
  aarch64_replace_opcode (inst, real);
}

/* Encode *INST_ORI of the opcode OPCODE.
   Return the encoded result in *CODE and, if QLF_SEQ is not NULL, return
   the matched operand qualifier sequence in *QLF_SEQ.  */

bool
aarch64_opcode_encode (const aarch64_opcode *opcode,
		       const aarch64_inst *inst_ori, aarch64_insn *code,
		       aarch64_opnd_qualifier_t *qlf_seq,
		       aarch64_operand_error *mismatch_detail,
		       aarch64_instr_sequence *insn_sequence)
{
  int i;
  const aarch64_opcode *aliased;
  aarch64_inst copy, *inst;

  DEBUG_TRACE ("enter with %s", opcode->name);

  /* Create a copy of *INST_ORI, so that we can do any change we want.  */
  copy = *inst_ori;
  inst = &copy;

  assert (inst->opcode == NULL || inst->opcode == opcode);
  if (inst->opcode == NULL)
    inst->opcode = opcode;

  /* Constrain the operands.
     Once this passes, the encoding is guaranteed to succeed.  */
  if (aarch64_match_operands_constraint (inst, mismatch_detail) == 0)
    {
      DEBUG_TRACE ("FAIL since operand constraint not met");
      return false;
    }

  /* Get the base value.
     Note: this has to be before the aliasing handling below in order to
     get the base value from the alias opcode before we move on to the
     aliased opcode for encoding.  */
  inst->value = opcode->opcode;

  /* No need to do anything else if the opcode does not have any operand.  */
  if (aarch64_num_of_operands (opcode) == 0)
    goto encoding_exit;

  /* Assign operand indexes and check types.  Also put the matched
     operand qualifiers in *QLF_SEQ to return.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      assert (opcode->operands[i] == inst->operands[i].type);
      inst->operands[i].idx = i;
      if (qlf_seq != NULL)
	*qlf_seq = inst->operands[i].qualifier;
    }

  aliased = aarch64_find_real_opcode (opcode);
  /* If the opcode is an alias that is flagged for conversion (F_CONV),
     transform the instruction into the form of its real opcode and carry
     out the encoding using the real opcode's rules.  */
  if (aliased != NULL && (opcode->flags & F_CONV))
    {
      DEBUG_TRACE ("real opcode '%s' has been found for the alias %s",
		   aliased->name, opcode->name);
      /* Convert the operands to the form of the real opcode.  */
      convert_to_real (inst, aliased);
      opcode = aliased;
    }
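  /* From this point on INST is in real-opcode form; e.g. an LSL alias
     such as LSL X0, X1, #4 has already been rewritten above as
     UBFM X0, X1, #60, #59, so the per-operand inserters below only ever
     see real-opcode operands (illustrative note).  */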
  aarch64_opnd_info *info = inst->operands;

  /* Call the inserter of each operand.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++info)
    {
      const aarch64_operand *opnd;
      enum aarch64_opnd type = opcode->operands[i];
      if (type == AARCH64_OPND_NIL)
	break;
      if (info->skip)
	{
	  DEBUG_TRACE ("skip the incomplete operand %d", i);
	  continue;
	}
      opnd = &aarch64_operands[type];
      if (operand_has_inserter (opnd)
	  && !aarch64_insert_operand (opnd, info, &inst->value, inst,
				      mismatch_detail))
	return false;
    }

  /* Call opcode encoders indicated by flags.  */
  if (opcode_has_special_coder (opcode))
    do_special_encoding (inst);

  /* Possibly use the instruction class to encode the chosen qualifier
     variant.  */
  aarch64_encode_variant_using_iclass (inst);

  /* Run a verifier if the instruction has one set.  */
  if (opcode->verifier)
    {
      enum err_type result = opcode->verifier (inst, *code, 0, true,
					       mismatch_detail, insn_sequence);
      switch (result)
	{
	case ERR_UND:
	case ERR_UNP:
	case ERR_NYI:
	  return false;
	default:
	  break;
	}
    }

  /* Always run the constraint verifiers, whether or not the instruction
     has a verifier set, because the constraints need to maintain global
     state.  */
  enum err_type result = verify_constraints (inst, *code, 0, true,
					     mismatch_detail, insn_sequence);
  switch (result)
    {
    case ERR_UND:
    case ERR_UNP:
    case ERR_NYI:
      return false;
    default:
      break;
    }

 encoding_exit:
  DEBUG_TRACE ("exit with %s", opcode->name);

  *code = inst->value;

  return true;
}
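/* A minimal usage sketch of aarch64_opcode_encode (illustrative only;
   output_insn, report_error and the way INST is populated are
   assumptions, not part of this library):

     aarch64_inst inst;           // filled in by the assembler front end
     aarch64_insn code;
     aarch64_operand_error err;
     aarch64_instr_sequence seq;  // constraint-tracking state
                                  // (initialisation not shown)

     if (aarch64_opcode_encode (inst.opcode, &inst, &code, NULL,
                                &err, &seq))
       output_insn (code);        // hypothetical emitter
     else
       report_error (&err);       // hypothetical diagnostics

   Passing NULL for QLF_SEQ simply means the matched qualifier sequence
   is not returned to the caller.  */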