//=- AArch64/AArch64MCCodeEmitter.cpp - Convert AArch64 code to machine code-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the AArch64MCCodeEmitter class.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AArch64AddressingModes.h"
#include "MCTargetDesc/AArch64FixupKinds.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCFixup.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <cstdint>

using namespace llvm;

#define DEBUG_TYPE "mccodeemitter"

STATISTIC(MCNumEmitted, "Number of MC instructions emitted.");
STATISTIC(MCNumFixups, "Number of MC fixups created.");

namespace {

class AArch64MCCodeEmitter : public MCCodeEmitter {
  MCContext &Ctx;

public:
  AArch64MCCodeEmitter(const MCInstrInfo &, MCContext &ctx) : Ctx(ctx) {}
  AArch64MCCodeEmitter(const AArch64MCCodeEmitter &) = delete;
  void operator=(const AArch64MCCodeEmitter &) = delete;
  ~AArch64MCCodeEmitter() override = default;

  // getBinaryCodeForInstr - TableGen'erated function for getting the
  // binary encoding for an instruction.
  uint64_t getBinaryCodeForInstr(const MCInst &MI,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;

  /// getMachineOpValue - Return binary encoding of operand. If the machine
  /// operand requires relocation, record the relocation and return zero.
  unsigned getMachineOpValue(const MCInst &MI, const MCOperand &MO,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const;

  /// getLdStUImm12OpValue - Return encoding info for 12-bit unsigned immediate
  /// attached to a load, store or prfm instruction. If operand requires a
  /// relocation, record it and return zero in that part of the encoding.
  template <uint32_t FixupKind>
  uint32_t getLdStUImm12OpValue(const MCInst &MI, unsigned OpIdx,
                                SmallVectorImpl<MCFixup> &Fixups,
                                const MCSubtargetInfo &STI) const;

  /// getAdrLabelOpValue - Return encoding info for 21-bit immediate ADR label
  /// target.
  uint32_t getAdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
                              SmallVectorImpl<MCFixup> &Fixups,
                              const MCSubtargetInfo &STI) const;

  /// getAddSubImmOpValue - Return encoding for the 12-bit immediate value and
  /// the 2-bit shift field.
  uint32_t getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
                               SmallVectorImpl<MCFixup> &Fixups,
                               const MCSubtargetInfo &STI) const;

  /// getCondBranchTargetOpValue - Return the encoded value for a conditional
  /// branch target.
  uint32_t getCondBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
                                      SmallVectorImpl<MCFixup> &Fixups,
                                      const MCSubtargetInfo &STI) const;

  /// getCondCompBranchTargetOpValue - Return the encoded value for a
  /// conditional compare-and-branch target.
  uint32_t getCondCompBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
                                          SmallVectorImpl<MCFixup> &Fixups,
                                          const MCSubtargetInfo &STI) const;

  /// getPAuthPCRelOpValue - Return the encoded value for a pointer
  /// authentication pc-relative operand.
  uint32_t getPAuthPCRelOpValue(const MCInst &MI, unsigned OpIdx,
                                SmallVectorImpl<MCFixup> &Fixups,
                                const MCSubtargetInfo &STI) const;

  /// getLoadLiteralOpValue - Return the encoded value for a load-literal
  /// pc-relative address.
  uint32_t getLoadLiteralOpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;

  /// getMemExtendOpValue - Return the encoded value for a reg-extend load/store
  /// instruction: bit 0 is whether a shift is present, bit 1 is whether the
  /// operation is a sign extend (as opposed to a zero extend).
  uint32_t getMemExtendOpValue(const MCInst &MI, unsigned OpIdx,
                               SmallVectorImpl<MCFixup> &Fixups,
                               const MCSubtargetInfo &STI) const;

  /// getTestBranchTargetOpValue - Return the encoded value for a test-bit-and-
  /// branch target.
  uint32_t getTestBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
                                      SmallVectorImpl<MCFixup> &Fixups,
                                      const MCSubtargetInfo &STI) const;

  /// getBranchTargetOpValue - Return the encoded value for an unconditional
  /// branch target.
  uint32_t getBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
                                  SmallVectorImpl<MCFixup> &Fixups,
                                  const MCSubtargetInfo &STI) const;

  /// getMoveWideImmOpValue - Return the encoded value for the immediate operand
  /// of a MOVZ or MOVK instruction.
  uint32_t getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;

  /// getVecShifterOpValue - Return the encoded value for the vector shifter.
  uint32_t getVecShifterOpValue(const MCInst &MI, unsigned OpIdx,
                                SmallVectorImpl<MCFixup> &Fixups,
                                const MCSubtargetInfo &STI) const;

  /// getMoveVecShifterOpValue - Return the encoded value for the vector move
  /// shifter (MSL).
  uint32_t getMoveVecShifterOpValue(const MCInst &MI, unsigned OpIdx,
                                    SmallVectorImpl<MCFixup> &Fixups,
                                    const MCSubtargetInfo &STI) const;

  /// getFixedPointScaleOpValue - Return the encoded value for the
  /// FP-to-fixed-point scale factor.
  uint32_t getFixedPointScaleOpValue(const MCInst &MI, unsigned OpIdx,
                                     SmallVectorImpl<MCFixup> &Fixups,
                                     const MCSubtargetInfo &STI) const;

  uint32_t getVecShiftR64OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftR32OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftR16OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftR8OpValue(const MCInst &MI, unsigned OpIdx,
                                SmallVectorImpl<MCFixup> &Fixups,
                                const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftL64OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftL32OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftL16OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftL8OpValue(const MCInst &MI, unsigned OpIdx,
                                SmallVectorImpl<MCFixup> &Fixups,
                                const MCSubtargetInfo &STI) const;

  uint32_t getImm8OptLsl(const MCInst &MI, unsigned OpIdx,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const;
  uint32_t getSVEIncDecImm(const MCInst &MI, unsigned OpIdx,
                           SmallVectorImpl<MCFixup> &Fixups,
                           const MCSubtargetInfo &STI) const;

  unsigned fixMOVZ(const MCInst &MI, unsigned EncodedValue,
                   const MCSubtargetInfo &STI) const;

  void encodeInstruction(const MCInst &MI, SmallVectorImpl<char> &CB,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const override;

  unsigned fixMulHigh(const MCInst &MI, unsigned EncodedValue,
                      const MCSubtargetInfo &STI) const;

  template<int hasRs, int hasRt2> unsigned
  fixLoadStoreExclusive(const MCInst &MI, unsigned EncodedValue,
                        const MCSubtargetInfo &STI) const;

  unsigned fixOneOperandFPComparison(const MCInst &MI, unsigned EncodedValue,
                                     const MCSubtargetInfo &STI) const;

  template <unsigned Multiple, unsigned Min, unsigned Max>
  uint32_t EncodeRegMul_MinMax(const MCInst &MI, unsigned OpIdx,
                               SmallVectorImpl<MCFixup> &Fixups,
                               const MCSubtargetInfo &STI) const;
  uint32_t EncodeZK(const MCInst &MI, unsigned OpIdx,
                    SmallVectorImpl<MCFixup> &Fixups,
                    const MCSubtargetInfo &STI) const;
  uint32_t EncodePNR_p8to15(const MCInst &MI, unsigned OpIdx,
                            SmallVectorImpl<MCFixup> &Fixups,
                            const MCSubtargetInfo &STI) const;

  uint32_t EncodeZPR2StridedRegisterClass(const MCInst &MI, unsigned OpIdx,
                                          SmallVectorImpl<MCFixup> &Fixups,
                                          const MCSubtargetInfo &STI) const;
  uint32_t EncodeZPR4StridedRegisterClass(const MCInst &MI, unsigned OpIdx,
                                          SmallVectorImpl<MCFixup> &Fixups,
                                          const MCSubtargetInfo &STI) const;

  uint32_t EncodeMatrixTileListRegisterClass(const MCInst &MI, unsigned OpIdx,
                                             SmallVectorImpl<MCFixup> &Fixups,
                                             const MCSubtargetInfo &STI) const;
  template <unsigned BaseReg>
  uint32_t encodeMatrixIndexGPR32(const MCInst &MI, unsigned OpIdx,
                                  SmallVectorImpl<MCFixup> &Fixups,
                                  const MCSubtargetInfo &STI) const;
};

} // end anonymous namespace

/// getMachineOpValue - Return binary encoding of operand. If the machine
/// operand requires relocation, record the relocation and return zero.
unsigned
AArch64MCCodeEmitter::getMachineOpValue(const MCInst &MI, const MCOperand &MO,
                                        SmallVectorImpl<MCFixup> &Fixups,
                                        const MCSubtargetInfo &STI) const {
  if (MO.isReg())
    return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg());

  assert(MO.isImm() && "did not expect relocated expression");
  return static_cast<unsigned>(MO.getImm());
}

template<unsigned FixupKind> uint32_t
AArch64MCCodeEmitter::getLdStUImm12OpValue(const MCInst &MI, unsigned OpIdx,
                                           SmallVectorImpl<MCFixup> &Fixups,
                                           const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  uint32_t ImmVal = 0;

  if (MO.isImm())
    ImmVal = static_cast<uint32_t>(MO.getImm());
  else {
    assert(MO.isExpr() && "unable to encode load/store imm operand");
    MCFixupKind Kind = MCFixupKind(FixupKind);
    Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));
    ++MCNumFixups;
  }

  return ImmVal;
}

/// getAdrLabelOpValue - Return encoding info for 21-bit immediate ADR label
/// target.
uint32_t
AArch64MCCodeEmitter::getAdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
                                         SmallVectorImpl<MCFixup> &Fixups,
                                         const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  // If the destination is an immediate, we have nothing to do.
  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected target type!");
  const MCExpr *Expr = MO.getExpr();

  MCFixupKind Kind = MI.getOpcode() == AArch64::ADR
                         ? MCFixupKind(AArch64::fixup_aarch64_pcrel_adr_imm21)
                         : MCFixupKind(AArch64::fixup_aarch64_pcrel_adrp_imm21);
  Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));

  MCNumFixups += 1;

  // All of the information is in the fixup.
  return 0;
}

/// getAddSubImmOpValue - Return encoding for the 12-bit immediate value and
/// the 2-bit shift field. The shift field is stored in bits 13-14 of the
/// return value.
uint32_t
AArch64MCCodeEmitter::getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
                                          SmallVectorImpl<MCFixup> &Fixups,
                                          const MCSubtargetInfo &STI) const {
  // Suboperands are [imm, shifter].
  const MCOperand &MO = MI.getOperand(OpIdx);
  const MCOperand &MO1 = MI.getOperand(OpIdx + 1);
  assert(AArch64_AM::getShiftType(MO1.getImm()) == AArch64_AM::LSL &&
         "unexpected shift type for add/sub immediate");
  unsigned ShiftVal = AArch64_AM::getShiftValue(MO1.getImm());
  assert((ShiftVal == 0 || ShiftVal == 12) &&
         "unexpected shift value for add/sub immediate");
  if (MO.isImm())
    return MO.getImm() | (ShiftVal == 0 ? 0 : (1 << ShiftVal));
  assert(MO.isExpr() && "Unable to encode MCOperand!");
  const MCExpr *Expr = MO.getExpr();

  // Encode the 12 bits of the fixup.
  MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_add_imm12);
  Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));

  ++MCNumFixups;

  // Set the shift bit of the add instruction for relocation types
  // R_AARCH64_TLSLE_ADD_TPREL_HI12 and R_AARCH64_TLSLD_ADD_DTPREL_HI12.
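  // For example, a VK_TPREL_HI12 reference addresses the high 12 bits of a
  // TLS offset, so the emitted ADD must use its LSL #12 form even though no
  // explicit shift operand is present in the MCInst.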
  if (const AArch64MCExpr *A64E = dyn_cast<AArch64MCExpr>(Expr)) {
    AArch64MCExpr::VariantKind RefKind = A64E->getKind();
    if (RefKind == AArch64MCExpr::VK_TPREL_HI12 ||
        RefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
        RefKind == AArch64MCExpr::VK_SECREL_HI12)
      ShiftVal = 12;
  }
  return ShiftVal == 0 ? 0 : (1 << ShiftVal);
}

/// getCondBranchTargetOpValue - Return the encoded value for a conditional
/// branch target.
uint32_t AArch64MCCodeEmitter::getCondBranchTargetOpValue(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  // If the destination is an immediate, we have nothing to do.
  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected target type!");

  MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_pcrel_branch19);
  Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));

  ++MCNumFixups;

  // All of the information is in the fixup.
  return 0;
}

/// getCondCompBranchTargetOpValue - Return the encoded value for a conditional
/// compare-and-branch target.
uint32_t AArch64MCCodeEmitter::getCondCompBranchTargetOpValue(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  // If the destination is an immediate, we have nothing to do.
  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected target type!");

  MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_pcrel_branch9);
  Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));

  ++MCNumFixups;

  // All of the information is in the fixup.
  return 0;
}

/// getPAuthPCRelOpValue - Return the encoded value for a pointer
/// authentication pc-relative operand.
uint32_t
AArch64MCCodeEmitter::getPAuthPCRelOpValue(const MCInst &MI, unsigned OpIdx,
                                           SmallVectorImpl<MCFixup> &Fixups,
                                           const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  // If the destination is an immediate, invert sign as it's a negative value
  // that should be encoded as unsigned
  if (MO.isImm())
    return -(MO.getImm());
  assert(MO.isExpr() && "Unexpected target type!");

  MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_pcrel_branch16);
  Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));

  ++MCNumFixups;

  // All of the information is in the fixup.
  return 0;
}

/// getLoadLiteralOpValue - Return the encoded value for a load-literal
/// pc-relative address.
uint32_t
AArch64MCCodeEmitter::getLoadLiteralOpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  // If the destination is an immediate, we have nothing to do.
  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected target type!");

  MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_ldr_pcrel_imm19);
  Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));

  ++MCNumFixups;

  // All of the information is in the fixup.
  return 0;
}

uint32_t
AArch64MCCodeEmitter::getMemExtendOpValue(const MCInst &MI, unsigned OpIdx,
                                          SmallVectorImpl<MCFixup> &Fixups,
                                          const MCSubtargetInfo &STI) const {
  unsigned SignExtend = MI.getOperand(OpIdx).getImm();
  unsigned DoShift = MI.getOperand(OpIdx + 1).getImm();
  return (SignExtend << 1) | DoShift;
}

uint32_t
AArch64MCCodeEmitter::getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected movz/movk immediate");

  Fixups.push_back(MCFixup::create(
      0, MO.getExpr(), MCFixupKind(AArch64::fixup_aarch64_movw), MI.getLoc()));

  ++MCNumFixups;

  return 0;
}

/// getTestBranchTargetOpValue - Return the encoded value for a test-bit-and-
/// branch target.
uint32_t AArch64MCCodeEmitter::getTestBranchTargetOpValue(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  // If the destination is an immediate, we have nothing to do.
  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected ADR target type!");

  MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_pcrel_branch14);
  Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));

  ++MCNumFixups;

  // All of the information is in the fixup.
  return 0;
}

/// getBranchTargetOpValue - Return the encoded value for an unconditional
/// branch target.
uint32_t
AArch64MCCodeEmitter::getBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
                                             SmallVectorImpl<MCFixup> &Fixups,
                                             const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  // If the destination is an immediate, we have nothing to do.
  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected ADR target type!");

  MCFixupKind Kind = MI.getOpcode() == AArch64::BL
                         ? MCFixupKind(AArch64::fixup_aarch64_pcrel_call26)
                         : MCFixupKind(AArch64::fixup_aarch64_pcrel_branch26);
  Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));

  ++MCNumFixups;

  // All of the information is in the fixup.
  return 0;
}

/// getVecShifterOpValue - Return the encoded value for the vector shifter:
///
///   00 -> 0
///   01 -> 8
///   10 -> 16
///   11 -> 24
uint32_t
AArch64MCCodeEmitter::getVecShifterOpValue(const MCInst &MI, unsigned OpIdx,
                                           SmallVectorImpl<MCFixup> &Fixups,
                                           const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the shift amount!");

  switch (MO.getImm()) {
  default:
    break;
  case 0:
    return 0;
  case 8:
    return 1;
  case 16:
    return 2;
  case 24:
    return 3;
  }

  llvm_unreachable("Invalid value for vector shift amount!");
}

/// getFixedPointScaleOpValue - Return the encoded value for the
/// FP-to-fixed-point scale factor.
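/// The scale is encoded as 64 minus the immediate, e.g. an fbits operand of 3
/// encodes as 61.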
uint32_t AArch64MCCodeEmitter::getFixedPointScaleOpValue(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return 64 - MO.getImm();
}

uint32_t
AArch64MCCodeEmitter::getVecShiftR64OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return 64 - MO.getImm();
}

uint32_t
AArch64MCCodeEmitter::getVecShiftR32OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return 32 - MO.getImm();
}

uint32_t
AArch64MCCodeEmitter::getVecShiftR16OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return 16 - MO.getImm();
}

uint32_t
AArch64MCCodeEmitter::getVecShiftR8OpValue(const MCInst &MI, unsigned OpIdx,
                                           SmallVectorImpl<MCFixup> &Fixups,
                                           const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return 8 - MO.getImm();
}

uint32_t
AArch64MCCodeEmitter::getVecShiftL64OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return MO.getImm() - 64;
}

uint32_t
AArch64MCCodeEmitter::getVecShiftL32OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return MO.getImm() - 32;
}

uint32_t
AArch64MCCodeEmitter::getVecShiftL16OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return MO.getImm() - 16;
}

uint32_t
AArch64MCCodeEmitter::getVecShiftL8OpValue(const MCInst &MI, unsigned OpIdx,
                                           SmallVectorImpl<MCFixup> &Fixups,
                                           const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return MO.getImm() - 8;
}

template <unsigned Multiple, unsigned Min, unsigned Max>
uint32_t
AArch64MCCodeEmitter::EncodeRegMul_MinMax(const MCInst &MI, unsigned OpIdx,
                                          SmallVectorImpl<MCFixup> &Fixups,
                                          const MCSubtargetInfo &STI) const {
  assert(llvm::isPowerOf2_32(Multiple) && "Multiple is not a power of 2");
  auto RegOpnd = MI.getOperand(OpIdx).getReg();
  unsigned RegVal = Ctx.getRegisterInfo()->getEncodingValue(RegOpnd);
  assert(RegVal >= Min && RegVal <= Max && (RegVal & (Multiple - 1)) == 0);
  return (RegVal - Min) / Multiple;
}

// Zk is the name of the control vector register Z20-Z23 or Z28-Z31, encoded in
// the "K:Zk" fields. Z20-Z23 = 000, 001, 010, 011 and Z28-Z31 = 100, 101, 110,
// 111.
uint32_t AArch64MCCodeEmitter::EncodeZK(const MCInst &MI, unsigned OpIdx,
                                        SmallVectorImpl<MCFixup> &Fixups,
                                        const MCSubtargetInfo &STI) const {
  auto RegOpnd = MI.getOperand(OpIdx).getReg();
  unsigned RegVal = Ctx.getRegisterInfo()->getEncodingValue(RegOpnd);

  // Z28-Z31 => Reg is in 4..7 (offset 24)
  if (RegOpnd > AArch64::Z27)
    return (RegVal - 24);

  assert((RegOpnd > AArch64::Z19 && RegOpnd < AArch64::Z24) &&
         "Expected ZK in Z20..Z23 or Z28..Z31");
  // Z20-Z23 => Reg is in 0..3 (offset 20)
  return (RegVal - 20);
}

uint32_t
AArch64MCCodeEmitter::EncodePNR_p8to15(const MCInst &MI, unsigned OpIdx,
                                       SmallVectorImpl<MCFixup> &Fixups,
                                       const MCSubtargetInfo &STI) const {
  auto RegOpnd = MI.getOperand(OpIdx).getReg();
  return RegOpnd - AArch64::PN8;
}

uint32_t AArch64MCCodeEmitter::EncodeZPR2StridedRegisterClass(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  auto RegOpnd = MI.getOperand(OpIdx).getReg();
  unsigned RegVal = Ctx.getRegisterInfo()->getEncodingValue(RegOpnd);
  unsigned T = (RegVal & 0x10) >> 1;
  unsigned Zt = RegVal & 0x7;
  return T | Zt;
}

uint32_t AArch64MCCodeEmitter::EncodeZPR4StridedRegisterClass(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  auto RegOpnd = MI.getOperand(OpIdx).getReg();
  unsigned RegVal = Ctx.getRegisterInfo()->getEncodingValue(RegOpnd);
  unsigned T = (RegVal & 0x10) >> 2;
  unsigned Zt = RegVal & 0x3;
  return T | Zt;
}

uint32_t AArch64MCCodeEmitter::EncodeMatrixTileListRegisterClass(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  unsigned RegMask = MI.getOperand(OpIdx).getImm();
  assert(RegMask <= 0xFF && "Invalid register mask!");
  return RegMask;
}

template <unsigned BaseReg>
uint32_t
AArch64MCCodeEmitter::encodeMatrixIndexGPR32(const MCInst &MI, unsigned OpIdx,
                                             SmallVectorImpl<MCFixup> &Fixups,
                                             const MCSubtargetInfo &STI) const {
  auto RegOpnd = MI.getOperand(OpIdx).getReg();
  return RegOpnd - BaseReg;
}

uint32_t
AArch64MCCodeEmitter::getImm8OptLsl(const MCInst &MI, unsigned OpIdx,
                                    SmallVectorImpl<MCFixup> &Fixups,
                                    const MCSubtargetInfo &STI) const {
  // Test shift
  auto ShiftOpnd = MI.getOperand(OpIdx + 1).getImm();
  assert(AArch64_AM::getShiftType(ShiftOpnd) == AArch64_AM::LSL &&
         "Unexpected shift type for imm8_opt_lsl immediate.");

  unsigned ShiftVal = AArch64_AM::getShiftValue(ShiftOpnd);
  assert((ShiftVal == 0 || ShiftVal == 8) &&
         "Unexpected shift value for imm8_opt_lsl immediate.");

  // Test immediate
  auto Immediate = MI.getOperand(OpIdx).getImm();
  return (Immediate & 0xff) | (ShiftVal == 0 ? 0 : (1 << ShiftVal));
}

uint32_t
AArch64MCCodeEmitter::getSVEIncDecImm(const MCInst &MI, unsigned OpIdx,
                                      SmallVectorImpl<MCFixup> &Fixups,
                                      const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value!");
  // Normalize 1-16 range to 0-15.
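  // e.g. an immediate of 1 encodes as 0 and an immediate of 16 encodes as 15.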
  return MO.getImm() - 1;
}

/// getMoveVecShifterOpValue - Return the encoded value for the vector move
/// shifter (MSL).
uint32_t AArch64MCCodeEmitter::getMoveVecShifterOpValue(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() &&
         "Expected an immediate value for the move shift amount!");
  unsigned ShiftVal = AArch64_AM::getShiftValue(MO.getImm());
  assert((ShiftVal == 8 || ShiftVal == 16) && "Invalid shift amount!");
  return ShiftVal == 8 ? 0 : 1;
}

unsigned AArch64MCCodeEmitter::fixMOVZ(const MCInst &MI, unsigned EncodedValue,
                                       const MCSubtargetInfo &STI) const {
  // If one of the signed fixup kinds is applied to a MOVZ instruction, the
  // eventual result could be either a MOVZ or a MOVN. It's the MCCodeEmitter's
  // job to ensure that any bits possibly affected by this are 0. This means we
  // must zero out bit 30 (essentially emitting a MOVN).
  MCOperand UImm16MO = MI.getOperand(1);

  // Nothing to do if there's no fixup.
  if (UImm16MO.isImm())
    return EncodedValue;

  const MCExpr *E = UImm16MO.getExpr();
  if (const AArch64MCExpr *A64E = dyn_cast<AArch64MCExpr>(E)) {
    switch (A64E->getKind()) {
    case AArch64MCExpr::VK_DTPREL_G2:
    case AArch64MCExpr::VK_DTPREL_G1:
    case AArch64MCExpr::VK_DTPREL_G0:
    case AArch64MCExpr::VK_GOTTPREL_G1:
    case AArch64MCExpr::VK_TPREL_G2:
    case AArch64MCExpr::VK_TPREL_G1:
    case AArch64MCExpr::VK_TPREL_G0:
      return EncodedValue & ~(1u << 30);
    default:
      // Nothing to do for an unsigned fixup.
      return EncodedValue;
    }
  }

  return EncodedValue;
}

void AArch64MCCodeEmitter::encodeInstruction(const MCInst &MI,
                                             SmallVectorImpl<char> &CB,
                                             SmallVectorImpl<MCFixup> &Fixups,
                                             const MCSubtargetInfo &STI) const {
  if (MI.getOpcode() == AArch64::TLSDESCCALL) {
    // This is a directive which applies an R_AARCH64_TLSDESC_CALL to the
    // following (BLR) instruction. It doesn't emit any code itself so it
    // doesn't go through the normal TableGenerated channels.
    auto Reloc = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32
                     ? ELF::R_AARCH64_P32_TLSDESC_CALL
                     : ELF::R_AARCH64_TLSDESC_CALL;
    Fixups.push_back(
        MCFixup::create(0, MI.getOperand(0).getExpr(),
                        MCFixupKind(FirstLiteralRelocationKind + Reloc)));
    return;
  }

  if (MI.getOpcode() == AArch64::SPACE) {
    // SPACE just increases basic block size; it emits no actual code.
    return;
  }

  uint64_t Binary = getBinaryCodeForInstr(MI, Fixups, STI);
  support::endian::write<uint32_t>(CB, Binary, llvm::endianness::little);
  ++MCNumEmitted; // Keep track of the # of mi's emitted.
}

unsigned
AArch64MCCodeEmitter::fixMulHigh(const MCInst &MI,
                                 unsigned EncodedValue,
                                 const MCSubtargetInfo &STI) const {
  // The Ra field of SMULH and UMULH is unused: it should be assembled as 31
  // (i.e. all bits 1) but is ignored by the processor.
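  // Ra occupies bits [14:10] of the encoding, so OR in 0b11111 at bit 10.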
  EncodedValue |= 0x1f << 10;
  return EncodedValue;
}

template<int hasRs, int hasRt2> unsigned
AArch64MCCodeEmitter::fixLoadStoreExclusive(const MCInst &MI,
                                            unsigned EncodedValue,
                                            const MCSubtargetInfo &STI) const {
  if (!hasRs) EncodedValue |= 0x001F0000;
  if (!hasRt2) EncodedValue |= 0x00007C00;

  return EncodedValue;
}

unsigned AArch64MCCodeEmitter::fixOneOperandFPComparison(
    const MCInst &MI, unsigned EncodedValue, const MCSubtargetInfo &STI) const {
  // The Rm field of FCMP and friends is unused - it should be assembled
  // as 0, but is ignored by the processor.
  EncodedValue &= ~(0x1f << 16);
  return EncodedValue;
}

#include "AArch64GenMCCodeEmitter.inc"

MCCodeEmitter *llvm::createAArch64MCCodeEmitter(const MCInstrInfo &MCII,
                                                MCContext &Ctx) {
  return new AArch64MCCodeEmitter(MCII, Ctx);
}