//===-- AMDGPUDisassembler.cpp - Disassembler for AMDGPU ISA -------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//
//
/// \file
///
/// This file contains the definition of the AMDGPU ISA disassembler.
//
//===----------------------------------------------------------------------===//

// ToDo: What to do with instruction suffixes (v_mov_b32 vs v_mov_b32_e32)?

#include "AMDGPUDisassembler.h"
#include "AMDGPU.h"
#include "AMDGPURegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIDefines.h"
#include "Utils/AMDGPUBaseInfo.h"

#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCFixedLenDisassembler.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/TargetRegistry.h"

using namespace llvm;

#define DEBUG_TYPE "amdgpu-disassembler"

typedef llvm::MCDisassembler::DecodeStatus DecodeStatus;

inline static MCDisassembler::DecodeStatus
addOperand(MCInst &Inst, const MCOperand& Opnd) {
  Inst.addOperand(Opnd);
  return Opnd.isValid() ?
    MCDisassembler::Success :
    MCDisassembler::SoftFail;
}

static int insertNamedMCOperand(MCInst &MI, const MCOperand &Op,
                                uint16_t NameIdx) {
  int OpIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), NameIdx);
  if (OpIdx != -1) {
    auto I = MI.begin();
    std::advance(I, OpIdx);
    MI.insert(I, Op);
  }
  return OpIdx;
}

static DecodeStatus decodeSoppBrTarget(MCInst &Inst, unsigned Imm,
                                       uint64_t Addr, const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);

  APInt SignedOffset(18, Imm * 4, true);
  int64_t Offset = (SignedOffset.sext(64) + 4 + Addr).getSExtValue();

  if (DAsm->tryAddingSymbolicOperand(Inst, Offset, Addr, true, 2, 2))
    return MCDisassembler::Success;
  return addOperand(Inst, MCOperand::createImm(Imm));
}

#define DECODE_OPERAND(StaticDecoderName, DecoderName) \
static DecodeStatus StaticDecoderName(MCInst &Inst, \
                                      unsigned Imm, \
                                      uint64_t /*Addr*/, \
                                      const void *Decoder) { \
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder); \
  return addOperand(Inst, DAsm->DecoderName(Imm)); \
}

#define DECODE_OPERAND_REG(RegClass) \
DECODE_OPERAND(Decode##RegClass##RegisterClass, decodeOperand_##RegClass)

DECODE_OPERAND_REG(VGPR_32)
DECODE_OPERAND_REG(VS_32)
DECODE_OPERAND_REG(VS_64)
DECODE_OPERAND_REG(VS_128)

DECODE_OPERAND_REG(VReg_64)
DECODE_OPERAND_REG(VReg_96)
DECODE_OPERAND_REG(VReg_128)

DECODE_OPERAND_REG(SReg_32)
DECODE_OPERAND_REG(SReg_32_XM0_XEXEC)
DECODE_OPERAND_REG(SReg_32_XEXEC_HI)
DECODE_OPERAND_REG(SReg_64)
DECODE_OPERAND_REG(SReg_64_XEXEC)
DECODE_OPERAND_REG(SReg_128)
DECODE_OPERAND_REG(SReg_256)
DECODE_OPERAND_REG(SReg_512)
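// For reference, a sketch of what DECODE_OPERAND_REG(VGPR_32) expands to
// (this follows mechanically from the two macros above):
//
//   static DecodeStatus DecodeVGPR_32RegisterClass(MCInst &Inst,
//                                                  unsigned Imm,
//                                                  uint64_t /*Addr*/,
//                                                  const void *Decoder) {
//     auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
//     return addOperand(Inst, DAsm->decodeOperand_VGPR_32(Imm));
//   }
//
// The generated tables in AMDGPUGenDisassemblerTables.inc refer to these
// Decode*RegisterClass entry points by name.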
static DecodeStatus decodeOperand_VSrc16(MCInst &Inst,
                                         unsigned Imm,
                                         uint64_t Addr,
                                         const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VSrc16(Imm));
}

static DecodeStatus decodeOperand_VSrcV216(MCInst &Inst,
                                           unsigned Imm,
                                           uint64_t Addr,
                                           const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VSrcV216(Imm));
}

#define DECODE_SDWA(DecName) \
DECODE_OPERAND(decodeSDWA##DecName, decodeSDWA##DecName)

DECODE_SDWA(Src32)
DECODE_SDWA(Src16)
DECODE_SDWA(VopcDst)

#include "AMDGPUGenDisassemblerTables.inc"

//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//

template <typename T> static inline T eatBytes(ArrayRef<uint8_t>& Bytes) {
  assert(Bytes.size() >= sizeof(T));
  const auto Res = support::endian::read<T, support::endianness::little>(Bytes.data());
  Bytes = Bytes.slice(sizeof(T));
  return Res;
}

DecodeStatus AMDGPUDisassembler::tryDecodeInst(const uint8_t* Table,
                                               MCInst &MI,
                                               uint64_t Inst,
                                               uint64_t Address) const {
  assert(MI.getOpcode() == 0);
  assert(MI.getNumOperands() == 0);
  MCInst TmpInst;
  HasLiteral = false;
  const auto SavedBytes = Bytes;
  if (decodeInstruction(Table, TmpInst, Inst, Address, this, STI)) {
    MI = TmpInst;
    return MCDisassembler::Success;
  }
  Bytes = SavedBytes;
  return MCDisassembler::Fail;
}
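// A minimal sketch of how getInstruction (below) drives eatBytes and
// tryDecodeInst (the byte window here is hypothetical):
//
//   ArrayRef<uint8_t> Bytes = ...;                 // instruction bytes
//   const uint64_t QW = eatBytes<uint64_t>(Bytes); // reads 8 little-endian
//                                                  // bytes, advances Bytes
//   if (tryDecodeInst(DecoderTableDPP64, MI, QW, Address)) ...
//
// On failure tryDecodeInst restores Bytes, so the caller can retry the same
// window against another decoder table.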
DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
                                                ArrayRef<uint8_t> Bytes_,
                                                uint64_t Address,
                                                raw_ostream &WS,
                                                raw_ostream &CS) const {
  CommentStream = &CS;
  bool IsSDWA = false;

  // ToDo: AMDGPUDisassembler supports only VI ISA.
  if (!STI.getFeatureBits()[AMDGPU::FeatureGCN3Encoding])
    report_fatal_error("Disassembly not yet supported for subtarget");

  const unsigned MaxInstBytesNum = (std::min)((size_t)8, Bytes_.size());
  Bytes = Bytes_.slice(0, MaxInstBytesNum);

  DecodeStatus Res = MCDisassembler::Fail;
  do {
    // ToDo: it would be better to switch on the encoding length using some
    // bit predicate, but such a predicate is not known yet, so try everything
    // we can.

    // Try to decode DPP and SDWA first to solve the conflict with VOP1 and
    // VOP2 encodings.
    if (Bytes.size() >= 8) {
      const uint64_t QW = eatBytes<uint64_t>(Bytes);
      Res = tryDecodeInst(DecoderTableDPP64, MI, QW, Address);
      if (Res) break;

      Res = tryDecodeInst(DecoderTableSDWA64, MI, QW, Address);
      if (Res) { IsSDWA = true; break; }

      Res = tryDecodeInst(DecoderTableSDWA964, MI, QW, Address);
      if (Res) { IsSDWA = true; break; }
    }

    // Reinitialize Bytes as DPP64 could have eaten too much.
    Bytes = Bytes_.slice(0, MaxInstBytesNum);

    // Try to decode a 32-bit instruction.
    if (Bytes.size() < 4) break;
    const uint32_t DW = eatBytes<uint32_t>(Bytes);
    Res = tryDecodeInst(DecoderTableVI32, MI, DW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableAMDGPU32, MI, DW, Address);
    if (Res) break;

    if (Bytes.size() < 4) break;
    const uint64_t QW = ((uint64_t)eatBytes<uint32_t>(Bytes) << 32) | DW;
    Res = tryDecodeInst(DecoderTableVI64, MI, QW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableAMDGPU64, MI, QW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableGFX964, MI, QW, Address);
  } while (false);

  if (Res && (MI.getOpcode() == AMDGPU::V_MAC_F32_e64_vi ||
              MI.getOpcode() == AMDGPU::V_MAC_F32_e64_si ||
              MI.getOpcode() == AMDGPU::V_MAC_F16_e64_vi)) {
    // Insert dummy unused src2_modifiers.
    insertNamedMCOperand(MI, MCOperand::createImm(0),
                         AMDGPU::OpName::src2_modifiers);
  }

  if (Res && IsSDWA)
    Res = convertSDWAInst(MI);

  Size = Res ? (MaxInstBytesNum - Bytes.size()) : 0;
  return Res;
}

DecodeStatus AMDGPUDisassembler::convertSDWAInst(MCInst &MI) const {
  if (STI.getFeatureBits()[AMDGPU::FeatureGFX9]) {
    if (AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst) != -1)
      // VOPC - insert clamp
      insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::clamp);
  } else if (STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands]) {
    int SDst = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst);
    if (SDst != -1) {
      // VOPC - insert VCC register as sdst
      insertNamedMCOperand(MI, MCOperand::createReg(AMDGPU::VCC),
                           AMDGPU::OpName::sdst);
    } else {
      // VOP1/2 - insert omod if present in instruction
      insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::omod);
    }
  }
  return MCDisassembler::Success;
}

const char* AMDGPUDisassembler::getRegClassName(unsigned RegClassID) const {
  return getContext().getRegisterInfo()->
    getRegClassName(&AMDGPUMCRegisterClasses[RegClassID]);
}

inline
MCOperand AMDGPUDisassembler::errOperand(unsigned V,
                                         const Twine& ErrMsg) const {
  *CommentStream << "Error: " + ErrMsg;

  // ToDo: add support for error operands to MCInst.h
  // return MCOperand::createError(V);
  return MCOperand();
}

inline
MCOperand AMDGPUDisassembler::createRegOperand(unsigned int RegId) const {
  return MCOperand::createReg(RegId);
}

inline
MCOperand AMDGPUDisassembler::createRegOperand(unsigned RegClassID,
                                               unsigned Val) const {
  const auto& RegCl = AMDGPUMCRegisterClasses[RegClassID];
  if (Val >= RegCl.getNumRegs())
    return errOperand(Val, Twine(getRegClassName(RegClassID)) +
                      ": unknown register " + Twine(Val));
  return createRegOperand(RegCl.getRegister(Val));
}

inline
MCOperand AMDGPUDisassembler::createSRegOperand(unsigned SRegClassID,
                                                unsigned Val) const {
  // ToDo: SI/CI have 104 SGPRs, VI - 102
  // Valery: here we accept as much as we can and let the assembler sort it out
  int shift = 0;
  switch (SRegClassID) {
  case AMDGPU::SGPR_32RegClassID:
  case AMDGPU::TTMP_32RegClassID:
    break;
  case AMDGPU::SGPR_64RegClassID:
  case AMDGPU::TTMP_64RegClassID:
    shift = 1;
    break;
  case AMDGPU::SGPR_128RegClassID:
  case AMDGPU::TTMP_128RegClassID:
  // ToDo: unclear if s[100:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  case AMDGPU::SReg_256RegClassID:
  // ToDo: unclear if s[96:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  case AMDGPU::SReg_512RegClassID:
    shift = 2;
    break;
  // ToDo: unclear if s[88:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  default:
    llvm_unreachable("unhandled register class");
  }

  if (Val % (1 << shift)) {
    *CommentStream << "Warning: " << getRegClassName(SRegClassID)
                   << ": scalar reg isn't aligned " << Val;
  }

  return createRegOperand(SRegClassID, Val >> shift);
}
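// A worked example of the shift logic above: for an SGPR_64 operand, the
// encoded value is the number of the first 32-bit register of the pair, so
// Val = 4 with shift = 1 selects entry 2 of the 64-bit class, i.e. s[4:5].
// An odd Val (a misaligned pair) only emits a warning here and is left for
// the assembler to reject.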
MCOperand AMDGPUDisassembler::decodeOperand_VS_32(unsigned Val) const {
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_128(unsigned Val) const {
  return decodeSrcOp(OPW128, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VSrc16(unsigned Val) const {
  return decodeSrcOp(OPW16, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VSrcV216(unsigned Val) const {
  return decodeSrcOp(OPWV216, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VGPR_32(unsigned Val) const {
  // Some instructions have operand restrictions beyond what the encoding
  // allows. Some ordinarily VSrc_32 operands are VGPR_32, so clear the extra
  // high bit.
  Val &= 255;

  return createRegOperand(AMDGPU::VGPR_32RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_64(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_64RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_96(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_96RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_128(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_128RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32(unsigned Val) const {
  // The table-gen generated disassembler doesn't care about operand types,
  // leaving only the register class, so an SSrc_32 operand turns into
  // SReg_32, and therefore we accept immediates and literals here as well.
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32_XM0_XEXEC(
  unsigned Val) const {
  // SReg_32_XM0_XEXEC is SReg_32 without M0 or EXEC_LO/EXEC_HI
  return decodeOperand_SReg_32(Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32_XEXEC_HI(
  unsigned Val) const {
  // SReg_32_XEXEC_HI is SReg_32 without EXEC_HI
  return decodeOperand_SReg_32(Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_64_XEXEC(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_128(unsigned Val) const {
  return decodeSrcOp(OPW128, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_256(unsigned Val) const {
  return createSRegOperand(AMDGPU::SReg_256RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_512(unsigned Val) const {
  return createSRegOperand(AMDGPU::SReg_512RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeLiteralConstant() const {
  // For now all literal constants are supposed to be unsigned integer
  // ToDo: deal with signed/unsigned 64-bit integer constants
  // ToDo: deal with float/double constants
  if (!HasLiteral) {
    if (Bytes.size() < 4) {
      return errOperand(0, "cannot read literal, inst bytes left " +
                        Twine(Bytes.size()));
    }
    HasLiteral = true;
    Literal = eatBytes<uint32_t>(Bytes);
  }
  return MCOperand::createImm(Literal);
}

MCOperand AMDGPUDisassembler::decodeIntImmed(unsigned Imm) {
  using namespace AMDGPU::EncValues;
  assert(Imm >= INLINE_INTEGER_C_MIN && Imm <= INLINE_INTEGER_C_MAX);
  return MCOperand::createImm((Imm <= INLINE_INTEGER_C_POSITIVE_MAX) ?
    (static_cast<int64_t>(Imm) - INLINE_INTEGER_C_MIN) :
    (INLINE_INTEGER_C_POSITIVE_MAX - static_cast<int64_t>(Imm)));
    // Cast prevents negative overflow.
}
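// A worked example of the mapping above, assuming the usual encoding values
// (INLINE_INTEGER_C_MIN = 128, INLINE_INTEGER_C_POSITIVE_MAX = 192):
//   Imm = 128 -> 128 - 128 =  0
//   Imm = 129 -> 129 - 128 =  1   (positive range, up to 64 at Imm = 192)
//   Imm = 193 -> 192 - 193 = -1   (negative range, down to -16 at Imm = 208)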
static int64_t getInlineImmVal32(unsigned Imm) {
  switch (Imm) {
  case 240:
    return FloatToBits(0.5f);
  case 241:
    return FloatToBits(-0.5f);
  case 242:
    return FloatToBits(1.0f);
  case 243:
    return FloatToBits(-1.0f);
  case 244:
    return FloatToBits(2.0f);
  case 245:
    return FloatToBits(-2.0f);
  case 246:
    return FloatToBits(4.0f);
  case 247:
    return FloatToBits(-4.0f);
  case 248: // 1 / (2 * PI)
    return 0x3e22f983;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

static int64_t getInlineImmVal64(unsigned Imm) {
  switch (Imm) {
  case 240:
    return DoubleToBits(0.5);
  case 241:
    return DoubleToBits(-0.5);
  case 242:
    return DoubleToBits(1.0);
  case 243:
    return DoubleToBits(-1.0);
  case 244:
    return DoubleToBits(2.0);
  case 245:
    return DoubleToBits(-2.0);
  case 246:
    return DoubleToBits(4.0);
  case 247:
    return DoubleToBits(-4.0);
  case 248: // 1 / (2 * PI)
    return 0x3fc45f306dc9c882;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

static int64_t getInlineImmVal16(unsigned Imm) {
  switch (Imm) {
  case 240:
    return 0x3800;
  case 241:
    return 0xB800;
  case 242:
    return 0x3C00;
  case 243:
    return 0xBC00;
  case 244:
    return 0x4000;
  case 245:
    return 0xC000;
  case 246:
    return 0x4400;
  case 247:
    return 0xC400;
  case 248: // 1 / (2 * PI)
    return 0x3118;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}
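// The 16-bit values above are the IEEE half-precision bit patterns for the
// same constants as the 32/64-bit tables: e.g. 0x3800 is 0.5, 0xB800 is
// -0.5, 0x3C00 is 1.0, and 0x3118 approximates 1/(2*pi).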
MCOperand AMDGPUDisassembler::decodeFPImmed(OpWidthTy Width, unsigned Imm) {
  assert(Imm >= AMDGPU::EncValues::INLINE_FLOATING_C_MIN
      && Imm <= AMDGPU::EncValues::INLINE_FLOATING_C_MAX);

  // ToDo: case 248: 1/(2*PI) - is allowed only on VI
  switch (Width) {
  case OPW32:
    return MCOperand::createImm(getInlineImmVal32(Imm));
  case OPW64:
    return MCOperand::createImm(getInlineImmVal64(Imm));
  case OPW16:
  case OPWV216:
    return MCOperand::createImm(getInlineImmVal16(Imm));
  default:
    llvm_unreachable("implement me");
  }
}

unsigned AMDGPUDisassembler::getVgprClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;
  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall through
  case OPW32:
  case OPW16:
  case OPWV216:
    return VGPR_32RegClassID;
  case OPW64: return VReg_64RegClassID;
  case OPW128: return VReg_128RegClassID;
  }
}

unsigned AMDGPUDisassembler::getSgprClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;
  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall through
  case OPW32:
  case OPW16:
  case OPWV216:
    return SGPR_32RegClassID;
  case OPW64: return SGPR_64RegClassID;
  case OPW128: return SGPR_128RegClassID;
  }
}

unsigned AMDGPUDisassembler::getTtmpClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;
  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall through
  case OPW32:
  case OPW16:
  case OPWV216:
    return TTMP_32RegClassID;
  case OPW64: return TTMP_64RegClassID;
  case OPW128: return TTMP_128RegClassID;
  }
}
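// decodeSrcOp below interprets a 9-bit source operand encoding. Roughly (the
// exact bounds come from AMDGPU::EncValues): values up to SGPR_MAX select
// SGPRs, TTMP_MIN..TTMP_MAX select trap temporary registers,
// INLINE_INTEGER_C_MIN..INLINE_INTEGER_C_MAX are inline integer constants,
// INLINE_FLOATING_C_MIN..INLINE_FLOATING_C_MAX are inline floating-point
// constants, LITERAL_CONST marks a 32-bit literal following the instruction,
// and VGPR_MIN..VGPR_MAX select VGPRs. Everything else falls through to the
// special-register decoders.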
MCOperand AMDGPUDisassembler::decodeSrcOp(const OpWidthTy Width, unsigned Val) const {
  using namespace AMDGPU::EncValues;
  assert(Val < 512); // enum9

  if (VGPR_MIN <= Val && Val <= VGPR_MAX) {
    return createRegOperand(getVgprClassId(Width), Val - VGPR_MIN);
  }
  if (Val <= SGPR_MAX) {
    assert(SGPR_MIN == 0); // "SGPR_MIN <= Val" would always be true and cause
                           // a compiler warning.
    return createSRegOperand(getSgprClassId(Width), Val - SGPR_MIN);
  }
  if (TTMP_MIN <= Val && Val <= TTMP_MAX) {
    return createSRegOperand(getTtmpClassId(Width), Val - TTMP_MIN);
  }

  if (INLINE_INTEGER_C_MIN <= Val && Val <= INLINE_INTEGER_C_MAX)
    return decodeIntImmed(Val);

  if (INLINE_FLOATING_C_MIN <= Val && Val <= INLINE_FLOATING_C_MAX)
    return decodeFPImmed(Width, Val);

  if (Val == LITERAL_CONST)
    return decodeLiteralConstant();

  switch (Width) {
  case OPW32:
  case OPW16:
  case OPWV216:
    return decodeSpecialReg32(Val);
  case OPW64:
    return decodeSpecialReg64(Val);
  default:
    llvm_unreachable("unexpected immediate type");
  }
}

MCOperand AMDGPUDisassembler::decodeSpecialReg32(unsigned Val) const {
  using namespace AMDGPU;
  switch (Val) {
  case 102: return createRegOperand(getMCReg(FLAT_SCR_LO, STI));
  case 103: return createRegOperand(getMCReg(FLAT_SCR_HI, STI));
  // ToDo: no support for xnack_mask_lo/_hi register
  case 104:
  case 105: break;
  case 106: return createRegOperand(VCC_LO);
  case 107: return createRegOperand(VCC_HI);
  case 108: return createRegOperand(TBA_LO);
  case 109: return createRegOperand(TBA_HI);
  case 110: return createRegOperand(TMA_LO);
  case 111: return createRegOperand(TMA_HI);
  case 124: return createRegOperand(M0);
  case 126: return createRegOperand(EXEC_LO);
  case 127: return createRegOperand(EXEC_HI);
  case 235: return createRegOperand(SRC_SHARED_BASE);
  case 236: return createRegOperand(SRC_SHARED_LIMIT);
  case 237: return createRegOperand(SRC_PRIVATE_BASE);
  case 238: return createRegOperand(SRC_PRIVATE_LIMIT);
  // TODO: SRC_POPS_EXITING_WAVE_ID
  // ToDo: no support for vccz register
  case 251: break;
  // ToDo: no support for execz register
  case 252: break;
  case 253: return createRegOperand(SCC);
  default: break;
  }
  return errOperand(Val, "unknown operand encoding " + Twine(Val));
}

MCOperand AMDGPUDisassembler::decodeSpecialReg64(unsigned Val) const {
  using namespace AMDGPU;
  switch (Val) {
  case 102: return createRegOperand(getMCReg(FLAT_SCR, STI));
  case 106: return createRegOperand(VCC);
  case 108: return createRegOperand(TBA);
  case 110: return createRegOperand(TMA);
  case 126: return createRegOperand(EXEC);
  default: break;
  }
  return errOperand(Val, "unknown operand encoding " + Twine(Val));
}

MCOperand AMDGPUDisassembler::decodeSDWASrc(const OpWidthTy Width,
                                            unsigned Val) const {
  using namespace AMDGPU::SDWA;

  if (STI.getFeatureBits()[AMDGPU::FeatureGFX9]) {
    // XXX: the static_cast<int> is needed to suppress a tautological
    // "comparison with unsigned is always true" warning.
    if (SDWA9EncValues::SRC_VGPR_MIN <= static_cast<int>(Val) &&
        Val <= SDWA9EncValues::SRC_VGPR_MAX) {
      return createRegOperand(getVgprClassId(Width),
                              Val - SDWA9EncValues::SRC_VGPR_MIN);
    }
    if (SDWA9EncValues::SRC_SGPR_MIN <= Val &&
        Val <= SDWA9EncValues::SRC_SGPR_MAX) {
      return createSRegOperand(getSgprClassId(Width),
                               Val - SDWA9EncValues::SRC_SGPR_MIN);
    }

    return decodeSpecialReg32(Val - SDWA9EncValues::SRC_SGPR_MIN);
  } else if (STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands]) {
    return createRegOperand(getVgprClassId(Width), Val);
  }
  llvm_unreachable("unsupported target");
}

MCOperand AMDGPUDisassembler::decodeSDWASrc16(unsigned Val) const {
  return decodeSDWASrc(OPW16, Val);
}

MCOperand AMDGPUDisassembler::decodeSDWASrc32(unsigned Val) const {
  return decodeSDWASrc(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeSDWAVopcDst(unsigned Val) const {
  using namespace AMDGPU::SDWA;

  assert(STI.getFeatureBits()[AMDGPU::FeatureGFX9] &&
         "SDWAVopcDst should be present only on GFX9");
  if (Val & SDWA9EncValues::VOPC_DST_VCC_MASK) {
    Val &= SDWA9EncValues::VOPC_DST_SGPR_MASK;
    if (Val > AMDGPU::EncValues::SGPR_MAX) {
      return decodeSpecialReg64(Val);
    } else {
      return createSRegOperand(getSgprClassId(OPW64), Val);
    }
  } else {
    return createRegOperand(AMDGPU::VCC);
  }
}
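// A minimal sketch of the SDWA VOPC destination decoding above, assuming the
// usual GFX9 mask values (VOPC_DST_VCC_MASK selects the high bit of the
// field, VOPC_DST_SGPR_MASK keeps the low bits): with the mask bit clear the
// destination is VCC; with it set, the remaining bits give an SGPR pair,
// e.g. a masked value of 2 decodes to s[2:3] via createSRegOperand.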
//===----------------------------------------------------------------------===//
// AMDGPUSymbolizer
//===----------------------------------------------------------------------===//

// Try to find a symbol name for the specified label.
bool AMDGPUSymbolizer::tryAddingSymbolicOperand(MCInst &Inst,
                                raw_ostream &/*cStream*/, int64_t Value,
                                uint64_t /*Address*/, bool IsBranch,
                                uint64_t /*Offset*/, uint64_t /*InstSize*/) {
  typedef std::tuple<uint64_t, StringRef, uint8_t> SymbolInfoTy;
  typedef std::vector<SymbolInfoTy> SectionSymbolsTy;

  if (!IsBranch) {
    return false;
  }

  auto *Symbols = static_cast<SectionSymbolsTy *>(DisInfo);
  auto Result = std::find_if(Symbols->begin(), Symbols->end(),
                             [Value](const SymbolInfoTy& Val) {
                               return std::get<0>(Val) == static_cast<uint64_t>(Value)
                                   && std::get<2>(Val) == ELF::STT_NOTYPE;
                             });
  if (Result != Symbols->end()) {
    auto *Sym = Ctx.getOrCreateSymbol(std::get<1>(*Result));
    const auto *Add = MCSymbolRefExpr::create(Sym, Ctx);
    Inst.addOperand(MCOperand::createExpr(Add));
    return true;
  }
  return false;
}

void AMDGPUSymbolizer::tryAddingPcLoadReferenceComment(raw_ostream &cStream,
                                                       int64_t Value,
                                                       uint64_t Address) {
  llvm_unreachable("unimplemented");
}

//===----------------------------------------------------------------------===//
// Initialization
//===----------------------------------------------------------------------===//

static MCSymbolizer *createAMDGPUSymbolizer(const Triple &/*TT*/,
                              LLVMOpInfoCallback /*GetOpInfo*/,
                              LLVMSymbolLookupCallback /*SymbolLookUp*/,
                              void *DisInfo,
                              MCContext *Ctx,
                              std::unique_ptr<MCRelocationInfo> &&RelInfo) {
  return new AMDGPUSymbolizer(*Ctx, std::move(RelInfo), DisInfo);
}

static MCDisassembler *createAMDGPUDisassembler(const Target &T,
                                                const MCSubtargetInfo &STI,
                                                MCContext &Ctx) {
  return new AMDGPUDisassembler(STI, Ctx);
}

extern "C" void LLVMInitializeAMDGPUDisassembler() {
  TargetRegistry::RegisterMCDisassembler(getTheGCNTarget(),
                                         createAMDGPUDisassembler);
  TargetRegistry::RegisterMCSymbolizer(getTheGCNTarget(),
                                       createAMDGPUSymbolizer);
}
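// A quick way to exercise this disassembler once the target is registered (a
// sketch; the byte values are hypothetical and the cpu choice assumes a VI
// subtarget, per the FeatureGCN3Encoding check in getInstruction):
//
//   echo "0x01 0x00 0x02 0x7e" | \
//     llvm-mc -disassemble -triple=amdgcn -mcpu=fiji
//
// llvm-mc routes the bytes through the createAMDGPUDisassembler hook
// registered above and prints the decoded instruction.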