//===- AMDGPUDisassembler.cpp - Disassembler for AMDGPU ISA --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//
//
/// \file
///
/// This file contains the definition of the AMDGPU ISA disassembler.
//
//===----------------------------------------------------------------------===//

// ToDo: What to do with instruction suffixes (v_mov_b32 vs v_mov_b32_e32)?

#include "Disassembler/AMDGPUDisassembler.h"
#include "AMDGPU.h"
#include "AMDGPURegisterInfo.h"
#include "SIDefines.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm-c/Disassembler.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDisassembler/MCDisassembler.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixedLenDisassembler.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <tuple>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "amdgpu-disassembler"

using DecodeStatus = llvm::MCDisassembler::DecodeStatus;

inline static MCDisassembler::DecodeStatus
addOperand(MCInst &Inst, const MCOperand& Opnd) {
  Inst.addOperand(Opnd);
  return Opnd.isValid() ?
    MCDisassembler::Success :
    MCDisassembler::SoftFail;
}

static int insertNamedMCOperand(MCInst &MI, const MCOperand &Op,
                                uint16_t NameIdx) {
  int OpIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), NameIdx);
  if (OpIdx != -1) {
    auto I = MI.begin();
    std::advance(I, OpIdx);
    MI.insert(I, Op);
  }
  return OpIdx;
}

static DecodeStatus decodeSoppBrTarget(MCInst &Inst, unsigned Imm,
                                       uint64_t Addr, const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);

  APInt SignedOffset(18, Imm * 4, true);
  int64_t Offset = (SignedOffset.sext(64) + 4 + Addr).getSExtValue();

  if (DAsm->tryAddingSymbolicOperand(Inst, Offset, Addr, true, 2, 2))
    return MCDisassembler::Success;
  return addOperand(Inst, MCOperand::createImm(Imm));
}

#define DECODE_OPERAND(StaticDecoderName, DecoderName) \
static DecodeStatus StaticDecoderName(MCInst &Inst, \
                                      unsigned Imm, \
                                      uint64_t /*Addr*/, \
                                      const void *Decoder) { \
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder); \
  return addOperand(Inst, DAsm->DecoderName(Imm)); \
}

#define DECODE_OPERAND_REG(RegClass) \
DECODE_OPERAND(Decode##RegClass##RegisterClass, decodeOperand_##RegClass)
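
// Each DECODE_OPERAND_REG(RegClass) instantiation below defines a static
// callback named Decode<RegClass>RegisterClass that the TableGen-generated
// decoder tables reference by name. For example, DECODE_OPERAND_REG(VGPR_32)
// expands (modulo whitespace) to:
//
//   static DecodeStatus DecodeVGPR_32RegisterClass(MCInst &Inst,
//                                                  unsigned Imm,
//                                                  uint64_t /*Addr*/,
//                                                  const void *Decoder) {
//     auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
//     return addOperand(Inst, DAsm->decodeOperand_VGPR_32(Imm));
//   }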
DECODE_OPERAND_REG(VGPR_32)
DECODE_OPERAND_REG(VS_32)
DECODE_OPERAND_REG(VS_64)
DECODE_OPERAND_REG(VS_128)

DECODE_OPERAND_REG(VReg_64)
DECODE_OPERAND_REG(VReg_96)
DECODE_OPERAND_REG(VReg_128)

DECODE_OPERAND_REG(SReg_32)
DECODE_OPERAND_REG(SReg_32_XM0_XEXEC)
DECODE_OPERAND_REG(SReg_32_XEXEC_HI)
DECODE_OPERAND_REG(SReg_64)
DECODE_OPERAND_REG(SReg_64_XEXEC)
DECODE_OPERAND_REG(SReg_128)
DECODE_OPERAND_REG(SReg_256)
DECODE_OPERAND_REG(SReg_512)

static DecodeStatus decodeOperand_VSrc16(MCInst &Inst,
                                         unsigned Imm,
                                         uint64_t Addr,
                                         const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VSrc16(Imm));
}

static DecodeStatus decodeOperand_VSrcV216(MCInst &Inst,
                                           unsigned Imm,
                                           uint64_t Addr,
                                           const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VSrcV216(Imm));
}

#define DECODE_SDWA(DecName) \
DECODE_OPERAND(decodeSDWA##DecName, decodeSDWA##DecName)

DECODE_SDWA(Src32)
DECODE_SDWA(Src16)
DECODE_SDWA(VopcDst)

#include "AMDGPUGenDisassemblerTables.inc"

//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//

template <typename T> static inline T eatBytes(ArrayRef<uint8_t>& Bytes) {
  assert(Bytes.size() >= sizeof(T));
  const auto Res =
      support::endian::read<T, support::endianness::little>(Bytes.data());
  Bytes = Bytes.slice(sizeof(T));
  return Res;
}

DecodeStatus AMDGPUDisassembler::tryDecodeInst(const uint8_t* Table,
                                               MCInst &MI,
                                               uint64_t Inst,
                                               uint64_t Address) const {
  assert(MI.getOpcode() == 0);
  assert(MI.getNumOperands() == 0);
  MCInst TmpInst;
  HasLiteral = false;
  const auto SavedBytes = Bytes;
  if (decodeInstruction(Table, TmpInst, Inst, Address, this, STI)) {
    MI = TmpInst;
    return MCDisassembler::Success;
  }
  Bytes = SavedBytes;
  return MCDisassembler::Fail;
}
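
// Decode one instruction from Bytes_. The encoding width is not known up
// front, so the 64-bit DPP and SDWA tables are tried first (they would
// otherwise be shadowed by the 32-bit VOP1/VOP2 encodings), then the 32-bit
// tables, then the remaining 64-bit tables. On success Size is set to the
// number of bytes consumed; on failure it is set to 0.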
DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
                                                ArrayRef<uint8_t> Bytes_,
                                                uint64_t Address,
                                                raw_ostream &WS,
                                                raw_ostream &CS) const {
  CommentStream = &CS;
  bool IsSDWA = false;

  // ToDo: AMDGPUDisassembler supports only VI ISA.
  if (!STI.getFeatureBits()[AMDGPU::FeatureGCN3Encoding])
    report_fatal_error("Disassembly not yet supported for subtarget");

  const unsigned MaxInstBytesNum = (std::min)((size_t)8, Bytes_.size());
  Bytes = Bytes_.slice(0, MaxInstBytesNum);

  DecodeStatus Res = MCDisassembler::Fail;
  do {
    // ToDo: better to switch encoding length using some bit predicate
    // but it is unknown yet, so try all we can

    // Try to decode DPP and SDWA first to solve conflict with VOP1 and VOP2
    // encodings
    if (Bytes.size() >= 8) {
      const uint64_t QW = eatBytes<uint64_t>(Bytes);
      Res = tryDecodeInst(DecoderTableDPP64, MI, QW, Address);
      if (Res) break;

      Res = tryDecodeInst(DecoderTableSDWA64, MI, QW, Address);
      if (Res) { IsSDWA = true; break; }

      Res = tryDecodeInst(DecoderTableSDWA964, MI, QW, Address);
      if (Res) { IsSDWA = true; break; }
    }

    // Reinitialize Bytes as DPP64 could have eaten too much
    Bytes = Bytes_.slice(0, MaxInstBytesNum);

    // Try decode 32-bit instruction
    if (Bytes.size() < 4) break;
    const uint32_t DW = eatBytes<uint32_t>(Bytes);
    Res = tryDecodeInst(DecoderTableVI32, MI, DW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableAMDGPU32, MI, DW, Address);
    if (Res) break;

    if (Bytes.size() < 4) break;
    const uint64_t QW = ((uint64_t)eatBytes<uint32_t>(Bytes) << 32) | DW;
    Res = tryDecodeInst(DecoderTableVI64, MI, QW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableAMDGPU64, MI, QW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableGFX964, MI, QW, Address);
  } while (false);

  if (Res && (MI.getOpcode() == AMDGPU::V_MAC_F32_e64_vi ||
              MI.getOpcode() == AMDGPU::V_MAC_F32_e64_si ||
              MI.getOpcode() == AMDGPU::V_MAC_F16_e64_vi)) {
    // Insert dummy unused src2_modifiers.
    insertNamedMCOperand(MI, MCOperand::createImm(0),
                         AMDGPU::OpName::src2_modifiers);
  }

  if (Res && IsSDWA)
    Res = convertSDWAInst(MI);

  Size = Res ? (MaxInstBytesNum - Bytes.size()) : 0;
  return Res;
}
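
// Post-process an SDWA instruction so its operand list matches what the
// instruction description expects: GFX9 VOPC SDWA gets a dummy clamp operand,
// VI VOPC SDWA gets the implicit VCC sdst, and VI VOP1/VOP2 SDWA get a dummy
// omod operand.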
DecodeStatus AMDGPUDisassembler::convertSDWAInst(MCInst &MI) const {
  if (STI.getFeatureBits()[AMDGPU::FeatureGFX9]) {
    if (AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst) != -1)
      // VOPC - insert clamp
      insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::clamp);
  } else if (STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands]) {
    int SDst = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst);
    if (SDst != -1) {
      // VOPC - insert VCC register as sdst
      insertNamedMCOperand(MI, MCOperand::createReg(AMDGPU::VCC),
                           AMDGPU::OpName::sdst);
    } else {
      // VOP1/2 - insert omod if present in instruction
      insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::omod);
    }
  }
  return MCDisassembler::Success;
}

const char* AMDGPUDisassembler::getRegClassName(unsigned RegClassID) const {
  return getContext().getRegisterInfo()->
    getRegClassName(&AMDGPUMCRegisterClasses[RegClassID]);
}

inline
MCOperand AMDGPUDisassembler::errOperand(unsigned V,
                                         const Twine& ErrMsg) const {
  *CommentStream << "Error: " + ErrMsg;

  // ToDo: add support for error operands to MCInst.h
  // return MCOperand::createError(V);
  return MCOperand();
}

inline
MCOperand AMDGPUDisassembler::createRegOperand(unsigned int RegId) const {
  return MCOperand::createReg(RegId);
}

inline
MCOperand AMDGPUDisassembler::createRegOperand(unsigned RegClassID,
                                               unsigned Val) const {
  const auto& RegCl = AMDGPUMCRegisterClasses[RegClassID];
  if (Val >= RegCl.getNumRegs())
    return errOperand(Val, Twine(getRegClassName(RegClassID)) +
                           ": unknown register " + Twine(Val));
  return createRegOperand(RegCl.getRegister(Val));
}
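
// createSRegOperand turns an SGPR/TTMP encoding into a register operand of
// the requested scalar class. Wider classes are indexed in units of their
// width, so the raw value is shifted down accordingly; as a rough example
// (assuming the usual consecutive register numbering), an encoded value of 4
// with an SGPR_64 class selects the pair s[4:5]. Misaligned encodings are
// only warned about, not rejected.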
inline
MCOperand AMDGPUDisassembler::createSRegOperand(unsigned SRegClassID,
                                                unsigned Val) const {
  // ToDo: SI/CI have 104 SGPRs, VI - 102
  // Valery: here we are accepting as much as we can and letting the assembler
  // sort it out
  int shift = 0;
  switch (SRegClassID) {
  case AMDGPU::SGPR_32RegClassID:
  case AMDGPU::TTMP_32RegClassID:
    break;
  case AMDGPU::SGPR_64RegClassID:
  case AMDGPU::TTMP_64RegClassID:
    shift = 1;
    break;
  case AMDGPU::SGPR_128RegClassID:
  case AMDGPU::TTMP_128RegClassID:
  // ToDo: unclear if s[100:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  case AMDGPU::SReg_256RegClassID:
  // ToDo: unclear if s[96:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  case AMDGPU::SReg_512RegClassID:
    shift = 2;
    break;
  // ToDo: unclear if s[88:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  default:
    llvm_unreachable("unhandled register class");
  }

  if (Val % (1 << shift)) {
    *CommentStream << "Warning: " << getRegClassName(SRegClassID)
                   << ": scalar reg isn't aligned " << Val;
  }

  return createRegOperand(SRegClassID, Val >> shift);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_32(unsigned Val) const {
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_128(unsigned Val) const {
  return decodeSrcOp(OPW128, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VSrc16(unsigned Val) const {
  return decodeSrcOp(OPW16, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VSrcV216(unsigned Val) const {
  return decodeSrcOp(OPWV216, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VGPR_32(unsigned Val) const {
  // Some instructions have operand restrictions beyond what the encoding
  // allows. Some ordinarily VSrc_32 operands are VGPR_32, so clear the extra
  // high bit.
  Val &= 255;

  return createRegOperand(AMDGPU::VGPR_32RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_64(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_64RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_96(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_96RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_128(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_128RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32(unsigned Val) const {
  // The TableGen-generated disassembler doesn't care about operand types,
  // leaving only the register class, so an SSrc_32 operand turns into SReg_32
  // and therefore we accept immediates and literals here as well.
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32_XM0_XEXEC(
  unsigned Val) const {
  // SReg_32_XM0 is SReg_32 without M0 or EXEC_LO/EXEC_HI
  return decodeOperand_SReg_32(Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32_XEXEC_HI(
  unsigned Val) const {
  // SReg_32_XEXEC_HI is SReg_32 without EXEC_HI
  return decodeOperand_SReg_32(Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_64_XEXEC(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_128(unsigned Val) const {
  return decodeSrcOp(OPW128, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_256(unsigned Val) const {
  return createSRegOperand(AMDGPU::SReg_256RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_512(unsigned Val) const {
  return createSRegOperand(AMDGPU::SReg_512RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeLiteralConstant() const {
  // For now all literal constants are assumed to be unsigned integers.
  // ToDo: deal with signed/unsigned 64-bit integer constants
  // ToDo: deal with float/double constants
  if (!HasLiteral) {
    if (Bytes.size() < 4) {
      return errOperand(0, "cannot read literal, inst bytes left " +
                        Twine(Bytes.size()));
    }
    HasLiteral = true;
    Literal = eatBytes<uint32_t>(Bytes);
  }
  return MCOperand::createImm(Literal);
}
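
// decodeIntImmed maps an inline integer constant encoding onto its value.
// With the usual EncValues constants (an assumption here: 128 encodes 0,
// 129..192 encode 1..64, and 193..208 encode -1..-16), Imm = 129 yields 1 and
// Imm = 193 yields -1.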
MCOperand AMDGPUDisassembler::decodeIntImmed(unsigned Imm) {
  using namespace AMDGPU::EncValues;

  assert(Imm >= INLINE_INTEGER_C_MIN && Imm <= INLINE_INTEGER_C_MAX);
  return MCOperand::createImm((Imm <= INLINE_INTEGER_C_POSITIVE_MAX) ?
    (static_cast<int64_t>(Imm) - INLINE_INTEGER_C_MIN) :
    (INLINE_INTEGER_C_POSITIVE_MAX - static_cast<int64_t>(Imm)));
      // Cast prevents negative overflow.
}

static int64_t getInlineImmVal32(unsigned Imm) {
  switch (Imm) {
  case 240:
    return FloatToBits(0.5f);
  case 241:
    return FloatToBits(-0.5f);
  case 242:
    return FloatToBits(1.0f);
  case 243:
    return FloatToBits(-1.0f);
  case 244:
    return FloatToBits(2.0f);
  case 245:
    return FloatToBits(-2.0f);
  case 246:
    return FloatToBits(4.0f);
  case 247:
    return FloatToBits(-4.0f);
  case 248: // 1 / (2 * PI)
    return 0x3e22f983;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

static int64_t getInlineImmVal64(unsigned Imm) {
  switch (Imm) {
  case 240:
    return DoubleToBits(0.5);
  case 241:
    return DoubleToBits(-0.5);
  case 242:
    return DoubleToBits(1.0);
  case 243:
    return DoubleToBits(-1.0);
  case 244:
    return DoubleToBits(2.0);
  case 245:
    return DoubleToBits(-2.0);
  case 246:
    return DoubleToBits(4.0);
  case 247:
    return DoubleToBits(-4.0);
  case 248: // 1 / (2 * PI)
    return 0x3fc45f306dc9c882;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

static int64_t getInlineImmVal16(unsigned Imm) {
  switch (Imm) {
  case 240:
    return 0x3800;
  case 241:
    return 0xB800;
  case 242:
    return 0x3C00;
  case 243:
    return 0xBC00;
  case 244:
    return 0x4000;
  case 245:
    return 0xC000;
  case 246:
    return 0x4400;
  case 247:
    return 0xC400;
  case 248: // 1 / (2 * PI)
    return 0x3118;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

MCOperand AMDGPUDisassembler::decodeFPImmed(OpWidthTy Width, unsigned Imm) {
  assert(Imm >= AMDGPU::EncValues::INLINE_FLOATING_C_MIN
      && Imm <= AMDGPU::EncValues::INLINE_FLOATING_C_MAX);

  // ToDo: case 248: 1/(2*PI) - is allowed only on VI
  switch (Width) {
  case OPW32:
    return MCOperand::createImm(getInlineImmVal32(Imm));
  case OPW64:
    return MCOperand::createImm(getInlineImmVal64(Imm));
  case OPW16:
  case OPWV216:
    return MCOperand::createImm(getInlineImmVal16(Imm));
  default:
    llvm_unreachable("implement me");
  }
}

unsigned AMDGPUDisassembler::getVgprClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;

  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall through
  case OPW32:
  case OPW16:
  case OPWV216:
    return VGPR_32RegClassID;
  case OPW64: return VReg_64RegClassID;
  case OPW128: return VReg_128RegClassID;
  }
}

unsigned AMDGPUDisassembler::getSgprClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;

  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall through
  case OPW32:
  case OPW16:
  case OPWV216:
    return SGPR_32RegClassID;
  case OPW64: return SGPR_64RegClassID;
  case OPW128: return SGPR_128RegClassID;
  }
}

unsigned AMDGPUDisassembler::getTtmpClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;

  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall through
  case OPW32:
  case OPW16:
  case OPWV216:
    return TTMP_32RegClassID;
  case OPW64: return TTMP_64RegClassID;
  case OPW128: return TTMP_128RegClassID;
  }
}
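
// decodeSrcOp interprets a 9-bit source operand encoding. Depending on which
// range of AMDGPU::EncValues the value falls into, it becomes a VGPR, an
// SGPR, a trap temporary (TTMP), an inline integer or floating-point
// constant, a 32-bit literal that follows the instruction, or one of the
// special registers handled by decodeSpecialReg32/64.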
MCOperand AMDGPUDisassembler::decodeSrcOp(const OpWidthTy Width,
                                          unsigned Val) const {
  using namespace AMDGPU::EncValues;

  assert(Val < 512); // enum9

  if (VGPR_MIN <= Val && Val <= VGPR_MAX) {
    return createRegOperand(getVgprClassId(Width), Val - VGPR_MIN);
  }
  if (Val <= SGPR_MAX) {
    assert(SGPR_MIN == 0); // "SGPR_MIN <= Val" is always true and causes a
                           // compilation warning.
    return createSRegOperand(getSgprClassId(Width), Val - SGPR_MIN);
  }
  if (TTMP_MIN <= Val && Val <= TTMP_MAX) {
    return createSRegOperand(getTtmpClassId(Width), Val - TTMP_MIN);
  }

  if (INLINE_INTEGER_C_MIN <= Val && Val <= INLINE_INTEGER_C_MAX)
    return decodeIntImmed(Val);

  if (INLINE_FLOATING_C_MIN <= Val && Val <= INLINE_FLOATING_C_MAX)
    return decodeFPImmed(Width, Val);

  if (Val == LITERAL_CONST)
    return decodeLiteralConstant();

  switch (Width) {
  case OPW32:
  case OPW16:
  case OPWV216:
    return decodeSpecialReg32(Val);
  case OPW64:
    return decodeSpecialReg64(Val);
  default:
    llvm_unreachable("unexpected immediate type");
  }
}

MCOperand AMDGPUDisassembler::decodeSpecialReg32(unsigned Val) const {
  using namespace AMDGPU;

  switch (Val) {
  case 102: return createRegOperand(getMCReg(FLAT_SCR_LO, STI));
  case 103: return createRegOperand(getMCReg(FLAT_SCR_HI, STI));
    // ToDo: no support for xnack_mask_lo/_hi register
  case 104:
  case 105: break;
  case 106: return createRegOperand(VCC_LO);
  case 107: return createRegOperand(VCC_HI);
  case 108: return createRegOperand(TBA_LO);
  case 109: return createRegOperand(TBA_HI);
  case 110: return createRegOperand(TMA_LO);
  case 111: return createRegOperand(TMA_HI);
  case 124: return createRegOperand(M0);
  case 126: return createRegOperand(EXEC_LO);
  case 127: return createRegOperand(EXEC_HI);
  case 235: return createRegOperand(SRC_SHARED_BASE);
  case 236: return createRegOperand(SRC_SHARED_LIMIT);
  case 237: return createRegOperand(SRC_PRIVATE_BASE);
  case 238: return createRegOperand(SRC_PRIVATE_LIMIT);
    // TODO: SRC_POPS_EXITING_WAVE_ID
    // ToDo: no support for vccz register
  case 251: break;
    // ToDo: no support for execz register
  case 252: break;
  case 253: return createRegOperand(SCC);
  default: break;
  }
  return errOperand(Val, "unknown operand encoding " + Twine(Val));
}

MCOperand AMDGPUDisassembler::decodeSpecialReg64(unsigned Val) const {
  using namespace AMDGPU;

  switch (Val) {
  case 102: return createRegOperand(getMCReg(FLAT_SCR, STI));
  case 106: return createRegOperand(VCC);
  case 108: return createRegOperand(TBA);
  case 110: return createRegOperand(TMA);
  case 126: return createRegOperand(EXEC);
  default: break;
  }
  return errOperand(Val, "unknown operand encoding " + Twine(Val));
}
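
// decodeSDWASrc handles the SDWA src operand field. On GFX9 the field can
// select a VGPR, an SGPR, or (via decodeSpecialReg32) another scalar source;
// on VI it can only select a VGPR.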
MCOperand AMDGPUDisassembler::decodeSDWASrc(const OpWidthTy Width,
                                            unsigned Val) const {
  using namespace AMDGPU::SDWA;

  if (STI.getFeatureBits()[AMDGPU::FeatureGFX9]) {
    // XXX: static_cast<int> is needed to avoid a warning that the comparison
    // with unsigned is always true.
    if (SDWA9EncValues::SRC_VGPR_MIN <= static_cast<int>(Val) &&
        Val <= SDWA9EncValues::SRC_VGPR_MAX) {
      return createRegOperand(getVgprClassId(Width),
                              Val - SDWA9EncValues::SRC_VGPR_MIN);
    }
    if (SDWA9EncValues::SRC_SGPR_MIN <= Val &&
        Val <= SDWA9EncValues::SRC_SGPR_MAX) {
      return createSRegOperand(getSgprClassId(Width),
                               Val - SDWA9EncValues::SRC_SGPR_MIN);
    }

    return decodeSpecialReg32(Val - SDWA9EncValues::SRC_SGPR_MIN);
  } else if (STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands]) {
    return createRegOperand(getVgprClassId(Width), Val);
  }
  llvm_unreachable("unsupported target");
}

MCOperand AMDGPUDisassembler::decodeSDWASrc16(unsigned Val) const {
  return decodeSDWASrc(OPW16, Val);
}

MCOperand AMDGPUDisassembler::decodeSDWASrc32(unsigned Val) const {
  return decodeSDWASrc(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeSDWAVopcDst(unsigned Val) const {
  using namespace AMDGPU::SDWA;

  assert(STI.getFeatureBits()[AMDGPU::FeatureGFX9] &&
         "SDWAVopcDst should be present only on GFX9");
  if (Val & SDWA9EncValues::VOPC_DST_VCC_MASK) {
    Val &= SDWA9EncValues::VOPC_DST_SGPR_MASK;
    if (Val > AMDGPU::EncValues::SGPR_MAX) {
      return decodeSpecialReg64(Val);
    } else {
      return createSRegOperand(getSgprClassId(OPW64), Val);
    }
  } else {
    return createRegOperand(AMDGPU::VCC);
  }
}

//===----------------------------------------------------------------------===//
// AMDGPUSymbolizer
//===----------------------------------------------------------------------===//

// Try to find symbol name for specified label
bool AMDGPUSymbolizer::tryAddingSymbolicOperand(MCInst &Inst,
                                raw_ostream &/*cStream*/, int64_t Value,
                                uint64_t /*Address*/, bool IsBranch,
                                uint64_t /*Offset*/, uint64_t /*InstSize*/) {
  using SymbolInfoTy = std::tuple<uint64_t, StringRef, uint8_t>;
  using SectionSymbolsTy = std::vector<SymbolInfoTy>;

  if (!IsBranch) {
    return false;
  }

  auto *Symbols = static_cast<SectionSymbolsTy *>(DisInfo);
  auto Result = std::find_if(Symbols->begin(), Symbols->end(),
                             [Value](const SymbolInfoTy& Val) {
                               return std::get<0>(Val) ==
                                          static_cast<uint64_t>(Value) &&
                                      std::get<2>(Val) == ELF::STT_NOTYPE;
                             });
  if (Result != Symbols->end()) {
    auto *Sym = Ctx.getOrCreateSymbol(std::get<1>(*Result));
    const auto *Add = MCSymbolRefExpr::create(Sym, Ctx);
    Inst.addOperand(MCOperand::createExpr(Add));
    return true;
  }
  return false;
}

void AMDGPUSymbolizer::tryAddingPcLoadReferenceComment(raw_ostream &cStream,
                                                       int64_t Value,
                                                       uint64_t Address) {
  llvm_unreachable("unimplemented");
}

//===----------------------------------------------------------------------===//
// Initialization
//===----------------------------------------------------------------------===//

static MCSymbolizer *createAMDGPUSymbolizer(const Triple &/*TT*/,
                              LLVMOpInfoCallback /*GetOpInfo*/,
                              LLVMSymbolLookupCallback /*SymbolLookUp*/,
                              void *DisInfo,
                              MCContext *Ctx,
                              std::unique_ptr<MCRelocationInfo> &&RelInfo) {
  return new AMDGPUSymbolizer(*Ctx, std::move(RelInfo), DisInfo);
}

static MCDisassembler *createAMDGPUDisassembler(const Target &T,
                                                const MCSubtargetInfo &STI,
                                                MCContext &Ctx) {
  return new AMDGPUDisassembler(STI, Ctx);
}
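
// LLVMInitializeAMDGPUDisassembler hooks the factories above into the
// TargetRegistry. A client that has looked up the GCN target can then obtain
// the disassembler in the usual way, e.g. (a minimal sketch, assuming
// TheTarget, STI and Ctx are already set up):
//
//   std::unique_ptr<MCDisassembler> DisAsm(
//       TheTarget->createMCDisassembler(STI, Ctx));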
extern "C" void LLVMInitializeAMDGPUDisassembler() {
  TargetRegistry::RegisterMCDisassembler(getTheGCNTarget(),
                                         createAMDGPUDisassembler);
  TargetRegistry::RegisterMCSymbolizer(getTheGCNTarget(),
                                       createAMDGPUSymbolizer);
}