//===-- AMDGPUDisassembler.cpp - Disassembler for AMDGPU ISA --------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//
//
/// \file
///
/// This file contains definition for AMDGPU ISA disassembler
//
//===----------------------------------------------------------------------===//

// ToDo: What to do with instruction suffixes (v_mov_b32 vs v_mov_b32_e32)?

#include "AMDGPUDisassembler.h"
#include "AMDGPU.h"
#include "AMDGPURegisterInfo.h"
#include "SIDefines.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"

#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCFixedLenDisassembler.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/TargetRegistry.h"


using namespace llvm;

#define DEBUG_TYPE "amdgpu-disassembler"

typedef llvm::MCDisassembler::DecodeStatus DecodeStatus;


/// Append \p Opnd to \p Inst unconditionally and report how decoding went:
/// Success for a valid operand, SoftFail for an invalid (error) operand so
/// the caller can keep disassembling while flagging the problem.
inline static MCDisassembler::DecodeStatus
addOperand(MCInst &Inst, const MCOperand& Opnd) {
  Inst.addOperand(Opnd);
  return Opnd.isValid() ?
    MCDisassembler::Success :
    MCDisassembler::SoftFail;
}

/// Decode the 16-bit branch offset of an SOPP branch instruction.
/// The encoded immediate counts dwords, so the byte offset is Imm * 4,
/// sign-extended from 18 bits and taken relative to the address of the
/// following instruction (Addr + 4). If a symbolizer is attached and can
/// resolve the target to a symbol, the symbolic operand is used; otherwise
/// the raw immediate is emitted.
static DecodeStatus decodeSoppBrTarget(MCInst &Inst, unsigned Imm,
                                       uint64_t Addr, const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);

  // Imm is a dword count; *4 gives bytes, 18 bits covers 16-bit Imm * 4.
  APInt SignedOffset(18, Imm * 4, true);
  int64_t Offset = (SignedOffset.sext(64) + 4 + Addr).getSExtValue();

  if (DAsm->tryAddingSymbolicOperand(Inst, Offset, Addr, true, 2, 2))
    return MCDisassembler::Success;
  return addOperand(Inst, MCOperand::createImm(Imm));
}

// Generates a static Decode<RegClass>RegisterClass callback (the name the
// TableGen'erated decoder tables expect) that forwards to the corresponding
// AMDGPUDisassembler::decodeOperand_<DecName> member.
#define DECODE_OPERAND2(RegClass, DecName) \
static DecodeStatus Decode##RegClass##RegisterClass(MCInst &Inst, \
                                                    unsigned Imm, \
                                                    uint64_t /*Addr*/, \
                                                    const void *Decoder) { \
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder); \
  return addOperand(Inst, DAsm->decodeOperand_##DecName(Imm)); \
}

// Common case: the decoder method is named after the register class itself.
#define DECODE_OPERAND(RegClass) DECODE_OPERAND2(RegClass, RegClass)

DECODE_OPERAND(VGPR_32)
DECODE_OPERAND(VS_32)
DECODE_OPERAND(VS_64)

DECODE_OPERAND(VReg_64)
DECODE_OPERAND(VReg_96)
DECODE_OPERAND(VReg_128)

DECODE_OPERAND(SReg_32)
DECODE_OPERAND(SReg_32_XM0_XEXEC)
DECODE_OPERAND(SReg_64)
DECODE_OPERAND(SReg_64_XEXEC)
DECODE_OPERAND(SReg_128)
DECODE_OPERAND(SReg_256)
DECODE_OPERAND(SReg_512)


// Hand-written (non-macro) decoder callbacks for 16-bit VSrc operands; the
// decoder-table entry names don't follow the Decode<RegClass>RegisterClass
// pattern, so the macro above can't generate them.
static DecodeStatus decodeOperand_VSrc16(MCInst &Inst,
                                         unsigned Imm,
                                         uint64_t Addr,
                                         const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VSrc16(Imm));
}

static DecodeStatus decodeOperand_VSrcV216(MCInst &Inst,
                                           unsigned Imm,
                                           uint64_t Addr,
                                           const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VSrcV216(Imm));
}

// TableGen'erated decoder tables and decodeInstruction(); must follow the
// callback definitions above, which the tables reference by name.
#include "AMDGPUGenDisassemblerTables.inc"

//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//

/// Read a little-endian value of type \p T from the front of \p Bytes and
/// advance \p Bytes past it. Caller must ensure enough bytes remain.
template <typename T> static inline T eatBytes(ArrayRef<uint8_t>& Bytes) {
  assert(Bytes.size() >= sizeof(T));
  const auto Res = support::endian::read<T, support::endianness::little>(Bytes.data());
  Bytes = Bytes.slice(sizeof(T));
  return Res;
}

/// Attempt to decode \p Inst against a single decoder \p Table.
/// On success the decoded instruction is copied into \p MI; on failure the
/// mutable Bytes view is restored (operand decoders such as
/// decodeLiteralConstant may have consumed literal bytes) so the caller can
/// retry with another table. \p MI must be empty on entry.
DecodeStatus AMDGPUDisassembler::tryDecodeInst(const uint8_t* Table,
                                               MCInst &MI,
                                               uint64_t Inst,
                                               uint64_t Address) const {
  assert(MI.getOpcode() == 0);
  assert(MI.getNumOperands() == 0);
  MCInst TmpInst;
  const auto SavedBytes = Bytes;
  if (decodeInstruction(Table, TmpInst, Inst, Address, this, STI)) {
    MI = TmpInst;
    return MCDisassembler::Success;
  }
  Bytes = SavedBytes;
  return MCDisassembler::Fail;
}

/// Disassemble one instruction starting at \p Bytes_ / \p Address into \p MI.
/// Tries the decoder tables in priority order (DPP/SDWA 64-bit first, then
/// 32-bit, then remaining 64-bit encodings). On return \p Size is the number
/// of bytes consumed (0 on failure).
DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
                                                ArrayRef<uint8_t> Bytes_,
                                                uint64_t Address,
                                                raw_ostream &WS,
                                                raw_ostream &CS) const {
  CommentStream = &CS;

  // ToDo: AMDGPUDisassembler supports only VI ISA.
  if (!STI.getFeatureBits()[AMDGPU::FeatureGCN3Encoding])
    report_fatal_error("Disassembly not yet supported for subtarget");

  // At most 8 bytes: the longest encoding is 64 bits (plus any literal,
  // which is consumed separately from the same Bytes view).
  const unsigned MaxInstBytesNum = (std::min)((size_t)8, Bytes_.size());
  Bytes = Bytes_.slice(0, MaxInstBytesNum);

  DecodeStatus Res = MCDisassembler::Fail;
  do {
    // ToDo: better to switch encoding length using some bit predicate
    // but it is unknown yet, so try all we can

    // Try to decode DPP and SDWA first to solve conflict with VOP1 and VOP2
    // encodings
    if (Bytes.size() >= 8) {
      const uint64_t QW = eatBytes<uint64_t>(Bytes);
      Res = tryDecodeInst(DecoderTableDPP64, MI, QW, Address);
      if (Res) break;

      Res = tryDecodeInst(DecoderTableSDWA64, MI, QW, Address);
      if (Res) break;
    }

    // Reinitialize Bytes as DPP64 could have eaten too much
    Bytes = Bytes_.slice(0, MaxInstBytesNum);

    // Try decode 32-bit instruction
    if (Bytes.size() < 4) break;
    const uint32_t DW = eatBytes<uint32_t>(Bytes);
    Res = tryDecodeInst(DecoderTableVI32, MI, DW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableAMDGPU32, MI, DW, Address);
    if (Res) break;

    // Fall back to 64-bit encodings: low dword is DW, read the high dword.
    if (Bytes.size() < 4) break;
    const uint64_t QW = ((uint64_t)eatBytes<uint32_t>(Bytes) << 32) | DW;
    Res = tryDecodeInst(DecoderTableVI64, MI, QW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableAMDGPU64, MI, QW, Address);
  } while (false);

  // V_MAC encodings omit src2_modifiers; splice in a dummy value so the
  // operand list matches the MCInstrDesc operand order.
  if (Res && (MI.getOpcode() == AMDGPU::V_MAC_F32_e64_vi ||
              MI.getOpcode() == AMDGPU::V_MAC_F32_e64_si ||
              MI.getOpcode() == AMDGPU::V_MAC_F16_e64_vi)) {
    // Insert dummy unused src2_modifiers.
    int Src2ModIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                                AMDGPU::OpName::src2_modifiers);
    auto I = MI.begin();
    std::advance(I, Src2ModIdx);
    MI.insert(I, MCOperand::createImm(0));
  }

  // Bytes still holds whatever wasn't consumed; the difference is the
  // instruction size (including any trailing literal constant).
  Size = Res ? (MaxInstBytesNum - Bytes.size()) : 0;
  return Res;
}

/// Human-readable name of register class \p RegClassID, for diagnostics.
const char* AMDGPUDisassembler::getRegClassName(unsigned RegClassID) const {
  return getContext().getRegisterInfo()->
    getRegClassName(&AMDGPUMCRegisterClasses[RegClassID]);
}

/// Emit \p ErrMsg to the comment stream and return an invalid MCOperand,
/// which addOperand() will turn into a SoftFail.
inline
MCOperand AMDGPUDisassembler::errOperand(unsigned V,
                                         const Twine& ErrMsg) const {
  *CommentStream << "Error: " + ErrMsg;

  // ToDo: add support for error operands to MCInst.h
  // return MCOperand::createError(V);
  return MCOperand();
}

/// Wrap an already-resolved MCRegister id in an MCOperand.
inline
MCOperand AMDGPUDisassembler::createRegOperand(unsigned int RegId) const {
  return MCOperand::createReg(RegId);
}

/// Build a register operand for the \p Val'th register of class
/// \p RegClassID, or an error operand if \p Val is out of range.
inline
MCOperand AMDGPUDisassembler::createRegOperand(unsigned RegClassID,
                                               unsigned Val) const {
  const auto& RegCl = AMDGPUMCRegisterClasses[RegClassID];
  if (Val >= RegCl.getNumRegs())
    return errOperand(Val, Twine(getRegClassName(RegClassID)) +
                           ": unknown register " + Twine(Val));
  return createRegOperand(RegCl.getRegister(Val));
}

/// Build a scalar-register operand. \p Val is the encoded SGPR index of the
/// first 32-bit lane; for multi-register classes it must be aligned to the
/// class width, and the class index is Val scaled down accordingly.
inline
MCOperand AMDGPUDisassembler::createSRegOperand(unsigned SRegClassID,
                                                unsigned Val) const {
  // ToDo: SI/CI have 104 SGPRs, VI - 102
  // Valery: here we accepting as much as we can, let assembler sort it out
  int shift = 0;
  switch (SRegClassID) {
  case AMDGPU::SGPR_32RegClassID:
  case AMDGPU::TTMP_32RegClassID:
    break;
  case AMDGPU::SGPR_64RegClassID:
  case AMDGPU::TTMP_64RegClassID:
    shift = 1;
    break;
  case AMDGPU::SGPR_128RegClassID:
  case AMDGPU::TTMP_128RegClassID:
  // ToDo: unclear if s[100:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  case AMDGPU::SReg_256RegClassID:
  // ToDo: unclear if s[96:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  case AMDGPU::SReg_512RegClassID:
    shift = 2;
    break;
  // ToDo: unclear if s[88:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  default:
    llvm_unreachable("unhandled register class");
  }

  // Misaligned encodings are accepted (see note above) but flagged.
  if (Val % (1 << shift)) {
    *CommentStream << "Warning: " << getRegClassName(SRegClassID)
                   << ": scalar reg isn't aligned " << Val;
  }

  return createRegOperand(SRegClassID, Val >> shift);
}

// VS_* operands may hold a VGPR, SGPR, inline constant or literal, so they
// go through the full src-operand decoder.
MCOperand AMDGPUDisassembler::decodeOperand_VS_32(unsigned Val) const {
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VSrc16(unsigned Val) const {
  return decodeSrcOp(OPW16, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VSrcV216(unsigned Val) const {
  return decodeSrcOp(OPWV216, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VGPR_32(unsigned Val) const {
  // Some instructions have operand restrictions beyond what the encoding
  // allows. Some ordinarily VSrc_32 operands are VGPR_32, so clear the extra
  // high bit.
  Val &= 255;

  return createRegOperand(AMDGPU::VGPR_32RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_64(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_64RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_96(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_96RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_128(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_128RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32(unsigned Val) const {
  // table-gen generated disassembler doesn't care about operand types
  // leaving only registry class so SSrc_32 operand turns into SReg_32
  // and therefore we accept immediates and literals here as well
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32_XM0_XEXEC(
  unsigned Val) const {
  // SReg_32_XM0 is SReg_32 without M0 or EXEC_LO/EXEC_HI
  return decodeOperand_SReg_32(Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_64_XEXEC(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_128(unsigned Val) const {
  return decodeSrcOp(OPW128, Val);
}

// 256/512-bit scalar operands can only be register tuples, never
// immediates, so they skip decodeSrcOp.
MCOperand AMDGPUDisassembler::decodeOperand_SReg_256(unsigned Val) const {
  return createSRegOperand(AMDGPU::SReg_256RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_512(unsigned Val) const {
  return createSRegOperand(AMDGPU::SReg_512RegClassID, Val);
}


/// Consume the next dword from the instruction stream as a literal constant
/// operand, or produce an error operand if fewer than 4 bytes remain.
MCOperand AMDGPUDisassembler::decodeLiteralConstant() const {
  // For now all literal constants are supposed to be unsigned integer
  // ToDo: deal with signed/unsigned 64-bit integer constants
  // ToDo: deal with float/double constants
  if (Bytes.size() < 4)
    return errOperand(0, "cannot read literal, inst bytes left " +
                         Twine(Bytes.size()));
  return MCOperand::createImm(eatBytes<uint32_t>(Bytes));
}

/// Decode an inline integer constant encoding: values up to
/// INLINE_INTEGER_C_POSITIVE_MAX map to non-negative integers (offset from
/// INLINE_INTEGER_C_MIN), values above map to negative integers.
MCOperand AMDGPUDisassembler::decodeIntImmed(unsigned Imm) {
  using namespace AMDGPU::EncValues;
  assert(Imm >= INLINE_INTEGER_C_MIN && Imm <= INLINE_INTEGER_C_MAX);
  return MCOperand::createImm((Imm <= INLINE_INTEGER_C_POSITIVE_MAX) ?
    (static_cast<int64_t>(Imm) - INLINE_INTEGER_C_MIN) :
    (INLINE_INTEGER_C_POSITIVE_MAX - static_cast<int64_t>(Imm)));
      // Cast prevents negative overflow.
}

/// Bit pattern of the 32-bit float inline constant for encoding \p Imm
/// (240..248).
static int64_t getInlineImmVal32(unsigned Imm) {
  switch (Imm) {
  case 240:
    return FloatToBits(0.5f);
  case 241:
    return FloatToBits(-0.5f);
  case 242:
    return FloatToBits(1.0f);
  case 243:
    return FloatToBits(-1.0f);
  case 244:
    return FloatToBits(2.0f);
  case 245:
    return FloatToBits(-2.0f);
  case 246:
    return FloatToBits(4.0f);
  case 247:
    return FloatToBits(-4.0f);
  case 248: // 1 / (2 * PI)
    return 0x3e22f983;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

/// Bit pattern of the 64-bit double inline constant for encoding \p Imm
/// (240..248).
static int64_t getInlineImmVal64(unsigned Imm) {
  switch (Imm) {
  case 240:
    return DoubleToBits(0.5);
  case 241:
    return DoubleToBits(-0.5);
  case 242:
    return DoubleToBits(1.0);
  case 243:
    return DoubleToBits(-1.0);
  case 244:
    return DoubleToBits(2.0);
  case 245:
    return DoubleToBits(-2.0);
  case 246:
    return DoubleToBits(4.0);
  case 247:
    return DoubleToBits(-4.0);
  case 248: // 1 / (2 * PI)
    return 0x3fc45f306dc9c882;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

/// Bit pattern of the 16-bit half-float inline constant for encoding \p Imm
/// (240..248): +-0.5, +-1.0, +-2.0, +-4.0, 1/(2*pi) as IEEE half bits.
static int64_t getInlineImmVal16(unsigned Imm) {
  switch (Imm) {
  case 240:
    return 0x3800;
  case 241:
    return 0xB800;
  case 242:
    return 0x3C00;
  case 243:
    return 0xBC00;
  case 244:
    return 0x4000;
  case 245:
    return 0xC000;
  case 246:
    return 0x4400;
  case 247:
    return 0xC400;
  case 248: // 1 / (2 * PI)
    return 0x3118;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

/// Decode an inline floating-point constant encoding, picking the bit width
/// that matches the operand \p Width.
MCOperand AMDGPUDisassembler::decodeFPImmed(OpWidthTy Width, unsigned Imm) {
  assert(Imm >= AMDGPU::EncValues::INLINE_FLOATING_C_MIN
      && Imm <= AMDGPU::EncValues::INLINE_FLOATING_C_MAX);

  // ToDo: case 248: 1/(2*PI) - is allowed only on VI
  switch (Width) {
  case OPW32:
    return MCOperand::createImm(getInlineImmVal32(Imm));
  case OPW64:
    return MCOperand::createImm(getInlineImmVal64(Imm));
  case OPW16:
  case OPWV216:
    return MCOperand::createImm(getInlineImmVal16(Imm));
  default:
    llvm_unreachable("implement me");
  }
}

/// Vector register class matching operand \p Width (16-bit operands live in
/// full 32-bit VGPRs).
unsigned AMDGPUDisassembler::getVgprClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;
  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
  case OPWV216:
    return VGPR_32RegClassID;
  case OPW64: return VReg_64RegClassID;
  case OPW128: return VReg_128RegClassID;
  }
}

/// Scalar register class matching operand \p Width.
unsigned AMDGPUDisassembler::getSgprClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;
  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
  case OPWV216:
    return SGPR_32RegClassID;
  case OPW64: return SGPR_64RegClassID;
  case OPW128: return SGPR_128RegClassID;
  }
}

/// Trap temporary register class matching operand \p Width.
unsigned AMDGPUDisassembler::getTtmpClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;
  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
  case OPWV216:
    return TTMP_32RegClassID;
  case OPW64: return TTMP_64RegClassID;
  case OPW128: return TTMP_128RegClassID;
  }
}

/// Decode a generic 9-bit source operand encoding \p Val for an operand of
/// the given \p Width. In encoding order the ranges are: VGPRs, SGPRs, trap
/// temporaries, inline integer constants, inline float constants, the
/// literal-constant marker (which consumes the next dword of the stream),
/// and finally special registers.
MCOperand
AMDGPUDisassembler::decodeSrcOp(const OpWidthTy Width, unsigned Val) const {
  using namespace AMDGPU::EncValues;
  assert(Val < 512); // enum9

  if (VGPR_MIN <= Val && Val <= VGPR_MAX) {
    return createRegOperand(getVgprClassId(Width), Val - VGPR_MIN);
  }
  if (Val <= SGPR_MAX) {
    assert(SGPR_MIN == 0); // "SGPR_MIN <= Val" is always true and causes compilation warning.
    return createSRegOperand(getSgprClassId(Width), Val - SGPR_MIN);
  }
  if (TTMP_MIN <= Val && Val <= TTMP_MAX) {
    return createSRegOperand(getTtmpClassId(Width), Val - TTMP_MIN);
  }

  // Remaining encodings are immediates/special regs; only these widths are
  // expected to reach here.
  assert(Width == OPW16 || Width == OPW32 || Width == OPW64);

  if (INLINE_INTEGER_C_MIN <= Val && Val <= INLINE_INTEGER_C_MAX)
    return decodeIntImmed(Val);

  if (INLINE_FLOATING_C_MIN <= Val && Val <= INLINE_FLOATING_C_MAX)
    return decodeFPImmed(Width, Val);

  if (Val == LITERAL_CONST)
    return decodeLiteralConstant();

  switch (Width) {
  case OPW32:
  case OPW16:
  case OPWV216:
    return decodeSpecialReg32(Val);
  case OPW64:
    return decodeSpecialReg64(Val);
  default:
    llvm_unreachable("unexpected immediate type");
  }
}

/// Decode a 32-bit special-register encoding (FLAT_SCR halves, VCC/TBA/TMA
/// halves, M0, EXEC halves, aperture registers, SCC). Unsupported or
/// unknown encodings produce an error operand.
MCOperand AMDGPUDisassembler::decodeSpecialReg32(unsigned Val) const {
  using namespace AMDGPU;
  switch (Val) {
  case 102: return createRegOperand(getMCReg(FLAT_SCR_LO, STI));
  case 103: return createRegOperand(getMCReg(FLAT_SCR_HI, STI));
    // ToDo: no support for xnack_mask_lo/_hi register
  case 104:
  case 105: break;
  case 106: return createRegOperand(VCC_LO);
  case 107: return createRegOperand(VCC_HI);
  case 108: return createRegOperand(TBA_LO);
  case 109: return createRegOperand(TBA_HI);
  case 110: return createRegOperand(TMA_LO);
  case 111: return createRegOperand(TMA_HI);
  case 124: return createRegOperand(M0);
  case 126: return createRegOperand(EXEC_LO);
  case 127: return createRegOperand(EXEC_HI);
  case 235: return createRegOperand(SRC_SHARED_BASE);
  case 236: return createRegOperand(SRC_SHARED_LIMIT);
  case 237: return createRegOperand(SRC_PRIVATE_BASE);
  case 238: return createRegOperand(SRC_PRIVATE_LIMIT);
    // TODO: SRC_POPS_EXITING_WAVE_ID
    // ToDo: no support for vccz register
  case 251: break;
    // ToDo: no support for execz register
  case 252: break;
  case 253: return createRegOperand(SCC);
  default: break;
  }
  return errOperand(Val, "unknown operand encoding " + Twine(Val));
}

/// Decode a 64-bit special-register encoding (full FLAT_SCR, VCC, TBA, TMA,
/// EXEC pairs). Unknown encodings produce an error operand.
MCOperand AMDGPUDisassembler::decodeSpecialReg64(unsigned Val) const {
  using namespace AMDGPU;
  switch (Val) {
  case 102: return createRegOperand(getMCReg(FLAT_SCR, STI));
  case 106: return createRegOperand(VCC);
  case 108: return createRegOperand(TBA);
  case 110: return createRegOperand(TMA);
  case 126: return createRegOperand(EXEC);
  default: break;
  }
  return errOperand(Val, "unknown operand encoding " + Twine(Val));
}

//===----------------------------------------------------------------------===//
// AMDGPUSymbolizer
//===----------------------------------------------------------------------===//

// Try to find symbol name for specified label
bool AMDGPUSymbolizer::tryAddingSymbolicOperand(MCInst &Inst,
                                raw_ostream &/*cStream*/, int64_t Value,
                                uint64_t /*Address*/, bool IsBranch,
                                uint64_t /*Offset*/, uint64_t /*InstSize*/) {
  // DisInfo is expected to be a section symbol table of
  // (address, name, ELF symbol type) tuples supplied by the client.
  typedef std::tuple<uint64_t, StringRef, uint8_t> SymbolInfoTy;
  typedef std::vector<SymbolInfoTy> SectionSymbolsTy;

  // Only branch targets are symbolized here.
  if (!IsBranch) {
    return false;
  }

  auto *Symbols = static_cast<SectionSymbolsTy *>(DisInfo);
  // Match an untyped (STT_NOTYPE) symbol whose address equals the branch
  // target value.
  auto Result = std::find_if(Symbols->begin(), Symbols->end(),
                             [Value](const SymbolInfoTy& Val) {
                                return std::get<0>(Val) == static_cast<uint64_t>(Value)
                                    && std::get<2>(Val) == ELF::STT_NOTYPE;
                             });
  if (Result != Symbols->end()) {
    auto *Sym = Ctx.getOrCreateSymbol(std::get<1>(*Result));
    const auto *Add = MCSymbolRefExpr::create(Sym, Ctx);
    Inst.addOperand(MCOperand::createExpr(Add));
    return true;
  }
  return false;
}

void AMDGPUSymbolizer::tryAddingPcLoadReferenceComment(raw_ostream &cStream,
                                                       int64_t Value,
                                                       uint64_t Address) {
  llvm_unreachable("unimplemented");
}

//===----------------------------------------------------------------------===//
// Initialization
//===----------------------------------------------------------------------===//

static MCSymbolizer *createAMDGPUSymbolizer(const Triple &/*TT*/,
                              LLVMOpInfoCallback /*GetOpInfo*/,
                              LLVMSymbolLookupCallback /*SymbolLookUp*/,
                              void *DisInfo,
                              MCContext *Ctx,
                              std::unique_ptr<MCRelocationInfo> &&RelInfo) {
  return new AMDGPUSymbolizer(*Ctx, std::move(RelInfo), DisInfo);
}

static MCDisassembler *createAMDGPUDisassembler(const Target &T,
                                                const MCSubtargetInfo &STI,
                                                MCContext &Ctx) {
  return new AMDGPUDisassembler(STI, Ctx);
}

/// Register the GCN disassembler and symbolizer with the target registry.
extern "C" void LLVMInitializeAMDGPUDisassembler() {
  TargetRegistry::RegisterMCDisassembler(getTheGCNTarget(),
                                         createAMDGPUDisassembler);
  TargetRegistry::RegisterMCSymbolizer(getTheGCNTarget(),
                                       createAMDGPUSymbolizer);
}