//===-- AMDGPUDisassembler.cpp - Disassembler for AMDGPU ISA --------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//
//
/// \file
///
/// This file contains the definition of the AMDGPU ISA disassembler.
//
//===----------------------------------------------------------------------===//

// ToDo: What to do with instruction suffixes (v_mov_b32 vs v_mov_b32_e32)?

#include "AMDGPUDisassembler.h"
#include "AMDGPU.h"
#include "AMDGPURegisterInfo.h"
#include "SIDefines.h"
#include "Utils/AMDGPUBaseInfo.h"

#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCFixedLenDisassembler.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/TargetRegistry.h"


using namespace llvm;

#define DEBUG_TYPE "amdgpu-disassembler"

typedef llvm::MCDisassembler::DecodeStatus DecodeStatus;


inline static MCDisassembler::DecodeStatus
addOperand(MCInst &Inst, const MCOperand& Opnd) {
  Inst.addOperand(Opnd);
  return Opnd.isValid() ?
    MCDisassembler::Success :
    MCDisassembler::SoftFail;
}

// The SOPP branch immediate is a signed 16-bit word offset relative to the
// instruction that follows the branch; multiplying by 4 turns it into an
// 18-bit byte offset, which is then added to Addr + 4.
static DecodeStatus decodeSoppBrTarget(MCInst &Inst, unsigned Imm,
                                       uint64_t Addr, const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);

  APInt SignedOffset(18, Imm * 4, true);
  int64_t Offset = (SignedOffset.sext(64) + 4 + Addr).getSExtValue();

  if (DAsm->tryAddingSymbolicOperand(Inst, Offset, Addr, true, 2, 2))
    return MCDisassembler::Success;
  return addOperand(Inst, MCOperand::createImm(Imm));
}

#define DECODE_OPERAND2(RegClass, DecName) \
static DecodeStatus Decode##RegClass##RegisterClass(MCInst &Inst, \
                                                    unsigned Imm, \
                                                    uint64_t /*Addr*/, \
                                                    const void *Decoder) { \
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder); \
  return addOperand(Inst, DAsm->decodeOperand_##DecName(Imm)); \
}

#define DECODE_OPERAND(RegClass) DECODE_OPERAND2(RegClass, RegClass)

DECODE_OPERAND(VGPR_32)
DECODE_OPERAND(VS_32)
DECODE_OPERAND(VS_64)

DECODE_OPERAND(VReg_64)
DECODE_OPERAND(VReg_96)
DECODE_OPERAND(VReg_128)

DECODE_OPERAND(SReg_32)
DECODE_OPERAND(SReg_32_XM0_XEXEC)
DECODE_OPERAND(SReg_64)
DECODE_OPERAND(SReg_64_XEXEC)
DECODE_OPERAND(SReg_128)
DECODE_OPERAND(SReg_256)
DECODE_OPERAND(SReg_512)


static DecodeStatus decodeOperand_VSrc16(MCInst &Inst,
                                         unsigned Imm,
                                         uint64_t Addr,
                                         const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VSrc16(Imm));
}

#define GET_SUBTARGETINFO_ENUM
#include "AMDGPUGenSubtargetInfo.inc"
#undef GET_SUBTARGETINFO_ENUM

#include "AMDGPUGenDisassemblerTables.inc"

//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//

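// Consume sizeof(T) bytes from the front of Bytes, interpreted as a
// little-endian value of type T, and advance Bytes past them.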
template <typename T> static inline T eatBytes(ArrayRef<uint8_t>& Bytes) {
  assert(Bytes.size() >= sizeof(T));
  const auto Res =
      support::endian::read<T, support::endianness::little>(Bytes.data());
  Bytes = Bytes.slice(sizeof(T));
  return Res;
}

DecodeStatus AMDGPUDisassembler::tryDecodeInst(const uint8_t* Table,
                                               MCInst &MI,
                                               uint64_t Inst,
                                               uint64_t Address) const {
  assert(MI.getOpcode() == 0);
  assert(MI.getNumOperands() == 0);
  MCInst TmpInst;
  const auto SavedBytes = Bytes;
  if (decodeInstruction(Table, TmpInst, Inst, Address, this, STI)) {
    MI = TmpInst;
    return MCDisassembler::Success;
  }
  Bytes = SavedBytes;
  return MCDisassembler::Fail;
}

DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
                                                ArrayRef<uint8_t> Bytes_,
                                                uint64_t Address,
                                                raw_ostream &WS,
                                                raw_ostream &CS) const {
  CommentStream = &CS;

  // ToDo: AMDGPUDisassembler supports only VI ISA.
  if (!STI.getFeatureBits()[AMDGPU::FeatureGCN3Encoding])
    report_fatal_error("Disassembly not yet supported for subtarget");

  const unsigned MaxInstBytesNum = (std::min)((size_t)8, Bytes_.size());
  Bytes = Bytes_.slice(0, MaxInstBytesNum);

  DecodeStatus Res = MCDisassembler::Fail;
  do {
    // ToDo: it would be better to select the encoding length using some bit
    // predicate, but that is not known yet, so try everything we can.

    // Try to decode DPP and SDWA first to resolve the conflict with the VOP1
    // and VOP2 encodings.
    if (Bytes.size() >= 8) {
      const uint64_t QW = eatBytes<uint64_t>(Bytes);
      Res = tryDecodeInst(DecoderTableDPP64, MI, QW, Address);
      if (Res) break;

      Res = tryDecodeInst(DecoderTableSDWA64, MI, QW, Address);
      if (Res) break;
    }

    // Reinitialize Bytes as DPP64 could have eaten too much.
    Bytes = Bytes_.slice(0, MaxInstBytesNum);

    // Try to decode a 32-bit instruction.
    if (Bytes.size() < 4) break;
    const uint32_t DW = eatBytes<uint32_t>(Bytes);
    Res = tryDecodeInst(DecoderTableVI32, MI, DW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableAMDGPU32, MI, DW, Address);
    if (Res) break;

    // Fall back to the 64-bit encodings: read the second dword and retry.
    if (Bytes.size() < 4) break;
    const uint64_t QW = ((uint64_t)eatBytes<uint32_t>(Bytes) << 32) | DW;
    Res = tryDecodeInst(DecoderTableVI64, MI, QW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableAMDGPU64, MI, QW, Address);
  } while (false);

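  // Bytes was advanced past each successfully decoded word above, so the
  // difference below is the size of the decoded instruction in bytes; report
  // a size of 0 when decoding failed.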
  Size = Res ? (MaxInstBytesNum - Bytes.size()) : 0;
  return Res;
}

const char* AMDGPUDisassembler::getRegClassName(unsigned RegClassID) const {
  return getContext().getRegisterInfo()->
    getRegClassName(&AMDGPUMCRegisterClasses[RegClassID]);
}

inline
MCOperand AMDGPUDisassembler::errOperand(unsigned V,
                                         const Twine& ErrMsg) const {
  *CommentStream << "Error: " + ErrMsg;

  // ToDo: add support for error operands to MCInst.h
  // return MCOperand::createError(V);
  return MCOperand();
}

inline
MCOperand AMDGPUDisassembler::createRegOperand(unsigned int RegId) const {
  return MCOperand::createReg(RegId);
}

inline
MCOperand AMDGPUDisassembler::createRegOperand(unsigned RegClassID,
                                               unsigned Val) const {
  const auto& RegCl = AMDGPUMCRegisterClasses[RegClassID];
  if (Val >= RegCl.getNumRegs())
    return errOperand(Val, Twine(getRegClassName(RegClassID)) +
                           ": unknown register " + Twine(Val));
  return createRegOperand(RegCl.getRegister(Val));
}

inline
MCOperand AMDGPUDisassembler::createSRegOperand(unsigned SRegClassID,
                                                unsigned Val) const {
  // ToDo: SI/CI have 104 SGPRs, VI - 102
  // Valery: here we are accepting as much as we can, let the assembler sort
  // it out.
  int shift = 0;
  switch (SRegClassID) {
  case AMDGPU::SGPR_32RegClassID:
  case AMDGPU::TTMP_32RegClassID:
    break;
  case AMDGPU::SGPR_64RegClassID:
  case AMDGPU::TTMP_64RegClassID:
    shift = 1;
    break;
  case AMDGPU::SGPR_128RegClassID:
  case AMDGPU::TTMP_128RegClassID:
  // ToDo: unclear if s[100:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  case AMDGPU::SReg_256RegClassID:
  // ToDo: unclear if s[96:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  case AMDGPU::SReg_512RegClassID:
    shift = 2;
    break;
  // ToDo: unclear if s[88:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  default:
    llvm_unreachable("unhandled register class");
  }

  if (Val % (1 << shift)) {
    *CommentStream << "Warning: " << getRegClassName(SRegClassID)
                   << ": scalar reg isn't aligned " << Val;
  }

  return createRegOperand(SRegClassID, Val >> shift);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_32(unsigned Val) const {
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VSrc16(unsigned Val) const {
  return decodeSrcOp(OPW16, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VGPR_32(unsigned Val) const {
  // Some instructions have operand restrictions beyond what the encoding
  // allows. Some ordinarily VSrc_32 operands are VGPR_32, so clear the extra
  // high bit.
  Val &= 255;

  return createRegOperand(AMDGPU::VGPR_32RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_64(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_64RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_96(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_96RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_128(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_128RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32(unsigned Val) const {
  // The table-gen'd disassembler doesn't care about operand types, leaving
  // only the register class, so an SSrc_32 operand turns into SReg_32 and
  // therefore we accept immediates and literals here as well.
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32_XM0_XEXEC(
  unsigned Val) const {
  // SReg_32_XM0 is SReg_32 without M0 or EXEC_LO/EXEC_HI
  return decodeOperand_SReg_32(Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_64_XEXEC(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_128(unsigned Val) const {
  return decodeSrcOp(OPW128, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_256(unsigned Val) const {
  return createSRegOperand(AMDGPU::SReg_256RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_512(unsigned Val) const {
  return createSRegOperand(AMDGPU::SReg_512RegClassID, Val);
}


MCOperand AMDGPUDisassembler::decodeLiteralConstant() const {
  // For now all literal constants are assumed to be unsigned integers.
  // ToDo: deal with signed/unsigned 64-bit integer constants
  // ToDo: deal with float/double constants
  if (Bytes.size() < 4)
    return errOperand(0, "cannot read literal, inst bytes left " +
                         Twine(Bytes.size()));
  return MCOperand::createImm(eatBytes<uint32_t>(Bytes));
}

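// Inline integer operands: encodings at or below INLINE_INTEGER_C_POSITIVE_MAX
// decode to the non-negative value Imm - INLINE_INTEGER_C_MIN; larger
// encodings decode to negative values counting down from -1.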
MCOperand AMDGPUDisassembler::decodeIntImmed(unsigned Imm) {
  using namespace AMDGPU::EncValues;
  assert(Imm >= INLINE_INTEGER_C_MIN && Imm <= INLINE_INTEGER_C_MAX);
  return MCOperand::createImm((Imm <= INLINE_INTEGER_C_POSITIVE_MAX) ?
    (static_cast<int64_t>(Imm) - INLINE_INTEGER_C_MIN) :
    (INLINE_INTEGER_C_POSITIVE_MAX - static_cast<int64_t>(Imm)));
    // Cast prevents negative overflow.
}

static int64_t getInlineImmVal32(unsigned Imm) {
  switch (Imm) {
  case 240:
    return FloatToBits(0.5f);
  case 241:
    return FloatToBits(-0.5f);
  case 242:
    return FloatToBits(1.0f);
  case 243:
    return FloatToBits(-1.0f);
  case 244:
    return FloatToBits(2.0f);
  case 245:
    return FloatToBits(-2.0f);
  case 246:
    return FloatToBits(4.0f);
  case 247:
    return FloatToBits(-4.0f);
  case 248: // 1 / (2 * PI)
    return 0x3e22f983;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

static int64_t getInlineImmVal64(unsigned Imm) {
  switch (Imm) {
  case 240:
    return DoubleToBits(0.5);
  case 241:
    return DoubleToBits(-0.5);
  case 242:
    return DoubleToBits(1.0);
  case 243:
    return DoubleToBits(-1.0);
  case 244:
    return DoubleToBits(2.0);
  case 245:
    return DoubleToBits(-2.0);
  case 246:
    return DoubleToBits(4.0);
  case 247:
    return DoubleToBits(-4.0);
  case 248: // 1 / (2 * PI)
    return 0x3fc45f306dc9c882;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

static int64_t getInlineImmVal16(unsigned Imm) {
  switch (Imm) {
  case 240:
    return 0x3800;
  case 241:
    return 0xB800;
  case 242:
    return 0x3C00;
  case 243:
    return 0xBC00;
  case 244:
    return 0x4000;
  case 245:
    return 0xC000;
  case 246:
    return 0x4400;
  case 247:
    return 0xC400;
  case 248: // 1 / (2 * PI)
    return 0x3118;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

MCOperand AMDGPUDisassembler::decodeFPImmed(OpWidthTy Width, unsigned Imm) {
  assert(Imm >= AMDGPU::EncValues::INLINE_FLOATING_C_MIN
      && Imm <= AMDGPU::EncValues::INLINE_FLOATING_C_MAX);

  // ToDo: case 248: 1/(2*PI) - is allowed only on VI
  switch (Width) {
  case OPW32:
    return MCOperand::createImm(getInlineImmVal32(Imm));
  case OPW64:
    return MCOperand::createImm(getInlineImmVal64(Imm));
  case OPW16:
    return MCOperand::createImm(getInlineImmVal16(Imm));
  default:
    llvm_unreachable("implement me");
  }
}

unsigned AMDGPUDisassembler::getVgprClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;
  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
    return VGPR_32RegClassID;
  case OPW64: return VReg_64RegClassID;
  case OPW128: return VReg_128RegClassID;
  }
}

unsigned AMDGPUDisassembler::getSgprClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;
  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
    return SGPR_32RegClassID;
  case OPW64: return SGPR_64RegClassID;
  case OPW128: return SGPR_128RegClassID;
  }
}

unsigned AMDGPUDisassembler::getTtmpClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;
  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
    return TTMP_32RegClassID;
  case OPW64: return TTMP_64RegClassID;
  case OPW128: return TTMP_128RegClassID;
  }
}

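// Decode a 9-bit source operand encoding: in order, the value selects a VGPR,
// an SGPR, a trap temporary (TTMP) register, an inline integer or
// floating-point constant, the 32-bit literal following the instruction, or
// one of the special registers handled by decodeSpecialReg32/64 below.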
MCOperand AMDGPUDisassembler::decodeSrcOp(const OpWidthTy Width, unsigned Val) const {
  using namespace AMDGPU::EncValues;
  assert(Val < 512); // enum9

  if (VGPR_MIN <= Val && Val <= VGPR_MAX) {
    return createRegOperand(getVgprClassId(Width), Val - VGPR_MIN);
  }
  if (Val <= SGPR_MAX) {
    assert(SGPR_MIN == 0); // "SGPR_MIN <= Val" is always true and causes compilation warning.
    return createSRegOperand(getSgprClassId(Width), Val - SGPR_MIN);
  }
  if (TTMP_MIN <= Val && Val <= TTMP_MAX) {
    return createSRegOperand(getTtmpClassId(Width), Val - TTMP_MIN);
  }

  assert(Width == OPW16 || Width == OPW32 || Width == OPW64);

  if (INLINE_INTEGER_C_MIN <= Val && Val <= INLINE_INTEGER_C_MAX)
    return decodeIntImmed(Val);

  if (INLINE_FLOATING_C_MIN <= Val && Val <= INLINE_FLOATING_C_MAX)
    return decodeFPImmed(Width, Val);

  if (Val == LITERAL_CONST)
    return decodeLiteralConstant();

  switch (Width) {
  case OPW32:
  case OPW16:
    return decodeSpecialReg32(Val);
  case OPW64:
    return decodeSpecialReg64(Val);
  default:
    llvm_unreachable("unexpected immediate type");
  }
}

MCOperand AMDGPUDisassembler::decodeSpecialReg32(unsigned Val) const {
  using namespace AMDGPU;
  switch (Val) {
  case 102: return createRegOperand(getMCReg(FLAT_SCR_LO, STI));
  case 103: return createRegOperand(getMCReg(FLAT_SCR_HI, STI));
    // ToDo: no support for xnack_mask_lo/_hi register
  case 104:
  case 105: break;
  case 106: return createRegOperand(VCC_LO);
  case 107: return createRegOperand(VCC_HI);
  case 108: return createRegOperand(TBA_LO);
  case 109: return createRegOperand(TBA_HI);
  case 110: return createRegOperand(TMA_LO);
  case 111: return createRegOperand(TMA_HI);
  case 124: return createRegOperand(M0);
  case 126: return createRegOperand(EXEC_LO);
  case 127: return createRegOperand(EXEC_HI);
  case 235: return createRegOperand(SRC_SHARED_BASE);
  case 236: return createRegOperand(SRC_SHARED_LIMIT);
  case 237: return createRegOperand(SRC_PRIVATE_BASE);
  case 238: return createRegOperand(SRC_PRIVATE_LIMIT);
    // TODO: SRC_POPS_EXITING_WAVE_ID
    // ToDo: no support for vccz register
  case 251: break;
    // ToDo: no support for execz register
  case 252: break;
  case 253: return createRegOperand(SCC);
  default: break;
  }
  return errOperand(Val, "unknown operand encoding " + Twine(Val));
}

MCOperand AMDGPUDisassembler::decodeSpecialReg64(unsigned Val) const {
  using namespace AMDGPU;
  switch (Val) {
  case 102: return createRegOperand(getMCReg(FLAT_SCR, STI));
  case 106: return createRegOperand(VCC);
  case 108: return createRegOperand(TBA);
  case 110: return createRegOperand(TMA);
  case 126: return createRegOperand(EXEC);
  default: break;
  }
  return errOperand(Val, "unknown operand encoding " + Twine(Val));
}

//===----------------------------------------------------------------------===//
// AMDGPUSymbolizer
//===----------------------------------------------------------------------===//

// Try to find symbol name for specified label
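// DisInfo is assumed to point to a SectionSymbolsTy, i.e. a vector of
// (address, name, ELF symbol type) tuples supplied by the disassembler
// client; only label (STT_NOTYPE) symbols at the branch target are matched.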
bool AMDGPUSymbolizer::tryAddingSymbolicOperand(MCInst &Inst,
                                raw_ostream &/*cStream*/, int64_t Value,
                                uint64_t /*Address*/, bool IsBranch,
                                uint64_t /*Offset*/, uint64_t /*InstSize*/) {
  typedef std::tuple<uint64_t, StringRef, uint8_t> SymbolInfoTy;
  typedef std::vector<SymbolInfoTy> SectionSymbolsTy;

  if (!IsBranch) {
    return false;
  }

  auto *Symbols = static_cast<SectionSymbolsTy *>(DisInfo);
  auto Result = std::find_if(Symbols->begin(), Symbols->end(),
                             [Value](const SymbolInfoTy& Val) {
                               return std::get<0>(Val) == static_cast<uint64_t>(Value)
                                   && std::get<2>(Val) == ELF::STT_NOTYPE;
                             });
  if (Result != Symbols->end()) {
    auto *Sym = Ctx.getOrCreateSymbol(std::get<1>(*Result));
    const auto *Add = MCSymbolRefExpr::create(Sym, Ctx);
    Inst.addOperand(MCOperand::createExpr(Add));
    return true;
  }
  return false;
}

void AMDGPUSymbolizer::tryAddingPcLoadReferenceComment(raw_ostream &cStream,
                                                       int64_t Value,
                                                       uint64_t Address) {
  llvm_unreachable("unimplemented");
}

//===----------------------------------------------------------------------===//
// Initialization
//===----------------------------------------------------------------------===//

static MCSymbolizer *createAMDGPUSymbolizer(const Triple &/*TT*/,
                              LLVMOpInfoCallback /*GetOpInfo*/,
                              LLVMSymbolLookupCallback /*SymbolLookUp*/,
                              void *DisInfo,
                              MCContext *Ctx,
                              std::unique_ptr<MCRelocationInfo> &&RelInfo) {
  return new AMDGPUSymbolizer(*Ctx, std::move(RelInfo), DisInfo);
}

static MCDisassembler *createAMDGPUDisassembler(const Target &T,
                                                const MCSubtargetInfo &STI,
                                                MCContext &Ctx) {
  return new AMDGPUDisassembler(STI, Ctx);
}

extern "C" void LLVMInitializeAMDGPUDisassembler() {
  TargetRegistry::RegisterMCDisassembler(getTheGCNTarget(),
                                         createAMDGPUDisassembler);
  TargetRegistry::RegisterMCSymbolizer(getTheGCNTarget(),
                                       createAMDGPUSymbolizer);
}