//===-- AMDGPUDisassembler.cpp - Disassembler for AMDGPU ISA --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
///
/// This file contains the definition of the AMDGPU ISA disassembler.
//
//===----------------------------------------------------------------------===//

// ToDo: What to do with instruction suffixes (v_mov_b32 vs v_mov_b32_e32)?

#include "AMDGPUDisassembler.h"
#include "AMDGPU.h"
#include "AMDGPURegisterInfo.h"
#include "SIDefines.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"

#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCFixedLenDisassembler.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/TargetRegistry.h"


using namespace llvm;

#define DEBUG_TYPE "amdgpu-disassembler"

typedef llvm::MCDisassembler::DecodeStatus DecodeStatus;

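// Add the decoded operand to Inst. An invalid (empty) MCOperand, as produced
// by errOperand() on a decode error, yields SoftFail so disassembly can
// continue while still reporting the problem.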
inline static MCDisassembler::DecodeStatus
addOperand(MCInst &Inst, const MCOperand& Opnd) {
  Inst.addOperand(Opnd);
  return Opnd.isValid() ?
    MCDisassembler::Success :
    MCDisassembler::SoftFail;
}

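// Branch targets of SOPP instructions (s_branch, s_cbranch_*) are encoded as a
// signed 16-bit dword offset relative to the instruction following the branch,
// i.e. the target address is Addr + 4 + 4 * sext(Imm). For example,
// Imm = 0xFFFF (-1) branches back to the s_branch itself: Addr + 4 - 4 == Addr.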
static DecodeStatus decodeSoppBrTarget(MCInst &Inst, unsigned Imm,
                                       uint64_t Addr, const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);

  APInt SignedOffset(18, Imm * 4, true);
  int64_t Offset = (SignedOffset.sext(64) + 4 + Addr).getSExtValue();

  if (DAsm->tryAddingSymbolicOperand(Inst, Offset, Addr, true, 2, 2))
    return MCDisassembler::Success;
  return addOperand(Inst, MCOperand::createImm(Imm));
}

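// The DECODE_OPERAND* macros stamp out the Decode<RegClass>RegisterClass
// callbacks referenced by the TableGen-generated decoder tables. Each one just
// forwards the raw encoding value to the corresponding
// AMDGPUDisassembler::decodeOperand_* method.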
#define DECODE_OPERAND2(RegClass, DecName) \
static DecodeStatus Decode##RegClass##RegisterClass(MCInst &Inst, \
                                                    unsigned Imm, \
                                                    uint64_t /*Addr*/, \
                                                    const void *Decoder) { \
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder); \
  return addOperand(Inst, DAsm->decodeOperand_##DecName(Imm)); \
}

#define DECODE_OPERAND(RegClass) DECODE_OPERAND2(RegClass, RegClass)

DECODE_OPERAND(VGPR_32)
DECODE_OPERAND(VS_32)
DECODE_OPERAND(VS_64)

DECODE_OPERAND(VReg_64)
DECODE_OPERAND(VReg_96)
DECODE_OPERAND(VReg_128)

DECODE_OPERAND(SReg_32)
DECODE_OPERAND(SReg_32_XM0_XEXEC)
DECODE_OPERAND(SReg_64)
DECODE_OPERAND(SReg_64_XEXEC)
DECODE_OPERAND(SReg_128)
DECODE_OPERAND(SReg_256)
DECODE_OPERAND(SReg_512)


static DecodeStatus decodeOperand_VSrc16(MCInst &Inst,
                                         unsigned Imm,
                                         uint64_t Addr,
                                         const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VSrc16(Imm));
}

static DecodeStatus decodeOperand_VSrcV216(MCInst &Inst,
                                           unsigned Imm,
                                           uint64_t Addr,
                                           const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VSrcV216(Imm));
}

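// The decoder tables (DecoderTableVI32, DecoderTableSDWA64, ...) and the
// decodeInstruction() helper used below are generated by TableGen from the
// instruction encodings.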
#include "AMDGPUGenDisassemblerTables.inc"

//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//

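// Read the next sizeof(T) bytes from Bytes as a little-endian value and
// advance Bytes past them, so that Bytes always covers the not-yet-consumed
// part of the instruction.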
template <typename T> static inline T eatBytes(ArrayRef<uint8_t>& Bytes) {
  assert(Bytes.size() >= sizeof(T));
  const auto Res = support::endian::read<T, support::endianness::little>(Bytes.data());
  Bytes = Bytes.slice(sizeof(T));
  return Res;
}

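// Try to decode the encoded instruction bits (Inst) into MI using one
// generated decoder table. On failure the Bytes view is restored, because
// operand decoders (e.g. decodeLiteralConstant) may already have consumed a
// trailing literal dword.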
DecodeStatus AMDGPUDisassembler::tryDecodeInst(const uint8_t* Table,
                                               MCInst &MI,
                                               uint64_t Inst,
                                               uint64_t Address) const {
  assert(MI.getOpcode() == 0);
  assert(MI.getNumOperands() == 0);
  MCInst TmpInst;
  HasLiteral = false;
  const auto SavedBytes = Bytes;
  if (decodeInstruction(Table, TmpInst, Inst, Address, this, STI)) {
    MI = TmpInst;
    return MCDisassembler::Success;
  }
  Bytes = SavedBytes;
  return MCDisassembler::Fail;
}

DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
                                                ArrayRef<uint8_t> Bytes_,
                                                uint64_t Address,
                                                raw_ostream &WS,
                                                raw_ostream &CS) const {
  CommentStream = &CS;

  // ToDo: AMDGPUDisassembler supports only VI ISA.
  if (!STI.getFeatureBits()[AMDGPU::FeatureGCN3Encoding])
    report_fatal_error("Disassembly not yet supported for subtarget");

  const unsigned MaxInstBytesNum = (std::min)((size_t)8, Bytes_.size());
  Bytes = Bytes_.slice(0, MaxInstBytesNum);

  DecodeStatus Res = MCDisassembler::Fail;
  do {
    // ToDo: it would be better to switch encoding length using some bit
    // predicate, but such a predicate is not known yet, so try everything.

    // Try to decode DPP and SDWA first to solve the conflict with VOP1 and
    // VOP2 encodings.
    if (Bytes.size() >= 8) {
      const uint64_t QW = eatBytes<uint64_t>(Bytes);
      Res = tryDecodeInst(DecoderTableDPP64, MI, QW, Address);
      if (Res) break;

      Res = tryDecodeInst(DecoderTableSDWA64, MI, QW, Address);
      if (Res) break;
    }

    // Reinitialize Bytes as DPP64 could have eaten too much.
    Bytes = Bytes_.slice(0, MaxInstBytesNum);

    // Try to decode a 32-bit instruction.
    if (Bytes.size() < 4) break;
    const uint32_t DW = eatBytes<uint32_t>(Bytes);
    Res = tryDecodeInst(DecoderTableVI32, MI, DW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableAMDGPU32, MI, DW, Address);
    if (Res) break;

    if (Bytes.size() < 4) break;
    const uint64_t QW = ((uint64_t)eatBytes<uint32_t>(Bytes) << 32) | DW;
    Res = tryDecodeInst(DecoderTableVI64, MI, QW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableAMDGPU64, MI, QW, Address);
  } while (false);

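  // The e64 forms of V_MAC carry no decodable src2 source modifiers (src2 is
  // tied to the destination), so the generated decoder leaves that operand
  // out; insert a dummy src2_modifiers so the MCInst matches the expected
  // operand layout.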
  if (Res && (MI.getOpcode() == AMDGPU::V_MAC_F32_e64_vi ||
              MI.getOpcode() == AMDGPU::V_MAC_F32_e64_si ||
              MI.getOpcode() == AMDGPU::V_MAC_F16_e64_vi)) {
    // Insert dummy unused src2_modifiers.
    int Src2ModIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                                AMDGPU::OpName::src2_modifiers);
    auto I = MI.begin();
    std::advance(I, Src2ModIdx);
    MI.insert(I, MCOperand::createImm(0));
  }

  Size = Res ? (MaxInstBytesNum - Bytes.size()) : 0;
  return Res;
}

const char* AMDGPUDisassembler::getRegClassName(unsigned RegClassID) const {
  return getContext().getRegisterInfo()->
    getRegClassName(&AMDGPUMCRegisterClasses[RegClassID]);
}

inline
MCOperand AMDGPUDisassembler::errOperand(unsigned V,
                                         const Twine& ErrMsg) const {
  *CommentStream << "Error: " + ErrMsg;

  // ToDo: add support for error operands to MCInst.h
  // return MCOperand::createError(V);
  return MCOperand();
}

inline
MCOperand AMDGPUDisassembler::createRegOperand(unsigned int RegId) const {
  return MCOperand::createReg(RegId);
}

inline
MCOperand AMDGPUDisassembler::createRegOperand(unsigned RegClassID,
                                               unsigned Val) const {
  const auto& RegCl = AMDGPUMCRegisterClasses[RegClassID];
  if (Val >= RegCl.getNumRegs())
    return errOperand(Val, Twine(getRegClassName(RegClassID)) +
                           ": unknown register " + Twine(Val));
  return createRegOperand(RegCl.getRegister(Val));
}

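// Scalar operands are encoded as the number of the first (lowest) SGPR or TTMP
// in the tuple, while the MC register classes are indexed by tuple, so shift
// converts from the encoded register number to the class index: divide by 2
// for 64-bit tuples and by 4 for 128-bit and wider tuples.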
inline
MCOperand AMDGPUDisassembler::createSRegOperand(unsigned SRegClassID,
                                                unsigned Val) const {
  // ToDo: SI/CI have 104 SGPRs, VI has 102.
  // Valery: here we accept as much as we can and let the assembler sort it out.
  int shift = 0;
  switch (SRegClassID) {
  case AMDGPU::SGPR_32RegClassID:
  case AMDGPU::TTMP_32RegClassID:
    break;
  case AMDGPU::SGPR_64RegClassID:
  case AMDGPU::TTMP_64RegClassID:
    shift = 1;
    break;
  case AMDGPU::SGPR_128RegClassID:
  case AMDGPU::TTMP_128RegClassID:
  // ToDo: unclear if s[100:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  case AMDGPU::SReg_256RegClassID:
  // ToDo: unclear if s[96:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  case AMDGPU::SReg_512RegClassID:
    shift = 2;
    break;
  // ToDo: unclear if s[88:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  default:
    llvm_unreachable("unhandled register class");
  }

  if (Val % (1 << shift)) {
    *CommentStream << "Warning: " << getRegClassName(SRegClassID)
                   << ": scalar reg isn't aligned " << Val;
  }

  return createRegOperand(SRegClassID, Val >> shift);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_32(unsigned Val) const {
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VSrc16(unsigned Val) const {
  return decodeSrcOp(OPW16, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VSrcV216(unsigned Val) const {
  return decodeSrcOp(OPWV216, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VGPR_32(unsigned Val) const {
  // Some instructions have operand restrictions beyond what the encoding
  // allows. Some ordinarily VSrc_32 operands are VGPR_32, so clear the extra
  // high bit.
  Val &= 255;

  return createRegOperand(AMDGPU::VGPR_32RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_64(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_64RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_96(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_96RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_128(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_128RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32(unsigned Val) const {
  // The TableGen'd disassembler doesn't care about operand types, only
  // register classes, so an SSrc_32 operand turns into SReg_32 and we
  // therefore accept immediates and literals here as well.
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32_XM0_XEXEC(
  unsigned Val) const {
  // SReg_32_XM0_XEXEC is SReg_32 without M0 or EXEC_LO/EXEC_HI.
  return decodeOperand_SReg_32(Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_64_XEXEC(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_128(unsigned Val) const {
  return decodeSrcOp(OPW128, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_256(unsigned Val) const {
  return createSRegOperand(AMDGPU::SReg_256RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_512(unsigned Val) const {
  return createSRegOperand(AMDGPU::SReg_512RegClassID, Val);
}

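// The 32-bit literal constant follows the instruction words in the byte
// stream. It is read from Bytes at most once and cached in HasLiteral/Literal
// so that repeated operand decodes do not consume additional bytes.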
MCOperand AMDGPUDisassembler::decodeLiteralConstant() const {
  // For now all literal constants are supposed to be unsigned integers.
  // ToDo: deal with signed/unsigned 64-bit integer constants
  // ToDo: deal with float/double constants
  if (!HasLiteral) {
    if (Bytes.size() < 4) {
      return errOperand(0, "cannot read literal, inst bytes left " +
                        Twine(Bytes.size()));
    }
    HasLiteral = true;
    Literal = eatBytes<uint32_t>(Bytes);
  }
  return MCOperand::createImm(Literal);
}

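// Inline integer constants are encoded as 128 + value: 128 is 0, 129..192 are
// 1..64, and 193..208 are -1..-16.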
MCOperand AMDGPUDisassembler::decodeIntImmed(unsigned Imm) {
  using namespace AMDGPU::EncValues;
  assert(Imm >= INLINE_INTEGER_C_MIN && Imm <= INLINE_INTEGER_C_MAX);
  return MCOperand::createImm((Imm <= INLINE_INTEGER_C_POSITIVE_MAX) ?
    (static_cast<int64_t>(Imm) - INLINE_INTEGER_C_MIN) :
    (INLINE_INTEGER_C_POSITIVE_MAX - static_cast<int64_t>(Imm)));
      // Cast prevents negative overflow.
}

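// Inline floating-point constants 240..248 encode +0.5, -0.5, +1.0, -1.0,
// +2.0, -2.0, +4.0, -4.0 and 1/(2*pi); the helpers below return the bit
// pattern of that value at the requested operand width.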
static int64_t getInlineImmVal32(unsigned Imm) {
  switch (Imm) {
  case 240:
    return FloatToBits(0.5f);
  case 241:
    return FloatToBits(-0.5f);
  case 242:
    return FloatToBits(1.0f);
  case 243:
    return FloatToBits(-1.0f);
  case 244:
    return FloatToBits(2.0f);
  case 245:
    return FloatToBits(-2.0f);
  case 246:
    return FloatToBits(4.0f);
  case 247:
    return FloatToBits(-4.0f);
  case 248: // 1 / (2 * PI)
    return 0x3e22f983;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

static int64_t getInlineImmVal64(unsigned Imm) {
  switch (Imm) {
  case 240:
    return DoubleToBits(0.5);
  case 241:
    return DoubleToBits(-0.5);
  case 242:
    return DoubleToBits(1.0);
  case 243:
    return DoubleToBits(-1.0);
  case 244:
    return DoubleToBits(2.0);
  case 245:
    return DoubleToBits(-2.0);
  case 246:
    return DoubleToBits(4.0);
  case 247:
    return DoubleToBits(-4.0);
  case 248: // 1 / (2 * PI)
    return 0x3fc45f306dc9c882;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

static int64_t getInlineImmVal16(unsigned Imm) {
  switch (Imm) {
  case 240:
    return 0x3800;
  case 241:
    return 0xB800;
  case 242:
    return 0x3C00;
  case 243:
    return 0xBC00;
  case 244:
    return 0x4000;
  case 245:
    return 0xC000;
  case 246:
    return 0x4400;
  case 247:
    return 0xC400;
  case 248: // 1 / (2 * PI)
    return 0x3118;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

MCOperand AMDGPUDisassembler::decodeFPImmed(OpWidthTy Width, unsigned Imm) {
  assert(Imm >= AMDGPU::EncValues::INLINE_FLOATING_C_MIN
      && Imm <= AMDGPU::EncValues::INLINE_FLOATING_C_MAX);

  // ToDo: case 248: 1/(2*PI) - is allowed only on VI
  switch (Width) {
  case OPW32:
    return MCOperand::createImm(getInlineImmVal32(Imm));
  case OPW64:
    return MCOperand::createImm(getInlineImmVal64(Imm));
  case OPW16:
  case OPWV216:
    return MCOperand::createImm(getInlineImmVal16(Imm));
  default:
    llvm_unreachable("implement me");
  }
}

unsigned AMDGPUDisassembler::getVgprClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;
  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
  case OPWV216:
    return VGPR_32RegClassID;
  case OPW64: return VReg_64RegClassID;
  case OPW128: return VReg_128RegClassID;
  }
}

unsigned AMDGPUDisassembler::getSgprClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;
  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
  case OPWV216:
    return SGPR_32RegClassID;
  case OPW64: return SGPR_64RegClassID;
  case OPW128: return SGPR_128RegClassID;
  }
}

unsigned AMDGPUDisassembler::getTtmpClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;
  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
  case OPWV216:
    return TTMP_32RegClassID;
  case OPW64: return TTMP_64RegClassID;
  case OPW128: return TTMP_128RegClassID;
  }
}

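// Decode a 9-bit source operand. The encoding space is: 0..101 SGPRs,
// 112..123 TTMPs, 128..208 inline integer constants, 240..248 inline
// floating-point constants, 255 a trailing 32-bit literal, and 256..511 VGPRs;
// everything else is a special register handled by decodeSpecialReg32/64.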
MCOperand AMDGPUDisassembler::decodeSrcOp(const OpWidthTy Width, unsigned Val) const {
  using namespace AMDGPU::EncValues;
  assert(Val < 512); // enum9

  if (VGPR_MIN <= Val && Val <= VGPR_MAX) {
    return createRegOperand(getVgprClassId(Width), Val - VGPR_MIN);
  }
  if (Val <= SGPR_MAX) {
    assert(SGPR_MIN == 0); // "SGPR_MIN <= Val" is always true and causes compilation warning.
    return createSRegOperand(getSgprClassId(Width), Val - SGPR_MIN);
  }
  if (TTMP_MIN <= Val && Val <= TTMP_MAX) {
    return createSRegOperand(getTtmpClassId(Width), Val - TTMP_MIN);
  }

  assert(Width == OPW16 || Width == OPWV216 ||
         Width == OPW32 || Width == OPW64);

  if (INLINE_INTEGER_C_MIN <= Val && Val <= INLINE_INTEGER_C_MAX)
    return decodeIntImmed(Val);

  if (INLINE_FLOATING_C_MIN <= Val && Val <= INLINE_FLOATING_C_MAX)
    return decodeFPImmed(Width, Val);

  if (Val == LITERAL_CONST)
    return decodeLiteralConstant();

  switch (Width) {
  case OPW32:
  case OPW16:
  case OPWV216:
    return decodeSpecialReg32(Val);
  case OPW64:
    return decodeSpecialReg64(Val);
  default:
    llvm_unreachable("unexpected immediate type");
  }
}

MCOperand AMDGPUDisassembler::decodeSpecialReg32(unsigned Val) const {
  using namespace AMDGPU;
  switch (Val) {
  case 102: return createRegOperand(getMCReg(FLAT_SCR_LO, STI));
  case 103: return createRegOperand(getMCReg(FLAT_SCR_HI, STI));
    // ToDo: no support for xnack_mask_lo/_hi register
  case 104:
  case 105: break;
  case 106: return createRegOperand(VCC_LO);
  case 107: return createRegOperand(VCC_HI);
  case 108: return createRegOperand(TBA_LO);
  case 109: return createRegOperand(TBA_HI);
  case 110: return createRegOperand(TMA_LO);
  case 111: return createRegOperand(TMA_HI);
  case 124: return createRegOperand(M0);
  case 126: return createRegOperand(EXEC_LO);
  case 127: return createRegOperand(EXEC_HI);
  case 235: return createRegOperand(SRC_SHARED_BASE);
  case 236: return createRegOperand(SRC_SHARED_LIMIT);
  case 237: return createRegOperand(SRC_PRIVATE_BASE);
  case 238: return createRegOperand(SRC_PRIVATE_LIMIT);
    // TODO: SRC_POPS_EXITING_WAVE_ID
    // ToDo: no support for vccz register
  case 251: break;
    // ToDo: no support for execz register
  case 252: break;
  case 253: return createRegOperand(SCC);
  default: break;
  }
  return errOperand(Val, "unknown operand encoding " + Twine(Val));
}

MCOperand AMDGPUDisassembler::decodeSpecialReg64(unsigned Val) const {
  using namespace AMDGPU;
  switch (Val) {
  case 102: return createRegOperand(getMCReg(FLAT_SCR, STI));
  case 106: return createRegOperand(VCC);
  case 108: return createRegOperand(TBA);
  case 110: return createRegOperand(TMA);
  case 126: return createRegOperand(EXEC);
  default: break;
  }
  return errOperand(Val, "unknown operand encoding " + Twine(Val));
}

//===----------------------------------------------------------------------===//
// AMDGPUSymbolizer
//===----------------------------------------------------------------------===//

// Try to find a symbol name for the specified branch target.
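// DisInfo is expected to point at the SectionSymbolsTy vector of
// (address, name, type) tuples that the client (e.g. llvm-objdump) passed to
// createAMDGPUSymbolizer; only STT_NOTYPE symbols (labels) are matched.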
bool AMDGPUSymbolizer::tryAddingSymbolicOperand(MCInst &Inst,
                                raw_ostream &/*cStream*/, int64_t Value,
                                uint64_t /*Address*/, bool IsBranch,
                                uint64_t /*Offset*/, uint64_t /*InstSize*/) {
  typedef std::tuple<uint64_t, StringRef, uint8_t> SymbolInfoTy;
  typedef std::vector<SymbolInfoTy> SectionSymbolsTy;

  if (!IsBranch) {
    return false;
  }

  auto *Symbols = static_cast<SectionSymbolsTy *>(DisInfo);
  auto Result = std::find_if(Symbols->begin(), Symbols->end(),
                             [Value](const SymbolInfoTy& Val) {
                                return std::get<0>(Val) == static_cast<uint64_t>(Value)
                                    && std::get<2>(Val) == ELF::STT_NOTYPE;
                             });
  if (Result != Symbols->end()) {
    auto *Sym = Ctx.getOrCreateSymbol(std::get<1>(*Result));
    const auto *Add = MCSymbolRefExpr::create(Sym, Ctx);
    Inst.addOperand(MCOperand::createExpr(Add));
    return true;
  }
  return false;
}

void AMDGPUSymbolizer::tryAddingPcLoadReferenceComment(raw_ostream &cStream,
                                                       int64_t Value,
                                                       uint64_t Address) {
  llvm_unreachable("unimplemented");
}

//===----------------------------------------------------------------------===//
// Initialization
//===----------------------------------------------------------------------===//

static MCSymbolizer *createAMDGPUSymbolizer(const Triple &/*TT*/,
                              LLVMOpInfoCallback /*GetOpInfo*/,
                              LLVMSymbolLookupCallback /*SymbolLookUp*/,
                              void *DisInfo,
                              MCContext *Ctx,
                              std::unique_ptr<MCRelocationInfo> &&RelInfo) {
  return new AMDGPUSymbolizer(*Ctx, std::move(RelInfo), DisInfo);
}

static MCDisassembler *createAMDGPUDisassembler(const Target &T,
                                                const MCSubtargetInfo &STI,
                                                MCContext &Ctx) {
  return new AMDGPUDisassembler(STI, Ctx);
}

extern "C" void LLVMInitializeAMDGPUDisassembler() {
  TargetRegistry::RegisterMCDisassembler(getTheGCNTarget(),
                                         createAMDGPUDisassembler);
  TargetRegistry::RegisterMCSymbolizer(getTheGCNTarget(),
                                       createAMDGPUSymbolizer);
}