//===- AMDGPUDisassembler.cpp - Disassembler for AMDGPU ISA ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
///
/// This file contains the definition of the AMDGPU ISA disassembler.
//
//===----------------------------------------------------------------------===//

// ToDo: What to do with instruction suffixes (v_mov_b32 vs v_mov_b32_e32)?

#include "Disassembler/AMDGPUDisassembler.h"
#include "AMDGPU.h"
#include "AMDGPURegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIDefines.h"
#include "TargetInfo/AMDGPUTargetInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm-c/Disassembler.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDisassembler/MCDisassembler.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixedLenDisassembler.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <tuple>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "amdgpu-disassembler"

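// The highest valid SGPR source encoding differs by subtarget: GFX10 extends
// the SGPR range relative to earlier targets.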
#define SGPR_MAX (isGFX10() ? AMDGPU::EncValues::SGPR_MAX_GFX10 \
                            : AMDGPU::EncValues::SGPR_MAX_SI)

using DecodeStatus = llvm::MCDisassembler::DecodeStatus;

AMDGPUDisassembler::AMDGPUDisassembler(const MCSubtargetInfo &STI,
                                       MCContext &Ctx,
                                       MCInstrInfo const *MCII) :
  MCDisassembler(STI, Ctx), MCII(MCII), MRI(*Ctx.getRegisterInfo()),
  TargetMaxInstBytes(Ctx.getAsmInfo()->getMaxInstLength(&STI)) {

  // ToDo: AMDGPUDisassembler supports only VI ISA.
  if (!STI.getFeatureBits()[AMDGPU::FeatureGCN3Encoding] && !isGFX10())
    report_fatal_error("Disassembly not yet supported for subtarget");
}

inline static MCDisassembler::DecodeStatus
addOperand(MCInst &Inst, const MCOperand& Opnd) {
  Inst.addOperand(Opnd);
  return Opnd.isValid() ?
    MCDisassembler::Success :
    MCDisassembler::SoftFail;
}

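// Insert Op at the position that the operand named NameIdx occupies in this
// opcode's operand list. Returns that index, or -1 if the opcode has no such
// operand.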
static int insertNamedMCOperand(MCInst &MI, const MCOperand &Op,
                                uint16_t NameIdx) {
  int OpIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), NameIdx);
  if (OpIdx != -1) {
    auto I = MI.begin();
    std::advance(I, OpIdx);
    MI.insert(I, Op);
  }
  return OpIdx;
}

static DecodeStatus decodeSoppBrTarget(MCInst &Inst, unsigned Imm,
                                       uint64_t Addr, const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);

  // Our branches take a simm16, but we need two extra bits to account for the
  // factor of 4.
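  // The target is Addr + 4 (the next instruction) plus the sign-extended
  // offset scaled by 4.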
  APInt SignedOffset(18, Imm * 4, true);
  int64_t Offset = (SignedOffset.sext(64) + 4 + Addr).getSExtValue();

  if (DAsm->tryAddingSymbolicOperand(Inst, Offset, Addr, true, 2, 2))
    return MCDisassembler::Success;
  return addOperand(Inst, MCOperand::createImm(Imm));
}

#define DECODE_OPERAND(StaticDecoderName, DecoderName) \
static DecodeStatus StaticDecoderName(MCInst &Inst, \
                                       unsigned Imm, \
                                       uint64_t /*Addr*/, \
                                       const void *Decoder) { \
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder); \
  return addOperand(Inst, DAsm->DecoderName(Imm)); \
}

#define DECODE_OPERAND_REG(RegClass) \
DECODE_OPERAND(Decode##RegClass##RegisterClass, decodeOperand_##RegClass)

DECODE_OPERAND_REG(VGPR_32)
DECODE_OPERAND_REG(VRegOrLds_32)
DECODE_OPERAND_REG(VS_32)
DECODE_OPERAND_REG(VS_64)
DECODE_OPERAND_REG(VS_128)

DECODE_OPERAND_REG(VReg_64)
DECODE_OPERAND_REG(VReg_96)
DECODE_OPERAND_REG(VReg_128)

DECODE_OPERAND_REG(SReg_32)
DECODE_OPERAND_REG(SReg_32_XM0_XEXEC)
DECODE_OPERAND_REG(SReg_32_XEXEC_HI)
DECODE_OPERAND_REG(SRegOrLds_32)
DECODE_OPERAND_REG(SReg_64)
DECODE_OPERAND_REG(SReg_64_XEXEC)
DECODE_OPERAND_REG(SReg_128)
DECODE_OPERAND_REG(SReg_256)
DECODE_OPERAND_REG(SReg_512)

static DecodeStatus decodeOperand_VSrc16(MCInst &Inst,
                                         unsigned Imm,
                                         uint64_t Addr,
                                         const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VSrc16(Imm));
}

static DecodeStatus decodeOperand_VSrcV216(MCInst &Inst,
                                           unsigned Imm,
                                           uint64_t Addr,
                                           const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VSrcV216(Imm));
}

#define DECODE_SDWA(DecName) \
DECODE_OPERAND(decodeSDWA##DecName, decodeSDWA##DecName)

DECODE_SDWA(Src32)
DECODE_SDWA(Src16)
DECODE_SDWA(VopcDst)

#include "AMDGPUGenDisassemblerTables.inc"

//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//

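// Consume sizeof(T) bytes from the front of Bytes, interpreted little-endian,
// and advance the window past them.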
template <typename T> static inline T eatBytes(ArrayRef<uint8_t>& Bytes) {
  assert(Bytes.size() >= sizeof(T));
  const auto Res = support::endian::read<T, support::endianness::little>(Bytes.data());
  Bytes = Bytes.slice(sizeof(T));
  return Res;
}

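// Attempt to decode Inst against one generated decoder table; on failure the
// Bytes window is restored so the caller can retry with another table or
// encoding width.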
DecodeStatus AMDGPUDisassembler::tryDecodeInst(const uint8_t* Table,
                                               MCInst &MI,
                                               uint64_t Inst,
                                               uint64_t Address) const {
  assert(MI.getOpcode() == 0);
  assert(MI.getNumOperands() == 0);
  MCInst TmpInst;
  HasLiteral = false;
  const auto SavedBytes = Bytes;
  if (decodeInstruction(Table, TmpInst, Inst, Address, this, STI)) {
    MI = TmpInst;
    return MCDisassembler::Success;
  }
  Bytes = SavedBytes;
  return MCDisassembler::Fail;
}

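// A match from the DPP8 table is only genuine if the decoded fi operand holds
// one of the two encodings that identify DPP8.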
static bool isValidDPP8(const MCInst &MI) {
  using namespace llvm::AMDGPU::DPP;
  int FiIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::fi);
  assert(FiIdx != -1);
  if ((unsigned)FiIdx >= MI.getNumOperands())
    return false;
  unsigned Fi = MI.getOperand(FiIdx).getImm();
  return Fi == DPP8_FI_0 || Fi == DPP8_FI_1;
}

DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
                                                ArrayRef<uint8_t> Bytes_,
                                                uint64_t Address,
                                                raw_ostream &WS,
                                                raw_ostream &CS) const {
  CommentStream = &CS;
  bool IsSDWA = false;

  unsigned MaxInstBytesNum = std::min((size_t)TargetMaxInstBytes, Bytes_.size());
  Bytes = Bytes_.slice(0, MaxInstBytesNum);

  DecodeStatus Res = MCDisassembler::Fail;
  do {
    // ToDo: better to switch encoding length using some bit predicate
    // but it is unknown yet, so try all we can

    // Try to decode DPP and SDWA first to solve conflict with VOP1 and VOP2
    // encodings
    if (Bytes.size() >= 8) {
      const uint64_t QW = eatBytes<uint64_t>(Bytes);

      Res = tryDecodeInst(DecoderTableDPP864, MI, QW, Address);
      if (Res && convertDPP8Inst(MI) == MCDisassembler::Success)
        break;

      MI = MCInst(); // clear

      Res = tryDecodeInst(DecoderTableDPP64, MI, QW, Address);
      if (Res) break;

      Res = tryDecodeInst(DecoderTableSDWA64, MI, QW, Address);
      if (Res) { IsSDWA = true; break; }

      Res = tryDecodeInst(DecoderTableSDWA964, MI, QW, Address);
      if (Res) { IsSDWA = true; break; }

      Res = tryDecodeInst(DecoderTableSDWA1064, MI, QW, Address);
      if (Res) { IsSDWA = true; break; }

      // Some GFX9 subtargets repurposed the v_mad_mix_f32, v_mad_mixlo_f16 and
      // v_mad_mixhi_f16 for FMA variants. Try to decode using this special
      // table first so we print the correct name.
      if (STI.getFeatureBits()[AMDGPU::FeatureFmaMixInsts]) {
        Res = tryDecodeInst(DecoderTableGFX9_DL64, MI, QW, Address);
        if (Res) break;
      }

      if (STI.getFeatureBits()[AMDGPU::FeatureUnpackedD16VMem]) {
        Res = tryDecodeInst(DecoderTableGFX80_UNPACKED64, MI, QW, Address);
        if (Res)
          break;
      }
    }

    // Reinitialize Bytes as DPP64 could have eaten too much
    Bytes = Bytes_.slice(0, MaxInstBytesNum);

    // Try to decode a 32-bit instruction.
    if (Bytes.size() < 4) break;
    const uint32_t DW = eatBytes<uint32_t>(Bytes);
    Res = tryDecodeInst(DecoderTableGFX832, MI, DW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableAMDGPU32, MI, DW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableGFX932, MI, DW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableGFX1032, MI, DW, Address);
    if (Res) break;

    if (Bytes.size() < 4) break;
    const uint64_t QW = ((uint64_t)eatBytes<uint32_t>(Bytes) << 32) | DW;
    Res = tryDecodeInst(DecoderTableGFX864, MI, QW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableAMDGPU64, MI, QW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableGFX964, MI, QW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableGFX1064, MI, QW, Address);
  } while (false);

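  // A 12-byte decode is only legitimate for a VOP3 instruction that consumed
  // a 32-bit literal; otherwise the 64-bit attempt swallowed a spurious
  // trailing dword, so shrink the instruction back to 8 bytes.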
  if (Res && (MaxInstBytesNum - Bytes.size()) == 12 && (!HasLiteral ||
        !(MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::VOP3))) {
    MaxInstBytesNum = 8;
    Bytes = Bytes_.slice(0, MaxInstBytesNum);
    eatBytes<uint64_t>(Bytes);
  }

  if (Res && (MI.getOpcode() == AMDGPU::V_MAC_F32_e64_vi ||
              MI.getOpcode() == AMDGPU::V_MAC_F32_e64_gfx6_gfx7 ||
              MI.getOpcode() == AMDGPU::V_MAC_F32_e64_gfx10 ||
              MI.getOpcode() == AMDGPU::V_MAC_F16_e64_vi ||
              MI.getOpcode() == AMDGPU::V_FMAC_F32_e64_vi ||
              MI.getOpcode() == AMDGPU::V_FMAC_F32_e64_gfx10 ||
              MI.getOpcode() == AMDGPU::V_FMAC_F16_e64_gfx10)) {
    // Insert dummy unused src2_modifiers.
    insertNamedMCOperand(MI, MCOperand::createImm(0),
                         AMDGPU::OpName::src2_modifiers);
  }

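  // For the GFX10 MIMG NSA (non-sequential address) encoding, the extra VGPR
  // addresses follow the instruction as additional dwords; decode them and
  // splice the operands in after vaddr0.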
  if (Res && (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::MIMG)) {
    int VAddr0Idx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0);
    int RsrcIdx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc);
    unsigned NSAArgs = RsrcIdx - VAddr0Idx - 1;
    if (VAddr0Idx >= 0 && NSAArgs > 0) {
      unsigned NSAWords = (NSAArgs + 3) / 4;
      if (Bytes.size() < 4 * NSAWords) {
        Res = MCDisassembler::Fail;
      } else {
        for (unsigned i = 0; i < NSAArgs; ++i) {
          MI.insert(MI.begin() + VAddr0Idx + 1 + i,
                    decodeOperand_VGPR_32(Bytes[i]));
        }
        Bytes = Bytes.slice(4 * NSAWords);
      }
    }

    if (Res)
      Res = convertMIMGInst(MI);
  }

  if (Res && IsSDWA)
    Res = convertSDWAInst(MI);

  int VDstIn_Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                              AMDGPU::OpName::vdst_in);
  if (VDstIn_Idx != -1) {
    int Tied = MCII->get(MI.getOpcode()).getOperandConstraint(VDstIn_Idx,
                           MCOI::OperandConstraint::TIED_TO);
    if (Tied != -1 && (MI.getNumOperands() <= (unsigned)VDstIn_Idx ||
         !MI.getOperand(VDstIn_Idx).isReg() ||
         MI.getOperand(VDstIn_Idx).getReg() != MI.getOperand(Tied).getReg())) {
      if (MI.getNumOperands() > (unsigned)VDstIn_Idx)
        MI.erase(&MI.getOperand(VDstIn_Idx));
      insertNamedMCOperand(MI,
        MCOperand::createReg(MI.getOperand(Tied).getReg()),
        AMDGPU::OpName::vdst_in);
    }
  }

  // if the opcode was not recognized we'll assume a Size of 4 bytes
  // (unless there are fewer bytes left)
  Size = Res ? (MaxInstBytesNum - Bytes.size())
             : std::min((size_t)4, Bytes_.size());
  return Res;
}

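// SDWA encodings omit operands that the MCInstrDesc still expects; reinsert
// them so the operand list matches what the printer and assembler assume.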
DecodeStatus AMDGPUDisassembler::convertSDWAInst(MCInst &MI) const {
  if (STI.getFeatureBits()[AMDGPU::FeatureGFX9] ||
      STI.getFeatureBits()[AMDGPU::FeatureGFX10]) {
    if (AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst) != -1)
      // VOPC - insert clamp
      insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::clamp);
  } else if (STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands]) {
    int SDst = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst);
    if (SDst != -1) {
      // VOPC - insert VCC register as sdst
      insertNamedMCOperand(MI, createRegOperand(AMDGPU::VCC),
                           AMDGPU::OpName::sdst);
    } else {
      // VOP1/2 - insert omod if present in instruction
      insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::omod);
    }
  }
  return MCDisassembler::Success;
}

DecodeStatus AMDGPUDisassembler::convertDPP8Inst(MCInst &MI) const {
  unsigned Opc = MI.getOpcode();
  unsigned DescNumOps = MCII->get(Opc).getNumOperands();

  // Insert dummy unused src modifiers.
  if (MI.getNumOperands() < DescNumOps &&
      AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers) != -1)
    insertNamedMCOperand(MI, MCOperand::createImm(0),
                         AMDGPU::OpName::src0_modifiers);

  if (MI.getNumOperands() < DescNumOps &&
      AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1_modifiers) != -1)
    insertNamedMCOperand(MI, MCOperand::createImm(0),
                         AMDGPU::OpName::src1_modifiers);

  return isValidDPP8(MI) ? MCDisassembler::Success : MCDisassembler::SoftFail;
}

// Note that before gfx10 the MIMG encoding carried no information about the
// VADDR size. Consequently, decoded instructions always show the address as a
// single dword, which may not actually be the case.
DecodeStatus AMDGPUDisassembler::convertMIMGInst(MCInst &MI) const {

  int VDstIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                           AMDGPU::OpName::vdst);

  int VDataIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::vdata);
  int VAddr0Idx =
      AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0);
  int DMaskIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::dmask);

  int TFEIdx   = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::tfe);
  int D16Idx   = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::d16);

  assert(VDataIdx != -1);
  assert(DMaskIdx != -1);
  assert(TFEIdx != -1);

  const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(MI.getOpcode());
  bool IsAtomic = (VDstIdx != -1);
  bool IsGather4 = MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::Gather4;

  bool IsNSA = false;
  unsigned AddrSize = Info->VAddrDwords;

  if (STI.getFeatureBits()[AMDGPU::FeatureGFX10]) {
    unsigned DimIdx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::dim);
    const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
        AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode);
    const AMDGPU::MIMGDimInfo *Dim =
        AMDGPU::getMIMGDimInfoByEncoding(MI.getOperand(DimIdx).getImm());

    AddrSize = BaseOpcode->NumExtraArgs +
               (BaseOpcode->Gradients ? Dim->NumGradients : 0) +
               (BaseOpcode->Coordinates ? Dim->NumCoords : 0) +
               (BaseOpcode->LodOrClampOrMip ? 1 : 0);
    IsNSA = Info->MIMGEncoding == AMDGPU::MIMGEncGfx10NSA;
    if (!IsNSA) {
      if (AddrSize > 8)
        AddrSize = 16;
      else if (AddrSize > 4)
        AddrSize = 8;
    } else {
      if (AddrSize > Info->VAddrDwords) {
        // The NSA encoding does not contain enough operands for the combination
        // of base opcode / dimension. Should this be an error?
        return MCDisassembler::Success;
      }
    }
  }

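  // The data size is the number of channels enabled in dmask; gather4 always
  // returns four channels, and packed D16 halves the dword count.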
  unsigned DMask = MI.getOperand(DMaskIdx).getImm() & 0xf;
  unsigned DstSize = IsGather4 ? 4 : std::max(countPopulation(DMask), 1u);

  bool D16 = D16Idx >= 0 && MI.getOperand(D16Idx).getImm();
  if (D16 && AMDGPU::hasPackedD16(STI)) {
    DstSize = (DstSize + 1) / 2;
  }

  // FIXME: Add tfe support
  if (MI.getOperand(TFEIdx).getImm())
    return MCDisassembler::Success;

  if (DstSize == Info->VDataDwords && AddrSize == Info->VAddrDwords)
    return MCDisassembler::Success;

  int NewOpcode =
      AMDGPU::getMIMGOpcode(Info->BaseOpcode, Info->MIMGEncoding, DstSize, AddrSize);
  if (NewOpcode == -1)
    return MCDisassembler::Success;

  // Widen the register to the correct number of enabled channels.
  unsigned NewVdata = AMDGPU::NoRegister;
  if (DstSize != Info->VDataDwords) {
    auto DataRCID = MCII->get(NewOpcode).OpInfo[VDataIdx].RegClass;

    // Get first subregister of VData
    unsigned Vdata0 = MI.getOperand(VDataIdx).getReg();
    unsigned VdataSub0 = MRI.getSubReg(Vdata0, AMDGPU::sub0);
    Vdata0 = (VdataSub0 != 0) ? VdataSub0 : Vdata0;

    NewVdata = MRI.getMatchingSuperReg(Vdata0, AMDGPU::sub0,
                                       &MRI.getRegClass(DataRCID));
    if (NewVdata == AMDGPU::NoRegister) {
      // It's possible to encode this such that the low register + enabled
      // components exceeds the register count.
      return MCDisassembler::Success;
    }
  }

  unsigned NewVAddr0 = AMDGPU::NoRegister;
  if (STI.getFeatureBits()[AMDGPU::FeatureGFX10] && !IsNSA &&
      AddrSize != Info->VAddrDwords) {
    unsigned VAddr0 = MI.getOperand(VAddr0Idx).getReg();
    unsigned VAddrSub0 = MRI.getSubReg(VAddr0, AMDGPU::sub0);
    VAddr0 = (VAddrSub0 != 0) ? VAddrSub0 : VAddr0;

    auto AddrRCID = MCII->get(NewOpcode).OpInfo[VAddr0Idx].RegClass;
    NewVAddr0 = MRI.getMatchingSuperReg(VAddr0, AMDGPU::sub0,
                                        &MRI.getRegClass(AddrRCID));
    if (NewVAddr0 == AMDGPU::NoRegister)
      return MCDisassembler::Success;
  }

  MI.setOpcode(NewOpcode);

  if (NewVdata != AMDGPU::NoRegister) {
    MI.getOperand(VDataIdx) = MCOperand::createReg(NewVdata);

    if (IsAtomic) {
      // Atomic operations have an additional operand (a copy of data)
      MI.getOperand(VDstIdx) = MCOperand::createReg(NewVdata);
    }
  }

  if (NewVAddr0 != AMDGPU::NoRegister) {
    MI.getOperand(VAddr0Idx) = MCOperand::createReg(NewVAddr0);
  } else if (IsNSA) {
    assert(AddrSize <= Info->VAddrDwords);
    MI.erase(MI.begin() + VAddr0Idx + AddrSize,
             MI.begin() + VAddr0Idx + Info->VAddrDwords);
  }

  return MCDisassembler::Success;
}

const char* AMDGPUDisassembler::getRegClassName(unsigned RegClassID) const {
  return getContext().getRegisterInfo()->
    getRegClassName(&AMDGPUMCRegisterClasses[RegClassID]);
}

inline
MCOperand AMDGPUDisassembler::errOperand(unsigned V,
                                         const Twine& ErrMsg) const {
  *CommentStream << "Error: " + ErrMsg;

  // ToDo: add support for error operands to MCInst.h
  // return MCOperand::createError(V);
  return MCOperand();
}

inline
MCOperand AMDGPUDisassembler::createRegOperand(unsigned int RegId) const {
  return MCOperand::createReg(AMDGPU::getMCReg(RegId, STI));
}

inline
MCOperand AMDGPUDisassembler::createRegOperand(unsigned RegClassID,
                                               unsigned Val) const {
  const auto& RegCl = AMDGPUMCRegisterClasses[RegClassID];
  if (Val >= RegCl.getNumRegs())
    return errOperand(Val, Twine(getRegClassName(RegClassID)) +
                           ": unknown register " + Twine(Val));
  return createRegOperand(RegCl.getRegister(Val));
}

inline
MCOperand AMDGPUDisassembler::createSRegOperand(unsigned SRegClassID,
                                                unsigned Val) const {
  // ToDo: SI/CI have 104 SGPRs, VI - 102
  // Here we accept as much as we can and let the assembler sort it out.
  int shift = 0;
  switch (SRegClassID) {
  case AMDGPU::SGPR_32RegClassID:
  case AMDGPU::TTMP_32RegClassID:
    break;
  case AMDGPU::SGPR_64RegClassID:
  case AMDGPU::TTMP_64RegClassID:
    shift = 1;
    break;
  case AMDGPU::SGPR_128RegClassID:
  case AMDGPU::TTMP_128RegClassID:
  // ToDo: unclear if s[100:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  case AMDGPU::SGPR_256RegClassID:
  case AMDGPU::TTMP_256RegClassID:
  // ToDo: unclear if s[96:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  case AMDGPU::SGPR_512RegClassID:
  case AMDGPU::TTMP_512RegClassID:
    shift = 2;
    break;
  // ToDo: unclear if s[88:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  default:
    llvm_unreachable("unhandled register class");
  }

  if (Val % (1 << shift)) {
    *CommentStream << "Warning: " << getRegClassName(SRegClassID)
                   << ": scalar reg isn't aligned " << Val;
  }

  return createRegOperand(SRegClassID, Val >> shift);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_32(unsigned Val) const {
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_128(unsigned Val) const {
  return decodeSrcOp(OPW128, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VSrc16(unsigned Val) const {
  return decodeSrcOp(OPW16, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VSrcV216(unsigned Val) const {
  return decodeSrcOp(OPWV216, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VGPR_32(unsigned Val) const {
  // Some instructions have operand restrictions beyond what the encoding
  // allows. Some ordinarily VSrc_32 operands are VGPR_32, so clear the extra
  // high bit.
  Val &= 255;

  return createRegOperand(AMDGPU::VGPR_32RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VRegOrLds_32(unsigned Val) const {
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_64(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_64RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_96(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_96RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_128(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_128RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32(unsigned Val) const {
  // The tablegen-generated disassembler doesn't track operand types, only the
  // register class, so an SSrc_32 operand turns into SReg_32; therefore we
  // accept immediates and literals here as well.
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32_XM0_XEXEC(
  unsigned Val) const {
  // SReg_32_XM0 is SReg_32 without M0 or EXEC_LO/EXEC_HI
  return decodeOperand_SReg_32(Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32_XEXEC_HI(
  unsigned Val) const {
  // SReg_32_XEXEC_HI is SReg_32 without EXEC_HI
  return decodeOperand_SReg_32(Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SRegOrLds_32(unsigned Val) const {
  // The tablegen-generated disassembler doesn't track operand types, only the
  // register class, so an SSrc_32 operand turns into SReg_32; therefore we
  // accept immediates and literals here as well.
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_64_XEXEC(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_128(unsigned Val) const {
  return decodeSrcOp(OPW128, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_256(unsigned Val) const {
  return decodeDstOp(OPW256, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_512(unsigned Val) const {
  return decodeDstOp(OPW512, Val);
}

MCOperand AMDGPUDisassembler::decodeLiteralConstant() const {
  // For now all literal constants are assumed to be unsigned integers.
  // ToDo: deal with signed/unsigned 64-bit integer constants
  // ToDo: deal with float/double constants
  if (!HasLiteral) {
    if (Bytes.size() < 4) {
      return errOperand(0, "cannot read literal, inst bytes left " +
                        Twine(Bytes.size()));
    }
    HasLiteral = true;
    Literal = eatBytes<uint32_t>(Bytes);
  }
  return MCOperand::createImm(Literal);
}

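// Inline integers encode 0..64 as 128..192 and -1..-16 as 193..208; undo that
// mapping here.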
MCOperand AMDGPUDisassembler::decodeIntImmed(unsigned Imm) {
  using namespace AMDGPU::EncValues;

  assert(Imm >= INLINE_INTEGER_C_MIN && Imm <= INLINE_INTEGER_C_MAX);
  return MCOperand::createImm((Imm <= INLINE_INTEGER_C_POSITIVE_MAX) ?
    (static_cast<int64_t>(Imm) - INLINE_INTEGER_C_MIN) :
    (INLINE_INTEGER_C_POSITIVE_MAX - static_cast<int64_t>(Imm)));
      // Cast prevents negative overflow.
}

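// Map the hardware's inline floating-point encodings (240..248) to the bit
// pattern of the corresponding constant at each operand width.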
static int64_t getInlineImmVal32(unsigned Imm) {
  switch (Imm) {
  case 240:
    return FloatToBits(0.5f);
  case 241:
    return FloatToBits(-0.5f);
  case 242:
    return FloatToBits(1.0f);
  case 243:
    return FloatToBits(-1.0f);
  case 244:
    return FloatToBits(2.0f);
  case 245:
    return FloatToBits(-2.0f);
  case 246:
    return FloatToBits(4.0f);
  case 247:
    return FloatToBits(-4.0f);
  case 248: // 1 / (2 * PI)
    return 0x3e22f983;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

static int64_t getInlineImmVal64(unsigned Imm) {
  switch (Imm) {
  case 240:
    return DoubleToBits(0.5);
  case 241:
    return DoubleToBits(-0.5);
  case 242:
    return DoubleToBits(1.0);
  case 243:
    return DoubleToBits(-1.0);
  case 244:
    return DoubleToBits(2.0);
  case 245:
    return DoubleToBits(-2.0);
  case 246:
    return DoubleToBits(4.0);
  case 247:
    return DoubleToBits(-4.0);
  case 248: // 1 / (2 * PI)
    return 0x3fc45f306dc9c882;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

static int64_t getInlineImmVal16(unsigned Imm) {
  switch (Imm) {
  case 240:
    return 0x3800;
  case 241:
    return 0xB800;
  case 242:
    return 0x3C00;
  case 243:
    return 0xBC00;
  case 244:
    return 0x4000;
  case 245:
    return 0xC000;
  case 246:
    return 0x4400;
  case 247:
    return 0xC400;
  case 248: // 1 / (2 * PI)
    return 0x3118;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

MCOperand AMDGPUDisassembler::decodeFPImmed(OpWidthTy Width, unsigned Imm) {
  assert(Imm >= AMDGPU::EncValues::INLINE_FLOATING_C_MIN
      && Imm <= AMDGPU::EncValues::INLINE_FLOATING_C_MAX);

  // ToDo: case 248: 1/(2*PI) - is allowed only on VI
  switch (Width) {
  case OPW32:
    return MCOperand::createImm(getInlineImmVal32(Imm));
  case OPW64:
    return MCOperand::createImm(getInlineImmVal64(Imm));
  case OPW16:
  case OPWV216:
    return MCOperand::createImm(getInlineImmVal16(Imm));
  default:
    llvm_unreachable("implement me");
  }
}

unsigned AMDGPUDisassembler::getVgprClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;

  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
  case OPWV216:
    return VGPR_32RegClassID;
  case OPW64: return VReg_64RegClassID;
  case OPW128: return VReg_128RegClassID;
  }
}

unsigned AMDGPUDisassembler::getSgprClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;

  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
  case OPWV216:
    return SGPR_32RegClassID;
  case OPW64: return SGPR_64RegClassID;
  case OPW128: return SGPR_128RegClassID;
  case OPW256: return SGPR_256RegClassID;
  case OPW512: return SGPR_512RegClassID;
  }
}

unsigned AMDGPUDisassembler::getTtmpClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;

  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
  case OPWV216:
    return TTMP_32RegClassID;
  case OPW64: return TTMP_64RegClassID;
  case OPW128: return TTMP_128RegClassID;
  case OPW256: return TTMP_256RegClassID;
  case OPW512: return TTMP_512RegClassID;
  }
}

int AMDGPUDisassembler::getTTmpIdx(unsigned Val) const {
  using namespace AMDGPU::EncValues;

  unsigned TTmpMin =
      (isGFX9() || isGFX10()) ? TTMP_GFX9_GFX10_MIN : TTMP_VI_MIN;
  unsigned TTmpMax =
      (isGFX9() || isGFX10()) ? TTMP_GFX9_GFX10_MAX : TTMP_VI_MAX;

  return (TTmpMin <= Val && Val <= TTmpMax) ? Val - TTmpMin : -1;
}

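// Decode a 9-bit source operand encoding: VGPRs, SGPRs, and TTMPs map to
// registers; the remaining values denote inline constants, a trailing literal,
// or special registers.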
872 
873 MCOperand AMDGPUDisassembler::decodeSrcOp(const OpWidthTy Width, unsigned Val) const {
874   using namespace AMDGPU::EncValues;
875 
876   assert(Val < 512); // enum9
877 
878   if (VGPR_MIN <= Val && Val <= VGPR_MAX) {
879     return createRegOperand(getVgprClassId(Width), Val - VGPR_MIN);
880   }
881   if (Val <= SGPR_MAX) {
    assert(SGPR_MIN == 0); // "SGPR_MIN <= Val" is always true and would cause a compiler warning.
    return createSRegOperand(getSgprClassId(Width), Val - SGPR_MIN);
  }

  int TTmpIdx = getTTmpIdx(Val);
  if (TTmpIdx >= 0) {
    return createSRegOperand(getTtmpClassId(Width), TTmpIdx);
  }

  if (INLINE_INTEGER_C_MIN <= Val && Val <= INLINE_INTEGER_C_MAX)
    return decodeIntImmed(Val);

  if (INLINE_FLOATING_C_MIN <= Val && Val <= INLINE_FLOATING_C_MAX)
    return decodeFPImmed(Width, Val);

  if (Val == LITERAL_CONST)
    return decodeLiteralConstant();

  switch (Width) {
  case OPW32:
  case OPW16:
  case OPWV216:
    return decodeSpecialReg32(Val);
  case OPW64:
    return decodeSpecialReg64(Val);
  default:
    llvm_unreachable("unexpected immediate type");
  }
}

MCOperand AMDGPUDisassembler::decodeDstOp(const OpWidthTy Width, unsigned Val) const {
  using namespace AMDGPU::EncValues;

  assert(Val < 128);
  assert(Width == OPW256 || Width == OPW512);

  if (Val <= SGPR_MAX) {
    assert(SGPR_MIN == 0); // "SGPR_MIN <= Val" is always true and would cause a compiler warning.
    return createSRegOperand(getSgprClassId(Width), Val - SGPR_MIN);
  }

  int TTmpIdx = getTTmpIdx(Val);
  if (TTmpIdx >= 0) {
    return createSRegOperand(getTtmpClassId(Width), TTmpIdx);
  }

  llvm_unreachable("unknown dst register");
}

MCOperand AMDGPUDisassembler::decodeSpecialReg32(unsigned Val) const {
  using namespace AMDGPU;

  switch (Val) {
  case 102: return createRegOperand(FLAT_SCR_LO);
  case 103: return createRegOperand(FLAT_SCR_HI);
  case 104: return createRegOperand(XNACK_MASK_LO);
  case 105: return createRegOperand(XNACK_MASK_HI);
  case 106: return createRegOperand(VCC_LO);
  case 107: return createRegOperand(VCC_HI);
  case 108: return createRegOperand(TBA_LO);
  case 109: return createRegOperand(TBA_HI);
  case 110: return createRegOperand(TMA_LO);
  case 111: return createRegOperand(TMA_HI);
  case 124: return createRegOperand(M0);
  case 125: return createRegOperand(SGPR_NULL);
  case 126: return createRegOperand(EXEC_LO);
  case 127: return createRegOperand(EXEC_HI);
  case 235: return createRegOperand(SRC_SHARED_BASE);
  case 236: return createRegOperand(SRC_SHARED_LIMIT);
  case 237: return createRegOperand(SRC_PRIVATE_BASE);
  case 238: return createRegOperand(SRC_PRIVATE_LIMIT);
  case 239: return createRegOperand(SRC_POPS_EXITING_WAVE_ID);
  case 251: return createRegOperand(SRC_VCCZ);
  case 252: return createRegOperand(SRC_EXECZ);
  case 253: return createRegOperand(SRC_SCC);
  case 254: return createRegOperand(LDS_DIRECT);
  default: break;
  }
  return errOperand(Val, "unknown operand encoding " + Twine(Val));
}

MCOperand AMDGPUDisassembler::decodeSpecialReg64(unsigned Val) const {
  using namespace AMDGPU;

  switch (Val) {
  case 102: return createRegOperand(FLAT_SCR);
  case 104: return createRegOperand(XNACK_MASK);
  case 106: return createRegOperand(VCC);
  case 108: return createRegOperand(TBA);
  case 110: return createRegOperand(TMA);
  case 126: return createRegOperand(EXEC);
  case 235: return createRegOperand(SRC_SHARED_BASE);
  case 236: return createRegOperand(SRC_SHARED_LIMIT);
  case 237: return createRegOperand(SRC_PRIVATE_BASE);
  case 238: return createRegOperand(SRC_PRIVATE_LIMIT);
  case 239: return createRegOperand(SRC_POPS_EXITING_WAVE_ID);
  case 251: return createRegOperand(SRC_VCCZ);
  case 252: return createRegOperand(SRC_EXECZ);
  case 253: return createRegOperand(SRC_SCC);
  default: break;
  }
  return errOperand(Val, "unknown operand encoding " + Twine(Val));
}

MCOperand AMDGPUDisassembler::decodeSDWASrc(const OpWidthTy Width,
                                            const unsigned Val) const {
  using namespace AMDGPU::SDWA;
  using namespace AMDGPU::EncValues;

  if (STI.getFeatureBits()[AMDGPU::FeatureGFX9] ||
      STI.getFeatureBits()[AMDGPU::FeatureGFX10]) {
    // The cast to int avoids a tautological-compare warning: the comparison
    // would otherwise always be true for an unsigned value.
    if (int(SDWA9EncValues::SRC_VGPR_MIN) <= int(Val) &&
        Val <= SDWA9EncValues::SRC_VGPR_MAX) {
      return createRegOperand(getVgprClassId(Width),
                              Val - SDWA9EncValues::SRC_VGPR_MIN);
    }
    if (SDWA9EncValues::SRC_SGPR_MIN <= Val &&
        Val <= (isGFX10() ? SDWA9EncValues::SRC_SGPR_MAX_GFX10
                          : SDWA9EncValues::SRC_SGPR_MAX_SI)) {
      return createSRegOperand(getSgprClassId(Width),
                               Val - SDWA9EncValues::SRC_SGPR_MIN);
    }
    if (SDWA9EncValues::SRC_TTMP_MIN <= Val &&
        Val <= SDWA9EncValues::SRC_TTMP_MAX) {
      return createSRegOperand(getTtmpClassId(Width),
                               Val - SDWA9EncValues::SRC_TTMP_MIN);
    }

    const unsigned SVal = Val - SDWA9EncValues::SRC_SGPR_MIN;

    if (INLINE_INTEGER_C_MIN <= SVal && SVal <= INLINE_INTEGER_C_MAX)
      return decodeIntImmed(SVal);

    if (INLINE_FLOATING_C_MIN <= SVal && SVal <= INLINE_FLOATING_C_MAX)
      return decodeFPImmed(Width, SVal);

    return decodeSpecialReg32(SVal);
  } else if (STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands]) {
    return createRegOperand(getVgprClassId(Width), Val);
  }
  llvm_unreachable("unsupported target");
}

MCOperand AMDGPUDisassembler::decodeSDWASrc16(unsigned Val) const {
  return decodeSDWASrc(OPW16, Val);
}

MCOperand AMDGPUDisassembler::decodeSDWASrc32(unsigned Val) const {
  return decodeSDWASrc(OPW32, Val);
}

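// When the VOPC_DST_VCC_MASK bit of the SDWA sdst field is set, the field
// encodes an explicit scalar destination; otherwise the compare result goes
// to VCC.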
MCOperand AMDGPUDisassembler::decodeSDWAVopcDst(unsigned Val) const {
  using namespace AMDGPU::SDWA;

  assert((STI.getFeatureBits()[AMDGPU::FeatureGFX9] ||
          STI.getFeatureBits()[AMDGPU::FeatureGFX10]) &&
         "SDWAVopcDst should be present only on GFX9+");

  if (Val & SDWA9EncValues::VOPC_DST_VCC_MASK) {
    Val &= SDWA9EncValues::VOPC_DST_SGPR_MASK;

    int TTmpIdx = getTTmpIdx(Val);
    if (TTmpIdx >= 0) {
      return createSRegOperand(getTtmpClassId(OPW64), TTmpIdx);
    } else if (Val > SGPR_MAX) {
      return decodeSpecialReg64(Val);
    } else {
      return createSRegOperand(getSgprClassId(OPW64), Val);
    }
  } else {
    return createRegOperand(AMDGPU::VCC);
  }
}

bool AMDGPUDisassembler::isVI() const {
  return STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands];
}

bool AMDGPUDisassembler::isGFX9() const {
  return STI.getFeatureBits()[AMDGPU::FeatureGFX9];
}

bool AMDGPUDisassembler::isGFX10() const {
  return STI.getFeatureBits()[AMDGPU::FeatureGFX10];
}

//===----------------------------------------------------------------------===//
// AMDGPUSymbolizer
//===----------------------------------------------------------------------===//

// Try to find symbol name for specified label
bool AMDGPUSymbolizer::tryAddingSymbolicOperand(MCInst &Inst,
                                raw_ostream &/*cStream*/, int64_t Value,
                                uint64_t /*Address*/, bool IsBranch,
                                uint64_t /*Offset*/, uint64_t /*InstSize*/) {
  using SymbolInfoTy = std::tuple<uint64_t, StringRef, uint8_t>;
  using SectionSymbolsTy = std::vector<SymbolInfoTy>;

  if (!IsBranch) {
    return false;
  }

  auto *Symbols = static_cast<SectionSymbolsTy *>(DisInfo);
  if (!Symbols)
    return false;

  auto Result = std::find_if(Symbols->begin(), Symbols->end(),
                             [Value](const SymbolInfoTy& Val) {
                                return std::get<0>(Val) == static_cast<uint64_t>(Value)
                                    && std::get<2>(Val) == ELF::STT_NOTYPE;
                             });
  if (Result != Symbols->end()) {
    auto *Sym = Ctx.getOrCreateSymbol(std::get<1>(*Result));
    const auto *Add = MCSymbolRefExpr::create(Sym, Ctx);
    Inst.addOperand(MCOperand::createExpr(Add));
    return true;
  }
  return false;
}

void AMDGPUSymbolizer::tryAddingPcLoadReferenceComment(raw_ostream &cStream,
                                                       int64_t Value,
                                                       uint64_t Address) {
  llvm_unreachable("unimplemented");
}

//===----------------------------------------------------------------------===//
// Initialization
//===----------------------------------------------------------------------===//

static MCSymbolizer *createAMDGPUSymbolizer(const Triple &/*TT*/,
                              LLVMOpInfoCallback /*GetOpInfo*/,
                              LLVMSymbolLookupCallback /*SymbolLookUp*/,
                              void *DisInfo,
                              MCContext *Ctx,
                              std::unique_ptr<MCRelocationInfo> &&RelInfo) {
  return new AMDGPUSymbolizer(*Ctx, std::move(RelInfo), DisInfo);
}

static MCDisassembler *createAMDGPUDisassembler(const Target &T,
                                                const MCSubtargetInfo &STI,
                                                MCContext &Ctx) {
  return new AMDGPUDisassembler(STI, Ctx, T.createMCInstrInfo());
}

extern "C" void LLVMInitializeAMDGPUDisassembler() {
  TargetRegistry::RegisterMCDisassembler(getTheGCNTarget(),
                                         createAMDGPUDisassembler);
  TargetRegistry::RegisterMCSymbolizer(getTheGCNTarget(),
                                       createAMDGPUSymbolizer);
}