xref: /llvm-project/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp (revision 8ce2ee9d5626623e139d9992c5d6032369c508d9)
1 //===- AMDGPUDisassembler.cpp - Disassembler for AMDGPU ISA ---------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 //===----------------------------------------------------------------------===//
10 //
11 /// \file
12 ///
13 /// This file contains definition for AMDGPU ISA disassembler
14 //
15 //===----------------------------------------------------------------------===//
16 
17 // ToDo: What to do with instruction suffixes (v_mov_b32 vs v_mov_b32_e32)?
18 
19 #include "Disassembler/AMDGPUDisassembler.h"
20 #include "AMDGPU.h"
21 #include "AMDGPURegisterInfo.h"
22 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
23 #include "SIDefines.h"
24 #include "TargetInfo/AMDGPUTargetInfo.h"
25 #include "Utils/AMDGPUBaseInfo.h"
26 #include "llvm-c/Disassembler.h"
27 #include "llvm/ADT/APInt.h"
28 #include "llvm/ADT/ArrayRef.h"
29 #include "llvm/ADT/Twine.h"
30 #include "llvm/BinaryFormat/ELF.h"
31 #include "llvm/MC/MCContext.h"
32 #include "llvm/MC/MCDisassembler/MCDisassembler.h"
33 #include "llvm/MC/MCExpr.h"
34 #include "llvm/MC/MCFixedLenDisassembler.h"
35 #include "llvm/MC/MCInst.h"
36 #include "llvm/MC/MCSubtargetInfo.h"
37 #include "llvm/Support/Endian.h"
38 #include "llvm/Support/ErrorHandling.h"
39 #include "llvm/Support/MathExtras.h"
40 #include "llvm/Support/TargetRegistry.h"
41 #include "llvm/Support/raw_ostream.h"
42 #include <algorithm>
43 #include <cassert>
44 #include <cstddef>
45 #include <cstdint>
46 #include <iterator>
47 #include <tuple>
48 #include <vector>
49 
50 using namespace llvm;
51 
52 #define DEBUG_TYPE "amdgpu-disassembler"
53 
54 #define SGPR_MAX (isGFX10() ? AMDGPU::EncValues::SGPR_MAX_GFX10 \
55                             : AMDGPU::EncValues::SGPR_MAX_SI)
56 
57 using DecodeStatus = llvm::MCDisassembler::DecodeStatus;
58 
59 inline static MCDisassembler::DecodeStatus
60 addOperand(MCInst &Inst, const MCOperand& Opnd) {
61   Inst.addOperand(Opnd);
62   return Opnd.isValid() ?
63     MCDisassembler::Success :
64     MCDisassembler::SoftFail;
65 }
66 
67 static int insertNamedMCOperand(MCInst &MI, const MCOperand &Op,
68                                 uint16_t NameIdx) {
69   int OpIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), NameIdx);
70   if (OpIdx != -1) {
71     auto I = MI.begin();
72     std::advance(I, OpIdx);
73     MI.insert(I, Op);
74   }
75   return OpIdx;
76 }
77 
// Decoder callback for SOPP branch targets: the encoded simm16 is a signed
// offset in units of 4 bytes, relative to the instruction that follows the
// branch (hence the "+ 4").
static DecodeStatus decodeSoppBrTarget(MCInst &Inst, unsigned Imm,
                                       uint64_t Addr, const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);

  // Our branches take a simm16, but we need two extra bits to account for the
  // factor of 4.
  APInt SignedOffset(18, Imm * 4, true);
  int64_t Offset = (SignedOffset.sext(64) + 4 + Addr).getSExtValue();

  // Prefer a symbolic target when the consumer can resolve one; otherwise
  // fall back to emitting the raw immediate.
  if (DAsm->tryAddingSymbolicOperand(Inst, Offset, Addr, true, 2, 2))
    return MCDisassembler::Success;
  return addOperand(Inst, MCOperand::createImm(Imm));
}
91 
// Generate a static decoder-table callback \p StaticDecoderName that forwards
// the raw encoding to the AMDGPUDisassembler member \p DecoderName and adds
// the resulting operand to the instruction.
#define DECODE_OPERAND(StaticDecoderName, DecoderName) \
static DecodeStatus StaticDecoderName(MCInst &Inst, \
                                       unsigned Imm, \
                                       uint64_t /*Addr*/, \
                                       const void *Decoder) { \
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder); \
  return addOperand(Inst, DAsm->DecoderName(Imm)); \
}

// Shorthand for register-class operands: Decode<RC>RegisterClass forwards to
// decodeOperand_<RC>.
#define DECODE_OPERAND_REG(RegClass) \
DECODE_OPERAND(Decode##RegClass##RegisterClass, decodeOperand_##RegClass)

// Vector and vector-or-scalar source/destination register classes.
DECODE_OPERAND_REG(VGPR_32)
DECODE_OPERAND_REG(VRegOrLds_32)
DECODE_OPERAND_REG(VS_32)
DECODE_OPERAND_REG(VS_64)
DECODE_OPERAND_REG(VS_128)

DECODE_OPERAND_REG(VReg_64)
DECODE_OPERAND_REG(VReg_96)
DECODE_OPERAND_REG(VReg_128)

// Scalar register classes of various widths.
DECODE_OPERAND_REG(SReg_32)
DECODE_OPERAND_REG(SReg_32_XM0_XEXEC)
DECODE_OPERAND_REG(SReg_32_XEXEC_HI)
DECODE_OPERAND_REG(SRegOrLds_32)
DECODE_OPERAND_REG(SReg_64)
DECODE_OPERAND_REG(SReg_64_XEXEC)
DECODE_OPERAND_REG(SReg_128)
DECODE_OPERAND_REG(SReg_256)
DECODE_OPERAND_REG(SReg_512)
123 
124 static DecodeStatus decodeOperand_VSrc16(MCInst &Inst,
125                                          unsigned Imm,
126                                          uint64_t Addr,
127                                          const void *Decoder) {
128   auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
129   return addOperand(Inst, DAsm->decodeOperand_VSrc16(Imm));
130 }
131 
132 static DecodeStatus decodeOperand_VSrcV216(MCInst &Inst,
133                                          unsigned Imm,
134                                          uint64_t Addr,
135                                          const void *Decoder) {
136   auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
137   return addOperand(Inst, DAsm->decodeOperand_VSrcV216(Imm));
138 }
139 
// Decoder callbacks for SDWA-specific operand encodings; the static callback
// and the member decoder share the same name.
#define DECODE_SDWA(DecName) \
DECODE_OPERAND(decodeSDWA##DecName, decodeSDWA##DecName)

DECODE_SDWA(Src32)
DECODE_SDWA(Src16)
DECODE_SDWA(VopcDst)
146 
147 #include "AMDGPUGenDisassemblerTables.inc"
148 
149 //===----------------------------------------------------------------------===//
150 //
151 //===----------------------------------------------------------------------===//
152 
153 template <typename T> static inline T eatBytes(ArrayRef<uint8_t>& Bytes) {
154   assert(Bytes.size() >= sizeof(T));
155   const auto Res = support::endian::read<T, support::endianness::little>(Bytes.data());
156   Bytes = Bytes.slice(sizeof(T));
157   return Res;
158 }
159 
// Attempt to decode \p Inst using one generated decoder \p Table. On success
// the decoded instruction is copied into \p MI; on failure the Bytes stream
// is restored so another table can be tried (a failing decode may still have
// consumed a trailing literal via decodeLiteralConstant).
DecodeStatus AMDGPUDisassembler::tryDecodeInst(const uint8_t* Table,
                                               MCInst &MI,
                                               uint64_t Inst,
                                               uint64_t Address) const {
  assert(MI.getOpcode() == 0);
  assert(MI.getNumOperands() == 0);
  MCInst TmpInst;
  HasLiteral = false; // Reset the literal cache for this attempt.
  const auto SavedBytes = Bytes;
  if (decodeInstruction(Table, TmpInst, Inst, Address, this, STI)) {
    MI = TmpInst;
    return MCDisassembler::Success;
  }
  Bytes = SavedBytes;
  return MCDisassembler::Fail;
}
176 
177 DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
178                                                 ArrayRef<uint8_t> Bytes_,
179                                                 uint64_t Address,
180                                                 raw_ostream &WS,
181                                                 raw_ostream &CS) const {
182   CommentStream = &CS;
183   bool IsSDWA = false;
184 
185   // ToDo: AMDGPUDisassembler supports only VI ISA.
186   if (!STI.getFeatureBits()[AMDGPU::FeatureGCN3Encoding] && !isGFX10())
187     report_fatal_error("Disassembly not yet supported for subtarget");
188 
189   unsigned MaxInstBytesNum = (std::min)(
190     STI.getFeatureBits()[AMDGPU::FeatureGFX10] ? (size_t) 20 :
191     STI.getFeatureBits()[AMDGPU::FeatureVOP3Literal] ? (size_t) 12 : (size_t)8,
192     Bytes_.size());
193   Bytes = Bytes_.slice(0, MaxInstBytesNum);
194 
195   DecodeStatus Res = MCDisassembler::Fail;
196   do {
197     // ToDo: better to switch encoding length using some bit predicate
198     // but it is unknown yet, so try all we can
199 
200     // Try to decode DPP and SDWA first to solve conflict with VOP1 and VOP2
201     // encodings
202     if (Bytes.size() >= 8) {
203       const uint64_t QW = eatBytes<uint64_t>(Bytes);
204       Res = tryDecodeInst(DecoderTableDPP64, MI, QW, Address);
205       if (Res) break;
206 
207       Res = tryDecodeInst(DecoderTableSDWA64, MI, QW, Address);
208       if (Res) { IsSDWA = true;  break; }
209 
210       Res = tryDecodeInst(DecoderTableSDWA964, MI, QW, Address);
211       if (Res) { IsSDWA = true;  break; }
212 
213       Res = tryDecodeInst(DecoderTableSDWA1064, MI, QW, Address);
214       if (Res) { IsSDWA = true;  break; }
215 
216       // Some GFX9 subtargets repurposed the v_mad_mix_f32, v_mad_mixlo_f16 and
217       // v_mad_mixhi_f16 for FMA variants. Try to decode using this special
218       // table first so we print the correct name.
219 
220       if (STI.getFeatureBits()[AMDGPU::FeatureFmaMixInsts]) {
221         Res = tryDecodeInst(DecoderTableGFX9_DL64, MI, QW, Address);
222         if (Res) break;
223       }
224 
225       if (STI.getFeatureBits()[AMDGPU::FeatureUnpackedD16VMem]) {
226         Res = tryDecodeInst(DecoderTableGFX80_UNPACKED64, MI, QW, Address);
227         if (Res)
228           break;
229       }
230 
231       // Some GFX9 subtargets repurposed the v_mad_mix_f32, v_mad_mixlo_f16 and
232       // v_mad_mixhi_f16 for FMA variants. Try to decode using this special
233       // table first so we print the correct name.
234       if (STI.getFeatureBits()[AMDGPU::FeatureFmaMixInsts]) {
235         Res = tryDecodeInst(DecoderTableGFX9_DL64, MI, QW, Address);
236         if (Res)
237           break;
238       }
239     }
240 
241     // Reinitialize Bytes as DPP64 could have eaten too much
242     Bytes = Bytes_.slice(0, MaxInstBytesNum);
243 
244     // Try decode 32-bit instruction
245     if (Bytes.size() < 4) break;
246     const uint32_t DW = eatBytes<uint32_t>(Bytes);
247     Res = tryDecodeInst(DecoderTableGFX832, MI, DW, Address);
248     if (Res) break;
249 
250     Res = tryDecodeInst(DecoderTableAMDGPU32, MI, DW, Address);
251     if (Res) break;
252 
253     Res = tryDecodeInst(DecoderTableGFX932, MI, DW, Address);
254     if (Res) break;
255 
256     Res = tryDecodeInst(DecoderTableGFX1032, MI, DW, Address);
257     if (Res) break;
258 
259     if (Bytes.size() < 4) break;
260     const uint64_t QW = ((uint64_t)eatBytes<uint32_t>(Bytes) << 32) | DW;
261     Res = tryDecodeInst(DecoderTableGFX864, MI, QW, Address);
262     if (Res) break;
263 
264     Res = tryDecodeInst(DecoderTableAMDGPU64, MI, QW, Address);
265     if (Res) break;
266 
267     Res = tryDecodeInst(DecoderTableGFX964, MI, QW, Address);
268     if (Res) break;
269 
270     Res = tryDecodeInst(DecoderTableGFX1064, MI, QW, Address);
271   } while (false);
272 
273   if (Res && (MaxInstBytesNum - Bytes.size()) == 12 && (!HasLiteral ||
274         !(MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::VOP3))) {
275     MaxInstBytesNum = 8;
276     Bytes = Bytes_.slice(0, MaxInstBytesNum);
277     eatBytes<uint64_t>(Bytes);
278   }
279 
280   if (Res && (MI.getOpcode() == AMDGPU::V_MAC_F32_e64_vi ||
281               MI.getOpcode() == AMDGPU::V_MAC_F32_e64_gfx6_gfx7 ||
282               MI.getOpcode() == AMDGPU::V_MAC_F32_e64_gfx10 ||
283               MI.getOpcode() == AMDGPU::V_MAC_F16_e64_vi ||
284               MI.getOpcode() == AMDGPU::V_FMAC_F32_e64_vi ||
285               MI.getOpcode() == AMDGPU::V_FMAC_F32_e64_gfx10 ||
286               MI.getOpcode() == AMDGPU::V_FMAC_F16_e64_gfx10)) {
287     // Insert dummy unused src2_modifiers.
288     insertNamedMCOperand(MI, MCOperand::createImm(0),
289                          AMDGPU::OpName::src2_modifiers);
290   }
291 
292   if (Res && (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::MIMG)) {
293     int VAddr0Idx =
294         AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0);
295     int RsrcIdx =
296         AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc);
297     unsigned NSAArgs = RsrcIdx - VAddr0Idx - 1;
298     if (VAddr0Idx >= 0 && NSAArgs > 0) {
299       unsigned NSAWords = (NSAArgs + 3) / 4;
300       if (Bytes.size() < 4 * NSAWords) {
301         Res = MCDisassembler::Fail;
302       } else {
303         for (unsigned i = 0; i < NSAArgs; ++i) {
304           MI.insert(MI.begin() + VAddr0Idx + 1 + i,
305                     decodeOperand_VGPR_32(Bytes[i]));
306         }
307         Bytes = Bytes.slice(4 * NSAWords);
308       }
309     }
310 
311     if (Res)
312       Res = convertMIMGInst(MI);
313   }
314 
315   if (Res && IsSDWA)
316     Res = convertSDWAInst(MI);
317 
318   int VDstIn_Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
319                                               AMDGPU::OpName::vdst_in);
320   if (VDstIn_Idx != -1) {
321     int Tied = MCII->get(MI.getOpcode()).getOperandConstraint(VDstIn_Idx,
322                            MCOI::OperandConstraint::TIED_TO);
323     if (Tied != -1 && (MI.getNumOperands() <= (unsigned)VDstIn_Idx ||
324          !MI.getOperand(VDstIn_Idx).isReg() ||
325          MI.getOperand(VDstIn_Idx).getReg() != MI.getOperand(Tied).getReg())) {
326       if (MI.getNumOperands() > (unsigned)VDstIn_Idx)
327         MI.erase(&MI.getOperand(VDstIn_Idx));
328       insertNamedMCOperand(MI,
329         MCOperand::createReg(MI.getOperand(Tied).getReg()),
330         AMDGPU::OpName::vdst_in);
331     }
332   }
333 
334   // if the opcode was not recognized we'll assume a Size of 4 bytes
335   // (unless there are fewer bytes left)
336   Size = Res ? (MaxInstBytesNum - Bytes.size())
337              : std::min((size_t)4, Bytes_.size());
338   return Res;
339 }
340 
// Fix up a decoded SDWA instruction: the generated decoders cannot recover
// operands that are implicit in the SDWA encoding, so insert them here --
// clamp for GFX9/GFX10 VOPC, and sdst=VCC (VOPC) or omod (VOP1/VOP2) for VI.
DecodeStatus AMDGPUDisassembler::convertSDWAInst(MCInst &MI) const {
  if (STI.getFeatureBits()[AMDGPU::FeatureGFX9] ||
      STI.getFeatureBits()[AMDGPU::FeatureGFX10]) {
    if (AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst) != -1)
      // VOPC - insert clamp
      insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::clamp);
  } else if (STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands]) {
    int SDst = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst);
    if (SDst != -1) {
      // VOPC - insert VCC register as sdst
      insertNamedMCOperand(MI, createRegOperand(AMDGPU::VCC),
                           AMDGPU::OpName::sdst);
    } else {
      // VOP1/2 - insert omod if present in instruction
      insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::omod);
    }
  }
  return MCDisassembler::Success;
}
360 
// Note that before gfx10, the MIMG encoding provided no information about
// VADDR size. Consequently, decoded instructions always show address as if it
// has 1 dword, which could be not really so.
//
// Fix up a decoded MIMG instruction: compute the real VData/VAddr dword
// counts (from dmask, d16, and -- on GFX10 -- the dim operand), switch to
// the MIMG opcode variant with matching operand sizes, and widen the
// register operands accordingly. Returns Success even when no fixup can be
// applied, leaving the instruction as decoded.
DecodeStatus AMDGPUDisassembler::convertMIMGInst(MCInst &MI) const {

  int VDstIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                           AMDGPU::OpName::vdst);

  int VDataIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::vdata);
  int VAddr0Idx =
      AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0);
  int DMaskIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::dmask);

  int TFEIdx   = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::tfe);
  int D16Idx   = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::d16);

  assert(VDataIdx != -1);
  assert(DMaskIdx != -1);
  assert(TFEIdx != -1);

  const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(MI.getOpcode());
  // Atomics are the only MIMG variants with a vdst operand here.
  bool IsAtomic = (VDstIdx != -1);
  bool IsGather4 = MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::Gather4;

  bool IsNSA = false;
  unsigned AddrSize = Info->VAddrDwords;

  if (STI.getFeatureBits()[AMDGPU::FeatureGFX10]) {
    // On GFX10 the address size can be computed exactly from the dim operand
    // and the base opcode's argument list.
    unsigned DimIdx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::dim);
    const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
        AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode);
    const AMDGPU::MIMGDimInfo *Dim =
        AMDGPU::getMIMGDimInfoByEncoding(MI.getOperand(DimIdx).getImm());

    AddrSize = BaseOpcode->NumExtraArgs +
               (BaseOpcode->Gradients ? Dim->NumGradients : 0) +
               (BaseOpcode->Coordinates ? Dim->NumCoords : 0) +
               (BaseOpcode->LodOrClampOrMip ? 1 : 0);
    IsNSA = Info->MIMGEncoding == AMDGPU::MIMGEncGfx10NSA;
    if (!IsNSA) {
      // Non-NSA encodings round the address up to the next power-of-two-ish
      // register tuple size.
      if (AddrSize > 8)
        AddrSize = 16;
      else if (AddrSize > 4)
        AddrSize = 8;
    } else {
      if (AddrSize > Info->VAddrDwords) {
        // The NSA encoding does not contain enough operands for the combination
        // of base opcode / dimension. Should this be an error?
        return MCDisassembler::Success;
      }
    }
  }

  // One data dword per enabled dmask channel (gather4 always returns 4).
  unsigned DMask = MI.getOperand(DMaskIdx).getImm() & 0xf;
  unsigned DstSize = IsGather4 ? 4 : std::max(countPopulation(DMask), 1u);

  // Packed D16 halves the data size (rounded up).
  bool D16 = D16Idx >= 0 && MI.getOperand(D16Idx).getImm();
  if (D16 && AMDGPU::hasPackedD16(STI)) {
    DstSize = (DstSize + 1) / 2;
  }

  // FIXME: Add tfe support
  if (MI.getOperand(TFEIdx).getImm())
    return MCDisassembler::Success;

  if (DstSize == Info->VDataDwords && AddrSize == Info->VAddrDwords)
    return MCDisassembler::Success;

  int NewOpcode =
      AMDGPU::getMIMGOpcode(Info->BaseOpcode, Info->MIMGEncoding, DstSize, AddrSize);
  if (NewOpcode == -1)
    return MCDisassembler::Success;

  // Widen the register to the correct number of enabled channels.
  unsigned NewVdata = AMDGPU::NoRegister;
  if (DstSize != Info->VDataDwords) {
    auto DataRCID = MCII->get(NewOpcode).OpInfo[VDataIdx].RegClass;

    // Get first subregister of VData
    unsigned Vdata0 = MI.getOperand(VDataIdx).getReg();
    unsigned VdataSub0 = MRI.getSubReg(Vdata0, AMDGPU::sub0);
    Vdata0 = (VdataSub0 != 0)? VdataSub0 : Vdata0;

    NewVdata = MRI.getMatchingSuperReg(Vdata0, AMDGPU::sub0,
                                       &MRI.getRegClass(DataRCID));
    if (NewVdata == AMDGPU::NoRegister) {
      // It's possible to encode this such that the low register + enabled
      // components exceeds the register count.
      return MCDisassembler::Success;
    }
  }

  // Likewise widen vaddr0 for non-NSA GFX10 when the address size changed.
  unsigned NewVAddr0 = AMDGPU::NoRegister;
  if (STI.getFeatureBits()[AMDGPU::FeatureGFX10] && !IsNSA &&
      AddrSize != Info->VAddrDwords) {
    unsigned VAddr0 = MI.getOperand(VAddr0Idx).getReg();
    unsigned VAddrSub0 = MRI.getSubReg(VAddr0, AMDGPU::sub0);
    VAddr0 = (VAddrSub0 != 0) ? VAddrSub0 : VAddr0;

    auto AddrRCID = MCII->get(NewOpcode).OpInfo[VAddr0Idx].RegClass;
    NewVAddr0 = MRI.getMatchingSuperReg(VAddr0, AMDGPU::sub0,
                                        &MRI.getRegClass(AddrRCID));
    if (NewVAddr0 == AMDGPU::NoRegister)
      return MCDisassembler::Success;
  }

  MI.setOpcode(NewOpcode);

  if (NewVdata != AMDGPU::NoRegister) {
    MI.getOperand(VDataIdx) = MCOperand::createReg(NewVdata);

    if (IsAtomic) {
      // Atomic operations have an additional operand (a copy of data)
      MI.getOperand(VDstIdx) = MCOperand::createReg(NewVdata);
    }
  }

  if (NewVAddr0 != AMDGPU::NoRegister) {
    MI.getOperand(VAddr0Idx) = MCOperand::createReg(NewVAddr0);
  } else if (IsNSA) {
    // NSA: drop the surplus per-dword address operands beyond AddrSize.
    assert(AddrSize <= Info->VAddrDwords);
    MI.erase(MI.begin() + VAddr0Idx + AddrSize,
             MI.begin() + VAddr0Idx + Info->VAddrDwords);
  }

  return MCDisassembler::Success;
}
493 
494 const char* AMDGPUDisassembler::getRegClassName(unsigned RegClassID) const {
495   return getContext().getRegisterInfo()->
496     getRegClassName(&AMDGPUMCRegisterClasses[RegClassID]);
497 }
498 
inline
MCOperand AMDGPUDisassembler::errOperand(unsigned V,
                                         const Twine& ErrMsg) const {
  // Surface the error through the disassembly comment stream; the returned
  // invalid MCOperand makes addOperand() report SoftFail.
  *CommentStream << "Error: " + ErrMsg;

  // ToDo: add support for error operands to MCInst.h
  // return MCOperand::createError(V);
  return MCOperand();
}
508 
509 inline
510 MCOperand AMDGPUDisassembler::createRegOperand(unsigned int RegId) const {
511   return MCOperand::createReg(AMDGPU::getMCReg(RegId, STI));
512 }
513 
514 inline
515 MCOperand AMDGPUDisassembler::createRegOperand(unsigned RegClassID,
516                                                unsigned Val) const {
517   const auto& RegCl = AMDGPUMCRegisterClasses[RegClassID];
518   if (Val >= RegCl.getNumRegs())
519     return errOperand(Val, Twine(getRegClassName(RegClassID)) +
520                            ": unknown register " + Twine(Val));
521   return createRegOperand(RegCl.getRegister(Val));
522 }
523 
// Build a scalar register operand. \p Val is the encoded scalar register
// number; wider tuple classes encode the first register, which must be
// aligned to the tuple width (hence the shift below).
inline
MCOperand AMDGPUDisassembler::createSRegOperand(unsigned SRegClassID,
                                                unsigned Val) const {
  // ToDo: SI/CI have 104 SGPRs, VI - 102
  // Valery: here we accepting as much as we can, let assembler sort it out
  int shift = 0;
  switch (SRegClassID) {
  case AMDGPU::SGPR_32RegClassID:
  case AMDGPU::TTMP_32RegClassID:
    break;
  case AMDGPU::SGPR_64RegClassID:
  case AMDGPU::TTMP_64RegClassID:
    shift = 1;
    break;
  case AMDGPU::SGPR_128RegClassID:
  case AMDGPU::TTMP_128RegClassID:
  // ToDo: unclear if s[100:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  case AMDGPU::SGPR_256RegClassID:
  case AMDGPU::TTMP_256RegClassID:
  // ToDo: unclear if s[96:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  case AMDGPU::SGPR_512RegClassID:
  case AMDGPU::TTMP_512RegClassID:
    shift = 2;
    break;
  // ToDo: unclear if s[88:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  default:
    llvm_unreachable("unhandled register class");
  }

  // Warn (via the comment stream) about misaligned encodings, but still
  // decode them.
  if (Val % (1 << shift)) {
    *CommentStream << "Warning: " << getRegClassName(SRegClassID)
                   << ": scalar reg isn't aligned " << Val;
  }

  return createRegOperand(SRegClassID, Val >> shift);
}
563 
// Vector-or-scalar source operands of 32/64/128 bits: any src encoding
// (VGPR, SGPR, ttmp, inline constant, literal, special register) is allowed,
// so these all defer to the generic source decoder.
MCOperand AMDGPUDisassembler::decodeOperand_VS_32(unsigned Val) const {
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_128(unsigned Val) const {
  return decodeSrcOp(OPW128, Val);
}

// 16-bit and packed 2x16-bit source operands.
MCOperand AMDGPUDisassembler::decodeOperand_VSrc16(unsigned Val) const {
  return decodeSrcOp(OPW16, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VSrcV216(unsigned Val) const {
  return decodeSrcOp(OPWV216, Val);
}
583 
584 MCOperand AMDGPUDisassembler::decodeOperand_VGPR_32(unsigned Val) const {
585   // Some instructions have operand restrictions beyond what the encoding
586   // allows. Some ordinarily VSrc_32 operands are VGPR_32, so clear the extra
587   // high bit.
588   Val &= 255;
589 
590   return createRegOperand(AMDGPU::VGPR_32RegClassID, Val);
591 }
592 
MCOperand AMDGPUDisassembler::decodeOperand_VRegOrLds_32(unsigned Val) const {
  // Decoded via the generic source decoder, so non-VGPR encodings are
  // accepted as well.
  return decodeSrcOp(OPW32, Val);
}

// Pure VGPR tuple operands: \p Val is the first register of the tuple.
MCOperand AMDGPUDisassembler::decodeOperand_VReg_64(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_64RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_96(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_96RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_128(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_128RegClassID, Val);
}
608 
MCOperand AMDGPUDisassembler::decodeOperand_SReg_32(unsigned Val) const {
  // table-gen generated disassembler doesn't care about operand types
  // leaving only registry class so SSrc_32 operand turns into SReg_32
  // and therefore we accept immediates and literals here as well
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32_XM0_XEXEC(
  unsigned Val) const {
  // SReg_32_XM0 is SReg_32 without M0 or EXEC_LO/EXEC_HI
  return decodeOperand_SReg_32(Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32_XEXEC_HI(
  unsigned Val) const {
  // SReg_32_XM0 is SReg_32 without EXEC_HI
  return decodeOperand_SReg_32(Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SRegOrLds_32(unsigned Val) const {
  // table-gen generated disassembler doesn't care about operand types
  // leaving only registry class so SSrc_32 operand turns into SReg_32
  // and therefore we accept immediates and literals here as well
  return decodeSrcOp(OPW32, Val);
}

// Wider scalar source operands; like SReg_32 these go through the generic
// source decoder.
MCOperand AMDGPUDisassembler::decodeOperand_SReg_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_64_XEXEC(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_128(unsigned Val) const {
  return decodeSrcOp(OPW128, Val);
}

// 256/512-bit scalar operands use the destination decoder: only SGPR and
// ttmp encodings are valid (no immediates).
MCOperand AMDGPUDisassembler::decodeOperand_SReg_256(unsigned Val) const {
  return decodeDstOp(OPW256, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_512(unsigned Val) const {
  return decodeDstOp(OPW512, Val);
}
654 
// Read the 32-bit literal that trails the instruction encoding. The value is
// cached in HasLiteral/Literal so that several operands referencing the same
// literal consume it from the byte stream only once.
MCOperand AMDGPUDisassembler::decodeLiteralConstant() const {
  // For now all literal constants are supposed to be unsigned integer
  // ToDo: deal with signed/unsigned 64-bit integer constants
  // ToDo: deal with float/double constants
  if (!HasLiteral) {
    if (Bytes.size() < 4) {
      return errOperand(0, "cannot read literal, inst bytes left " +
                        Twine(Bytes.size()));
    }
    HasLiteral = true;
    Literal = eatBytes<uint32_t>(Bytes);
  }
  return MCOperand::createImm(Literal);
}
669 
// Decode an inline integer constant. Encodings at or below
// INLINE_INTEGER_C_POSITIVE_MAX map to the non-negative value
// (Imm - INLINE_INTEGER_C_MIN); larger encodings map to the negative value
// (INLINE_INTEGER_C_POSITIVE_MAX - Imm).
MCOperand AMDGPUDisassembler::decodeIntImmed(unsigned Imm) {
  using namespace AMDGPU::EncValues;

  assert(Imm >= INLINE_INTEGER_C_MIN && Imm <= INLINE_INTEGER_C_MAX);
  return MCOperand::createImm((Imm <= INLINE_INTEGER_C_POSITIVE_MAX) ?
    (static_cast<int64_t>(Imm) - INLINE_INTEGER_C_MIN) :
    (INLINE_INTEGER_C_POSITIVE_MAX - static_cast<int64_t>(Imm)));
      // Cast prevents negative overflow.
}
679 
// Map an inline floating-point constant encoding (240..248) to the 32-bit
// IEEE bit pattern it denotes.
static int64_t getInlineImmVal32(unsigned Imm) {
  switch (Imm) {
  case 240:
    return FloatToBits(0.5f);
  case 241:
    return FloatToBits(-0.5f);
  case 242:
    return FloatToBits(1.0f);
  case 243:
    return FloatToBits(-1.0f);
  case 244:
    return FloatToBits(2.0f);
  case 245:
    return FloatToBits(-2.0f);
  case 246:
    return FloatToBits(4.0f);
  case 247:
    return FloatToBits(-4.0f);
  case 248: // 1 / (2 * PI)
    return 0x3e22f983;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}
704 
// Map an inline floating-point constant encoding (240..248) to the 64-bit
// IEEE bit pattern it denotes.
static int64_t getInlineImmVal64(unsigned Imm) {
  switch (Imm) {
  case 240:
    return DoubleToBits(0.5);
  case 241:
    return DoubleToBits(-0.5);
  case 242:
    return DoubleToBits(1.0);
  case 243:
    return DoubleToBits(-1.0);
  case 244:
    return DoubleToBits(2.0);
  case 245:
    return DoubleToBits(-2.0);
  case 246:
    return DoubleToBits(4.0);
  case 247:
    return DoubleToBits(-4.0);
  case 248: // 1 / (2 * PI)
    return 0x3fc45f306dc9c882;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}
729 
730 static int64_t getInlineImmVal16(unsigned Imm) {
731   switch (Imm) {
732   case 240:
733     return 0x3800;
734   case 241:
735     return 0xB800;
736   case 242:
737     return 0x3C00;
738   case 243:
739     return 0xBC00;
740   case 244:
741     return 0x4000;
742   case 245:
743     return 0xC000;
744   case 246:
745     return 0x4400;
746   case 247:
747     return 0xC400;
748   case 248: // 1 / (2 * PI)
749     return 0x3118;
750   default:
751     llvm_unreachable("invalid fp inline imm");
752   }
753 }
754 
// Decode an inline floating-point constant: the operand width selects which
// bit-pattern table the encoding is translated through.
MCOperand AMDGPUDisassembler::decodeFPImmed(OpWidthTy Width, unsigned Imm) {
  assert(Imm >= AMDGPU::EncValues::INLINE_FLOATING_C_MIN
      && Imm <= AMDGPU::EncValues::INLINE_FLOATING_C_MAX);

  // ToDo: case 248: 1/(2*PI) - is allowed only on VI
  switch (Width) {
  case OPW32:
    return MCOperand::createImm(getInlineImmVal32(Imm));
  case OPW64:
    return MCOperand::createImm(getInlineImmVal64(Imm));
  case OPW16:
  case OPWV216:
    return MCOperand::createImm(getInlineImmVal16(Imm));
  default:
    llvm_unreachable("implement me");
  }
}
772 
// Map an operand width onto the VGPR register class used to decode it.
unsigned AMDGPUDisassembler::getVgprClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;

  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // 16-bit and sub-dword widths share the 32-bit class.
  case OPW32:
  case OPW16:
  case OPWV216:
    return VGPR_32RegClassID;
  case OPW64: return VReg_64RegClassID;
  case OPW128: return VReg_128RegClassID;
  }
}
787 
// Map an operand width onto the SGPR register class used to decode it.
unsigned AMDGPUDisassembler::getSgprClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;

  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // 16-bit and sub-dword widths share the 32-bit class.
  case OPW32:
  case OPW16:
  case OPWV216:
    return SGPR_32RegClassID;
  case OPW64: return SGPR_64RegClassID;
  case OPW128: return SGPR_128RegClassID;
  case OPW256: return SGPR_256RegClassID;
  case OPW512: return SGPR_512RegClassID;
  }
}
804 
// Map an operand width onto the trap-temporary (ttmp) register class used to
// decode it.
unsigned AMDGPUDisassembler::getTtmpClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;

  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // 16-bit and sub-dword widths share the 32-bit class.
  case OPW32:
  case OPW16:
  case OPWV216:
    return TTMP_32RegClassID;
  case OPW64: return TTMP_64RegClassID;
  case OPW128: return TTMP_128RegClassID;
  case OPW256: return TTMP_256RegClassID;
  case OPW512: return TTMP_512RegClassID;
  }
}
821 
822 int AMDGPUDisassembler::getTTmpIdx(unsigned Val) const {
823   using namespace AMDGPU::EncValues;
824 
825   unsigned TTmpMin =
826       (isGFX9() || isGFX10()) ? TTMP_GFX9_GFX10_MIN : TTMP_VI_MIN;
827   unsigned TTmpMax =
828       (isGFX9() || isGFX10()) ? TTMP_GFX9_GFX10_MAX : TTMP_VI_MAX;
829 
830   return (TTmpMin <= Val && Val <= TTmpMax)? Val - TTmpMin : -1;
831 }
832 
// Decode a generic 9-bit source operand encoding. The encoding space is
// partitioned, and checked here, in this order: VGPRs, SGPRs, ttmp
// registers, inline integer constants, inline floating-point constants, the
// trailing literal, and finally the special registers.
MCOperand AMDGPUDisassembler::decodeSrcOp(const OpWidthTy Width, unsigned Val) const {
  using namespace AMDGPU::EncValues;

  assert(Val < 512); // enum9

  if (VGPR_MIN <= Val && Val <= VGPR_MAX) {
    return createRegOperand(getVgprClassId(Width), Val - VGPR_MIN);
  }
  if (Val <= SGPR_MAX) {
    assert(SGPR_MIN == 0); // "SGPR_MIN <= Val" is always true and causes compilation warning.
    return createSRegOperand(getSgprClassId(Width), Val - SGPR_MIN);
  }

  int TTmpIdx = getTTmpIdx(Val);
  if (TTmpIdx >= 0) {
    return createSRegOperand(getTtmpClassId(Width), TTmpIdx);
  }

  if (INLINE_INTEGER_C_MIN <= Val && Val <= INLINE_INTEGER_C_MAX)
    return decodeIntImmed(Val);

  if (INLINE_FLOATING_C_MIN <= Val && Val <= INLINE_FLOATING_C_MAX)
    return decodeFPImmed(Width, Val);

  if (Val == LITERAL_CONST)
    return decodeLiteralConstant();

  switch (Width) {
  case OPW32:
  case OPW16:
  case OPWV216:
    return decodeSpecialReg32(Val);
  case OPW64:
    return decodeSpecialReg64(Val);
  default:
    llvm_unreachable("unexpected immediate type");
  }
}
871 
872 MCOperand AMDGPUDisassembler::decodeDstOp(const OpWidthTy Width, unsigned Val) const {
873   using namespace AMDGPU::EncValues;
874 
875   assert(Val < 128);
876   assert(Width == OPW256 || Width == OPW512);
877 
878   if (Val <= SGPR_MAX) {
879     assert(SGPR_MIN == 0); // "SGPR_MIN <= Val" is always true and causes compilation warning.
880     return createSRegOperand(getSgprClassId(Width), Val - SGPR_MIN);
881   }
882 
883   int TTmpIdx = getTTmpIdx(Val);
884   if (TTmpIdx >= 0) {
885     return createSRegOperand(getTtmpClassId(Width), TTmpIdx);
886   }
887 
888   llvm_unreachable("unknown dst register");
889 }
890 
// Decode a specially-encoded 32-bit register operand (the operand encodings
// above the SGPR range).  Encodings with no register representation here
// (vccz, execz, and anything unlisted) produce an error operand.
MCOperand AMDGPUDisassembler::decodeSpecialReg32(unsigned Val) const {
  using namespace AMDGPU;

  switch (Val) {
  // Halves of 64-bit architected registers.
  case 102: return createRegOperand(FLAT_SCR_LO);
  case 103: return createRegOperand(FLAT_SCR_HI);
  case 104: return createRegOperand(XNACK_MASK_LO);
  case 105: return createRegOperand(XNACK_MASK_HI);
  case 106: return createRegOperand(VCC_LO);
  case 107: return createRegOperand(VCC_HI);
  case 108: return createRegOperand(TBA_LO);
  case 109: return createRegOperand(TBA_HI);
  case 110: return createRegOperand(TMA_LO);
  case 111: return createRegOperand(TMA_HI);
  case 124: return createRegOperand(M0);
  case 125: return createRegOperand(SGPR_NULL);
  case 126: return createRegOperand(EXEC_LO);
  case 127: return createRegOperand(EXEC_HI);
  // Apertures and POPS wave id.
  case 235: return createRegOperand(SRC_SHARED_BASE);
  case 236: return createRegOperand(SRC_SHARED_LIMIT);
  case 237: return createRegOperand(SRC_PRIVATE_BASE);
  case 238: return createRegOperand(SRC_PRIVATE_LIMIT);
  case 239: return createRegOperand(SRC_POPS_EXITING_WAVE_ID);
    // ToDo: no support for vccz register
  case 251: break;
    // ToDo: no support for execz register
  case 252: break;
  case 253: return createRegOperand(SCC);
  case 254: return createRegOperand(LDS_DIRECT);
  default: break;
  }
  return errOperand(Val, "unknown operand encoding " + Twine(Val));
}
924 
// Decode a specially-encoded 64-bit register operand.  Same encoding values
// as the 32-bit variant, but mapped to the full 64-bit registers; unlisted
// encodings produce an error operand.
MCOperand AMDGPUDisassembler::decodeSpecialReg64(unsigned Val) const {
  using namespace AMDGPU;

  switch (Val) {
  case 102: return createRegOperand(FLAT_SCR);
  case 104: return createRegOperand(XNACK_MASK);
  case 106: return createRegOperand(VCC);
  case 108: return createRegOperand(TBA);
  case 110: return createRegOperand(TMA);
  case 126: return createRegOperand(EXEC);
  case 235: return createRegOperand(SRC_SHARED_BASE);
  case 236: return createRegOperand(SRC_SHARED_LIMIT);
  case 237: return createRegOperand(SRC_PRIVATE_BASE);
  case 238: return createRegOperand(SRC_PRIVATE_LIMIT);
  case 239: return createRegOperand(SRC_POPS_EXITING_WAVE_ID);
  default: break;
  }
  return errOperand(Val, "unknown operand encoding " + Twine(Val));
}
944 
945 MCOperand AMDGPUDisassembler::decodeSDWASrc(const OpWidthTy Width,
946                                             const unsigned Val) const {
947   using namespace AMDGPU::SDWA;
948   using namespace AMDGPU::EncValues;
949 
950   if (STI.getFeatureBits()[AMDGPU::FeatureGFX9] ||
951       STI.getFeatureBits()[AMDGPU::FeatureGFX10]) {
952     // XXX: cast to int is needed to avoid stupid warning:
953     // compare with unsigned is always true
954     if (int(SDWA9EncValues::SRC_VGPR_MIN) <= int(Val) &&
955         Val <= SDWA9EncValues::SRC_VGPR_MAX) {
956       return createRegOperand(getVgprClassId(Width),
957                               Val - SDWA9EncValues::SRC_VGPR_MIN);
958     }
959     if (SDWA9EncValues::SRC_SGPR_MIN <= Val &&
960         Val <= (isGFX10() ? SDWA9EncValues::SRC_SGPR_MAX_GFX10
961                           : SDWA9EncValues::SRC_SGPR_MAX_SI)) {
962       return createSRegOperand(getSgprClassId(Width),
963                                Val - SDWA9EncValues::SRC_SGPR_MIN);
964     }
965     if (SDWA9EncValues::SRC_TTMP_MIN <= Val &&
966         Val <= SDWA9EncValues::SRC_TTMP_MAX) {
967       return createSRegOperand(getTtmpClassId(Width),
968                                Val - SDWA9EncValues::SRC_TTMP_MIN);
969     }
970 
971     const unsigned SVal = Val - SDWA9EncValues::SRC_SGPR_MIN;
972 
973     if (INLINE_INTEGER_C_MIN <= SVal && SVal <= INLINE_INTEGER_C_MAX)
974       return decodeIntImmed(SVal);
975 
976     if (INLINE_FLOATING_C_MIN <= SVal && SVal <= INLINE_FLOATING_C_MAX)
977       return decodeFPImmed(Width, SVal);
978 
979     return decodeSpecialReg32(SVal);
980   } else if (STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands]) {
981     return createRegOperand(getVgprClassId(Width), Val);
982   }
983   llvm_unreachable("unsupported target");
984 }
985 
// Decode a 16-bit SDWA source operand; thin wrapper over decodeSDWASrc.
MCOperand AMDGPUDisassembler::decodeSDWASrc16(unsigned Val) const {
  return decodeSDWASrc(OPW16, Val);
}
989 
// Decode a 32-bit SDWA source operand; thin wrapper over decodeSDWASrc.
MCOperand AMDGPUDisassembler::decodeSDWASrc32(unsigned Val) const {
  return decodeSDWASrc(OPW32, Val);
}
993 
994 MCOperand AMDGPUDisassembler::decodeSDWAVopcDst(unsigned Val) const {
995   using namespace AMDGPU::SDWA;
996 
997   assert((STI.getFeatureBits()[AMDGPU::FeatureGFX9] ||
998           STI.getFeatureBits()[AMDGPU::FeatureGFX10]) &&
999          "SDWAVopcDst should be present only on GFX9+");
1000 
1001   if (Val & SDWA9EncValues::VOPC_DST_VCC_MASK) {
1002     Val &= SDWA9EncValues::VOPC_DST_SGPR_MASK;
1003 
1004     int TTmpIdx = getTTmpIdx(Val);
1005     if (TTmpIdx >= 0) {
1006       return createSRegOperand(getTtmpClassId(OPW64), TTmpIdx);
1007     } else if (Val > SGPR_MAX) {
1008       return decodeSpecialReg64(Val);
1009     } else {
1010       return createSRegOperand(getSgprClassId(OPW64), Val);
1011     }
1012   } else {
1013     return createRegOperand(AMDGPU::VCC);
1014   }
1015 }
1016 
1017 bool AMDGPUDisassembler::isVI() const {
1018   return STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands];
1019 }
1020 
1021 bool AMDGPUDisassembler::isGFX9() const {
1022   return STI.getFeatureBits()[AMDGPU::FeatureGFX9];
1023 }
1024 
1025 bool AMDGPUDisassembler::isGFX10() const {
1026   return STI.getFeatureBits()[AMDGPU::FeatureGFX10];
1027 }
1028 
1029 //===----------------------------------------------------------------------===//
1030 // AMDGPUSymbolizer
1031 //===----------------------------------------------------------------------===//
1032 
1033 // Try to find symbol name for specified label
1034 bool AMDGPUSymbolizer::tryAddingSymbolicOperand(MCInst &Inst,
1035                                 raw_ostream &/*cStream*/, int64_t Value,
1036                                 uint64_t /*Address*/, bool IsBranch,
1037                                 uint64_t /*Offset*/, uint64_t /*InstSize*/) {
1038   using SymbolInfoTy = std::tuple<uint64_t, StringRef, uint8_t>;
1039   using SectionSymbolsTy = std::vector<SymbolInfoTy>;
1040 
1041   if (!IsBranch) {
1042     return false;
1043   }
1044 
1045   auto *Symbols = static_cast<SectionSymbolsTy *>(DisInfo);
1046   if (!Symbols)
1047     return false;
1048 
1049   auto Result = std::find_if(Symbols->begin(), Symbols->end(),
1050                              [Value](const SymbolInfoTy& Val) {
1051                                 return std::get<0>(Val) == static_cast<uint64_t>(Value)
1052                                     && std::get<2>(Val) == ELF::STT_NOTYPE;
1053                              });
1054   if (Result != Symbols->end()) {
1055     auto *Sym = Ctx.getOrCreateSymbol(std::get<1>(*Result));
1056     const auto *Add = MCSymbolRefExpr::create(Sym, Ctx);
1057     Inst.addOperand(MCOperand::createExpr(Add));
1058     return true;
1059   }
1060   return false;
1061 }
1062 
// PC-relative load comments are not implemented for AMDGPU; reaching this
// hook is a programming error (llvm_unreachable aborts in asserts builds).
void AMDGPUSymbolizer::tryAddingPcLoadReferenceComment(raw_ostream &cStream,
                                                       int64_t Value,
                                                       uint64_t Address) {
  llvm_unreachable("unimplemented");
}
1068 
1069 //===----------------------------------------------------------------------===//
1070 // Initialization
1071 //===----------------------------------------------------------------------===//
1072 
// Factory registered with the TargetRegistry: builds an AMDGPUSymbolizer.
// The opaque DisInfo pointer is forwarded and later interpreted as the
// section symbol list by tryAddingSymbolicOperand.
static MCSymbolizer *createAMDGPUSymbolizer(const Triple &/*TT*/,
                              LLVMOpInfoCallback /*GetOpInfo*/,
                              LLVMSymbolLookupCallback /*SymbolLookUp*/,
                              void *DisInfo,
                              MCContext *Ctx,
                              std::unique_ptr<MCRelocationInfo> &&RelInfo) {
  return new AMDGPUSymbolizer(*Ctx, std::move(RelInfo), DisInfo);
}
1081 
// Factory registered with the TargetRegistry: builds an AMDGPUDisassembler
// with a freshly created MCInstrInfo for the target.
static MCDisassembler *createAMDGPUDisassembler(const Target &T,
                                                const MCSubtargetInfo &STI,
                                                MCContext &Ctx) {
  return new AMDGPUDisassembler(STI, Ctx, T.createMCInstrInfo());
}
1087 
1088 extern "C" void LLVMInitializeAMDGPUDisassembler() {
1089   TargetRegistry::RegisterMCDisassembler(getTheGCNTarget(),
1090                                          createAMDGPUDisassembler);
1091   TargetRegistry::RegisterMCSymbolizer(getTheGCNTarget(),
1092                                        createAMDGPUSymbolizer);
1093 }
1094