//===-- AMDGPUDisassembler.cpp - Disassembler for AMDGPU ISA --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//
//
/// \file
///
/// This file contains the definition of the AMDGPU ISA disassembler.
//
//===----------------------------------------------------------------------===//

// ToDo: What to do with instruction suffixes (v_mov_b32 vs v_mov_b32_e32)?

#include "AMDGPUDisassembler.h"
#include "AMDGPU.h"
#include "AMDGPURegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIDefines.h"
#include "Utils/AMDGPUBaseInfo.h"

#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCFixedLenDisassembler.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/TargetRegistry.h"

using namespace llvm;

#define DEBUG_TYPE "amdgpu-disassembler"

typedef llvm::MCDisassembler::DecodeStatus DecodeStatus;


inline static MCDisassembler::DecodeStatus
addOperand(MCInst &Inst, const MCOperand& Opnd) {
  Inst.addOperand(Opnd);
  return Opnd.isValid() ?
    MCDisassembler::Success :
    MCDisassembler::SoftFail;
}

static DecodeStatus decodeSoppBrTarget(MCInst &Inst, unsigned Imm,
                                       uint64_t Addr, const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  APInt SignedOffset(18, Imm * 4, true);
  int64_t Offset = (SignedOffset.sext(64) + 4 + Addr).getSExtValue();

  if (DAsm->tryAddingSymbolicOperand(Inst, Offset, Addr, true, 2, 2))
    return MCDisassembler::Success;
  return addOperand(Inst, MCOperand::createImm(Imm));
}

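// DECODE_OPERAND generates a static callback with the signature that the
// table-gen'erated decodeInstruction() expects, forwarding the raw encoding
// to the corresponding AMDGPUDisassembler member decoder.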
#define DECODE_OPERAND(StaticDecoderName, DecoderName) \
static DecodeStatus StaticDecoderName(MCInst &Inst, \
                                       unsigned Imm, \
                                       uint64_t /*Addr*/, \
                                       const void *Decoder) { \
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder); \
  return addOperand(Inst, DAsm->DecoderName(Imm)); \
}

#define DECODE_OPERAND_REG(RegClass) \
DECODE_OPERAND(Decode##RegClass##RegisterClass, decodeOperand_##RegClass)

DECODE_OPERAND_REG(VGPR_32)
DECODE_OPERAND_REG(VS_32)
DECODE_OPERAND_REG(VS_64)

DECODE_OPERAND_REG(VReg_64)
DECODE_OPERAND_REG(VReg_96)
DECODE_OPERAND_REG(VReg_128)

DECODE_OPERAND_REG(SReg_32)
DECODE_OPERAND_REG(SReg_32_XM0_XEXEC)
DECODE_OPERAND_REG(SReg_64)
DECODE_OPERAND_REG(SReg_64_XEXEC)
DECODE_OPERAND_REG(SReg_128)
DECODE_OPERAND_REG(SReg_256)
DECODE_OPERAND_REG(SReg_512)


static DecodeStatus decodeOperand_VSrc16(MCInst &Inst,
                                         unsigned Imm,
                                         uint64_t Addr,
                                         const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VSrc16(Imm));
}

static DecodeStatus decodeOperand_VSrcV216(MCInst &Inst,
                                           unsigned Imm,
                                           uint64_t Addr,
                                           const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VSrcV216(Imm));
}

#define DECODE_SDWA9(DecName) \
DECODE_OPERAND(decodeSDWA9##DecName, decodeSDWA9##DecName)

DECODE_SDWA9(Src32)
DECODE_SDWA9(Src16)
DECODE_SDWA9(VopcDst)

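// Pulls in the table-gen'erated DecoderTable* arrays and decodeInstruction()
// used below.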
#include "AMDGPUGenDisassemblerTables.inc"

//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//

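// Read a little-endian value of type T from the front of the buffer and
// advance the ArrayRef past the bytes that were consumed.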
template <typename T> static inline T eatBytes(ArrayRef<uint8_t>& Bytes) {
  assert(Bytes.size() >= sizeof(T));
  const auto Res = support::endian::read<T, support::endianness::little>(Bytes.data());
  Bytes = Bytes.slice(sizeof(T));
  return Res;
}

DecodeStatus AMDGPUDisassembler::tryDecodeInst(const uint8_t* Table,
                                               MCInst &MI,
                                               uint64_t Inst,
                                               uint64_t Address) const {
  assert(MI.getOpcode() == 0);
  assert(MI.getNumOperands() == 0);
  MCInst TmpInst;
  HasLiteral = false;
  const auto SavedBytes = Bytes;
  if (decodeInstruction(Table, TmpInst, Inst, Address, this, STI)) {
    MI = TmpInst;
    return MCDisassembler::Success;
  }
  Bytes = SavedBytes;
  return MCDisassembler::Fail;
}

DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
                                                ArrayRef<uint8_t> Bytes_,
                                                uint64_t Address,
                                                raw_ostream &WS,
                                                raw_ostream &CS) const {
  CommentStream = &CS;

  // ToDo: AMDGPUDisassembler supports only VI ISA.
  if (!STI.getFeatureBits()[AMDGPU::FeatureGCN3Encoding])
    report_fatal_error("Disassembly not yet supported for subtarget");

  const unsigned MaxInstBytesNum = (std::min)((size_t)8, Bytes_.size());
  Bytes = Bytes_.slice(0, MaxInstBytesNum);

  DecodeStatus Res = MCDisassembler::Fail;
  do {
    // ToDo: better to switch on the encoding length using some bit predicate,
    // but it is not known yet, so try everything we can.

    // Try to decode DPP and SDWA first to resolve the conflict with the VOP1
    // and VOP2 encodings.
    if (Bytes.size() >= 8) {
      const uint64_t QW = eatBytes<uint64_t>(Bytes);
      Res = tryDecodeInst(DecoderTableDPP64, MI, QW, Address);
      if (Res) break;

      Res = tryDecodeInst(DecoderTableSDWA64, MI, QW, Address);
      if (Res) break;

      Res = tryDecodeInst(DecoderTableSDWA964, MI, QW, Address);
      if (Res) break;
    }

    // Reinitialize Bytes as DPP64 could have eaten too much
    Bytes = Bytes_.slice(0, MaxInstBytesNum);

    // Try to decode a 32-bit instruction.
    if (Bytes.size() < 4) break;
    const uint32_t DW = eatBytes<uint32_t>(Bytes);
    Res = tryDecodeInst(DecoderTableVI32, MI, DW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableAMDGPU32, MI, DW, Address);
    if (Res) break;

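    // Fall back to a 64-bit encoding: append the next dword as the high half
    // on top of the dword already consumed above.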
    if (Bytes.size() < 4) break;
    const uint64_t QW = ((uint64_t)eatBytes<uint32_t>(Bytes) << 32) | DW;
    Res = tryDecodeInst(DecoderTableVI64, MI, QW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableAMDGPU64, MI, QW, Address);
  } while (false);

  if (Res && (MI.getOpcode() == AMDGPU::V_MAC_F32_e64_vi ||
              MI.getOpcode() == AMDGPU::V_MAC_F32_e64_si ||
              MI.getOpcode() == AMDGPU::V_MAC_F16_e64_vi)) {
    // Insert dummy unused src2_modifiers.
    int Src2ModIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                                AMDGPU::OpName::src2_modifiers);
    auto I = MI.begin();
    std::advance(I, Src2ModIdx);
    MI.insert(I, MCOperand::createImm(0));
  }

  Size = Res ? (MaxInstBytesNum - Bytes.size()) : 0;
  return Res;
}

const char* AMDGPUDisassembler::getRegClassName(unsigned RegClassID) const {
  return getContext().getRegisterInfo()->
    getRegClassName(&AMDGPUMCRegisterClasses[RegClassID]);
}

inline
MCOperand AMDGPUDisassembler::errOperand(unsigned V,
                                         const Twine& ErrMsg) const {
  *CommentStream << "Error: " + ErrMsg;

  // ToDo: add support for error operands to MCInst.h
  // return MCOperand::createError(V);
  return MCOperand();
}

inline
MCOperand AMDGPUDisassembler::createRegOperand(unsigned int RegId) const {
  return MCOperand::createReg(RegId);
}

inline
MCOperand AMDGPUDisassembler::createRegOperand(unsigned RegClassID,
                                               unsigned Val) const {
  const auto& RegCl = AMDGPUMCRegisterClasses[RegClassID];
  if (Val >= RegCl.getNumRegs())
    return errOperand(Val, Twine(getRegClassName(RegClassID)) +
                           ": unknown register " + Twine(Val));
  return createRegOperand(RegCl.getRegister(Val));
}

inline
MCOperand AMDGPUDisassembler::createSRegOperand(unsigned SRegClassID,
                                                unsigned Val) const {
  // ToDo: SI/CI have 104 SGPRs, VI - 102
  // Valery: here we accept as much as we can and let the assembler sort it out
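  // The encoded value is the number of the first 32-bit register in the
  // tuple, while the MC register classes enumerate whole tuples, so divide by
  // the tuple alignment (via the shift below) to get a register-class index.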
  int shift = 0;
  switch (SRegClassID) {
  case AMDGPU::SGPR_32RegClassID:
  case AMDGPU::TTMP_32RegClassID:
    break;
  case AMDGPU::SGPR_64RegClassID:
  case AMDGPU::TTMP_64RegClassID:
    shift = 1;
    break;
  case AMDGPU::SGPR_128RegClassID:
  case AMDGPU::TTMP_128RegClassID:
  // ToDo: unclear if s[100:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  case AMDGPU::SReg_256RegClassID:
  // ToDo: unclear if s[96:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  case AMDGPU::SReg_512RegClassID:
    shift = 2;
    break;
  // ToDo: unclear if s[88:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  default:
    llvm_unreachable("unhandled register class");
  }

  if (Val % (1 << shift)) {
    *CommentStream << "Warning: " << getRegClassName(SRegClassID)
                   << ": scalar reg isn't aligned " << Val;
  }

  return createRegOperand(SRegClassID, Val >> shift);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_32(unsigned Val) const {
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VSrc16(unsigned Val) const {
  return decodeSrcOp(OPW16, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VSrcV216(unsigned Val) const {
  return decodeSrcOp(OPWV216, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VGPR_32(unsigned Val) const {
  // Some instructions have operand restrictions beyond what the encoding
  // allows. Some ordinarily VSrc_32 operands are VGPR_32, so clear the extra
  // high bit.
  Val &= 255;

  return createRegOperand(AMDGPU::VGPR_32RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_64(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_64RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_96(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_96RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_128(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_128RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32(unsigned Val) const {
  // The table-gen'erated disassembler doesn't care about operand types; it
  // keeps only the register class, so an SSrc_32 operand turns into SReg_32,
  // and therefore we accept immediates and literals here as well.
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32_XM0_XEXEC(
  unsigned Val) const {
  // SReg_32_XM0 is SReg_32 without M0 or EXEC_LO/EXEC_HI
  return decodeOperand_SReg_32(Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_64_XEXEC(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_128(unsigned Val) const {
  return decodeSrcOp(OPW128, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_256(unsigned Val) const {
  return createSRegOperand(AMDGPU::SReg_256RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_512(unsigned Val) const {
  return createSRegOperand(AMDGPU::SReg_512RegClassID, Val);
}


MCOperand AMDGPUDisassembler::decodeLiteralConstant() const {
  // For now all literal constants are assumed to be unsigned integers.
  // ToDo: deal with signed/unsigned 64-bit integer constants
  // ToDo: deal with float/double constants
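  // The literal is the dword that follows the instruction; read it at most
  // once and cache it so the byte stream is not advanced again if the decoder
  // asks for it a second time.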
  if (!HasLiteral) {
    if (Bytes.size() < 4) {
      return errOperand(0, "cannot read literal, inst bytes left " +
                        Twine(Bytes.size()));
    }
    HasLiteral = true;
    Literal = eatBytes<uint32_t>(Bytes);
  }
  return MCOperand::createImm(Literal);
}

MCOperand AMDGPUDisassembler::decodeIntImmed(unsigned Imm) {
  using namespace AMDGPU::EncValues;
  assert(Imm >= INLINE_INTEGER_C_MIN && Imm <= INLINE_INTEGER_C_MAX);
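  // Inline integers encode the non-negative values in the lower part of the
  // range and the negative values above it, so map each half back separately.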
  return MCOperand::createImm((Imm <= INLINE_INTEGER_C_POSITIVE_MAX) ?
    (static_cast<int64_t>(Imm) - INLINE_INTEGER_C_MIN) :
    (INLINE_INTEGER_C_POSITIVE_MAX - static_cast<int64_t>(Imm)));
      // Cast prevents negative overflow.
}

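// The following helpers map the hardware's inline floating-point constant
// encodings to the equivalent IEEE bit patterns at 32-, 64- and 16-bit width
// (the 16-bit values below are half-precision bit patterns).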
static int64_t getInlineImmVal32(unsigned Imm) {
  switch (Imm) {
  case 240:
    return FloatToBits(0.5f);
  case 241:
    return FloatToBits(-0.5f);
  case 242:
    return FloatToBits(1.0f);
  case 243:
    return FloatToBits(-1.0f);
  case 244:
    return FloatToBits(2.0f);
  case 245:
    return FloatToBits(-2.0f);
  case 246:
    return FloatToBits(4.0f);
  case 247:
    return FloatToBits(-4.0f);
  case 248: // 1 / (2 * PI)
    return 0x3e22f983;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

static int64_t getInlineImmVal64(unsigned Imm) {
  switch (Imm) {
  case 240:
    return DoubleToBits(0.5);
  case 241:
    return DoubleToBits(-0.5);
  case 242:
    return DoubleToBits(1.0);
  case 243:
    return DoubleToBits(-1.0);
  case 244:
    return DoubleToBits(2.0);
  case 245:
    return DoubleToBits(-2.0);
  case 246:
    return DoubleToBits(4.0);
  case 247:
    return DoubleToBits(-4.0);
  case 248: // 1 / (2 * PI)
    return 0x3fc45f306dc9c882;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

static int64_t getInlineImmVal16(unsigned Imm) {
  switch (Imm) {
  case 240:
    return 0x3800;
  case 241:
    return 0xB800;
  case 242:
    return 0x3C00;
  case 243:
    return 0xBC00;
  case 244:
    return 0x4000;
  case 245:
    return 0xC000;
  case 246:
    return 0x4400;
  case 247:
    return 0xC400;
  case 248: // 1 / (2 * PI)
    return 0x3118;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

MCOperand AMDGPUDisassembler::decodeFPImmed(OpWidthTy Width, unsigned Imm) {
  assert(Imm >= AMDGPU::EncValues::INLINE_FLOATING_C_MIN
      && Imm <= AMDGPU::EncValues::INLINE_FLOATING_C_MAX);

  // ToDo: case 248: 1/(2*PI) - is allowed only on VI
  switch (Width) {
  case OPW32:
    return MCOperand::createImm(getInlineImmVal32(Imm));
  case OPW64:
    return MCOperand::createImm(getInlineImmVal64(Imm));
  case OPW16:
  case OPWV216:
    return MCOperand::createImm(getInlineImmVal16(Imm));
  default:
    llvm_unreachable("implement me");
  }
}

unsigned AMDGPUDisassembler::getVgprClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;
  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fallthrough
  case OPW32:
  case OPW16:
  case OPWV216:
    return VGPR_32RegClassID;
  case OPW64: return VReg_64RegClassID;
  case OPW128: return VReg_128RegClassID;
  }
}

unsigned AMDGPUDisassembler::getSgprClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;
  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fallthrough
  case OPW32:
  case OPW16:
  case OPWV216:
    return SGPR_32RegClassID;
  case OPW64: return SGPR_64RegClassID;
  case OPW128: return SGPR_128RegClassID;
  }
}

unsigned AMDGPUDisassembler::getTtmpClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;
  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fallthrough
  case OPW32:
  case OPW16:
  case OPWV216:
    return TTMP_32RegClassID;
  case OPW64: return TTMP_64RegClassID;
  case OPW128: return TTMP_128RegClassID;
  }
}

MCOperand AMDGPUDisassembler::decodeSrcOp(const OpWidthTy Width, unsigned Val) const {
  using namespace AMDGPU::EncValues;
  assert(Val < 512); // enum9

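  // A 9-bit source operand: check the VGPR, SGPR, TTMP, inline-constant and
  // literal ranges in turn; anything left over is a special scalar register.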
  if (VGPR_MIN <= Val && Val <= VGPR_MAX) {
    return createRegOperand(getVgprClassId(Width), Val - VGPR_MIN);
  }
  if (Val <= SGPR_MAX) {
    assert(SGPR_MIN == 0); // "SGPR_MIN <= Val" is always true and causes compilation warning.
    return createSRegOperand(getSgprClassId(Width), Val - SGPR_MIN);
  }
  if (TTMP_MIN <= Val && Val <= TTMP_MAX) {
    return createSRegOperand(getTtmpClassId(Width), Val - TTMP_MIN);
  }

  assert(Width == OPW16 || Width == OPW32 || Width == OPW64);

  if (INLINE_INTEGER_C_MIN <= Val && Val <= INLINE_INTEGER_C_MAX)
    return decodeIntImmed(Val);

  if (INLINE_FLOATING_C_MIN <= Val && Val <= INLINE_FLOATING_C_MAX)
    return decodeFPImmed(Width, Val);

  if (Val == LITERAL_CONST)
    return decodeLiteralConstant();

  switch (Width) {
  case OPW32:
  case OPW16:
  case OPWV216:
    return decodeSpecialReg32(Val);
  case OPW64:
    return decodeSpecialReg64(Val);
  default:
    llvm_unreachable("unexpected immediate type");
  }
}

MCOperand AMDGPUDisassembler::decodeSpecialReg32(unsigned Val) const {
  using namespace AMDGPU;
  switch (Val) {
  case 102: return createRegOperand(getMCReg(FLAT_SCR_LO, STI));
  case 103: return createRegOperand(getMCReg(FLAT_SCR_HI, STI));
    // ToDo: no support for xnack_mask_lo/_hi register
  case 104:
  case 105: break;
  case 106: return createRegOperand(VCC_LO);
  case 107: return createRegOperand(VCC_HI);
  case 108: return createRegOperand(TBA_LO);
  case 109: return createRegOperand(TBA_HI);
  case 110: return createRegOperand(TMA_LO);
  case 111: return createRegOperand(TMA_HI);
  case 124: return createRegOperand(M0);
  case 126: return createRegOperand(EXEC_LO);
  case 127: return createRegOperand(EXEC_HI);
  case 235: return createRegOperand(SRC_SHARED_BASE);
  case 236: return createRegOperand(SRC_SHARED_LIMIT);
  case 237: return createRegOperand(SRC_PRIVATE_BASE);
  case 238: return createRegOperand(SRC_PRIVATE_LIMIT);
    // TODO: SRC_POPS_EXITING_WAVE_ID
    // ToDo: no support for vccz register
  case 251: break;
    // ToDo: no support for execz register
  case 252: break;
  case 253: return createRegOperand(SCC);
  default: break;
  }
  return errOperand(Val, "unknown operand encoding " + Twine(Val));
}

MCOperand AMDGPUDisassembler::decodeSpecialReg64(unsigned Val) const {
  using namespace AMDGPU;
  switch (Val) {
  case 102: return createRegOperand(getMCReg(FLAT_SCR, STI));
  case 106: return createRegOperand(VCC);
  case 108: return createRegOperand(TBA);
  case 110: return createRegOperand(TMA);
  case 126: return createRegOperand(EXEC);
  default: break;
  }
  return errOperand(Val, "unknown operand encoding " + Twine(Val));
}

MCOperand AMDGPUDisassembler::decodeSDWA9Src(const OpWidthTy Width,
                                             unsigned Val) const {
  using namespace AMDGPU::SDWA;

  if (SDWA9EncValues::SRC_VGPR_MIN <= Val &&
      Val <= SDWA9EncValues::SRC_VGPR_MAX) {
    return createRegOperand(getVgprClassId(Width),
                            Val - SDWA9EncValues::SRC_VGPR_MIN);
  }
  if (SDWA9EncValues::SRC_SGPR_MIN <= Val &&
      Val <= SDWA9EncValues::SRC_SGPR_MAX) {
    return createSRegOperand(getSgprClassId(Width),
                             Val - SDWA9EncValues::SRC_SGPR_MIN);
  }

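  // Anything outside the VGPR/SGPR ranges reuses the regular 32-bit scalar
  // source encoding, rebased so it lines up with decodeSpecialReg32().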
  return decodeSpecialReg32(Val - SDWA9EncValues::SRC_SGPR_MIN);
}

MCOperand AMDGPUDisassembler::decodeSDWA9Src16(unsigned Val) const {
  return decodeSDWA9Src(OPW16, Val);
}

MCOperand AMDGPUDisassembler::decodeSDWA9Src32(unsigned Val) const {
  return decodeSDWA9Src(OPW32, Val);
}


MCOperand AMDGPUDisassembler::decodeSDWA9VopcDst(unsigned Val) const {
  using namespace AMDGPU::SDWA;

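  // If the VOPC_DST_VCC_MASK bit is set, the remaining bits select an SGPR
  // pair (or, beyond the SGPR range, a 64-bit special register); otherwise
  // the compare result implicitly goes to VCC.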
  if (Val & SDWA9EncValues::VOPC_DST_VCC_MASK) {
    Val &= SDWA9EncValues::VOPC_DST_SGPR_MASK;
    if (Val > AMDGPU::EncValues::SGPR_MAX) {
      return decodeSpecialReg64(Val);
    } else {
      return createSRegOperand(getSgprClassId(OPW64), Val);
    }
  } else {
    return createRegOperand(AMDGPU::VCC);
  }
}

//===----------------------------------------------------------------------===//
// AMDGPUSymbolizer
//===----------------------------------------------------------------------===//

// Try to find the symbol name for the specified label.
bool AMDGPUSymbolizer::tryAddingSymbolicOperand(MCInst &Inst,
                                raw_ostream &/*cStream*/, int64_t Value,
                                uint64_t /*Address*/, bool IsBranch,
                                uint64_t /*Offset*/, uint64_t /*InstSize*/) {
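  // DisInfo is expected to point at a vector of (address, name, ELF symbol
  // type) tuples for the section being disassembled.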
  typedef std::tuple<uint64_t, StringRef, uint8_t> SymbolInfoTy;
  typedef std::vector<SymbolInfoTy> SectionSymbolsTy;

  if (!IsBranch) {
    return false;
  }

  auto *Symbols = static_cast<SectionSymbolsTy *>(DisInfo);
  auto Result = std::find_if(Symbols->begin(), Symbols->end(),
                             [Value](const SymbolInfoTy& Val) {
                                return std::get<0>(Val) == static_cast<uint64_t>(Value)
                                    && std::get<2>(Val) == ELF::STT_NOTYPE;
                             });
  if (Result != Symbols->end()) {
    auto *Sym = Ctx.getOrCreateSymbol(std::get<1>(*Result));
    const auto *Add = MCSymbolRefExpr::create(Sym, Ctx);
    Inst.addOperand(MCOperand::createExpr(Add));
    return true;
  }
  return false;
}

void AMDGPUSymbolizer::tryAddingPcLoadReferenceComment(raw_ostream &cStream,
                                                       int64_t Value,
                                                       uint64_t Address) {
  llvm_unreachable("unimplemented");
}

//===----------------------------------------------------------------------===//
// Initialization
//===----------------------------------------------------------------------===//

static MCSymbolizer *createAMDGPUSymbolizer(const Triple &/*TT*/,
                              LLVMOpInfoCallback /*GetOpInfo*/,
                              LLVMSymbolLookupCallback /*SymbolLookUp*/,
                              void *DisInfo,
                              MCContext *Ctx,
                              std::unique_ptr<MCRelocationInfo> &&RelInfo) {
  return new AMDGPUSymbolizer(*Ctx, std::move(RelInfo), DisInfo);
}

static MCDisassembler *createAMDGPUDisassembler(const Target &T,
                                                const MCSubtargetInfo &STI,
                                                MCContext &Ctx) {
  return new AMDGPUDisassembler(STI, Ctx);
}

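// Registering with the TargetRegistry is what lets tools such as
// llvm-mc --disassemble and llvm-objdump -d pick up this disassembler and
// symbolizer for the GCN target.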
extern "C" void LLVMInitializeAMDGPUDisassembler() {
  TargetRegistry::RegisterMCDisassembler(getTheGCNTarget(),
                                         createAMDGPUDisassembler);
  TargetRegistry::RegisterMCSymbolizer(getTheGCNTarget(),
                                       createAMDGPUSymbolizer);
}
699