xref: /llvm-project/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp (revision 7238faa4ae977523903192e287d442eb53c49ee5)
1 //===- AMDGPUDisassembler.cpp - Disassembler for AMDGPU ISA ---------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 //===----------------------------------------------------------------------===//
10 //
11 /// \file
12 ///
13 /// This file contains the definition of the AMDGPU ISA disassembler.
14 //
15 //===----------------------------------------------------------------------===//
16 
17 // ToDo: What to do with instruction suffixes (v_mov_b32 vs v_mov_b32_e32)?
18 
19 #include "Disassembler/AMDGPUDisassembler.h"
20 #include "AMDGPU.h"
21 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
22 #include "SIDefines.h"
23 #include "TargetInfo/AMDGPUTargetInfo.h"
24 #include "Utils/AMDGPUBaseInfo.h"
25 #include "llvm-c/Disassembler.h"
26 #include "llvm/ADT/APInt.h"
27 #include "llvm/ADT/ArrayRef.h"
28 #include "llvm/ADT/Twine.h"
29 #include "llvm/BinaryFormat/ELF.h"
30 #include "llvm/MC/MCAsmInfo.h"
31 #include "llvm/MC/MCContext.h"
32 #include "llvm/MC/MCDisassembler/MCDisassembler.h"
33 #include "llvm/MC/MCExpr.h"
34 #include "llvm/MC/MCFixedLenDisassembler.h"
35 #include "llvm/MC/MCInst.h"
36 #include "llvm/MC/MCSubtargetInfo.h"
37 #include "llvm/Support/AMDHSAKernelDescriptor.h"
38 #include "llvm/Support/Endian.h"
39 #include "llvm/Support/ErrorHandling.h"
40 #include "llvm/Support/MathExtras.h"
41 #include "llvm/Support/TargetRegistry.h"
42 #include "llvm/Support/raw_ostream.h"
43 #include <algorithm>
44 #include <cassert>
45 #include <cstddef>
46 #include <cstdint>
47 #include <iterator>
48 #include <tuple>
49 #include <vector>
50 
51 using namespace llvm;
52 
53 #define DEBUG_TYPE "amdgpu-disassembler"
54 
55 #define SGPR_MAX (isGFX10() ? AMDGPU::EncValues::SGPR_MAX_GFX10 \
56                             : AMDGPU::EncValues::SGPR_MAX_SI)
57 
58 using DecodeStatus = llvm::MCDisassembler::DecodeStatus;
59 
60 AMDGPUDisassembler::AMDGPUDisassembler(const MCSubtargetInfo &STI,
61                                        MCContext &Ctx,
62                                        MCInstrInfo const *MCII) :
63   MCDisassembler(STI, Ctx), MCII(MCII), MRI(*Ctx.getRegisterInfo()),
64   TargetMaxInstBytes(Ctx.getAsmInfo()->getMaxInstLength(&STI)) {
65 
66   // ToDo: AMDGPUDisassembler supports only VI and newer ISAs.
67   if (!STI.getFeatureBits()[AMDGPU::FeatureGCN3Encoding] && !isGFX10())
68     report_fatal_error("Disassembly not yet supported for subtarget");
69 }
70 
71 inline static MCDisassembler::DecodeStatus
72 addOperand(MCInst &Inst, const MCOperand& Opnd) {
73   Inst.addOperand(Opnd);
74   return Opnd.isValid() ?
75     MCDisassembler::Success :
76     MCDisassembler::Fail;
77 }
78 
79 static int insertNamedMCOperand(MCInst &MI, const MCOperand &Op,
80                                 uint16_t NameIdx) {
81   int OpIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), NameIdx);
82   if (OpIdx != -1) {
83     auto I = MI.begin();
84     std::advance(I, OpIdx);
85     MI.insert(I, Op);
86   }
87   return OpIdx;
88 }
89 
90 static DecodeStatus decodeSoppBrTarget(MCInst &Inst, unsigned Imm,
91                                        uint64_t Addr, const void *Decoder) {
92   auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
93 
94   // Our branches take a simm16, but we need two extra bits to account for the
95   // factor of 4.
96   APInt SignedOffset(18, Imm * 4, true);
97   int64_t Offset = (SignedOffset.sext(64) + 4 + Addr).getSExtValue();
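  // For example, a raw Imm of 0xFFFC (simm16 -4) encodes a byte offset of -16,
  // so the symbolic target becomes Addr + 4 - 16.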
98 
99   if (DAsm->tryAddingSymbolicOperand(Inst, Offset, Addr, true, 2, 2))
100     return MCDisassembler::Success;
101   return addOperand(Inst, MCOperand::createImm(Imm));
102 }
103 
104 static DecodeStatus decodeSMEMOffset(MCInst &Inst, unsigned Imm,
105                                      uint64_t Addr, const void *Decoder) {
106   auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
107   int64_t Offset;
108   if (DAsm->isVI()) {         // VI supports 20-bit unsigned offsets.
109     Offset = Imm & 0xFFFFF;
110   } else {                    // GFX9+ supports 21-bit signed offsets.
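    // e.g. a raw Imm of 0x1FFFFF sign-extends to an offset of -1.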
111     Offset = SignExtend64<21>(Imm);
112   }
113   return addOperand(Inst, MCOperand::createImm(Offset));
114 }
115 
116 static DecodeStatus decodeBoolReg(MCInst &Inst, unsigned Val,
117                                   uint64_t Addr, const void *Decoder) {
118   auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
119   return addOperand(Inst, DAsm->decodeBoolReg(Val));
120 }
121 
122 #define DECODE_OPERAND(StaticDecoderName, DecoderName) \
123 static DecodeStatus StaticDecoderName(MCInst &Inst, \
124                                        unsigned Imm, \
125                                        uint64_t /*Addr*/, \
126                                        const void *Decoder) { \
127   auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder); \
128   return addOperand(Inst, DAsm->DecoderName(Imm)); \
129 }
130 
131 #define DECODE_OPERAND_REG(RegClass) \
132 DECODE_OPERAND(Decode##RegClass##RegisterClass, decodeOperand_##RegClass)
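// For example, DECODE_OPERAND_REG(VGPR_32) expands to a static callback named
// DecodeVGPR_32RegisterClass that forwards the immediate to
// AMDGPUDisassembler::decodeOperand_VGPR_32 and appends the result to the MCInst.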
133 
134 DECODE_OPERAND_REG(VGPR_32)
135 DECODE_OPERAND_REG(VRegOrLds_32)
136 DECODE_OPERAND_REG(VS_32)
137 DECODE_OPERAND_REG(VS_64)
138 DECODE_OPERAND_REG(VS_128)
139 
140 DECODE_OPERAND_REG(VReg_64)
141 DECODE_OPERAND_REG(VReg_96)
142 DECODE_OPERAND_REG(VReg_128)
143 DECODE_OPERAND_REG(VReg_256)
144 DECODE_OPERAND_REG(VReg_512)
145 
146 DECODE_OPERAND_REG(SReg_32)
147 DECODE_OPERAND_REG(SReg_32_XM0_XEXEC)
148 DECODE_OPERAND_REG(SReg_32_XEXEC_HI)
149 DECODE_OPERAND_REG(SRegOrLds_32)
150 DECODE_OPERAND_REG(SReg_64)
151 DECODE_OPERAND_REG(SReg_64_XEXEC)
152 DECODE_OPERAND_REG(SReg_128)
153 DECODE_OPERAND_REG(SReg_256)
154 DECODE_OPERAND_REG(SReg_512)
155 
156 DECODE_OPERAND_REG(AGPR_32)
157 DECODE_OPERAND_REG(AReg_128)
158 DECODE_OPERAND_REG(AReg_512)
159 DECODE_OPERAND_REG(AReg_1024)
160 DECODE_OPERAND_REG(AV_32)
161 DECODE_OPERAND_REG(AV_64)
162 
163 static DecodeStatus decodeOperand_VSrc16(MCInst &Inst,
164                                          unsigned Imm,
165                                          uint64_t Addr,
166                                          const void *Decoder) {
167   auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
168   return addOperand(Inst, DAsm->decodeOperand_VSrc16(Imm));
169 }
170 
171 static DecodeStatus decodeOperand_VSrcV216(MCInst &Inst,
172                                          unsigned Imm,
173                                          uint64_t Addr,
174                                          const void *Decoder) {
175   auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
176   return addOperand(Inst, DAsm->decodeOperand_VSrcV216(Imm));
177 }
178 
179 static DecodeStatus decodeOperand_VS_16(MCInst &Inst,
180                                         unsigned Imm,
181                                         uint64_t Addr,
182                                         const void *Decoder) {
183   auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
184   return addOperand(Inst, DAsm->decodeOperand_VSrc16(Imm));
185 }
186 
187 static DecodeStatus decodeOperand_VS_32(MCInst &Inst,
188                                         unsigned Imm,
189                                         uint64_t Addr,
190                                         const void *Decoder) {
191   auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
192   return addOperand(Inst, DAsm->decodeOperand_VS_32(Imm));
193 }
194 
195 static DecodeStatus decodeOperand_AReg_128(MCInst &Inst,
196                                            unsigned Imm,
197                                            uint64_t Addr,
198                                            const void *Decoder) {
199   auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
200   return addOperand(Inst, DAsm->decodeSrcOp(AMDGPUDisassembler::OPW128, Imm | 512));
201 }
202 
203 static DecodeStatus decodeOperand_AReg_512(MCInst &Inst,
204                                            unsigned Imm,
205                                            uint64_t Addr,
206                                            const void *Decoder) {
207   auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
208   return addOperand(Inst, DAsm->decodeSrcOp(AMDGPUDisassembler::OPW512, Imm | 512));
209 }
210 
211 static DecodeStatus decodeOperand_AReg_1024(MCInst &Inst,
212                                             unsigned Imm,
213                                             uint64_t Addr,
214                                             const void *Decoder) {
215   auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
216   return addOperand(Inst, DAsm->decodeSrcOp(AMDGPUDisassembler::OPW1024, Imm | 512));
217 }
218 
219 static DecodeStatus decodeOperand_SReg_32(MCInst &Inst,
220                                           unsigned Imm,
221                                           uint64_t Addr,
222                                           const void *Decoder) {
223   auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
224   return addOperand(Inst, DAsm->decodeOperand_SReg_32(Imm));
225 }
226 
227 static DecodeStatus decodeOperand_VGPR_32(MCInst &Inst,
228                                          unsigned Imm,
229                                          uint64_t Addr,
230                                          const void *Decoder) {
231   auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
232   return addOperand(Inst, DAsm->decodeSrcOp(AMDGPUDisassembler::OPW32, Imm));
233 }
234 
235 #define DECODE_SDWA(DecName) \
236 DECODE_OPERAND(decodeSDWA##DecName, decodeSDWA##DecName)
237 
238 DECODE_SDWA(Src32)
239 DECODE_SDWA(Src16)
240 DECODE_SDWA(VopcDst)
241 
242 #include "AMDGPUGenDisassemblerTables.inc"
243 
244 //===----------------------------------------------------------------------===//
245 //
246 //===----------------------------------------------------------------------===//
247 
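// Read a little-endian value of type T from the front of Bytes and advance the
// view past the consumed bytes.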
248 template <typename T> static inline T eatBytes(ArrayRef<uint8_t>& Bytes) {
249   assert(Bytes.size() >= sizeof(T));
250   const auto Res = support::endian::read<T, support::endianness::little>(Bytes.data());
251   Bytes = Bytes.slice(sizeof(T));
252   return Res;
253 }
254 
255 DecodeStatus AMDGPUDisassembler::tryDecodeInst(const uint8_t* Table,
256                                                MCInst &MI,
257                                                uint64_t Inst,
258                                                uint64_t Address) const {
259   assert(MI.getOpcode() == 0);
260   assert(MI.getNumOperands() == 0);
261   MCInst TmpInst;
262   HasLiteral = false;
263   const auto SavedBytes = Bytes;
264   if (decodeInstruction(Table, TmpInst, Inst, Address, this, STI)) {
265     MI = TmpInst;
266     return MCDisassembler::Success;
267   }
268   Bytes = SavedBytes;
269   return MCDisassembler::Fail;
270 }
271 
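// A decoded DPP8 instruction is accepted only if its fi operand holds one of
// the two values defined by the encoding (DPP8_FI_0 or DPP8_FI_1).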
272 static bool isValidDPP8(const MCInst &MI) {
273   using namespace llvm::AMDGPU::DPP;
274   int FiIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::fi);
275   assert(FiIdx != -1);
276   if ((unsigned)FiIdx >= MI.getNumOperands())
277     return false;
278   unsigned Fi = MI.getOperand(FiIdx).getImm();
279   return Fi == DPP8_FI_0 || Fi == DPP8_FI_1;
280 }
281 
282 DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
283                                                 ArrayRef<uint8_t> Bytes_,
284                                                 uint64_t Address,
285                                                 raw_ostream &CS) const {
286   CommentStream = &CS;
287   bool IsSDWA = false;
288 
289   unsigned MaxInstBytesNum = std::min((size_t)TargetMaxInstBytes, Bytes_.size());
290   Bytes = Bytes_.slice(0, MaxInstBytesNum);
291 
292   DecodeStatus Res = MCDisassembler::Fail;
293   do {
294     // ToDo: it would be better to select the encoding width using some bit
295     // predicate, but none is known yet, so try all the widths we can.
296 
297     // Try to decode DPP and SDWA first to solve conflict with VOP1 and VOP2
298     // encodings
299     if (Bytes.size() >= 8) {
300       const uint64_t QW = eatBytes<uint64_t>(Bytes);
301 
302       if (STI.getFeatureBits()[AMDGPU::FeatureGFX10_BEncoding]) {
303         Res = tryDecodeInst(DecoderTableGFX10_B64, MI, QW, Address);
304         if (Res) {
305           if (AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::dpp8)
306               == -1)
307             break;
308           if (convertDPP8Inst(MI) == MCDisassembler::Success)
309             break;
310           MI = MCInst(); // clear
311         }
312       }
313 
314       Res = tryDecodeInst(DecoderTableDPP864, MI, QW, Address);
315       if (Res && convertDPP8Inst(MI) == MCDisassembler::Success)
316         break;
317 
318       MI = MCInst(); // clear
319 
320       Res = tryDecodeInst(DecoderTableDPP64, MI, QW, Address);
321       if (Res) break;
322 
323       Res = tryDecodeInst(DecoderTableSDWA64, MI, QW, Address);
324       if (Res) { IsSDWA = true;  break; }
325 
326       Res = tryDecodeInst(DecoderTableSDWA964, MI, QW, Address);
327       if (Res) { IsSDWA = true;  break; }
328 
329       Res = tryDecodeInst(DecoderTableSDWA1064, MI, QW, Address);
330       if (Res) { IsSDWA = true;  break; }
331 
332       if (STI.getFeatureBits()[AMDGPU::FeatureUnpackedD16VMem]) {
333         Res = tryDecodeInst(DecoderTableGFX80_UNPACKED64, MI, QW, Address);
334         if (Res)
335           break;
336       }
337 
338       // Some GFX9 subtargets repurposed the v_mad_mix_f32, v_mad_mixlo_f16 and
339       // v_mad_mixhi_f16 for FMA variants. Try to decode using this special
340       // table first so we print the correct name.
341       if (STI.getFeatureBits()[AMDGPU::FeatureFmaMixInsts]) {
342         Res = tryDecodeInst(DecoderTableGFX9_DL64, MI, QW, Address);
343         if (Res)
344           break;
345       }
346     }
347 
348     // Reinitialize Bytes as DPP64 could have eaten too much
349     Bytes = Bytes_.slice(0, MaxInstBytesNum);
350 
351     // Try decode 32-bit instruction
352     if (Bytes.size() < 4) break;
353     const uint32_t DW = eatBytes<uint32_t>(Bytes);
354     Res = tryDecodeInst(DecoderTableGFX832, MI, DW, Address);
355     if (Res) break;
356 
357     Res = tryDecodeInst(DecoderTableAMDGPU32, MI, DW, Address);
358     if (Res) break;
359 
360     Res = tryDecodeInst(DecoderTableGFX932, MI, DW, Address);
361     if (Res) break;
362 
363     if (STI.getFeatureBits()[AMDGPU::FeatureGFX10_BEncoding]) {
364       Res = tryDecodeInst(DecoderTableGFX10_B32, MI, DW, Address);
365       if (Res) break;
366     }
367 
368     Res = tryDecodeInst(DecoderTableGFX1032, MI, DW, Address);
369     if (Res) break;
370 
371     if (Bytes.size() < 4) break;
372     const uint64_t QW = ((uint64_t)eatBytes<uint32_t>(Bytes) << 32) | DW;
373     Res = tryDecodeInst(DecoderTableGFX864, MI, QW, Address);
374     if (Res) break;
375 
376     Res = tryDecodeInst(DecoderTableAMDGPU64, MI, QW, Address);
377     if (Res) break;
378 
379     Res = tryDecodeInst(DecoderTableGFX964, MI, QW, Address);
380     if (Res) break;
381 
382     Res = tryDecodeInst(DecoderTableGFX1064, MI, QW, Address);
383   } while (false);
384 
385   if (Res && (MI.getOpcode() == AMDGPU::V_MAC_F32_e64_vi ||
386               MI.getOpcode() == AMDGPU::V_MAC_F32_e64_gfx6_gfx7 ||
387               MI.getOpcode() == AMDGPU::V_MAC_F32_e64_gfx10 ||
388               MI.getOpcode() == AMDGPU::V_MAC_LEGACY_F32_e64_gfx6_gfx7 ||
389               MI.getOpcode() == AMDGPU::V_MAC_LEGACY_F32_e64_gfx10 ||
390               MI.getOpcode() == AMDGPU::V_MAC_F16_e64_vi ||
391               MI.getOpcode() == AMDGPU::V_FMAC_F32_e64_vi ||
392               MI.getOpcode() == AMDGPU::V_FMAC_F32_e64_gfx10 ||
393               MI.getOpcode() == AMDGPU::V_FMAC_F16_e64_gfx10)) {
394     // Insert dummy unused src2_modifiers.
395     insertNamedMCOperand(MI, MCOperand::createImm(0),
396                          AMDGPU::OpName::src2_modifiers);
397   }
398 
399   if (Res && (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::MIMG)) {
400     int VAddr0Idx =
401         AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0);
402     int RsrcIdx =
403         AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc);
404     unsigned NSAArgs = RsrcIdx - VAddr0Idx - 1;
405     if (VAddr0Idx >= 0 && NSAArgs > 0) {
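      // Each extra NSA address register is encoded in one byte, packed four to
      // a dword, so round the byte count up to whole dwords.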
406       unsigned NSAWords = (NSAArgs + 3) / 4;
407       if (Bytes.size() < 4 * NSAWords) {
408         Res = MCDisassembler::Fail;
409       } else {
410         for (unsigned i = 0; i < NSAArgs; ++i) {
411           MI.insert(MI.begin() + VAddr0Idx + 1 + i,
412                     decodeOperand_VGPR_32(Bytes[i]));
413         }
414         Bytes = Bytes.slice(4 * NSAWords);
415       }
416     }
417 
418     if (Res)
419       Res = convertMIMGInst(MI);
420   }
421 
422   if (Res && IsSDWA)
423     Res = convertSDWAInst(MI);
424 
425   int VDstIn_Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
426                                               AMDGPU::OpName::vdst_in);
427   if (VDstIn_Idx != -1) {
428     int Tied = MCII->get(MI.getOpcode()).getOperandConstraint(VDstIn_Idx,
429                            MCOI::OperandConstraint::TIED_TO);
430     if (Tied != -1 && (MI.getNumOperands() <= (unsigned)VDstIn_Idx ||
431          !MI.getOperand(VDstIn_Idx).isReg() ||
432          MI.getOperand(VDstIn_Idx).getReg() != MI.getOperand(Tied).getReg())) {
433       if (MI.getNumOperands() > (unsigned)VDstIn_Idx)
434         MI.erase(&MI.getOperand(VDstIn_Idx));
435       insertNamedMCOperand(MI,
436         MCOperand::createReg(MI.getOperand(Tied).getReg()),
437         AMDGPU::OpName::vdst_in);
438     }
439   }
440 
441   // If the opcode was not recognized, we'll assume a Size of 4 bytes
442   // (unless there are fewer bytes left).
443   Size = Res ? (MaxInstBytesNum - Bytes.size())
444              : std::min((size_t)4, Bytes_.size());
445   return Res;
446 }
447 
448 DecodeStatus AMDGPUDisassembler::convertSDWAInst(MCInst &MI) const {
449   if (STI.getFeatureBits()[AMDGPU::FeatureGFX9] ||
450       STI.getFeatureBits()[AMDGPU::FeatureGFX10]) {
451     if (AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst) != -1)
452       // VOPC - insert clamp
453       insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::clamp);
454   } else if (STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands]) {
455     int SDst = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst);
456     if (SDst != -1) {
457       // VOPC - insert VCC register as sdst
458       insertNamedMCOperand(MI, createRegOperand(AMDGPU::VCC),
459                            AMDGPU::OpName::sdst);
460     } else {
461       // VOP1/2 - insert omod if present in instruction
462       insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::omod);
463     }
464   }
465   return MCDisassembler::Success;
466 }
467 
468 DecodeStatus AMDGPUDisassembler::convertDPP8Inst(MCInst &MI) const {
469   unsigned Opc = MI.getOpcode();
470   unsigned DescNumOps = MCII->get(Opc).getNumOperands();
471 
472   // Insert dummy unused src modifiers.
473   if (MI.getNumOperands() < DescNumOps &&
474       AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers) != -1)
475     insertNamedMCOperand(MI, MCOperand::createImm(0),
476                          AMDGPU::OpName::src0_modifiers);
477 
478   if (MI.getNumOperands() < DescNumOps &&
479       AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1_modifiers) != -1)
480     insertNamedMCOperand(MI, MCOperand::createImm(0),
481                          AMDGPU::OpName::src1_modifiers);
482 
483   return isValidDPP8(MI) ? MCDisassembler::Success : MCDisassembler::SoftFail;
484 }
485 
486 // Note that before gfx10, the MIMG encoding provided no information about
487 // VADDR size. Consequently, decoded instructions always show the address as if
488 // it had 1 dword, which may not match the actual size.
489 DecodeStatus AMDGPUDisassembler::convertMIMGInst(MCInst &MI) const {
490 
491   int VDstIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
492                                            AMDGPU::OpName::vdst);
493 
494   int VDataIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
495                                             AMDGPU::OpName::vdata);
496   int VAddr0Idx =
497       AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0);
498   int DMaskIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
499                                             AMDGPU::OpName::dmask);
500 
501   int TFEIdx   = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
502                                             AMDGPU::OpName::tfe);
503   int D16Idx   = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
504                                             AMDGPU::OpName::d16);
505 
506   assert(VDataIdx != -1);
507   if (DMaskIdx == -1 || TFEIdx == -1) { // intersect_ray
508     if (AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::a16) > -1) {
509       assert(MI.getOpcode() == AMDGPU::IMAGE_BVH_INTERSECT_RAY_a16_sa ||
510              MI.getOpcode() == AMDGPU::IMAGE_BVH_INTERSECT_RAY_a16_nsa ||
511              MI.getOpcode() == AMDGPU::IMAGE_BVH64_INTERSECT_RAY_a16_sa ||
512              MI.getOpcode() == AMDGPU::IMAGE_BVH64_INTERSECT_RAY_a16_nsa);
513       addOperand(MI, MCOperand::createImm(1));
514     }
515     return MCDisassembler::Success;
516   }
517 
518   const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(MI.getOpcode());
519   bool IsAtomic = (VDstIdx != -1);
520   bool IsGather4 = MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::Gather4;
521 
522   bool IsNSA = false;
523   unsigned AddrSize = Info->VAddrDwords;
524 
525   if (STI.getFeatureBits()[AMDGPU::FeatureGFX10]) {
526     unsigned DimIdx =
527         AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::dim);
528     const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
529         AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode);
530     const AMDGPU::MIMGDimInfo *Dim =
531         AMDGPU::getMIMGDimInfoByEncoding(MI.getOperand(DimIdx).getImm());
532 
533     AddrSize = BaseOpcode->NumExtraArgs +
534                (BaseOpcode->Gradients ? Dim->NumGradients : 0) +
535                (BaseOpcode->Coordinates ? Dim->NumCoords : 0) +
536                (BaseOpcode->LodOrClampOrMip ? 1 : 0);
537     IsNSA = Info->MIMGEncoding == AMDGPU::MIMGEncGfx10NSA;
538     if (!IsNSA) {
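      // Non-NSA encodings only come in address sizes of 1-4, 8 or 16 dwords,
      // so round the computed size up to the next supported allocation.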
539       if (AddrSize > 8)
540         AddrSize = 16;
541       else if (AddrSize > 4)
542         AddrSize = 8;
543     } else {
544       if (AddrSize > Info->VAddrDwords) {
545         // The NSA encoding does not contain enough operands for the combination
546         // of base opcode / dimension. Should this be an error?
547         return MCDisassembler::Success;
548       }
549     }
550   }
551 
552   unsigned DMask = MI.getOperand(DMaskIdx).getImm() & 0xf;
553   unsigned DstSize = IsGather4 ? 4 : std::max(countPopulation(DMask), 1u);
554 
555   bool D16 = D16Idx >= 0 && MI.getOperand(D16Idx).getImm();
556   if (D16 && AMDGPU::hasPackedD16(STI)) {
557     DstSize = (DstSize + 1) / 2;
558   }
559 
560   // FIXME: Add tfe support
561   if (MI.getOperand(TFEIdx).getImm())
562     return MCDisassembler::Success;
563 
564   if (DstSize == Info->VDataDwords && AddrSize == Info->VAddrDwords)
565     return MCDisassembler::Success;
566 
567   int NewOpcode =
568       AMDGPU::getMIMGOpcode(Info->BaseOpcode, Info->MIMGEncoding, DstSize, AddrSize);
569   if (NewOpcode == -1)
570     return MCDisassembler::Success;
571 
572   // Widen the register to the correct number of enabled channels.
573   unsigned NewVdata = AMDGPU::NoRegister;
574   if (DstSize != Info->VDataDwords) {
575     auto DataRCID = MCII->get(NewOpcode).OpInfo[VDataIdx].RegClass;
576 
577     // Get first subregister of VData
578     unsigned Vdata0 = MI.getOperand(VDataIdx).getReg();
579     unsigned VdataSub0 = MRI.getSubReg(Vdata0, AMDGPU::sub0);
580     Vdata0 = (VdataSub0 != 0)? VdataSub0 : Vdata0;
581 
582     NewVdata = MRI.getMatchingSuperReg(Vdata0, AMDGPU::sub0,
583                                        &MRI.getRegClass(DataRCID));
584     if (NewVdata == AMDGPU::NoRegister) {
585       // It's possible to encode this such that the low register + enabled
586       // components exceeds the register count.
587       return MCDisassembler::Success;
588     }
589   }
590 
591   unsigned NewVAddr0 = AMDGPU::NoRegister;
592   if (STI.getFeatureBits()[AMDGPU::FeatureGFX10] && !IsNSA &&
593       AddrSize != Info->VAddrDwords) {
594     unsigned VAddr0 = MI.getOperand(VAddr0Idx).getReg();
595     unsigned VAddrSub0 = MRI.getSubReg(VAddr0, AMDGPU::sub0);
596     VAddr0 = (VAddrSub0 != 0) ? VAddrSub0 : VAddr0;
597 
598     auto AddrRCID = MCII->get(NewOpcode).OpInfo[VAddr0Idx].RegClass;
599     NewVAddr0 = MRI.getMatchingSuperReg(VAddr0, AMDGPU::sub0,
600                                         &MRI.getRegClass(AddrRCID));
601     if (NewVAddr0 == AMDGPU::NoRegister)
602       return MCDisassembler::Success;
603   }
604 
605   MI.setOpcode(NewOpcode);
606 
607   if (NewVdata != AMDGPU::NoRegister) {
608     MI.getOperand(VDataIdx) = MCOperand::createReg(NewVdata);
609 
610     if (IsAtomic) {
611       // Atomic operations have an additional operand (a copy of data)
612       MI.getOperand(VDstIdx) = MCOperand::createReg(NewVdata);
613     }
614   }
615 
616   if (NewVAddr0 != AMDGPU::NoRegister) {
617     MI.getOperand(VAddr0Idx) = MCOperand::createReg(NewVAddr0);
618   } else if (IsNSA) {
619     assert(AddrSize <= Info->VAddrDwords);
620     MI.erase(MI.begin() + VAddr0Idx + AddrSize,
621              MI.begin() + VAddr0Idx + Info->VAddrDwords);
622   }
623 
624   return MCDisassembler::Success;
625 }
626 
627 const char* AMDGPUDisassembler::getRegClassName(unsigned RegClassID) const {
628   return getContext().getRegisterInfo()->
629     getRegClassName(&AMDGPUMCRegisterClasses[RegClassID]);
630 }
631 
632 inline
633 MCOperand AMDGPUDisassembler::errOperand(unsigned V,
634                                          const Twine& ErrMsg) const {
635   *CommentStream << "Error: " + ErrMsg;
636 
637   // ToDo: add support for error operands to MCInst.h
638   // return MCOperand::createError(V);
639   return MCOperand();
640 }
641 
642 inline
643 MCOperand AMDGPUDisassembler::createRegOperand(unsigned int RegId) const {
644   return MCOperand::createReg(AMDGPU::getMCReg(RegId, STI));
645 }
646 
647 inline
648 MCOperand AMDGPUDisassembler::createRegOperand(unsigned RegClassID,
649                                                unsigned Val) const {
650   const auto& RegCl = AMDGPUMCRegisterClasses[RegClassID];
651   if (Val >= RegCl.getNumRegs())
652     return errOperand(Val, Twine(getRegClassName(RegClassID)) +
653                            ": unknown register " + Twine(Val));
654   return createRegOperand(RegCl.getRegister(Val));
655 }
656 
657 inline
658 MCOperand AMDGPUDisassembler::createSRegOperand(unsigned SRegClassID,
659                                                 unsigned Val) const {
660   // ToDo: SI/CI have 104 SGPRs, VI has 102.
661   // Valery: here we accept as much as we can and let the assembler sort it out.
662   int shift = 0;
663   switch (SRegClassID) {
664   case AMDGPU::SGPR_32RegClassID:
665   case AMDGPU::TTMP_32RegClassID:
666     break;
667   case AMDGPU::SGPR_64RegClassID:
668   case AMDGPU::TTMP_64RegClassID:
669     shift = 1;
670     break;
671   case AMDGPU::SGPR_128RegClassID:
672   case AMDGPU::TTMP_128RegClassID:
673   // ToDo: unclear if s[100:104] is available on VI. Can we use VCC as SGPR in
674   // this bundle?
675   case AMDGPU::SGPR_256RegClassID:
676   case AMDGPU::TTMP_256RegClassID:
677   // ToDo: unclear if s[96:104] is available on VI. Can we use VCC as SGPR in
678   // this bundle?
679   case AMDGPU::SGPR_512RegClassID:
680   case AMDGPU::TTMP_512RegClassID:
681     shift = 2;
682     break;
683   // ToDo: unclear if s[88:104] is available on VI. Can we use VCC as SGPR in
684   // this bundle?
685   default:
686     llvm_unreachable("unhandled register class");
687   }
688 
689   if (Val % (1 << shift)) {
690     *CommentStream << "Warning: " << getRegClassName(SRegClassID)
691                    << ": scalar reg isn't aligned " << Val;
692   }
693 
694   return createRegOperand(SRegClassID, Val >> shift);
695 }
696 
697 MCOperand AMDGPUDisassembler::decodeOperand_VS_32(unsigned Val) const {
698   return decodeSrcOp(OPW32, Val);
699 }
700 
701 MCOperand AMDGPUDisassembler::decodeOperand_VS_64(unsigned Val) const {
702   return decodeSrcOp(OPW64, Val);
703 }
704 
705 MCOperand AMDGPUDisassembler::decodeOperand_VS_128(unsigned Val) const {
706   return decodeSrcOp(OPW128, Val);
707 }
708 
709 MCOperand AMDGPUDisassembler::decodeOperand_VSrc16(unsigned Val) const {
710   return decodeSrcOp(OPW16, Val);
711 }
712 
713 MCOperand AMDGPUDisassembler::decodeOperand_VSrcV216(unsigned Val) const {
714   return decodeSrcOp(OPWV216, Val);
715 }
716 
717 MCOperand AMDGPUDisassembler::decodeOperand_VGPR_32(unsigned Val) const {
718   // Some instructions have operand restrictions beyond what the encoding
719   // allows. Some ordinarily VSrc_32 operands are VGPR_32, so clear the extra
720   // high bit.
721   Val &= 255;
722 
723   return createRegOperand(AMDGPU::VGPR_32RegClassID, Val);
724 }
725 
726 MCOperand AMDGPUDisassembler::decodeOperand_VRegOrLds_32(unsigned Val) const {
727   return decodeSrcOp(OPW32, Val);
728 }
729 
730 MCOperand AMDGPUDisassembler::decodeOperand_AGPR_32(unsigned Val) const {
731   return createRegOperand(AMDGPU::AGPR_32RegClassID, Val & 255);
732 }
733 
734 MCOperand AMDGPUDisassembler::decodeOperand_AReg_128(unsigned Val) const {
735   return createRegOperand(AMDGPU::AReg_128RegClassID, Val & 255);
736 }
737 
738 MCOperand AMDGPUDisassembler::decodeOperand_AReg_512(unsigned Val) const {
739   return createRegOperand(AMDGPU::AReg_512RegClassID, Val & 255);
740 }
741 
742 MCOperand AMDGPUDisassembler::decodeOperand_AReg_1024(unsigned Val) const {
743   return createRegOperand(AMDGPU::AReg_1024RegClassID, Val & 255);
744 }
745 
746 MCOperand AMDGPUDisassembler::decodeOperand_AV_32(unsigned Val) const {
747   return decodeSrcOp(OPW32, Val);
748 }
749 
750 MCOperand AMDGPUDisassembler::decodeOperand_AV_64(unsigned Val) const {
751   return decodeSrcOp(OPW64, Val);
752 }
753 
754 MCOperand AMDGPUDisassembler::decodeOperand_VReg_64(unsigned Val) const {
755   return createRegOperand(AMDGPU::VReg_64RegClassID, Val);
756 }
757 
758 MCOperand AMDGPUDisassembler::decodeOperand_VReg_96(unsigned Val) const {
759   return createRegOperand(AMDGPU::VReg_96RegClassID, Val);
760 }
761 
762 MCOperand AMDGPUDisassembler::decodeOperand_VReg_128(unsigned Val) const {
763   return createRegOperand(AMDGPU::VReg_128RegClassID, Val);
764 }
765 
766 MCOperand AMDGPUDisassembler::decodeOperand_VReg_256(unsigned Val) const {
767   return createRegOperand(AMDGPU::VReg_256RegClassID, Val);
768 }
769 
770 MCOperand AMDGPUDisassembler::decodeOperand_VReg_512(unsigned Val) const {
771   return createRegOperand(AMDGPU::VReg_512RegClassID, Val);
772 }
773 
774 MCOperand AMDGPUDisassembler::decodeOperand_SReg_32(unsigned Val) const {
775   // The table-gen'd disassembler doesn't care about operand types; it keeps
776   // only the register class, so an SSrc_32 operand turns into SReg_32 and we
777   // therefore accept immediates and literals here as well.
778   return decodeSrcOp(OPW32, Val);
779 }
780 
781 MCOperand AMDGPUDisassembler::decodeOperand_SReg_32_XM0_XEXEC(
782   unsigned Val) const {
783   // SReg_32_XM0_XEXEC is SReg_32 without M0 or EXEC_LO/EXEC_HI.
784   return decodeOperand_SReg_32(Val);
785 }
786 
787 MCOperand AMDGPUDisassembler::decodeOperand_SReg_32_XEXEC_HI(
788   unsigned Val) const {
789   // SReg_32_XEXEC_HI is SReg_32 without EXEC_HI.
790   return decodeOperand_SReg_32(Val);
791 }
792 
793 MCOperand AMDGPUDisassembler::decodeOperand_SRegOrLds_32(unsigned Val) const {
794   // The table-gen'd disassembler doesn't care about operand types; it keeps
795   // only the register class, so an SSrc_32 operand turns into SReg_32 and we
796   // therefore accept immediates and literals here as well.
797   return decodeSrcOp(OPW32, Val);
798 }
799 
800 MCOperand AMDGPUDisassembler::decodeOperand_SReg_64(unsigned Val) const {
801   return decodeSrcOp(OPW64, Val);
802 }
803 
804 MCOperand AMDGPUDisassembler::decodeOperand_SReg_64_XEXEC(unsigned Val) const {
805   return decodeSrcOp(OPW64, Val);
806 }
807 
808 MCOperand AMDGPUDisassembler::decodeOperand_SReg_128(unsigned Val) const {
809   return decodeSrcOp(OPW128, Val);
810 }
811 
812 MCOperand AMDGPUDisassembler::decodeOperand_SReg_256(unsigned Val) const {
813   return decodeDstOp(OPW256, Val);
814 }
815 
816 MCOperand AMDGPUDisassembler::decodeOperand_SReg_512(unsigned Val) const {
817   return decodeDstOp(OPW512, Val);
818 }
819 
820 MCOperand AMDGPUDisassembler::decodeLiteralConstant() const {
821   // For now all literal constants are assumed to be unsigned integers.
822   // ToDo: deal with signed/unsigned 64-bit integer constants
823   // ToDo: deal with float/double constants
824   if (!HasLiteral) {
825     if (Bytes.size() < 4) {
826       return errOperand(0, "cannot read literal, inst bytes left " +
827                         Twine(Bytes.size()));
828     }
829     HasLiteral = true;
830     Literal = eatBytes<uint32_t>(Bytes);
831   }
832   return MCOperand::createImm(Literal);
833 }
834 
835 MCOperand AMDGPUDisassembler::decodeIntImmed(unsigned Imm) {
836   using namespace AMDGPU::EncValues;
837 
838   assert(Imm >= INLINE_INTEGER_C_MIN && Imm <= INLINE_INTEGER_C_MAX);
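  // With this encoding 128 maps to 0, so e.g. 129 decodes to 1 and 193 decodes
  // to -1.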
839   return MCOperand::createImm((Imm <= INLINE_INTEGER_C_POSITIVE_MAX) ?
840     (static_cast<int64_t>(Imm) - INLINE_INTEGER_C_MIN) :
841     (INLINE_INTEGER_C_POSITIVE_MAX - static_cast<int64_t>(Imm)));
842       // Cast prevents negative overflow.
843 }
844 
845 static int64_t getInlineImmVal32(unsigned Imm) {
846   switch (Imm) {
847   case 240:
848     return FloatToBits(0.5f);
849   case 241:
850     return FloatToBits(-0.5f);
851   case 242:
852     return FloatToBits(1.0f);
853   case 243:
854     return FloatToBits(-1.0f);
855   case 244:
856     return FloatToBits(2.0f);
857   case 245:
858     return FloatToBits(-2.0f);
859   case 246:
860     return FloatToBits(4.0f);
861   case 247:
862     return FloatToBits(-4.0f);
863   case 248: // 1 / (2 * PI)
864     return 0x3e22f983;
865   default:
866     llvm_unreachable("invalid fp inline imm");
867   }
868 }
869 
870 static int64_t getInlineImmVal64(unsigned Imm) {
871   switch (Imm) {
872   case 240:
873     return DoubleToBits(0.5);
874   case 241:
875     return DoubleToBits(-0.5);
876   case 242:
877     return DoubleToBits(1.0);
878   case 243:
879     return DoubleToBits(-1.0);
880   case 244:
881     return DoubleToBits(2.0);
882   case 245:
883     return DoubleToBits(-2.0);
884   case 246:
885     return DoubleToBits(4.0);
886   case 247:
887     return DoubleToBits(-4.0);
888   case 248: // 1 / (2 * PI)
889     return 0x3fc45f306dc9c882;
890   default:
891     llvm_unreachable("invalid fp inline imm");
892   }
893 }
894 
895 static int64_t getInlineImmVal16(unsigned Imm) {
896   switch (Imm) {
897   case 240:
898     return 0x3800;
899   case 241:
900     return 0xB800;
901   case 242:
902     return 0x3C00;
903   case 243:
904     return 0xBC00;
905   case 244:
906     return 0x4000;
907   case 245:
908     return 0xC000;
909   case 246:
910     return 0x4400;
911   case 247:
912     return 0xC400;
913   case 248: // 1 / (2 * PI)
914     return 0x3118;
915   default:
916     llvm_unreachable("invalid fp inline imm");
917   }
918 }
919 
920 MCOperand AMDGPUDisassembler::decodeFPImmed(OpWidthTy Width, unsigned Imm) {
921   assert(Imm >= AMDGPU::EncValues::INLINE_FLOATING_C_MIN
922       && Imm <= AMDGPU::EncValues::INLINE_FLOATING_C_MAX);
923 
924   // ToDo: case 248 (1/(2*PI)) is allowed only on VI.
925   switch (Width) {
926   case OPW32:
927   case OPW128: // splat constants
928   case OPW512:
929   case OPW1024:
930     return MCOperand::createImm(getInlineImmVal32(Imm));
931   case OPW64:
932     return MCOperand::createImm(getInlineImmVal64(Imm));
933   case OPW16:
934   case OPWV216:
935     return MCOperand::createImm(getInlineImmVal16(Imm));
936   default:
937     llvm_unreachable("implement me");
938   }
939 }
940 
941 unsigned AMDGPUDisassembler::getVgprClassId(const OpWidthTy Width) const {
942   using namespace AMDGPU;
943 
944   assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
945   switch (Width) {
946   default: // fall
947   case OPW32:
948   case OPW16:
949   case OPWV216:
950     return VGPR_32RegClassID;
951   case OPW64: return VReg_64RegClassID;
952   case OPW128: return VReg_128RegClassID;
953   }
954 }
955 
956 unsigned AMDGPUDisassembler::getAgprClassId(const OpWidthTy Width) const {
957   using namespace AMDGPU;
958 
959   assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
960   switch (Width) {
961   default: // fall
962   case OPW32:
963   case OPW16:
964   case OPWV216:
965     return AGPR_32RegClassID;
966   case OPW64: return AReg_64RegClassID;
967   case OPW128: return AReg_128RegClassID;
968   case OPW256: return AReg_256RegClassID;
969   case OPW512: return AReg_512RegClassID;
970   case OPW1024: return AReg_1024RegClassID;
971   }
972 }
973 
974 
975 unsigned AMDGPUDisassembler::getSgprClassId(const OpWidthTy Width) const {
976   using namespace AMDGPU;
977 
978   assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
979   switch (Width) {
980   default: // fall
981   case OPW32:
982   case OPW16:
983   case OPWV216:
984     return SGPR_32RegClassID;
985   case OPW64: return SGPR_64RegClassID;
986   case OPW128: return SGPR_128RegClassID;
987   case OPW256: return SGPR_256RegClassID;
988   case OPW512: return SGPR_512RegClassID;
989   }
990 }
991 
992 unsigned AMDGPUDisassembler::getTtmpClassId(const OpWidthTy Width) const {
993   using namespace AMDGPU;
994 
995   assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
996   switch (Width) {
997   default: // fall
998   case OPW32:
999   case OPW16:
1000   case OPWV216:
1001     return TTMP_32RegClassID;
1002   case OPW64: return TTMP_64RegClassID;
1003   case OPW128: return TTMP_128RegClassID;
1004   case OPW256: return TTMP_256RegClassID;
1005   case OPW512: return TTMP_512RegClassID;
1006   }
1007 }
1008 
1009 int AMDGPUDisassembler::getTTmpIdx(unsigned Val) const {
1010   using namespace AMDGPU::EncValues;
1011 
1012   unsigned TTmpMin =
1013       (isGFX9() || isGFX10()) ? TTMP_GFX9_GFX10_MIN : TTMP_VI_MIN;
1014   unsigned TTmpMax =
1015       (isGFX9() || isGFX10()) ? TTMP_GFX9_GFX10_MAX : TTMP_VI_MAX;
1016 
1017   return (TTmpMin <= Val && Val <= TTmpMax)? Val - TTmpMin : -1;
1018 }
1019 
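// Decode a source operand from its enum10 encoding: bit 9 selects the AGPR
// file, encodings 256-511 address VGPRs, and lower values are SGPRs, TTMPs,
// inline constants, the literal marker, or special registers.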
1020 MCOperand AMDGPUDisassembler::decodeSrcOp(const OpWidthTy Width, unsigned Val) const {
1021   using namespace AMDGPU::EncValues;
1022 
1023   assert(Val < 1024); // enum10
1024 
1025   bool IsAGPR = Val & 512;
1026   Val &= 511;
1027 
1028   if (VGPR_MIN <= Val && Val <= VGPR_MAX) {
1029     return createRegOperand(IsAGPR ? getAgprClassId(Width)
1030                                    : getVgprClassId(Width), Val - VGPR_MIN);
1031   }
1032   if (Val <= SGPR_MAX) {
1033     assert(SGPR_MIN == 0); // "SGPR_MIN <= Val" is always true and causes compilation warning.
1034     return createSRegOperand(getSgprClassId(Width), Val - SGPR_MIN);
1035   }
1036 
1037   int TTmpIdx = getTTmpIdx(Val);
1038   if (TTmpIdx >= 0) {
1039     return createSRegOperand(getTtmpClassId(Width), TTmpIdx);
1040   }
1041 
1042   if (INLINE_INTEGER_C_MIN <= Val && Val <= INLINE_INTEGER_C_MAX)
1043     return decodeIntImmed(Val);
1044 
1045   if (INLINE_FLOATING_C_MIN <= Val && Val <= INLINE_FLOATING_C_MAX)
1046     return decodeFPImmed(Width, Val);
1047 
1048   if (Val == LITERAL_CONST)
1049     return decodeLiteralConstant();
1050 
1051   switch (Width) {
1052   case OPW32:
1053   case OPW16:
1054   case OPWV216:
1055     return decodeSpecialReg32(Val);
1056   case OPW64:
1057     return decodeSpecialReg64(Val);
1058   default:
1059     llvm_unreachable("unexpected immediate type");
1060   }
1061 }
1062 
1063 MCOperand AMDGPUDisassembler::decodeDstOp(const OpWidthTy Width, unsigned Val) const {
1064   using namespace AMDGPU::EncValues;
1065 
1066   assert(Val < 128);
1067   assert(Width == OPW256 || Width == OPW512);
1068 
1069   if (Val <= SGPR_MAX) {
1070     assert(SGPR_MIN == 0); // "SGPR_MIN <= Val" is always true and causes compilation warning.
1071     return createSRegOperand(getSgprClassId(Width), Val - SGPR_MIN);
1072   }
1073 
1074   int TTmpIdx = getTTmpIdx(Val);
1075   if (TTmpIdx >= 0) {
1076     return createSRegOperand(getTtmpClassId(Width), TTmpIdx);
1077   }
1078 
1079   llvm_unreachable("unknown dst register");
1080 }
1081 
1082 MCOperand AMDGPUDisassembler::decodeSpecialReg32(unsigned Val) const {
1083   using namespace AMDGPU;
1084 
1085   switch (Val) {
1086   case 102: return createRegOperand(FLAT_SCR_LO);
1087   case 103: return createRegOperand(FLAT_SCR_HI);
1088   case 104: return createRegOperand(XNACK_MASK_LO);
1089   case 105: return createRegOperand(XNACK_MASK_HI);
1090   case 106: return createRegOperand(VCC_LO);
1091   case 107: return createRegOperand(VCC_HI);
1092   case 108: return createRegOperand(TBA_LO);
1093   case 109: return createRegOperand(TBA_HI);
1094   case 110: return createRegOperand(TMA_LO);
1095   case 111: return createRegOperand(TMA_HI);
1096   case 124: return createRegOperand(M0);
1097   case 125: return createRegOperand(SGPR_NULL);
1098   case 126: return createRegOperand(EXEC_LO);
1099   case 127: return createRegOperand(EXEC_HI);
1100   case 235: return createRegOperand(SRC_SHARED_BASE);
1101   case 236: return createRegOperand(SRC_SHARED_LIMIT);
1102   case 237: return createRegOperand(SRC_PRIVATE_BASE);
1103   case 238: return createRegOperand(SRC_PRIVATE_LIMIT);
1104   case 239: return createRegOperand(SRC_POPS_EXITING_WAVE_ID);
1105   case 251: return createRegOperand(SRC_VCCZ);
1106   case 252: return createRegOperand(SRC_EXECZ);
1107   case 253: return createRegOperand(SRC_SCC);
1108   case 254: return createRegOperand(LDS_DIRECT);
1109   default: break;
1110   }
1111   return errOperand(Val, "unknown operand encoding " + Twine(Val));
1112 }
1113 
1114 MCOperand AMDGPUDisassembler::decodeSpecialReg64(unsigned Val) const {
1115   using namespace AMDGPU;
1116 
1117   switch (Val) {
1118   case 102: return createRegOperand(FLAT_SCR);
1119   case 104: return createRegOperand(XNACK_MASK);
1120   case 106: return createRegOperand(VCC);
1121   case 108: return createRegOperand(TBA);
1122   case 110: return createRegOperand(TMA);
1123   case 125: return createRegOperand(SGPR_NULL);
1124   case 126: return createRegOperand(EXEC);
1125   case 235: return createRegOperand(SRC_SHARED_BASE);
1126   case 236: return createRegOperand(SRC_SHARED_LIMIT);
1127   case 237: return createRegOperand(SRC_PRIVATE_BASE);
1128   case 238: return createRegOperand(SRC_PRIVATE_LIMIT);
1129   case 239: return createRegOperand(SRC_POPS_EXITING_WAVE_ID);
1130   case 251: return createRegOperand(SRC_VCCZ);
1131   case 252: return createRegOperand(SRC_EXECZ);
1132   case 253: return createRegOperand(SRC_SCC);
1133   default: break;
1134   }
1135   return errOperand(Val, "unknown operand encoding " + Twine(Val));
1136 }
1137 
1138 MCOperand AMDGPUDisassembler::decodeSDWASrc(const OpWidthTy Width,
1139                                             const unsigned Val) const {
1140   using namespace AMDGPU::SDWA;
1141   using namespace AMDGPU::EncValues;
1142 
1143   if (STI.getFeatureBits()[AMDGPU::FeatureGFX9] ||
1144       STI.getFeatureBits()[AMDGPU::FeatureGFX10]) {
1145     // XXX: the cast to int is needed to avoid a warning that the comparison
1146     // with an unsigned value is always true.
1147     if (int(SDWA9EncValues::SRC_VGPR_MIN) <= int(Val) &&
1148         Val <= SDWA9EncValues::SRC_VGPR_MAX) {
1149       return createRegOperand(getVgprClassId(Width),
1150                               Val - SDWA9EncValues::SRC_VGPR_MIN);
1151     }
1152     if (SDWA9EncValues::SRC_SGPR_MIN <= Val &&
1153         Val <= (isGFX10() ? SDWA9EncValues::SRC_SGPR_MAX_GFX10
1154                           : SDWA9EncValues::SRC_SGPR_MAX_SI)) {
1155       return createSRegOperand(getSgprClassId(Width),
1156                                Val - SDWA9EncValues::SRC_SGPR_MIN);
1157     }
1158     if (SDWA9EncValues::SRC_TTMP_MIN <= Val &&
1159         Val <= SDWA9EncValues::SRC_TTMP_MAX) {
1160       return createSRegOperand(getTtmpClassId(Width),
1161                                Val - SDWA9EncValues::SRC_TTMP_MIN);
1162     }
1163 
1164     const unsigned SVal = Val - SDWA9EncValues::SRC_SGPR_MIN;
1165 
1166     if (INLINE_INTEGER_C_MIN <= SVal && SVal <= INLINE_INTEGER_C_MAX)
1167       return decodeIntImmed(SVal);
1168 
1169     if (INLINE_FLOATING_C_MIN <= SVal && SVal <= INLINE_FLOATING_C_MAX)
1170       return decodeFPImmed(Width, SVal);
1171 
1172     return decodeSpecialReg32(SVal);
1173   } else if (STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands]) {
1174     return createRegOperand(getVgprClassId(Width), Val);
1175   }
1176   llvm_unreachable("unsupported target");
1177 }
1178 
1179 MCOperand AMDGPUDisassembler::decodeSDWASrc16(unsigned Val) const {
1180   return decodeSDWASrc(OPW16, Val);
1181 }
1182 
1183 MCOperand AMDGPUDisassembler::decodeSDWASrc32(unsigned Val) const {
1184   return decodeSDWASrc(OPW32, Val);
1185 }
1186 
1187 MCOperand AMDGPUDisassembler::decodeSDWAVopcDst(unsigned Val) const {
1188   using namespace AMDGPU::SDWA;
1189 
1190   assert((STI.getFeatureBits()[AMDGPU::FeatureGFX9] ||
1191           STI.getFeatureBits()[AMDGPU::FeatureGFX10]) &&
1192          "SDWAVopcDst should be present only on GFX9+");
1193 
1194   bool IsWave64 = STI.getFeatureBits()[AMDGPU::FeatureWavefrontSize64];
1195 
1196   if (Val & SDWA9EncValues::VOPC_DST_VCC_MASK) {
1197     Val &= SDWA9EncValues::VOPC_DST_SGPR_MASK;
1198 
1199     int TTmpIdx = getTTmpIdx(Val);
1200     if (TTmpIdx >= 0) {
1201       auto TTmpClsId = getTtmpClassId(IsWave64 ? OPW64 : OPW32);
1202       return createSRegOperand(TTmpClsId, TTmpIdx);
1203     } else if (Val > SGPR_MAX) {
1204       return IsWave64 ? decodeSpecialReg64(Val)
1205                       : decodeSpecialReg32(Val);
1206     } else {
1207       return createSRegOperand(getSgprClassId(IsWave64 ? OPW64 : OPW32), Val);
1208     }
1209   } else {
1210     return createRegOperand(IsWave64 ? AMDGPU::VCC : AMDGPU::VCC_LO);
1211   }
1212 }
1213 
1214 MCOperand AMDGPUDisassembler::decodeBoolReg(unsigned Val) const {
1215   return STI.getFeatureBits()[AMDGPU::FeatureWavefrontSize64] ?
1216     decodeOperand_SReg_64(Val) : decodeOperand_SReg_32(Val);
1217 }
1218 
1219 bool AMDGPUDisassembler::isVI() const {
1220   return STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands];
1221 }
1222 
1223 bool AMDGPUDisassembler::isGFX9() const {
1224   return STI.getFeatureBits()[AMDGPU::FeatureGFX9];
1225 }
1226 
1227 bool AMDGPUDisassembler::isGFX10() const {
1228   return STI.getFeatureBits()[AMDGPU::FeatureGFX10];
1229 }
1230 
1231 //===----------------------------------------------------------------------===//
1232 // AMDGPU specific symbol handling
1233 //===----------------------------------------------------------------------===//
1234 #define PRINT_DIRECTIVE(DIRECTIVE, MASK)                                       \
1235   do {                                                                         \
1236     KdStream << Indent << DIRECTIVE " "                                        \
1237              << ((FourByteBuffer & MASK) >> (MASK##_SHIFT)) << '\n';           \
1238   } while (0)
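// For example, PRINT_DIRECTIVE(".amdhsa_ieee_mode",
// COMPUTE_PGM_RSRC1_ENABLE_IEEE_MODE) prints the directive name followed by the
// field value, i.e. the masked bits shifted down by
// COMPUTE_PGM_RSRC1_ENABLE_IEEE_MODE_SHIFT.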
1239 
1240 // NOLINTNEXTLINE(readability-identifier-naming)
1241 MCDisassembler::DecodeStatus AMDGPUDisassembler::decodeCOMPUTE_PGM_RSRC1(
1242     uint32_t FourByteBuffer, raw_string_ostream &KdStream) const {
1243   using namespace amdhsa;
1244   StringRef Indent = "\t";
1245 
1246   // We cannot accurately backward compute #VGPRs used from
1247   // GRANULATED_WORKITEM_VGPR_COUNT. But we are concerned with getting the same
1248   // value of GRANULATED_WORKITEM_VGPR_COUNT in the reassembled binary. So we
1249   // simply calculate the inverse of what the assembler does.
1250 
1251   uint32_t GranulatedWorkitemVGPRCount =
1252       (FourByteBuffer & COMPUTE_PGM_RSRC1_GRANULATED_WORKITEM_VGPR_COUNT) >>
1253       COMPUTE_PGM_RSRC1_GRANULATED_WORKITEM_VGPR_COUNT_SHIFT;
1254 
1255   uint32_t NextFreeVGPR = (GranulatedWorkitemVGPRCount + 1) *
1256                           AMDGPU::IsaInfo::getVGPREncodingGranule(&STI);
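  // For example, with a VGPR encoding granule of 4, a granulated count of 5
  // is printed as ".amdhsa_next_free_vgpr 24".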
1257 
1258   KdStream << Indent << ".amdhsa_next_free_vgpr " << NextFreeVGPR << '\n';
1259 
1260   // We cannot backward compute values used to calculate
1261   // GRANULATED_WAVEFRONT_SGPR_COUNT. Hence the original values for the
1262   // following directives can't be computed:
1263   // .amdhsa_reserve_vcc
1264   // .amdhsa_reserve_flat_scratch
1265   // .amdhsa_reserve_xnack_mask
1266   // They take their respective default values if not specified in the assembly.
1267   //
1268   // GRANULATED_WAVEFRONT_SGPR_COUNT
1269   //    = f(NEXT_FREE_SGPR + VCC + FLAT_SCRATCH + XNACK_MASK)
1270   //
1271   // We compute the inverse as though all directives apart from NEXT_FREE_SGPR
1272   // are set to 0. So while disassembling we consider that:
1273   //
1274   // GRANULATED_WAVEFRONT_SGPR_COUNT
1275   //    = f(NEXT_FREE_SGPR + 0 + 0 + 0)
1276   //
1277   // The disassembler cannot recover the original values of those 3 directives.
1278 
1279   uint32_t GranulatedWavefrontSGPRCount =
1280       (FourByteBuffer & COMPUTE_PGM_RSRC1_GRANULATED_WAVEFRONT_SGPR_COUNT) >>
1281       COMPUTE_PGM_RSRC1_GRANULATED_WAVEFRONT_SGPR_COUNT_SHIFT;
1282 
1283   if (isGFX10() && GranulatedWavefrontSGPRCount)
1284     return MCDisassembler::Fail;
1285 
1286   uint32_t NextFreeSGPR = (GranulatedWavefrontSGPRCount + 1) *
1287                           AMDGPU::IsaInfo::getSGPREncodingGranule(&STI);
1288 
1289   KdStream << Indent << ".amdhsa_reserve_vcc " << 0 << '\n';
1290   KdStream << Indent << ".amdhsa_reserve_flat_scratch " << 0 << '\n';
1291   KdStream << Indent << ".amdhsa_reserve_xnack_mask " << 0 << '\n';
1292   KdStream << Indent << ".amdhsa_next_free_sgpr " << NextFreeSGPR << "\n";
1293 
1294   if (FourByteBuffer & COMPUTE_PGM_RSRC1_PRIORITY)
1295     return MCDisassembler::Fail;
1296 
1297   PRINT_DIRECTIVE(".amdhsa_float_round_mode_32",
1298                   COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_32);
1299   PRINT_DIRECTIVE(".amdhsa_float_round_mode_16_64",
1300                   COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_16_64);
1301   PRINT_DIRECTIVE(".amdhsa_float_denorm_mode_32",
1302                   COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_32);
1303   PRINT_DIRECTIVE(".amdhsa_float_denorm_mode_16_64",
1304                   COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64);
1305 
1306   if (FourByteBuffer & COMPUTE_PGM_RSRC1_PRIV)
1307     return MCDisassembler::Fail;
1308 
1309   PRINT_DIRECTIVE(".amdhsa_dx10_clamp", COMPUTE_PGM_RSRC1_ENABLE_DX10_CLAMP);
1310 
1311   if (FourByteBuffer & COMPUTE_PGM_RSRC1_DEBUG_MODE)
1312     return MCDisassembler::Fail;
1313 
1314   PRINT_DIRECTIVE(".amdhsa_ieee_mode", COMPUTE_PGM_RSRC1_ENABLE_IEEE_MODE);
1315 
1316   if (FourByteBuffer & COMPUTE_PGM_RSRC1_BULKY)
1317     return MCDisassembler::Fail;
1318 
1319   if (FourByteBuffer & COMPUTE_PGM_RSRC1_CDBG_USER)
1320     return MCDisassembler::Fail;
1321 
1322   PRINT_DIRECTIVE(".amdhsa_fp16_overflow", COMPUTE_PGM_RSRC1_FP16_OVFL);
1323 
1324   if (FourByteBuffer & COMPUTE_PGM_RSRC1_RESERVED0)
1325     return MCDisassembler::Fail;
1326 
1327   if (isGFX10()) {
1328     PRINT_DIRECTIVE(".amdhsa_workgroup_processor_mode",
1329                     COMPUTE_PGM_RSRC1_WGP_MODE);
1330     PRINT_DIRECTIVE(".amdhsa_memory_ordered", COMPUTE_PGM_RSRC1_MEM_ORDERED);
1331     PRINT_DIRECTIVE(".amdhsa_forward_progress", COMPUTE_PGM_RSRC1_FWD_PROGRESS);
1332   }
1333   return MCDisassembler::Success;
1334 }
1335 
1336 // NOLINTNEXTLINE(readability-identifier-naming)
1337 MCDisassembler::DecodeStatus AMDGPUDisassembler::decodeCOMPUTE_PGM_RSRC2(
1338     uint32_t FourByteBuffer, raw_string_ostream &KdStream) const {
1339   using namespace amdhsa;
1340   StringRef Indent = "\t";
1341   PRINT_DIRECTIVE(
1342       ".amdhsa_system_sgpr_private_segment_wavefront_offset",
1343       COMPUTE_PGM_RSRC2_ENABLE_SGPR_PRIVATE_SEGMENT_WAVEFRONT_OFFSET);
1344   PRINT_DIRECTIVE(".amdhsa_system_sgpr_workgroup_id_x",
1345                   COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X);
1346   PRINT_DIRECTIVE(".amdhsa_system_sgpr_workgroup_id_y",
1347                   COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Y);
1348   PRINT_DIRECTIVE(".amdhsa_system_sgpr_workgroup_id_z",
1349                   COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Z);
1350   PRINT_DIRECTIVE(".amdhsa_system_sgpr_workgroup_info",
1351                   COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_INFO);
1352   PRINT_DIRECTIVE(".amdhsa_system_vgpr_workitem_id",
1353                   COMPUTE_PGM_RSRC2_ENABLE_VGPR_WORKITEM_ID);
1354 
1355   if (FourByteBuffer & COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_ADDRESS_WATCH)
1356     return MCDisassembler::Fail;
1357 
1358   if (FourByteBuffer & COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_MEMORY)
1359     return MCDisassembler::Fail;
1360 
1361   if (FourByteBuffer & COMPUTE_PGM_RSRC2_GRANULATED_LDS_SIZE)
1362     return MCDisassembler::Fail;
1363 
1364   PRINT_DIRECTIVE(
1365       ".amdhsa_exception_fp_ieee_invalid_op",
1366       COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INVALID_OPERATION);
1367   PRINT_DIRECTIVE(".amdhsa_exception_fp_denorm_src",
1368                   COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_FP_DENORMAL_SOURCE);
1369   PRINT_DIRECTIVE(
1370       ".amdhsa_exception_fp_ieee_div_zero",
1371       COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_DIVISION_BY_ZERO);
1372   PRINT_DIRECTIVE(".amdhsa_exception_fp_ieee_overflow",
1373                   COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_OVERFLOW);
1374   PRINT_DIRECTIVE(".amdhsa_exception_fp_ieee_underflow",
1375                   COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_UNDERFLOW);
1376   PRINT_DIRECTIVE(".amdhsa_exception_fp_ieee_inexact",
1377                   COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INEXACT);
1378   PRINT_DIRECTIVE(".amdhsa_exception_int_div_zero",
1379                   COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_INT_DIVIDE_BY_ZERO);
1380 
1381   if (FourByteBuffer & COMPUTE_PGM_RSRC2_RESERVED0)
1382     return MCDisassembler::Fail;
1383 
1384   return MCDisassembler::Success;
1385 }
1386 
1387 #undef PRINT_DIRECTIVE
1388 
1389 MCDisassembler::DecodeStatus
1390 AMDGPUDisassembler::decodeKernelDescriptorDirective(
1391     DataExtractor::Cursor &Cursor, ArrayRef<uint8_t> Bytes,
1392     raw_string_ostream &KdStream) const {
1393 #define PRINT_DIRECTIVE(DIRECTIVE, MASK)                                       \
1394   do {                                                                         \
1395     KdStream << Indent << DIRECTIVE " "                                        \
1396              << ((TwoByteBuffer & MASK) >> (MASK##_SHIFT)) << '\n';            \
1397   } while (0)
1398 
1399   uint16_t TwoByteBuffer = 0;
1400   uint32_t FourByteBuffer = 0;
1401   uint64_t EightByteBuffer = 0;
1402 
1403   StringRef ReservedBytes;
1404   StringRef Indent = "\t";
1405 
1406   assert(Bytes.size() == 64);
1407   DataExtractor DE(Bytes, /*IsLittleEndian=*/true, /*AddressSize=*/8);
1408 
1409   switch (Cursor.tell()) {
1410   case amdhsa::GROUP_SEGMENT_FIXED_SIZE_OFFSET:
1411     FourByteBuffer = DE.getU32(Cursor);
1412     KdStream << Indent << ".amdhsa_group_segment_fixed_size " << FourByteBuffer
1413              << '\n';
1414     return MCDisassembler::Success;
1415 
1416   case amdhsa::PRIVATE_SEGMENT_FIXED_SIZE_OFFSET:
1417     FourByteBuffer = DE.getU32(Cursor);
1418     KdStream << Indent << ".amdhsa_private_segment_fixed_size "
1419              << FourByteBuffer << '\n';
1420     return MCDisassembler::Success;
1421 
1422   case amdhsa::RESERVED0_OFFSET:
1423     // 8 reserved bytes, must be 0.
1424     EightByteBuffer = DE.getU64(Cursor);
1425     if (EightByteBuffer) {
1426       return MCDisassembler::Fail;
1427     }
1428     return MCDisassembler::Success;
1429 
1430   case amdhsa::KERNEL_CODE_ENTRY_BYTE_OFFSET_OFFSET:
1431     // KERNEL_CODE_ENTRY_BYTE_OFFSET
1432     // So far no directive controls this field for Code Object V3, so simply
1433     // skip it when disassembling.
1434     DE.skip(Cursor, 8);
1435     return MCDisassembler::Success;
1436 
1437   case amdhsa::RESERVED1_OFFSET:
1438     // 20 reserved bytes, must be 0.
1439     ReservedBytes = DE.getBytes(Cursor, 20);
1440     for (int I = 0; I < 20; ++I) {
1441       if (ReservedBytes[I] != 0) {
1442         return MCDisassembler::Fail;
1443       }
1444     }
1445     return MCDisassembler::Success;
1446 
1447   case amdhsa::COMPUTE_PGM_RSRC3_OFFSET:
1448     // COMPUTE_PGM_RSRC3
1449     //  - Only used on GFX10; GFX6-9 require it to be 0.
1450     //  - Currently no directives directly control this.
1451     FourByteBuffer = DE.getU32(Cursor);
1452     if (!isGFX10() && FourByteBuffer) {
1453       return MCDisassembler::Fail;
1454     }
1455     return MCDisassembler::Success;
1456 
1457   case amdhsa::COMPUTE_PGM_RSRC1_OFFSET:
1458     FourByteBuffer = DE.getU32(Cursor);
1459     if (decodeCOMPUTE_PGM_RSRC1(FourByteBuffer, KdStream) ==
1460         MCDisassembler::Fail) {
1461       return MCDisassembler::Fail;
1462     }
1463     return MCDisassembler::Success;
1464 
1465   case amdhsa::COMPUTE_PGM_RSRC2_OFFSET:
1466     FourByteBuffer = DE.getU32(Cursor);
1467     if (decodeCOMPUTE_PGM_RSRC2(FourByteBuffer, KdStream) ==
1468         MCDisassembler::Fail) {
1469       return MCDisassembler::Fail;
1470     }
1471     return MCDisassembler::Success;
1472 
1473   case amdhsa::KERNEL_CODE_PROPERTIES_OFFSET:
1474     using namespace amdhsa;
1475     TwoByteBuffer = DE.getU16(Cursor);
1476 
1477     PRINT_DIRECTIVE(".amdhsa_user_sgpr_private_segment_buffer",
1478                     KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER);
1479     PRINT_DIRECTIVE(".amdhsa_user_sgpr_dispatch_ptr",
1480                     KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR);
1481     PRINT_DIRECTIVE(".amdhsa_user_sgpr_queue_ptr",
1482                     KERNEL_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR);
1483     PRINT_DIRECTIVE(".amdhsa_user_sgpr_kernarg_segment_ptr",
1484                     KERNEL_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR);
1485     PRINT_DIRECTIVE(".amdhsa_user_sgpr_dispatch_id",
1486                     KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID);
1487     PRINT_DIRECTIVE(".amdhsa_user_sgpr_flat_scratch_init",
1488                     KERNEL_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT);
1489     PRINT_DIRECTIVE(".amdhsa_user_sgpr_private_segment_size",
1490                     KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE);
1491 
1492     if (TwoByteBuffer & KERNEL_CODE_PROPERTY_RESERVED0)
1493       return MCDisassembler::Fail;
1494 
1495     // ENABLE_WAVEFRONT_SIZE32: reserved on GFX9, a directive on GFX10.
1496     if (isGFX9() &&
1497         (TwoByteBuffer & KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32)) {
1498       return MCDisassembler::Fail;
1499     } else if (isGFX10()) {
1500       PRINT_DIRECTIVE(".amdhsa_wavefront_size32",
1501                       KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32);
1502     }
1503 
1504     if (TwoByteBuffer & KERNEL_CODE_PROPERTY_RESERVED1)
1505       return MCDisassembler::Fail;
1506 
1507     return MCDisassembler::Success;
1508 
1509   case amdhsa::RESERVED2_OFFSET:
1510     // 6 bytes from here are reserved, must be 0.
1511     ReservedBytes = DE.getBytes(Cursor, 6);
1512     for (int I = 0; I < 6; ++I) {
1513       if (ReservedBytes[I] != 0)
1514         return MCDisassembler::Fail;
1515     }
1516     return MCDisassembler::Success;
1517 
1518   default:
1519     llvm_unreachable("Unhandled index. Case statements cover everything.");
1520     return MCDisassembler::Fail;
1521   }
1522 #undef PRINT_DIRECTIVE
1523 }
1524 
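// Disassemble a 64-byte Code Object V3 kernel descriptor into an .amdhsa_kernel
// block printed to stdout. Illustrative output (name and values depend on the
// descriptor being decoded):
//   .amdhsa_kernel my_kernel
//       .amdhsa_group_segment_fixed_size 0
//       .amdhsa_private_segment_fixed_size 0
//       ...
//   .end_amdhsa_kernel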
1525 MCDisassembler::DecodeStatus AMDGPUDisassembler::decodeKernelDescriptor(
1526     StringRef KdName, ArrayRef<uint8_t> Bytes, uint64_t KdAddress) const {
1527   // CP microcode requires the kernel descriptor to be 64-byte aligned.
1528   if (Bytes.size() != 64 || KdAddress % 64 != 0)
1529     return MCDisassembler::Fail;
1530 
1531   std::string Kd;
1532   raw_string_ostream KdStream(Kd);
1533   KdStream << ".amdhsa_kernel " << KdName << '\n';
1534 
1535   DataExtractor::Cursor C(0);
1536   while (C && C.tell() < Bytes.size()) {
1537     MCDisassembler::DecodeStatus Status =
1538         decodeKernelDescriptorDirective(C, Bytes, KdStream);
1539 
1540     cantFail(C.takeError());
1541 
1542     if (Status == MCDisassembler::Fail)
1543       return MCDisassembler::Fail;
1544   }
1545   KdStream << ".end_amdhsa_kernel\n";
1546   outs() << KdStream.str();
1547   return MCDisassembler::Success;
1548 }
1549 
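// Target hook called when disassembly reaches a symbol. Returning None leaves
// the symbol to the generic disassembler; otherwise Size reports how many
// bytes this hook accounts for (256 for an amd_kernel_code_t, 64 for a Code
// Object V3 kernel descriptor).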
1550 Optional<MCDisassembler::DecodeStatus>
1551 AMDGPUDisassembler::onSymbolStart(SymbolInfoTy &Symbol, uint64_t &Size,
1552                                   ArrayRef<uint8_t> Bytes, uint64_t Address,
1553                                   raw_ostream &CStream) const {
1554   // Right now only the kernel descriptor needs to be handled; all other
1555   // symbols receive no target-specific handling.
1556   // TODO:
1557   // Fix the spurious symbol issue for AMDGPU kernels. Exists for both Code
1558   // Object V2 and V3 when symbols are marked protected.
1559 
1560   // amd_kernel_code_t for Code Object V2.
1561   if (Symbol.Type == ELF::STT_AMDGPU_HSA_KERNEL) {
1562     Size = 256;
1563     return MCDisassembler::Fail;
1564   }
1565 
1566   // Code Object V3 kernel descriptors.
1567   StringRef Name = Symbol.Name;
1568   if (Symbol.Type == ELF::STT_OBJECT && Name.endswith(StringRef(".kd"))) {
1569     Size = 64; // Size = 64 regardless of success or failure.
1570     return decodeKernelDescriptor(Name.drop_back(3), Bytes, Address);
1571   }
1572   return None;
1573 }
1574 
1575 //===----------------------------------------------------------------------===//
1576 // AMDGPUSymbolizer
1577 //===----------------------------------------------------------------------===//
1578 
1579 // Try to find the symbol name for the specified label.
1580 bool AMDGPUSymbolizer::tryAddingSymbolicOperand(MCInst &Inst,
1581                                 raw_ostream &/*cStream*/, int64_t Value,
1582                                 uint64_t /*Address*/, bool IsBranch,
1583                                 uint64_t /*Offset*/, uint64_t /*InstSize*/) {
1584 
1585   if (!IsBranch) {
1586     return false;
1587   }
1588 
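  // DisInfo carries the symbol table of the section being disassembled
  // (a SectionSymbolsTy), supplied by whoever created this symbolizer.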
1589   auto *Symbols = static_cast<SectionSymbolsTy *>(DisInfo);
1590   if (!Symbols)
1591     return false;
1592 
1593   auto Result = std::find_if(Symbols->begin(), Symbols->end(),
1594                              [Value](const SymbolInfoTy& Val) {
1595                                 return Val.Addr == static_cast<uint64_t>(Value)
1596                                     && Val.Type == ELF::STT_NOTYPE;
1597                              });
1598   if (Result != Symbols->end()) {
1599     auto *Sym = Ctx.getOrCreateSymbol(Result->Name);
1600     const auto *Add = MCSymbolRefExpr::create(Sym, Ctx);
1601     Inst.addOperand(MCOperand::createExpr(Add));
1602     return true;
1603   }
1604   return false;
1605 }
1606 
1607 void AMDGPUSymbolizer::tryAddingPcLoadReferenceComment(raw_ostream &cStream,
1608                                                        int64_t Value,
1609                                                        uint64_t Address) {
1610   llvm_unreachable("unimplemented");
1611 }
1612 
1613 //===----------------------------------------------------------------------===//
1614 // Initialization
1615 //===----------------------------------------------------------------------===//
1616 
1617 static MCSymbolizer *createAMDGPUSymbolizer(const Triple &/*TT*/,
1618                               LLVMOpInfoCallback /*GetOpInfo*/,
1619                               LLVMSymbolLookupCallback /*SymbolLookUp*/,
1620                               void *DisInfo,
1621                               MCContext *Ctx,
1622                               std::unique_ptr<MCRelocationInfo> &&RelInfo) {
1623   return new AMDGPUSymbolizer(*Ctx, std::move(RelInfo), DisInfo);
1624 }
1625 
1626 static MCDisassembler *createAMDGPUDisassembler(const Target &T,
1627                                                 const MCSubtargetInfo &STI,
1628                                                 MCContext &Ctx) {
1629   return new AMDGPUDisassembler(STI, Ctx, T.createMCInstrInfo());
1630 }
1631 
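// Register the GCN disassembler and symbolizer factories so that tools such
// as llvm-objdump can create them through the target registry.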
1632 extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUDisassembler() {
1633   TargetRegistry::RegisterMCDisassembler(getTheGCNTarget(),
1634                                          createAMDGPUDisassembler);
1635   TargetRegistry::RegisterMCSymbolizer(getTheGCNTarget(),
1636                                        createAMDGPUSymbolizer);
1637 }
1638