xref: /llvm-project/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp (revision b2adeae8650fb720873ad7fa39153beaa8194afc)
1 //===- AMDGPUDisassembler.cpp - Disassembler for AMDGPU ISA ---------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 //===----------------------------------------------------------------------===//
10 //
11 /// \file
12 ///
13 /// This file contains definition for AMDGPU ISA disassembler
14 //
15 //===----------------------------------------------------------------------===//
16 
17 // ToDo: What to do with instruction suffixes (v_mov_b32 vs v_mov_b32_e32)?
18 
19 #include "Disassembler/AMDGPUDisassembler.h"
20 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
21 #include "SIDefines.h"
22 #include "SIRegisterInfo.h"
23 #include "TargetInfo/AMDGPUTargetInfo.h"
24 #include "Utils/AMDGPUAsmUtils.h"
25 #include "Utils/AMDGPUBaseInfo.h"
26 #include "llvm-c/DisassemblerTypes.h"
27 #include "llvm/BinaryFormat/ELF.h"
28 #include "llvm/MC/MCAsmInfo.h"
29 #include "llvm/MC/MCContext.h"
30 #include "llvm/MC/MCDecoderOps.h"
31 #include "llvm/MC/MCExpr.h"
32 #include "llvm/MC/MCInstrDesc.h"
33 #include "llvm/MC/MCRegisterInfo.h"
34 #include "llvm/MC/MCSubtargetInfo.h"
35 #include "llvm/MC/TargetRegistry.h"
36 #include "llvm/Support/AMDHSAKernelDescriptor.h"
37 
38 using namespace llvm;
39 
40 #define DEBUG_TYPE "amdgpu-disassembler"
41 
42 #define SGPR_MAX                                                               \
43   (isGFX10Plus() ? AMDGPU::EncValues::SGPR_MAX_GFX10                           \
44                  : AMDGPU::EncValues::SGPR_MAX_SI)
45 
46 using DecodeStatus = llvm::MCDisassembler::DecodeStatus;
47 
48 AMDGPUDisassembler::AMDGPUDisassembler(const MCSubtargetInfo &STI,
49                                        MCContext &Ctx, MCInstrInfo const *MCII)
50     : MCDisassembler(STI, Ctx), MCII(MCII), MRI(*Ctx.getRegisterInfo()),
51       MAI(*Ctx.getAsmInfo()), TargetMaxInstBytes(MAI.getMaxInstLength(&STI)),
52       CodeObjectVersion(AMDGPU::getDefaultAMDHSACodeObjectVersion()) {
53   // ToDo: AMDGPUDisassembler supports only VI ISA.
54   if (!STI.hasFeature(AMDGPU::FeatureGCN3Encoding) && !isGFX10Plus())
55     report_fatal_error("Disassembly not yet supported for subtarget");
56 
57   for (auto [Symbol, Code] : AMDGPU::UCVersion::getGFXVersions())
58     createConstantSymbolExpr(Symbol, Code);
59 
60   UCVersionW64Expr = createConstantSymbolExpr("UC_VERSION_W64_BIT", 0x2000);
61   UCVersionW32Expr = createConstantSymbolExpr("UC_VERSION_W32_BIT", 0x4000);
62   UCVersionMDPExpr = createConstantSymbolExpr("UC_VERSION_MDP_BIT", 0x8000);
63 }
64 
65 void AMDGPUDisassembler::setABIVersion(unsigned Version) {
66   CodeObjectVersion = AMDGPU::getAMDHSACodeObjectVersion(Version);
67 }
68 
69 inline static MCDisassembler::DecodeStatus
70 addOperand(MCInst &Inst, const MCOperand& Opnd) {
71   Inst.addOperand(Opnd);
72   return Opnd.isValid() ?
73     MCDisassembler::Success :
74     MCDisassembler::Fail;
75 }
76 
77 static int insertNamedMCOperand(MCInst &MI, const MCOperand &Op,
78                                 uint16_t NameIdx) {
79   int OpIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), NameIdx);
80   if (OpIdx != -1) {
81     auto *I = MI.begin();
82     std::advance(I, OpIdx);
83     MI.insert(I, Op);
84   }
85   return OpIdx;
86 }
87 
88 static DecodeStatus decodeSOPPBrTarget(MCInst &Inst, unsigned Imm,
89                                        uint64_t Addr,
90                                        const MCDisassembler *Decoder) {
91   const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
92 
93   // Our branches take a simm16.
94   int64_t Offset = SignExtend64<16>(Imm) * 4 + 4 + Addr;
95 
96   if (DAsm->tryAddingSymbolicOperand(Inst, Offset, Addr, true, 2, 2, 0))
97     return MCDisassembler::Success;
98   return addOperand(Inst, MCOperand::createImm(Imm));
99 }
100 
101 static DecodeStatus decodeSMEMOffset(MCInst &Inst, unsigned Imm, uint64_t Addr,
102                                      const MCDisassembler *Decoder) {
103   const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
104   int64_t Offset;
105   if (DAsm->isGFX12Plus()) { // GFX12 supports 24-bit signed offsets.
106     Offset = SignExtend64<24>(Imm);
107   } else if (DAsm->isVI()) { // VI supports 20-bit unsigned offsets.
108     Offset = Imm & 0xFFFFF;
109   } else { // GFX9+ supports 21-bit signed offsets.
110     Offset = SignExtend64<21>(Imm);
111   }
112   return addOperand(Inst, MCOperand::createImm(Offset));
113 }
114 
115 static DecodeStatus decodeBoolReg(MCInst &Inst, unsigned Val, uint64_t Addr,
116                                   const MCDisassembler *Decoder) {
117   const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
118   return addOperand(Inst, DAsm->decodeBoolReg(Val));
119 }
120 
121 static DecodeStatus decodeSplitBarrier(MCInst &Inst, unsigned Val,
122                                        uint64_t Addr,
123                                        const MCDisassembler *Decoder) {
124   const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
125   return addOperand(Inst, DAsm->decodeSplitBarrier(Val));
126 }
127 
128 static DecodeStatus decodeDpp8FI(MCInst &Inst, unsigned Val, uint64_t Addr,
129                                  const MCDisassembler *Decoder) {
130   const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
131   return addOperand(Inst, DAsm->decodeDpp8FI(Val));
132 }
133 
134 #define DECODE_OPERAND(StaticDecoderName, DecoderName)                         \
135   static DecodeStatus StaticDecoderName(MCInst &Inst, unsigned Imm,            \
136                                         uint64_t /*Addr*/,                     \
137                                         const MCDisassembler *Decoder) {       \
138     auto DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);              \
139     return addOperand(Inst, DAsm->DecoderName(Imm));                           \
140   }
141 
142 // Decoder for registers, decode directly using RegClassID. Imm(8-bit) is
143 // number of register. Used by VGPR only and AGPR only operands.
144 #define DECODE_OPERAND_REG_8(RegClass)                                         \
145   static DecodeStatus Decode##RegClass##RegisterClass(                         \
146       MCInst &Inst, unsigned Imm, uint64_t /*Addr*/,                           \
147       const MCDisassembler *Decoder) {                                         \
148     assert(Imm < (1 << 8) && "8-bit encoding");                                \
149     auto DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);              \
150     return addOperand(                                                         \
151         Inst, DAsm->createRegOperand(AMDGPU::RegClass##RegClassID, Imm));      \
152   }
153 
154 #define DECODE_SrcOp(Name, EncSize, OpWidth, EncImm, MandatoryLiteral,         \
155                      ImmWidth)                                                 \
156   static DecodeStatus Name(MCInst &Inst, unsigned Imm, uint64_t /*Addr*/,      \
157                            const MCDisassembler *Decoder) {                    \
158     assert(Imm < (1 << EncSize) && #EncSize "-bit encoding");                  \
159     auto DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);              \
160     return addOperand(Inst,                                                    \
161                       DAsm->decodeSrcOp(AMDGPUDisassembler::OpWidth, EncImm,   \
162                                         MandatoryLiteral, ImmWidth));          \
163   }
164 
165 static DecodeStatus decodeSrcOp(MCInst &Inst, unsigned EncSize,
166                                 AMDGPUDisassembler::OpWidthTy OpWidth,
167                                 unsigned Imm, unsigned EncImm,
168                                 bool MandatoryLiteral, unsigned ImmWidth,
169                                 AMDGPU::OperandSemantics Sema,
170                                 const MCDisassembler *Decoder) {
171   assert(Imm < (1U << EncSize) && "Operand doesn't fit encoding!");
172   const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
173   return addOperand(Inst, DAsm->decodeSrcOp(OpWidth, EncImm, MandatoryLiteral,
174                                             ImmWidth, Sema));
175 }
176 
177 // Decoder for registers. Imm(7-bit) is number of register, uses decodeSrcOp to
178 // get register class. Used by SGPR only operands.
179 #define DECODE_OPERAND_REG_7(RegClass, OpWidth)                                \
180   DECODE_SrcOp(Decode##RegClass##RegisterClass, 7, OpWidth, Imm, false, 0)
181 
182 // Decoder for registers. Imm(10-bit): Imm{7-0} is number of register,
183 // Imm{9} is acc(agpr or vgpr) Imm{8} should be 0 (see VOP3Pe_SMFMAC).
184 // Set Imm{8} to 1 (IS_VGPR) to decode using 'enum10' from decodeSrcOp.
185 // Used by AV_ register classes (AGPR or VGPR only register operands).
186 template <AMDGPUDisassembler::OpWidthTy OpWidth>
187 static DecodeStatus decodeAV10(MCInst &Inst, unsigned Imm, uint64_t /* Addr */,
188                                const MCDisassembler *Decoder) {
189   return decodeSrcOp(Inst, 10, OpWidth, Imm, Imm | AMDGPU::EncValues::IS_VGPR,
190                      false, 0, AMDGPU::OperandSemantics::INT, Decoder);
191 }
192 
193 // Decoder for Src(9-bit encoding) registers only.
194 template <AMDGPUDisassembler::OpWidthTy OpWidth>
195 static DecodeStatus decodeSrcReg9(MCInst &Inst, unsigned Imm,
196                                   uint64_t /* Addr */,
197                                   const MCDisassembler *Decoder) {
198   return decodeSrcOp(Inst, 9, OpWidth, Imm, Imm, false, 0,
199                      AMDGPU::OperandSemantics::INT, Decoder);
200 }
201 
202 // Decoder for Src(9-bit encoding) AGPR, register number encoded in 9bits, set
203 // Imm{9} to 1 (set acc) and decode using 'enum10' from decodeSrcOp, registers
204 // only.
205 template <AMDGPUDisassembler::OpWidthTy OpWidth>
206 static DecodeStatus decodeSrcA9(MCInst &Inst, unsigned Imm, uint64_t /* Addr */,
207                                 const MCDisassembler *Decoder) {
208   return decodeSrcOp(Inst, 9, OpWidth, Imm, Imm | 512, false, 0,
209                      AMDGPU::OperandSemantics::INT, Decoder);
210 }
211 
212 // Decoder for 'enum10' from decodeSrcOp, Imm{0-8} is 9-bit Src encoding
213 // Imm{9} is acc, registers only.
214 template <AMDGPUDisassembler::OpWidthTy OpWidth>
215 static DecodeStatus decodeSrcAV10(MCInst &Inst, unsigned Imm,
216                                   uint64_t /* Addr */,
217                                   const MCDisassembler *Decoder) {
218   return decodeSrcOp(Inst, 10, OpWidth, Imm, Imm, false, 0,
219                      AMDGPU::OperandSemantics::INT, Decoder);
220 }
221 
222 // Decoder for RegisterOperands using 9-bit Src encoding. Operand can be
223 // register from RegClass or immediate. Registers that don't belong to RegClass
224 // will be decoded and InstPrinter will report warning. Immediate will be
225 // decoded into constant of size ImmWidth, should match width of immediate used
226 // by OperandType (important for floating point types).
227 template <AMDGPUDisassembler::OpWidthTy OpWidth, unsigned ImmWidth,
228           unsigned OperandSemantics>
229 static DecodeStatus decodeSrcRegOrImm9(MCInst &Inst, unsigned Imm,
230                                        uint64_t /* Addr */,
231                                        const MCDisassembler *Decoder) {
232   return decodeSrcOp(Inst, 9, OpWidth, Imm, Imm, false, ImmWidth,
233                      (AMDGPU::OperandSemantics)OperandSemantics, Decoder);
234 }
235 
236 // Decoder for Src(9-bit encoding) AGPR or immediate. Set Imm{9} to 1 (set acc)
237 // and decode using 'enum10' from decodeSrcOp.
238 template <AMDGPUDisassembler::OpWidthTy OpWidth, unsigned ImmWidth,
239           unsigned OperandSemantics>
240 static DecodeStatus decodeSrcRegOrImmA9(MCInst &Inst, unsigned Imm,
241                                         uint64_t /* Addr */,
242                                         const MCDisassembler *Decoder) {
243   return decodeSrcOp(Inst, 9, OpWidth, Imm, Imm | 512, false, ImmWidth,
244                      (AMDGPU::OperandSemantics)OperandSemantics, Decoder);
245 }
246 
247 template <AMDGPUDisassembler::OpWidthTy OpWidth, unsigned ImmWidth,
248           unsigned OperandSemantics>
249 static DecodeStatus decodeSrcRegOrImmDeferred9(MCInst &Inst, unsigned Imm,
250                                                uint64_t /* Addr */,
251                                                const MCDisassembler *Decoder) {
252   return decodeSrcOp(Inst, 9, OpWidth, Imm, Imm, true, ImmWidth,
253                      (AMDGPU::OperandSemantics)OperandSemantics, Decoder);
254 }
255 
256 // Default decoders generated by tablegen: 'Decode<RegClass>RegisterClass'
257 // when RegisterClass is used as an operand. Most often used for destination
258 // operands.
259 
260 DECODE_OPERAND_REG_8(VGPR_32)
261 DECODE_OPERAND_REG_8(VGPR_32_Lo128)
262 DECODE_OPERAND_REG_8(VReg_64)
263 DECODE_OPERAND_REG_8(VReg_96)
264 DECODE_OPERAND_REG_8(VReg_128)
265 DECODE_OPERAND_REG_8(VReg_192)
266 DECODE_OPERAND_REG_8(VReg_256)
267 DECODE_OPERAND_REG_8(VReg_288)
268 DECODE_OPERAND_REG_8(VReg_352)
269 DECODE_OPERAND_REG_8(VReg_384)
270 DECODE_OPERAND_REG_8(VReg_512)
271 DECODE_OPERAND_REG_8(VReg_1024)
272 
273 DECODE_OPERAND_REG_7(SReg_32, OPW32)
274 DECODE_OPERAND_REG_7(SReg_32_XEXEC, OPW32)
275 DECODE_OPERAND_REG_7(SReg_32_XM0_XEXEC, OPW32)
276 DECODE_OPERAND_REG_7(SReg_32_XEXEC_HI, OPW32)
277 DECODE_OPERAND_REG_7(SReg_64, OPW64)
278 DECODE_OPERAND_REG_7(SReg_64_XEXEC, OPW64)
279 DECODE_OPERAND_REG_7(SReg_64_XEXEC_XNULL, OPW64)
280 DECODE_OPERAND_REG_7(SReg_96, OPW96)
281 DECODE_OPERAND_REG_7(SReg_128, OPW128)
282 DECODE_OPERAND_REG_7(SReg_128_XNULL, OPW128)
283 DECODE_OPERAND_REG_7(SReg_256, OPW256)
284 DECODE_OPERAND_REG_7(SReg_256_XNULL, OPW256)
285 DECODE_OPERAND_REG_7(SReg_512, OPW512)
286 
287 DECODE_OPERAND_REG_8(AGPR_32)
288 DECODE_OPERAND_REG_8(AReg_64)
289 DECODE_OPERAND_REG_8(AReg_128)
290 DECODE_OPERAND_REG_8(AReg_256)
291 DECODE_OPERAND_REG_8(AReg_512)
292 DECODE_OPERAND_REG_8(AReg_1024)
293 
294 static DecodeStatus DecodeVGPR_16RegisterClass(MCInst &Inst, unsigned Imm,
295                                                uint64_t /*Addr*/,
296                                                const MCDisassembler *Decoder) {
297   assert(isUInt<10>(Imm) && "10-bit encoding expected");
298   assert((Imm & (1 << 8)) == 0 && "Imm{8} should not be used");
299 
300   bool IsHi = Imm & (1 << 9);
301   unsigned RegIdx = Imm & 0xff;
302   const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
303   return addOperand(Inst, DAsm->createVGPR16Operand(RegIdx, IsHi));
304 }
305 
306 static DecodeStatus
307 DecodeVGPR_16_Lo128RegisterClass(MCInst &Inst, unsigned Imm, uint64_t /*Addr*/,
308                                  const MCDisassembler *Decoder) {
309   assert(isUInt<8>(Imm) && "8-bit encoding expected");
310 
311   bool IsHi = Imm & (1 << 7);
312   unsigned RegIdx = Imm & 0x7f;
313   const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
314   return addOperand(Inst, DAsm->createVGPR16Operand(RegIdx, IsHi));
315 }
316 
317 template <AMDGPUDisassembler::OpWidthTy OpWidth, unsigned ImmWidth,
318           unsigned OperandSemantics>
319 static DecodeStatus decodeOperand_VSrcT16_Lo128(MCInst &Inst, unsigned Imm,
320                                                 uint64_t /*Addr*/,
321                                                 const MCDisassembler *Decoder) {
322   assert(isUInt<9>(Imm) && "9-bit encoding expected");
323 
324   const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
325   if (Imm & AMDGPU::EncValues::IS_VGPR) {
326     bool IsHi = Imm & (1 << 7);
327     unsigned RegIdx = Imm & 0x7f;
328     return addOperand(Inst, DAsm->createVGPR16Operand(RegIdx, IsHi));
329   }
330   return addOperand(Inst, DAsm->decodeNonVGPRSrcOp(
331                               OpWidth, Imm & 0xFF, false, ImmWidth,
332                               (AMDGPU::OperandSemantics)OperandSemantics));
333 }
334 
335 template <AMDGPUDisassembler::OpWidthTy OpWidth, unsigned ImmWidth,
336           unsigned OperandSemantics>
337 static DecodeStatus
338 decodeOperand_VSrcT16_Lo128_Deferred(MCInst &Inst, unsigned Imm,
339                                      uint64_t /*Addr*/,
340                                      const MCDisassembler *Decoder) {
341   const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
342   assert(isUInt<9>(Imm) && "9-bit encoding expected");
343 
344   if (Imm & AMDGPU::EncValues::IS_VGPR) {
345     bool IsHi = Imm & (1 << 7);
346     unsigned RegIdx = Imm & 0x7f;
347     return addOperand(Inst, DAsm->createVGPR16Operand(RegIdx, IsHi));
348   }
349   return addOperand(Inst, DAsm->decodeNonVGPRSrcOp(
350                               OpWidth, Imm & 0xFF, true, ImmWidth,
351                               (AMDGPU::OperandSemantics)OperandSemantics));
352 }
353 
354 template <AMDGPUDisassembler::OpWidthTy OpWidth, unsigned ImmWidth,
355           unsigned OperandSemantics>
356 static DecodeStatus decodeOperand_VSrcT16(MCInst &Inst, unsigned Imm,
357                                           uint64_t /*Addr*/,
358                                           const MCDisassembler *Decoder) {
359   assert(isUInt<10>(Imm) && "10-bit encoding expected");
360 
361   const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
362   if (Imm & AMDGPU::EncValues::IS_VGPR) {
363     bool IsHi = Imm & (1 << 9);
364     unsigned RegIdx = Imm & 0xff;
365     return addOperand(Inst, DAsm->createVGPR16Operand(RegIdx, IsHi));
366   }
367   return addOperand(Inst, DAsm->decodeNonVGPRSrcOp(
368                               OpWidth, Imm & 0xFF, false, ImmWidth,
369                               (AMDGPU::OperandSemantics)OperandSemantics));
370 }
371 
372 static DecodeStatus decodeOperand_VGPR_16(MCInst &Inst, unsigned Imm,
373                                           uint64_t /*Addr*/,
374                                           const MCDisassembler *Decoder) {
375   assert(isUInt<10>(Imm) && "10-bit encoding expected");
376   assert(Imm & AMDGPU::EncValues::IS_VGPR && "VGPR expected");
377 
378   const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
379 
380   bool IsHi = Imm & (1 << 9);
381   unsigned RegIdx = Imm & 0xff;
382   return addOperand(Inst, DAsm->createVGPR16Operand(RegIdx, IsHi));
383 }
384 
385 static DecodeStatus decodeOperand_KImmFP(MCInst &Inst, unsigned Imm,
386                                          uint64_t Addr,
387                                          const MCDisassembler *Decoder) {
388   const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
389   return addOperand(Inst, DAsm->decodeMandatoryLiteralConstant(Imm));
390 }
391 
392 static DecodeStatus decodeOperandVOPDDstY(MCInst &Inst, unsigned Val,
393                                           uint64_t Addr, const void *Decoder) {
394   const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
395   return addOperand(Inst, DAsm->decodeVOPDDstYOp(Inst, Val));
396 }
397 
398 static bool IsAGPROperand(const MCInst &Inst, int OpIdx,
399                           const MCRegisterInfo *MRI) {
400   if (OpIdx < 0)
401     return false;
402 
403   const MCOperand &Op = Inst.getOperand(OpIdx);
404   if (!Op.isReg())
405     return false;
406 
407   MCRegister Sub = MRI->getSubReg(Op.getReg(), AMDGPU::sub0);
408   auto Reg = Sub ? Sub : Op.getReg();
409   return Reg >= AMDGPU::AGPR0 && Reg <= AMDGPU::AGPR255;
410 }
411 
412 static DecodeStatus decodeAVLdSt(MCInst &Inst, unsigned Imm,
413                                  AMDGPUDisassembler::OpWidthTy Opw,
414                                  const MCDisassembler *Decoder) {
415   const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
416   if (!DAsm->isGFX90A()) {
417     Imm &= 511;
418   } else {
419     // If atomic has both vdata and vdst their register classes are tied.
420     // The bit is decoded along with the vdst, first operand. We need to
421     // change register class to AGPR if vdst was AGPR.
422     // If a DS instruction has both data0 and data1 their register classes
423     // are also tied.
424     unsigned Opc = Inst.getOpcode();
425     uint64_t TSFlags = DAsm->getMCII()->get(Opc).TSFlags;
426     uint16_t DataNameIdx = (TSFlags & SIInstrFlags::DS) ? AMDGPU::OpName::data0
427                                                         : AMDGPU::OpName::vdata;
428     const MCRegisterInfo *MRI = DAsm->getContext().getRegisterInfo();
429     int DataIdx = AMDGPU::getNamedOperandIdx(Opc, DataNameIdx);
430     if ((int)Inst.getNumOperands() == DataIdx) {
431       int DstIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
432       if (IsAGPROperand(Inst, DstIdx, MRI))
433         Imm |= 512;
434     }
435 
436     if (TSFlags & SIInstrFlags::DS) {
437       int Data2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data1);
438       if ((int)Inst.getNumOperands() == Data2Idx &&
439           IsAGPROperand(Inst, DataIdx, MRI))
440         Imm |= 512;
441     }
442   }
443   return addOperand(Inst, DAsm->decodeSrcOp(Opw, Imm | 256));
444 }
445 
446 template <AMDGPUDisassembler::OpWidthTy Opw>
447 static DecodeStatus decodeAVLdSt(MCInst &Inst, unsigned Imm,
448                                  uint64_t /* Addr */,
449                                  const MCDisassembler *Decoder) {
450   return decodeAVLdSt(Inst, Imm, Opw, Decoder);
451 }
452 
453 static DecodeStatus decodeOperand_VSrc_f64(MCInst &Inst, unsigned Imm,
454                                            uint64_t Addr,
455                                            const MCDisassembler *Decoder) {
456   assert(Imm < (1 << 9) && "9-bit encoding");
457   const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
458   return addOperand(Inst,
459                     DAsm->decodeSrcOp(AMDGPUDisassembler::OPW64, Imm, false, 64,
460                                       AMDGPU::OperandSemantics::FP64));
461 }
462 
463 #define DECODE_SDWA(DecName) \
464 DECODE_OPERAND(decodeSDWA##DecName, decodeSDWA##DecName)
465 
466 DECODE_SDWA(Src32)
467 DECODE_SDWA(Src16)
468 DECODE_SDWA(VopcDst)
469 
470 static DecodeStatus decodeVersionImm(MCInst &Inst, unsigned Imm,
471                                      uint64_t /* Addr */,
472                                      const MCDisassembler *Decoder) {
473   const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
474   return addOperand(Inst, DAsm->decodeVersionImm(Imm));
475 }
476 
477 #include "AMDGPUGenDisassemblerTables.inc"
478 
479 //===----------------------------------------------------------------------===//
480 //
481 //===----------------------------------------------------------------------===//
482 
483 template <typename T> static inline T eatBytes(ArrayRef<uint8_t>& Bytes) {
484   assert(Bytes.size() >= sizeof(T));
485   const auto Res =
486       support::endian::read<T, llvm::endianness::little>(Bytes.data());
487   Bytes = Bytes.slice(sizeof(T));
488   return Res;
489 }
490 
491 static inline DecoderUInt128 eat12Bytes(ArrayRef<uint8_t> &Bytes) {
492   assert(Bytes.size() >= 12);
493   uint64_t Lo =
494       support::endian::read<uint64_t, llvm::endianness::little>(Bytes.data());
495   Bytes = Bytes.slice(8);
496   uint64_t Hi =
497       support::endian::read<uint32_t, llvm::endianness::little>(Bytes.data());
498   Bytes = Bytes.slice(4);
499   return DecoderUInt128(Lo, Hi);
500 }
501 
502 static inline DecoderUInt128 eat16Bytes(ArrayRef<uint8_t> &Bytes) {
503   assert(Bytes.size() >= 16);
504   uint64_t Lo =
505       support::endian::read<uint64_t, llvm::endianness::little>(Bytes.data());
506   Bytes = Bytes.slice(8);
507   uint64_t Hi =
508       support::endian::read<uint64_t, llvm::endianness::little>(Bytes.data());
509   Bytes = Bytes.slice(8);
510   return DecoderUInt128(Lo, Hi);
511 }
512 
513 DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
514                                                 ArrayRef<uint8_t> Bytes_,
515                                                 uint64_t Address,
516                                                 raw_ostream &CS) const {
517   unsigned MaxInstBytesNum = std::min((size_t)TargetMaxInstBytes, Bytes_.size());
518   Bytes = Bytes_.slice(0, MaxInstBytesNum);
519 
520   // In case the opcode is not recognized we'll assume a Size of 4 bytes (unless
521   // there are fewer bytes left). This will be overridden on success.
522   Size = std::min((size_t)4, Bytes_.size());
523 
524   do {
525     // ToDo: better to switch encoding length using some bit predicate
526     // but it is unknown yet, so try all we can
527 
528     // Try to decode DPP and SDWA first to solve conflict with VOP1 and VOP2
529     // encodings
530     if (isGFX11Plus() && Bytes.size() >= 12 ) {
531       DecoderUInt128 DecW = eat12Bytes(Bytes);
532 
533       if (isGFX11() &&
534           tryDecodeInst(DecoderTableGFX1196, DecoderTableGFX11_FAKE1696, MI,
535                         DecW, Address, CS))
536         break;
537 
538       if (isGFX12() &&
539           tryDecodeInst(DecoderTableGFX1296, DecoderTableGFX12_FAKE1696, MI,
540                         DecW, Address, CS))
541         break;
542 
543       if (isGFX12() &&
544           tryDecodeInst(DecoderTableGFX12W6496, MI, DecW, Address, CS))
545         break;
546 
547       // Reinitialize Bytes
548       Bytes = Bytes_.slice(0, MaxInstBytesNum);
549 
550     } else if (Bytes.size() >= 16 &&
551                STI.hasFeature(AMDGPU::FeatureGFX950Insts)) {
552       DecoderUInt128 DecW = eat16Bytes(Bytes);
553       if (tryDecodeInst(DecoderTableGFX940128, MI, DecW, Address, CS))
554         break;
555 
556       // Reinitialize Bytes
557       Bytes = Bytes_.slice(0, MaxInstBytesNum);
558     }
559 
560     if (Bytes.size() >= 8) {
561       const uint64_t QW = eatBytes<uint64_t>(Bytes);
562 
563       if (STI.hasFeature(AMDGPU::FeatureGFX10_BEncoding) &&
564           tryDecodeInst(DecoderTableGFX10_B64, MI, QW, Address, CS))
565         break;
566 
567       if (STI.hasFeature(AMDGPU::FeatureUnpackedD16VMem) &&
568           tryDecodeInst(DecoderTableGFX80_UNPACKED64, MI, QW, Address, CS))
569         break;
570 
571       if (STI.hasFeature(AMDGPU::FeatureGFX950Insts) &&
572           tryDecodeInst(DecoderTableGFX95064, MI, QW, Address, CS))
573         break;
574 
575       // Some GFX9 subtargets repurposed the v_mad_mix_f32, v_mad_mixlo_f16 and
576       // v_mad_mixhi_f16 for FMA variants. Try to decode using this special
577       // table first so we print the correct name.
578       if (STI.hasFeature(AMDGPU::FeatureFmaMixInsts) &&
579           tryDecodeInst(DecoderTableGFX9_DL64, MI, QW, Address, CS))
580         break;
581 
582       if (STI.hasFeature(AMDGPU::FeatureGFX940Insts) &&
583           tryDecodeInst(DecoderTableGFX94064, MI, QW, Address, CS))
584         break;
585 
586       if (STI.hasFeature(AMDGPU::FeatureGFX90AInsts) &&
587           tryDecodeInst(DecoderTableGFX90A64, MI, QW, Address, CS))
588         break;
589 
590       if ((isVI() || isGFX9()) &&
591           tryDecodeInst(DecoderTableGFX864, MI, QW, Address, CS))
592         break;
593 
594       if (isGFX9() && tryDecodeInst(DecoderTableGFX964, MI, QW, Address, CS))
595         break;
596 
597       if (isGFX10() && tryDecodeInst(DecoderTableGFX1064, MI, QW, Address, CS))
598         break;
599 
600       if (isGFX12() &&
601           tryDecodeInst(DecoderTableGFX1264, DecoderTableGFX12_FAKE1664, MI, QW,
602                         Address, CS))
603         break;
604 
605       if (isGFX11() &&
606           tryDecodeInst(DecoderTableGFX1164, DecoderTableGFX11_FAKE1664, MI, QW,
607                         Address, CS))
608         break;
609 
610       if (isGFX11() &&
611           tryDecodeInst(DecoderTableGFX11W6464, MI, QW, Address, CS))
612         break;
613 
614       if (isGFX12() &&
615           tryDecodeInst(DecoderTableGFX12W6464, MI, QW, Address, CS))
616         break;
617 
618       // Reinitialize Bytes
619       Bytes = Bytes_.slice(0, MaxInstBytesNum);
620     }
621 
622     // Try decode 32-bit instruction
623     if (Bytes.size() >= 4) {
624       const uint32_t DW = eatBytes<uint32_t>(Bytes);
625 
626       if ((isVI() || isGFX9()) &&
627           tryDecodeInst(DecoderTableGFX832, MI, DW, Address, CS))
628         break;
629 
630       if (tryDecodeInst(DecoderTableAMDGPU32, MI, DW, Address, CS))
631         break;
632 
633       if (isGFX9() && tryDecodeInst(DecoderTableGFX932, MI, DW, Address, CS))
634         break;
635 
636       if (STI.hasFeature(AMDGPU::FeatureGFX950Insts) &&
637           tryDecodeInst(DecoderTableGFX95032, MI, DW, Address, CS))
638         break;
639 
640       if (STI.hasFeature(AMDGPU::FeatureGFX90AInsts) &&
641           tryDecodeInst(DecoderTableGFX90A32, MI, DW, Address, CS))
642         break;
643 
644       if (STI.hasFeature(AMDGPU::FeatureGFX10_BEncoding) &&
645           tryDecodeInst(DecoderTableGFX10_B32, MI, DW, Address, CS))
646         break;
647 
648       if (isGFX10() && tryDecodeInst(DecoderTableGFX1032, MI, DW, Address, CS))
649         break;
650 
651       if (isGFX11() &&
652           tryDecodeInst(DecoderTableGFX1132, DecoderTableGFX11_FAKE1632, MI, DW,
653                         Address, CS))
654         break;
655 
656       if (isGFX12() &&
657           tryDecodeInst(DecoderTableGFX1232, DecoderTableGFX12_FAKE1632, MI, DW,
658                         Address, CS))
659         break;
660     }
661 
662     return MCDisassembler::Fail;
663   } while (false);
664 
665   if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::DPP) {
666     if (isMacDPP(MI))
667       convertMacDPPInst(MI);
668 
669     if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::VOP3P)
670       convertVOP3PDPPInst(MI);
671     else if ((MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::VOPC) ||
672              AMDGPU::isVOPC64DPP(MI.getOpcode()))
673       convertVOPCDPPInst(MI); // Special VOP3 case
674     else if (AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::dpp8) !=
675              -1)
676       convertDPP8Inst(MI);
677     else if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::VOP3)
678       convertVOP3DPPInst(MI); // Regular VOP3 case
679   }
680 
681   convertTrue16OpSel(MI);
682 
683   if (AMDGPU::isMAC(MI.getOpcode())) {
684     // Insert dummy unused src2_modifiers.
685     insertNamedMCOperand(MI, MCOperand::createImm(0),
686                          AMDGPU::OpName::src2_modifiers);
687   }
688 
689   if (MI.getOpcode() == AMDGPU::V_CVT_SR_BF8_F32_e64_dpp ||
690       MI.getOpcode() == AMDGPU::V_CVT_SR_FP8_F32_e64_dpp) {
691     // Insert dummy unused src2_modifiers.
692     insertNamedMCOperand(MI, MCOperand::createImm(0),
693                          AMDGPU::OpName::src2_modifiers);
694   }
695 
696   if ((MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::DS) &&
697       !AMDGPU::hasGDS(STI)) {
698     insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::gds);
699   }
700 
701   if (MCII->get(MI.getOpcode()).TSFlags &
702       (SIInstrFlags::MUBUF | SIInstrFlags::FLAT | SIInstrFlags::SMRD)) {
703     int CPolPos = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
704                                              AMDGPU::OpName::cpol);
705     if (CPolPos != -1) {
706       unsigned CPol =
707           (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::IsAtomicRet) ?
708               AMDGPU::CPol::GLC : 0;
709       if (MI.getNumOperands() <= (unsigned)CPolPos) {
710         insertNamedMCOperand(MI, MCOperand::createImm(CPol),
711                              AMDGPU::OpName::cpol);
712       } else if (CPol) {
713         MI.getOperand(CPolPos).setImm(MI.getOperand(CPolPos).getImm() | CPol);
714       }
715     }
716   }
717 
718   if ((MCII->get(MI.getOpcode()).TSFlags &
719        (SIInstrFlags::MTBUF | SIInstrFlags::MUBUF)) &&
720       (STI.hasFeature(AMDGPU::FeatureGFX90AInsts))) {
721     // GFX90A lost TFE, its place is occupied by ACC.
722     int TFEOpIdx =
723         AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::tfe);
724     if (TFEOpIdx != -1) {
725       auto *TFEIter = MI.begin();
726       std::advance(TFEIter, TFEOpIdx);
727       MI.insert(TFEIter, MCOperand::createImm(0));
728     }
729   }
730 
731   if (MCII->get(MI.getOpcode()).TSFlags &
732       (SIInstrFlags::MTBUF | SIInstrFlags::MUBUF)) {
733     int SWZOpIdx =
734         AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::swz);
735     if (SWZOpIdx != -1) {
736       auto *SWZIter = MI.begin();
737       std::advance(SWZIter, SWZOpIdx);
738       MI.insert(SWZIter, MCOperand::createImm(0));
739     }
740   }
741 
742   if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::MIMG) {
743     int VAddr0Idx =
744         AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0);
745     int RsrcIdx =
746         AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc);
747     unsigned NSAArgs = RsrcIdx - VAddr0Idx - 1;
748     if (VAddr0Idx >= 0 && NSAArgs > 0) {
749       unsigned NSAWords = (NSAArgs + 3) / 4;
750       if (Bytes.size() < 4 * NSAWords)
751         return MCDisassembler::Fail;
752       for (unsigned i = 0; i < NSAArgs; ++i) {
753         const unsigned VAddrIdx = VAddr0Idx + 1 + i;
754         auto VAddrRCID =
755             MCII->get(MI.getOpcode()).operands()[VAddrIdx].RegClass;
756         MI.insert(MI.begin() + VAddrIdx, createRegOperand(VAddrRCID, Bytes[i]));
757       }
758       Bytes = Bytes.slice(4 * NSAWords);
759     }
760 
761     convertMIMGInst(MI);
762   }
763 
764   if (MCII->get(MI.getOpcode()).TSFlags &
765       (SIInstrFlags::VIMAGE | SIInstrFlags::VSAMPLE))
766     convertMIMGInst(MI);
767 
768   if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::EXP)
769     convertEXPInst(MI);
770 
771   if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::VINTERP)
772     convertVINTERPInst(MI);
773 
774   if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::SDWA)
775     convertSDWAInst(MI);
776 
777   if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::IsMAI)
778     convertMAIInst(MI);
779 
780   int VDstIn_Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
781                                               AMDGPU::OpName::vdst_in);
782   if (VDstIn_Idx != -1) {
783     int Tied = MCII->get(MI.getOpcode()).getOperandConstraint(VDstIn_Idx,
784                            MCOI::OperandConstraint::TIED_TO);
785     if (Tied != -1 && (MI.getNumOperands() <= (unsigned)VDstIn_Idx ||
786          !MI.getOperand(VDstIn_Idx).isReg() ||
787          MI.getOperand(VDstIn_Idx).getReg() != MI.getOperand(Tied).getReg())) {
788       if (MI.getNumOperands() > (unsigned)VDstIn_Idx)
789         MI.erase(&MI.getOperand(VDstIn_Idx));
790       insertNamedMCOperand(MI,
791         MCOperand::createReg(MI.getOperand(Tied).getReg()),
792         AMDGPU::OpName::vdst_in);
793     }
794   }
795 
796   int ImmLitIdx =
797       AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::imm);
798   bool IsSOPK = MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::SOPK;
799   if (ImmLitIdx != -1 && !IsSOPK)
800     convertFMAanyK(MI, ImmLitIdx);
801 
802   Size = MaxInstBytesNum - Bytes.size();
803   return MCDisassembler::Success;
804 }
805 
806 void AMDGPUDisassembler::convertEXPInst(MCInst &MI) const {
807   if (STI.hasFeature(AMDGPU::FeatureGFX11Insts)) {
808     // The MCInst still has these fields even though they are no longer encoded
809     // in the GFX11 instruction.
810     insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::vm);
811     insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::compr);
812   }
813 }
814 
815 void AMDGPUDisassembler::convertVINTERPInst(MCInst &MI) const {
816   convertTrue16OpSel(MI);
817   if (MI.getOpcode() == AMDGPU::V_INTERP_P10_F16_F32_inreg_t16_gfx11 ||
818       MI.getOpcode() == AMDGPU::V_INTERP_P10_F16_F32_inreg_fake16_gfx11 ||
819       MI.getOpcode() == AMDGPU::V_INTERP_P10_F16_F32_inreg_t16_gfx12 ||
820       MI.getOpcode() == AMDGPU::V_INTERP_P10_F16_F32_inreg_fake16_gfx12 ||
821       MI.getOpcode() == AMDGPU::V_INTERP_P10_RTZ_F16_F32_inreg_t16_gfx11 ||
822       MI.getOpcode() == AMDGPU::V_INTERP_P10_RTZ_F16_F32_inreg_fake16_gfx11 ||
823       MI.getOpcode() == AMDGPU::V_INTERP_P10_RTZ_F16_F32_inreg_t16_gfx12 ||
824       MI.getOpcode() == AMDGPU::V_INTERP_P10_RTZ_F16_F32_inreg_fake16_gfx12 ||
825       MI.getOpcode() == AMDGPU::V_INTERP_P2_F16_F32_inreg_t16_gfx11 ||
826       MI.getOpcode() == AMDGPU::V_INTERP_P2_F16_F32_inreg_fake16_gfx11 ||
827       MI.getOpcode() == AMDGPU::V_INTERP_P2_F16_F32_inreg_t16_gfx12 ||
828       MI.getOpcode() == AMDGPU::V_INTERP_P2_F16_F32_inreg_fake16_gfx12 ||
829       MI.getOpcode() == AMDGPU::V_INTERP_P2_RTZ_F16_F32_inreg_t16_gfx11 ||
830       MI.getOpcode() == AMDGPU::V_INTERP_P2_RTZ_F16_F32_inreg_fake16_gfx11 ||
831       MI.getOpcode() == AMDGPU::V_INTERP_P2_RTZ_F16_F32_inreg_t16_gfx12 ||
832       MI.getOpcode() == AMDGPU::V_INTERP_P2_RTZ_F16_F32_inreg_fake16_gfx12) {
833     // The MCInst has this field that is not directly encoded in the
834     // instruction.
835     insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::op_sel);
836   }
837 }
838 
839 void AMDGPUDisassembler::convertSDWAInst(MCInst &MI) const {
840   if (STI.hasFeature(AMDGPU::FeatureGFX9) ||
841       STI.hasFeature(AMDGPU::FeatureGFX10)) {
842     if (AMDGPU::hasNamedOperand(MI.getOpcode(), AMDGPU::OpName::sdst))
843       // VOPC - insert clamp
844       insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::clamp);
845   } else if (STI.hasFeature(AMDGPU::FeatureVolcanicIslands)) {
846     int SDst = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst);
847     if (SDst != -1) {
848       // VOPC - insert VCC register as sdst
849       insertNamedMCOperand(MI, createRegOperand(AMDGPU::VCC),
850                            AMDGPU::OpName::sdst);
851     } else {
852       // VOP1/2 - insert omod if present in instruction
853       insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::omod);
854     }
855   }
856 }
857 
858 /// Adjust the register values used by V_MFMA_F8F6F4_f8_f8 instructions to the
859 /// appropriate subregister for the used format width.
860 static void adjustMFMA_F8F6F4OpRegClass(const MCRegisterInfo &MRI,
861                                         MCOperand &MO, uint8_t NumRegs) {
862   switch (NumRegs) {
863   case 4:
864     return MO.setReg(MRI.getSubReg(MO.getReg(), AMDGPU::sub0_sub1_sub2_sub3));
865   case 6:
866     return MO.setReg(
867         MRI.getSubReg(MO.getReg(), AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5));
868   case 8:
869     // No-op in cases where one operand is still f8/bf8.
870     return;
871   default:
872     llvm_unreachable("Unexpected size for mfma f8f6f4 operand");
873   }
874 }
875 
876 /// f8f6f4 instructions have different pseudos depending on the used formats. In
877 /// the disassembler table, we only have the variants with the largest register
878 /// classes which assume using an fp8/bf8 format for both operands. The actual
879 /// register class depends on the format in blgp and cbsz operands. Adjust the
880 /// register classes depending on the used format.
881 void AMDGPUDisassembler::convertMAIInst(MCInst &MI) const {
882   int BlgpIdx =
883       AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::blgp);
884   if (BlgpIdx == -1)
885     return;
886 
887   int CbszIdx =
888       AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::cbsz);
889 
890   unsigned CBSZ = MI.getOperand(CbszIdx).getImm();
891   unsigned BLGP = MI.getOperand(BlgpIdx).getImm();
892 
893   const AMDGPU::MFMA_F8F6F4_Info *AdjustedRegClassOpcode =
894       AMDGPU::getMFMA_F8F6F4_WithFormatArgs(CBSZ, BLGP, MI.getOpcode());
895   if (!AdjustedRegClassOpcode ||
896       AdjustedRegClassOpcode->Opcode == MI.getOpcode())
897     return;
898 
899   MI.setOpcode(AdjustedRegClassOpcode->Opcode);
900   int Src0Idx =
901       AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);
902   int Src1Idx =
903       AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src1);
904   adjustMFMA_F8F6F4OpRegClass(MRI, MI.getOperand(Src0Idx),
905                               AdjustedRegClassOpcode->NumRegsSrcA);
906   adjustMFMA_F8F6F4OpRegClass(MRI, MI.getOperand(Src1Idx),
907                               AdjustedRegClassOpcode->NumRegsSrcB);
908 }
909 
910 struct VOPModifiers {
911   unsigned OpSel = 0;
912   unsigned OpSelHi = 0;
913   unsigned NegLo = 0;
914   unsigned NegHi = 0;
915 };
916 
917 // Reconstruct values of VOP3/VOP3P operands such as op_sel.
918 // Note that these values do not affect disassembler output,
919 // so this is only necessary for consistency with src_modifiers.
920 static VOPModifiers collectVOPModifiers(const MCInst &MI,
921                                         bool IsVOP3P = false) {
922   VOPModifiers Modifiers;
923   unsigned Opc = MI.getOpcode();
924   const int ModOps[] = {AMDGPU::OpName::src0_modifiers,
925                         AMDGPU::OpName::src1_modifiers,
926                         AMDGPU::OpName::src2_modifiers};
927   for (int J = 0; J < 3; ++J) {
928     int OpIdx = AMDGPU::getNamedOperandIdx(Opc, ModOps[J]);
929     if (OpIdx == -1)
930       continue;
931 
932     unsigned Val = MI.getOperand(OpIdx).getImm();
933 
934     Modifiers.OpSel |= !!(Val & SISrcMods::OP_SEL_0) << J;
935     if (IsVOP3P) {
936       Modifiers.OpSelHi |= !!(Val & SISrcMods::OP_SEL_1) << J;
937       Modifiers.NegLo |= !!(Val & SISrcMods::NEG) << J;
938       Modifiers.NegHi |= !!(Val & SISrcMods::NEG_HI) << J;
939     } else if (J == 0) {
940       Modifiers.OpSel |= !!(Val & SISrcMods::DST_OP_SEL) << 3;
941     }
942   }
943 
944   return Modifiers;
945 }
946 
947 // Instructions decode the op_sel/suffix bits into the src_modifier
948 // operands. Copy those bits into the src operands for true16 VGPRs.
949 void AMDGPUDisassembler::convertTrue16OpSel(MCInst &MI) const {
950   const unsigned Opc = MI.getOpcode();
951   const MCRegisterClass &ConversionRC =
952       MRI.getRegClass(AMDGPU::VGPR_16RegClassID);
953   constexpr std::array<std::tuple<int, int, unsigned>, 4> OpAndOpMods = {
954       {{AMDGPU::OpName::src0, AMDGPU::OpName::src0_modifiers,
955         SISrcMods::OP_SEL_0},
956        {AMDGPU::OpName::src1, AMDGPU::OpName::src1_modifiers,
957         SISrcMods::OP_SEL_0},
958        {AMDGPU::OpName::src2, AMDGPU::OpName::src2_modifiers,
959         SISrcMods::OP_SEL_0},
960        {AMDGPU::OpName::vdst, AMDGPU::OpName::src0_modifiers,
961         SISrcMods::DST_OP_SEL}}};
962   for (const auto &[OpName, OpModsName, OpSelMask] : OpAndOpMods) {
963     int OpIdx = AMDGPU::getNamedOperandIdx(Opc, OpName);
964     int OpModsIdx = AMDGPU::getNamedOperandIdx(Opc, OpModsName);
965     if (OpIdx == -1 || OpModsIdx == -1)
966       continue;
967     MCOperand &Op = MI.getOperand(OpIdx);
968     if (!Op.isReg())
969       continue;
970     if (!ConversionRC.contains(Op.getReg()))
971       continue;
972     unsigned OpEnc = MRI.getEncodingValue(Op.getReg());
973     const MCOperand &OpMods = MI.getOperand(OpModsIdx);
974     unsigned ModVal = OpMods.getImm();
975     if (ModVal & OpSelMask) { // isHi
976       unsigned RegIdx = OpEnc & AMDGPU::HWEncoding::REG_IDX_MASK;
977       Op.setReg(ConversionRC.getRegister(RegIdx * 2 + 1));
978     }
979   }
980 }
981 
982 // MAC opcodes have special old and src2 operands.
983 // src2 is tied to dst, while old is not tied (but assumed to be).
984 bool AMDGPUDisassembler::isMacDPP(MCInst &MI) const {
985   constexpr int DST_IDX = 0;
986   auto Opcode = MI.getOpcode();
987   const auto &Desc = MCII->get(Opcode);
988   auto OldIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::old);
989 
990   if (OldIdx != -1 && Desc.getOperandConstraint(
991                           OldIdx, MCOI::OperandConstraint::TIED_TO) == -1) {
992     assert(AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::src2));
993     assert(Desc.getOperandConstraint(
994                AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2),
995                MCOI::OperandConstraint::TIED_TO) == DST_IDX);
996     (void)DST_IDX;
997     return true;
998   }
999 
1000   return false;
1001 }
1002 
1003 // Create dummy old operand and insert dummy unused src2_modifiers
1004 void AMDGPUDisassembler::convertMacDPPInst(MCInst &MI) const {
1005   assert(MI.getNumOperands() + 1 < MCII->get(MI.getOpcode()).getNumOperands());
1006   insertNamedMCOperand(MI, MCOperand::createReg(0), AMDGPU::OpName::old);
1007   insertNamedMCOperand(MI, MCOperand::createImm(0),
1008                        AMDGPU::OpName::src2_modifiers);
1009 }
1010 
1011 void AMDGPUDisassembler::convertDPP8Inst(MCInst &MI) const {
1012   unsigned Opc = MI.getOpcode();
1013 
1014   int VDstInIdx =
1015       AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdst_in);
1016   if (VDstInIdx != -1)
1017     insertNamedMCOperand(MI, MI.getOperand(0), AMDGPU::OpName::vdst_in);
1018 
1019   unsigned DescNumOps = MCII->get(Opc).getNumOperands();
1020   if (MI.getNumOperands() < DescNumOps &&
1021       AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::op_sel)) {
1022     convertTrue16OpSel(MI);
1023     auto Mods = collectVOPModifiers(MI);
1024     insertNamedMCOperand(MI, MCOperand::createImm(Mods.OpSel),
1025                          AMDGPU::OpName::op_sel);
1026   } else {
1027     // Insert dummy unused src modifiers.
1028     if (MI.getNumOperands() < DescNumOps &&
1029         AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::src0_modifiers))
1030       insertNamedMCOperand(MI, MCOperand::createImm(0),
1031                            AMDGPU::OpName::src0_modifiers);
1032 
1033     if (MI.getNumOperands() < DescNumOps &&
1034         AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::src1_modifiers))
1035       insertNamedMCOperand(MI, MCOperand::createImm(0),
1036                            AMDGPU::OpName::src1_modifiers);
1037   }
1038 }
1039 
1040 void AMDGPUDisassembler::convertVOP3DPPInst(MCInst &MI) const {
1041   convertTrue16OpSel(MI);
1042 
1043   int VDstInIdx =
1044       AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdst_in);
1045   if (VDstInIdx != -1)
1046     insertNamedMCOperand(MI, MI.getOperand(0), AMDGPU::OpName::vdst_in);
1047 
1048   unsigned Opc = MI.getOpcode();
1049   unsigned DescNumOps = MCII->get(Opc).getNumOperands();
1050   if (MI.getNumOperands() < DescNumOps &&
1051       AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::op_sel)) {
1052     auto Mods = collectVOPModifiers(MI);
1053     insertNamedMCOperand(MI, MCOperand::createImm(Mods.OpSel),
1054                          AMDGPU::OpName::op_sel);
1055   }
1056 }
1057 
1058 // Note that before gfx10, the MIMG encoding provided no information about
1059 // VADDR size. Consequently, decoded instructions always show address as if it
1060 // has 1 dword, which could be not really so.
1061 void AMDGPUDisassembler::convertMIMGInst(MCInst &MI) const {
1062   auto TSFlags = MCII->get(MI.getOpcode()).TSFlags;
1063 
1064   int VDstIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
1065                                            AMDGPU::OpName::vdst);
1066 
1067   int VDataIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
1068                                             AMDGPU::OpName::vdata);
1069   int VAddr0Idx =
1070       AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0);
1071   int RsrcOpName = (TSFlags & SIInstrFlags::MIMG) ? AMDGPU::OpName::srsrc
1072                                                   : AMDGPU::OpName::rsrc;
1073   int RsrcIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), RsrcOpName);
1074   int DMaskIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
1075                                             AMDGPU::OpName::dmask);
1076 
1077   int TFEIdx   = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
1078                                             AMDGPU::OpName::tfe);
1079   int D16Idx   = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
1080                                             AMDGPU::OpName::d16);
1081 
1082   const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(MI.getOpcode());
1083   const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
1084       AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode);
1085 
1086   assert(VDataIdx != -1);
1087   if (BaseOpcode->BVH) {
1088     // Add A16 operand for intersect_ray instructions
1089     addOperand(MI, MCOperand::createImm(BaseOpcode->A16));
1090     return;
1091   }
1092 
1093   bool IsAtomic = (VDstIdx != -1);
1094   bool IsGather4 = TSFlags & SIInstrFlags::Gather4;
1095   bool IsVSample = TSFlags & SIInstrFlags::VSAMPLE;
1096   bool IsNSA = false;
1097   bool IsPartialNSA = false;
1098   unsigned AddrSize = Info->VAddrDwords;
1099 
1100   if (isGFX10Plus()) {
1101     unsigned DimIdx =
1102         AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::dim);
1103     int A16Idx =
1104         AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::a16);
1105     const AMDGPU::MIMGDimInfo *Dim =
1106         AMDGPU::getMIMGDimInfoByEncoding(MI.getOperand(DimIdx).getImm());
1107     const bool IsA16 = (A16Idx != -1 && MI.getOperand(A16Idx).getImm());
1108 
1109     AddrSize =
1110         AMDGPU::getAddrSizeMIMGOp(BaseOpcode, Dim, IsA16, AMDGPU::hasG16(STI));
1111 
1112     // VSAMPLE insts that do not use vaddr3 behave the same as NSA forms.
1113     // VIMAGE insts other than BVH never use vaddr4.
1114     IsNSA = Info->MIMGEncoding == AMDGPU::MIMGEncGfx10NSA ||
1115             Info->MIMGEncoding == AMDGPU::MIMGEncGfx11NSA ||
1116             Info->MIMGEncoding == AMDGPU::MIMGEncGfx12;
1117     if (!IsNSA) {
1118       if (!IsVSample && AddrSize > 12)
1119         AddrSize = 16;
1120     } else {
1121       if (AddrSize > Info->VAddrDwords) {
1122         if (!STI.hasFeature(AMDGPU::FeaturePartialNSAEncoding)) {
1123           // The NSA encoding does not contain enough operands for the
1124           // combination of base opcode / dimension. Should this be an error?
1125           return;
1126         }
1127         IsPartialNSA = true;
1128       }
1129     }
1130   }
1131 
1132   unsigned DMask = MI.getOperand(DMaskIdx).getImm() & 0xf;
1133   unsigned DstSize = IsGather4 ? 4 : std::max(llvm::popcount(DMask), 1);
1134 
1135   bool D16 = D16Idx >= 0 && MI.getOperand(D16Idx).getImm();
1136   if (D16 && AMDGPU::hasPackedD16(STI)) {
1137     DstSize = (DstSize + 1) / 2;
1138   }
1139 
1140   if (TFEIdx != -1 && MI.getOperand(TFEIdx).getImm())
1141     DstSize += 1;
1142 
1143   if (DstSize == Info->VDataDwords && AddrSize == Info->VAddrDwords)
1144     return;
1145 
1146   int NewOpcode =
1147       AMDGPU::getMIMGOpcode(Info->BaseOpcode, Info->MIMGEncoding, DstSize, AddrSize);
1148   if (NewOpcode == -1)
1149     return;
1150 
1151   // Widen the register to the correct number of enabled channels.
1152   MCRegister NewVdata;
1153   if (DstSize != Info->VDataDwords) {
1154     auto DataRCID = MCII->get(NewOpcode).operands()[VDataIdx].RegClass;
1155 
1156     // Get first subregister of VData
1157     MCRegister Vdata0 = MI.getOperand(VDataIdx).getReg();
1158     MCRegister VdataSub0 = MRI.getSubReg(Vdata0, AMDGPU::sub0);
1159     Vdata0 = (VdataSub0 != 0)? VdataSub0 : Vdata0;
1160 
1161     NewVdata = MRI.getMatchingSuperReg(Vdata0, AMDGPU::sub0,
1162                                        &MRI.getRegClass(DataRCID));
1163     if (!NewVdata) {
1164       // It's possible to encode this such that the low register + enabled
1165       // components exceeds the register count.
1166       return;
1167     }
1168   }
1169 
1170   // If not using NSA on GFX10+, widen vaddr0 address register to correct size.
1171   // If using partial NSA on GFX11+ widen last address register.
1172   int VAddrSAIdx = IsPartialNSA ? (RsrcIdx - 1) : VAddr0Idx;
1173   MCRegister NewVAddrSA;
1174   if (STI.hasFeature(AMDGPU::FeatureNSAEncoding) && (!IsNSA || IsPartialNSA) &&
1175       AddrSize != Info->VAddrDwords) {
1176     MCRegister VAddrSA = MI.getOperand(VAddrSAIdx).getReg();
1177     MCRegister VAddrSubSA = MRI.getSubReg(VAddrSA, AMDGPU::sub0);
1178     VAddrSA = VAddrSubSA ? VAddrSubSA : VAddrSA;
1179 
1180     auto AddrRCID = MCII->get(NewOpcode).operands()[VAddrSAIdx].RegClass;
1181     NewVAddrSA = MRI.getMatchingSuperReg(VAddrSA, AMDGPU::sub0,
1182                                         &MRI.getRegClass(AddrRCID));
1183     if (!NewVAddrSA)
1184       return;
1185   }
1186 
1187   MI.setOpcode(NewOpcode);
1188 
1189   if (NewVdata != AMDGPU::NoRegister) {
1190     MI.getOperand(VDataIdx) = MCOperand::createReg(NewVdata);
1191 
1192     if (IsAtomic) {
1193       // Atomic operations have an additional operand (a copy of data)
1194       MI.getOperand(VDstIdx) = MCOperand::createReg(NewVdata);
1195     }
1196   }
1197 
1198   if (NewVAddrSA) {
1199     MI.getOperand(VAddrSAIdx) = MCOperand::createReg(NewVAddrSA);
1200   } else if (IsNSA) {
1201     assert(AddrSize <= Info->VAddrDwords);
1202     MI.erase(MI.begin() + VAddr0Idx + AddrSize,
1203              MI.begin() + VAddr0Idx + Info->VAddrDwords);
1204   }
1205 }
1206 
1207 // Opsel and neg bits are used in src_modifiers and standalone operands. Autogen
1208 // decoder only adds to src_modifiers, so manually add the bits to the other
1209 // operands.
1210 void AMDGPUDisassembler::convertVOP3PDPPInst(MCInst &MI) const {
1211   unsigned Opc = MI.getOpcode();
1212   unsigned DescNumOps = MCII->get(Opc).getNumOperands();
1213   auto Mods = collectVOPModifiers(MI, true);
1214 
1215   if (MI.getNumOperands() < DescNumOps &&
1216       AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::vdst_in))
1217     insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::vdst_in);
1218 
1219   if (MI.getNumOperands() < DescNumOps &&
1220       AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::op_sel))
1221     insertNamedMCOperand(MI, MCOperand::createImm(Mods.OpSel),
1222                          AMDGPU::OpName::op_sel);
1223   if (MI.getNumOperands() < DescNumOps &&
1224       AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::op_sel_hi))
1225     insertNamedMCOperand(MI, MCOperand::createImm(Mods.OpSelHi),
1226                          AMDGPU::OpName::op_sel_hi);
1227   if (MI.getNumOperands() < DescNumOps &&
1228       AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::neg_lo))
1229     insertNamedMCOperand(MI, MCOperand::createImm(Mods.NegLo),
1230                          AMDGPU::OpName::neg_lo);
1231   if (MI.getNumOperands() < DescNumOps &&
1232       AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::neg_hi))
1233     insertNamedMCOperand(MI, MCOperand::createImm(Mods.NegHi),
1234                          AMDGPU::OpName::neg_hi);
1235 }
1236 
1237 // Create dummy old operand and insert optional operands
1238 void AMDGPUDisassembler::convertVOPCDPPInst(MCInst &MI) const {
1239   unsigned Opc = MI.getOpcode();
1240   unsigned DescNumOps = MCII->get(Opc).getNumOperands();
1241 
1242   if (MI.getNumOperands() < DescNumOps &&
1243       AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::old))
1244     insertNamedMCOperand(MI, MCOperand::createReg(0), AMDGPU::OpName::old);
1245 
1246   if (MI.getNumOperands() < DescNumOps &&
1247       AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::src0_modifiers))
1248     insertNamedMCOperand(MI, MCOperand::createImm(0),
1249                          AMDGPU::OpName::src0_modifiers);
1250 
1251   if (MI.getNumOperands() < DescNumOps &&
1252       AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::src1_modifiers))
1253     insertNamedMCOperand(MI, MCOperand::createImm(0),
1254                          AMDGPU::OpName::src1_modifiers);
1255 }
1256 
1257 void AMDGPUDisassembler::convertFMAanyK(MCInst &MI, int ImmLitIdx) const {
1258   assert(HasLiteral && "Should have decoded a literal");
1259   const MCInstrDesc &Desc = MCII->get(MI.getOpcode());
1260   unsigned DescNumOps = Desc.getNumOperands();
1261   insertNamedMCOperand(MI, MCOperand::createImm(Literal),
1262                        AMDGPU::OpName::immDeferred);
1263   assert(DescNumOps == MI.getNumOperands());
1264   for (unsigned I = 0; I < DescNumOps; ++I) {
1265     auto &Op = MI.getOperand(I);
1266     auto OpType = Desc.operands()[I].OperandType;
1267     bool IsDeferredOp = (OpType == AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED ||
1268                          OpType == AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED);
1269     if (Op.isImm() && Op.getImm() == AMDGPU::EncValues::LITERAL_CONST &&
1270         IsDeferredOp)
1271       Op.setImm(Literal);
1272   }
1273 }
1274 
1275 const char* AMDGPUDisassembler::getRegClassName(unsigned RegClassID) const {
1276   return getContext().getRegisterInfo()->
1277     getRegClassName(&AMDGPUMCRegisterClasses[RegClassID]);
1278 }
1279 
1280 inline
1281 MCOperand AMDGPUDisassembler::errOperand(unsigned V,
1282                                          const Twine& ErrMsg) const {
1283   *CommentStream << "Error: " + ErrMsg;
1284 
1285   // ToDo: add support for error operands to MCInst.h
1286   // return MCOperand::createError(V);
1287   return MCOperand();
1288 }
1289 
1290 inline
1291 MCOperand AMDGPUDisassembler::createRegOperand(unsigned int RegId) const {
1292   return MCOperand::createReg(AMDGPU::getMCReg(RegId, STI));
1293 }
1294 
1295 inline
1296 MCOperand AMDGPUDisassembler::createRegOperand(unsigned RegClassID,
1297                                                unsigned Val) const {
1298   const auto& RegCl = AMDGPUMCRegisterClasses[RegClassID];
1299   if (Val >= RegCl.getNumRegs())
1300     return errOperand(Val, Twine(getRegClassName(RegClassID)) +
1301                            ": unknown register " + Twine(Val));
1302   return createRegOperand(RegCl.getRegister(Val));
1303 }
1304 
1305 inline
1306 MCOperand AMDGPUDisassembler::createSRegOperand(unsigned SRegClassID,
1307                                                 unsigned Val) const {
1308   // ToDo: SI/CI have 104 SGPRs, VI has 102.
1309   // Valery: here we accept as much as we can and let the assembler sort it out.
1310   int shift = 0;
1311   switch (SRegClassID) {
1312   case AMDGPU::SGPR_32RegClassID:
1313   case AMDGPU::TTMP_32RegClassID:
1314     break;
1315   case AMDGPU::SGPR_64RegClassID:
1316   case AMDGPU::TTMP_64RegClassID:
1317     shift = 1;
1318     break;
1319   case AMDGPU::SGPR_96RegClassID:
1320   case AMDGPU::TTMP_96RegClassID:
1321   case AMDGPU::SGPR_128RegClassID:
1322   case AMDGPU::TTMP_128RegClassID:
1323   // ToDo: unclear if s[100:104] is available on VI. Can we use VCC as SGPR in
1324   // this bundle?
1325   case AMDGPU::SGPR_256RegClassID:
1326   case AMDGPU::TTMP_256RegClassID:
1327   // ToDo: unclear if s[96:104] is available on VI. Can we use VCC as SGPR in
1328   // this bundle?
1329   case AMDGPU::SGPR_288RegClassID:
1330   case AMDGPU::TTMP_288RegClassID:
1331   case AMDGPU::SGPR_320RegClassID:
1332   case AMDGPU::TTMP_320RegClassID:
1333   case AMDGPU::SGPR_352RegClassID:
1334   case AMDGPU::TTMP_352RegClassID:
1335   case AMDGPU::SGPR_384RegClassID:
1336   case AMDGPU::TTMP_384RegClassID:
1337   case AMDGPU::SGPR_512RegClassID:
1338   case AMDGPU::TTMP_512RegClassID:
1339     shift = 2;
1340     break;
1341   // ToDo: unclear if s[88:104] is available on VI. Can we use VCC as SGPR in
1342   // this bundle?
1343   default:
1344     llvm_unreachable("unhandled register class");
1345   }
1346 
1347   if (Val % (1 << shift)) {
1348     *CommentStream << "Warning: " << getRegClassName(SRegClassID)
1349                    << ": scalar reg isn't aligned " << Val;
1350   }
1351 
1352   return createRegOperand(SRegClassID, Val >> shift);
1353 }
1354 
1355 MCOperand AMDGPUDisassembler::createVGPR16Operand(unsigned RegIdx,
1356                                                   bool IsHi) const {
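       // Each 32-bit VGPR maps to two consecutive 16-bit registers; IsHi selects
       // the high half.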
1357   unsigned RegIdxInVGPR16 = RegIdx * 2 + (IsHi ? 1 : 0);
1358   return createRegOperand(AMDGPU::VGPR_16RegClassID, RegIdxInVGPR16);
1359 }
1360 
1361 // Decode literals for instructions which always have a literal in the encoding.
1362 MCOperand
1363 AMDGPUDisassembler::decodeMandatoryLiteralConstant(unsigned Val) const {
1364   if (HasLiteral) {
1365     assert(
1366         AMDGPU::hasVOPD(STI) &&
1367         "Should only decode multiple kimm with VOPD, check VSrc operand types");
1368     if (Literal != Val)
1369       return errOperand(Val, "More than one unique literal is illegal");
1370   }
1371   HasLiteral = true;
1372   Literal = Val;
1373   return MCOperand::createImm(Literal);
1374 }
1375 
1376 MCOperand AMDGPUDisassembler::decodeLiteralConstant(bool ExtendFP64) const {
1377   // For now all literal constants are supposed to be unsigned integers.
1378   // ToDo: deal with signed/unsigned 64-bit integer constants
1379   // ToDo: deal with float/double constants
1380   if (!HasLiteral) {
1381     if (Bytes.size() < 4) {
1382       return errOperand(0, "cannot read literal, inst bytes left " +
1383                         Twine(Bytes.size()));
1384     }
1385     HasLiteral = true;
1386     Literal = Literal64 = eatBytes<uint32_t>(Bytes);
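         // A 64-bit FP literal encodes only the high 32 bits, so shift the
         // decoded dword into the upper half.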
1387     if (ExtendFP64)
1388       Literal64 <<= 32;
1389   }
1390   return MCOperand::createImm(ExtendFP64 ? Literal64 : Literal);
1391 }
1392 
1393 MCOperand AMDGPUDisassembler::decodeIntImmed(unsigned Imm) {
1394   using namespace AMDGPU::EncValues;
1395 
1396   assert(Imm >= INLINE_INTEGER_C_MIN && Imm <= INLINE_INTEGER_C_MAX);
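       // Encodings up to INLINE_INTEGER_C_POSITIVE_MAX map to non-negative
       // integers counted up from INLINE_INTEGER_C_MIN; the remaining encodings
       // map to negative integers counted down from the positive maximum.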
1397   return MCOperand::createImm((Imm <= INLINE_INTEGER_C_POSITIVE_MAX) ?
1398     (static_cast<int64_t>(Imm) - INLINE_INTEGER_C_MIN) :
1399     (INLINE_INTEGER_C_POSITIVE_MAX - static_cast<int64_t>(Imm)));
1400       // Cast prevents negative overflow.
1401 }
1402 
1403 static int64_t getInlineImmVal32(unsigned Imm) {
1404   switch (Imm) {
1405   case 240:
1406     return llvm::bit_cast<uint32_t>(0.5f);
1407   case 241:
1408     return llvm::bit_cast<uint32_t>(-0.5f);
1409   case 242:
1410     return llvm::bit_cast<uint32_t>(1.0f);
1411   case 243:
1412     return llvm::bit_cast<uint32_t>(-1.0f);
1413   case 244:
1414     return llvm::bit_cast<uint32_t>(2.0f);
1415   case 245:
1416     return llvm::bit_cast<uint32_t>(-2.0f);
1417   case 246:
1418     return llvm::bit_cast<uint32_t>(4.0f);
1419   case 247:
1420     return llvm::bit_cast<uint32_t>(-4.0f);
1421   case 248: // 1 / (2 * PI)
1422     return 0x3e22f983;
1423   default:
1424     llvm_unreachable("invalid fp inline imm");
1425   }
1426 }
1427 
1428 static int64_t getInlineImmVal64(unsigned Imm) {
1429   switch (Imm) {
1430   case 240:
1431     return llvm::bit_cast<uint64_t>(0.5);
1432   case 241:
1433     return llvm::bit_cast<uint64_t>(-0.5);
1434   case 242:
1435     return llvm::bit_cast<uint64_t>(1.0);
1436   case 243:
1437     return llvm::bit_cast<uint64_t>(-1.0);
1438   case 244:
1439     return llvm::bit_cast<uint64_t>(2.0);
1440   case 245:
1441     return llvm::bit_cast<uint64_t>(-2.0);
1442   case 246:
1443     return llvm::bit_cast<uint64_t>(4.0);
1444   case 247:
1445     return llvm::bit_cast<uint64_t>(-4.0);
1446   case 248: // 1 / (2 * PI)
1447     return 0x3fc45f306dc9c882;
1448   default:
1449     llvm_unreachable("invalid fp inline imm");
1450   }
1451 }
1452 
1453 static int64_t getInlineImmValF16(unsigned Imm) {
1454   switch (Imm) {
1455   case 240:
1456     return 0x3800;
1457   case 241:
1458     return 0xB800;
1459   case 242:
1460     return 0x3C00;
1461   case 243:
1462     return 0xBC00;
1463   case 244:
1464     return 0x4000;
1465   case 245:
1466     return 0xC000;
1467   case 246:
1468     return 0x4400;
1469   case 247:
1470     return 0xC400;
1471   case 248: // 1 / (2 * PI)
1472     return 0x3118;
1473   default:
1474     llvm_unreachable("invalid fp inline imm");
1475   }
1476 }
1477 
1478 static int64_t getInlineImmValBF16(unsigned Imm) {
1479   switch (Imm) {
1480   case 240:
1481     return 0x3F00;
1482   case 241:
1483     return 0xBF00;
1484   case 242:
1485     return 0x3F80;
1486   case 243:
1487     return 0xBF80;
1488   case 244:
1489     return 0x4000;
1490   case 245:
1491     return 0xC000;
1492   case 246:
1493     return 0x4080;
1494   case 247:
1495     return 0xC080;
1496   case 248: // 1 / (2 * PI)
1497     return 0x3E22;
1498   default:
1499     llvm_unreachable("invalid fp inline imm");
1500   }
1501 }
1502 
1503 static int64_t getInlineImmVal16(unsigned Imm, AMDGPU::OperandSemantics Sema) {
1504   return (Sema == AMDGPU::OperandSemantics::BF16) ? getInlineImmValBF16(Imm)
1505                                                   : getInlineImmValF16(Imm);
1506 }
1507 
1508 MCOperand AMDGPUDisassembler::decodeFPImmed(unsigned ImmWidth, unsigned Imm,
1509                                             AMDGPU::OperandSemantics Sema) {
1510   assert(Imm >= AMDGPU::EncValues::INLINE_FLOATING_C_MIN &&
1511          Imm <= AMDGPU::EncValues::INLINE_FLOATING_C_MAX);
1512 
1513   // ToDo: case 248: 1/(2*PI) - is allowed only on VI
1514   // ImmWidth 0 is the default case where the operand should not allow immediates.
1515   // The Imm value is still decoded into a 32-bit immediate operand; the inst
1516   // printer will use it to print a verbose error message.
1517   switch (ImmWidth) {
1518   case 0:
1519   case 32:
1520     return MCOperand::createImm(getInlineImmVal32(Imm));
1521   case 64:
1522     return MCOperand::createImm(getInlineImmVal64(Imm));
1523   case 16:
1524     return MCOperand::createImm(getInlineImmVal16(Imm, Sema));
1525   default:
1526     llvm_unreachable("implement me");
1527   }
1528 }
1529 
1530 unsigned AMDGPUDisassembler::getVgprClassId(const OpWidthTy Width) const {
1531   using namespace AMDGPU;
1532 
1533   assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
1534   switch (Width) {
1535   default: // fall
1536   case OPW32:
1537   case OPW16:
1538   case OPWV216:
1539     return VGPR_32RegClassID;
1540   case OPW64:
1541   case OPWV232: return VReg_64RegClassID;
1542   case OPW96: return VReg_96RegClassID;
1543   case OPW128: return VReg_128RegClassID;
1544   case OPW192: return VReg_192RegClassID;
1545   case OPW160: return VReg_160RegClassID;
1546   case OPW256: return VReg_256RegClassID;
1547   case OPW288: return VReg_288RegClassID;
1548   case OPW320: return VReg_320RegClassID;
1549   case OPW352: return VReg_352RegClassID;
1550   case OPW384: return VReg_384RegClassID;
1551   case OPW512: return VReg_512RegClassID;
1552   case OPW1024: return VReg_1024RegClassID;
1553   }
1554 }
1555 
1556 unsigned AMDGPUDisassembler::getAgprClassId(const OpWidthTy Width) const {
1557   using namespace AMDGPU;
1558 
1559   assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
1560   switch (Width) {
1561   default: // fall
1562   case OPW32:
1563   case OPW16:
1564   case OPWV216:
1565     return AGPR_32RegClassID;
1566   case OPW64:
1567   case OPWV232: return AReg_64RegClassID;
1568   case OPW96: return AReg_96RegClassID;
1569   case OPW128: return AReg_128RegClassID;
1570   case OPW160: return AReg_160RegClassID;
1571   case OPW256: return AReg_256RegClassID;
1572   case OPW288: return AReg_288RegClassID;
1573   case OPW320: return AReg_320RegClassID;
1574   case OPW352: return AReg_352RegClassID;
1575   case OPW384: return AReg_384RegClassID;
1576   case OPW512: return AReg_512RegClassID;
1577   case OPW1024: return AReg_1024RegClassID;
1578   }
1579 }
1580 
1581 
1582 unsigned AMDGPUDisassembler::getSgprClassId(const OpWidthTy Width) const {
1583   using namespace AMDGPU;
1584 
1585   assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
1586   switch (Width) {
1587   default: // fall
1588   case OPW32:
1589   case OPW16:
1590   case OPWV216:
1591     return SGPR_32RegClassID;
1592   case OPW64:
1593   case OPWV232: return SGPR_64RegClassID;
1594   case OPW96: return SGPR_96RegClassID;
1595   case OPW128: return SGPR_128RegClassID;
1596   case OPW160: return SGPR_160RegClassID;
1597   case OPW256: return SGPR_256RegClassID;
1598   case OPW288: return SGPR_288RegClassID;
1599   case OPW320: return SGPR_320RegClassID;
1600   case OPW352: return SGPR_352RegClassID;
1601   case OPW384: return SGPR_384RegClassID;
1602   case OPW512: return SGPR_512RegClassID;
1603   }
1604 }
1605 
1606 unsigned AMDGPUDisassembler::getTtmpClassId(const OpWidthTy Width) const {
1607   using namespace AMDGPU;
1608 
1609   assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
1610   switch (Width) {
1611   default: // fall
1612   case OPW32:
1613   case OPW16:
1614   case OPWV216:
1615     return TTMP_32RegClassID;
1616   case OPW64:
1617   case OPWV232: return TTMP_64RegClassID;
1618   case OPW128: return TTMP_128RegClassID;
1619   case OPW256: return TTMP_256RegClassID;
1620   case OPW288: return TTMP_288RegClassID;
1621   case OPW320: return TTMP_320RegClassID;
1622   case OPW352: return TTMP_352RegClassID;
1623   case OPW384: return TTMP_384RegClassID;
1624   case OPW512: return TTMP_512RegClassID;
1625   }
1626 }
1627 
1628 int AMDGPUDisassembler::getTTmpIdx(unsigned Val) const {
1629   using namespace AMDGPU::EncValues;
1630 
1631   unsigned TTmpMin = isGFX9Plus() ? TTMP_GFX9PLUS_MIN : TTMP_VI_MIN;
1632   unsigned TTmpMax = isGFX9Plus() ? TTMP_GFX9PLUS_MAX : TTMP_VI_MAX;
1633 
1634   return (TTmpMin <= Val && Val <= TTmpMax)? Val - TTmpMin : -1;
1635 }
1636 
1637 MCOperand AMDGPUDisassembler::decodeSrcOp(const OpWidthTy Width, unsigned Val,
1638                                           bool MandatoryLiteral,
1639                                           unsigned ImmWidth,
1640                                           AMDGPU::OperandSemantics Sema) const {
1641   using namespace AMDGPU::EncValues;
1642 
1643   assert(Val < 1024); // enum10
1644 
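       // Bit 9 of the 10-bit (enum10) source encoding selects the AGPR bank; the
       // low 9 bits hold the register number or a non-VGPR source encoding.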
1645   bool IsAGPR = Val & 512;
1646   Val &= 511;
1647 
1648   if (VGPR_MIN <= Val && Val <= VGPR_MAX) {
1649     return createRegOperand(IsAGPR ? getAgprClassId(Width)
1650                                    : getVgprClassId(Width), Val - VGPR_MIN);
1651   }
1652   return decodeNonVGPRSrcOp(Width, Val & 0xFF, MandatoryLiteral, ImmWidth,
1653                             Sema);
1654 }
1655 
1656 MCOperand
1657 AMDGPUDisassembler::decodeNonVGPRSrcOp(const OpWidthTy Width, unsigned Val,
1658                                        bool MandatoryLiteral, unsigned ImmWidth,
1659                                        AMDGPU::OperandSemantics Sema) const {
1660   // Cases where Val{8} is 1 (VGPR, AGPR, or true16 VGPR) should have been
1661   // decoded earlier.
1662   assert(Val < (1 << 8) && "9-bit Src encoding when Val{8} is 0");
1663   using namespace AMDGPU::EncValues;
1664 
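       // Decode in order of the encoding ranges: SGPRs, trap temporaries (TTMPs),
       // inline integer constants, inline FP constants, the literal marker, and
       // finally the special registers.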
1665   if (Val <= SGPR_MAX) {
1666     // "SGPR_MIN <= Val" is always true and causes compilation warning.
1667     static_assert(SGPR_MIN == 0);
1668     return createSRegOperand(getSgprClassId(Width), Val - SGPR_MIN);
1669   }
1670 
1671   int TTmpIdx = getTTmpIdx(Val);
1672   if (TTmpIdx >= 0) {
1673     return createSRegOperand(getTtmpClassId(Width), TTmpIdx);
1674   }
1675 
1676   if (INLINE_INTEGER_C_MIN <= Val && Val <= INLINE_INTEGER_C_MAX)
1677     return decodeIntImmed(Val);
1678 
1679   if (INLINE_FLOATING_C_MIN <= Val && Val <= INLINE_FLOATING_C_MAX)
1680     return decodeFPImmed(ImmWidth, Val, Sema);
1681 
1682   if (Val == LITERAL_CONST) {
1683     if (MandatoryLiteral)
1684       // Keep a sentinel value for deferred setting
1685       return MCOperand::createImm(LITERAL_CONST);
1686     return decodeLiteralConstant(Sema == AMDGPU::OperandSemantics::FP64);
1687   }
1688 
1689   switch (Width) {
1690   case OPW32:
1691   case OPW16:
1692   case OPWV216:
1693     return decodeSpecialReg32(Val);
1694   case OPW64:
1695   case OPWV232:
1696     return decodeSpecialReg64(Val);
1697   case OPW96:
1698   case OPW128:
1699   case OPW256:
1700   case OPW512:
1701     return decodeSpecialReg96Plus(Val);
1702   default:
1703     llvm_unreachable("unexpected immediate type");
1704   }
1705 }
1706 
1707 // Bit 0 of DstY isn't stored in the instruction, because it's always the
1708 // opposite of bit 0 of DstX.
1709 MCOperand AMDGPUDisassembler::decodeVOPDDstYOp(MCInst &Inst,
1710                                                unsigned Val) const {
1711   int VDstXInd =
1712       AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::vdstX);
1713   assert(VDstXInd != -1);
1714   assert(Inst.getOperand(VDstXInd).isReg());
1715   unsigned XDstReg = MRI.getEncodingValue(Inst.getOperand(VDstXInd).getReg());
1716   Val |= ~XDstReg & 1;
1717   auto Width = llvm::AMDGPUDisassembler::OPW32;
1718   return createRegOperand(getVgprClassId(Width), Val);
1719 }
1720 
1721 MCOperand AMDGPUDisassembler::decodeSpecialReg32(unsigned Val) const {
1722   using namespace AMDGPU;
1723 
1724   switch (Val) {
1725   // clang-format off
1726   case 102: return createRegOperand(FLAT_SCR_LO);
1727   case 103: return createRegOperand(FLAT_SCR_HI);
1728   case 104: return createRegOperand(XNACK_MASK_LO);
1729   case 105: return createRegOperand(XNACK_MASK_HI);
1730   case 106: return createRegOperand(VCC_LO);
1731   case 107: return createRegOperand(VCC_HI);
1732   case 108: return createRegOperand(TBA_LO);
1733   case 109: return createRegOperand(TBA_HI);
1734   case 110: return createRegOperand(TMA_LO);
1735   case 111: return createRegOperand(TMA_HI);
1736   case 124:
1737     return isGFX11Plus() ? createRegOperand(SGPR_NULL) : createRegOperand(M0);
1738   case 125:
1739     return isGFX11Plus() ? createRegOperand(M0) : createRegOperand(SGPR_NULL);
1740   case 126: return createRegOperand(EXEC_LO);
1741   case 127: return createRegOperand(EXEC_HI);
1742   case 235: return createRegOperand(SRC_SHARED_BASE_LO);
1743   case 236: return createRegOperand(SRC_SHARED_LIMIT_LO);
1744   case 237: return createRegOperand(SRC_PRIVATE_BASE_LO);
1745   case 238: return createRegOperand(SRC_PRIVATE_LIMIT_LO);
1746   case 239: return createRegOperand(SRC_POPS_EXITING_WAVE_ID);
1747   case 251: return createRegOperand(SRC_VCCZ);
1748   case 252: return createRegOperand(SRC_EXECZ);
1749   case 253: return createRegOperand(SRC_SCC);
1750   case 254: return createRegOperand(LDS_DIRECT);
1751   default: break;
1752     // clang-format on
1753   }
1754   return errOperand(Val, "unknown operand encoding " + Twine(Val));
1755 }
1756 
1757 MCOperand AMDGPUDisassembler::decodeSpecialReg64(unsigned Val) const {
1758   using namespace AMDGPU;
1759 
1760   switch (Val) {
1761   case 102: return createRegOperand(FLAT_SCR);
1762   case 104: return createRegOperand(XNACK_MASK);
1763   case 106: return createRegOperand(VCC);
1764   case 108: return createRegOperand(TBA);
1765   case 110: return createRegOperand(TMA);
1766   case 124:
1767     if (isGFX11Plus())
1768       return createRegOperand(SGPR_NULL);
1769     break;
1770   case 125:
1771     if (!isGFX11Plus())
1772       return createRegOperand(SGPR_NULL);
1773     break;
1774   case 126: return createRegOperand(EXEC);
1775   case 235: return createRegOperand(SRC_SHARED_BASE);
1776   case 236: return createRegOperand(SRC_SHARED_LIMIT);
1777   case 237: return createRegOperand(SRC_PRIVATE_BASE);
1778   case 238: return createRegOperand(SRC_PRIVATE_LIMIT);
1779   case 239: return createRegOperand(SRC_POPS_EXITING_WAVE_ID);
1780   case 251: return createRegOperand(SRC_VCCZ);
1781   case 252: return createRegOperand(SRC_EXECZ);
1782   case 253: return createRegOperand(SRC_SCC);
1783   default: break;
1784   }
1785   return errOperand(Val, "unknown operand encoding " + Twine(Val));
1786 }
1787 
1788 MCOperand AMDGPUDisassembler::decodeSpecialReg96Plus(unsigned Val) const {
1789   using namespace AMDGPU;
1790 
1791   switch (Val) {
1792   case 124:
1793     if (isGFX11Plus())
1794       return createRegOperand(SGPR_NULL);
1795     break;
1796   case 125:
1797     if (!isGFX11Plus())
1798       return createRegOperand(SGPR_NULL);
1799     break;
1800   default:
1801     break;
1802   }
1803   return errOperand(Val, "unknown operand encoding " + Twine(Val));
1804 }
1805 
1806 MCOperand
1807 AMDGPUDisassembler::decodeSDWASrc(const OpWidthTy Width, const unsigned Val,
1808                                   unsigned ImmWidth,
1809                                   AMDGPU::OperandSemantics Sema) const {
1810   using namespace AMDGPU::SDWA;
1811   using namespace AMDGPU::EncValues;
1812 
1813   if (STI.hasFeature(AMDGPU::FeatureGFX9) ||
1814       STI.hasFeature(AMDGPU::FeatureGFX10)) {
1815     // XXX: the cast to int is needed to avoid a warning that the
1816     // comparison with unsigned is always true.
1817     if (int(SDWA9EncValues::SRC_VGPR_MIN) <= int(Val) &&
1818         Val <= SDWA9EncValues::SRC_VGPR_MAX) {
1819       return createRegOperand(getVgprClassId(Width),
1820                               Val - SDWA9EncValues::SRC_VGPR_MIN);
1821     }
1822     if (SDWA9EncValues::SRC_SGPR_MIN <= Val &&
1823         Val <= (isGFX10Plus() ? SDWA9EncValues::SRC_SGPR_MAX_GFX10
1824                               : SDWA9EncValues::SRC_SGPR_MAX_SI)) {
1825       return createSRegOperand(getSgprClassId(Width),
1826                                Val - SDWA9EncValues::SRC_SGPR_MIN);
1827     }
1828     if (SDWA9EncValues::SRC_TTMP_MIN <= Val &&
1829         Val <= SDWA9EncValues::SRC_TTMP_MAX) {
1830       return createSRegOperand(getTtmpClassId(Width),
1831                                Val - SDWA9EncValues::SRC_TTMP_MIN);
1832     }
1833 
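         // The remaining encodings are biased by SRC_SGPR_MIN; remove the bias
         // before decoding inline constants and special registers.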
1834     const unsigned SVal = Val - SDWA9EncValues::SRC_SGPR_MIN;
1835 
1836     if (INLINE_INTEGER_C_MIN <= SVal && SVal <= INLINE_INTEGER_C_MAX)
1837       return decodeIntImmed(SVal);
1838 
1839     if (INLINE_FLOATING_C_MIN <= SVal && SVal <= INLINE_FLOATING_C_MAX)
1840       return decodeFPImmed(ImmWidth, SVal, Sema);
1841 
1842     return decodeSpecialReg32(SVal);
1843   }
1844   if (STI.hasFeature(AMDGPU::FeatureVolcanicIslands))
1845     return createRegOperand(getVgprClassId(Width), Val);
1846   llvm_unreachable("unsupported target");
1847 }
1848 
1849 MCOperand AMDGPUDisassembler::decodeSDWASrc16(unsigned Val) const {
1850   return decodeSDWASrc(OPW16, Val, 16, AMDGPU::OperandSemantics::FP16);
1851 }
1852 
1853 MCOperand AMDGPUDisassembler::decodeSDWASrc32(unsigned Val) const {
1854   return decodeSDWASrc(OPW32, Val, 32, AMDGPU::OperandSemantics::FP32);
1855 }
1856 
1857 MCOperand AMDGPUDisassembler::decodeSDWAVopcDst(unsigned Val) const {
1858   using namespace AMDGPU::SDWA;
1859 
1860   assert((STI.hasFeature(AMDGPU::FeatureGFX9) ||
1861           STI.hasFeature(AMDGPU::FeatureGFX10)) &&
1862          "SDWAVopcDst should be present only on GFX9+");
1863 
1864   bool IsWave32 = STI.hasFeature(AMDGPU::FeatureWavefrontSize32);
1865 
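       // If the VCC_MASK bit is clear the destination is the implicit VCC;
       // otherwise the remaining bits encode an SGPR (or TTMP / special register)
       // destination.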
1866   if (Val & SDWA9EncValues::VOPC_DST_VCC_MASK) {
1867     Val &= SDWA9EncValues::VOPC_DST_SGPR_MASK;
1868 
1869     int TTmpIdx = getTTmpIdx(Val);
1870     if (TTmpIdx >= 0) {
1871       auto TTmpClsId = getTtmpClassId(IsWave32 ? OPW32 : OPW64);
1872       return createSRegOperand(TTmpClsId, TTmpIdx);
1873     }
1874     if (Val > SGPR_MAX) {
1875       return IsWave32 ? decodeSpecialReg32(Val) : decodeSpecialReg64(Val);
1876     }
1877     return createSRegOperand(getSgprClassId(IsWave32 ? OPW32 : OPW64), Val);
1878   }
1879   return createRegOperand(IsWave32 ? AMDGPU::VCC_LO : AMDGPU::VCC);
1880 }
1881 
1882 MCOperand AMDGPUDisassembler::decodeBoolReg(unsigned Val) const {
1883   return STI.hasFeature(AMDGPU::FeatureWavefrontSize32)
1884              ? decodeSrcOp(OPW32, Val)
1885              : decodeSrcOp(OPW64, Val);
1886 }
1887 
1888 MCOperand AMDGPUDisassembler::decodeSplitBarrier(unsigned Val) const {
1889   return decodeSrcOp(OPW32, Val);
1890 }
1891 
1892 MCOperand AMDGPUDisassembler::decodeDpp8FI(unsigned Val) const {
1893   if (Val != AMDGPU::DPP::DPP8_FI_0 && Val != AMDGPU::DPP::DPP8_FI_1)
1894     return MCOperand();
1895   return MCOperand::createImm(Val);
1896 }
1897 
1898 MCOperand AMDGPUDisassembler::decodeVersionImm(unsigned Imm) const {
1899   using VersionField = AMDGPU::EncodingField<7, 0>;
1900   using W64Bit = AMDGPU::EncodingBit<13>;
1901   using W32Bit = AMDGPU::EncodingBit<14>;
1902   using MDPBit = AMDGPU::EncodingBit<15>;
1903   using Encoding = AMDGPU::EncodingFields<VersionField, W64Bit, W32Bit, MDPBit>;
1904 
1905   auto [Version, W64, W32, MDP] = Encoding::decode(Imm);
1906 
1907   // Decode into a plain immediate if any unused bits are set.
1908   if (Encoding::encode(Version, W64, W32, MDP) != Imm)
1909     return MCOperand::createImm(Imm);
1910 
1911   const auto &Versions = AMDGPU::UCVersion::getGFXVersions();
1912   const auto *I = find_if(
1913       Versions, [Version = Version](const AMDGPU::UCVersion::GFXVersion &V) {
1914         return V.Code == Version;
1915       });
1916   MCContext &Ctx = getContext();
1917   const MCExpr *E;
1918   if (I == Versions.end())
1919     E = MCConstantExpr::create(Version, Ctx);
1920   else
1921     E = MCSymbolRefExpr::create(Ctx.getOrCreateSymbol(I->Symbol), Ctx);
1922 
1923   if (W64)
1924     E = MCBinaryExpr::createOr(E, UCVersionW64Expr, Ctx);
1925   if (W32)
1926     E = MCBinaryExpr::createOr(E, UCVersionW32Expr, Ctx);
1927   if (MDP)
1928     E = MCBinaryExpr::createOr(E, UCVersionMDPExpr, Ctx);
1929 
1930   return MCOperand::createExpr(E);
1931 }
1932 
1933 bool AMDGPUDisassembler::isVI() const {
1934   return STI.hasFeature(AMDGPU::FeatureVolcanicIslands);
1935 }
1936 
1937 bool AMDGPUDisassembler::isGFX9() const { return AMDGPU::isGFX9(STI); }
1938 
1939 bool AMDGPUDisassembler::isGFX90A() const {
1940   return STI.hasFeature(AMDGPU::FeatureGFX90AInsts);
1941 }
1942 
1943 bool AMDGPUDisassembler::isGFX9Plus() const { return AMDGPU::isGFX9Plus(STI); }
1944 
1945 bool AMDGPUDisassembler::isGFX10() const { return AMDGPU::isGFX10(STI); }
1946 
1947 bool AMDGPUDisassembler::isGFX10Plus() const {
1948   return AMDGPU::isGFX10Plus(STI);
1949 }
1950 
1951 bool AMDGPUDisassembler::isGFX11() const {
1952   return STI.hasFeature(AMDGPU::FeatureGFX11);
1953 }
1954 
1955 bool AMDGPUDisassembler::isGFX11Plus() const {
1956   return AMDGPU::isGFX11Plus(STI);
1957 }
1958 
1959 bool AMDGPUDisassembler::isGFX12() const {
1960   return STI.hasFeature(AMDGPU::FeatureGFX12);
1961 }
1962 
1963 bool AMDGPUDisassembler::isGFX12Plus() const {
1964   return AMDGPU::isGFX12Plus(STI);
1965 }
1966 
1967 bool AMDGPUDisassembler::hasArchitectedFlatScratch() const {
1968   return STI.hasFeature(AMDGPU::FeatureArchitectedFlatScratch);
1969 }
1970 
1971 bool AMDGPUDisassembler::hasKernargPreload() const {
1972   return AMDGPU::hasKernargPreload(STI);
1973 }
1974 
1975 //===----------------------------------------------------------------------===//
1976 // AMDGPU specific symbol handling
1977 //===----------------------------------------------------------------------===//
1978 
1979 /// Build a string describing the reserved bit range specified by Mask, offset
1980 /// by BaseBytes, for use in error comments. Mask is a single contiguous range
1981 /// of 1s surrounded by zeros. The format here is meant to align with the
1982 /// tables that describe these bits in llvm.org/docs/AMDGPUUsage.html.
1983 static SmallString<32> getBitRangeFromMask(uint32_t Mask, unsigned BaseBytes) {
1984   SmallString<32> Result;
1985   raw_svector_ostream S(Result);
1986 
1987   int TrailingZeros = llvm::countr_zero(Mask);
1988   int PopCount = llvm::popcount(Mask);
1989 
1990   if (PopCount == 1) {
1991     S << "bit (" << (TrailingZeros + BaseBytes * CHAR_BIT) << ')';
1992   } else {
1993     S << "bits in range ("
1994       << (TrailingZeros + PopCount - 1 + BaseBytes * CHAR_BIT) << ':'
1995       << (TrailingZeros + BaseBytes * CHAR_BIT) << ')';
1996   }
1997 
1998   return Result;
1999 }
2000 
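     // Helper macros for decoding kernel descriptor words: GET_FIELD extracts a
     // field from FourByteBuffer, PRINT_DIRECTIVE emits the corresponding
     // .amdhsa_* directive, PRINT_PSEUDO_DIRECTIVE_COMMENT emits it as an
     // assembler comment, and the CHECK_RESERVED_BITS* variants return an error
     // if any of the given reserved bits are set.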
2001 #define GET_FIELD(MASK) (AMDHSA_BITS_GET(FourByteBuffer, MASK))
2002 #define PRINT_DIRECTIVE(DIRECTIVE, MASK)                                       \
2003   do {                                                                         \
2004     KdStream << Indent << DIRECTIVE " " << GET_FIELD(MASK) << '\n';            \
2005   } while (0)
2006 #define PRINT_PSEUDO_DIRECTIVE_COMMENT(DIRECTIVE, MASK)                        \
2007   do {                                                                         \
2008     KdStream << Indent << MAI.getCommentString() << ' ' << DIRECTIVE " "       \
2009              << GET_FIELD(MASK) << '\n';                                       \
2010   } while (0)
2011 
2012 #define CHECK_RESERVED_BITS_IMPL(MASK, DESC, MSG)                              \
2013   do {                                                                         \
2014     if (FourByteBuffer & (MASK)) {                                             \
2015       return createStringError(std::errc::invalid_argument,                    \
2016                                "kernel descriptor " DESC                       \
2017                                " reserved %s set" MSG,                         \
2018                                getBitRangeFromMask((MASK), 0).c_str());        \
2019     }                                                                          \
2020   } while (0)
2021 
2022 #define CHECK_RESERVED_BITS(MASK) CHECK_RESERVED_BITS_IMPL(MASK, #MASK, "")
2023 #define CHECK_RESERVED_BITS_MSG(MASK, MSG)                                     \
2024   CHECK_RESERVED_BITS_IMPL(MASK, #MASK, ", " MSG)
2025 #define CHECK_RESERVED_BITS_DESC(MASK, DESC)                                   \
2026   CHECK_RESERVED_BITS_IMPL(MASK, DESC, "")
2027 #define CHECK_RESERVED_BITS_DESC_MSG(MASK, DESC, MSG)                          \
2028   CHECK_RESERVED_BITS_IMPL(MASK, DESC, ", " MSG)
2029 
2030 // NOLINTNEXTLINE(readability-identifier-naming)
2031 Expected<bool> AMDGPUDisassembler::decodeCOMPUTE_PGM_RSRC1(
2032     uint32_t FourByteBuffer, raw_string_ostream &KdStream) const {
2033   using namespace amdhsa;
2034   StringRef Indent = "\t";
2035 
2036   // We cannot accurately backward compute #VGPRs used from
2037   // GRANULATED_WORKITEM_VGPR_COUNT. But we are concerned with getting the same
2038   // value of GRANULATED_WORKITEM_VGPR_COUNT in the reassembled binary. So we
2039   // simply calculate the inverse of what the assembler does.
2040 
2041   uint32_t GranulatedWorkitemVGPRCount =
2042       GET_FIELD(COMPUTE_PGM_RSRC1_GRANULATED_WORKITEM_VGPR_COUNT);
2043 
2044   uint32_t NextFreeVGPR =
2045       (GranulatedWorkitemVGPRCount + 1) *
2046       AMDGPU::IsaInfo::getVGPREncodingGranule(&STI, EnableWavefrontSize32);
2047 
2048   KdStream << Indent << ".amdhsa_next_free_vgpr " << NextFreeVGPR << '\n';
2049 
2050   // We cannot backward compute values used to calculate
2051   // GRANULATED_WAVEFRONT_SGPR_COUNT. Hence the original values for the
2052   // following directives can't be computed:
2053   // .amdhsa_reserve_vcc
2054   // .amdhsa_reserve_flat_scratch
2055   // .amdhsa_reserve_xnack_mask
2056   // They take their respective default values if not specified in the assembly.
2057   //
2058   // GRANULATED_WAVEFRONT_SGPR_COUNT
2059   //    = f(NEXT_FREE_SGPR + VCC + FLAT_SCRATCH + XNACK_MASK)
2060   //
2061   // We compute the inverse as though all directives apart from NEXT_FREE_SGPR
2062   // are set to 0. So while disassembling we consider that:
2063   //
2064   // GRANULATED_WAVEFRONT_SGPR_COUNT
2065   //    = f(NEXT_FREE_SGPR + 0 + 0 + 0)
2066   //
2067   // The disassembler cannot recover the original values of those 3 directives.
2068 
2069   uint32_t GranulatedWavefrontSGPRCount =
2070       GET_FIELD(COMPUTE_PGM_RSRC1_GRANULATED_WAVEFRONT_SGPR_COUNT);
2071 
2072   if (isGFX10Plus())
2073     CHECK_RESERVED_BITS_MSG(COMPUTE_PGM_RSRC1_GRANULATED_WAVEFRONT_SGPR_COUNT,
2074                             "must be zero on gfx10+");
2075 
2076   uint32_t NextFreeSGPR = (GranulatedWavefrontSGPRCount + 1) *
2077                           AMDGPU::IsaInfo::getSGPREncodingGranule(&STI);
2078 
2079   KdStream << Indent << ".amdhsa_reserve_vcc " << 0 << '\n';
2080   if (!hasArchitectedFlatScratch())
2081     KdStream << Indent << ".amdhsa_reserve_flat_scratch " << 0 << '\n';
2082   KdStream << Indent << ".amdhsa_reserve_xnack_mask " << 0 << '\n';
2083   KdStream << Indent << ".amdhsa_next_free_sgpr " << NextFreeSGPR << "\n";
2084 
2085   CHECK_RESERVED_BITS(COMPUTE_PGM_RSRC1_PRIORITY);
2086 
2087   PRINT_DIRECTIVE(".amdhsa_float_round_mode_32",
2088                   COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_32);
2089   PRINT_DIRECTIVE(".amdhsa_float_round_mode_16_64",
2090                   COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_16_64);
2091   PRINT_DIRECTIVE(".amdhsa_float_denorm_mode_32",
2092                   COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_32);
2093   PRINT_DIRECTIVE(".amdhsa_float_denorm_mode_16_64",
2094                   COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64);
2095 
2096   CHECK_RESERVED_BITS(COMPUTE_PGM_RSRC1_PRIV);
2097 
2098   if (!isGFX12Plus())
2099     PRINT_DIRECTIVE(".amdhsa_dx10_clamp",
2100                     COMPUTE_PGM_RSRC1_GFX6_GFX11_ENABLE_DX10_CLAMP);
2101 
2102   CHECK_RESERVED_BITS(COMPUTE_PGM_RSRC1_DEBUG_MODE);
2103 
2104   if (!isGFX12Plus())
2105     PRINT_DIRECTIVE(".amdhsa_ieee_mode",
2106                     COMPUTE_PGM_RSRC1_GFX6_GFX11_ENABLE_IEEE_MODE);
2107 
2108   CHECK_RESERVED_BITS(COMPUTE_PGM_RSRC1_BULKY);
2109   CHECK_RESERVED_BITS(COMPUTE_PGM_RSRC1_CDBG_USER);
2110 
2111   if (isGFX9Plus())
2112     PRINT_DIRECTIVE(".amdhsa_fp16_overflow", COMPUTE_PGM_RSRC1_GFX9_PLUS_FP16_OVFL);
2113 
2114   if (!isGFX9Plus())
2115     CHECK_RESERVED_BITS_DESC_MSG(COMPUTE_PGM_RSRC1_GFX6_GFX8_RESERVED0,
2116                                  "COMPUTE_PGM_RSRC1", "must be zero pre-gfx9");
2117 
2118   CHECK_RESERVED_BITS_DESC(COMPUTE_PGM_RSRC1_RESERVED1, "COMPUTE_PGM_RSRC1");
2119 
2120   if (!isGFX10Plus())
2121     CHECK_RESERVED_BITS_DESC_MSG(COMPUTE_PGM_RSRC1_GFX6_GFX9_RESERVED2,
2122                                  "COMPUTE_PGM_RSRC1", "must be zero pre-gfx10");
2123 
2124   if (isGFX10Plus()) {
2125     PRINT_DIRECTIVE(".amdhsa_workgroup_processor_mode",
2126                     COMPUTE_PGM_RSRC1_GFX10_PLUS_WGP_MODE);
2127     PRINT_DIRECTIVE(".amdhsa_memory_ordered", COMPUTE_PGM_RSRC1_GFX10_PLUS_MEM_ORDERED);
2128     PRINT_DIRECTIVE(".amdhsa_forward_progress", COMPUTE_PGM_RSRC1_GFX10_PLUS_FWD_PROGRESS);
2129   }
2130 
2131   if (isGFX12Plus())
2132     PRINT_DIRECTIVE(".amdhsa_round_robin_scheduling",
2133                     COMPUTE_PGM_RSRC1_GFX12_PLUS_ENABLE_WG_RR_EN);
2134 
2135   return true;
2136 }
2137 
2138 // NOLINTNEXTLINE(readability-identifier-naming)
2139 Expected<bool> AMDGPUDisassembler::decodeCOMPUTE_PGM_RSRC2(
2140     uint32_t FourByteBuffer, raw_string_ostream &KdStream) const {
2141   using namespace amdhsa;
2142   StringRef Indent = "\t";
2143   if (hasArchitectedFlatScratch())
2144     PRINT_DIRECTIVE(".amdhsa_enable_private_segment",
2145                     COMPUTE_PGM_RSRC2_ENABLE_PRIVATE_SEGMENT);
2146   else
2147     PRINT_DIRECTIVE(".amdhsa_system_sgpr_private_segment_wavefront_offset",
2148                     COMPUTE_PGM_RSRC2_ENABLE_PRIVATE_SEGMENT);
2149   PRINT_DIRECTIVE(".amdhsa_system_sgpr_workgroup_id_x",
2150                   COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X);
2151   PRINT_DIRECTIVE(".amdhsa_system_sgpr_workgroup_id_y",
2152                   COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Y);
2153   PRINT_DIRECTIVE(".amdhsa_system_sgpr_workgroup_id_z",
2154                   COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Z);
2155   PRINT_DIRECTIVE(".amdhsa_system_sgpr_workgroup_info",
2156                   COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_INFO);
2157   PRINT_DIRECTIVE(".amdhsa_system_vgpr_workitem_id",
2158                   COMPUTE_PGM_RSRC2_ENABLE_VGPR_WORKITEM_ID);
2159 
2160   CHECK_RESERVED_BITS(COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_ADDRESS_WATCH);
2161   CHECK_RESERVED_BITS(COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_MEMORY);
2162   CHECK_RESERVED_BITS(COMPUTE_PGM_RSRC2_GRANULATED_LDS_SIZE);
2163 
2164   PRINT_DIRECTIVE(
2165       ".amdhsa_exception_fp_ieee_invalid_op",
2166       COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INVALID_OPERATION);
2167   PRINT_DIRECTIVE(".amdhsa_exception_fp_denorm_src",
2168                   COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_FP_DENORMAL_SOURCE);
2169   PRINT_DIRECTIVE(
2170       ".amdhsa_exception_fp_ieee_div_zero",
2171       COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_DIVISION_BY_ZERO);
2172   PRINT_DIRECTIVE(".amdhsa_exception_fp_ieee_overflow",
2173                   COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_OVERFLOW);
2174   PRINT_DIRECTIVE(".amdhsa_exception_fp_ieee_underflow",
2175                   COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_UNDERFLOW);
2176   PRINT_DIRECTIVE(".amdhsa_exception_fp_ieee_inexact",
2177                   COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INEXACT);
2178   PRINT_DIRECTIVE(".amdhsa_exception_int_div_zero",
2179                   COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_INT_DIVIDE_BY_ZERO);
2180 
2181   CHECK_RESERVED_BITS_DESC(COMPUTE_PGM_RSRC2_RESERVED0, "COMPUTE_PGM_RSRC2");
2182 
2183   return true;
2184 }
2185 
2186 // NOLINTNEXTLINE(readability-identifier-naming)
2187 Expected<bool> AMDGPUDisassembler::decodeCOMPUTE_PGM_RSRC3(
2188     uint32_t FourByteBuffer, raw_string_ostream &KdStream) const {
2189   using namespace amdhsa;
2190   StringRef Indent = "\t";
2191   if (isGFX90A()) {
2192     KdStream << Indent << ".amdhsa_accum_offset "
2193              << (GET_FIELD(COMPUTE_PGM_RSRC3_GFX90A_ACCUM_OFFSET) + 1) * 4
2194              << '\n';
2195 
2196     PRINT_DIRECTIVE(".amdhsa_tg_split", COMPUTE_PGM_RSRC3_GFX90A_TG_SPLIT);
2197 
2198     CHECK_RESERVED_BITS_DESC_MSG(COMPUTE_PGM_RSRC3_GFX90A_RESERVED0,
2199                                  "COMPUTE_PGM_RSRC3", "must be zero on gfx90a");
2200     CHECK_RESERVED_BITS_DESC_MSG(COMPUTE_PGM_RSRC3_GFX90A_RESERVED1,
2201                                  "COMPUTE_PGM_RSRC3", "must be zero on gfx90a");
2202   } else if (isGFX10Plus()) {
2203     // Bits [0-3].
2204     if (!isGFX12Plus()) {
2205       if (!EnableWavefrontSize32 || !*EnableWavefrontSize32) {
2206         PRINT_DIRECTIVE(".amdhsa_shared_vgpr_count",
2207                         COMPUTE_PGM_RSRC3_GFX10_GFX11_SHARED_VGPR_COUNT);
2208       } else {
2209         PRINT_PSEUDO_DIRECTIVE_COMMENT(
2210             "SHARED_VGPR_COUNT",
2211             COMPUTE_PGM_RSRC3_GFX10_GFX11_SHARED_VGPR_COUNT);
2212       }
2213     } else {
2214       CHECK_RESERVED_BITS_DESC_MSG(COMPUTE_PGM_RSRC3_GFX12_PLUS_RESERVED0,
2215                                    "COMPUTE_PGM_RSRC3",
2216                                    "must be zero on gfx12+");
2217     }
2218 
2219     // Bits [4-11].
2220     if (isGFX11()) {
2221       PRINT_PSEUDO_DIRECTIVE_COMMENT("INST_PREF_SIZE",
2222                                      COMPUTE_PGM_RSRC3_GFX11_INST_PREF_SIZE);
2223       PRINT_PSEUDO_DIRECTIVE_COMMENT("TRAP_ON_START",
2224                                      COMPUTE_PGM_RSRC3_GFX11_TRAP_ON_START);
2225       PRINT_PSEUDO_DIRECTIVE_COMMENT("TRAP_ON_END",
2226                                      COMPUTE_PGM_RSRC3_GFX11_TRAP_ON_END);
2227     } else if (isGFX12Plus()) {
2228       PRINT_PSEUDO_DIRECTIVE_COMMENT(
2229           "INST_PREF_SIZE", COMPUTE_PGM_RSRC3_GFX12_PLUS_INST_PREF_SIZE);
2230     } else {
2231       CHECK_RESERVED_BITS_DESC_MSG(COMPUTE_PGM_RSRC3_GFX10_RESERVED1,
2232                                    "COMPUTE_PGM_RSRC3",
2233                                    "must be zero on gfx10");
2234     }
2235 
2236     // Bits [12].
2237     CHECK_RESERVED_BITS_DESC_MSG(COMPUTE_PGM_RSRC3_GFX10_PLUS_RESERVED2,
2238                                  "COMPUTE_PGM_RSRC3", "must be zero on gfx10+");
2239 
2240     // Bits [13].
2241     if (isGFX12Plus()) {
2242       PRINT_PSEUDO_DIRECTIVE_COMMENT("GLG_EN",
2243                                      COMPUTE_PGM_RSRC3_GFX12_PLUS_GLG_EN);
2244     } else {
2245       CHECK_RESERVED_BITS_DESC_MSG(COMPUTE_PGM_RSRC3_GFX10_GFX11_RESERVED3,
2246                                    "COMPUTE_PGM_RSRC3",
2247                                    "must be zero on gfx10 or gfx11");
2248     }
2249 
2250     // Bits [14-30].
2251     CHECK_RESERVED_BITS_DESC_MSG(COMPUTE_PGM_RSRC3_GFX10_PLUS_RESERVED4,
2252                                  "COMPUTE_PGM_RSRC3", "must be zero on gfx10+");
2253 
2254     // Bits [31].
2255     if (isGFX11Plus()) {
2256       PRINT_PSEUDO_DIRECTIVE_COMMENT("IMAGE_OP",
2257                                      COMPUTE_PGM_RSRC3_GFX11_PLUS_IMAGE_OP);
2258     } else {
2259       CHECK_RESERVED_BITS_DESC_MSG(COMPUTE_PGM_RSRC3_GFX10_RESERVED5,
2260                                    "COMPUTE_PGM_RSRC3",
2261                                    "must be zero on gfx10");
2262     }
2263   } else if (FourByteBuffer) {
2264     return createStringError(
2265         std::errc::invalid_argument,
2266         "kernel descriptor COMPUTE_PGM_RSRC3 must be all zero before gfx9");
2267   }
2268   return true;
2269 }
2270 #undef PRINT_PSEUDO_DIRECTIVE_COMMENT
2271 #undef PRINT_DIRECTIVE
2272 #undef GET_FIELD
2273 #undef CHECK_RESERVED_BITS_IMPL
2274 #undef CHECK_RESERVED_BITS
2275 #undef CHECK_RESERVED_BITS_MSG
2276 #undef CHECK_RESERVED_BITS_DESC
2277 #undef CHECK_RESERVED_BITS_DESC_MSG
2278 
2279 /// Create an error object to return from onSymbolStart for reserved kernel
2280 /// descriptor bits being set.
2281 static Error createReservedKDBitsError(uint32_t Mask, unsigned BaseBytes,
2282                                        const char *Msg = "") {
2283   return createStringError(
2284       std::errc::invalid_argument, "kernel descriptor reserved %s set%s%s",
2285       getBitRangeFromMask(Mask, BaseBytes).c_str(), *Msg ? ", " : "", Msg);
2286 }
2287 
2288 /// Create an error object to return from onSymbolStart for reserved kernel
2289 /// descriptor bytes being set.
2290 static Error createReservedKDBytesError(unsigned BaseInBytes,
2291                                         unsigned WidthInBytes) {
2292   // Create an error comment in the same format as the "Kernel Descriptor"
2293   // table here: https://llvm.org/docs/AMDGPUUsage.html#kernel-descriptor .
2294   return createStringError(
2295       std::errc::invalid_argument,
2296       "kernel descriptor reserved bits in range (%u:%u) set",
2297       (BaseInBytes + WidthInBytes) * CHAR_BIT - 1, BaseInBytes * CHAR_BIT);
2298 }
2299 
2300 Expected<bool> AMDGPUDisassembler::decodeKernelDescriptorDirective(
2301     DataExtractor::Cursor &Cursor, ArrayRef<uint8_t> Bytes,
2302     raw_string_ostream &KdStream) const {
2303 #define PRINT_DIRECTIVE(DIRECTIVE, MASK)                                       \
2304   do {                                                                         \
2305     KdStream << Indent << DIRECTIVE " "                                        \
2306              << ((TwoByteBuffer & MASK) >> (MASK##_SHIFT)) << '\n';            \
2307   } while (0)
2308 
2309   uint16_t TwoByteBuffer = 0;
2310   uint32_t FourByteBuffer = 0;
2311 
2312   StringRef ReservedBytes;
2313   StringRef Indent = "\t";
2314 
2315   assert(Bytes.size() == 64);
2316   DataExtractor DE(Bytes, /*IsLittleEndian=*/true, /*AddressSize=*/8);
2317 
2318   switch (Cursor.tell()) {
2319   case amdhsa::GROUP_SEGMENT_FIXED_SIZE_OFFSET:
2320     FourByteBuffer = DE.getU32(Cursor);
2321     KdStream << Indent << ".amdhsa_group_segment_fixed_size " << FourByteBuffer
2322              << '\n';
2323     return true;
2324 
2325   case amdhsa::PRIVATE_SEGMENT_FIXED_SIZE_OFFSET:
2326     FourByteBuffer = DE.getU32(Cursor);
2327     KdStream << Indent << ".amdhsa_private_segment_fixed_size "
2328              << FourByteBuffer << '\n';
2329     return true;
2330 
2331   case amdhsa::KERNARG_SIZE_OFFSET:
2332     FourByteBuffer = DE.getU32(Cursor);
2333     KdStream << Indent << ".amdhsa_kernarg_size "
2334              << FourByteBuffer << '\n';
2335     return true;
2336 
2337   case amdhsa::RESERVED0_OFFSET:
2338     // 4 reserved bytes, must be 0.
2339     ReservedBytes = DE.getBytes(Cursor, 4);
2340     for (int I = 0; I < 4; ++I) {
2341       if (ReservedBytes[I] != 0)
2342         return createReservedKDBytesError(amdhsa::RESERVED0_OFFSET, 4);
2343     }
2344     return true;
2345 
2346   case amdhsa::KERNEL_CODE_ENTRY_BYTE_OFFSET_OFFSET:
2347     // KERNEL_CODE_ENTRY_BYTE_OFFSET
2348     // So far no directive controls this for Code Object V3, so simply skip for
2349     // disassembly.
2350     DE.skip(Cursor, 8);
2351     return true;
2352 
2353   case amdhsa::RESERVED1_OFFSET:
2354     // 20 reserved bytes, must be 0.
2355     ReservedBytes = DE.getBytes(Cursor, 20);
2356     for (int I = 0; I < 20; ++I) {
2357       if (ReservedBytes[I] != 0)
2358         return createReservedKDBytesError(amdhsa::RESERVED1_OFFSET, 20);
2359     }
2360     return true;
2361 
2362   case amdhsa::COMPUTE_PGM_RSRC3_OFFSET:
2363     FourByteBuffer = DE.getU32(Cursor);
2364     return decodeCOMPUTE_PGM_RSRC3(FourByteBuffer, KdStream);
2365 
2366   case amdhsa::COMPUTE_PGM_RSRC1_OFFSET:
2367     FourByteBuffer = DE.getU32(Cursor);
2368     return decodeCOMPUTE_PGM_RSRC1(FourByteBuffer, KdStream);
2369 
2370   case amdhsa::COMPUTE_PGM_RSRC2_OFFSET:
2371     FourByteBuffer = DE.getU32(Cursor);
2372     return decodeCOMPUTE_PGM_RSRC2(FourByteBuffer, KdStream);
2373 
2374   case amdhsa::KERNEL_CODE_PROPERTIES_OFFSET:
2375     using namespace amdhsa;
2376     TwoByteBuffer = DE.getU16(Cursor);
2377 
2378     if (!hasArchitectedFlatScratch())
2379       PRINT_DIRECTIVE(".amdhsa_user_sgpr_private_segment_buffer",
2380                       KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER);
2381     PRINT_DIRECTIVE(".amdhsa_user_sgpr_dispatch_ptr",
2382                     KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR);
2383     PRINT_DIRECTIVE(".amdhsa_user_sgpr_queue_ptr",
2384                     KERNEL_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR);
2385     PRINT_DIRECTIVE(".amdhsa_user_sgpr_kernarg_segment_ptr",
2386                     KERNEL_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR);
2387     PRINT_DIRECTIVE(".amdhsa_user_sgpr_dispatch_id",
2388                     KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID);
2389     if (!hasArchitectedFlatScratch())
2390       PRINT_DIRECTIVE(".amdhsa_user_sgpr_flat_scratch_init",
2391                       KERNEL_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT);
2392     PRINT_DIRECTIVE(".amdhsa_user_sgpr_private_segment_size",
2393                     KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE);
2394 
2395     if (TwoByteBuffer & KERNEL_CODE_PROPERTY_RESERVED0)
2396       return createReservedKDBitsError(KERNEL_CODE_PROPERTY_RESERVED0,
2397                                        amdhsa::KERNEL_CODE_PROPERTIES_OFFSET);
2398 
2399     // Reserved for GFX9
2400     if (isGFX9() &&
2401         (TwoByteBuffer & KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32)) {
2402       return createReservedKDBitsError(
2403           KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32,
2404           amdhsa::KERNEL_CODE_PROPERTIES_OFFSET, "must be zero on gfx9");
2405     }
2406     if (isGFX10Plus()) {
2407       PRINT_DIRECTIVE(".amdhsa_wavefront_size32",
2408                       KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32);
2409     }
2410 
2411     if (CodeObjectVersion >= AMDGPU::AMDHSA_COV5)
2412       PRINT_DIRECTIVE(".amdhsa_uses_dynamic_stack",
2413                       KERNEL_CODE_PROPERTY_USES_DYNAMIC_STACK);
2414 
2415     if (TwoByteBuffer & KERNEL_CODE_PROPERTY_RESERVED1) {
2416       return createReservedKDBitsError(KERNEL_CODE_PROPERTY_RESERVED1,
2417                                        amdhsa::KERNEL_CODE_PROPERTIES_OFFSET);
2418     }
2419 
2420     return true;
2421 
2422   case amdhsa::KERNARG_PRELOAD_OFFSET:
2423     using namespace amdhsa;
2424     TwoByteBuffer = DE.getU16(Cursor);
2425     if (TwoByteBuffer & KERNARG_PRELOAD_SPEC_LENGTH) {
2426       PRINT_DIRECTIVE(".amdhsa_user_sgpr_kernarg_preload_length",
2427                       KERNARG_PRELOAD_SPEC_LENGTH);
2428     }
2429 
2430     if (TwoByteBuffer & KERNARG_PRELOAD_SPEC_OFFSET) {
2431       PRINT_DIRECTIVE(".amdhsa_user_sgpr_kernarg_preload_offset",
2432                       KERNARG_PRELOAD_SPEC_OFFSET);
2433     }
2434     return true;
2435 
2436   case amdhsa::RESERVED3_OFFSET:
2437     // 4 bytes from here are reserved, must be 0.
2438     ReservedBytes = DE.getBytes(Cursor, 4);
2439     for (int I = 0; I < 4; ++I) {
2440       if (ReservedBytes[I] != 0)
2441         return createReservedKDBytesError(amdhsa::RESERVED3_OFFSET, 4);
2442     }
2443     return true;
2444 
2445   default:
2446     llvm_unreachable("Unhandled index. Case statements cover everything.");
2447     return true;
2448   }
2449 #undef PRINT_DIRECTIVE
2450 }
2451 
2452 Expected<bool> AMDGPUDisassembler::decodeKernelDescriptor(
2453     StringRef KdName, ArrayRef<uint8_t> Bytes, uint64_t KdAddress) const {
2454 
2455   // CP microcode requires the kernel descriptor to be 64-byte aligned.
2456   if (Bytes.size() != 64 || KdAddress % 64 != 0)
2457     return createStringError(std::errc::invalid_argument,
2458                              "kernel descriptor must be 64-byte aligned");
2459 
2460   // FIXME: We can't actually decode "in order" as is done below, as e.g. GFX10
2461   // requires us to know the setting of .amdhsa_wavefront_size32 in order to
2462   // accurately produce .amdhsa_next_free_vgpr, and they appear in the wrong
2463   // order. Workaround this by first looking up .amdhsa_wavefront_size32 here
2464   // when required.
2465   if (isGFX10Plus()) {
2466     uint16_t KernelCodeProperties =
2467         support::endian::read16(&Bytes[amdhsa::KERNEL_CODE_PROPERTIES_OFFSET],
2468                                 llvm::endianness::little);
2469     EnableWavefrontSize32 =
2470         AMDHSA_BITS_GET(KernelCodeProperties,
2471                         amdhsa::KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32);
2472   }
2473 
2474   std::string Kd;
2475   raw_string_ostream KdStream(Kd);
2476   KdStream << ".amdhsa_kernel " << KdName << '\n';
2477 
2478   DataExtractor::Cursor C(0);
2479   while (C && C.tell() < Bytes.size()) {
2480     Expected<bool> Res = decodeKernelDescriptorDirective(C, Bytes, KdStream);
2481 
2482     cantFail(C.takeError());
2483 
2484     if (!Res)
2485       return Res;
2486   }
2487   KdStream << ".end_amdhsa_kernel\n";
2488   outs() << KdStream.str();
2489   return true;
2490 }
2491 
2492 Expected<bool> AMDGPUDisassembler::onSymbolStart(SymbolInfoTy &Symbol,
2493                                                  uint64_t &Size,
2494                                                  ArrayRef<uint8_t> Bytes,
2495                                                  uint64_t Address) const {
2496   // Right now only the kernel descriptor needs to be handled.
2497   // We ignore all other symbols for target-specific handling.
2498   // TODO:
2499   // Fix the spurious symbol issue for AMDGPU kernels. It exists for both Code
2500   // Object V2 and V3 when symbols are marked protected.
2501 
2502   // amd_kernel_code_t for Code Object V2.
2503   if (Symbol.Type == ELF::STT_AMDGPU_HSA_KERNEL) {
2504     Size = 256;
2505     return createStringError(std::errc::invalid_argument,
2506                              "code object v2 is not supported");
2507   }
2508 
2509   // Code Object V3 kernel descriptors.
2510   StringRef Name = Symbol.Name;
2511   if (Symbol.Type == ELF::STT_OBJECT && Name.ends_with(StringRef(".kd"))) {
2512     Size = 64; // Size = 64 regardless of success or failure.
2513     return decodeKernelDescriptor(Name.drop_back(3), Bytes, Address);
2514   }
2515 
2516   return false;
2517 }
2518 
2519 const MCExpr *AMDGPUDisassembler::createConstantSymbolExpr(StringRef Id,
2520                                                            int64_t Val) {
2521   MCContext &Ctx = getContext();
2522   MCSymbol *Sym = Ctx.getOrCreateSymbol(Id);
2523   // Note: only set the value to Val on a new symbol, in case a disassembler
2524   // has already been initialized in this context.
2525   if (!Sym->isVariable()) {
2526     Sym->setVariableValue(MCConstantExpr::create(Val, Ctx));
2527   } else {
2528     int64_t Res = ~Val;
2529     bool Valid = Sym->getVariableValue()->evaluateAsAbsolute(Res);
2530     if (!Valid || Res != Val)
2531       Ctx.reportWarning(SMLoc(), "unsupported redefinition of " + Id);
2532   }
2533   return MCSymbolRefExpr::create(Sym, Ctx);
2534 }
2535 
2536 //===----------------------------------------------------------------------===//
2537 // AMDGPUSymbolizer
2538 //===----------------------------------------------------------------------===//
2539 
2540 // Try to find the symbol name for the specified label.
2541 bool AMDGPUSymbolizer::tryAddingSymbolicOperand(
2542     MCInst &Inst, raw_ostream & /*cStream*/, int64_t Value,
2543     uint64_t /*Address*/, bool IsBranch, uint64_t /*Offset*/,
2544     uint64_t /*OpSize*/, uint64_t /*InstSize*/) {
2545 
2546   if (!IsBranch) {
2547     return false;
2548   }
2549 
2550   auto *Symbols = static_cast<SectionSymbolsTy *>(DisInfo);
2551   if (!Symbols)
2552     return false;
2553 
2554   auto Result = llvm::find_if(*Symbols, [Value](const SymbolInfoTy &Val) {
2555     return Val.Addr == static_cast<uint64_t>(Value) &&
2556            Val.Type == ELF::STT_NOTYPE;
2557   });
2558   if (Result != Symbols->end()) {
2559     auto *Sym = Ctx.getOrCreateSymbol(Result->Name);
2560     const auto *Add = MCSymbolRefExpr::create(Sym, Ctx);
2561     Inst.addOperand(MCOperand::createExpr(Add));
2562     return true;
2563   }
2564   // Add to list of referenced addresses, so caller can synthesize a label.
2565   ReferencedAddresses.push_back(static_cast<uint64_t>(Value));
2566   return false;
2567 }
2568 
2569 void AMDGPUSymbolizer::tryAddingPcLoadReferenceComment(raw_ostream &cStream,
2570                                                        int64_t Value,
2571                                                        uint64_t Address) {
2572   llvm_unreachable("unimplemented");
2573 }
2574 
2575 //===----------------------------------------------------------------------===//
2576 // Initialization
2577 //===----------------------------------------------------------------------===//
2578 
2579 static MCSymbolizer *createAMDGPUSymbolizer(const Triple &/*TT*/,
2580                               LLVMOpInfoCallback /*GetOpInfo*/,
2581                               LLVMSymbolLookupCallback /*SymbolLookUp*/,
2582                               void *DisInfo,
2583                               MCContext *Ctx,
2584                               std::unique_ptr<MCRelocationInfo> &&RelInfo) {
2585   return new AMDGPUSymbolizer(*Ctx, std::move(RelInfo), DisInfo);
2586 }
2587 
2588 static MCDisassembler *createAMDGPUDisassembler(const Target &T,
2589                                                 const MCSubtargetInfo &STI,
2590                                                 MCContext &Ctx) {
2591   return new AMDGPUDisassembler(STI, Ctx, T.createMCInstrInfo());
2592 }
2593 
2594 extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUDisassembler() {
2595   TargetRegistry::RegisterMCDisassembler(getTheGCNTarget(),
2596                                          createAMDGPUDisassembler);
2597   TargetRegistry::RegisterMCSymbolizer(getTheGCNTarget(),
2598                                        createAMDGPUSymbolizer);
2599 }
2600