1 //===- AMDGPUDisassembler.cpp - Disassembler for AMDGPU ISA ---------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 //===----------------------------------------------------------------------===//
10 //
11 /// \file
12 ///
13 /// This file contains the definition of the AMDGPU ISA disassembler.
14 //
15 //===----------------------------------------------------------------------===//
16 
17 // ToDo: What to do with instruction suffixes (v_mov_b32 vs v_mov_b32_e32)?
18 
19 #include "Disassembler/AMDGPUDisassembler.h"
20 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
21 #include "SIDefines.h"
22 #include "SIRegisterInfo.h"
23 #include "TargetInfo/AMDGPUTargetInfo.h"
24 #include "Utils/AMDGPUAsmUtils.h"
25 #include "Utils/AMDGPUBaseInfo.h"
26 #include "llvm-c/DisassemblerTypes.h"
27 #include "llvm/BinaryFormat/ELF.h"
28 #include "llvm/MC/MCAsmInfo.h"
29 #include "llvm/MC/MCContext.h"
30 #include "llvm/MC/MCDecoderOps.h"
31 #include "llvm/MC/MCExpr.h"
32 #include "llvm/MC/MCInstrDesc.h"
33 #include "llvm/MC/MCRegisterInfo.h"
34 #include "llvm/MC/MCSubtargetInfo.h"
35 #include "llvm/MC/TargetRegistry.h"
36 #include "llvm/Support/AMDHSAKernelDescriptor.h"
37 
38 using namespace llvm;
39 
40 #define DEBUG_TYPE "amdgpu-disassembler"
41 
42 #define SGPR_MAX                                                               \
43   (isGFX10Plus() ? AMDGPU::EncValues::SGPR_MAX_GFX10                           \
44                  : AMDGPU::EncValues::SGPR_MAX_SI)
45 
46 using DecodeStatus = llvm::MCDisassembler::DecodeStatus;
47 
48 AMDGPUDisassembler::AMDGPUDisassembler(const MCSubtargetInfo &STI,
49                                        MCContext &Ctx, MCInstrInfo const *MCII)
50     : MCDisassembler(STI, Ctx), MCII(MCII), MRI(*Ctx.getRegisterInfo()),
51       MAI(*Ctx.getAsmInfo()), TargetMaxInstBytes(MAI.getMaxInstLength(&STI)),
52       CodeObjectVersion(AMDGPU::getDefaultAMDHSACodeObjectVersion()) {
53   // ToDo: AMDGPUDisassembler supports only VI ISA.
54   if (!STI.hasFeature(AMDGPU::FeatureGCN3Encoding) && !isGFX10Plus())
55     report_fatal_error("Disassembly not yet supported for subtarget");
56 
57   for (auto [Symbol, Code] : AMDGPU::UCVersion::getGFXVersions())
58     createConstantSymbolExpr(Symbol, Code);
59 
60   UCVersionW64Expr = createConstantSymbolExpr("UC_VERSION_W64_BIT", 0x2000);
61   UCVersionW32Expr = createConstantSymbolExpr("UC_VERSION_W32_BIT", 0x4000);
62   UCVersionMDPExpr = createConstantSymbolExpr("UC_VERSION_MDP_BIT", 0x8000);
63 }
64 
65 void AMDGPUDisassembler::setABIVersion(unsigned Version) {
66   CodeObjectVersion = AMDGPU::getAMDHSACodeObjectVersion(Version);
67 }
68 
69 inline static MCDisassembler::DecodeStatus
70 addOperand(MCInst &Inst, const MCOperand& Opnd) {
71   Inst.addOperand(Opnd);
72   return Opnd.isValid() ?
73     MCDisassembler::Success :
74     MCDisassembler::Fail;
75 }
76 
77 static int insertNamedMCOperand(MCInst &MI, const MCOperand &Op,
78                                 uint16_t NameIdx) {
79   int OpIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), NameIdx);
80   if (OpIdx != -1) {
81     auto *I = MI.begin();
82     std::advance(I, OpIdx);
83     MI.insert(I, Op);
84   }
85   return OpIdx;
86 }
87 
88 static DecodeStatus decodeSOPPBrTarget(MCInst &Inst, unsigned Imm,
89                                        uint64_t Addr,
90                                        const MCDisassembler *Decoder) {
91   const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
92 
93   // Our branches take a simm16.
94   int64_t Offset = SignExtend64<16>(Imm) * 4 + 4 + Addr;
95 
96   if (DAsm->tryAddingSymbolicOperand(Inst, Offset, Addr, true, 2, 2, 0))
97     return MCDisassembler::Success;
98   return addOperand(Inst, MCOperand::createImm(Imm));
99 }
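// A quick sketch of the arithmetic above: the 16-bit immediate is a signed
// dword offset relative to the next instruction, so Imm = 0x0010 gives a target
// of Addr + 4 + 64 = Addr + 68, while Imm = 0xFFFF (-1) gives Addr itself.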
100 
101 static DecodeStatus decodeSMEMOffset(MCInst &Inst, unsigned Imm, uint64_t Addr,
102                                      const MCDisassembler *Decoder) {
103   const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
104   int64_t Offset;
105   if (DAsm->isGFX12Plus()) { // GFX12 supports 24-bit signed offsets.
106     Offset = SignExtend64<24>(Imm);
107   } else if (DAsm->isVI()) { // VI supports 20-bit unsigned offsets.
108     Offset = Imm & 0xFFFFF;
109   } else { // GFX9+ supports 21-bit signed offsets.
110     Offset = SignExtend64<21>(Imm);
111   }
112   return addOperand(Inst, MCOperand::createImm(Offset));
113 }
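// For example, a value with bit 20 set such as Imm = 0x1FFFFF sign-extends to
// -1 in the GFX9+ path, is masked to 0xFFFFF on VI, and on GFX12+ the sign bit
// is bit 23 instead.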
114 
115 static DecodeStatus decodeBoolReg(MCInst &Inst, unsigned Val, uint64_t Addr,
116                                   const MCDisassembler *Decoder) {
117   const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
118   return addOperand(Inst, DAsm->decodeBoolReg(Val));
119 }
120 
121 static DecodeStatus decodeSplitBarrier(MCInst &Inst, unsigned Val,
122                                        uint64_t Addr,
123                                        const MCDisassembler *Decoder) {
124   const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
125   return addOperand(Inst, DAsm->decodeSplitBarrier(Val));
126 }
127 
128 static DecodeStatus decodeDpp8FI(MCInst &Inst, unsigned Val, uint64_t Addr,
129                                  const MCDisassembler *Decoder) {
130   const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
131   return addOperand(Inst, DAsm->decodeDpp8FI(Val));
132 }
133 
134 #define DECODE_OPERAND(StaticDecoderName, DecoderName)                         \
135   static DecodeStatus StaticDecoderName(MCInst &Inst, unsigned Imm,            \
136                                         uint64_t /*Addr*/,                     \
137                                         const MCDisassembler *Decoder) {       \
138     auto DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);              \
139     return addOperand(Inst, DAsm->DecoderName(Imm));                           \
140   }
141 
142 // Decoder for registers that are decoded directly using their RegClassID. The
143 // 8-bit Imm is the register number. Used by VGPR-only and AGPR-only operands.
144 #define DECODE_OPERAND_REG_8(RegClass)                                         \
145   static DecodeStatus Decode##RegClass##RegisterClass(                         \
146       MCInst &Inst, unsigned Imm, uint64_t /*Addr*/,                           \
147       const MCDisassembler *Decoder) {                                         \
148     assert(Imm < (1 << 8) && "8-bit encoding");                                \
149     auto DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);              \
150     return addOperand(                                                         \
151         Inst, DAsm->createRegOperand(AMDGPU::RegClass##RegClassID, Imm));      \
152   }
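// For example, DECODE_OPERAND_REG_8(VGPR_32) below expands to roughly:
//
//   static DecodeStatus DecodeVGPR_32RegisterClass(MCInst &Inst, unsigned Imm,
//                                                  uint64_t /*Addr*/,
//                                                  const MCDisassembler *Decoder) {
//     assert(Imm < (1 << 8) && "8-bit encoding");
//     auto DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
//     return addOperand(
//         Inst, DAsm->createRegOperand(AMDGPU::VGPR_32RegClassID, Imm));
//   }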
153 
154 #define DECODE_SrcOp(Name, EncSize, OpWidth, EncImm, MandatoryLiteral,         \
155                      ImmWidth)                                                 \
156   static DecodeStatus Name(MCInst &Inst, unsigned Imm, uint64_t /*Addr*/,      \
157                            const MCDisassembler *Decoder) {                    \
158     assert(Imm < (1 << EncSize) && #EncSize "-bit encoding");                  \
159     auto DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);              \
160     return addOperand(Inst,                                                    \
161                       DAsm->decodeSrcOp(AMDGPUDisassembler::OpWidth, EncImm,   \
162                                         MandatoryLiteral, ImmWidth));          \
163   }
164 
165 static DecodeStatus decodeSrcOp(MCInst &Inst, unsigned EncSize,
166                                 AMDGPUDisassembler::OpWidthTy OpWidth,
167                                 unsigned Imm, unsigned EncImm,
168                                 bool MandatoryLiteral, unsigned ImmWidth,
169                                 AMDGPU::OperandSemantics Sema,
170                                 const MCDisassembler *Decoder) {
171   assert(Imm < (1U << EncSize) && "Operand doesn't fit encoding!");
172   const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
173   return addOperand(Inst, DAsm->decodeSrcOp(OpWidth, EncImm, MandatoryLiteral,
174                                             ImmWidth, Sema));
175 }
176 
177 // Decoder for registers. The 7-bit Imm is the register number; decodeSrcOp is
178 // used to determine the register class. Used by SGPR-only operands.
179 #define DECODE_OPERAND_REG_7(RegClass, OpWidth)                                \
180   DECODE_SrcOp(Decode##RegClass##RegisterClass, 7, OpWidth, Imm, false, 0)
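// For example, DECODE_OPERAND_REG_7(SReg_32, OPW32) below expands, via
// DECODE_SrcOp, to roughly:
//
//   static DecodeStatus DecodeSReg_32RegisterClass(MCInst &Inst, unsigned Imm,
//                                                  uint64_t /*Addr*/,
//                                                  const MCDisassembler *Decoder) {
//     assert(Imm < (1 << 7) && "7-bit encoding");
//     auto DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
//     return addOperand(Inst, DAsm->decodeSrcOp(AMDGPUDisassembler::OPW32, Imm,
//                                               false, 0));
//   }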
181 
182 // Decoder for registers. Imm is 10 bits: Imm{7-0} is the register number,
183 // Imm{9} is acc (AGPR or VGPR), and Imm{8} should be 0 (see VOP3Pe_SMFMAC).
184 // Imm{8} is set to 1 (IS_VGPR) so that the 'enum10' path of decodeSrcOp is used.
185 // Used by AV_ register classes (register-only operands that are AGPR or VGPR).
186 template <AMDGPUDisassembler::OpWidthTy OpWidth>
187 static DecodeStatus decodeAV10(MCInst &Inst, unsigned Imm, uint64_t /* Addr */,
188                                const MCDisassembler *Decoder) {
189   return decodeSrcOp(Inst, 10, OpWidth, Imm, Imm | AMDGPU::EncValues::IS_VGPR,
190                      false, 0, AMDGPU::OperandSemantics::INT, Decoder);
191 }
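// For example, Imm = 0x203 has Imm{9} = 1 (acc) and Imm{7-0} = 3, i.e. AGPR
// number 3; IS_VGPR (bit 8) is ORed in above only so that decodeSrcOp takes its
// 'enum10' path.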
192 
193 // Decoder for 9-bit-encoded Src operands, registers only.
194 template <AMDGPUDisassembler::OpWidthTy OpWidth>
195 static DecodeStatus decodeSrcReg9(MCInst &Inst, unsigned Imm,
196                                   uint64_t /* Addr */,
197                                   const MCDisassembler *Decoder) {
198   return decodeSrcOp(Inst, 9, OpWidth, Imm, Imm, false, 0,
199                      AMDGPU::OperandSemantics::INT, Decoder);
200 }
201 
202 // Decoder for 9-bit-encoded Src AGPR operands, registers only. The register
203 // number is encoded in 9 bits; Imm{9} is set to 1 (acc) so that the 'enum10'
204 // path of decodeSrcOp is used.
205 template <AMDGPUDisassembler::OpWidthTy OpWidth>
206 static DecodeStatus decodeSrcA9(MCInst &Inst, unsigned Imm, uint64_t /* Addr */,
207                                 const MCDisassembler *Decoder) {
208   return decodeSrcOp(Inst, 9, OpWidth, Imm, Imm | 512, false, 0,
209                      AMDGPU::OperandSemantics::INT, Decoder);
210 }
211 
212 // Decoder for the 'enum10' form of decodeSrcOp: Imm{8-0} is the 9-bit Src
213 // encoding and Imm{9} is acc. Registers only.
214 template <AMDGPUDisassembler::OpWidthTy OpWidth>
215 static DecodeStatus decodeSrcAV10(MCInst &Inst, unsigned Imm,
216                                   uint64_t /* Addr */,
217                                   const MCDisassembler *Decoder) {
218   return decodeSrcOp(Inst, 10, OpWidth, Imm, Imm, false, 0,
219                      AMDGPU::OperandSemantics::INT, Decoder);
220 }
221 
222 // Decoder for RegisterOperands using the 9-bit Src encoding. The operand can
223 // be a register from RegClass or an immediate. Registers outside RegClass are
224 // still decoded, and the InstPrinter will report a warning. An immediate is
225 // decoded into a constant of size ImmWidth, which should match the width of the
226 // immediate used by the OperandType (important for floating point types).
227 template <AMDGPUDisassembler::OpWidthTy OpWidth, unsigned ImmWidth,
228           unsigned OperandSemantics>
229 static DecodeStatus decodeSrcRegOrImm9(MCInst &Inst, unsigned Imm,
230                                        uint64_t /* Addr */,
231                                        const MCDisassembler *Decoder) {
232   return decodeSrcOp(Inst, 9, OpWidth, Imm, Imm, false, ImmWidth,
233                      (AMDGPU::OperandSemantics)OperandSemantics, Decoder);
234 }
235 
236 // Decoder for a 9-bit-encoded Src AGPR or an immediate. Imm{9} is set to 1
237 // (acc) and the value is decoded via the 'enum10' path of decodeSrcOp.
238 template <AMDGPUDisassembler::OpWidthTy OpWidth, unsigned ImmWidth,
239           unsigned OperandSemantics>
240 static DecodeStatus decodeSrcRegOrImmA9(MCInst &Inst, unsigned Imm,
241                                         uint64_t /* Addr */,
242                                         const MCDisassembler *Decoder) {
243   return decodeSrcOp(Inst, 9, OpWidth, Imm, Imm | 512, false, ImmWidth,
244                      (AMDGPU::OperandSemantics)OperandSemantics, Decoder);
245 }
246 
247 template <AMDGPUDisassembler::OpWidthTy OpWidth, unsigned ImmWidth,
248           unsigned OperandSemantics>
249 static DecodeStatus decodeSrcRegOrImmDeferred9(MCInst &Inst, unsigned Imm,
250                                                uint64_t /* Addr */,
251                                                const MCDisassembler *Decoder) {
252   return decodeSrcOp(Inst, 9, OpWidth, Imm, Imm, true, ImmWidth,
253                      (AMDGPU::OperandSemantics)OperandSemantics, Decoder);
254 }
255 
256 // Default decoders generated by tablegen: 'Decode<RegClass>RegisterClass'
257 // when RegisterClass is used as an operand. Most often used for destination
258 // operands.
259 
260 DECODE_OPERAND_REG_8(VGPR_32)
261 DECODE_OPERAND_REG_8(VGPR_32_Lo128)
262 DECODE_OPERAND_REG_8(VReg_64)
263 DECODE_OPERAND_REG_8(VReg_96)
264 DECODE_OPERAND_REG_8(VReg_128)
265 DECODE_OPERAND_REG_8(VReg_192)
266 DECODE_OPERAND_REG_8(VReg_256)
267 DECODE_OPERAND_REG_8(VReg_288)
268 DECODE_OPERAND_REG_8(VReg_352)
269 DECODE_OPERAND_REG_8(VReg_384)
270 DECODE_OPERAND_REG_8(VReg_512)
271 DECODE_OPERAND_REG_8(VReg_1024)
272 
273 DECODE_OPERAND_REG_7(SReg_32, OPW32)
274 DECODE_OPERAND_REG_7(SReg_32_XEXEC, OPW32)
275 DECODE_OPERAND_REG_7(SReg_32_XM0_XEXEC, OPW32)
276 DECODE_OPERAND_REG_7(SReg_32_XEXEC_HI, OPW32)
277 DECODE_OPERAND_REG_7(SReg_64, OPW64)
278 DECODE_OPERAND_REG_7(SReg_64_XEXEC, OPW64)
279 DECODE_OPERAND_REG_7(SReg_64_XEXEC_XNULL, OPW64)
280 DECODE_OPERAND_REG_7(SReg_96, OPW96)
281 DECODE_OPERAND_REG_7(SReg_128, OPW128)
282 DECODE_OPERAND_REG_7(SReg_128_XNULL, OPW128)
283 DECODE_OPERAND_REG_7(SReg_256, OPW256)
284 DECODE_OPERAND_REG_7(SReg_256_XNULL, OPW256)
285 DECODE_OPERAND_REG_7(SReg_512, OPW512)
286 
287 DECODE_OPERAND_REG_8(AGPR_32)
288 DECODE_OPERAND_REG_8(AReg_64)
289 DECODE_OPERAND_REG_8(AReg_128)
290 DECODE_OPERAND_REG_8(AReg_256)
291 DECODE_OPERAND_REG_8(AReg_512)
292 DECODE_OPERAND_REG_8(AReg_1024)
293 
294 static DecodeStatus DecodeVGPR_16RegisterClass(MCInst &Inst, unsigned Imm,
295                                                uint64_t /*Addr*/,
296                                                const MCDisassembler *Decoder) {
297   assert(isUInt<10>(Imm) && "10-bit encoding expected");
298   assert((Imm & (1 << 8)) == 0 && "Imm{8} should not be used");
299 
300   bool IsHi = Imm & (1 << 9);
301   unsigned RegIdx = Imm & 0xff;
302   const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
303   return addOperand(Inst, DAsm->createVGPR16Operand(RegIdx, IsHi));
304 }
305 
306 static DecodeStatus
307 DecodeVGPR_16_Lo128RegisterClass(MCInst &Inst, unsigned Imm, uint64_t /*Addr*/,
308                                  const MCDisassembler *Decoder) {
309   assert(isUInt<8>(Imm) && "8-bit encoding expected");
310 
311   bool IsHi = Imm & (1 << 7);
312   unsigned RegIdx = Imm & 0x7f;
313   const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
314   return addOperand(Inst, DAsm->createVGPR16Operand(RegIdx, IsHi));
315 }
316 
317 template <AMDGPUDisassembler::OpWidthTy OpWidth, unsigned ImmWidth,
318           unsigned OperandSemantics>
319 static DecodeStatus decodeOperand_VSrcT16_Lo128(MCInst &Inst, unsigned Imm,
320                                                 uint64_t /*Addr*/,
321                                                 const MCDisassembler *Decoder) {
322   assert(isUInt<9>(Imm) && "9-bit encoding expected");
323 
324   const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
325   if (Imm & AMDGPU::EncValues::IS_VGPR) {
326     bool IsHi = Imm & (1 << 7);
327     unsigned RegIdx = Imm & 0x7f;
328     return addOperand(Inst, DAsm->createVGPR16Operand(RegIdx, IsHi));
329   }
330   return addOperand(Inst, DAsm->decodeNonVGPRSrcOp(
331                               OpWidth, Imm & 0xFF, false, ImmWidth,
332                               (AMDGPU::OperandSemantics)OperandSemantics));
333 }
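// For example, Imm = 0x1A3 has the IS_VGPR bit (bit 8) set, IsHi = true
// (bit 7) and RegIdx = 0x23, so it names the high half of 16-bit VGPR 0x23;
// values without bit 8 fall through to decodeNonVGPRSrcOp above.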
334 
335 template <AMDGPUDisassembler::OpWidthTy OpWidth, unsigned ImmWidth,
336           unsigned OperandSemantics>
337 static DecodeStatus
338 decodeOperand_VSrcT16_Lo128_Deferred(MCInst &Inst, unsigned Imm,
339                                      uint64_t /*Addr*/,
340                                      const MCDisassembler *Decoder) {
341   const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
342   assert(isUInt<9>(Imm) && "9-bit encoding expected");
343 
344   if (Imm & AMDGPU::EncValues::IS_VGPR) {
345     bool IsHi = Imm & (1 << 7);
346     unsigned RegIdx = Imm & 0x7f;
347     return addOperand(Inst, DAsm->createVGPR16Operand(RegIdx, IsHi));
348   }
349   return addOperand(Inst, DAsm->decodeNonVGPRSrcOp(
350                               OpWidth, Imm & 0xFF, true, ImmWidth,
351                               (AMDGPU::OperandSemantics)OperandSemantics));
352 }
353 
354 template <AMDGPUDisassembler::OpWidthTy OpWidth, unsigned ImmWidth,
355           unsigned OperandSemantics>
356 static DecodeStatus decodeOperand_VSrcT16(MCInst &Inst, unsigned Imm,
357                                           uint64_t /*Addr*/,
358                                           const MCDisassembler *Decoder) {
359   assert(isUInt<10>(Imm) && "10-bit encoding expected");
360 
361   const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
362   if (Imm & AMDGPU::EncValues::IS_VGPR) {
363     bool IsHi = Imm & (1 << 9);
364     unsigned RegIdx = Imm & 0xff;
365     return addOperand(Inst, DAsm->createVGPR16Operand(RegIdx, IsHi));
366   }
367   return addOperand(Inst, DAsm->decodeNonVGPRSrcOp(
368                               OpWidth, Imm & 0xFF, false, ImmWidth,
369                               (AMDGPU::OperandSemantics)OperandSemantics));
370 }
371 
372 static DecodeStatus decodeOperand_VGPR_16(MCInst &Inst, unsigned Imm,
373                                           uint64_t /*Addr*/,
374                                           const MCDisassembler *Decoder) {
375   assert(isUInt<10>(Imm) && "10-bit encoding expected");
376   assert(Imm & AMDGPU::EncValues::IS_VGPR && "VGPR expected");
377 
378   const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
379 
380   bool IsHi = Imm & (1 << 9);
381   unsigned RegIdx = Imm & 0xff;
382   return addOperand(Inst, DAsm->createVGPR16Operand(RegIdx, IsHi));
383 }
384 
385 static DecodeStatus decodeOperand_KImmFP(MCInst &Inst, unsigned Imm,
386                                          uint64_t Addr,
387                                          const MCDisassembler *Decoder) {
388   const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
389   return addOperand(Inst, DAsm->decodeMandatoryLiteralConstant(Imm));
390 }
391 
392 static DecodeStatus decodeOperandVOPDDstY(MCInst &Inst, unsigned Val,
393                                           uint64_t Addr, const void *Decoder) {
394   const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
395   return addOperand(Inst, DAsm->decodeVOPDDstYOp(Inst, Val));
396 }
397 
398 static bool IsAGPROperand(const MCInst &Inst, int OpIdx,
399                           const MCRegisterInfo *MRI) {
400   if (OpIdx < 0)
401     return false;
402 
403   const MCOperand &Op = Inst.getOperand(OpIdx);
404   if (!Op.isReg())
405     return false;
406 
407   MCRegister Sub = MRI->getSubReg(Op.getReg(), AMDGPU::sub0);
408   auto Reg = Sub ? Sub : Op.getReg();
409   return Reg >= AMDGPU::AGPR0 && Reg <= AMDGPU::AGPR255;
410 }
411 
412 static DecodeStatus decodeAVLdSt(MCInst &Inst, unsigned Imm,
413                                  AMDGPUDisassembler::OpWidthTy Opw,
414                                  const MCDisassembler *Decoder) {
415   const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
416   if (!DAsm->isGFX90A()) {
417     Imm &= 511;
418   } else {
419     // If an atomic has both vdata and vdst, their register classes are tied.
420     // The bit is decoded along with the vdst, the first operand, so we need to
421     // change the register class to AGPR if the vdst was an AGPR.
422     // If a DS instruction has both data0 and data1, their register classes are
423     // also tied.
424     unsigned Opc = Inst.getOpcode();
425     uint64_t TSFlags = DAsm->getMCII()->get(Opc).TSFlags;
426     uint16_t DataNameIdx = (TSFlags & SIInstrFlags::DS) ? AMDGPU::OpName::data0
427                                                         : AMDGPU::OpName::vdata;
428     const MCRegisterInfo *MRI = DAsm->getContext().getRegisterInfo();
429     int DataIdx = AMDGPU::getNamedOperandIdx(Opc, DataNameIdx);
430     if ((int)Inst.getNumOperands() == DataIdx) {
431       int DstIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
432       if (IsAGPROperand(Inst, DstIdx, MRI))
433         Imm |= 512;
434     }
435 
436     if (TSFlags & SIInstrFlags::DS) {
437       int Data2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data1);
438       if ((int)Inst.getNumOperands() == Data2Idx &&
439           IsAGPROperand(Inst, DataIdx, MRI))
440         Imm |= 512;
441     }
442   }
443   return addOperand(Inst, DAsm->decodeSrcOp(Opw, Imm | 256));
444 }
445 
446 template <AMDGPUDisassembler::OpWidthTy Opw>
447 static DecodeStatus decodeAVLdSt(MCInst &Inst, unsigned Imm,
448                                  uint64_t /* Addr */,
449                                  const MCDisassembler *Decoder) {
450   return decodeAVLdSt(Inst, Imm, Opw, Decoder);
451 }
452 
453 static DecodeStatus decodeOperand_VSrc_f64(MCInst &Inst, unsigned Imm,
454                                            uint64_t Addr,
455                                            const MCDisassembler *Decoder) {
456   assert(Imm < (1 << 9) && "9-bit encoding");
457   const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
458   return addOperand(Inst,
459                     DAsm->decodeSrcOp(AMDGPUDisassembler::OPW64, Imm, false, 64,
460                                       AMDGPU::OperandSemantics::FP64));
461 }
462 
463 #define DECODE_SDWA(DecName) \
464 DECODE_OPERAND(decodeSDWA##DecName, decodeSDWA##DecName)
465 
466 DECODE_SDWA(Src32)
467 DECODE_SDWA(Src16)
468 DECODE_SDWA(VopcDst)
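// For example, DECODE_SDWA(Src32) above defines decodeSDWASrc32(), which simply
// forwards Imm to AMDGPUDisassembler::decodeSDWASrc32().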
469 
470 static DecodeStatus decodeVersionImm(MCInst &Inst, unsigned Imm,
471                                      uint64_t /* Addr */,
472                                      const MCDisassembler *Decoder) {
473   const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
474   return addOperand(Inst, DAsm->decodeVersionImm(Imm));
475 }
476 
477 #include "AMDGPUGenDisassemblerTables.inc"
478 
479 //===----------------------------------------------------------------------===//
480 //
481 //===----------------------------------------------------------------------===//
482 
483 template <typename T> static inline T eatBytes(ArrayRef<uint8_t>& Bytes) {
484   assert(Bytes.size() >= sizeof(T));
485   const auto Res =
486       support::endian::read<T, llvm::endianness::little>(Bytes.data());
487   Bytes = Bytes.slice(sizeof(T));
488   return Res;
489 }
490 
491 static inline DecoderUInt128 eat12Bytes(ArrayRef<uint8_t> &Bytes) {
492   assert(Bytes.size() >= 12);
493   uint64_t Lo =
494       support::endian::read<uint64_t, llvm::endianness::little>(Bytes.data());
495   Bytes = Bytes.slice(8);
496   uint64_t Hi =
497       support::endian::read<uint32_t, llvm::endianness::little>(Bytes.data());
498   Bytes = Bytes.slice(4);
499   return DecoderUInt128(Lo, Hi);
500 }
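// For a 96-bit encoding the first 8 bytes become the low half of the
// DecoderUInt128 and the next 4 bytes its (zero-extended) high half, both read
// little-endian; eat16Bytes below does the same with two 8-byte halves.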
501 
502 static inline DecoderUInt128 eat16Bytes(ArrayRef<uint8_t> &Bytes) {
503   assert(Bytes.size() >= 16);
504   uint64_t Lo =
505       support::endian::read<uint64_t, llvm::endianness::little>(Bytes.data());
506   Bytes = Bytes.slice(8);
507   uint64_t Hi =
508       support::endian::read<uint64_t, llvm::endianness::little>(Bytes.data());
509   Bytes = Bytes.slice(8);
510   return DecoderUInt128(Lo, Hi);
511 }
512 
513 DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
514                                                 ArrayRef<uint8_t> Bytes_,
515                                                 uint64_t Address,
516                                                 raw_ostream &CS) const {
517   unsigned MaxInstBytesNum = std::min((size_t)TargetMaxInstBytes, Bytes_.size());
518   Bytes = Bytes_.slice(0, MaxInstBytesNum);
519 
520   // In case the opcode is not recognized we'll assume a Size of 4 bytes (unless
521   // there are fewer bytes left). This will be overridden on success.
522   Size = std::min((size_t)4, Bytes_.size());
523 
524   do {
525     // ToDo: it would be better to switch the encoding length using some bit
526     // predicate, but it is unknown yet, so try everything we can.
527 
528     // Try to decode DPP and SDWA first to resolve conflicts with the VOP1 and
529     // VOP2 encodings.
530     if (isGFX11Plus() && Bytes.size() >= 12) {
531       DecoderUInt128 DecW = eat12Bytes(Bytes);
532 
533       if (isGFX11() &&
534           tryDecodeInst(DecoderTableGFX1196, DecoderTableGFX11_FAKE1696, MI,
535                         DecW, Address, CS))
536         break;
537 
538       if (isGFX12() &&
539           tryDecodeInst(DecoderTableGFX1296, DecoderTableGFX12_FAKE1696, MI,
540                         DecW, Address, CS))
541         break;
542 
543       if (isGFX12() &&
544           tryDecodeInst(DecoderTableGFX12W6496, MI, DecW, Address, CS))
545         break;
546 
547       // Reinitialize Bytes
548       Bytes = Bytes_.slice(0, MaxInstBytesNum);
549 
550     } else if (Bytes.size() >= 16 &&
551                STI.hasFeature(AMDGPU::FeatureGFX950Insts)) {
552       DecoderUInt128 DecW = eat16Bytes(Bytes);
553       if (tryDecodeInst(DecoderTableGFX940128, MI, DecW, Address, CS))
554         break;
555 
556       // Reinitialize Bytes
557       Bytes = Bytes_.slice(0, MaxInstBytesNum);
558     }
559 
560     if (Bytes.size() >= 8) {
561       const uint64_t QW = eatBytes<uint64_t>(Bytes);
562 
563       if (STI.hasFeature(AMDGPU::FeatureGFX10_BEncoding) &&
564           tryDecodeInst(DecoderTableGFX10_B64, MI, QW, Address, CS))
565         break;
566 
567       if (STI.hasFeature(AMDGPU::FeatureUnpackedD16VMem) &&
568           tryDecodeInst(DecoderTableGFX80_UNPACKED64, MI, QW, Address, CS))
569         break;
570 
571       if (STI.hasFeature(AMDGPU::FeatureGFX950Insts) &&
572           tryDecodeInst(DecoderTableGFX95064, MI, QW, Address, CS))
573         break;
574 
575       // Some GFX9 subtargets repurposed the v_mad_mix_f32, v_mad_mixlo_f16 and
576       // v_mad_mixhi_f16 for FMA variants. Try to decode using this special
577       // table first so we print the correct name.
578       if (STI.hasFeature(AMDGPU::FeatureFmaMixInsts) &&
579           tryDecodeInst(DecoderTableGFX9_DL64, MI, QW, Address, CS))
580         break;
581 
582       if (STI.hasFeature(AMDGPU::FeatureGFX940Insts) &&
583           tryDecodeInst(DecoderTableGFX94064, MI, QW, Address, CS))
584         break;
585 
586       if (STI.hasFeature(AMDGPU::FeatureGFX90AInsts) &&
587           tryDecodeInst(DecoderTableGFX90A64, MI, QW, Address, CS))
588         break;
589 
590       if ((isVI() || isGFX9()) &&
591           tryDecodeInst(DecoderTableGFX864, MI, QW, Address, CS))
592         break;
593 
594       if (isGFX9() && tryDecodeInst(DecoderTableGFX964, MI, QW, Address, CS))
595         break;
596 
597       if (isGFX10() && tryDecodeInst(DecoderTableGFX1064, MI, QW, Address, CS))
598         break;
599 
600       if (isGFX12() &&
601           tryDecodeInst(DecoderTableGFX1264, DecoderTableGFX12_FAKE1664, MI, QW,
602                         Address, CS))
603         break;
604 
605       if (isGFX11() &&
606           tryDecodeInst(DecoderTableGFX1164, DecoderTableGFX11_FAKE1664, MI, QW,
607                         Address, CS))
608         break;
609 
610       if (isGFX11() &&
611           tryDecodeInst(DecoderTableGFX11W6464, MI, QW, Address, CS))
612         break;
613 
614       if (isGFX12() &&
615           tryDecodeInst(DecoderTableGFX12W6464, MI, QW, Address, CS))
616         break;
617 
618       // Reinitialize Bytes
619       Bytes = Bytes_.slice(0, MaxInstBytesNum);
620     }
621 
622     // Try to decode a 32-bit instruction.
623     if (Bytes.size() >= 4) {
624       const uint32_t DW = eatBytes<uint32_t>(Bytes);
625 
626       if ((isVI() || isGFX9()) &&
627           tryDecodeInst(DecoderTableGFX832, MI, DW, Address, CS))
628         break;
629 
630       if (tryDecodeInst(DecoderTableAMDGPU32, MI, DW, Address, CS))
631         break;
632 
633       if (isGFX9() && tryDecodeInst(DecoderTableGFX932, MI, DW, Address, CS))
634         break;
635 
636       if (STI.hasFeature(AMDGPU::FeatureGFX950Insts) &&
637           tryDecodeInst(DecoderTableGFX95032, MI, DW, Address, CS))
638         break;
639 
640       if (STI.hasFeature(AMDGPU::FeatureGFX90AInsts) &&
641           tryDecodeInst(DecoderTableGFX90A32, MI, DW, Address, CS))
642         break;
643 
644       if (STI.hasFeature(AMDGPU::FeatureGFX10_BEncoding) &&
645           tryDecodeInst(DecoderTableGFX10_B32, MI, DW, Address, CS))
646         break;
647 
648       if (isGFX10() && tryDecodeInst(DecoderTableGFX1032, MI, DW, Address, CS))
649         break;
650 
651       if (isGFX11() &&
652           tryDecodeInst(DecoderTableGFX1132, DecoderTableGFX11_FAKE1632, MI, DW,
653                         Address, CS))
654         break;
655 
656       if (isGFX12() &&
657           tryDecodeInst(DecoderTableGFX1232, DecoderTableGFX12_FAKE1632, MI, DW,
658                         Address, CS))
659         break;
660     }
661 
662     return MCDisassembler::Fail;
663   } while (false);
664 
665   if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::DPP) {
666     if (isMacDPP(MI))
667       convertMacDPPInst(MI);
668 
669     if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::VOP3P)
670       convertVOP3PDPPInst(MI);
671     else if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::VOPC)
672       convertVOPCDPPInst(MI); // Special VOP3 case
673     else if (AMDGPU::isVOPC64DPP(MI.getOpcode()))
674       convertVOPC64DPPInst(MI); // Special VOP3 case
675     else if (AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::dpp8) !=
676              -1)
677       convertDPP8Inst(MI);
678     else if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::VOP3)
679       convertVOP3DPPInst(MI); // Regular VOP3 case
680   }
681 
682   convertTrue16OpSel(MI);
683 
684   if (AMDGPU::isMAC(MI.getOpcode())) {
685     // Insert dummy unused src2_modifiers.
686     insertNamedMCOperand(MI, MCOperand::createImm(0),
687                          AMDGPU::OpName::src2_modifiers);
688   }
689 
690   if (MI.getOpcode() == AMDGPU::V_CVT_SR_BF8_F32_e64_dpp ||
691       MI.getOpcode() == AMDGPU::V_CVT_SR_FP8_F32_e64_dpp) {
692     // Insert dummy unused src2_modifiers.
693     insertNamedMCOperand(MI, MCOperand::createImm(0),
694                          AMDGPU::OpName::src2_modifiers);
695   }
696 
697   if ((MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::DS) &&
698       !AMDGPU::hasGDS(STI)) {
699     insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::gds);
700   }
701 
702   if (MCII->get(MI.getOpcode()).TSFlags &
703       (SIInstrFlags::MUBUF | SIInstrFlags::FLAT | SIInstrFlags::SMRD)) {
704     int CPolPos = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
705                                              AMDGPU::OpName::cpol);
706     if (CPolPos != -1) {
707       unsigned CPol =
708           (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::IsAtomicRet) ?
709               AMDGPU::CPol::GLC : 0;
710       if (MI.getNumOperands() <= (unsigned)CPolPos) {
711         insertNamedMCOperand(MI, MCOperand::createImm(CPol),
712                              AMDGPU::OpName::cpol);
713       } else if (CPol) {
714         MI.getOperand(CPolPos).setImm(MI.getOperand(CPolPos).getImm() | CPol);
715       }
716     }
717   }
718 
719   if ((MCII->get(MI.getOpcode()).TSFlags &
720        (SIInstrFlags::MTBUF | SIInstrFlags::MUBUF)) &&
721       (STI.hasFeature(AMDGPU::FeatureGFX90AInsts))) {
722     // GFX90A lost TFE; its place is occupied by ACC.
723     int TFEOpIdx =
724         AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::tfe);
725     if (TFEOpIdx != -1) {
726       auto *TFEIter = MI.begin();
727       std::advance(TFEIter, TFEOpIdx);
728       MI.insert(TFEIter, MCOperand::createImm(0));
729     }
730   }
731 
732   if (MCII->get(MI.getOpcode()).TSFlags &
733       (SIInstrFlags::MTBUF | SIInstrFlags::MUBUF)) {
734     int SWZOpIdx =
735         AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::swz);
736     if (SWZOpIdx != -1) {
737       auto *SWZIter = MI.begin();
738       std::advance(SWZIter, SWZOpIdx);
739       MI.insert(SWZIter, MCOperand::createImm(0));
740     }
741   }
742 
743   if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::MIMG) {
744     int VAddr0Idx =
745         AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0);
746     int RsrcIdx =
747         AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc);
748     unsigned NSAArgs = RsrcIdx - VAddr0Idx - 1;
749     if (VAddr0Idx >= 0 && NSAArgs > 0) {
750       unsigned NSAWords = (NSAArgs + 3) / 4;
751       if (Bytes.size() < 4 * NSAWords)
752         return MCDisassembler::Fail;
753       for (unsigned i = 0; i < NSAArgs; ++i) {
754         const unsigned VAddrIdx = VAddr0Idx + 1 + i;
755         auto VAddrRCID =
756             MCII->get(MI.getOpcode()).operands()[VAddrIdx].RegClass;
757         MI.insert(MI.begin() + VAddrIdx, createRegOperand(VAddrRCID, Bytes[i]));
758       }
759       Bytes = Bytes.slice(4 * NSAWords);
760     }
761 
762     convertMIMGInst(MI);
763   }
764 
765   if (MCII->get(MI.getOpcode()).TSFlags &
766       (SIInstrFlags::VIMAGE | SIInstrFlags::VSAMPLE))
767     convertMIMGInst(MI);
768 
769   if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::EXP)
770     convertEXPInst(MI);
771 
772   if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::VINTERP)
773     convertVINTERPInst(MI);
774 
775   if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::SDWA)
776     convertSDWAInst(MI);
777 
778   if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::IsMAI)
779     convertMAIInst(MI);
780 
781   int VDstIn_Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
782                                               AMDGPU::OpName::vdst_in);
783   if (VDstIn_Idx != -1) {
784     int Tied = MCII->get(MI.getOpcode()).getOperandConstraint(VDstIn_Idx,
785                            MCOI::OperandConstraint::TIED_TO);
786     if (Tied != -1 && (MI.getNumOperands() <= (unsigned)VDstIn_Idx ||
787          !MI.getOperand(VDstIn_Idx).isReg() ||
788          MI.getOperand(VDstIn_Idx).getReg() != MI.getOperand(Tied).getReg())) {
789       if (MI.getNumOperands() > (unsigned)VDstIn_Idx)
790         MI.erase(&MI.getOperand(VDstIn_Idx));
791       insertNamedMCOperand(MI,
792         MCOperand::createReg(MI.getOperand(Tied).getReg()),
793         AMDGPU::OpName::vdst_in);
794     }
795   }
796 
797   int ImmLitIdx =
798       AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::imm);
799   bool IsSOPK = MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::SOPK;
800   if (ImmLitIdx != -1 && !IsSOPK)
801     convertFMAanyK(MI, ImmLitIdx);
802 
803   Size = MaxInstBytesNum - Bytes.size();
804   return MCDisassembler::Success;
805 }
806 
807 void AMDGPUDisassembler::convertEXPInst(MCInst &MI) const {
808   if (STI.hasFeature(AMDGPU::FeatureGFX11Insts)) {
809     // The MCInst still has these fields even though they are no longer encoded
810     // in the GFX11 instruction.
811     insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::vm);
812     insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::compr);
813   }
814 }
815 
816 void AMDGPUDisassembler::convertVINTERPInst(MCInst &MI) const {
817   convertTrue16OpSel(MI);
818   if (MI.getOpcode() == AMDGPU::V_INTERP_P10_F16_F32_inreg_t16_gfx11 ||
819       MI.getOpcode() == AMDGPU::V_INTERP_P10_F16_F32_inreg_fake16_gfx11 ||
820       MI.getOpcode() == AMDGPU::V_INTERP_P10_F16_F32_inreg_t16_gfx12 ||
821       MI.getOpcode() == AMDGPU::V_INTERP_P10_F16_F32_inreg_fake16_gfx12 ||
822       MI.getOpcode() == AMDGPU::V_INTERP_P10_RTZ_F16_F32_inreg_t16_gfx11 ||
823       MI.getOpcode() == AMDGPU::V_INTERP_P10_RTZ_F16_F32_inreg_fake16_gfx11 ||
824       MI.getOpcode() == AMDGPU::V_INTERP_P10_RTZ_F16_F32_inreg_t16_gfx12 ||
825       MI.getOpcode() == AMDGPU::V_INTERP_P10_RTZ_F16_F32_inreg_fake16_gfx12 ||
826       MI.getOpcode() == AMDGPU::V_INTERP_P2_F16_F32_inreg_t16_gfx11 ||
827       MI.getOpcode() == AMDGPU::V_INTERP_P2_F16_F32_inreg_fake16_gfx11 ||
828       MI.getOpcode() == AMDGPU::V_INTERP_P2_F16_F32_inreg_t16_gfx12 ||
829       MI.getOpcode() == AMDGPU::V_INTERP_P2_F16_F32_inreg_fake16_gfx12 ||
830       MI.getOpcode() == AMDGPU::V_INTERP_P2_RTZ_F16_F32_inreg_t16_gfx11 ||
831       MI.getOpcode() == AMDGPU::V_INTERP_P2_RTZ_F16_F32_inreg_fake16_gfx11 ||
832       MI.getOpcode() == AMDGPU::V_INTERP_P2_RTZ_F16_F32_inreg_t16_gfx12 ||
833       MI.getOpcode() == AMDGPU::V_INTERP_P2_RTZ_F16_F32_inreg_fake16_gfx12) {
834     // The MCInst has this field that is not directly encoded in the
835     // instruction.
836     insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::op_sel);
837   }
838 }
839 
840 void AMDGPUDisassembler::convertSDWAInst(MCInst &MI) const {
841   if (STI.hasFeature(AMDGPU::FeatureGFX9) ||
842       STI.hasFeature(AMDGPU::FeatureGFX10)) {
843     if (AMDGPU::hasNamedOperand(MI.getOpcode(), AMDGPU::OpName::sdst))
844       // VOPC - insert clamp
845       insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::clamp);
846   } else if (STI.hasFeature(AMDGPU::FeatureVolcanicIslands)) {
847     int SDst = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst);
848     if (SDst != -1) {
849       // VOPC - insert VCC register as sdst
850       insertNamedMCOperand(MI, createRegOperand(AMDGPU::VCC),
851                            AMDGPU::OpName::sdst);
852     } else {
853       // VOP1/2 - insert omod if present in instruction
854       insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::omod);
855     }
856   }
857 }
858 
859 /// Adjust the register values used by V_MFMA_F8F6F4_f8_f8 instructions to the
860 /// appropriate subregister for the used format width.
861 static void adjustMFMA_F8F6F4OpRegClass(const MCRegisterInfo &MRI,
862                                         MCOperand &MO, uint8_t NumRegs) {
863   switch (NumRegs) {
864   case 4:
865     return MO.setReg(MRI.getSubReg(MO.getReg(), AMDGPU::sub0_sub1_sub2_sub3));
866   case 6:
867     return MO.setReg(
868         MRI.getSubReg(MO.getReg(), AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5));
869   case 8:
870     // No-op in cases where one operand is still f8/bf8.
871     return;
872   default:
873     llvm_unreachable("Unexpected size for mfma f8f6f4 operand");
874   }
875 }
876 
877 /// f8f6f4 instructions have different pseudos depending on the formats used. In
878 /// the disassembler table we only have the variants with the largest register
879 /// classes, which assume an fp8/bf8 format for both operands. The actual
880 /// register class depends on the format selected by the blgp and cbsz operands,
881 /// so adjust the register classes according to the format used.
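/// For example (assuming the usual dword counts for these formats: fp8/bf8 use
/// 8 registers, fp6/bf6 use 6, fp4 uses 4), a cbsz value selecting an fp6
/// format for srcA gives NumRegsSrcA == 6, and src0 is narrowed to its
/// sub0-sub5 subregister by adjustMFMA_F8F6F4OpRegClass above.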
882 void AMDGPUDisassembler::convertMAIInst(MCInst &MI) const {
883   int BlgpIdx =
884       AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::blgp);
885   if (BlgpIdx == -1)
886     return;
887 
888   int CbszIdx =
889       AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::cbsz);
890 
891   unsigned CBSZ = MI.getOperand(CbszIdx).getImm();
892   unsigned BLGP = MI.getOperand(BlgpIdx).getImm();
893 
894   const AMDGPU::MFMA_F8F6F4_Info *AdjustedRegClassOpcode =
895       AMDGPU::getMFMA_F8F6F4_WithFormatArgs(CBSZ, BLGP, MI.getOpcode());
896   if (!AdjustedRegClassOpcode ||
897       AdjustedRegClassOpcode->Opcode == MI.getOpcode())
898     return;
899 
900   MI.setOpcode(AdjustedRegClassOpcode->Opcode);
901   int Src0Idx =
902       AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);
903   int Src1Idx =
904       AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src1);
905   adjustMFMA_F8F6F4OpRegClass(MRI, MI.getOperand(Src0Idx),
906                               AdjustedRegClassOpcode->NumRegsSrcA);
907   adjustMFMA_F8F6F4OpRegClass(MRI, MI.getOperand(Src1Idx),
908                               AdjustedRegClassOpcode->NumRegsSrcB);
909 }
910 
911 struct VOPModifiers {
912   unsigned OpSel = 0;
913   unsigned OpSelHi = 0;
914   unsigned NegLo = 0;
915   unsigned NegHi = 0;
916 };
917 
918 // Reconstruct values of VOP3/VOP3P operands such as op_sel.
919 // Note that these values do not affect disassembler output,
920 // so this is only necessary for consistency with src_modifiers.
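// For example, if src1_modifiers has SISrcMods::OP_SEL_0 set, bit 1 of the
// reconstructed OpSel is set; with IsVOP3P the OP_SEL_1, NEG and NEG_HI bits
// are gathered into OpSelHi, NegLo and NegHi in the same per-source fashion.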
921 static VOPModifiers collectVOPModifiers(const MCInst &MI,
922                                         bool IsVOP3P = false) {
923   VOPModifiers Modifiers;
924   unsigned Opc = MI.getOpcode();
925   const int ModOps[] = {AMDGPU::OpName::src0_modifiers,
926                         AMDGPU::OpName::src1_modifiers,
927                         AMDGPU::OpName::src2_modifiers};
928   for (int J = 0; J < 3; ++J) {
929     int OpIdx = AMDGPU::getNamedOperandIdx(Opc, ModOps[J]);
930     if (OpIdx == -1)
931       continue;
932 
933     unsigned Val = MI.getOperand(OpIdx).getImm();
934 
935     Modifiers.OpSel |= !!(Val & SISrcMods::OP_SEL_0) << J;
936     if (IsVOP3P) {
937       Modifiers.OpSelHi |= !!(Val & SISrcMods::OP_SEL_1) << J;
938       Modifiers.NegLo |= !!(Val & SISrcMods::NEG) << J;
939       Modifiers.NegHi |= !!(Val & SISrcMods::NEG_HI) << J;
940     } else if (J == 0) {
941       Modifiers.OpSel |= !!(Val & SISrcMods::DST_OP_SEL) << 3;
942     }
943   }
944 
945   return Modifiers;
946 }
947 
948 // Instructions decode the op_sel/suffix bits into the src_modifier
949 // operands. Copy those bits into the src operands for true16 VGPRs.
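// For example, when src0_modifiers carries OP_SEL_0 and src0 is a 16-bit VGPR,
// the operand is rewritten below to the corresponding high-half register
// (register number RegIdx * 2 + 1 within the VGPR_16 class).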
950 void AMDGPUDisassembler::convertTrue16OpSel(MCInst &MI) const {
951   const unsigned Opc = MI.getOpcode();
952   const MCRegisterClass &ConversionRC =
953       MRI.getRegClass(AMDGPU::VGPR_16RegClassID);
954   constexpr std::array<std::tuple<int, int, unsigned>, 4> OpAndOpMods = {
955       {{AMDGPU::OpName::src0, AMDGPU::OpName::src0_modifiers,
956         SISrcMods::OP_SEL_0},
957        {AMDGPU::OpName::src1, AMDGPU::OpName::src1_modifiers,
958         SISrcMods::OP_SEL_0},
959        {AMDGPU::OpName::src2, AMDGPU::OpName::src2_modifiers,
960         SISrcMods::OP_SEL_0},
961        {AMDGPU::OpName::vdst, AMDGPU::OpName::src0_modifiers,
962         SISrcMods::DST_OP_SEL}}};
963   for (const auto &[OpName, OpModsName, OpSelMask] : OpAndOpMods) {
964     int OpIdx = AMDGPU::getNamedOperandIdx(Opc, OpName);
965     int OpModsIdx = AMDGPU::getNamedOperandIdx(Opc, OpModsName);
966     if (OpIdx == -1 || OpModsIdx == -1)
967       continue;
968     MCOperand &Op = MI.getOperand(OpIdx);
969     if (!Op.isReg())
970       continue;
971     if (!ConversionRC.contains(Op.getReg()))
972       continue;
973     unsigned OpEnc = MRI.getEncodingValue(Op.getReg());
974     const MCOperand &OpMods = MI.getOperand(OpModsIdx);
975     unsigned ModVal = OpMods.getImm();
976     if (ModVal & OpSelMask) { // isHi
977       unsigned RegIdx = OpEnc & AMDGPU::HWEncoding::REG_IDX_MASK;
978       Op.setReg(ConversionRC.getRegister(RegIdx * 2 + 1));
979     }
980   }
981 }
982 
983 // MAC opcodes have special old and src2 operands.
984 // src2 is tied to dst, while old is not tied (but assumed to be).
985 bool AMDGPUDisassembler::isMacDPP(MCInst &MI) const {
986   constexpr int DST_IDX = 0;
987   auto Opcode = MI.getOpcode();
988   const auto &Desc = MCII->get(Opcode);
989   auto OldIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::old);
990 
991   if (OldIdx != -1 && Desc.getOperandConstraint(
992                           OldIdx, MCOI::OperandConstraint::TIED_TO) == -1) {
993     assert(AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::src2));
994     assert(Desc.getOperandConstraint(
995                AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2),
996                MCOI::OperandConstraint::TIED_TO) == DST_IDX);
997     (void)DST_IDX;
998     return true;
999   }
1000 
1001   return false;
1002 }
1003 
1004 // Create dummy old operand and insert dummy unused src2_modifiers
1005 void AMDGPUDisassembler::convertMacDPPInst(MCInst &MI) const {
1006   assert(MI.getNumOperands() + 1 < MCII->get(MI.getOpcode()).getNumOperands());
1007   insertNamedMCOperand(MI, MCOperand::createReg(0), AMDGPU::OpName::old);
1008   insertNamedMCOperand(MI, MCOperand::createImm(0),
1009                        AMDGPU::OpName::src2_modifiers);
1010 }
1011 
1012 void AMDGPUDisassembler::convertDPP8Inst(MCInst &MI) const {
1013   unsigned Opc = MI.getOpcode();
1014 
1015   int VDstInIdx =
1016       AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdst_in);
1017   if (VDstInIdx != -1)
1018     insertNamedMCOperand(MI, MI.getOperand(0), AMDGPU::OpName::vdst_in);
1019 
1020   unsigned DescNumOps = MCII->get(Opc).getNumOperands();
1021   if (MI.getNumOperands() < DescNumOps &&
1022       AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::op_sel)) {
1023     convertTrue16OpSel(MI);
1024     auto Mods = collectVOPModifiers(MI);
1025     insertNamedMCOperand(MI, MCOperand::createImm(Mods.OpSel),
1026                          AMDGPU::OpName::op_sel);
1027   } else {
1028     // Insert dummy unused src modifiers.
1029     if (MI.getNumOperands() < DescNumOps &&
1030         AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::src0_modifiers))
1031       insertNamedMCOperand(MI, MCOperand::createImm(0),
1032                            AMDGPU::OpName::src0_modifiers);
1033 
1034     if (MI.getNumOperands() < DescNumOps &&
1035         AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::src1_modifiers))
1036       insertNamedMCOperand(MI, MCOperand::createImm(0),
1037                            AMDGPU::OpName::src1_modifiers);
1038   }
1039 }
1040 
1041 void AMDGPUDisassembler::convertVOP3DPPInst(MCInst &MI) const {
1042   convertTrue16OpSel(MI);
1043 
1044   int VDstInIdx =
1045       AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdst_in);
1046   if (VDstInIdx != -1)
1047     insertNamedMCOperand(MI, MI.getOperand(0), AMDGPU::OpName::vdst_in);
1048 
1049   unsigned Opc = MI.getOpcode();
1050   unsigned DescNumOps = MCII->get(Opc).getNumOperands();
1051   if (MI.getNumOperands() < DescNumOps &&
1052       AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::op_sel)) {
1053     auto Mods = collectVOPModifiers(MI);
1054     insertNamedMCOperand(MI, MCOperand::createImm(Mods.OpSel),
1055                          AMDGPU::OpName::op_sel);
1056   }
1057 }
1058 
1059 // Note that before gfx10, the MIMG encoding provided no information about the
1060 // VADDR size. Consequently, decoded instructions always show the address as if
1061 // it had one dword, which may not actually be the case.
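// For example, dmask = 0b1011 enables three channels, so DstSize is 3; with
// packed D16 this becomes (3 + 1) / 2 = 2 dwords, and TFE adds one more.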
1062 void AMDGPUDisassembler::convertMIMGInst(MCInst &MI) const {
1063   auto TSFlags = MCII->get(MI.getOpcode()).TSFlags;
1064 
1065   int VDstIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
1066                                            AMDGPU::OpName::vdst);
1067 
1068   int VDataIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
1069                                             AMDGPU::OpName::vdata);
1070   int VAddr0Idx =
1071       AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0);
1072   int RsrcOpName = (TSFlags & SIInstrFlags::MIMG) ? AMDGPU::OpName::srsrc
1073                                                   : AMDGPU::OpName::rsrc;
1074   int RsrcIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), RsrcOpName);
1075   int DMaskIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
1076                                             AMDGPU::OpName::dmask);
1077 
1078   int TFEIdx   = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
1079                                             AMDGPU::OpName::tfe);
1080   int D16Idx   = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
1081                                             AMDGPU::OpName::d16);
1082 
1083   const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(MI.getOpcode());
1084   const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
1085       AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode);
1086 
1087   assert(VDataIdx != -1);
1088   if (BaseOpcode->BVH) {
1089     // Add A16 operand for intersect_ray instructions
1090     addOperand(MI, MCOperand::createImm(BaseOpcode->A16));
1091     return;
1092   }
1093 
1094   bool IsAtomic = (VDstIdx != -1);
1095   bool IsGather4 = TSFlags & SIInstrFlags::Gather4;
1096   bool IsVSample = TSFlags & SIInstrFlags::VSAMPLE;
1097   bool IsNSA = false;
1098   bool IsPartialNSA = false;
1099   unsigned AddrSize = Info->VAddrDwords;
1100 
1101   if (isGFX10Plus()) {
1102     unsigned DimIdx =
1103         AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::dim);
1104     int A16Idx =
1105         AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::a16);
1106     const AMDGPU::MIMGDimInfo *Dim =
1107         AMDGPU::getMIMGDimInfoByEncoding(MI.getOperand(DimIdx).getImm());
1108     const bool IsA16 = (A16Idx != -1 && MI.getOperand(A16Idx).getImm());
1109 
1110     AddrSize =
1111         AMDGPU::getAddrSizeMIMGOp(BaseOpcode, Dim, IsA16, AMDGPU::hasG16(STI));
1112 
1113     // VSAMPLE insts that do not use vaddr3 behave the same as NSA forms.
1114     // VIMAGE insts other than BVH never use vaddr4.
1115     IsNSA = Info->MIMGEncoding == AMDGPU::MIMGEncGfx10NSA ||
1116             Info->MIMGEncoding == AMDGPU::MIMGEncGfx11NSA ||
1117             Info->MIMGEncoding == AMDGPU::MIMGEncGfx12;
1118     if (!IsNSA) {
1119       if (!IsVSample && AddrSize > 12)
1120         AddrSize = 16;
1121     } else {
1122       if (AddrSize > Info->VAddrDwords) {
1123         if (!STI.hasFeature(AMDGPU::FeaturePartialNSAEncoding)) {
1124           // The NSA encoding does not contain enough operands for the
1125           // combination of base opcode / dimension. Should this be an error?
1126           return;
1127         }
1128         IsPartialNSA = true;
1129       }
1130     }
1131   }
1132 
1133   unsigned DMask = MI.getOperand(DMaskIdx).getImm() & 0xf;
1134   unsigned DstSize = IsGather4 ? 4 : std::max(llvm::popcount(DMask), 1);
1135 
1136   bool D16 = D16Idx >= 0 && MI.getOperand(D16Idx).getImm();
1137   if (D16 && AMDGPU::hasPackedD16(STI)) {
1138     DstSize = (DstSize + 1) / 2;
1139   }
1140 
1141   if (TFEIdx != -1 && MI.getOperand(TFEIdx).getImm())
1142     DstSize += 1;
1143 
1144   if (DstSize == Info->VDataDwords && AddrSize == Info->VAddrDwords)
1145     return;
1146 
1147   int NewOpcode =
1148       AMDGPU::getMIMGOpcode(Info->BaseOpcode, Info->MIMGEncoding, DstSize, AddrSize);
1149   if (NewOpcode == -1)
1150     return;
1151 
1152   // Widen the register to the correct number of enabled channels.
1153   MCRegister NewVdata;
1154   if (DstSize != Info->VDataDwords) {
1155     auto DataRCID = MCII->get(NewOpcode).operands()[VDataIdx].RegClass;
1156 
1157     // Get first subregister of VData
1158     MCRegister Vdata0 = MI.getOperand(VDataIdx).getReg();
1159     MCRegister VdataSub0 = MRI.getSubReg(Vdata0, AMDGPU::sub0);
1160     Vdata0 = (VdataSub0 != 0)? VdataSub0 : Vdata0;
1161 
1162     NewVdata = MRI.getMatchingSuperReg(Vdata0, AMDGPU::sub0,
1163                                        &MRI.getRegClass(DataRCID));
1164     if (!NewVdata) {
1165       // It's possible to encode this such that the low register + enabled
1166       // components exceeds the register count.
1167       return;
1168     }
1169   }
1170 
1171   // If not using NSA on GFX10+, widen vaddr0 to the correct size.
1172   // If using partial NSA on GFX11+, widen the last address register.
1173   int VAddrSAIdx = IsPartialNSA ? (RsrcIdx - 1) : VAddr0Idx;
1174   MCRegister NewVAddrSA;
1175   if (STI.hasFeature(AMDGPU::FeatureNSAEncoding) && (!IsNSA || IsPartialNSA) &&
1176       AddrSize != Info->VAddrDwords) {
1177     MCRegister VAddrSA = MI.getOperand(VAddrSAIdx).getReg();
1178     MCRegister VAddrSubSA = MRI.getSubReg(VAddrSA, AMDGPU::sub0);
1179     VAddrSA = VAddrSubSA ? VAddrSubSA : VAddrSA;
1180 
1181     auto AddrRCID = MCII->get(NewOpcode).operands()[VAddrSAIdx].RegClass;
1182     NewVAddrSA = MRI.getMatchingSuperReg(VAddrSA, AMDGPU::sub0,
1183                                         &MRI.getRegClass(AddrRCID));
1184     if (!NewVAddrSA)
1185       return;
1186   }
1187 
1188   MI.setOpcode(NewOpcode);
1189 
1190   if (NewVdata != AMDGPU::NoRegister) {
1191     MI.getOperand(VDataIdx) = MCOperand::createReg(NewVdata);
1192 
1193     if (IsAtomic) {
1194       // Atomic operations have an additional operand (a copy of data)
1195       MI.getOperand(VDstIdx) = MCOperand::createReg(NewVdata);
1196     }
1197   }
1198 
1199   if (NewVAddrSA) {
1200     MI.getOperand(VAddrSAIdx) = MCOperand::createReg(NewVAddrSA);
1201   } else if (IsNSA) {
1202     assert(AddrSize <= Info->VAddrDwords);
1203     MI.erase(MI.begin() + VAddr0Idx + AddrSize,
1204              MI.begin() + VAddr0Idx + Info->VAddrDwords);
1205   }
1206 }
1207 
1208 // The op_sel and neg bits are used both in src_modifiers and in standalone
1209 // operands. The autogenerated decoder only adds them to src_modifiers, so
1210 // manually add the bits to the other operands.
1211 void AMDGPUDisassembler::convertVOP3PDPPInst(MCInst &MI) const {
1212   unsigned Opc = MI.getOpcode();
1213   unsigned DescNumOps = MCII->get(Opc).getNumOperands();
1214   auto Mods = collectVOPModifiers(MI, true);
1215 
1216   if (MI.getNumOperands() < DescNumOps &&
1217       AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::vdst_in))
1218     insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::vdst_in);
1219 
1220   if (MI.getNumOperands() < DescNumOps &&
1221       AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::op_sel))
1222     insertNamedMCOperand(MI, MCOperand::createImm(Mods.OpSel),
1223                          AMDGPU::OpName::op_sel);
1224   if (MI.getNumOperands() < DescNumOps &&
1225       AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::op_sel_hi))
1226     insertNamedMCOperand(MI, MCOperand::createImm(Mods.OpSelHi),
1227                          AMDGPU::OpName::op_sel_hi);
1228   if (MI.getNumOperands() < DescNumOps &&
1229       AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::neg_lo))
1230     insertNamedMCOperand(MI, MCOperand::createImm(Mods.NegLo),
1231                          AMDGPU::OpName::neg_lo);
1232   if (MI.getNumOperands() < DescNumOps &&
1233       AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::neg_hi))
1234     insertNamedMCOperand(MI, MCOperand::createImm(Mods.NegHi),
1235                          AMDGPU::OpName::neg_hi);
1236 }
1237 
1238 // Create dummy old operand and insert optional operands
1239 void AMDGPUDisassembler::convertVOPCDPPInst(MCInst &MI) const {
1240   unsigned Opc = MI.getOpcode();
1241   unsigned DescNumOps = MCII->get(Opc).getNumOperands();
1242 
1243   if (MI.getNumOperands() < DescNumOps &&
1244       AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::old))
1245     insertNamedMCOperand(MI, MCOperand::createReg(0), AMDGPU::OpName::old);
1246 
1247   if (MI.getNumOperands() < DescNumOps &&
1248       AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::src0_modifiers))
1249     insertNamedMCOperand(MI, MCOperand::createImm(0),
1250                          AMDGPU::OpName::src0_modifiers);
1251 
1252   if (MI.getNumOperands() < DescNumOps &&
1253       AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::src1_modifiers))
1254     insertNamedMCOperand(MI, MCOperand::createImm(0),
1255                          AMDGPU::OpName::src1_modifiers);
1256 }
1257 
1258 void AMDGPUDisassembler::convertVOPC64DPPInst(MCInst &MI) const {
1259   unsigned Opc = MI.getOpcode();
1260   unsigned DescNumOps = MCII->get(Opc).getNumOperands();
1261 
1262   convertTrue16OpSel(MI);
1263 
1264   if (MI.getNumOperands() < DescNumOps &&
1265       AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::op_sel)) {
1266     VOPModifiers Mods = collectVOPModifiers(MI);
1267     insertNamedMCOperand(MI, MCOperand::createImm(Mods.OpSel),
1268                          AMDGPU::OpName::op_sel);
1269   }
1270 }
1271 
1272 void AMDGPUDisassembler::convertFMAanyK(MCInst &MI, int ImmLitIdx) const {
1273   assert(HasLiteral && "Should have decoded a literal");
1274   const MCInstrDesc &Desc = MCII->get(MI.getOpcode());
1275   unsigned DescNumOps = Desc.getNumOperands();
1276   insertNamedMCOperand(MI, MCOperand::createImm(Literal),
1277                        AMDGPU::OpName::immDeferred);
1278   assert(DescNumOps == MI.getNumOperands());
1279   for (unsigned I = 0; I < DescNumOps; ++I) {
1280     auto &Op = MI.getOperand(I);
1281     auto OpType = Desc.operands()[I].OperandType;
1282     bool IsDeferredOp = (OpType == AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED ||
1283                          OpType == AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED);
1284     if (Op.isImm() && Op.getImm() == AMDGPU::EncValues::LITERAL_CONST &&
1285         IsDeferredOp)
1286       Op.setImm(Literal);
1287   }
1288 }
1289 
1290 const char* AMDGPUDisassembler::getRegClassName(unsigned RegClassID) const {
1291   return getContext().getRegisterInfo()->
1292     getRegClassName(&AMDGPUMCRegisterClasses[RegClassID]);
1293 }
1294 
1295 inline
1296 MCOperand AMDGPUDisassembler::errOperand(unsigned V,
1297                                          const Twine& ErrMsg) const {
1298   *CommentStream << "Error: " + ErrMsg;
1299 
1300   // ToDo: add support for error operands to MCInst.h
1301   // return MCOperand::createError(V);
1302   return MCOperand();
1303 }
1304 
1305 inline
1306 MCOperand AMDGPUDisassembler::createRegOperand(unsigned int RegId) const {
1307   return MCOperand::createReg(AMDGPU::getMCReg(RegId, STI));
1308 }
1309 
1310 inline
1311 MCOperand AMDGPUDisassembler::createRegOperand(unsigned RegClassID,
1312                                                unsigned Val) const {
1313   const auto& RegCl = AMDGPUMCRegisterClasses[RegClassID];
1314   if (Val >= RegCl.getNumRegs())
1315     return errOperand(Val, Twine(getRegClassName(RegClassID)) +
1316                            ": unknown register " + Twine(Val));
1317   return createRegOperand(RegCl.getRegister(Val));
1318 }
1319 
1320 inline
1321 MCOperand AMDGPUDisassembler::createSRegOperand(unsigned SRegClassID,
1322                                                 unsigned Val) const {
1323   // ToDo: SI/CI have 104 SGPRs, VI has 102.
1324   // Valery: here we accept as much as we can and let the assembler sort it out.
1325   int shift = 0;
1326   switch (SRegClassID) {
1327   case AMDGPU::SGPR_32RegClassID:
1328   case AMDGPU::TTMP_32RegClassID:
1329     break;
1330   case AMDGPU::SGPR_64RegClassID:
1331   case AMDGPU::TTMP_64RegClassID:
1332     shift = 1;
1333     break;
1334   case AMDGPU::SGPR_96RegClassID:
1335   case AMDGPU::TTMP_96RegClassID:
1336   case AMDGPU::SGPR_128RegClassID:
1337   case AMDGPU::TTMP_128RegClassID:
1338   // ToDo: unclear if s[100:104] is available on VI. Can we use VCC as SGPR in
1339   // this bundle?
1340   case AMDGPU::SGPR_256RegClassID:
1341   case AMDGPU::TTMP_256RegClassID:
1342   // ToDo: unclear if s[96:104] is available on VI. Can we use VCC as SGPR in
1343   // this bundle?
1344   case AMDGPU::SGPR_288RegClassID:
1345   case AMDGPU::TTMP_288RegClassID:
1346   case AMDGPU::SGPR_320RegClassID:
1347   case AMDGPU::TTMP_320RegClassID:
1348   case AMDGPU::SGPR_352RegClassID:
1349   case AMDGPU::TTMP_352RegClassID:
1350   case AMDGPU::SGPR_384RegClassID:
1351   case AMDGPU::TTMP_384RegClassID:
1352   case AMDGPU::SGPR_512RegClassID:
1353   case AMDGPU::TTMP_512RegClassID:
1354     shift = 2;
1355     break;
1356   // ToDo: unclear if s[88:104] is available on VI. Can we use VCC as SGPR in
1357   // this bundle?
1358   default:
1359     llvm_unreachable("unhandled register class");
1360   }
1361 
1362   if (Val % (1 << shift)) {
1363     *CommentStream << "Warning: " << getRegClassName(SRegClassID)
1364                    << ": scalar reg isn't aligned " << Val;
1365   }
1366 
1367   return createRegOperand(SRegClassID, Val >> shift);
1368 }
1369 
1370 MCOperand AMDGPUDisassembler::createVGPR16Operand(unsigned RegIdx,
1371                                                   bool IsHi) const {
1372   unsigned RegIdxInVGPR16 = RegIdx * 2 + (IsHi ? 1 : 0);
1373   return createRegOperand(AMDGPU::VGPR_16RegClassID, RegIdxInVGPR16);
1374 }
1375 
1376 // Decode Literals for insts which always have a literal in the encoding
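// For illustration: a VOPD dual-issue pair in which both halves take a kimm
// operand shares the single trailing 32-bit literal, so decoding a second,
// different literal value is rejected below as illegal.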
1377 MCOperand
1378 AMDGPUDisassembler::decodeMandatoryLiteralConstant(unsigned Val) const {
1379   if (HasLiteral) {
1380     assert(
1381         AMDGPU::hasVOPD(STI) &&
1382         "Should only decode multiple kimm with VOPD, check VSrc operand types");
1383     if (Literal != Val)
1384       return errOperand(Val, "More than one unique literal is illegal");
1385   }
1386   HasLiteral = true;
1387   Literal = Val;
1388   return MCOperand::createImm(Literal);
1389 }
1390 
1391 MCOperand AMDGPUDisassembler::decodeLiteralConstant(bool ExtendFP64) const {
1392   // For now all literal constants are assumed to be unsigned integers.
1393   // ToDo: deal with signed/unsigned 64-bit integer constants
1394   // ToDo: deal with float/double constants
1395   if (!HasLiteral) {
1396     if (Bytes.size() < 4) {
1397       return errOperand(0, "cannot read literal, inst bytes left " +
1398                         Twine(Bytes.size()));
1399     }
1400     HasLiteral = true;
1401     Literal = Literal64 = eatBytes<uint32_t>(Bytes);
1402     if (ExtendFP64)
1403       Literal64 <<= 32;
1404   }
1405   return MCOperand::createImm(ExtendFP64 ? Literal64 : Literal);
1406 }
1407 
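// Map the 9-bit inline-integer encoding back to its signed value. As a worked
// example of the formula below (expressed with the EncValues constants rather
// than hard-coded numbers): Imm == INLINE_INTEGER_C_MIN yields 0,
// Imm == INLINE_INTEGER_C_MIN + 5 yields 5 (while it stays in the positive
// range), and Imm == INLINE_INTEGER_C_POSITIVE_MAX + 1 yields -1.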
1408 MCOperand AMDGPUDisassembler::decodeIntImmed(unsigned Imm) {
1409   using namespace AMDGPU::EncValues;
1410 
1411   assert(Imm >= INLINE_INTEGER_C_MIN && Imm <= INLINE_INTEGER_C_MAX);
1412   return MCOperand::createImm((Imm <= INLINE_INTEGER_C_POSITIVE_MAX) ?
1413     (static_cast<int64_t>(Imm) - INLINE_INTEGER_C_MIN) :
1414     (INLINE_INTEGER_C_POSITIVE_MAX - static_cast<int64_t>(Imm)));
1415       // Cast prevents negative overflow.
1416 }
1417 
1418 static int64_t getInlineImmVal32(unsigned Imm) {
1419   switch (Imm) {
1420   case 240:
1421     return llvm::bit_cast<uint32_t>(0.5f);
1422   case 241:
1423     return llvm::bit_cast<uint32_t>(-0.5f);
1424   case 242:
1425     return llvm::bit_cast<uint32_t>(1.0f);
1426   case 243:
1427     return llvm::bit_cast<uint32_t>(-1.0f);
1428   case 244:
1429     return llvm::bit_cast<uint32_t>(2.0f);
1430   case 245:
1431     return llvm::bit_cast<uint32_t>(-2.0f);
1432   case 246:
1433     return llvm::bit_cast<uint32_t>(4.0f);
1434   case 247:
1435     return llvm::bit_cast<uint32_t>(-4.0f);
1436   case 248: // 1 / (2 * PI)
1437     return 0x3e22f983;
1438   default:
1439     llvm_unreachable("invalid fp inline imm");
1440   }
1441 }
1442 
1443 static int64_t getInlineImmVal64(unsigned Imm) {
1444   switch (Imm) {
1445   case 240:
1446     return llvm::bit_cast<uint64_t>(0.5);
1447   case 241:
1448     return llvm::bit_cast<uint64_t>(-0.5);
1449   case 242:
1450     return llvm::bit_cast<uint64_t>(1.0);
1451   case 243:
1452     return llvm::bit_cast<uint64_t>(-1.0);
1453   case 244:
1454     return llvm::bit_cast<uint64_t>(2.0);
1455   case 245:
1456     return llvm::bit_cast<uint64_t>(-2.0);
1457   case 246:
1458     return llvm::bit_cast<uint64_t>(4.0);
1459   case 247:
1460     return llvm::bit_cast<uint64_t>(-4.0);
1461   case 248: // 1 / (2 * PI)
1462     return 0x3fc45f306dc9c882;
1463   default:
1464     llvm_unreachable("invalid fp inline imm");
1465   }
1466 }
1467 
1468 static int64_t getInlineImmValF16(unsigned Imm) {
1469   switch (Imm) {
1470   case 240:
1471     return 0x3800;
1472   case 241:
1473     return 0xB800;
1474   case 242:
1475     return 0x3C00;
1476   case 243:
1477     return 0xBC00;
1478   case 244:
1479     return 0x4000;
1480   case 245:
1481     return 0xC000;
1482   case 246:
1483     return 0x4400;
1484   case 247:
1485     return 0xC400;
1486   case 248: // 1 / (2 * PI)
1487     return 0x3118;
1488   default:
1489     llvm_unreachable("invalid fp inline imm");
1490   }
1491 }
1492 
1493 static int64_t getInlineImmValBF16(unsigned Imm) {
1494   switch (Imm) {
1495   case 240:
1496     return 0x3F00;
1497   case 241:
1498     return 0xBF00;
1499   case 242:
1500     return 0x3F80;
1501   case 243:
1502     return 0xBF80;
1503   case 244:
1504     return 0x4000;
1505   case 245:
1506     return 0xC000;
1507   case 246:
1508     return 0x4080;
1509   case 247:
1510     return 0xC080;
1511   case 248: // 1 / (2 * PI)
1512     return 0x3E22;
1513   default:
1514     llvm_unreachable("invalid fp inline imm");
1515   }
1516 }
1517 
1518 static int64_t getInlineImmVal16(unsigned Imm, AMDGPU::OperandSemantics Sema) {
1519   return (Sema == AMDGPU::OperandSemantics::BF16) ? getInlineImmValBF16(Imm)
1520                                                   : getInlineImmValF16(Imm);
1521 }
1522 
1523 MCOperand AMDGPUDisassembler::decodeFPImmed(unsigned ImmWidth, unsigned Imm,
1524                                             AMDGPU::OperandSemantics Sema) {
1525   assert(Imm >= AMDGPU::EncValues::INLINE_FLOATING_C_MIN &&
1526          Imm <= AMDGPU::EncValues::INLINE_FLOATING_C_MAX);
1527 
1528   // ToDo: case 248 (1/(2*PI)) is allowed only on VI.
1529   // ImmWidth 0 is the default case where the operand should not allow immediates.
1530   // The Imm value is still decoded into a 32-bit immediate operand; the
1531   // instruction printer uses it to print a verbose error message.
1532   switch (ImmWidth) {
1533   case 0:
1534   case 32:
1535     return MCOperand::createImm(getInlineImmVal32(Imm));
1536   case 64:
1537     return MCOperand::createImm(getInlineImmVal64(Imm));
1538   case 16:
1539     return MCOperand::createImm(getInlineImmVal16(Imm, Sema));
1540   default:
1541     llvm_unreachable("implement me");
1542   }
1543 }
1544 
1545 unsigned AMDGPUDisassembler::getVgprClassId(const OpWidthTy Width) const {
1546   using namespace AMDGPU;
1547 
1548   assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
1549   switch (Width) {
1550   default: // fall through
1551   case OPW32:
1552   case OPW16:
1553   case OPWV216:
1554     return VGPR_32RegClassID;
1555   case OPW64:
1556   case OPWV232: return VReg_64RegClassID;
1557   case OPW96: return VReg_96RegClassID;
1558   case OPW128: return VReg_128RegClassID;
1559   case OPW192: return VReg_192RegClassID;
1560   case OPW160: return VReg_160RegClassID;
1561   case OPW256: return VReg_256RegClassID;
1562   case OPW288: return VReg_288RegClassID;
1563   case OPW320: return VReg_320RegClassID;
1564   case OPW352: return VReg_352RegClassID;
1565   case OPW384: return VReg_384RegClassID;
1566   case OPW512: return VReg_512RegClassID;
1567   case OPW1024: return VReg_1024RegClassID;
1568   }
1569 }
1570 
1571 unsigned AMDGPUDisassembler::getAgprClassId(const OpWidthTy Width) const {
1572   using namespace AMDGPU;
1573 
1574   assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
1575   switch (Width) {
1576   default: // fall through
1577   case OPW32:
1578   case OPW16:
1579   case OPWV216:
1580     return AGPR_32RegClassID;
1581   case OPW64:
1582   case OPWV232: return AReg_64RegClassID;
1583   case OPW96: return AReg_96RegClassID;
1584   case OPW128: return AReg_128RegClassID;
1585   case OPW160: return AReg_160RegClassID;
1586   case OPW256: return AReg_256RegClassID;
1587   case OPW288: return AReg_288RegClassID;
1588   case OPW320: return AReg_320RegClassID;
1589   case OPW352: return AReg_352RegClassID;
1590   case OPW384: return AReg_384RegClassID;
1591   case OPW512: return AReg_512RegClassID;
1592   case OPW1024: return AReg_1024RegClassID;
1593   }
1594 }
1595 
1596 
1597 unsigned AMDGPUDisassembler::getSgprClassId(const OpWidthTy Width) const {
1598   using namespace AMDGPU;
1599 
1600   assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
1601   switch (Width) {
1602   default: // fall through
1603   case OPW32:
1604   case OPW16:
1605   case OPWV216:
1606     return SGPR_32RegClassID;
1607   case OPW64:
1608   case OPWV232: return SGPR_64RegClassID;
1609   case OPW96: return SGPR_96RegClassID;
1610   case OPW128: return SGPR_128RegClassID;
1611   case OPW160: return SGPR_160RegClassID;
1612   case OPW256: return SGPR_256RegClassID;
1613   case OPW288: return SGPR_288RegClassID;
1614   case OPW320: return SGPR_320RegClassID;
1615   case OPW352: return SGPR_352RegClassID;
1616   case OPW384: return SGPR_384RegClassID;
1617   case OPW512: return SGPR_512RegClassID;
1618   }
1619 }
1620 
1621 unsigned AMDGPUDisassembler::getTtmpClassId(const OpWidthTy Width) const {
1622   using namespace AMDGPU;
1623 
1624   assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
1625   switch (Width) {
1626   default: // fall through
1627   case OPW32:
1628   case OPW16:
1629   case OPWV216:
1630     return TTMP_32RegClassID;
1631   case OPW64:
1632   case OPWV232: return TTMP_64RegClassID;
1633   case OPW128: return TTMP_128RegClassID;
1634   case OPW256: return TTMP_256RegClassID;
1635   case OPW288: return TTMP_288RegClassID;
1636   case OPW320: return TTMP_320RegClassID;
1637   case OPW352: return TTMP_352RegClassID;
1638   case OPW384: return TTMP_384RegClassID;
1639   case OPW512: return TTMP_512RegClassID;
1640   }
1641 }
1642 
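// Map a source-operand encoding to a trap-temporary (ttmp) register index, or
// return -1 if it is outside the ttmp range. For example, Val == TTMP_GFX9PLUS_MIN
// on gfx9+ (or Val == TTMP_VI_MIN on VI) maps to index 0, i.e. ttmp0.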
1643 int AMDGPUDisassembler::getTTmpIdx(unsigned Val) const {
1644   using namespace AMDGPU::EncValues;
1645 
1646   unsigned TTmpMin = isGFX9Plus() ? TTMP_GFX9PLUS_MIN : TTMP_VI_MIN;
1647   unsigned TTmpMax = isGFX9Plus() ? TTMP_GFX9PLUS_MAX : TTMP_VI_MAX;
1648 
1649   return (TTmpMin <= Val && Val <= TTmpMax)? Val - TTmpMin : -1;
1650 }
1651 
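// Decode a 10-bit (enum10) source operand. Bit 9 selects the accumulator
// register file; the low 9 bits are the ordinary src encoding. For
// illustration (assuming the usual EncValues layout where VGPR_MIN == 256):
// Val == VGPR_MIN + 3 decodes to v3, while Val == (VGPR_MIN + 3) | 512
// decodes to a3.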
1652 MCOperand AMDGPUDisassembler::decodeSrcOp(const OpWidthTy Width, unsigned Val,
1653                                           bool MandatoryLiteral,
1654                                           unsigned ImmWidth,
1655                                           AMDGPU::OperandSemantics Sema) const {
1656   using namespace AMDGPU::EncValues;
1657 
1658   assert(Val < 1024); // enum10
1659 
1660   bool IsAGPR = Val & 512;
1661   Val &= 511;
1662 
1663   if (VGPR_MIN <= Val && Val <= VGPR_MAX) {
1664     return createRegOperand(IsAGPR ? getAgprClassId(Width)
1665                                    : getVgprClassId(Width), Val - VGPR_MIN);
1666   }
1667   return decodeNonVGPRSrcOp(Width, Val & 0xFF, MandatoryLiteral, ImmWidth,
1668                             Sema);
1669 }
1670 
1671 MCOperand
1672 AMDGPUDisassembler::decodeNonVGPRSrcOp(const OpWidthTy Width, unsigned Val,
1673                                        bool MandatoryLiteral, unsigned ImmWidth,
1674                                        AMDGPU::OperandSemantics Sema) const {
1675   // Cases where Val{8} is 1 (VGPR, AGPR, or true16 VGPR) should have been
1676   // decoded earlier.
1677   assert(Val < (1 << 8) && "9-bit Src encoding when Val{8} is 0");
1678   using namespace AMDGPU::EncValues;
1679 
1680   if (Val <= SGPR_MAX) {
1681     // "SGPR_MIN <= Val" is always true and causes compilation warning.
1682     static_assert(SGPR_MIN == 0);
1683     return createSRegOperand(getSgprClassId(Width), Val - SGPR_MIN);
1684   }
1685 
1686   int TTmpIdx = getTTmpIdx(Val);
1687   if (TTmpIdx >= 0) {
1688     return createSRegOperand(getTtmpClassId(Width), TTmpIdx);
1689   }
1690 
1691   if (INLINE_INTEGER_C_MIN <= Val && Val <= INLINE_INTEGER_C_MAX)
1692     return decodeIntImmed(Val);
1693 
1694   if (INLINE_FLOATING_C_MIN <= Val && Val <= INLINE_FLOATING_C_MAX)
1695     return decodeFPImmed(ImmWidth, Val, Sema);
1696 
1697   if (Val == LITERAL_CONST) {
1698     if (MandatoryLiteral)
1699       // Keep a sentinel value for deferred setting
1700       return MCOperand::createImm(LITERAL_CONST);
1701     return decodeLiteralConstant(Sema == AMDGPU::OperandSemantics::FP64);
1702   }
1703 
1704   switch (Width) {
1705   case OPW32:
1706   case OPW16:
1707   case OPWV216:
1708     return decodeSpecialReg32(Val);
1709   case OPW64:
1710   case OPWV232:
1711     return decodeSpecialReg64(Val);
1712   case OPW96:
1713   case OPW128:
1714   case OPW256:
1715   case OPW512:
1716     return decodeSpecialReg96Plus(Val);
1717   default:
1718     llvm_unreachable("unexpected immediate type");
1719   }
1720 }
1721 
1722 // Bit 0 of DstY isn't stored in the instruction, because it's always the
1723 // opposite of bit 0 of DstX.
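// For example, if vdstX decoded to an even-numbered VGPR (low encoding bit 0),
// the low bit of vdstY is forced to 1 here, so the two destinations always
// have opposite parity and can never be the same register.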
1724 MCOperand AMDGPUDisassembler::decodeVOPDDstYOp(MCInst &Inst,
1725                                                unsigned Val) const {
1726   int VDstXInd =
1727       AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::vdstX);
1728   assert(VDstXInd != -1);
1729   assert(Inst.getOperand(VDstXInd).isReg());
1730   unsigned XDstReg = MRI.getEncodingValue(Inst.getOperand(VDstXInd).getReg());
1731   Val |= ~XDstReg & 1;
1732   auto Width = llvm::AMDGPUDisassembler::OPW32;
1733   return createRegOperand(getVgprClassId(Width), Val);
1734 }
1735 
1736 MCOperand AMDGPUDisassembler::decodeSpecialReg32(unsigned Val) const {
1737   using namespace AMDGPU;
1738 
1739   switch (Val) {
1740   // clang-format off
1741   case 102: return createRegOperand(FLAT_SCR_LO);
1742   case 103: return createRegOperand(FLAT_SCR_HI);
1743   case 104: return createRegOperand(XNACK_MASK_LO);
1744   case 105: return createRegOperand(XNACK_MASK_HI);
1745   case 106: return createRegOperand(VCC_LO);
1746   case 107: return createRegOperand(VCC_HI);
1747   case 108: return createRegOperand(TBA_LO);
1748   case 109: return createRegOperand(TBA_HI);
1749   case 110: return createRegOperand(TMA_LO);
1750   case 111: return createRegOperand(TMA_HI);
1751   case 124:
1752     return isGFX11Plus() ? createRegOperand(SGPR_NULL) : createRegOperand(M0);
1753   case 125:
1754     return isGFX11Plus() ? createRegOperand(M0) : createRegOperand(SGPR_NULL);
1755   case 126: return createRegOperand(EXEC_LO);
1756   case 127: return createRegOperand(EXEC_HI);
1757   case 235: return createRegOperand(SRC_SHARED_BASE_LO);
1758   case 236: return createRegOperand(SRC_SHARED_LIMIT_LO);
1759   case 237: return createRegOperand(SRC_PRIVATE_BASE_LO);
1760   case 238: return createRegOperand(SRC_PRIVATE_LIMIT_LO);
1761   case 239: return createRegOperand(SRC_POPS_EXITING_WAVE_ID);
1762   case 251: return createRegOperand(SRC_VCCZ);
1763   case 252: return createRegOperand(SRC_EXECZ);
1764   case 253: return createRegOperand(SRC_SCC);
1765   case 254: return createRegOperand(LDS_DIRECT);
1766   default: break;
1767     // clang-format on
1768   }
1769   return errOperand(Val, "unknown operand encoding " + Twine(Val));
1770 }
1771 
1772 MCOperand AMDGPUDisassembler::decodeSpecialReg64(unsigned Val) const {
1773   using namespace AMDGPU;
1774 
1775   switch (Val) {
1776   case 102: return createRegOperand(FLAT_SCR);
1777   case 104: return createRegOperand(XNACK_MASK);
1778   case 106: return createRegOperand(VCC);
1779   case 108: return createRegOperand(TBA);
1780   case 110: return createRegOperand(TMA);
1781   case 124:
1782     if (isGFX11Plus())
1783       return createRegOperand(SGPR_NULL);
1784     break;
1785   case 125:
1786     if (!isGFX11Plus())
1787       return createRegOperand(SGPR_NULL);
1788     break;
1789   case 126: return createRegOperand(EXEC);
1790   case 235: return createRegOperand(SRC_SHARED_BASE);
1791   case 236: return createRegOperand(SRC_SHARED_LIMIT);
1792   case 237: return createRegOperand(SRC_PRIVATE_BASE);
1793   case 238: return createRegOperand(SRC_PRIVATE_LIMIT);
1794   case 239: return createRegOperand(SRC_POPS_EXITING_WAVE_ID);
1795   case 251: return createRegOperand(SRC_VCCZ);
1796   case 252: return createRegOperand(SRC_EXECZ);
1797   case 253: return createRegOperand(SRC_SCC);
1798   default: break;
1799   }
1800   return errOperand(Val, "unknown operand encoding " + Twine(Val));
1801 }
1802 
1803 MCOperand AMDGPUDisassembler::decodeSpecialReg96Plus(unsigned Val) const {
1804   using namespace AMDGPU;
1805 
1806   switch (Val) {
1807   case 124:
1808     if (isGFX11Plus())
1809       return createRegOperand(SGPR_NULL);
1810     break;
1811   case 125:
1812     if (!isGFX11Plus())
1813       return createRegOperand(SGPR_NULL);
1814     break;
1815   default:
1816     break;
1817   }
1818   return errOperand(Val, "unknown operand encoding " + Twine(Val));
1819 }
1820 
1821 MCOperand
1822 AMDGPUDisassembler::decodeSDWASrc(const OpWidthTy Width, const unsigned Val,
1823                                   unsigned ImmWidth,
1824                                   AMDGPU::OperandSemantics Sema) const {
1825   using namespace AMDGPU::SDWA;
1826   using namespace AMDGPU::EncValues;
1827 
1828   if (STI.hasFeature(AMDGPU::FeatureGFX9) ||
1829       STI.hasFeature(AMDGPU::FeatureGFX10)) {
1830     // XXX: the cast to int avoids a compiler warning that the comparison
1831     // with an unsigned value is always true.
1832     if (int(SDWA9EncValues::SRC_VGPR_MIN) <= int(Val) &&
1833         Val <= SDWA9EncValues::SRC_VGPR_MAX) {
1834       return createRegOperand(getVgprClassId(Width),
1835                               Val - SDWA9EncValues::SRC_VGPR_MIN);
1836     }
1837     if (SDWA9EncValues::SRC_SGPR_MIN <= Val &&
1838         Val <= (isGFX10Plus() ? SDWA9EncValues::SRC_SGPR_MAX_GFX10
1839                               : SDWA9EncValues::SRC_SGPR_MAX_SI)) {
1840       return createSRegOperand(getSgprClassId(Width),
1841                                Val - SDWA9EncValues::SRC_SGPR_MIN);
1842     }
1843     if (SDWA9EncValues::SRC_TTMP_MIN <= Val &&
1844         Val <= SDWA9EncValues::SRC_TTMP_MAX) {
1845       return createSRegOperand(getTtmpClassId(Width),
1846                                Val - SDWA9EncValues::SRC_TTMP_MIN);
1847     }
1848 
1849     const unsigned SVal = Val - SDWA9EncValues::SRC_SGPR_MIN;
1850 
1851     if (INLINE_INTEGER_C_MIN <= SVal && SVal <= INLINE_INTEGER_C_MAX)
1852       return decodeIntImmed(SVal);
1853 
1854     if (INLINE_FLOATING_C_MIN <= SVal && SVal <= INLINE_FLOATING_C_MAX)
1855       return decodeFPImmed(ImmWidth, SVal, Sema);
1856 
1857     return decodeSpecialReg32(SVal);
1858   }
1859   if (STI.hasFeature(AMDGPU::FeatureVolcanicIslands))
1860     return createRegOperand(getVgprClassId(Width), Val);
1861   llvm_unreachable("unsupported target");
1862 }
1863 
1864 MCOperand AMDGPUDisassembler::decodeSDWASrc16(unsigned Val) const {
1865   return decodeSDWASrc(OPW16, Val, 16, AMDGPU::OperandSemantics::FP16);
1866 }
1867 
1868 MCOperand AMDGPUDisassembler::decodeSDWASrc32(unsigned Val) const {
1869   return decodeSDWASrc(OPW32, Val, 32, AMDGPU::OperandSemantics::FP32);
1870 }
1871 
1872 MCOperand AMDGPUDisassembler::decodeSDWAVopcDst(unsigned Val) const {
1873   using namespace AMDGPU::SDWA;
1874 
1875   assert((STI.hasFeature(AMDGPU::FeatureGFX9) ||
1876           STI.hasFeature(AMDGPU::FeatureGFX10)) &&
1877          "SDWAVopcDst should be present only on GFX9+");
1878 
1879   bool IsWave32 = STI.hasFeature(AMDGPU::FeatureWavefrontSize32);
1880 
1881   if (Val & SDWA9EncValues::VOPC_DST_VCC_MASK) {
1882     Val &= SDWA9EncValues::VOPC_DST_SGPR_MASK;
1883 
1884     int TTmpIdx = getTTmpIdx(Val);
1885     if (TTmpIdx >= 0) {
1886       auto TTmpClsId = getTtmpClassId(IsWave32 ? OPW32 : OPW64);
1887       return createSRegOperand(TTmpClsId, TTmpIdx);
1888     }
1889     if (Val > SGPR_MAX) {
1890       return IsWave32 ? decodeSpecialReg32(Val) : decodeSpecialReg64(Val);
1891     }
1892     return createSRegOperand(getSgprClassId(IsWave32 ? OPW32 : OPW64), Val);
1893   }
1894   return createRegOperand(IsWave32 ? AMDGPU::VCC_LO : AMDGPU::VCC);
1895 }
1896 
1897 MCOperand AMDGPUDisassembler::decodeBoolReg(unsigned Val) const {
1898   return STI.hasFeature(AMDGPU::FeatureWavefrontSize32)
1899              ? decodeSrcOp(OPW32, Val)
1900              : decodeSrcOp(OPW64, Val);
1901 }
1902 
1903 MCOperand AMDGPUDisassembler::decodeSplitBarrier(unsigned Val) const {
1904   return decodeSrcOp(OPW32, Val);
1905 }
1906 
1907 MCOperand AMDGPUDisassembler::decodeDpp8FI(unsigned Val) const {
1908   if (Val != AMDGPU::DPP::DPP8_FI_0 && Val != AMDGPU::DPP::DPP8_FI_1)
1909     return MCOperand();
1910   return MCOperand::createImm(Val);
1911 }
1912 
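// Decode a microcode version immediate. The low byte holds the version code
// and bits 13/14/15 are the W64/W32/MDP flags. For illustration, an immediate
// whose only flag bit is W64 decodes to an expression OR-ing the matching gfx
// version symbol with UCVersionW64Expr; if any bit outside these fields is
// set, the raw immediate is kept instead.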
1913 MCOperand AMDGPUDisassembler::decodeVersionImm(unsigned Imm) const {
1914   using VersionField = AMDGPU::EncodingField<7, 0>;
1915   using W64Bit = AMDGPU::EncodingBit<13>;
1916   using W32Bit = AMDGPU::EncodingBit<14>;
1917   using MDPBit = AMDGPU::EncodingBit<15>;
1918   using Encoding = AMDGPU::EncodingFields<VersionField, W64Bit, W32Bit, MDPBit>;
1919 
1920   auto [Version, W64, W32, MDP] = Encoding::decode(Imm);
1921 
1922   // Decode into a plain immediate if any unused bits are set.
1923   if (Encoding::encode(Version, W64, W32, MDP) != Imm)
1924     return MCOperand::createImm(Imm);
1925 
1926   const auto &Versions = AMDGPU::UCVersion::getGFXVersions();
1927   const auto *I = find_if(
1928       Versions, [Version = Version](const AMDGPU::UCVersion::GFXVersion &V) {
1929         return V.Code == Version;
1930       });
1931   MCContext &Ctx = getContext();
1932   const MCExpr *E;
1933   if (I == Versions.end())
1934     E = MCConstantExpr::create(Version, Ctx);
1935   else
1936     E = MCSymbolRefExpr::create(Ctx.getOrCreateSymbol(I->Symbol), Ctx);
1937 
1938   if (W64)
1939     E = MCBinaryExpr::createOr(E, UCVersionW64Expr, Ctx);
1940   if (W32)
1941     E = MCBinaryExpr::createOr(E, UCVersionW32Expr, Ctx);
1942   if (MDP)
1943     E = MCBinaryExpr::createOr(E, UCVersionMDPExpr, Ctx);
1944 
1945   return MCOperand::createExpr(E);
1946 }
1947 
1948 bool AMDGPUDisassembler::isVI() const {
1949   return STI.hasFeature(AMDGPU::FeatureVolcanicIslands);
1950 }
1951 
1952 bool AMDGPUDisassembler::isGFX9() const { return AMDGPU::isGFX9(STI); }
1953 
1954 bool AMDGPUDisassembler::isGFX90A() const {
1955   return STI.hasFeature(AMDGPU::FeatureGFX90AInsts);
1956 }
1957 
1958 bool AMDGPUDisassembler::isGFX9Plus() const { return AMDGPU::isGFX9Plus(STI); }
1959 
1960 bool AMDGPUDisassembler::isGFX10() const { return AMDGPU::isGFX10(STI); }
1961 
1962 bool AMDGPUDisassembler::isGFX10Plus() const {
1963   return AMDGPU::isGFX10Plus(STI);
1964 }
1965 
1966 bool AMDGPUDisassembler::isGFX11() const {
1967   return STI.hasFeature(AMDGPU::FeatureGFX11);
1968 }
1969 
1970 bool AMDGPUDisassembler::isGFX11Plus() const {
1971   return AMDGPU::isGFX11Plus(STI);
1972 }
1973 
1974 bool AMDGPUDisassembler::isGFX12() const {
1975   return STI.hasFeature(AMDGPU::FeatureGFX12);
1976 }
1977 
1978 bool AMDGPUDisassembler::isGFX12Plus() const {
1979   return AMDGPU::isGFX12Plus(STI);
1980 }
1981 
1982 bool AMDGPUDisassembler::hasArchitectedFlatScratch() const {
1983   return STI.hasFeature(AMDGPU::FeatureArchitectedFlatScratch);
1984 }
1985 
1986 bool AMDGPUDisassembler::hasKernargPreload() const {
1987   return AMDGPU::hasKernargPreload(STI);
1988 }
1989 
1990 //===----------------------------------------------------------------------===//
1991 // AMDGPU specific symbol handling
1992 //===----------------------------------------------------------------------===//
1993 
1994 /// Produce a string describing the reserved bit range specified by Mask with
1995 /// offset BaseBytes for use in error comments. Mask is a single continuous
1996 /// range of 1s surrounded by zeros. The format here is meant to align with the
1997 /// tables that describe these bits in llvm.org/docs/AMDGPUUsage.html.
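/// For example, Mask == 0x3F000000 with BaseBytes == 0 yields
/// "bits in range (29:24)", and a single-bit mask yields "bit (N)".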
1998 static SmallString<32> getBitRangeFromMask(uint32_t Mask, unsigned BaseBytes) {
1999   SmallString<32> Result;
2000   raw_svector_ostream S(Result);
2001 
2002   int TrailingZeros = llvm::countr_zero(Mask);
2003   int PopCount = llvm::popcount(Mask);
2004 
2005   if (PopCount == 1) {
2006     S << "bit (" << (TrailingZeros + BaseBytes * CHAR_BIT) << ')';
2007   } else {
2008     S << "bits in range ("
2009       << (TrailingZeros + PopCount - 1 + BaseBytes * CHAR_BIT) << ':'
2010       << (TrailingZeros + BaseBytes * CHAR_BIT) << ')';
2011   }
2012 
2013   return Result;
2014 }
2015 
2016 #define GET_FIELD(MASK) (AMDHSA_BITS_GET(FourByteBuffer, MASK))
2017 #define PRINT_DIRECTIVE(DIRECTIVE, MASK)                                       \
2018   do {                                                                         \
2019     KdStream << Indent << DIRECTIVE " " << GET_FIELD(MASK) << '\n';            \
2020   } while (0)
2021 #define PRINT_PSEUDO_DIRECTIVE_COMMENT(DIRECTIVE, MASK)                        \
2022   do {                                                                         \
2023     KdStream << Indent << MAI.getCommentString() << ' ' << DIRECTIVE " "       \
2024              << GET_FIELD(MASK) << '\n';                                       \
2025   } while (0)
2026 
2027 #define CHECK_RESERVED_BITS_IMPL(MASK, DESC, MSG)                              \
2028   do {                                                                         \
2029     if (FourByteBuffer & (MASK)) {                                             \
2030       return createStringError(std::errc::invalid_argument,                    \
2031                                "kernel descriptor " DESC                       \
2032                                " reserved %s set" MSG,                         \
2033                                getBitRangeFromMask((MASK), 0).c_str());        \
2034     }                                                                          \
2035   } while (0)
2036 
2037 #define CHECK_RESERVED_BITS(MASK) CHECK_RESERVED_BITS_IMPL(MASK, #MASK, "")
2038 #define CHECK_RESERVED_BITS_MSG(MASK, MSG)                                     \
2039   CHECK_RESERVED_BITS_IMPL(MASK, #MASK, ", " MSG)
2040 #define CHECK_RESERVED_BITS_DESC(MASK, DESC)                                   \
2041   CHECK_RESERVED_BITS_IMPL(MASK, DESC, "")
2042 #define CHECK_RESERVED_BITS_DESC_MSG(MASK, DESC, MSG)                          \
2043   CHECK_RESERVED_BITS_IMPL(MASK, DESC, ", " MSG)
2044 
2045 // NOLINTNEXTLINE(readability-identifier-naming)
2046 Expected<bool> AMDGPUDisassembler::decodeCOMPUTE_PGM_RSRC1(
2047     uint32_t FourByteBuffer, raw_string_ostream &KdStream) const {
2048   using namespace amdhsa;
2049   StringRef Indent = "\t";
2050 
2051   // We cannot accurately backward compute #VGPRs used from
2052   // GRANULATED_WORKITEM_VGPR_COUNT. But we are concerned with getting the same
2053   // value of GRANULATED_WORKITEM_VGPR_COUNT in the reassembled binary. So we
2054   // simply calculate the inverse of what the assembler does.
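  // As a worked example (assuming a VGPR encoding granule of 4): a field value
  // of 3 is printed as .amdhsa_next_free_vgpr 16, and assembling 16 produces
  // ceil(16 / 4) - 1 == 3 again, so the field round-trips.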
2055 
2056   uint32_t GranulatedWorkitemVGPRCount =
2057       GET_FIELD(COMPUTE_PGM_RSRC1_GRANULATED_WORKITEM_VGPR_COUNT);
2058 
2059   uint32_t NextFreeVGPR =
2060       (GranulatedWorkitemVGPRCount + 1) *
2061       AMDGPU::IsaInfo::getVGPREncodingGranule(&STI, EnableWavefrontSize32);
2062 
2063   KdStream << Indent << ".amdhsa_next_free_vgpr " << NextFreeVGPR << '\n';
2064 
2065   // We cannot backward compute values used to calculate
2066   // GRANULATED_WAVEFRONT_SGPR_COUNT. Hence the original values for following
2067   // directives can't be computed:
2068   // .amdhsa_reserve_vcc
2069   // .amdhsa_reserve_flat_scratch
2070   // .amdhsa_reserve_xnack_mask
2071   // They take their respective default values if not specified in the assembly.
2072   //
2073   // GRANULATED_WAVEFRONT_SGPR_COUNT
2074   //    = f(NEXT_FREE_SGPR + VCC + FLAT_SCRATCH + XNACK_MASK)
2075   //
2076   // We compute the inverse as though all directives apart from NEXT_FREE_SGPR
2077   // are set to 0. So while disassembling we consider that:
2078   //
2079   // GRANULATED_WAVEFRONT_SGPR_COUNT
2080   //    = f(NEXT_FREE_SGPR + 0 + 0 + 0)
2081   //
2082   // The disassembler cannot recover the original values of those 3 directives.
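  // As a worked example (assuming an SGPR encoding granule of 8): a field
  // value of 5 is printed as .amdhsa_next_free_sgpr 48, and reassembling with
  // the reserve directives left at 0 yields ceil(48 / 8) - 1 == 5 again.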
2083 
2084   uint32_t GranulatedWavefrontSGPRCount =
2085       GET_FIELD(COMPUTE_PGM_RSRC1_GRANULATED_WAVEFRONT_SGPR_COUNT);
2086 
2087   if (isGFX10Plus())
2088     CHECK_RESERVED_BITS_MSG(COMPUTE_PGM_RSRC1_GRANULATED_WAVEFRONT_SGPR_COUNT,
2089                             "must be zero on gfx10+");
2090 
2091   uint32_t NextFreeSGPR = (GranulatedWavefrontSGPRCount + 1) *
2092                           AMDGPU::IsaInfo::getSGPREncodingGranule(&STI);
2093 
2094   KdStream << Indent << ".amdhsa_reserve_vcc " << 0 << '\n';
2095   if (!hasArchitectedFlatScratch())
2096     KdStream << Indent << ".amdhsa_reserve_flat_scratch " << 0 << '\n';
2097   KdStream << Indent << ".amdhsa_reserve_xnack_mask " << 0 << '\n';
2098   KdStream << Indent << ".amdhsa_next_free_sgpr " << NextFreeSGPR << "\n";
2099 
2100   CHECK_RESERVED_BITS(COMPUTE_PGM_RSRC1_PRIORITY);
2101 
2102   PRINT_DIRECTIVE(".amdhsa_float_round_mode_32",
2103                   COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_32);
2104   PRINT_DIRECTIVE(".amdhsa_float_round_mode_16_64",
2105                   COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_16_64);
2106   PRINT_DIRECTIVE(".amdhsa_float_denorm_mode_32",
2107                   COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_32);
2108   PRINT_DIRECTIVE(".amdhsa_float_denorm_mode_16_64",
2109                   COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64);
2110 
2111   CHECK_RESERVED_BITS(COMPUTE_PGM_RSRC1_PRIV);
2112 
2113   if (!isGFX12Plus())
2114     PRINT_DIRECTIVE(".amdhsa_dx10_clamp",
2115                     COMPUTE_PGM_RSRC1_GFX6_GFX11_ENABLE_DX10_CLAMP);
2116 
2117   CHECK_RESERVED_BITS(COMPUTE_PGM_RSRC1_DEBUG_MODE);
2118 
2119   if (!isGFX12Plus())
2120     PRINT_DIRECTIVE(".amdhsa_ieee_mode",
2121                     COMPUTE_PGM_RSRC1_GFX6_GFX11_ENABLE_IEEE_MODE);
2122 
2123   CHECK_RESERVED_BITS(COMPUTE_PGM_RSRC1_BULKY);
2124   CHECK_RESERVED_BITS(COMPUTE_PGM_RSRC1_CDBG_USER);
2125 
2126   if (isGFX9Plus())
2127     PRINT_DIRECTIVE(".amdhsa_fp16_overflow", COMPUTE_PGM_RSRC1_GFX9_PLUS_FP16_OVFL);
2128 
2129   if (!isGFX9Plus())
2130     CHECK_RESERVED_BITS_DESC_MSG(COMPUTE_PGM_RSRC1_GFX6_GFX8_RESERVED0,
2131                                  "COMPUTE_PGM_RSRC1", "must be zero pre-gfx9");
2132 
2133   CHECK_RESERVED_BITS_DESC(COMPUTE_PGM_RSRC1_RESERVED1, "COMPUTE_PGM_RSRC1");
2134 
2135   if (!isGFX10Plus())
2136     CHECK_RESERVED_BITS_DESC_MSG(COMPUTE_PGM_RSRC1_GFX6_GFX9_RESERVED2,
2137                                  "COMPUTE_PGM_RSRC1", "must be zero pre-gfx10");
2138 
2139   if (isGFX10Plus()) {
2140     PRINT_DIRECTIVE(".amdhsa_workgroup_processor_mode",
2141                     COMPUTE_PGM_RSRC1_GFX10_PLUS_WGP_MODE);
2142     PRINT_DIRECTIVE(".amdhsa_memory_ordered", COMPUTE_PGM_RSRC1_GFX10_PLUS_MEM_ORDERED);
2143     PRINT_DIRECTIVE(".amdhsa_forward_progress", COMPUTE_PGM_RSRC1_GFX10_PLUS_FWD_PROGRESS);
2144   }
2145 
2146   if (isGFX12Plus())
2147     PRINT_DIRECTIVE(".amdhsa_round_robin_scheduling",
2148                     COMPUTE_PGM_RSRC1_GFX12_PLUS_ENABLE_WG_RR_EN);
2149 
2150   return true;
2151 }
2152 
2153 // NOLINTNEXTLINE(readability-identifier-naming)
2154 Expected<bool> AMDGPUDisassembler::decodeCOMPUTE_PGM_RSRC2(
2155     uint32_t FourByteBuffer, raw_string_ostream &KdStream) const {
2156   using namespace amdhsa;
2157   StringRef Indent = "\t";
2158   if (hasArchitectedFlatScratch())
2159     PRINT_DIRECTIVE(".amdhsa_enable_private_segment",
2160                     COMPUTE_PGM_RSRC2_ENABLE_PRIVATE_SEGMENT);
2161   else
2162     PRINT_DIRECTIVE(".amdhsa_system_sgpr_private_segment_wavefront_offset",
2163                     COMPUTE_PGM_RSRC2_ENABLE_PRIVATE_SEGMENT);
2164   PRINT_DIRECTIVE(".amdhsa_system_sgpr_workgroup_id_x",
2165                   COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X);
2166   PRINT_DIRECTIVE(".amdhsa_system_sgpr_workgroup_id_y",
2167                   COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Y);
2168   PRINT_DIRECTIVE(".amdhsa_system_sgpr_workgroup_id_z",
2169                   COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Z);
2170   PRINT_DIRECTIVE(".amdhsa_system_sgpr_workgroup_info",
2171                   COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_INFO);
2172   PRINT_DIRECTIVE(".amdhsa_system_vgpr_workitem_id",
2173                   COMPUTE_PGM_RSRC2_ENABLE_VGPR_WORKITEM_ID);
2174 
2175   CHECK_RESERVED_BITS(COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_ADDRESS_WATCH);
2176   CHECK_RESERVED_BITS(COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_MEMORY);
2177   CHECK_RESERVED_BITS(COMPUTE_PGM_RSRC2_GRANULATED_LDS_SIZE);
2178 
2179   PRINT_DIRECTIVE(
2180       ".amdhsa_exception_fp_ieee_invalid_op",
2181       COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INVALID_OPERATION);
2182   PRINT_DIRECTIVE(".amdhsa_exception_fp_denorm_src",
2183                   COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_FP_DENORMAL_SOURCE);
2184   PRINT_DIRECTIVE(
2185       ".amdhsa_exception_fp_ieee_div_zero",
2186       COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_DIVISION_BY_ZERO);
2187   PRINT_DIRECTIVE(".amdhsa_exception_fp_ieee_overflow",
2188                   COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_OVERFLOW);
2189   PRINT_DIRECTIVE(".amdhsa_exception_fp_ieee_underflow",
2190                   COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_UNDERFLOW);
2191   PRINT_DIRECTIVE(".amdhsa_exception_fp_ieee_inexact",
2192                   COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INEXACT);
2193   PRINT_DIRECTIVE(".amdhsa_exception_int_div_zero",
2194                   COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_INT_DIVIDE_BY_ZERO);
2195 
2196   CHECK_RESERVED_BITS_DESC(COMPUTE_PGM_RSRC2_RESERVED0, "COMPUTE_PGM_RSRC2");
2197 
2198   return true;
2199 }
2200 
2201 // NOLINTNEXTLINE(readability-identifier-naming)
2202 Expected<bool> AMDGPUDisassembler::decodeCOMPUTE_PGM_RSRC3(
2203     uint32_t FourByteBuffer, raw_string_ostream &KdStream) const {
2204   using namespace amdhsa;
2205   StringRef Indent = "\t";
2206   if (isGFX90A()) {
2207     KdStream << Indent << ".amdhsa_accum_offset "
2208              << (GET_FIELD(COMPUTE_PGM_RSRC3_GFX90A_ACCUM_OFFSET) + 1) * 4
2209              << '\n';
2210 
2211     PRINT_DIRECTIVE(".amdhsa_tg_split", COMPUTE_PGM_RSRC3_GFX90A_TG_SPLIT);
2212 
2213     CHECK_RESERVED_BITS_DESC_MSG(COMPUTE_PGM_RSRC3_GFX90A_RESERVED0,
2214                                  "COMPUTE_PGM_RSRC3", "must be zero on gfx90a");
2215     CHECK_RESERVED_BITS_DESC_MSG(COMPUTE_PGM_RSRC3_GFX90A_RESERVED1,
2216                                  "COMPUTE_PGM_RSRC3", "must be zero on gfx90a");
2217   } else if (isGFX10Plus()) {
2218     // Bits [0-3].
2219     if (!isGFX12Plus()) {
2220       if (!EnableWavefrontSize32 || !*EnableWavefrontSize32) {
2221         PRINT_DIRECTIVE(".amdhsa_shared_vgpr_count",
2222                         COMPUTE_PGM_RSRC3_GFX10_GFX11_SHARED_VGPR_COUNT);
2223       } else {
2224         PRINT_PSEUDO_DIRECTIVE_COMMENT(
2225             "SHARED_VGPR_COUNT",
2226             COMPUTE_PGM_RSRC3_GFX10_GFX11_SHARED_VGPR_COUNT);
2227       }
2228     } else {
2229       CHECK_RESERVED_BITS_DESC_MSG(COMPUTE_PGM_RSRC3_GFX12_PLUS_RESERVED0,
2230                                    "COMPUTE_PGM_RSRC3",
2231                                    "must be zero on gfx12+");
2232     }
2233 
2234     // Bits [4-11].
2235     if (isGFX11()) {
2236       PRINT_PSEUDO_DIRECTIVE_COMMENT("INST_PREF_SIZE",
2237                                      COMPUTE_PGM_RSRC3_GFX11_INST_PREF_SIZE);
2238       PRINT_PSEUDO_DIRECTIVE_COMMENT("TRAP_ON_START",
2239                                      COMPUTE_PGM_RSRC3_GFX11_TRAP_ON_START);
2240       PRINT_PSEUDO_DIRECTIVE_COMMENT("TRAP_ON_END",
2241                                      COMPUTE_PGM_RSRC3_GFX11_TRAP_ON_END);
2242     } else if (isGFX12Plus()) {
2243       PRINT_PSEUDO_DIRECTIVE_COMMENT(
2244           "INST_PREF_SIZE", COMPUTE_PGM_RSRC3_GFX12_PLUS_INST_PREF_SIZE);
2245     } else {
2246       CHECK_RESERVED_BITS_DESC_MSG(COMPUTE_PGM_RSRC3_GFX10_RESERVED1,
2247                                    "COMPUTE_PGM_RSRC3",
2248                                    "must be zero on gfx10");
2249     }
2250 
2251     // Bits [12].
2252     CHECK_RESERVED_BITS_DESC_MSG(COMPUTE_PGM_RSRC3_GFX10_PLUS_RESERVED2,
2253                                  "COMPUTE_PGM_RSRC3", "must be zero on gfx10+");
2254 
2255     // Bits [13].
2256     if (isGFX12Plus()) {
2257       PRINT_PSEUDO_DIRECTIVE_COMMENT("GLG_EN",
2258                                      COMPUTE_PGM_RSRC3_GFX12_PLUS_GLG_EN);
2259     } else {
2260       CHECK_RESERVED_BITS_DESC_MSG(COMPUTE_PGM_RSRC3_GFX10_GFX11_RESERVED3,
2261                                    "COMPUTE_PGM_RSRC3",
2262                                    "must be zero on gfx10 or gfx11");
2263     }
2264 
2265     // Bits [14-30].
2266     CHECK_RESERVED_BITS_DESC_MSG(COMPUTE_PGM_RSRC3_GFX10_PLUS_RESERVED4,
2267                                  "COMPUTE_PGM_RSRC3", "must be zero on gfx10+");
2268 
2269     // Bits [31].
2270     if (isGFX11Plus()) {
2271       PRINT_PSEUDO_DIRECTIVE_COMMENT("IMAGE_OP",
2272                                      COMPUTE_PGM_RSRC3_GFX11_PLUS_IMAGE_OP);
2273     } else {
2274       CHECK_RESERVED_BITS_DESC_MSG(COMPUTE_PGM_RSRC3_GFX10_RESERVED5,
2275                                    "COMPUTE_PGM_RSRC3",
2276                                    "must be zero on gfx10");
2277     }
2278   } else if (FourByteBuffer) {
2279     return createStringError(
2280         std::errc::invalid_argument,
2281         "kernel descriptor COMPUTE_PGM_RSRC3 must be all zero before gfx9");
2282   }
2283   return true;
2284 }
2285 #undef PRINT_PSEUDO_DIRECTIVE_COMMENT
2286 #undef PRINT_DIRECTIVE
2287 #undef GET_FIELD
2288 #undef CHECK_RESERVED_BITS_IMPL
2289 #undef CHECK_RESERVED_BITS
2290 #undef CHECK_RESERVED_BITS_MSG
2291 #undef CHECK_RESERVED_BITS_DESC
2292 #undef CHECK_RESERVED_BITS_DESC_MSG
2293 
2294 /// Create an error object to return from onSymbolStart for reserved kernel
2295 /// descriptor bits being set.
2296 static Error createReservedKDBitsError(uint32_t Mask, unsigned BaseBytes,
2297                                        const char *Msg = "") {
2298   return createStringError(
2299       std::errc::invalid_argument, "kernel descriptor reserved %s set%s%s",
2300       getBitRangeFromMask(Mask, BaseBytes).c_str(), *Msg ? ", " : "", Msg);
2301 }
2302 
2303 /// Create an error object to return from onSymbolStart for reserved kernel
2304 /// descriptor bytes being set.
2305 static Error createReservedKDBytesError(unsigned BaseInBytes,
2306                                         unsigned WidthInBytes) {
2307   // Create an error comment in the same format as the "Kernel Descriptor"
2308   // table here: https://llvm.org/docs/AMDGPUUsage.html#kernel-descriptor .
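  // For example, BaseInBytes == 12 and WidthInBytes == 4 produce the message
  // "kernel descriptor reserved bits in range (127:96) set".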
2309   return createStringError(
2310       std::errc::invalid_argument,
2311       "kernel descriptor reserved bits in range (%u:%u) set",
2312       (BaseInBytes + WidthInBytes) * CHAR_BIT - 1, BaseInBytes * CHAR_BIT);
2313 }
2314 
2315 Expected<bool> AMDGPUDisassembler::decodeKernelDescriptorDirective(
2316     DataExtractor::Cursor &Cursor, ArrayRef<uint8_t> Bytes,
2317     raw_string_ostream &KdStream) const {
2318 #define PRINT_DIRECTIVE(DIRECTIVE, MASK)                                       \
2319   do {                                                                         \
2320     KdStream << Indent << DIRECTIVE " "                                        \
2321              << ((TwoByteBuffer & MASK) >> (MASK##_SHIFT)) << '\n';            \
2322   } while (0)
2323 
2324   uint16_t TwoByteBuffer = 0;
2325   uint32_t FourByteBuffer = 0;
2326 
2327   StringRef ReservedBytes;
2328   StringRef Indent = "\t";
2329 
2330   assert(Bytes.size() == 64);
2331   DataExtractor DE(Bytes, /*IsLittleEndian=*/true, /*AddressSize=*/8);
2332 
2333   switch (Cursor.tell()) {
2334   case amdhsa::GROUP_SEGMENT_FIXED_SIZE_OFFSET:
2335     FourByteBuffer = DE.getU32(Cursor);
2336     KdStream << Indent << ".amdhsa_group_segment_fixed_size " << FourByteBuffer
2337              << '\n';
2338     return true;
2339 
2340   case amdhsa::PRIVATE_SEGMENT_FIXED_SIZE_OFFSET:
2341     FourByteBuffer = DE.getU32(Cursor);
2342     KdStream << Indent << ".amdhsa_private_segment_fixed_size "
2343              << FourByteBuffer << '\n';
2344     return true;
2345 
2346   case amdhsa::KERNARG_SIZE_OFFSET:
2347     FourByteBuffer = DE.getU32(Cursor);
2348     KdStream << Indent << ".amdhsa_kernarg_size "
2349              << FourByteBuffer << '\n';
2350     return true;
2351 
2352   case amdhsa::RESERVED0_OFFSET:
2353     // 4 reserved bytes, must be 0.
2354     ReservedBytes = DE.getBytes(Cursor, 4);
2355     for (int I = 0; I < 4; ++I) {
2356       if (ReservedBytes[I] != 0)
2357         return createReservedKDBytesError(amdhsa::RESERVED0_OFFSET, 4);
2358     }
2359     return true;
2360 
2361   case amdhsa::KERNEL_CODE_ENTRY_BYTE_OFFSET_OFFSET:
2362     // KERNEL_CODE_ENTRY_BYTE_OFFSET
2363     // So far no directive controls this for Code Object V3, so simply skip for
2364     // disassembly.
2365     DE.skip(Cursor, 8);
2366     return true;
2367 
2368   case amdhsa::RESERVED1_OFFSET:
2369     // 20 reserved bytes, must be 0.
2370     ReservedBytes = DE.getBytes(Cursor, 20);
2371     for (int I = 0; I < 20; ++I) {
2372       if (ReservedBytes[I] != 0)
2373         return createReservedKDBytesError(amdhsa::RESERVED1_OFFSET, 20);
2374     }
2375     return true;
2376 
2377   case amdhsa::COMPUTE_PGM_RSRC3_OFFSET:
2378     FourByteBuffer = DE.getU32(Cursor);
2379     return decodeCOMPUTE_PGM_RSRC3(FourByteBuffer, KdStream);
2380 
2381   case amdhsa::COMPUTE_PGM_RSRC1_OFFSET:
2382     FourByteBuffer = DE.getU32(Cursor);
2383     return decodeCOMPUTE_PGM_RSRC1(FourByteBuffer, KdStream);
2384 
2385   case amdhsa::COMPUTE_PGM_RSRC2_OFFSET:
2386     FourByteBuffer = DE.getU32(Cursor);
2387     return decodeCOMPUTE_PGM_RSRC2(FourByteBuffer, KdStream);
2388 
2389   case amdhsa::KERNEL_CODE_PROPERTIES_OFFSET:
2390     using namespace amdhsa;
2391     TwoByteBuffer = DE.getU16(Cursor);
2392 
2393     if (!hasArchitectedFlatScratch())
2394       PRINT_DIRECTIVE(".amdhsa_user_sgpr_private_segment_buffer",
2395                       KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER);
2396     PRINT_DIRECTIVE(".amdhsa_user_sgpr_dispatch_ptr",
2397                     KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR);
2398     PRINT_DIRECTIVE(".amdhsa_user_sgpr_queue_ptr",
2399                     KERNEL_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR);
2400     PRINT_DIRECTIVE(".amdhsa_user_sgpr_kernarg_segment_ptr",
2401                     KERNEL_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR);
2402     PRINT_DIRECTIVE(".amdhsa_user_sgpr_dispatch_id",
2403                     KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID);
2404     if (!hasArchitectedFlatScratch())
2405       PRINT_DIRECTIVE(".amdhsa_user_sgpr_flat_scratch_init",
2406                       KERNEL_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT);
2407     PRINT_DIRECTIVE(".amdhsa_user_sgpr_private_segment_size",
2408                     KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE);
2409 
2410     if (TwoByteBuffer & KERNEL_CODE_PROPERTY_RESERVED0)
2411       return createReservedKDBitsError(KERNEL_CODE_PROPERTY_RESERVED0,
2412                                        amdhsa::KERNEL_CODE_PROPERTIES_OFFSET);
2413 
2414     // Reserved for GFX9
2415     if (isGFX9() &&
2416         (TwoByteBuffer & KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32)) {
2417       return createReservedKDBitsError(
2418           KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32,
2419           amdhsa::KERNEL_CODE_PROPERTIES_OFFSET, "must be zero on gfx9");
2420     }
2421     if (isGFX10Plus()) {
2422       PRINT_DIRECTIVE(".amdhsa_wavefront_size32",
2423                       KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32);
2424     }
2425 
2426     if (CodeObjectVersion >= AMDGPU::AMDHSA_COV5)
2427       PRINT_DIRECTIVE(".amdhsa_uses_dynamic_stack",
2428                       KERNEL_CODE_PROPERTY_USES_DYNAMIC_STACK);
2429 
2430     if (TwoByteBuffer & KERNEL_CODE_PROPERTY_RESERVED1) {
2431       return createReservedKDBitsError(KERNEL_CODE_PROPERTY_RESERVED1,
2432                                        amdhsa::KERNEL_CODE_PROPERTIES_OFFSET);
2433     }
2434 
2435     return true;
2436 
2437   case amdhsa::KERNARG_PRELOAD_OFFSET:
2438     using namespace amdhsa;
2439     TwoByteBuffer = DE.getU16(Cursor);
2440     if (TwoByteBuffer & KERNARG_PRELOAD_SPEC_LENGTH) {
2441       PRINT_DIRECTIVE(".amdhsa_user_sgpr_kernarg_preload_length",
2442                       KERNARG_PRELOAD_SPEC_LENGTH);
2443     }
2444 
2445     if (TwoByteBuffer & KERNARG_PRELOAD_SPEC_OFFSET) {
2446       PRINT_DIRECTIVE(".amdhsa_user_sgpr_kernarg_preload_offset",
2447                       KERNARG_PRELOAD_SPEC_OFFSET);
2448     }
2449     return true;
2450 
2451   case amdhsa::RESERVED3_OFFSET:
2452     // 4 bytes from here are reserved, must be 0.
2453     ReservedBytes = DE.getBytes(Cursor, 4);
2454     for (int I = 0; I < 4; ++I) {
2455       if (ReservedBytes[I] != 0)
2456         return createReservedKDBytesError(amdhsa::RESERVED3_OFFSET, 4);
2457     }
2458     return true;
2459 
2460   default:
2461     llvm_unreachable("Unhandled index. Case statements cover everything.");
2462     return true;
2463   }
2464 #undef PRINT_DIRECTIVE
2465 }
2466 
2467 Expected<bool> AMDGPUDisassembler::decodeKernelDescriptor(
2468     StringRef KdName, ArrayRef<uint8_t> Bytes, uint64_t KdAddress) const {
2469 
2470   // CP microcode requires the kernel descriptor to be 64 bytes in size and 64-byte aligned.
2471   if (Bytes.size() != 64 || KdAddress % 64 != 0)
2472     return createStringError(std::errc::invalid_argument,
2473                              "kernel descriptor must be 64-byte aligned");
2474 
2475   // FIXME: We can't actually decode "in order" as is done below, as e.g. GFX10
2476   // requires us to know the setting of .amdhsa_wavefront_size32 in order to
2477   // accurately produce .amdhsa_next_free_vgpr, and they appear in the wrong
2478   // order. Workaround this by first looking up .amdhsa_wavefront_size32 here
2479   // when required.
2480   if (isGFX10Plus()) {
2481     uint16_t KernelCodeProperties =
2482         support::endian::read16(&Bytes[amdhsa::KERNEL_CODE_PROPERTIES_OFFSET],
2483                                 llvm::endianness::little);
2484     EnableWavefrontSize32 =
2485         AMDHSA_BITS_GET(KernelCodeProperties,
2486                         amdhsa::KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32);
2487   }
2488 
2489   std::string Kd;
2490   raw_string_ostream KdStream(Kd);
2491   KdStream << ".amdhsa_kernel " << KdName << '\n';
2492 
2493   DataExtractor::Cursor C(0);
2494   while (C && C.tell() < Bytes.size()) {
2495     Expected<bool> Res = decodeKernelDescriptorDirective(C, Bytes, KdStream);
2496 
2497     cantFail(C.takeError());
2498 
2499     if (!Res)
2500       return Res;
2501   }
2502   KdStream << ".end_amdhsa_kernel\n";
2503   outs() << KdStream.str();
2504   return true;
2505 }
2506 
2507 Expected<bool> AMDGPUDisassembler::onSymbolStart(SymbolInfoTy &Symbol,
2508                                                  uint64_t &Size,
2509                                                  ArrayRef<uint8_t> Bytes,
2510                                                  uint64_t Address) const {
2511   // Right now only the kernel descriptor needs to be handled.
2512   // All other symbols are ignored for target-specific handling.
2513   // TODO:
2514   // Fix the spurious symbol issue for AMDGPU kernels. Exists for both Code
2515   // Object V2 and V3 when symbols are marked protected.
2516 
2517   // amd_kernel_code_t for Code Object V2.
2518   if (Symbol.Type == ELF::STT_AMDGPU_HSA_KERNEL) {
2519     Size = 256;
2520     return createStringError(std::errc::invalid_argument,
2521                              "code object v2 is not supported");
2522   }
2523 
2524   // Code Object V3 kernel descriptors.
2525   StringRef Name = Symbol.Name;
2526   if (Symbol.Type == ELF::STT_OBJECT && Name.ends_with(StringRef(".kd"))) {
2527     Size = 64; // Size = 64 regardless of success or failure.
2528     return decodeKernelDescriptor(Name.drop_back(3), Bytes, Address);
2529   }
2530 
2531   return false;
2532 }
2533 
2534 const MCExpr *AMDGPUDisassembler::createConstantSymbolExpr(StringRef Id,
2535                                                            int64_t Val) {
2536   MCContext &Ctx = getContext();
2537   MCSymbol *Sym = Ctx.getOrCreateSymbol(Id);
2538   // Note: only set the value on a newly created symbol, in case a
2539   // disassembler has already been initialized in this context.
2540   if (!Sym->isVariable()) {
2541     Sym->setVariableValue(MCConstantExpr::create(Val, Ctx));
2542   } else {
2543     int64_t Res = ~Val;
2544     bool Valid = Sym->getVariableValue()->evaluateAsAbsolute(Res);
2545     if (!Valid || Res != Val)
2546       Ctx.reportWarning(SMLoc(), "unsupported redefinition of " + Id);
2547   }
2548   return MCSymbolRefExpr::create(Sym, Ctx);
2549 }
2550 
2551 //===----------------------------------------------------------------------===//
2552 // AMDGPUSymbolizer
2553 //===----------------------------------------------------------------------===//
2554 
2555 // Try to find a symbol name for the specified label.
2556 bool AMDGPUSymbolizer::tryAddingSymbolicOperand(
2557     MCInst &Inst, raw_ostream & /*cStream*/, int64_t Value,
2558     uint64_t /*Address*/, bool IsBranch, uint64_t /*Offset*/,
2559     uint64_t /*OpSize*/, uint64_t /*InstSize*/) {
2560 
2561   if (!IsBranch) {
2562     return false;
2563   }
2564 
2565   auto *Symbols = static_cast<SectionSymbolsTy *>(DisInfo);
2566   if (!Symbols)
2567     return false;
2568 
2569   auto Result = llvm::find_if(*Symbols, [Value](const SymbolInfoTy &Val) {
2570     return Val.Addr == static_cast<uint64_t>(Value) &&
2571            Val.Type == ELF::STT_NOTYPE;
2572   });
2573   if (Result != Symbols->end()) {
2574     auto *Sym = Ctx.getOrCreateSymbol(Result->Name);
2575     const auto *Add = MCSymbolRefExpr::create(Sym, Ctx);
2576     Inst.addOperand(MCOperand::createExpr(Add));
2577     return true;
2578   }
2579   // Add to list of referenced addresses, so caller can synthesize a label.
2580   ReferencedAddresses.push_back(static_cast<uint64_t>(Value));
2581   return false;
2582 }
2583 
2584 void AMDGPUSymbolizer::tryAddingPcLoadReferenceComment(raw_ostream &cStream,
2585                                                        int64_t Value,
2586                                                        uint64_t Address) {
2587   llvm_unreachable("unimplemented");
2588 }
2589 
2590 //===----------------------------------------------------------------------===//
2591 // Initialization
2592 //===----------------------------------------------------------------------===//
2593 
2594 static MCSymbolizer *createAMDGPUSymbolizer(const Triple &/*TT*/,
2595                               LLVMOpInfoCallback /*GetOpInfo*/,
2596                               LLVMSymbolLookupCallback /*SymbolLookUp*/,
2597                               void *DisInfo,
2598                               MCContext *Ctx,
2599                               std::unique_ptr<MCRelocationInfo> &&RelInfo) {
2600   return new AMDGPUSymbolizer(*Ctx, std::move(RelInfo), DisInfo);
2601 }
2602 
2603 static MCDisassembler *createAMDGPUDisassembler(const Target &T,
2604                                                 const MCSubtargetInfo &STI,
2605                                                 MCContext &Ctx) {
2606   return new AMDGPUDisassembler(STI, Ctx, T.createMCInstrInfo());
2607 }
2608 
2609 extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUDisassembler() {
2610   TargetRegistry::RegisterMCDisassembler(getTheGCNTarget(),
2611                                          createAMDGPUDisassembler);
2612   TargetRegistry::RegisterMCSymbolizer(getTheGCNTarget(),
2613                                        createAMDGPUSymbolizer);
2614 }
2615