//===- AMDGPUDisassembler.cpp - Disassembler for AMDGPU ISA ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
///
/// This file contains the definition of the AMDGPU ISA disassembler.
//
//===----------------------------------------------------------------------===//

// ToDo: What to do with instruction suffixes (v_mov_b32 vs v_mov_b32_e32)?

#include "Disassembler/AMDGPUDisassembler.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIDefines.h"
#include "SIRegisterInfo.h"
#include "TargetInfo/AMDGPUTargetInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm-c/DisassemblerTypes.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDecoderOps.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/AMDHSAKernelDescriptor.h"

using namespace llvm;

#define DEBUG_TYPE "amdgpu-disassembler"

#define SGPR_MAX                                                               \
  (isGFX10Plus() ? AMDGPU::EncValues::SGPR_MAX_GFX10                           \
                 : AMDGPU::EncValues::SGPR_MAX_SI)

using DecodeStatus = llvm::MCDisassembler::DecodeStatus;

AMDGPUDisassembler::AMDGPUDisassembler(const MCSubtargetInfo &STI,
                                       MCContext &Ctx, MCInstrInfo const *MCII)
    : MCDisassembler(STI, Ctx), MCII(MCII), MRI(*Ctx.getRegisterInfo()),
      MAI(*Ctx.getAsmInfo()), TargetMaxInstBytes(MAI.getMaxInstLength(&STI)) {
  // ToDo: AMDGPUDisassembler supports only VI ISA.
  if (!STI.hasFeature(AMDGPU::FeatureGCN3Encoding) && !isGFX10Plus())
    report_fatal_error("Disassembly not yet supported for subtarget");
}

inline static MCDisassembler::DecodeStatus
addOperand(MCInst &Inst, const MCOperand& Opnd) {
  Inst.addOperand(Opnd);
  return Opnd.isValid() ?
    MCDisassembler::Success :
    MCDisassembler::Fail;
}

static int insertNamedMCOperand(MCInst &MI, const MCOperand &Op,
                                uint16_t NameIdx) {
  int OpIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), NameIdx);
  if (OpIdx != -1) {
    auto I = MI.begin();
    std::advance(I, OpIdx);
    MI.insert(I, Op);
  }
  return OpIdx;
}
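// For example, a call like insertNamedMCOperand(MI, MCOperand::createImm(0),
// AMDGPU::OpName::clamp) places a zero clamp operand at the position the
// instruction description expects, shifting any later operands to the right.
// The conversion routines below rely on this to materialize operands that are
// not explicitly encoded.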

static DecodeStatus decodeSOPPBrTarget(MCInst &Inst, unsigned Imm,
                                       uint64_t Addr,
                                       const MCDisassembler *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);

  // Our branches take a simm16, but we need two extra bits to account for the
  // factor of 4.
  APInt SignedOffset(18, Imm * 4, true);
  int64_t Offset = (SignedOffset.sext(64) + 4 + Addr).getSExtValue();

  if (DAsm->tryAddingSymbolicOperand(Inst, Offset, Addr, true, 2, 2, 0))
    return MCDisassembler::Success;
  return addOperand(Inst, MCOperand::createImm(Imm));
}
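// Worked example (illustrative values): a simm16 of -4 is encoded as
// Imm = 0xFFFC, so Imm * 4 = 0x3FFF0, which as an 18-bit signed value is -16;
// the branch target is then Addr + 4 - 16.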

static DecodeStatus decodeSMEMOffset(MCInst &Inst, unsigned Imm, uint64_t Addr,
                                     const MCDisassembler *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  int64_t Offset;
  if (DAsm->isGFX12Plus()) { // GFX12 supports 24-bit signed offsets.
    Offset = SignExtend64<24>(Imm);
  } else if (DAsm->isVI()) { // VI supports 20-bit unsigned offsets.
    Offset = Imm & 0xFFFFF;
  } else { // GFX9+ supports 21-bit signed offsets.
    Offset = SignExtend64<21>(Imm);
  }
  return addOperand(Inst, MCOperand::createImm(Offset));
}
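// For example, Imm = 0x1FFFFF decodes to -1 on GFX9+ (21-bit signed) but to
// 0xFFFFF on VI, where the offset field is 20-bit unsigned.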

static DecodeStatus decodeBoolReg(MCInst &Inst, unsigned Val, uint64_t Addr,
                                  const MCDisassembler *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeBoolReg(Val));
}

static DecodeStatus decodeSplitBarrier(MCInst &Inst, unsigned Val,
                                       uint64_t Addr,
                                       const MCDisassembler *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
  return addOperand(Inst, DAsm->decodeSplitBarrier(Val));
}

#define DECODE_OPERAND(StaticDecoderName, DecoderName)                         \
  static DecodeStatus StaticDecoderName(MCInst &Inst, unsigned Imm,            \
                                        uint64_t /*Addr*/,                     \
                                        const MCDisassembler *Decoder) {       \
    auto DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);              \
    return addOperand(Inst, DAsm->DecoderName(Imm));                           \
  }

// Decoder for registers, decoded directly using RegClassID. Imm (8-bit) is
// the register number. Used by VGPR-only and AGPR-only operands.
#define DECODE_OPERAND_REG_8(RegClass)                                         \
  static DecodeStatus Decode##RegClass##RegisterClass(                         \
      MCInst &Inst, unsigned Imm, uint64_t /*Addr*/,                           \
      const MCDisassembler *Decoder) {                                         \
    assert(Imm < (1 << 8) && "8-bit encoding");                                \
    auto DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);              \
    return addOperand(                                                         \
        Inst, DAsm->createRegOperand(AMDGPU::RegClass##RegClassID, Imm));      \
  }

#define DECODE_SrcOp(Name, EncSize, OpWidth, EncImm, MandatoryLiteral,         \
                     ImmWidth)                                                 \
  static DecodeStatus Name(MCInst &Inst, unsigned Imm, uint64_t /*Addr*/,      \
                           const MCDisassembler *Decoder) {                    \
    assert(Imm < (1 << EncSize) && #EncSize "-bit encoding");                  \
    auto DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);              \
    return addOperand(Inst,                                                    \
                      DAsm->decodeSrcOp(AMDGPUDisassembler::OpWidth, EncImm,   \
                                        MandatoryLiteral, ImmWidth));          \
  }
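
// For reference, an instantiation such as DECODE_OPERAND_REG_7(SReg_32, OPW32)
// below expands to roughly:
//
//   static DecodeStatus DecodeSReg_32RegisterClass(
//       MCInst &Inst, unsigned Imm, uint64_t /*Addr*/,
//       const MCDisassembler *Decoder) {
//     assert(Imm < (1 << 7) && "7-bit encoding");
//     auto DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
//     return addOperand(Inst, DAsm->decodeSrcOp(AMDGPUDisassembler::OPW32,
//                                               Imm, false, 0));
//   }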

// Decoder for registers. Imm (7-bit) is the register number; uses decodeSrcOp
// to get the register class. Used by SGPR-only operands.
#define DECODE_OPERAND_REG_7(RegClass, OpWidth)                                \
  DECODE_SrcOp(Decode##RegClass##RegisterClass, 7, OpWidth, Imm, false, 0)

// Decoder for registers. Imm (10-bit): Imm{7-0} is the register number,
// Imm{9} is acc (agpr or vgpr), and Imm{8} should be 0 (see VOP3Pe_SMFMAC).
// Set Imm{8} to 1 (IS_VGPR) to decode using 'enum10' from decodeSrcOp.
// Used by AV_ register classes (AGPR-only or VGPR-only register operands).
#define DECODE_OPERAND_REG_AV10(RegClass, OpWidth)                             \
  DECODE_SrcOp(Decode##RegClass##RegisterClass, 10, OpWidth,                   \
               Imm | AMDGPU::EncValues::IS_VGPR, false, 0)

// Decoder for Src (9-bit encoding) registers only.
#define DECODE_OPERAND_SRC_REG_9(RegClass, OpWidth)                            \
  DECODE_SrcOp(decodeOperand_##RegClass, 9, OpWidth, Imm, false, 0)

// Decoder for Src (9-bit encoding) AGPR only. The register number is encoded
// in 9 bits; set Imm{9} to 1 (set acc) and decode using 'enum10' from
// decodeSrcOp.
#define DECODE_OPERAND_SRC_REG_A9(RegClass, OpWidth)                           \
  DECODE_SrcOp(decodeOperand_##RegClass, 9, OpWidth, Imm | 512, false, 0)

// Decoder for 'enum10' from decodeSrcOp. Imm{0-8} is the 9-bit Src encoding
// and Imm{9} is acc; registers only.
#define DECODE_SRC_OPERAND_REG_AV10(RegClass, OpWidth)                         \
  DECODE_SrcOp(decodeOperand_##RegClass, 10, OpWidth, Imm, false, 0)

// Decoder for RegisterOperands using the 9-bit Src encoding. The operand can
// be a register from RegClass or an immediate. Registers that don't belong to
// RegClass are still decoded, and the InstPrinter will report a warning. An
// immediate is decoded into a constant of size ImmWidth, which should match
// the width of the immediate used by the OperandType (important for
// floating-point types).
#define DECODE_OPERAND_SRC_REG_OR_IMM_9(RegClass, OpWidth, ImmWidth)           \
  DECODE_SrcOp(decodeOperand_##RegClass##_Imm##ImmWidth, 9, OpWidth, Imm,      \
               false, ImmWidth)

// Decoder for Src (9-bit encoding) AGPR or immediate. Set Imm{9} to 1 (set
// acc) and decode using 'enum10' from decodeSrcOp.
#define DECODE_OPERAND_SRC_REG_OR_IMM_A9(RegClass, OpWidth, ImmWidth)          \
  DECODE_SrcOp(decodeOperand_##RegClass##_Imm##ImmWidth, 9, OpWidth,           \
               Imm | 512, false, ImmWidth)

#define DECODE_OPERAND_SRC_REG_OR_IMM_DEFERRED_9(RegClass, OpWidth, ImmWidth)  \
  DECODE_SrcOp(decodeOperand_##RegClass##_Deferred##_Imm##ImmWidth, 9,         \
               OpWidth, Imm, true, ImmWidth)

// Default decoders generated by tablegen: 'Decode<RegClass>RegisterClass'
// when RegisterClass is used as an operand. Most often used for destination
// operands.

DECODE_OPERAND_REG_8(VGPR_32)
DECODE_OPERAND_REG_8(VGPR_32_Lo128)
DECODE_OPERAND_REG_8(VReg_64)
DECODE_OPERAND_REG_8(VReg_96)
DECODE_OPERAND_REG_8(VReg_128)
DECODE_OPERAND_REG_8(VReg_256)
DECODE_OPERAND_REG_8(VReg_288)
DECODE_OPERAND_REG_8(VReg_352)
DECODE_OPERAND_REG_8(VReg_384)
DECODE_OPERAND_REG_8(VReg_512)
DECODE_OPERAND_REG_8(VReg_1024)

DECODE_OPERAND_REG_7(SReg_32, OPW32)
DECODE_OPERAND_REG_7(SReg_32_XM0_XEXEC, OPW32)
DECODE_OPERAND_REG_7(SReg_32_XEXEC_HI, OPW32)
DECODE_OPERAND_REG_7(SReg_64, OPW64)
DECODE_OPERAND_REG_7(SReg_64_XEXEC, OPW64)
DECODE_OPERAND_REG_7(SReg_96, OPW96)
DECODE_OPERAND_REG_7(SReg_128, OPW128)
DECODE_OPERAND_REG_7(SReg_256, OPW256)
DECODE_OPERAND_REG_7(SReg_512, OPW512)

DECODE_OPERAND_REG_8(AGPR_32)
DECODE_OPERAND_REG_8(AReg_64)
DECODE_OPERAND_REG_8(AReg_128)
DECODE_OPERAND_REG_8(AReg_256)
DECODE_OPERAND_REG_8(AReg_512)
DECODE_OPERAND_REG_8(AReg_1024)

DECODE_OPERAND_REG_AV10(AVDst_128, OPW128)
DECODE_OPERAND_REG_AV10(AVDst_512, OPW512)

// Decoders for register-only source RegisterOperands that use the 9-bit Src
// encoding: 'decodeOperand_<RegClass>'.

DECODE_OPERAND_SRC_REG_9(VGPR_32, OPW32)
DECODE_OPERAND_SRC_REG_9(VReg_64, OPW64)
DECODE_OPERAND_SRC_REG_9(VReg_128, OPW128)
DECODE_OPERAND_SRC_REG_9(VReg_256, OPW256)
DECODE_OPERAND_SRC_REG_9(VRegOrLds_32, OPW32)

DECODE_OPERAND_SRC_REG_A9(AGPR_32, OPW32)

DECODE_SRC_OPERAND_REG_AV10(AV_32, OPW32)
DECODE_SRC_OPERAND_REG_AV10(AV_64, OPW64)
DECODE_SRC_OPERAND_REG_AV10(AV_128, OPW128)

// Decoders for register or immediate RegisterOperands that use the 9-bit Src
// encoding: 'decodeOperand_<RegClass>_Imm<ImmWidth>'.

DECODE_OPERAND_SRC_REG_OR_IMM_9(SReg_64, OPW64, 64)
DECODE_OPERAND_SRC_REG_OR_IMM_9(SReg_32, OPW32, 32)
DECODE_OPERAND_SRC_REG_OR_IMM_9(SReg_32, OPW32, 16)
DECODE_OPERAND_SRC_REG_OR_IMM_9(SRegOrLds_32, OPW32, 32)
DECODE_OPERAND_SRC_REG_OR_IMM_9(VS_32_Lo128, OPW16, 16)
DECODE_OPERAND_SRC_REG_OR_IMM_9(VS_32, OPW32, 16)
DECODE_OPERAND_SRC_REG_OR_IMM_9(VS_32, OPW32, 32)
DECODE_OPERAND_SRC_REG_OR_IMM_9(VS_64, OPW64, 64)
DECODE_OPERAND_SRC_REG_OR_IMM_9(VS_64, OPW64, 32)
DECODE_OPERAND_SRC_REG_OR_IMM_9(VReg_64, OPW64, 64)
DECODE_OPERAND_SRC_REG_OR_IMM_9(VReg_128, OPW128, 32)
DECODE_OPERAND_SRC_REG_OR_IMM_9(VReg_256, OPW256, 64)
DECODE_OPERAND_SRC_REG_OR_IMM_9(VReg_512, OPW512, 32)
DECODE_OPERAND_SRC_REG_OR_IMM_9(VReg_1024, OPW1024, 32)

DECODE_OPERAND_SRC_REG_OR_IMM_A9(AReg_64, OPW64, 64)
DECODE_OPERAND_SRC_REG_OR_IMM_A9(AReg_128, OPW128, 32)
DECODE_OPERAND_SRC_REG_OR_IMM_A9(AReg_256, OPW256, 64)
DECODE_OPERAND_SRC_REG_OR_IMM_A9(AReg_512, OPW512, 32)
DECODE_OPERAND_SRC_REG_OR_IMM_A9(AReg_1024, OPW1024, 32)

DECODE_OPERAND_SRC_REG_OR_IMM_DEFERRED_9(VS_32_Lo128, OPW16, 16)
DECODE_OPERAND_SRC_REG_OR_IMM_DEFERRED_9(VS_32, OPW16, 16)
DECODE_OPERAND_SRC_REG_OR_IMM_DEFERRED_9(VS_32, OPW32, 32)
DECODE_OPERAND_SRC_REG_OR_IMM_DEFERRED_9(SReg_32, OPW32, 32)

static DecodeStatus DecodeVGPR_16RegisterClass(MCInst &Inst, unsigned Imm,
                                               uint64_t /*Addr*/,
                                               const MCDisassembler *Decoder) {
  assert(isUInt<10>(Imm) && "10-bit encoding expected");
  assert((Imm & (1 << 8)) == 0 && "Imm{8} should not be used");

  bool IsHi = Imm & (1 << 9);
  unsigned RegIdx = Imm & 0xff;
  auto DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
  return addOperand(Inst, DAsm->createVGPR16Operand(RegIdx, IsHi));
}
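// For example, Imm = 0x205 has Imm{9} set and RegIdx = 5, so it selects the
// high half (v5.h); Imm = 0x005 selects the low half of the same register.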

static DecodeStatus
DecodeVGPR_16_Lo128RegisterClass(MCInst &Inst, unsigned Imm, uint64_t /*Addr*/,
                                 const MCDisassembler *Decoder) {
  assert(isUInt<8>(Imm) && "8-bit encoding expected");

  bool IsHi = Imm & (1 << 7);
  unsigned RegIdx = Imm & 0x7f;
  auto DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
  return addOperand(Inst, DAsm->createVGPR16Operand(RegIdx, IsHi));
}

static DecodeStatus decodeOperand_VSrcT16_Lo128(MCInst &Inst, unsigned Imm,
                                                uint64_t /*Addr*/,
                                                const MCDisassembler *Decoder) {
  assert(isUInt<9>(Imm) && "9-bit encoding expected");

  const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
  bool IsVGPR = Imm & (1 << 8);
  if (IsVGPR) {
    bool IsHi = Imm & (1 << 7);
    unsigned RegIdx = Imm & 0x7f;
    return addOperand(Inst, DAsm->createVGPR16Operand(RegIdx, IsHi));
  }
  return addOperand(Inst, DAsm->decodeNonVGPRSrcOp(AMDGPUDisassembler::OPW16,
                                                   Imm & 0xFF, false, 16));
}

static DecodeStatus decodeOperand_VSrcT16(MCInst &Inst, unsigned Imm,
                                          uint64_t /*Addr*/,
                                          const MCDisassembler *Decoder) {
  assert(isUInt<10>(Imm) && "10-bit encoding expected");

  const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
  bool IsVGPR = Imm & (1 << 8);
  if (IsVGPR) {
    bool IsHi = Imm & (1 << 9);
    unsigned RegIdx = Imm & 0xff;
    return addOperand(Inst, DAsm->createVGPR16Operand(RegIdx, IsHi));
  }
  return addOperand(Inst, DAsm->decodeNonVGPRSrcOp(AMDGPUDisassembler::OPW16,
                                                   Imm & 0xFF, false, 16));
}

static DecodeStatus decodeOperand_KImmFP(MCInst &Inst, unsigned Imm,
                                         uint64_t Addr,
                                         const MCDisassembler *Decoder) {
  const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
  return addOperand(Inst, DAsm->decodeMandatoryLiteralConstant(Imm));
}

static DecodeStatus decodeOperandVOPDDstY(MCInst &Inst, unsigned Val,
                                          uint64_t Addr, const void *Decoder) {
  const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
  return addOperand(Inst, DAsm->decodeVOPDDstYOp(Inst, Val));
}

static bool IsAGPROperand(const MCInst &Inst, int OpIdx,
                          const MCRegisterInfo *MRI) {
  if (OpIdx < 0)
    return false;

  const MCOperand &Op = Inst.getOperand(OpIdx);
  if (!Op.isReg())
    return false;

  unsigned Sub = MRI->getSubReg(Op.getReg(), AMDGPU::sub0);
  auto Reg = Sub ? Sub : Op.getReg();
  return Reg >= AMDGPU::AGPR0 && Reg <= AMDGPU::AGPR255;
}

static DecodeStatus decodeOperand_AVLdSt_Any(MCInst &Inst, unsigned Imm,
                                             AMDGPUDisassembler::OpWidthTy Opw,
                                             const MCDisassembler *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  if (!DAsm->isGFX90A()) {
    Imm &= 511;
  } else {
    // If an atomic has both vdata and vdst, their register classes are tied.
    // The acc bit is decoded along with vdst, the first operand, so we need
    // to change the register class to AGPR if vdst was an AGPR.
    // If a DS instruction has both data0 and data1, their register classes
    // are also tied.
    unsigned Opc = Inst.getOpcode();
    uint64_t TSFlags = DAsm->getMCII()->get(Opc).TSFlags;
    uint16_t DataNameIdx = (TSFlags & SIInstrFlags::DS) ? AMDGPU::OpName::data0
                                                        : AMDGPU::OpName::vdata;
    const MCRegisterInfo *MRI = DAsm->getContext().getRegisterInfo();
    int DataIdx = AMDGPU::getNamedOperandIdx(Opc, DataNameIdx);
    if ((int)Inst.getNumOperands() == DataIdx) {
      int DstIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
      if (IsAGPROperand(Inst, DstIdx, MRI))
        Imm |= 512;
    }

    if (TSFlags & SIInstrFlags::DS) {
      int Data2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data1);
      if ((int)Inst.getNumOperands() == Data2Idx &&
          IsAGPROperand(Inst, DataIdx, MRI))
        Imm |= 512;
    }
  }
  return addOperand(Inst, DAsm->decodeSrcOp(Opw, Imm | 256));
}

static DecodeStatus decodeOperand_VSrc_f64(MCInst &Inst, unsigned Imm,
                                           uint64_t Addr,
                                           const MCDisassembler *Decoder) {
  assert(Imm < (1 << 9) && "9-bit encoding");
  auto DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
  return addOperand(
      Inst, DAsm->decodeSrcOp(AMDGPUDisassembler::OPW64, Imm, false, 64, true));
}

static DecodeStatus
DecodeAVLdSt_32RegisterClass(MCInst &Inst, unsigned Imm, uint64_t Addr,
                             const MCDisassembler *Decoder) {
  return decodeOperand_AVLdSt_Any(Inst, Imm,
                                  AMDGPUDisassembler::OPW32, Decoder);
}

static DecodeStatus
DecodeAVLdSt_64RegisterClass(MCInst &Inst, unsigned Imm, uint64_t Addr,
                             const MCDisassembler *Decoder) {
  return decodeOperand_AVLdSt_Any(Inst, Imm,
                                  AMDGPUDisassembler::OPW64, Decoder);
}

static DecodeStatus
DecodeAVLdSt_96RegisterClass(MCInst &Inst, unsigned Imm, uint64_t Addr,
                             const MCDisassembler *Decoder) {
  return decodeOperand_AVLdSt_Any(Inst, Imm,
                                  AMDGPUDisassembler::OPW96, Decoder);
}

static DecodeStatus
DecodeAVLdSt_128RegisterClass(MCInst &Inst, unsigned Imm, uint64_t Addr,
                              const MCDisassembler *Decoder) {
  return decodeOperand_AVLdSt_Any(Inst, Imm,
                                  AMDGPUDisassembler::OPW128, Decoder);
}

static DecodeStatus
DecodeAVLdSt_160RegisterClass(MCInst &Inst, unsigned Imm, uint64_t Addr,
                              const MCDisassembler *Decoder) {
  return decodeOperand_AVLdSt_Any(Inst, Imm, AMDGPUDisassembler::OPW160,
                                  Decoder);
}

#define DECODE_SDWA(DecName) \
DECODE_OPERAND(decodeSDWA##DecName, decodeSDWA##DecName)

DECODE_SDWA(Src32)
DECODE_SDWA(Src16)
DECODE_SDWA(VopcDst)

#include "AMDGPUGenDisassemblerTables.inc"

//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//

template <typename T> static inline T eatBytes(ArrayRef<uint8_t>& Bytes) {
  assert(Bytes.size() >= sizeof(T));
  const auto Res =
      support::endian::read<T, llvm::endianness::little>(Bytes.data());
  Bytes = Bytes.slice(sizeof(T));
  return Res;
}

static inline DecoderUInt128 eat12Bytes(ArrayRef<uint8_t> &Bytes) {
  assert(Bytes.size() >= 12);
  uint64_t Lo =
      support::endian::read<uint64_t, llvm::endianness::little>(Bytes.data());
  Bytes = Bytes.slice(8);
  uint64_t Hi =
      support::endian::read<uint32_t, llvm::endianness::little>(Bytes.data());
  Bytes = Bytes.slice(4);
  return DecoderUInt128(Lo, Hi);
}
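// For illustration: given 12 little-endian bytes B0..B11, Lo holds B0..B7 and
// Hi holds B8..B11, forming the 96-bit word Hi:Lo consumed by the GFX11+
// 96-bit decoder tables below.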

// The disassembler is greedy, so we need to check the FI operand value to
// avoid parsing a dpp8 instruction when the correct literal is not set. For
// dpp16, the autogenerated decoder checks the dpp literal.
static bool isValidDPP8(const MCInst &MI) {
  using namespace llvm::AMDGPU::DPP;
  int FiIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::fi);
  assert(FiIdx != -1);
  if ((unsigned)FiIdx >= MI.getNumOperands())
    return false;
  unsigned Fi = MI.getOperand(FiIdx).getImm();
  return Fi == DPP8_FI_0 || Fi == DPP8_FI_1;
}

DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
                                                ArrayRef<uint8_t> Bytes_,
                                                uint64_t Address,
                                                raw_ostream &CS) const {
  bool IsSDWA = false;

  unsigned MaxInstBytesNum = std::min((size_t)TargetMaxInstBytes, Bytes_.size());
  Bytes = Bytes_.slice(0, MaxInstBytesNum);

  DecodeStatus Res = MCDisassembler::Fail;
  do {
    // ToDo: it would be better to switch on encoding length using some bit
    // predicate, but it is unknown yet, so try everything we can.

    // Try to decode DPP and SDWA first to solve conflicts with the VOP1 and
    // VOP2 encodings.
    if (isGFX11Plus() && Bytes.size() >= 12) {
      DecoderUInt128 DecW = eat12Bytes(Bytes);
      Res =
          tryDecodeInst(DecoderTableDPP8GFX1196, DecoderTableDPP8GFX11_FAKE1696,
                        MI, DecW, Address, CS);
      if (Res && convertDPP8Inst(MI) == MCDisassembler::Success)
        break;
      MI = MCInst(); // clear
      Res =
          tryDecodeInst(DecoderTableDPP8GFX1296, DecoderTableDPP8GFX12_FAKE1696,
                        MI, DecW, Address, CS);
      if (Res && convertDPP8Inst(MI) == MCDisassembler::Success)
        break;
      MI = MCInst(); // clear

      const auto convertVOPDPP = [&]() {
        if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::VOP3P) {
          convertVOP3PDPPInst(MI);
        } else if (AMDGPU::isVOPC64DPP(MI.getOpcode())) {
          convertVOPCDPPInst(MI); // Special VOP3 case
        } else {
          assert(MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::VOP3);
          convertVOP3DPPInst(MI); // Regular VOP3 case
        }
      };
      Res = tryDecodeInst(DecoderTableDPPGFX1196, DecoderTableDPPGFX11_FAKE1696,
                          MI, DecW, Address, CS);
      if (Res) {
        convertVOPDPP();
        break;
      }
      Res = tryDecodeInst(DecoderTableDPPGFX1296, DecoderTableDPPGFX12_FAKE1696,
                          MI, DecW, Address, CS);
      if (Res) {
        convertVOPDPP();
        break;
      }
      Res = tryDecodeInst(DecoderTableGFX1196, MI, DecW, Address, CS);
      if (Res)
        break;

      Res = tryDecodeInst(DecoderTableGFX1296, MI, DecW, Address, CS);
      if (Res)
        break;
    }
    // Reinitialize Bytes.
    Bytes = Bytes_.slice(0, MaxInstBytesNum);

    if (Bytes.size() >= 8) {
      const uint64_t QW = eatBytes<uint64_t>(Bytes);

      if (STI.hasFeature(AMDGPU::FeatureGFX10_BEncoding)) {
        Res = tryDecodeInst(DecoderTableGFX10_B64, MI, QW, Address, CS);
        if (Res) {
          if (AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::dpp8)
              == -1)
            break;
          if (convertDPP8Inst(MI) == MCDisassembler::Success)
            break;
          MI = MCInst(); // clear
        }
      }

      Res = tryDecodeInst(DecoderTableDPP864, MI, QW, Address, CS);
      if (Res && convertDPP8Inst(MI) == MCDisassembler::Success)
        break;
      MI = MCInst(); // clear

      Res = tryDecodeInst(DecoderTableDPP8GFX1164,
                          DecoderTableDPP8GFX11_FAKE1664, MI, QW, Address, CS);
      if (Res && convertDPP8Inst(MI) == MCDisassembler::Success)
        break;
      MI = MCInst(); // clear

      Res = tryDecodeInst(DecoderTableDPP8GFX1264,
                          DecoderTableDPP8GFX12_FAKE1664, MI, QW, Address, CS);
      if (Res && convertDPP8Inst(MI) == MCDisassembler::Success)
        break;
      MI = MCInst(); // clear

      Res = tryDecodeInst(DecoderTableDPP64, MI, QW, Address, CS);
      if (Res) break;

      Res = tryDecodeInst(DecoderTableDPPGFX1164, DecoderTableDPPGFX11_FAKE1664,
                          MI, QW, Address, CS);
      if (Res) {
        if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::VOPC)
          convertVOPCDPPInst(MI);
        break;
      }

      Res = tryDecodeInst(DecoderTableDPPGFX1264, DecoderTableDPPGFX12_FAKE1664,
                          MI, QW, Address, CS);
      if (Res) {
        if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::VOPC)
          convertVOPCDPPInst(MI);
        break;
      }

      Res = tryDecodeInst(DecoderTableSDWA64, MI, QW, Address, CS);
      if (Res) { IsSDWA = true; break; }

      Res = tryDecodeInst(DecoderTableSDWA964, MI, QW, Address, CS);
      if (Res) { IsSDWA = true; break; }

      Res = tryDecodeInst(DecoderTableSDWA1064, MI, QW, Address, CS);
      if (Res) { IsSDWA = true; break; }

      if (STI.hasFeature(AMDGPU::FeatureUnpackedD16VMem)) {
        Res = tryDecodeInst(DecoderTableGFX80_UNPACKED64, MI, QW, Address, CS);
        if (Res)
          break;
      }

      // Some GFX9 subtargets repurposed the v_mad_mix_f32, v_mad_mixlo_f16 and
      // v_mad_mixhi_f16 opcodes for FMA variants. Try to decode using this
      // special table first so we print the correct name.
      if (STI.hasFeature(AMDGPU::FeatureFmaMixInsts)) {
        Res = tryDecodeInst(DecoderTableGFX9_DL64, MI, QW, Address, CS);
        if (Res)
          break;
      }
    }

    // Reinitialize Bytes, as DPP64 could have eaten too much.
    Bytes = Bytes_.slice(0, MaxInstBytesNum);

    // Try to decode a 32-bit instruction.
    if (Bytes.size() < 4) break;
    const uint32_t DW = eatBytes<uint32_t>(Bytes);
    Res = tryDecodeInst(DecoderTableGFX832, MI, DW, Address, CS);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableAMDGPU32, MI, DW, Address, CS);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableGFX932, MI, DW, Address, CS);
    if (Res) break;

    if (STI.hasFeature(AMDGPU::FeatureGFX90AInsts)) {
      Res = tryDecodeInst(DecoderTableGFX90A32, MI, DW, Address, CS);
      if (Res)
        break;
    }

    if (STI.hasFeature(AMDGPU::FeatureGFX10_BEncoding)) {
      Res = tryDecodeInst(DecoderTableGFX10_B32, MI, DW, Address, CS);
      if (Res) break;
    }

    Res = tryDecodeInst(DecoderTableGFX1032, MI, DW, Address, CS);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableGFX1132, DecoderTableGFX11_FAKE1632, MI, DW,
                        Address, CS);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableGFX1232, DecoderTableGFX12_FAKE1632, MI, DW,
                        Address, CS);
    if (Res)
      break;

    if (Bytes.size() < 4) break;
    const uint64_t QW = ((uint64_t)eatBytes<uint32_t>(Bytes) << 32) | DW;

    if (STI.hasFeature(AMDGPU::FeatureGFX940Insts)) {
      Res = tryDecodeInst(DecoderTableGFX94064, MI, QW, Address, CS);
      if (Res)
        break;
    }

    if (STI.hasFeature(AMDGPU::FeatureGFX90AInsts)) {
      Res = tryDecodeInst(DecoderTableGFX90A64, MI, QW, Address, CS);
      if (Res)
        break;
    }

    Res = tryDecodeInst(DecoderTableGFX864, MI, QW, Address, CS);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableAMDGPU64, MI, QW, Address, CS);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableGFX964, MI, QW, Address, CS);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableGFX1064, MI, QW, Address, CS);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableGFX1264, DecoderTableGFX12_FAKE1664, MI, QW,
                        Address, CS);
    if (Res)
      break;

    Res = tryDecodeInst(DecoderTableGFX1164, DecoderTableGFX11_FAKE1664, MI, QW,
                        Address, CS);
    if (Res)
      break;

    Res = tryDecodeInst(DecoderTableWMMAGFX1164, MI, QW, Address, CS);
  } while (false);

  if (Res && AMDGPU::isMAC(MI.getOpcode())) {
    // Insert dummy unused src2_modifiers.
    insertNamedMCOperand(MI, MCOperand::createImm(0),
                         AMDGPU::OpName::src2_modifiers);
  }

  if (Res && (MCII->get(MI.getOpcode()).TSFlags &
          (SIInstrFlags::MUBUF | SIInstrFlags::FLAT | SIInstrFlags::SMRD))) {
    int CPolPos = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                             AMDGPU::OpName::cpol);
    if (CPolPos != -1) {
      unsigned CPol =
          (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::IsAtomicRet) ?
              AMDGPU::CPol::GLC : 0;
      if (MI.getNumOperands() <= (unsigned)CPolPos) {
        insertNamedMCOperand(MI, MCOperand::createImm(CPol),
                             AMDGPU::OpName::cpol);
      } else if (CPol) {
        MI.getOperand(CPolPos).setImm(MI.getOperand(CPolPos).getImm() | CPol);
      }
    }
  }

  if (Res && (MCII->get(MI.getOpcode()).TSFlags &
              (SIInstrFlags::MTBUF | SIInstrFlags::MUBUF)) &&
             (STI.hasFeature(AMDGPU::FeatureGFX90AInsts))) {
    // GFX90A lost TFE; its place is occupied by ACC.
    int TFEOpIdx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::tfe);
    if (TFEOpIdx != -1) {
      auto TFEIter = MI.begin();
      std::advance(TFEIter, TFEOpIdx);
      MI.insert(TFEIter, MCOperand::createImm(0));
    }
  }

  if (Res && (MCII->get(MI.getOpcode()).TSFlags &
              (SIInstrFlags::MTBUF | SIInstrFlags::MUBUF))) {
    int SWZOpIdx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::swz);
    if (SWZOpIdx != -1) {
      auto SWZIter = MI.begin();
      std::advance(SWZIter, SWZOpIdx);
      MI.insert(SWZIter, MCOperand::createImm(0));
    }
  }

  if (Res && (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::MIMG)) {
    int VAddr0Idx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0);
    int RsrcIdx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc);
    unsigned NSAArgs = RsrcIdx - VAddr0Idx - 1;
    if (VAddr0Idx >= 0 && NSAArgs > 0) {
      unsigned NSAWords = (NSAArgs + 3) / 4;
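      // E.g., 5 extra NSA addresses occupy (5 + 3) / 4 = 2 dwords, so 8 more
      // encoding bytes are consumed below.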
      if (Bytes.size() < 4 * NSAWords) {
        Res = MCDisassembler::Fail;
      } else {
        for (unsigned i = 0; i < NSAArgs; ++i) {
          const unsigned VAddrIdx = VAddr0Idx + 1 + i;
          auto VAddrRCID =
              MCII->get(MI.getOpcode()).operands()[VAddrIdx].RegClass;
          MI.insert(MI.begin() + VAddrIdx,
                    createRegOperand(VAddrRCID, Bytes[i]));
        }
        Bytes = Bytes.slice(4 * NSAWords);
      }
    }

    if (Res)
      Res = convertMIMGInst(MI);
  }

  if (Res && (MCII->get(MI.getOpcode()).TSFlags &
              (SIInstrFlags::VIMAGE | SIInstrFlags::VSAMPLE)))
    Res = convertMIMGInst(MI);

  if (Res && (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::EXP))
    Res = convertEXPInst(MI);

  if (Res && (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::VINTERP))
    Res = convertVINTERPInst(MI);

  if (Res && IsSDWA)
    Res = convertSDWAInst(MI);

  int VDstIn_Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                              AMDGPU::OpName::vdst_in);
  if (VDstIn_Idx != -1) {
    int Tied = MCII->get(MI.getOpcode()).getOperandConstraint(VDstIn_Idx,
                           MCOI::OperandConstraint::TIED_TO);
    if (Tied != -1 && (MI.getNumOperands() <= (unsigned)VDstIn_Idx ||
         !MI.getOperand(VDstIn_Idx).isReg() ||
         MI.getOperand(VDstIn_Idx).getReg() != MI.getOperand(Tied).getReg())) {
      if (MI.getNumOperands() > (unsigned)VDstIn_Idx)
        MI.erase(&MI.getOperand(VDstIn_Idx));
      insertNamedMCOperand(MI,
        MCOperand::createReg(MI.getOperand(Tied).getReg()),
        AMDGPU::OpName::vdst_in);
    }
  }

  int ImmLitIdx =
      AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::imm);
  bool IsSOPK = MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::SOPK;
  if (Res && ImmLitIdx != -1 && !IsSOPK)
    Res = convertFMAanyK(MI, ImmLitIdx);

  // If the opcode was not recognized, we'll assume a Size of 4 bytes
  // (unless there are fewer bytes left).
  Size = Res ? (MaxInstBytesNum - Bytes.size())
             : std::min((size_t)4, Bytes_.size());
  return Res;
}

DecodeStatus AMDGPUDisassembler::convertEXPInst(MCInst &MI) const {
  if (STI.hasFeature(AMDGPU::FeatureGFX11Insts)) {
    // The MCInst still has these fields even though they are no longer encoded
    // in the GFX11 instruction.
    insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::vm);
    insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::compr);
  }
  return MCDisassembler::Success;
}

DecodeStatus AMDGPUDisassembler::convertVINTERPInst(MCInst &MI) const {
  if (MI.getOpcode() == AMDGPU::V_INTERP_P10_F16_F32_inreg_gfx11 ||
      MI.getOpcode() == AMDGPU::V_INTERP_P10_F16_F32_inreg_gfx12 ||
      MI.getOpcode() == AMDGPU::V_INTERP_P10_RTZ_F16_F32_inreg_gfx11 ||
      MI.getOpcode() == AMDGPU::V_INTERP_P10_RTZ_F16_F32_inreg_gfx12 ||
      MI.getOpcode() == AMDGPU::V_INTERP_P2_F16_F32_inreg_gfx11 ||
      MI.getOpcode() == AMDGPU::V_INTERP_P2_F16_F32_inreg_gfx12 ||
      MI.getOpcode() == AMDGPU::V_INTERP_P2_RTZ_F16_F32_inreg_gfx11 ||
      MI.getOpcode() == AMDGPU::V_INTERP_P2_RTZ_F16_F32_inreg_gfx12) {
    // The MCInst has this field that is not directly encoded in the
    // instruction.
    insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::op_sel);
  }
  return MCDisassembler::Success;
}

DecodeStatus AMDGPUDisassembler::convertSDWAInst(MCInst &MI) const {
  if (STI.hasFeature(AMDGPU::FeatureGFX9) ||
      STI.hasFeature(AMDGPU::FeatureGFX10)) {
    if (AMDGPU::hasNamedOperand(MI.getOpcode(), AMDGPU::OpName::sdst))
      // VOPC - insert clamp
      insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::clamp);
  } else if (STI.hasFeature(AMDGPU::FeatureVolcanicIslands)) {
    int SDst = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst);
    if (SDst != -1) {
      // VOPC - insert VCC register as sdst
      insertNamedMCOperand(MI, createRegOperand(AMDGPU::VCC),
                           AMDGPU::OpName::sdst);
    } else {
      // VOP1/2 - insert omod if present in instruction
      insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::omod);
    }
  }
  return MCDisassembler::Success;
}

struct VOPModifiers {
  unsigned OpSel = 0;
  unsigned OpSelHi = 0;
  unsigned NegLo = 0;
  unsigned NegHi = 0;
};

// Reconstruct values of VOP3/VOP3P operands such as op_sel.
// Note that these values do not affect disassembler output,
// so this is only necessary for consistency with src_modifiers.
static VOPModifiers collectVOPModifiers(const MCInst &MI,
                                        bool IsVOP3P = false) {
  VOPModifiers Modifiers;
  unsigned Opc = MI.getOpcode();
  const int ModOps[] = {AMDGPU::OpName::src0_modifiers,
                        AMDGPU::OpName::src1_modifiers,
                        AMDGPU::OpName::src2_modifiers};
  for (int J = 0; J < 3; ++J) {
    int OpIdx = AMDGPU::getNamedOperandIdx(Opc, ModOps[J]);
    if (OpIdx == -1)
      continue;

    unsigned Val = MI.getOperand(OpIdx).getImm();

    Modifiers.OpSel |= !!(Val & SISrcMods::OP_SEL_0) << J;
    if (IsVOP3P) {
      Modifiers.OpSelHi |= !!(Val & SISrcMods::OP_SEL_1) << J;
      Modifiers.NegLo |= !!(Val & SISrcMods::NEG) << J;
      Modifiers.NegHi |= !!(Val & SISrcMods::NEG_HI) << J;
    } else if (J == 0) {
      Modifiers.OpSel |= !!(Val & SISrcMods::DST_OP_SEL) << 3;
    }
  }

  return Modifiers;
}
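// Worked example: if src0_modifiers and src2_modifiers both have OP_SEL_0 set,
// the reconstructed op_sel value is 0b101 (bit J is set for source operand J).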

// MAC opcodes have special old and src2 operands.
// src2 is tied to dst, while old is not tied (but assumed to be).
bool AMDGPUDisassembler::isMacDPP(MCInst &MI) const {
  constexpr int DST_IDX = 0;
  auto Opcode = MI.getOpcode();
  const auto &Desc = MCII->get(Opcode);
  auto OldIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::old);

  if (OldIdx != -1 && Desc.getOperandConstraint(
                          OldIdx, MCOI::OperandConstraint::TIED_TO) == -1) {
    assert(AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::src2));
    assert(Desc.getOperandConstraint(
               AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2),
               MCOI::OperandConstraint::TIED_TO) == DST_IDX);
    (void)DST_IDX;
    return true;
  }

  return false;
}

// Create a dummy old operand and insert dummy unused src2_modifiers.
void AMDGPUDisassembler::convertMacDPPInst(MCInst &MI) const {
  assert(MI.getNumOperands() + 1 < MCII->get(MI.getOpcode()).getNumOperands());
  insertNamedMCOperand(MI, MCOperand::createReg(0), AMDGPU::OpName::old);
  insertNamedMCOperand(MI, MCOperand::createImm(0),
                       AMDGPU::OpName::src2_modifiers);
}

// We must check that FI == literal to reject non-genuine dpp8 insts, and we
// must first add the optional MI operands to check FI.
DecodeStatus AMDGPUDisassembler::convertDPP8Inst(MCInst &MI) const {
  unsigned Opc = MI.getOpcode();
  if (MCII->get(Opc).TSFlags & SIInstrFlags::VOP3P) {
    convertVOP3PDPPInst(MI);
  } else if ((MCII->get(Opc).TSFlags & SIInstrFlags::VOPC) ||
             AMDGPU::isVOPC64DPP(Opc)) {
    convertVOPCDPPInst(MI);
  } else {
    if (isMacDPP(MI))
      convertMacDPPInst(MI);

    unsigned DescNumOps = MCII->get(Opc).getNumOperands();
    if (MI.getNumOperands() < DescNumOps &&
        AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::op_sel)) {
      auto Mods = collectVOPModifiers(MI);
      insertNamedMCOperand(MI, MCOperand::createImm(Mods.OpSel),
                           AMDGPU::OpName::op_sel);
    } else {
      // Insert dummy unused src modifiers.
      if (MI.getNumOperands() < DescNumOps &&
          AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::src0_modifiers))
        insertNamedMCOperand(MI, MCOperand::createImm(0),
                             AMDGPU::OpName::src0_modifiers);

      if (MI.getNumOperands() < DescNumOps &&
          AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::src1_modifiers))
        insertNamedMCOperand(MI, MCOperand::createImm(0),
                             AMDGPU::OpName::src1_modifiers);
    }
  }
  return isValidDPP8(MI) ? MCDisassembler::Success : MCDisassembler::SoftFail;
}

DecodeStatus AMDGPUDisassembler::convertVOP3DPPInst(MCInst &MI) const {
  if (isMacDPP(MI))
    convertMacDPPInst(MI);

  unsigned Opc = MI.getOpcode();
  unsigned DescNumOps = MCII->get(Opc).getNumOperands();
  if (MI.getNumOperands() < DescNumOps &&
      AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::op_sel)) {
    auto Mods = collectVOPModifiers(MI);
    insertNamedMCOperand(MI, MCOperand::createImm(Mods.OpSel),
                         AMDGPU::OpName::op_sel);
  }
  return MCDisassembler::Success;
}

// Note that before gfx10, the MIMG encoding provided no information about
// VADDR size. Consequently, decoded instructions always show the address as
// if it has 1 dword, which may not actually be the case.
DecodeStatus AMDGPUDisassembler::convertMIMGInst(MCInst &MI) const {
  auto TSFlags = MCII->get(MI.getOpcode()).TSFlags;

  int VDstIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                           AMDGPU::OpName::vdst);

  int VDataIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::vdata);
  int VAddr0Idx =
      AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0);
  int RsrcOpName = TSFlags & SIInstrFlags::MIMG ? AMDGPU::OpName::srsrc
                                                : AMDGPU::OpName::rsrc;
  int RsrcIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), RsrcOpName);
  int DMaskIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::dmask);

  int TFEIdx   = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::tfe);
  int D16Idx   = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::d16);

  const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(MI.getOpcode());
  const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
      AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode);

  assert(VDataIdx != -1);
  if (BaseOpcode->BVH) {
    // Add the A16 operand for intersect_ray instructions.
    addOperand(MI, MCOperand::createImm(BaseOpcode->A16));
    return MCDisassembler::Success;
  }

  bool IsAtomic = (VDstIdx != -1);
  bool IsGather4 = TSFlags & SIInstrFlags::Gather4;
  bool IsVSample = TSFlags & SIInstrFlags::VSAMPLE;
  bool IsNSA = false;
  bool IsPartialNSA = false;
  unsigned AddrSize = Info->VAddrDwords;

  if (isGFX10Plus()) {
    unsigned DimIdx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::dim);
    int A16Idx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::a16);
    const AMDGPU::MIMGDimInfo *Dim =
        AMDGPU::getMIMGDimInfoByEncoding(MI.getOperand(DimIdx).getImm());
    const bool IsA16 = (A16Idx != -1 && MI.getOperand(A16Idx).getImm());

    AddrSize =
        AMDGPU::getAddrSizeMIMGOp(BaseOpcode, Dim, IsA16, AMDGPU::hasG16(STI));

    // VSAMPLE insts that do not use vaddr3 behave the same as NSA forms.
    // VIMAGE insts other than BVH never use vaddr4.
    IsNSA = Info->MIMGEncoding == AMDGPU::MIMGEncGfx10NSA ||
            Info->MIMGEncoding == AMDGPU::MIMGEncGfx11NSA ||
            Info->MIMGEncoding == AMDGPU::MIMGEncGfx12;
    if (!IsNSA) {
      if (!IsVSample && AddrSize > 12)
        AddrSize = 16;
    } else {
      if (AddrSize > Info->VAddrDwords) {
        if (!STI.hasFeature(AMDGPU::FeaturePartialNSAEncoding)) {
          // The NSA encoding does not contain enough operands for the
          // combination of base opcode / dimension. Should this be an error?
          return MCDisassembler::Success;
        }
        IsPartialNSA = true;
      }
    }
  }

  unsigned DMask = MI.getOperand(DMaskIdx).getImm() & 0xf;
  unsigned DstSize = IsGather4 ? 4 : std::max(llvm::popcount(DMask), 1);

  bool D16 = D16Idx >= 0 && MI.getOperand(D16Idx).getImm();
  if (D16 && AMDGPU::hasPackedD16(STI)) {
    DstSize = (DstSize + 1) / 2;
  }
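  // E.g., DMask = 0b1011 enables three channels; with packed D16 that is
  // (3 + 1) / 2 = 2 dwords of data.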

  if (TFEIdx != -1 && MI.getOperand(TFEIdx).getImm())
    DstSize += 1;

  if (DstSize == Info->VDataDwords && AddrSize == Info->VAddrDwords)
    return MCDisassembler::Success;

  int NewOpcode =
      AMDGPU::getMIMGOpcode(Info->BaseOpcode, Info->MIMGEncoding, DstSize, AddrSize);
  if (NewOpcode == -1)
    return MCDisassembler::Success;

  // Widen the register to the correct number of enabled channels.
  unsigned NewVdata = AMDGPU::NoRegister;
  if (DstSize != Info->VDataDwords) {
    auto DataRCID = MCII->get(NewOpcode).operands()[VDataIdx].RegClass;

    // Get the first subregister of VData.
    unsigned Vdata0 = MI.getOperand(VDataIdx).getReg();
    unsigned VdataSub0 = MRI.getSubReg(Vdata0, AMDGPU::sub0);
    Vdata0 = (VdataSub0 != 0)? VdataSub0 : Vdata0;

    NewVdata = MRI.getMatchingSuperReg(Vdata0, AMDGPU::sub0,
                                       &MRI.getRegClass(DataRCID));
    if (NewVdata == AMDGPU::NoRegister) {
      // It's possible to encode this such that the low register + enabled
      // components exceeds the register count.
      return MCDisassembler::Success;
    }
  }

  // If not using NSA on GFX10+, widen the vaddr0 address register to the
  // correct size. If using partial NSA on GFX11+, widen the last address
  // register.
  int VAddrSAIdx = IsPartialNSA ? (RsrcIdx - 1) : VAddr0Idx;
  unsigned NewVAddrSA = AMDGPU::NoRegister;
  if (STI.hasFeature(AMDGPU::FeatureNSAEncoding) && (!IsNSA || IsPartialNSA) &&
      AddrSize != Info->VAddrDwords) {
    unsigned VAddrSA = MI.getOperand(VAddrSAIdx).getReg();
    unsigned VAddrSubSA = MRI.getSubReg(VAddrSA, AMDGPU::sub0);
    VAddrSA = VAddrSubSA ? VAddrSubSA : VAddrSA;

    auto AddrRCID = MCII->get(NewOpcode).operands()[VAddrSAIdx].RegClass;
    NewVAddrSA = MRI.getMatchingSuperReg(VAddrSA, AMDGPU::sub0,
                                        &MRI.getRegClass(AddrRCID));
    if (!NewVAddrSA)
      return MCDisassembler::Success;
  }

  MI.setOpcode(NewOpcode);

  if (NewVdata != AMDGPU::NoRegister) {
    MI.getOperand(VDataIdx) = MCOperand::createReg(NewVdata);

    if (IsAtomic) {
      // Atomic operations have an additional operand (a copy of data).
      MI.getOperand(VDstIdx) = MCOperand::createReg(NewVdata);
    }
  }

  if (NewVAddrSA) {
    MI.getOperand(VAddrSAIdx) = MCOperand::createReg(NewVAddrSA);
  } else if (IsNSA) {
    assert(AddrSize <= Info->VAddrDwords);
    MI.erase(MI.begin() + VAddr0Idx + AddrSize,
             MI.begin() + VAddr0Idx + Info->VAddrDwords);
  }

  return MCDisassembler::Success;
}

// OpSel and neg bits are used in src_modifiers and standalone operands. The
// autogenerated decoder only adds them to src_modifiers, so manually add the
// bits to the other operands.
DecodeStatus AMDGPUDisassembler::convertVOP3PDPPInst(MCInst &MI) const {
  unsigned Opc = MI.getOpcode();
  unsigned DescNumOps = MCII->get(Opc).getNumOperands();
  auto Mods = collectVOPModifiers(MI, true);

  if (MI.getNumOperands() < DescNumOps &&
      AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::vdst_in))
    insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::vdst_in);

  if (MI.getNumOperands() < DescNumOps &&
      AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::op_sel))
    insertNamedMCOperand(MI, MCOperand::createImm(Mods.OpSel),
                         AMDGPU::OpName::op_sel);
  if (MI.getNumOperands() < DescNumOps &&
      AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::op_sel_hi))
    insertNamedMCOperand(MI, MCOperand::createImm(Mods.OpSelHi),
                         AMDGPU::OpName::op_sel_hi);
  if (MI.getNumOperands() < DescNumOps &&
      AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::neg_lo))
    insertNamedMCOperand(MI, MCOperand::createImm(Mods.NegLo),
                         AMDGPU::OpName::neg_lo);
  if (MI.getNumOperands() < DescNumOps &&
      AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::neg_hi))
    insertNamedMCOperand(MI, MCOperand::createImm(Mods.NegHi),
                         AMDGPU::OpName::neg_hi);

  return MCDisassembler::Success;
}

// Create a dummy old operand and insert optional operands.
DecodeStatus AMDGPUDisassembler::convertVOPCDPPInst(MCInst &MI) const {
  unsigned Opc = MI.getOpcode();
  unsigned DescNumOps = MCII->get(Opc).getNumOperands();

  if (MI.getNumOperands() < DescNumOps &&
      AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::old))
    insertNamedMCOperand(MI, MCOperand::createReg(0), AMDGPU::OpName::old);

  if (MI.getNumOperands() < DescNumOps &&
      AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::src0_modifiers))
    insertNamedMCOperand(MI, MCOperand::createImm(0),
                         AMDGPU::OpName::src0_modifiers);

  if (MI.getNumOperands() < DescNumOps &&
      AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::src1_modifiers))
    insertNamedMCOperand(MI, MCOperand::createImm(0),
                         AMDGPU::OpName::src1_modifiers);
  return MCDisassembler::Success;
}

DecodeStatus AMDGPUDisassembler::convertFMAanyK(MCInst &MI,
                                                int ImmLitIdx) const {
  assert(HasLiteral && "Should have decoded a literal");
  const MCInstrDesc &Desc = MCII->get(MI.getOpcode());
  unsigned DescNumOps = Desc.getNumOperands();
  insertNamedMCOperand(MI, MCOperand::createImm(Literal),
                       AMDGPU::OpName::immDeferred);
  assert(DescNumOps == MI.getNumOperands());
  for (unsigned I = 0; I < DescNumOps; ++I) {
    auto &Op = MI.getOperand(I);
    auto OpType = Desc.operands()[I].OperandType;
    bool IsDeferredOp = (OpType == AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED ||
                         OpType == AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED);
    if (Op.isImm() && Op.getImm() == AMDGPU::EncValues::LITERAL_CONST &&
        IsDeferredOp)
      Op.setImm(Literal);
  }
  return MCDisassembler::Success;
}

const char* AMDGPUDisassembler::getRegClassName(unsigned RegClassID) const {
  return getContext().getRegisterInfo()->
    getRegClassName(&AMDGPUMCRegisterClasses[RegClassID]);
}

inline
MCOperand AMDGPUDisassembler::errOperand(unsigned V,
                                         const Twine& ErrMsg) const {
  *CommentStream << "Error: " + ErrMsg;

  // ToDo: add support for error operands to MCInst.h
  // return MCOperand::createError(V);
  return MCOperand();
}

inline
MCOperand AMDGPUDisassembler::createRegOperand(unsigned int RegId) const {
  return MCOperand::createReg(AMDGPU::getMCReg(RegId, STI));
}

inline
MCOperand AMDGPUDisassembler::createRegOperand(unsigned RegClassID,
                                               unsigned Val) const {
  const auto& RegCl = AMDGPUMCRegisterClasses[RegClassID];
  if (Val >= RegCl.getNumRegs())
    return errOperand(Val, Twine(getRegClassName(RegClassID)) +
                           ": unknown register " + Twine(Val));
  return createRegOperand(RegCl.getRegister(Val));
}

inline
MCOperand AMDGPUDisassembler::createSRegOperand(unsigned SRegClassID,
                                                unsigned Val) const {
  // ToDo: SI/CI have 104 SGPRs, VI has 102.
  // Valery: here we accept as much as we can and let the assembler sort it
  // out.
  int shift = 0;
  switch (SRegClassID) {
  case AMDGPU::SGPR_32RegClassID:
  case AMDGPU::TTMP_32RegClassID:
    break;
  case AMDGPU::SGPR_64RegClassID:
  case AMDGPU::TTMP_64RegClassID:
    shift = 1;
    break;
  case AMDGPU::SGPR_96RegClassID:
  case AMDGPU::TTMP_96RegClassID:
  case AMDGPU::SGPR_128RegClassID:
  case AMDGPU::TTMP_128RegClassID:
  // ToDo: unclear if s[100:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  case AMDGPU::SGPR_256RegClassID:
  case AMDGPU::TTMP_256RegClassID:
  // ToDo: unclear if s[96:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  case AMDGPU::SGPR_288RegClassID:
  case AMDGPU::TTMP_288RegClassID:
  case AMDGPU::SGPR_320RegClassID:
  case AMDGPU::TTMP_320RegClassID:
  case AMDGPU::SGPR_352RegClassID:
  case AMDGPU::TTMP_352RegClassID:
  case AMDGPU::SGPR_384RegClassID:
  case AMDGPU::TTMP_384RegClassID:
  case AMDGPU::SGPR_512RegClassID:
  case AMDGPU::TTMP_512RegClassID:
    shift = 2;
    break;
  // ToDo: unclear if s[88:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  default:
    llvm_unreachable("unhandled register class");
  }

  if (Val % (1 << shift)) {
    *CommentStream << "Warning: " << getRegClassName(SRegClassID)
                   << ": scalar reg isn't aligned " << Val;
  }

  return createRegOperand(SRegClassID, Val >> shift);
}
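// For example, for SGPR_64RegClassID (shift = 1) an encoded Val of 4 selects
// the register pair s[4:5], while an odd Val would trigger the alignment
// warning above.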

MCOperand AMDGPUDisassembler::createVGPR16Operand(unsigned RegIdx,
                                                  bool IsHi) const {
  unsigned RCID =
      IsHi ? AMDGPU::VGPR_HI16RegClassID : AMDGPU::VGPR_LO16RegClassID;
  return createRegOperand(RCID, RegIdx);
}

// Decode literals for insts which always have a literal in the encoding.
MCOperand
AMDGPUDisassembler::decodeMandatoryLiteralConstant(unsigned Val) const {
  if (HasLiteral) {
    assert(
        AMDGPU::hasVOPD(STI) &&
        "Should only decode multiple kimm with VOPD, check VSrc operand types");
    if (Literal != Val)
      return errOperand(Val, "More than one unique literal is illegal");
  }
  HasLiteral = true;
  Literal = Val;
  return MCOperand::createImm(Literal);
}
1300 
1301 MCOperand AMDGPUDisassembler::decodeLiteralConstant(bool ExtendFP64) const {
1302   // For now all literal constants are supposed to be unsigned integer
1303   // ToDo: deal with signed/unsigned 64-bit integer constants
1304   // ToDo: deal with float/double constants
1305   if (!HasLiteral) {
1306     if (Bytes.size() < 4) {
1307       return errOperand(0, "cannot read literal, inst bytes left " +
1308                         Twine(Bytes.size()));
1309     }
1310     HasLiteral = true;
1311     Literal = Literal64 = eatBytes<uint32_t>(Bytes);
1312     if (ExtendFP64)
1313       Literal64 <<= 32;
1314   }
1315   return MCOperand::createImm(ExtendFP64 ? Literal64 : Literal);
1316 }
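// Example for decodeLiteralConstant above (illustrative): for a
// double-precision operand the 32-bit literal supplies the high half, so a
// fetched 0x3FF00000 is shifted to 0x3FF0000000000000, i.e. 1.0.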
1317 
1318 MCOperand AMDGPUDisassembler::decodeIntImmed(unsigned Imm) {
1319   using namespace AMDGPU::EncValues;
1320 
1321   assert(Imm >= INLINE_INTEGER_C_MIN && Imm <= INLINE_INTEGER_C_MAX);
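  // Illustrative mapping, assuming the usual enum values (128..192 encode
  // 0..64 and 193..208 encode -1..-16): Imm = 129 -> 1, Imm = 193 -> -1.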
1322   // The int64_t cast prevents unsigned wraparound in the negative case.
1323   return MCOperand::createImm((Imm <= INLINE_INTEGER_C_POSITIVE_MAX) ?
1324     (static_cast<int64_t>(Imm) - INLINE_INTEGER_C_MIN) :
1325     (INLINE_INTEGER_C_POSITIVE_MAX - static_cast<int64_t>(Imm)));
1326 }
1327 
1328 static int64_t getInlineImmVal32(unsigned Imm) {
1329   switch (Imm) {
1330   case 240:
1331     return llvm::bit_cast<uint32_t>(0.5f);
1332   case 241:
1333     return llvm::bit_cast<uint32_t>(-0.5f);
1334   case 242:
1335     return llvm::bit_cast<uint32_t>(1.0f);
1336   case 243:
1337     return llvm::bit_cast<uint32_t>(-1.0f);
1338   case 244:
1339     return llvm::bit_cast<uint32_t>(2.0f);
1340   case 245:
1341     return llvm::bit_cast<uint32_t>(-2.0f);
1342   case 246:
1343     return llvm::bit_cast<uint32_t>(4.0f);
1344   case 247:
1345     return llvm::bit_cast<uint32_t>(-4.0f);
1346   case 248: // 1 / (2 * PI)
1347     return 0x3e22f983;
1348   default:
1349     llvm_unreachable("invalid fp inline imm");
1350   }
1351 }
1352 
1353 static int64_t getInlineImmVal64(unsigned Imm) {
1354   switch (Imm) {
1355   case 240:
1356     return llvm::bit_cast<uint64_t>(0.5);
1357   case 241:
1358     return llvm::bit_cast<uint64_t>(-0.5);
1359   case 242:
1360     return llvm::bit_cast<uint64_t>(1.0);
1361   case 243:
1362     return llvm::bit_cast<uint64_t>(-1.0);
1363   case 244:
1364     return llvm::bit_cast<uint64_t>(2.0);
1365   case 245:
1366     return llvm::bit_cast<uint64_t>(-2.0);
1367   case 246:
1368     return llvm::bit_cast<uint64_t>(4.0);
1369   case 247:
1370     return llvm::bit_cast<uint64_t>(-4.0);
1371   case 248: // 1 / (2 * PI)
1372     return 0x3fc45f306dc9c882;
1373   default:
1374     llvm_unreachable("invalid fp inline imm");
1375   }
1376 }
1377 
1378 static int64_t getInlineImmVal16(unsigned Imm) {
1379   switch (Imm) {
1380   case 240:
1381     return 0x3800;
1382   case 241:
1383     return 0xB800;
1384   case 242:
1385     return 0x3C00;
1386   case 243:
1387     return 0xBC00;
1388   case 244:
1389     return 0x4000;
1390   case 245:
1391     return 0xC000;
1392   case 246:
1393     return 0x4400;
1394   case 247:
1395     return 0xC400;
1396   case 248: // 1 / (2 * PI)
1397     return 0x3118;
1398   default:
1399     llvm_unreachable("invalid fp inline imm");
1400   }
1401 }
1402 
1403 MCOperand AMDGPUDisassembler::decodeFPImmed(unsigned ImmWidth, unsigned Imm) {
1404   assert(Imm >= AMDGPU::EncValues::INLINE_FLOATING_C_MIN &&
1405          Imm <= AMDGPU::EncValues::INLINE_FLOATING_C_MAX);
1406 
1407   // ToDo: case 248: 1/(2*PI) - is allowed only on VI
1408   // ImmWidth 0 is a default case where operand should not allow immediates.
1409   // Imm value is still decoded into 32 bit immediate operand, inst printer will
1410   // use it to print verbose error message.
1411   switch (ImmWidth) {
1412   case 0:
1413   case 32:
1414     return MCOperand::createImm(getInlineImmVal32(Imm));
1415   case 64:
1416     return MCOperand::createImm(getInlineImmVal64(Imm));
1417   case 16:
1418     return MCOperand::createImm(getInlineImmVal16(Imm));
1419   default:
1420     llvm_unreachable("implement me");
1421   }
1422 }
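// Worked example for decodeFPImmed above (illustrative): encoding 240 always
// means 0.5, but the returned bit pattern depends on ImmWidth: 0x3800 for
// 16-bit, 0x3F000000 for 32-bit, and 0x3FE0000000000000 for 64-bit.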
1423 
1424 unsigned AMDGPUDisassembler::getVgprClassId(const OpWidthTy Width) const {
1425   using namespace AMDGPU;
1426 
1427   assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
1428   switch (Width) {
1429   default: // fallthrough
1430   case OPW32:
1431   case OPW16:
1432   case OPWV216:
1433     return VGPR_32RegClassID;
1434   case OPW64:
1435   case OPWV232: return VReg_64RegClassID;
1436   case OPW96: return VReg_96RegClassID;
1437   case OPW128: return VReg_128RegClassID;
1438   case OPW160: return VReg_160RegClassID;
1439   case OPW256: return VReg_256RegClassID;
1440   case OPW288: return VReg_288RegClassID;
1441   case OPW320: return VReg_320RegClassID;
1442   case OPW352: return VReg_352RegClassID;
1443   case OPW384: return VReg_384RegClassID;
1444   case OPW512: return VReg_512RegClassID;
1445   case OPW1024: return VReg_1024RegClassID;
1446   }
1447 }
1448 
1449 unsigned AMDGPUDisassembler::getAgprClassId(const OpWidthTy Width) const {
1450   using namespace AMDGPU;
1451 
1452   assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
1453   switch (Width) {
1454   default: // fallthrough
1455   case OPW32:
1456   case OPW16:
1457   case OPWV216:
1458     return AGPR_32RegClassID;
1459   case OPW64:
1460   case OPWV232: return AReg_64RegClassID;
1461   case OPW96: return AReg_96RegClassID;
1462   case OPW128: return AReg_128RegClassID;
1463   case OPW160: return AReg_160RegClassID;
1464   case OPW256: return AReg_256RegClassID;
1465   case OPW288: return AReg_288RegClassID;
1466   case OPW320: return AReg_320RegClassID;
1467   case OPW352: return AReg_352RegClassID;
1468   case OPW384: return AReg_384RegClassID;
1469   case OPW512: return AReg_512RegClassID;
1470   case OPW1024: return AReg_1024RegClassID;
1471   }
1472 }
1473 
1475 unsigned AMDGPUDisassembler::getSgprClassId(const OpWidthTy Width) const {
1476   using namespace AMDGPU;
1477 
1478   assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
1479   switch (Width) {
1480   default: // fallthrough
1481   case OPW32:
1482   case OPW16:
1483   case OPWV216:
1484     return SGPR_32RegClassID;
1485   case OPW64:
1486   case OPWV232: return SGPR_64RegClassID;
1487   case OPW96: return SGPR_96RegClassID;
1488   case OPW128: return SGPR_128RegClassID;
1489   case OPW160: return SGPR_160RegClassID;
1490   case OPW256: return SGPR_256RegClassID;
1491   case OPW288: return SGPR_288RegClassID;
1492   case OPW320: return SGPR_320RegClassID;
1493   case OPW352: return SGPR_352RegClassID;
1494   case OPW384: return SGPR_384RegClassID;
1495   case OPW512: return SGPR_512RegClassID;
1496   }
1497 }
1498 
1499 unsigned AMDGPUDisassembler::getTtmpClassId(const OpWidthTy Width) const {
1500   using namespace AMDGPU;
1501 
1502   assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
1503   switch (Width) {
1504   default: // fallthrough
1505   case OPW32:
1506   case OPW16:
1507   case OPWV216:
1508     return TTMP_32RegClassID;
1509   case OPW64:
1510   case OPWV232: return TTMP_64RegClassID;
1511   case OPW128: return TTMP_128RegClassID;
1512   case OPW256: return TTMP_256RegClassID;
1513   case OPW288: return TTMP_288RegClassID;
1514   case OPW320: return TTMP_320RegClassID;
1515   case OPW352: return TTMP_352RegClassID;
1516   case OPW384: return TTMP_384RegClassID;
1517   case OPW512: return TTMP_512RegClassID;
1518   }
1519 }
1520 
1521 int AMDGPUDisassembler::getTTmpIdx(unsigned Val) const {
1522   using namespace AMDGPU::EncValues;
1523 
1524   unsigned TTmpMin = isGFX9Plus() ? TTMP_GFX9PLUS_MIN : TTMP_VI_MIN;
1525   unsigned TTmpMax = isGFX9Plus() ? TTMP_GFX9PLUS_MAX : TTMP_VI_MAX;
1526 
1527   return (TTmpMin <= Val && Val <= TTmpMax) ? Val - TTmpMin : -1;
1528 }
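// Example for getTTmpIdx above (illustrative, assuming TTMP_GFX9PLUS_MIN is
// 108): on GFX9+ an encoding of 110 yields index 2, i.e. ttmp2; values
// outside the TTMP range return -1 so callers can try other decodings.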
1529 
1530 MCOperand AMDGPUDisassembler::decodeSrcOp(const OpWidthTy Width, unsigned Val,
1531                                           bool MandatoryLiteral,
1532                                           unsigned ImmWidth, bool IsFP) const {
1533   using namespace AMDGPU::EncValues;
1534 
1535   assert(Val < 1024); // enum10
1536 
1537   bool IsAGPR = Val & 512;
1538   Val &= 511;
1539 
1540   if (VGPR_MIN <= Val && Val <= VGPR_MAX) {
1541     return createRegOperand(IsAGPR ? getAgprClassId(Width)
1542                                    : getVgprClassId(Width), Val - VGPR_MIN);
1543   }
1544   return decodeNonVGPRSrcOp(Width, Val & 0xFF, MandatoryLiteral, ImmWidth,
1545                             IsFP);
1546 }
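// Example for decodeSrcOp above (illustrative, assuming VGPR_MIN == 256 in
// EncValues): a 10-bit source encoding of 261 decodes to v5, while 773
// (512 | 261) sets the AGPR bit and decodes to a5; encodings below VGPR_MIN
// fall through to decodeNonVGPRSrcOp.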
1547 
1548 MCOperand AMDGPUDisassembler::decodeNonVGPRSrcOp(const OpWidthTy Width,
1549                                                  unsigned Val,
1550                                                  bool MandatoryLiteral,
1551                                                  unsigned ImmWidth,
1552                                                  bool IsFP) const {
1553   // Cases where Val{8} is 1 (VGPR, AGPR, or true16 VGPR) should have been
1554   // decoded earlier.
1555   assert(Val < (1 << 8) && "9-bit Src encoding when Val{8} is 0");
1556   using namespace AMDGPU::EncValues;
1557 
1558   if (Val <= SGPR_MAX) {
1559     // "SGPR_MIN <= Val" is always true and causes compilation warning.
1560     static_assert(SGPR_MIN == 0);
1561     return createSRegOperand(getSgprClassId(Width), Val - SGPR_MIN);
1562   }
1563 
1564   int TTmpIdx = getTTmpIdx(Val);
1565   if (TTmpIdx >= 0) {
1566     return createSRegOperand(getTtmpClassId(Width), TTmpIdx);
1567   }
1568 
1569   if (INLINE_INTEGER_C_MIN <= Val && Val <= INLINE_INTEGER_C_MAX)
1570     return decodeIntImmed(Val);
1571 
1572   if (INLINE_FLOATING_C_MIN <= Val && Val <= INLINE_FLOATING_C_MAX)
1573     return decodeFPImmed(ImmWidth, Val);
1574 
1575   if (Val == LITERAL_CONST) {
1576     if (MandatoryLiteral)
1577       // Keep a sentinel value for deferred setting
1578       return MCOperand::createImm(LITERAL_CONST);
1579     return decodeLiteralConstant(IsFP && ImmWidth == 64);
1581   }
1582 
1583   switch (Width) {
1584   case OPW32:
1585   case OPW16:
1586   case OPWV216:
1587     return decodeSpecialReg32(Val);
1588   case OPW64:
1589   case OPWV232:
1590     return decodeSpecialReg64(Val);
1591   default:
1592     llvm_unreachable("unexpected immediate type");
1593   }
1594 }
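// For reference, the decode order above is: SGPRs first, then trap
// temporaries, then inline integer and floating-point constants, then the
// literal-constant marker, and finally the width-dependent special registers.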
1595 
1596 // Bit 0 of DstY isn't stored in the instruction, because it's always the
1597 // opposite of bit 0 of DstX.
1598 MCOperand AMDGPUDisassembler::decodeVOPDDstYOp(MCInst &Inst,
1599                                                unsigned Val) const {
1600   int VDstXInd =
1601       AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::vdstX);
1602   assert(VDstXInd != -1);
1603   assert(Inst.getOperand(VDstXInd).isReg());
1604   unsigned XDstReg = MRI.getEncodingValue(Inst.getOperand(VDstXInd).getReg());
1605   Val |= ~XDstReg & 1;
1606   auto Width = llvm::AMDGPUDisassembler::OPW32;
1607   return createRegOperand(getVgprClassId(Width), Val);
1608 }
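// Example (illustrative): VOPD requires vdstX and vdstY to have opposite
// parity, so if vdstX landed on an even-encoded VGPR (bit 0 clear),
// ~XDstReg & 1 forces bit 0 of the DstY value to 1, and vice versa.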
1609 
1610 MCOperand AMDGPUDisassembler::decodeSpecialReg32(unsigned Val) const {
1611   using namespace AMDGPU;
1612 
1613   switch (Val) {
1614   // clang-format off
1615   case 102: return createRegOperand(FLAT_SCR_LO);
1616   case 103: return createRegOperand(FLAT_SCR_HI);
1617   case 104: return createRegOperand(XNACK_MASK_LO);
1618   case 105: return createRegOperand(XNACK_MASK_HI);
1619   case 106: return createRegOperand(VCC_LO);
1620   case 107: return createRegOperand(VCC_HI);
1621   case 108: return createRegOperand(TBA_LO);
1622   case 109: return createRegOperand(TBA_HI);
1623   case 110: return createRegOperand(TMA_LO);
1624   case 111: return createRegOperand(TMA_HI);
1625   case 124:
1626     return isGFX11Plus() ? createRegOperand(SGPR_NULL) : createRegOperand(M0);
1627   case 125:
1628     return isGFX11Plus() ? createRegOperand(M0) : createRegOperand(SGPR_NULL);
1629   case 126: return createRegOperand(EXEC_LO);
1630   case 127: return createRegOperand(EXEC_HI);
1631   case 235: return createRegOperand(SRC_SHARED_BASE_LO);
1632   case 236: return createRegOperand(SRC_SHARED_LIMIT_LO);
1633   case 237: return createRegOperand(SRC_PRIVATE_BASE_LO);
1634   case 238: return createRegOperand(SRC_PRIVATE_LIMIT_LO);
1635   case 239: return createRegOperand(SRC_POPS_EXITING_WAVE_ID);
1636   case 251: return createRegOperand(SRC_VCCZ);
1637   case 252: return createRegOperand(SRC_EXECZ);
1638   case 253: return createRegOperand(SRC_SCC);
1639   case 254: return createRegOperand(LDS_DIRECT);
1640   default: break;
1641   // clang-format on
1642   }
1643   return errOperand(Val, "unknown operand encoding " + Twine(Val));
1644 }
1645 
1646 MCOperand AMDGPUDisassembler::decodeSpecialReg64(unsigned Val) const {
1647   using namespace AMDGPU;
1648 
1649   switch (Val) {
1650   case 102: return createRegOperand(FLAT_SCR);
1651   case 104: return createRegOperand(XNACK_MASK);
1652   case 106: return createRegOperand(VCC);
1653   case 108: return createRegOperand(TBA);
1654   case 110: return createRegOperand(TMA);
1655   case 124:
1656     if (isGFX11Plus())
1657       return createRegOperand(SGPR_NULL);
1658     break;
1659   case 125:
1660     if (!isGFX11Plus())
1661       return createRegOperand(SGPR_NULL);
1662     break;
1663   case 126: return createRegOperand(EXEC);
1664   case 235: return createRegOperand(SRC_SHARED_BASE);
1665   case 236: return createRegOperand(SRC_SHARED_LIMIT);
1666   case 237: return createRegOperand(SRC_PRIVATE_BASE);
1667   case 238: return createRegOperand(SRC_PRIVATE_LIMIT);
1668   case 239: return createRegOperand(SRC_POPS_EXITING_WAVE_ID);
1669   case 251: return createRegOperand(SRC_VCCZ);
1670   case 252: return createRegOperand(SRC_EXECZ);
1671   case 253: return createRegOperand(SRC_SCC);
1672   default: break;
1673   }
1674   return errOperand(Val, "unknown operand encoding " + Twine(Val));
1675 }
1676 
1677 MCOperand AMDGPUDisassembler::decodeSDWASrc(const OpWidthTy Width,
1678                                             const unsigned Val,
1679                                             unsigned ImmWidth) const {
1680   using namespace AMDGPU::SDWA;
1681   using namespace AMDGPU::EncValues;
1682 
1683   if (STI.hasFeature(AMDGPU::FeatureGFX9) ||
1684       STI.hasFeature(AMDGPU::FeatureGFX10)) {
1685     // The cast to int avoids a tautological-compare warning
1686     // ("comparison with unsigned is always true").
1687     if (int(SDWA9EncValues::SRC_VGPR_MIN) <= int(Val) &&
1688         Val <= SDWA9EncValues::SRC_VGPR_MAX) {
1689       return createRegOperand(getVgprClassId(Width),
1690                               Val - SDWA9EncValues::SRC_VGPR_MIN);
1691     }
1692     if (SDWA9EncValues::SRC_SGPR_MIN <= Val &&
1693         Val <= (isGFX10Plus() ? SDWA9EncValues::SRC_SGPR_MAX_GFX10
1694                               : SDWA9EncValues::SRC_SGPR_MAX_SI)) {
1695       return createSRegOperand(getSgprClassId(Width),
1696                                Val - SDWA9EncValues::SRC_SGPR_MIN);
1697     }
1698     if (SDWA9EncValues::SRC_TTMP_MIN <= Val &&
1699         Val <= SDWA9EncValues::SRC_TTMP_MAX) {
1700       return createSRegOperand(getTtmpClassId(Width),
1701                                Val - SDWA9EncValues::SRC_TTMP_MIN);
1702     }
1703 
1704     const unsigned SVal = Val - SDWA9EncValues::SRC_SGPR_MIN;
1705 
1706     if (INLINE_INTEGER_C_MIN <= SVal && SVal <= INLINE_INTEGER_C_MAX)
1707       return decodeIntImmed(SVal);
1708 
1709     if (INLINE_FLOATING_C_MIN <= SVal && SVal <= INLINE_FLOATING_C_MAX)
1710       return decodeFPImmed(ImmWidth, SVal);
1711 
1712     return decodeSpecialReg32(SVal);
1713   }
1714   if (STI.hasFeature(AMDGPU::FeatureVolcanicIslands))
1715     return createRegOperand(getVgprClassId(Width), Val);
1716   llvm_unreachable("unsupported target");
1717 }
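// Note on decodeSDWASrc above: in the SDWA9 encoding, inline constants and
// special registers share the value space above the SGPRs, so rebasing with
// SVal = Val - SRC_SGPR_MIN maps them onto the same numbering that
// decodeIntImmed, decodeFPImmed, and decodeSpecialReg32 already understand.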
1718 
1719 MCOperand AMDGPUDisassembler::decodeSDWASrc16(unsigned Val) const {
1720   return decodeSDWASrc(OPW16, Val, 16);
1721 }
1722 
1723 MCOperand AMDGPUDisassembler::decodeSDWASrc32(unsigned Val) const {
1724   return decodeSDWASrc(OPW32, Val, 32);
1725 }
1726 
1727 MCOperand AMDGPUDisassembler::decodeSDWAVopcDst(unsigned Val) const {
1728   using namespace AMDGPU::SDWA;
1729 
1730   assert((STI.hasFeature(AMDGPU::FeatureGFX9) ||
1731           STI.hasFeature(AMDGPU::FeatureGFX10)) &&
1732          "SDWAVopcDst should be present only on GFX9+");
1733 
1734   bool IsWave64 = STI.hasFeature(AMDGPU::FeatureWavefrontSize64);
1735 
1736   if (Val & SDWA9EncValues::VOPC_DST_VCC_MASK) {
1737     Val &= SDWA9EncValues::VOPC_DST_SGPR_MASK;
1738 
1739     int TTmpIdx = getTTmpIdx(Val);
1740     if (TTmpIdx >= 0) {
1741       auto TTmpClsId = getTtmpClassId(IsWave64 ? OPW64 : OPW32);
1742       return createSRegOperand(TTmpClsId, TTmpIdx);
1743     }
1744     if (Val > SGPR_MAX)
1745       return IsWave64 ? decodeSpecialReg64(Val) : decodeSpecialReg32(Val);
1746     return createSRegOperand(getSgprClassId(IsWave64 ? OPW64 : OPW32), Val);
1747   }
1748   return createRegOperand(IsWave64 ? AMDGPU::VCC : AMDGPU::VCC_LO);
1752 }
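// Note on decodeSDWAVopcDst above: with VOPC_DST_VCC_MASK clear, the compare
// result goes to the implicit VCC (VCC_LO in wave32); otherwise the masked
// value names an explicit SGPR, TTMP, or special register, decoded with the
// wave-size-dependent operand width.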
1753 
1754 MCOperand AMDGPUDisassembler::decodeBoolReg(unsigned Val) const {
1755   return STI.hasFeature(AMDGPU::FeatureWavefrontSize64)
1756              ? decodeSrcOp(OPW64, Val)
1757              : decodeSrcOp(OPW32, Val);
1758 }
1759 
1760 MCOperand AMDGPUDisassembler::decodeSplitBarrier(unsigned Val) const {
1761   return decodeSrcOp(OPW32, Val);
1762 }
1763 
1764 bool AMDGPUDisassembler::isVI() const {
1765   return STI.hasFeature(AMDGPU::FeatureVolcanicIslands);
1766 }
1767 
1768 bool AMDGPUDisassembler::isGFX9() const { return AMDGPU::isGFX9(STI); }
1769 
1770 bool AMDGPUDisassembler::isGFX90A() const {
1771   return STI.hasFeature(AMDGPU::FeatureGFX90AInsts);
1772 }
1773 
1774 bool AMDGPUDisassembler::isGFX9Plus() const { return AMDGPU::isGFX9Plus(STI); }
1775 
1776 bool AMDGPUDisassembler::isGFX10() const { return AMDGPU::isGFX10(STI); }
1777 
1778 bool AMDGPUDisassembler::isGFX10Plus() const {
1779   return AMDGPU::isGFX10Plus(STI);
1780 }
1781 
1782 bool AMDGPUDisassembler::isGFX11() const {
1783   return STI.hasFeature(AMDGPU::FeatureGFX11);
1784 }
1785 
1786 bool AMDGPUDisassembler::isGFX11Plus() const {
1787   return AMDGPU::isGFX11Plus(STI);
1788 }
1789 
1790 bool AMDGPUDisassembler::isGFX12Plus() const {
1791   return AMDGPU::isGFX12Plus(STI);
1792 }
1793 
1794 bool AMDGPUDisassembler::hasArchitectedFlatScratch() const {
1795   return STI.hasFeature(AMDGPU::FeatureArchitectedFlatScratch);
1796 }
1797 
1798 bool AMDGPUDisassembler::hasKernargPreload() const {
1799   return AMDGPU::hasKernargPreload(STI);
1800 }
1801 
1802 //===----------------------------------------------------------------------===//
1803 // AMDGPU specific symbol handling
1804 //===----------------------------------------------------------------------===//
1805 #define GET_FIELD(MASK) (AMDHSA_BITS_GET(FourByteBuffer, MASK))
1806 #define PRINT_DIRECTIVE(DIRECTIVE, MASK)                                       \
1807   do {                                                                         \
1808     KdStream << Indent << DIRECTIVE " " << GET_FIELD(MASK) << '\n';            \
1809   } while (0)
1810 #define PRINT_PSEUDO_DIRECTIVE_COMMENT(DIRECTIVE, MASK)                        \
1811   do {                                                                         \
1812     KdStream << Indent << MAI.getCommentString() << ' ' << DIRECTIVE " "       \
1813              << GET_FIELD(MASK) << '\n';                                       \
1814   } while (0)
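// For example (illustrative), PRINT_DIRECTIVE(".amdhsa_float_round_mode_32",
// COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_32) extracts that field from
// FourByteBuffer and appends a line such as
//   \t.amdhsa_float_round_mode_32 0
// to the kernel descriptor stream.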
1815 
1816 // NOLINTNEXTLINE(readability-identifier-naming)
1817 MCDisassembler::DecodeStatus AMDGPUDisassembler::decodeCOMPUTE_PGM_RSRC1(
1818     uint32_t FourByteBuffer, raw_string_ostream &KdStream) const {
1819   using namespace amdhsa;
1820   StringRef Indent = "\t";
1821 
1822   // We cannot accurately backward compute #VGPRs used from
1823   // GRANULATED_WORKITEM_VGPR_COUNT. All we need is for the reassembled binary
1824   // to contain the same GRANULATED_WORKITEM_VGPR_COUNT value, so we simply
1825   // calculate the inverse of what the assembler does.
1826 
1827   uint32_t GranulatedWorkitemVGPRCount =
1828       GET_FIELD(COMPUTE_PGM_RSRC1_GRANULATED_WORKITEM_VGPR_COUNT);
1829 
1830   uint32_t NextFreeVGPR =
1831       (GranulatedWorkitemVGPRCount + 1) *
1832       AMDGPU::IsaInfo::getVGPREncodingGranule(&STI, EnableWavefrontSize32);
1833 
1834   KdStream << Indent << ".amdhsa_next_free_vgpr " << NextFreeVGPR << '\n';
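  // E.g. (illustrative): with an encoding granule of 4 (a typical wave64
  // value), a stored count of 3 round-trips to .amdhsa_next_free_vgpr 16,
  // which the assembler granulates back to 3.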
1835 
1836   // We cannot backward compute values used to calculate
1837   // GRANULATED_WAVEFRONT_SGPR_COUNT. Hence the original values for the
1838   // following directives can't be computed:
1839   // .amdhsa_reserve_vcc
1840   // .amdhsa_reserve_flat_scratch
1841   // .amdhsa_reserve_xnack_mask
1842   // They take their respective default values if not specified in the assembly.
1843   //
1844   // GRANULATED_WAVEFRONT_SGPR_COUNT
1845   //    = f(NEXT_FREE_SGPR + VCC + FLAT_SCRATCH + XNACK_MASK)
1846   //
1847   // We compute the inverse as though all directives apart from NEXT_FREE_SGPR
1848   // are set to 0. So while disassembling we consider that:
1849   //
1850   // GRANULATED_WAVEFRONT_SGPR_COUNT
1851   //    = f(NEXT_FREE_SGPR + 0 + 0 + 0)
1852   //
1853   // The disassembler cannot recover the original values of those 3 directives.
1854 
1855   uint32_t GranulatedWavefrontSGPRCount =
1856       GET_FIELD(COMPUTE_PGM_RSRC1_GRANULATED_WAVEFRONT_SGPR_COUNT);
1857 
1858   if (isGFX10Plus() && GranulatedWavefrontSGPRCount)
1859     return MCDisassembler::Fail;
1860 
1861   uint32_t NextFreeSGPR = (GranulatedWavefrontSGPRCount + 1) *
1862                           AMDGPU::IsaInfo::getSGPREncodingGranule(&STI);
1863 
1864   KdStream << Indent << ".amdhsa_reserve_vcc " << 0 << '\n';
1865   if (!hasArchitectedFlatScratch())
1866     KdStream << Indent << ".amdhsa_reserve_flat_scratch " << 0 << '\n';
1867   KdStream << Indent << ".amdhsa_reserve_xnack_mask " << 0 << '\n';
1868   KdStream << Indent << ".amdhsa_next_free_sgpr " << NextFreeSGPR << "\n";
1869 
1870   if (FourByteBuffer & COMPUTE_PGM_RSRC1_PRIORITY)
1871     return MCDisassembler::Fail;
1872 
1873   PRINT_DIRECTIVE(".amdhsa_float_round_mode_32",
1874                   COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_32);
1875   PRINT_DIRECTIVE(".amdhsa_float_round_mode_16_64",
1876                   COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_16_64);
1877   PRINT_DIRECTIVE(".amdhsa_float_denorm_mode_32",
1878                   COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_32);
1879   PRINT_DIRECTIVE(".amdhsa_float_denorm_mode_16_64",
1880                   COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64);
1881 
1882   if (FourByteBuffer & COMPUTE_PGM_RSRC1_PRIV)
1883     return MCDisassembler::Fail;
1884 
1885   if (!isGFX12Plus())
1886     PRINT_DIRECTIVE(".amdhsa_dx10_clamp",
1887                     COMPUTE_PGM_RSRC1_GFX6_GFX11_ENABLE_DX10_CLAMP);
1888 
1889   if (FourByteBuffer & COMPUTE_PGM_RSRC1_DEBUG_MODE)
1890     return MCDisassembler::Fail;
1891 
1892   if (!isGFX12Plus())
1893     PRINT_DIRECTIVE(".amdhsa_ieee_mode",
1894                     COMPUTE_PGM_RSRC1_GFX6_GFX11_ENABLE_IEEE_MODE);
1895 
1896   if (FourByteBuffer & COMPUTE_PGM_RSRC1_BULKY)
1897     return MCDisassembler::Fail;
1898 
1899   if (FourByteBuffer & COMPUTE_PGM_RSRC1_CDBG_USER)
1900     return MCDisassembler::Fail;
1901 
1902   if (isGFX9Plus())
1903     PRINT_DIRECTIVE(".amdhsa_fp16_overflow", COMPUTE_PGM_RSRC1_GFX9_PLUS_FP16_OVFL);
1904 
1905   if (!isGFX9Plus())
1906     if (FourByteBuffer & COMPUTE_PGM_RSRC1_GFX6_GFX8_RESERVED0)
1907       return MCDisassembler::Fail;
1908   if (FourByteBuffer & COMPUTE_PGM_RSRC1_RESERVED1)
1909     return MCDisassembler::Fail;
1910   if (!isGFX10Plus())
1911     if (FourByteBuffer & COMPUTE_PGM_RSRC1_GFX6_GFX9_RESERVED2)
1912       return MCDisassembler::Fail;
1913 
1914   if (isGFX10Plus()) {
1915     PRINT_DIRECTIVE(".amdhsa_workgroup_processor_mode",
1916                     COMPUTE_PGM_RSRC1_GFX10_PLUS_WGP_MODE);
1917     PRINT_DIRECTIVE(".amdhsa_memory_ordered", COMPUTE_PGM_RSRC1_GFX10_PLUS_MEM_ORDERED);
1918     PRINT_DIRECTIVE(".amdhsa_forward_progress", COMPUTE_PGM_RSRC1_GFX10_PLUS_FWD_PROGRESS);
1919   }
1920 
1921   if (isGFX12Plus())
1922     PRINT_DIRECTIVE(".amdhsa_round_robin_scheduling",
1923                     COMPUTE_PGM_RSRC1_GFX12_PLUS_ENABLE_WG_RR_EN);
1924 
1925   return MCDisassembler::Success;
1926 }
1927 
1928 // NOLINTNEXTLINE(readability-identifier-naming)
1929 MCDisassembler::DecodeStatus AMDGPUDisassembler::decodeCOMPUTE_PGM_RSRC2(
1930     uint32_t FourByteBuffer, raw_string_ostream &KdStream) const {
1931   using namespace amdhsa;
1932   StringRef Indent = "\t";
1933   if (hasArchitectedFlatScratch())
1934     PRINT_DIRECTIVE(".amdhsa_enable_private_segment",
1935                     COMPUTE_PGM_RSRC2_ENABLE_PRIVATE_SEGMENT);
1936   else
1937     PRINT_DIRECTIVE(".amdhsa_system_sgpr_private_segment_wavefront_offset",
1938                     COMPUTE_PGM_RSRC2_ENABLE_PRIVATE_SEGMENT);
1939   PRINT_DIRECTIVE(".amdhsa_system_sgpr_workgroup_id_x",
1940                   COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X);
1941   PRINT_DIRECTIVE(".amdhsa_system_sgpr_workgroup_id_y",
1942                   COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Y);
1943   PRINT_DIRECTIVE(".amdhsa_system_sgpr_workgroup_id_z",
1944                   COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Z);
1945   PRINT_DIRECTIVE(".amdhsa_system_sgpr_workgroup_info",
1946                   COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_INFO);
1947   PRINT_DIRECTIVE(".amdhsa_system_vgpr_workitem_id",
1948                   COMPUTE_PGM_RSRC2_ENABLE_VGPR_WORKITEM_ID);
1949 
1950   if (FourByteBuffer & COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_ADDRESS_WATCH)
1951     return MCDisassembler::Fail;
1952 
1953   if (FourByteBuffer & COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_MEMORY)
1954     return MCDisassembler::Fail;
1955 
1956   if (FourByteBuffer & COMPUTE_PGM_RSRC2_GRANULATED_LDS_SIZE)
1957     return MCDisassembler::Fail;
1958 
1959   PRINT_DIRECTIVE(
1960       ".amdhsa_exception_fp_ieee_invalid_op",
1961       COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INVALID_OPERATION);
1962   PRINT_DIRECTIVE(".amdhsa_exception_fp_denorm_src",
1963                   COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_FP_DENORMAL_SOURCE);
1964   PRINT_DIRECTIVE(
1965       ".amdhsa_exception_fp_ieee_div_zero",
1966       COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_DIVISION_BY_ZERO);
1967   PRINT_DIRECTIVE(".amdhsa_exception_fp_ieee_overflow",
1968                   COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_OVERFLOW);
1969   PRINT_DIRECTIVE(".amdhsa_exception_fp_ieee_underflow",
1970                   COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_UNDERFLOW);
1971   PRINT_DIRECTIVE(".amdhsa_exception_fp_ieee_inexact",
1972                   COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INEXACT);
1973   PRINT_DIRECTIVE(".amdhsa_exception_int_div_zero",
1974                   COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_INT_DIVIDE_BY_ZERO);
1975 
1976   if (FourByteBuffer & COMPUTE_PGM_RSRC2_RESERVED0)
1977     return MCDisassembler::Fail;
1978 
1979   return MCDisassembler::Success;
1980 }
1981 
1982 // NOLINTNEXTLINE(readability-identifier-naming)
1983 MCDisassembler::DecodeStatus AMDGPUDisassembler::decodeCOMPUTE_PGM_RSRC3(
1984     uint32_t FourByteBuffer, raw_string_ostream &KdStream) const {
1985   using namespace amdhsa;
1986   StringRef Indent = "\t";
1987   if (isGFX90A()) {
1988     KdStream << Indent << ".amdhsa_accum_offset "
1989              << (GET_FIELD(COMPUTE_PGM_RSRC3_GFX90A_ACCUM_OFFSET) + 1) * 4
1990              << '\n';
1991     if (FourByteBuffer & COMPUTE_PGM_RSRC3_GFX90A_RESERVED0)
1992       return MCDisassembler::Fail;
1993     PRINT_DIRECTIVE(".amdhsa_tg_split", COMPUTE_PGM_RSRC3_GFX90A_TG_SPLIT);
1994     if (FourByteBuffer & COMPUTE_PGM_RSRC3_GFX90A_RESERVED1)
1995       return MCDisassembler::Fail;
1996   } else if (isGFX10Plus()) {
1997     if (!EnableWavefrontSize32 || !*EnableWavefrontSize32) {
1998       PRINT_DIRECTIVE(".amdhsa_shared_vgpr_count",
1999                       COMPUTE_PGM_RSRC3_GFX10_PLUS_SHARED_VGPR_COUNT);
2000     } else {
2001       PRINT_PSEUDO_DIRECTIVE_COMMENT(
2002           "SHARED_VGPR_COUNT", COMPUTE_PGM_RSRC3_GFX10_PLUS_SHARED_VGPR_COUNT);
2003     }
2004 
2005     if (isGFX11Plus()) {
2006       PRINT_PSEUDO_DIRECTIVE_COMMENT("INST_PREF_SIZE",
2007                                      COMPUTE_PGM_RSRC3_GFX11_PLUS_INST_PREF_SIZE);
2008       PRINT_PSEUDO_DIRECTIVE_COMMENT("TRAP_ON_START",
2009                                      COMPUTE_PGM_RSRC3_GFX11_PLUS_TRAP_ON_START);
2010       PRINT_PSEUDO_DIRECTIVE_COMMENT("TRAP_ON_END",
2011                                      COMPUTE_PGM_RSRC3_GFX11_PLUS_TRAP_ON_END);
2012     } else {
2013       if (FourByteBuffer & COMPUTE_PGM_RSRC3_GFX10_RESERVED0)
2014         return MCDisassembler::Fail;
2015     }
2016 
2017     if (FourByteBuffer & COMPUTE_PGM_RSRC3_GFX10_PLUS_RESERVED1)
2018       return MCDisassembler::Fail;
2019 
2020     if (isGFX11Plus()) {
2021       PRINT_PSEUDO_DIRECTIVE_COMMENT("IMAGE_OP",
2022                                      COMPUTE_PGM_RSRC3_GFX11_PLUS_IMAGE_OP);
2023     } else {
2024       if (FourByteBuffer & COMPUTE_PGM_RSRC3_GFX10_RESERVED2)
2025         return MCDisassembler::Fail;
2026     }
2027   } else if (FourByteBuffer) {
2028     return MCDisassembler::Fail;
2029   }
2030   return MCDisassembler::Success;
2031 }
2032 #undef PRINT_PSEUDO_DIRECTIVE_COMMENT
2033 #undef PRINT_DIRECTIVE
2034 #undef GET_FIELD
2035 
2036 MCDisassembler::DecodeStatus
2037 AMDGPUDisassembler::decodeKernelDescriptorDirective(
2038     DataExtractor::Cursor &Cursor, ArrayRef<uint8_t> Bytes,
2039     raw_string_ostream &KdStream) const {
2040 #define PRINT_DIRECTIVE(DIRECTIVE, MASK)                                       \
2041   do {                                                                         \
2042     KdStream << Indent << DIRECTIVE " "                                        \
2043              << ((TwoByteBuffer & MASK) >> (MASK##_SHIFT)) << '\n';            \
2044   } while (0)
2045 
2046   uint16_t TwoByteBuffer = 0;
2047   uint32_t FourByteBuffer = 0;
2048 
2049   StringRef ReservedBytes;
2050   StringRef Indent = "\t";
2051 
2052   assert(Bytes.size() == 64);
2053   DataExtractor DE(Bytes, /*IsLittleEndian=*/true, /*AddressSize=*/8);
2054 
2055   switch (Cursor.tell()) {
2056   case amdhsa::GROUP_SEGMENT_FIXED_SIZE_OFFSET:
2057     FourByteBuffer = DE.getU32(Cursor);
2058     KdStream << Indent << ".amdhsa_group_segment_fixed_size " << FourByteBuffer
2059              << '\n';
2060     return MCDisassembler::Success;
2061 
2062   case amdhsa::PRIVATE_SEGMENT_FIXED_SIZE_OFFSET:
2063     FourByteBuffer = DE.getU32(Cursor);
2064     KdStream << Indent << ".amdhsa_private_segment_fixed_size "
2065              << FourByteBuffer << '\n';
2066     return MCDisassembler::Success;
2067 
2068   case amdhsa::KERNARG_SIZE_OFFSET:
2069     FourByteBuffer = DE.getU32(Cursor);
2070     KdStream << Indent << ".amdhsa_kernarg_size "
2071              << FourByteBuffer << '\n';
2072     return MCDisassembler::Success;
2073 
2074   case amdhsa::RESERVED0_OFFSET:
2075     // 4 reserved bytes, must be 0.
2076     ReservedBytes = DE.getBytes(Cursor, 4);
2077     for (int I = 0; I < 4; ++I) {
2078       if (ReservedBytes[I] != 0) {
2079         return MCDisassembler::Fail;
2080       }
2081     }
2082     return MCDisassembler::Success;
2083 
2084   case amdhsa::KERNEL_CODE_ENTRY_BYTE_OFFSET_OFFSET:
2085     // KERNEL_CODE_ENTRY_BYTE_OFFSET
2086     // So far no directive controls this for Code Object V3, so simply skip for
2087     // disassembly.
2088     DE.skip(Cursor, 8);
2089     return MCDisassembler::Success;
2090 
2091   case amdhsa::RESERVED1_OFFSET:
2092     // 20 reserved bytes, must be 0.
2093     ReservedBytes = DE.getBytes(Cursor, 20);
2094     for (int I = 0; I < 20; ++I) {
2095       if (ReservedBytes[I] != 0) {
2096         return MCDisassembler::Fail;
2097       }
2098     }
2099     return MCDisassembler::Success;
2100 
2101   case amdhsa::COMPUTE_PGM_RSRC3_OFFSET:
2102     FourByteBuffer = DE.getU32(Cursor);
2103     return decodeCOMPUTE_PGM_RSRC3(FourByteBuffer, KdStream);
2104 
2105   case amdhsa::COMPUTE_PGM_RSRC1_OFFSET:
2106     FourByteBuffer = DE.getU32(Cursor);
2107     return decodeCOMPUTE_PGM_RSRC1(FourByteBuffer, KdStream);
2108 
2109   case amdhsa::COMPUTE_PGM_RSRC2_OFFSET:
2110     FourByteBuffer = DE.getU32(Cursor);
2111     return decodeCOMPUTE_PGM_RSRC2(FourByteBuffer, KdStream);
2112 
2113   case amdhsa::KERNEL_CODE_PROPERTIES_OFFSET:
2114     using namespace amdhsa;
2115     TwoByteBuffer = DE.getU16(Cursor);
2116 
2117     if (!hasArchitectedFlatScratch())
2118       PRINT_DIRECTIVE(".amdhsa_user_sgpr_private_segment_buffer",
2119                       KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER);
2120     PRINT_DIRECTIVE(".amdhsa_user_sgpr_dispatch_ptr",
2121                     KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR);
2122     PRINT_DIRECTIVE(".amdhsa_user_sgpr_queue_ptr",
2123                     KERNEL_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR);
2124     PRINT_DIRECTIVE(".amdhsa_user_sgpr_kernarg_segment_ptr",
2125                     KERNEL_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR);
2126     PRINT_DIRECTIVE(".amdhsa_user_sgpr_dispatch_id",
2127                     KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID);
2128     if (!hasArchitectedFlatScratch())
2129       PRINT_DIRECTIVE(".amdhsa_user_sgpr_flat_scratch_init",
2130                       KERNEL_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT);
2131     PRINT_DIRECTIVE(".amdhsa_user_sgpr_private_segment_size",
2132                     KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE);
2133 
2134     if (TwoByteBuffer & KERNEL_CODE_PROPERTY_RESERVED0)
2135       return MCDisassembler::Fail;
2136 
2137     // Reserved for GFX9
2138     if (isGFX9() &&
2139         (TwoByteBuffer & KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32)) {
2140       return MCDisassembler::Fail;
2141     } else if (isGFX10Plus()) {
2142       PRINT_DIRECTIVE(".amdhsa_wavefront_size32",
2143                       KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32);
2144     }
2145 
2146     if (AMDGPU::getAmdhsaCodeObjectVersion() >= AMDGPU::AMDHSA_COV5)
2147       PRINT_DIRECTIVE(".amdhsa_uses_dynamic_stack",
2148                       KERNEL_CODE_PROPERTY_USES_DYNAMIC_STACK);
2149 
2150     if (TwoByteBuffer & KERNEL_CODE_PROPERTY_RESERVED1)
2151       return MCDisassembler::Fail;
2152 
2153     return MCDisassembler::Success;
2154 
2155   case amdhsa::KERNARG_PRELOAD_OFFSET:
2156     using namespace amdhsa;
2157     TwoByteBuffer = DE.getU16(Cursor);
2158     if (TwoByteBuffer & KERNARG_PRELOAD_SPEC_LENGTH) {
2159       PRINT_DIRECTIVE(".amdhsa_user_sgpr_kernarg_preload_length",
2160                       KERNARG_PRELOAD_SPEC_LENGTH);
2161     }
2162 
2163     if (TwoByteBuffer & KERNARG_PRELOAD_SPEC_OFFSET) {
2164       PRINT_DIRECTIVE(".amdhsa_user_sgpr_kernarg_preload_offset",
2165                       KERNARG_PRELOAD_SPEC_OFFSET);
2166     }
2167     return MCDisassembler::Success;
2168 
2169   case amdhsa::RESERVED3_OFFSET:
2170     // 4 bytes from here are reserved, must be 0.
2171     ReservedBytes = DE.getBytes(Cursor, 4);
2172     for (int I = 0; I < 4; ++I) {
2173       if (ReservedBytes[I] != 0)
2174         return MCDisassembler::Fail;
2175     }
2176     return MCDisassembler::Success;
2177 
2178   default:
2179     llvm_unreachable("Unhandled index. Case statements cover everything.");
2180     return MCDisassembler::Fail;
2181   }
2182 #undef PRINT_DIRECTIVE
2183 }
2184 
2185 MCDisassembler::DecodeStatus AMDGPUDisassembler::decodeKernelDescriptor(
2186     StringRef KdName, ArrayRef<uint8_t> Bytes, uint64_t KdAddress) const {
2187   // CP microcode requires the kernel descriptor to be 64-byte aligned.
2188   if (Bytes.size() != 64 || KdAddress % 64 != 0)
2189     return MCDisassembler::Fail;
2190 
2191   // FIXME: We can't actually decode "in order" as is done below, as e.g. GFX10
2192   // requires us to know the setting of .amdhsa_wavefront_size32 in order to
2193   // accurately produce .amdhsa_next_free_vgpr, and they appear in the wrong
2194   // order. Work around this by first looking up .amdhsa_wavefront_size32 here
2195   // when required.
2196   if (isGFX10Plus()) {
2197     uint16_t KernelCodeProperties =
2198         support::endian::read16(&Bytes[amdhsa::KERNEL_CODE_PROPERTIES_OFFSET],
2199                                 llvm::endianness::little);
2200     EnableWavefrontSize32 =
2201         AMDHSA_BITS_GET(KernelCodeProperties,
2202                         amdhsa::KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32);
2203   }
2204 
2205   std::string Kd;
2206   raw_string_ostream KdStream(Kd);
2207   KdStream << ".amdhsa_kernel " << KdName << '\n';
2208 
2209   DataExtractor::Cursor C(0);
2210   while (C && C.tell() < Bytes.size()) {
2211     MCDisassembler::DecodeStatus Status =
2212         decodeKernelDescriptorDirective(C, Bytes, KdStream);
2213 
2214     cantFail(C.takeError());
2215 
2216     if (Status == MCDisassembler::Fail)
2217       return MCDisassembler::Fail;
2218   }
2219   KdStream << ".end_amdhsa_kernel\n";
2220   outs() << KdStream.str();
2221   return MCDisassembler::Success;
2222 }
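// For reference (illustrative), a successful decode prints a block shaped like:
//   .amdhsa_kernel foo
//       .amdhsa_group_segment_fixed_size 0
//       .amdhsa_private_segment_fixed_size 0
//       ...
//   .end_amdhsa_kernel
// with one directive per decodeKernelDescriptorDirective call.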
2223 
2224 std::optional<MCDisassembler::DecodeStatus>
2225 AMDGPUDisassembler::onSymbolStart(SymbolInfoTy &Symbol, uint64_t &Size,
2226                                   ArrayRef<uint8_t> Bytes, uint64_t Address,
2227                                   raw_ostream &CStream) const {
2228   // Right now only the kernel descriptor needs to be handled; all other
2229   // symbols are ignored for target-specific handling.
2230   // TODO:
2231   // Fix the spurious symbol issue for AMDGPU kernels. Exists for both Code
2232   // Object V2 and V3 when symbols are marked protected.
2233 
2234   // amd_kernel_code_t for Code Object V2.
2235   if (Symbol.Type == ELF::STT_AMDGPU_HSA_KERNEL) {
2236     Size = 256;
2237     return MCDisassembler::Fail;
2238   }
2239 
2240   // Code Object V3 kernel descriptors.
2241   StringRef Name = Symbol.Name;
2242   if (Symbol.Type == ELF::STT_OBJECT && Name.ends_with(StringRef(".kd"))) {
2243     Size = 64; // Size = 64 regardless of success or failure.
2244     return decodeKernelDescriptor(Name.drop_back(3), Bytes, Address);
2245   }
2246   return std::nullopt;
2247 }
2248 
2249 //===----------------------------------------------------------------------===//
2250 // AMDGPUSymbolizer
2251 //===----------------------------------------------------------------------===//
2252 
2253 // Try to find a symbol name for the specified label.
2254 bool AMDGPUSymbolizer::tryAddingSymbolicOperand(
2255     MCInst &Inst, raw_ostream & /*cStream*/, int64_t Value,
2256     uint64_t /*Address*/, bool IsBranch, uint64_t /*Offset*/,
2257     uint64_t /*OpSize*/, uint64_t /*InstSize*/) {
2258 
2259   if (!IsBranch) {
2260     return false;
2261   }
2262 
2263   auto *Symbols = static_cast<SectionSymbolsTy *>(DisInfo);
2264   if (!Symbols)
2265     return false;
2266 
2267   auto Result = llvm::find_if(*Symbols, [Value](const SymbolInfoTy &Val) {
2268     return Val.Addr == static_cast<uint64_t>(Value) &&
2269            Val.Type == ELF::STT_NOTYPE;
2270   });
2271   if (Result != Symbols->end()) {
2272     auto *Sym = Ctx.getOrCreateSymbol(Result->Name);
2273     const auto *Add = MCSymbolRefExpr::create(Sym, Ctx);
2274     Inst.addOperand(MCOperand::createExpr(Add));
2275     return true;
2276   }
2277   // Add to list of referenced addresses, so caller can synthesize a label.
2278   ReferencedAddresses.push_back(static_cast<uint64_t>(Value));
2279   return false;
2280 }
2281 
2282 void AMDGPUSymbolizer::tryAddingPcLoadReferenceComment(raw_ostream &cStream,
2283                                                        int64_t Value,
2284                                                        uint64_t Address) {
2285   llvm_unreachable("unimplemented");
2286 }
2287 
2288 //===----------------------------------------------------------------------===//
2289 // Initialization
2290 //===----------------------------------------------------------------------===//
2291 
2292 static MCSymbolizer *createAMDGPUSymbolizer(const Triple &/*TT*/,
2293                               LLVMOpInfoCallback /*GetOpInfo*/,
2294                               LLVMSymbolLookupCallback /*SymbolLookUp*/,
2295                               void *DisInfo,
2296                               MCContext *Ctx,
2297                               std::unique_ptr<MCRelocationInfo> &&RelInfo) {
2298   return new AMDGPUSymbolizer(*Ctx, std::move(RelInfo), DisInfo);
2299 }
2300 
2301 static MCDisassembler *createAMDGPUDisassembler(const Target &T,
2302                                                 const MCSubtargetInfo &STI,
2303                                                 MCContext &Ctx) {
2304   return new AMDGPUDisassembler(STI, Ctx, T.createMCInstrInfo());
2305 }
2306 
2307 extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUDisassembler() {
2308   TargetRegistry::RegisterMCDisassembler(getTheGCNTarget(),
2309                                          createAMDGPUDisassembler);
2310   TargetRegistry::RegisterMCSymbolizer(getTheGCNTarget(),
2311                                        createAMDGPUSymbolizer);
2312 }
2313