1 //===- AMDGPUDisassembler.cpp - Disassembler for AMDGPU ISA ---------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 //===----------------------------------------------------------------------===//
10 //
11 /// \file
12 ///
13 /// This file contains the definition of the AMDGPU ISA disassembler.
14 //
15 //===----------------------------------------------------------------------===//
16 
17 // ToDo: What to do with instruction suffixes (v_mov_b32 vs v_mov_b32_e32)?
18 
19 #include "Disassembler/AMDGPUDisassembler.h"
20 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
21 #include "SIDefines.h"
22 #include "SIRegisterInfo.h"
23 #include "TargetInfo/AMDGPUTargetInfo.h"
24 #include "Utils/AMDGPUAsmUtils.h"
25 #include "Utils/AMDGPUBaseInfo.h"
26 #include "llvm-c/DisassemblerTypes.h"
27 #include "llvm/BinaryFormat/ELF.h"
28 #include "llvm/MC/MCAsmInfo.h"
29 #include "llvm/MC/MCContext.h"
30 #include "llvm/MC/MCDecoderOps.h"
31 #include "llvm/MC/MCExpr.h"
32 #include "llvm/MC/MCInstrDesc.h"
33 #include "llvm/MC/MCRegisterInfo.h"
34 #include "llvm/MC/MCSubtargetInfo.h"
35 #include "llvm/MC/TargetRegistry.h"
36 #include "llvm/Support/AMDHSAKernelDescriptor.h"
37 
38 using namespace llvm;
39 
40 #define DEBUG_TYPE "amdgpu-disassembler"
41 
42 #define SGPR_MAX                                                               \
43   (isGFX10Plus() ? AMDGPU::EncValues::SGPR_MAX_GFX10                           \
44                  : AMDGPU::EncValues::SGPR_MAX_SI)
45 
46 using DecodeStatus = llvm::MCDisassembler::DecodeStatus;
47 
48 AMDGPUDisassembler::AMDGPUDisassembler(const MCSubtargetInfo &STI,
49                                        MCContext &Ctx, MCInstrInfo const *MCII)
50     : MCDisassembler(STI, Ctx), MCII(MCII), MRI(*Ctx.getRegisterInfo()),
51       MAI(*Ctx.getAsmInfo()), TargetMaxInstBytes(MAI.getMaxInstLength(&STI)),
52       CodeObjectVersion(AMDGPU::getDefaultAMDHSACodeObjectVersion()) {
53   // ToDo: AMDGPUDisassembler supports only VI ISA.
54   if (!STI.hasFeature(AMDGPU::FeatureGCN3Encoding) && !isGFX10Plus())
55     report_fatal_error("Disassembly not yet supported for subtarget");
56 
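  // Pre-create constant symbols (UC_VERSION_* values) used when printing
  // s_version operands symbolically (see decodeVersionImm()).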
57   for (auto [Symbol, Code] : AMDGPU::UCVersion::getGFXVersions())
58     createConstantSymbolExpr(Symbol, Code);
59 
60   UCVersionW64Expr = createConstantSymbolExpr("UC_VERSION_W64_BIT", 0x2000);
61   UCVersionW32Expr = createConstantSymbolExpr("UC_VERSION_W32_BIT", 0x4000);
62   UCVersionMDPExpr = createConstantSymbolExpr("UC_VERSION_MDP_BIT", 0x8000);
63 }
64 
65 void AMDGPUDisassembler::setABIVersion(unsigned Version) {
66   CodeObjectVersion = AMDGPU::getAMDHSACodeObjectVersion(Version);
67 }
68 
69 inline static MCDisassembler::DecodeStatus
70 addOperand(MCInst &Inst, const MCOperand& Opnd) {
71   Inst.addOperand(Opnd);
72   return Opnd.isValid() ?
73     MCDisassembler::Success :
74     MCDisassembler::Fail;
75 }
76 
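// Inserts Op at the position of the operand named NameIdx in MI and returns
// that operand index, or -1 (without inserting) if the instruction has no such
// named operand.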
77 static int insertNamedMCOperand(MCInst &MI, const MCOperand &Op,
78                                 uint16_t NameIdx) {
79   int OpIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), NameIdx);
80   if (OpIdx != -1) {
81     auto *I = MI.begin();
82     std::advance(I, OpIdx);
83     MI.insert(I, Op);
84   }
85   return OpIdx;
86 }
87 
88 static DecodeStatus decodeSOPPBrTarget(MCInst &Inst, unsigned Imm,
89                                        uint64_t Addr,
90                                        const MCDisassembler *Decoder) {
91   const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
92 
93   // Our branches take a simm16.
94   int64_t Offset = SignExtend64<16>(Imm) * 4 + 4 + Addr;
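  // E.g. Imm = 0xFFFC (-4) at Addr 0x100 gives -4 * 4 + 4 + 0x100 = 0xF4.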
95 
96   if (DAsm->tryAddingSymbolicOperand(Inst, Offset, Addr, true, 2, 2, 0))
97     return MCDisassembler::Success;
98   return addOperand(Inst, MCOperand::createImm(Imm));
99 }
100 
101 static DecodeStatus decodeSMEMOffset(MCInst &Inst, unsigned Imm, uint64_t Addr,
102                                      const MCDisassembler *Decoder) {
103   const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
104   int64_t Offset;
105   if (DAsm->isGFX12Plus()) { // GFX12 supports 24-bit signed offsets.
106     Offset = SignExtend64<24>(Imm);
107   } else if (DAsm->isVI()) { // VI supports 20-bit unsigned offsets.
108     Offset = Imm & 0xFFFFF;
109   } else { // GFX9+ supports 21-bit signed offsets.
110     Offset = SignExtend64<21>(Imm);
111   }
112   return addOperand(Inst, MCOperand::createImm(Offset));
113 }
114 
115 static DecodeStatus decodeBoolReg(MCInst &Inst, unsigned Val, uint64_t Addr,
116                                   const MCDisassembler *Decoder) {
117   const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
118   return addOperand(Inst, DAsm->decodeBoolReg(Val));
119 }
120 
121 static DecodeStatus decodeSplitBarrier(MCInst &Inst, unsigned Val,
122                                        uint64_t Addr,
123                                        const MCDisassembler *Decoder) {
124   const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
125   return addOperand(Inst, DAsm->decodeSplitBarrier(Val));
126 }
127 
128 static DecodeStatus decodeDpp8FI(MCInst &Inst, unsigned Val, uint64_t Addr,
129                                  const MCDisassembler *Decoder) {
130   const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
131   return addOperand(Inst, DAsm->decodeDpp8FI(Val));
132 }
133 
134 #define DECODE_OPERAND(StaticDecoderName, DecoderName)                         \
135   static DecodeStatus StaticDecoderName(MCInst &Inst, unsigned Imm,            \
136                                         uint64_t /*Addr*/,                     \
137                                         const MCDisassembler *Decoder) {       \
138     auto DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);              \
139     return addOperand(Inst, DAsm->DecoderName(Imm));                           \
140   }
141 
142 // Decoder for registers; decodes directly using RegClassID. Imm (8-bit) is the
143 // register number. Used by VGPR-only and AGPR-only operands.
144 #define DECODE_OPERAND_REG_8(RegClass)                                         \
145   static DecodeStatus Decode##RegClass##RegisterClass(                         \
146       MCInst &Inst, unsigned Imm, uint64_t /*Addr*/,                           \
147       const MCDisassembler *Decoder) {                                         \
148     assert(Imm < (1 << 8) && "8-bit encoding");                                \
149     auto DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);              \
150     return addOperand(                                                         \
151         Inst, DAsm->createRegOperand(AMDGPU::RegClass##RegClassID, Imm));      \
152   }
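// For reference, DECODE_OPERAND_REG_8(VGPR_32) below expands to roughly:
//   static DecodeStatus DecodeVGPR_32RegisterClass(MCInst &Inst, unsigned Imm,
//                                                  uint64_t,
//                                                  const MCDisassembler *Decoder) {
//     auto DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
//     return addOperand(
//         Inst, DAsm->createRegOperand(AMDGPU::VGPR_32RegClassID, Imm));
//   }
// i.e. the 8-bit immediate is used directly as the register number within the
// class.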
153 
154 #define DECODE_SrcOp(Name, EncSize, OpWidth, EncImm, MandatoryLiteral,         \
155                      ImmWidth)                                                 \
156   static DecodeStatus Name(MCInst &Inst, unsigned Imm, uint64_t /*Addr*/,      \
157                            const MCDisassembler *Decoder) {                    \
158     assert(Imm < (1 << EncSize) && #EncSize "-bit encoding");                  \
159     auto DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);              \
160     return addOperand(Inst,                                                    \
161                       DAsm->decodeSrcOp(AMDGPUDisassembler::OpWidth, EncImm,   \
162                                         MandatoryLiteral, ImmWidth));          \
163   }
164 
165 static DecodeStatus decodeSrcOp(MCInst &Inst, unsigned EncSize,
166                                 AMDGPUDisassembler::OpWidthTy OpWidth,
167                                 unsigned Imm, unsigned EncImm,
168                                 bool MandatoryLiteral, unsigned ImmWidth,
169                                 AMDGPU::OperandSemantics Sema,
170                                 const MCDisassembler *Decoder) {
171   assert(Imm < (1U << EncSize) && "Operand doesn't fit encoding!");
172   const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
173   return addOperand(Inst, DAsm->decodeSrcOp(OpWidth, EncImm, MandatoryLiteral,
174                                             ImmWidth, Sema));
175 }
176 
177 // Decoder for registers. Imm (7-bit) is the register number; uses decodeSrcOp
178 // to determine the register class. Used by SGPR-only operands.
179 #define DECODE_OPERAND_REG_7(RegClass, OpWidth)                                \
180   DECODE_SrcOp(Decode##RegClass##RegisterClass, 7, OpWidth, Imm, false, 0)
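// E.g. DECODE_OPERAND_REG_7(SReg_32, OPW32) below defines
// DecodeSReg_32RegisterClass(), which routes the 7-bit encoding through
// DAsm->decodeSrcOp(AMDGPUDisassembler::OPW32, Imm, false, 0).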
181 
182 // Decoder for registers. Imm (10-bit): Imm{7-0} is the register number,
183 // Imm{9} is acc (AGPR or VGPR), and Imm{8} should be 0 (see VOP3Pe_SMFMAC).
184 // Set Imm{8} to 1 (IS_VGPR) to decode using 'enum10' from decodeSrcOp.
185 // Used by AV_ register classes (AGPR-only or VGPR-only register operands).
186 template <AMDGPUDisassembler::OpWidthTy OpWidth>
187 static DecodeStatus decodeAV10(MCInst &Inst, unsigned Imm, uint64_t /* Addr */,
188                                const MCDisassembler *Decoder) {
189   return decodeSrcOp(Inst, 10, OpWidth, Imm, Imm | AMDGPU::EncValues::IS_VGPR,
190                      false, 0, AMDGPU::OperandSemantics::INT, Decoder);
191 }
192 
193 // Decoder for Src(9-bit encoding) registers only.
194 template <AMDGPUDisassembler::OpWidthTy OpWidth>
195 static DecodeStatus decodeSrcReg9(MCInst &Inst, unsigned Imm,
196                                   uint64_t /* Addr */,
197                                   const MCDisassembler *Decoder) {
198   return decodeSrcOp(Inst, 9, OpWidth, Imm, Imm, false, 0,
199                      AMDGPU::OperandSemantics::INT, Decoder);
200 }
201 
202 // Decoder for Src (9-bit encoding) AGPRs. The register number is encoded in 9
203 // bits; set Imm{9} to 1 (set acc) and decode using 'enum10' from decodeSrcOp.
204 // Registers only.
205 template <AMDGPUDisassembler::OpWidthTy OpWidth>
206 static DecodeStatus decodeSrcA9(MCInst &Inst, unsigned Imm, uint64_t /* Addr */,
207                                 const MCDisassembler *Decoder) {
208   return decodeSrcOp(Inst, 9, OpWidth, Imm, Imm | 512, false, 0,
209                      AMDGPU::OperandSemantics::INT, Decoder);
210 }
211 
212 // Decoder for 'enum10' from decodeSrcOp: Imm{0-8} is the 9-bit Src encoding and
213 // Imm{9} is acc. Registers only.
214 template <AMDGPUDisassembler::OpWidthTy OpWidth>
215 static DecodeStatus decodeSrcAV10(MCInst &Inst, unsigned Imm,
216                                   uint64_t /* Addr */,
217                                   const MCDisassembler *Decoder) {
218   return decodeSrcOp(Inst, 10, OpWidth, Imm, Imm, false, 0,
219                      AMDGPU::OperandSemantics::INT, Decoder);
220 }
221 
222 // Decoder for RegisterOperands using the 9-bit Src encoding. The operand can be
223 // a register from RegClass or an immediate. Registers outside RegClass are
224 // still decoded, and the InstPrinter will report a warning. An immediate is
225 // decoded into a constant of size ImmWidth, which should match the width of the
226 // immediate used by the OperandType (important for floating-point types).
227 template <AMDGPUDisassembler::OpWidthTy OpWidth, unsigned ImmWidth,
228           unsigned OperandSemantics>
229 static DecodeStatus decodeSrcRegOrImm9(MCInst &Inst, unsigned Imm,
230                                        uint64_t /* Addr */,
231                                        const MCDisassembler *Decoder) {
232   return decodeSrcOp(Inst, 9, OpWidth, Imm, Imm, false, ImmWidth,
233                      (AMDGPU::OperandSemantics)OperandSemantics, Decoder);
234 }
235 
236 // Decoder for Src(9-bit encoding) AGPR or immediate. Set Imm{9} to 1 (set acc)
237 // and decode using 'enum10' from decodeSrcOp.
238 template <AMDGPUDisassembler::OpWidthTy OpWidth, unsigned ImmWidth,
239           unsigned OperandSemantics>
240 static DecodeStatus decodeSrcRegOrImmA9(MCInst &Inst, unsigned Imm,
241                                         uint64_t /* Addr */,
242                                         const MCDisassembler *Decoder) {
243   return decodeSrcOp(Inst, 9, OpWidth, Imm, Imm | 512, false, ImmWidth,
244                      (AMDGPU::OperandSemantics)OperandSemantics, Decoder);
245 }
246 
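// Same as decodeSrcRegOrImm9 but with MandatoryLiteral set, for deferred
// operands whose immediate value is taken from the literal dword that follows
// the instruction (see decodeSrcOp).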
247 template <AMDGPUDisassembler::OpWidthTy OpWidth, unsigned ImmWidth,
248           unsigned OperandSemantics>
249 static DecodeStatus decodeSrcRegOrImmDeferred9(MCInst &Inst, unsigned Imm,
250                                                uint64_t /* Addr */,
251                                                const MCDisassembler *Decoder) {
252   return decodeSrcOp(Inst, 9, OpWidth, Imm, Imm, true, ImmWidth,
253                      (AMDGPU::OperandSemantics)OperandSemantics, Decoder);
254 }
255 
256 // Default decoders generated by tablegen: 'Decode<RegClass>RegisterClass'
257 // when RegisterClass is used as an operand. Most often used for destination
258 // operands.
259 
260 DECODE_OPERAND_REG_8(VGPR_32)
261 DECODE_OPERAND_REG_8(VGPR_32_Lo128)
262 DECODE_OPERAND_REG_8(VReg_64)
263 DECODE_OPERAND_REG_8(VReg_96)
264 DECODE_OPERAND_REG_8(VReg_128)
265 DECODE_OPERAND_REG_8(VReg_256)
266 DECODE_OPERAND_REG_8(VReg_288)
267 DECODE_OPERAND_REG_8(VReg_352)
268 DECODE_OPERAND_REG_8(VReg_384)
269 DECODE_OPERAND_REG_8(VReg_512)
270 DECODE_OPERAND_REG_8(VReg_1024)
271 
272 DECODE_OPERAND_REG_7(SReg_32, OPW32)
273 DECODE_OPERAND_REG_7(SReg_32_XEXEC, OPW32)
274 DECODE_OPERAND_REG_7(SReg_32_XM0_XEXEC, OPW32)
275 DECODE_OPERAND_REG_7(SReg_32_XEXEC_HI, OPW32)
276 DECODE_OPERAND_REG_7(SReg_64, OPW64)
277 DECODE_OPERAND_REG_7(SReg_64_XEXEC, OPW64)
278 DECODE_OPERAND_REG_7(SReg_64_XEXEC_XNULL, OPW64)
279 DECODE_OPERAND_REG_7(SReg_96, OPW96)
280 DECODE_OPERAND_REG_7(SReg_128, OPW128)
281 DECODE_OPERAND_REG_7(SReg_256, OPW256)
282 DECODE_OPERAND_REG_7(SReg_512, OPW512)
283 
284 DECODE_OPERAND_REG_8(AGPR_32)
285 DECODE_OPERAND_REG_8(AReg_64)
286 DECODE_OPERAND_REG_8(AReg_128)
287 DECODE_OPERAND_REG_8(AReg_256)
288 DECODE_OPERAND_REG_8(AReg_512)
289 DECODE_OPERAND_REG_8(AReg_1024)
290 
291 static DecodeStatus DecodeVGPR_16RegisterClass(MCInst &Inst, unsigned Imm,
292                                                uint64_t /*Addr*/,
293                                                const MCDisassembler *Decoder) {
294   assert(isUInt<10>(Imm) && "10-bit encoding expected");
295   assert((Imm & (1 << 8)) == 0 && "Imm{8} should not be used");
296 
297   bool IsHi = Imm & (1 << 9);
298   unsigned RegIdx = Imm & 0xff;
299   const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
300   return addOperand(Inst, DAsm->createVGPR16Operand(RegIdx, IsHi));
301 }
302 
303 static DecodeStatus
304 DecodeVGPR_16_Lo128RegisterClass(MCInst &Inst, unsigned Imm, uint64_t /*Addr*/,
305                                  const MCDisassembler *Decoder) {
306   assert(isUInt<8>(Imm) && "8-bit encoding expected");
307 
308   bool IsHi = Imm & (1 << 7);
309   unsigned RegIdx = Imm & 0x7f;
310   const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
311   return addOperand(Inst, DAsm->createVGPR16Operand(RegIdx, IsHi));
312 }
313 
314 template <AMDGPUDisassembler::OpWidthTy OpWidth, unsigned ImmWidth,
315           unsigned OperandSemantics>
316 static DecodeStatus decodeOperand_VSrcT16_Lo128(MCInst &Inst, unsigned Imm,
317                                                 uint64_t /*Addr*/,
318                                                 const MCDisassembler *Decoder) {
319   assert(isUInt<9>(Imm) && "9-bit encoding expected");
320 
321   const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
322   if (Imm & AMDGPU::EncValues::IS_VGPR) {
323     bool IsHi = Imm & (1 << 7);
324     unsigned RegIdx = Imm & 0x7f;
325     return addOperand(Inst, DAsm->createVGPR16Operand(RegIdx, IsHi));
326   }
327   return addOperand(Inst, DAsm->decodeNonVGPRSrcOp(
328                               OpWidth, Imm & 0xFF, false, ImmWidth,
329                               (AMDGPU::OperandSemantics)OperandSemantics));
330 }
331 
332 template <AMDGPUDisassembler::OpWidthTy OpWidth, unsigned ImmWidth,
333           unsigned OperandSemantics>
334 static DecodeStatus
335 decodeOperand_VSrcT16_Lo128_Deferred(MCInst &Inst, unsigned Imm,
336                                      uint64_t /*Addr*/,
337                                      const MCDisassembler *Decoder) {
338   const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
339   assert(isUInt<9>(Imm) && "9-bit encoding expected");
340 
341   if (Imm & AMDGPU::EncValues::IS_VGPR) {
342     bool IsHi = Imm & (1 << 7);
343     unsigned RegIdx = Imm & 0x7f;
344     return addOperand(Inst, DAsm->createVGPR16Operand(RegIdx, IsHi));
345   }
346   return addOperand(Inst, DAsm->decodeNonVGPRSrcOp(
347                               OpWidth, Imm & 0xFF, true, ImmWidth,
348                               (AMDGPU::OperandSemantics)OperandSemantics));
349 }
350 
351 template <AMDGPUDisassembler::OpWidthTy OpWidth, unsigned ImmWidth,
352           unsigned OperandSemantics>
353 static DecodeStatus decodeOperand_VSrcT16(MCInst &Inst, unsigned Imm,
354                                           uint64_t /*Addr*/,
355                                           const MCDisassembler *Decoder) {
356   assert(isUInt<10>(Imm) && "10-bit encoding expected");
357 
358   const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
359   if (Imm & AMDGPU::EncValues::IS_VGPR) {
360     bool IsHi = Imm & (1 << 9);
361     unsigned RegIdx = Imm & 0xff;
362     return addOperand(Inst, DAsm->createVGPR16Operand(RegIdx, IsHi));
363   }
364   return addOperand(Inst, DAsm->decodeNonVGPRSrcOp(
365                               OpWidth, Imm & 0xFF, false, ImmWidth,
366                               (AMDGPU::OperandSemantics)OperandSemantics));
367 }
368 
369 static DecodeStatus decodeOperand_VGPR_16(MCInst &Inst, unsigned Imm,
370                                           uint64_t /*Addr*/,
371                                           const MCDisassembler *Decoder) {
372   assert(isUInt<10>(Imm) && "10-bit encoding expected");
373   assert(Imm & AMDGPU::EncValues::IS_VGPR && "VGPR expected");
374 
375   const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
376 
377   bool IsHi = Imm & (1 << 9);
378   unsigned RegIdx = Imm & 0xff;
379   return addOperand(Inst, DAsm->createVGPR16Operand(RegIdx, IsHi));
380 }
381 
382 static DecodeStatus decodeOperand_KImmFP(MCInst &Inst, unsigned Imm,
383                                          uint64_t Addr,
384                                          const MCDisassembler *Decoder) {
385   const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
386   return addOperand(Inst, DAsm->decodeMandatoryLiteralConstant(Imm));
387 }
388 
389 static DecodeStatus decodeOperandVOPDDstY(MCInst &Inst, unsigned Val,
390                                           uint64_t Addr, const void *Decoder) {
391   const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
392   return addOperand(Inst, DAsm->decodeVOPDDstYOp(Inst, Val));
393 }
394 
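// Returns true if the operand at OpIdx is a register whose low (or only)
// 32-bit subregister is an AGPR.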
395 static bool IsAGPROperand(const MCInst &Inst, int OpIdx,
396                           const MCRegisterInfo *MRI) {
397   if (OpIdx < 0)
398     return false;
399 
400   const MCOperand &Op = Inst.getOperand(OpIdx);
401   if (!Op.isReg())
402     return false;
403 
404   MCRegister Sub = MRI->getSubReg(Op.getReg(), AMDGPU::sub0);
405   auto Reg = Sub ? Sub : Op.getReg();
406   return Reg >= AMDGPU::AGPR0 && Reg <= AMDGPU::AGPR255;
407 }
408 
409 static DecodeStatus decodeAVLdSt(MCInst &Inst, unsigned Imm,
410                                  AMDGPUDisassembler::OpWidthTy Opw,
411                                  const MCDisassembler *Decoder) {
412   const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
413   if (!DAsm->isGFX90A()) {
414     Imm &= 511;
415   } else {
416     // If atomic has both vdata and vdst their register classes are tied.
417     // The bit is decoded along with the vdst, first operand. We need to
418     // change register class to AGPR if vdst was AGPR.
419     // If a DS instruction has both data0 and data1 their register classes
420     // are also tied.
421     unsigned Opc = Inst.getOpcode();
422     uint64_t TSFlags = DAsm->getMCII()->get(Opc).TSFlags;
423     uint16_t DataNameIdx = (TSFlags & SIInstrFlags::DS) ? AMDGPU::OpName::data0
424                                                         : AMDGPU::OpName::vdata;
425     const MCRegisterInfo *MRI = DAsm->getContext().getRegisterInfo();
426     int DataIdx = AMDGPU::getNamedOperandIdx(Opc, DataNameIdx);
427     if ((int)Inst.getNumOperands() == DataIdx) {
428       int DstIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
429       if (IsAGPROperand(Inst, DstIdx, MRI))
430         Imm |= 512;
431     }
432 
433     if (TSFlags & SIInstrFlags::DS) {
434       int Data2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data1);
435       if ((int)Inst.getNumOperands() == Data2Idx &&
436           IsAGPROperand(Inst, DataIdx, MRI))
437         Imm |= 512;
438     }
439   }
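  // OR-ing in 256 sets Imm{8} (IS_VGPR) so decodeSrcOp selects the vector
  // register range; Imm{9} (512), when set above, marks the register as an AGPR.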
440   return addOperand(Inst, DAsm->decodeSrcOp(Opw, Imm | 256));
441 }
442 
443 template <AMDGPUDisassembler::OpWidthTy Opw>
444 static DecodeStatus decodeAVLdSt(MCInst &Inst, unsigned Imm,
445                                  uint64_t /* Addr */,
446                                  const MCDisassembler *Decoder) {
447   return decodeAVLdSt(Inst, Imm, Opw, Decoder);
448 }
449 
450 static DecodeStatus decodeOperand_VSrc_f64(MCInst &Inst, unsigned Imm,
451                                            uint64_t Addr,
452                                            const MCDisassembler *Decoder) {
453   assert(Imm < (1 << 9) && "9-bit encoding");
454   const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
455   return addOperand(Inst,
456                     DAsm->decodeSrcOp(AMDGPUDisassembler::OPW64, Imm, false, 64,
457                                       AMDGPU::OperandSemantics::FP64));
458 }
459 
460 #define DECODE_SDWA(DecName) \
461 DECODE_OPERAND(decodeSDWA##DecName, decodeSDWA##DecName)
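// E.g. DECODE_SDWA(Src32) defines decodeSDWASrc32(), which forwards the
// immediate to AMDGPUDisassembler::decodeSDWASrc32().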
462 
463 DECODE_SDWA(Src32)
464 DECODE_SDWA(Src16)
465 DECODE_SDWA(VopcDst)
466 
467 static DecodeStatus decodeVersionImm(MCInst &Inst, unsigned Imm,
468                                      uint64_t /* Addr */,
469                                      const MCDisassembler *Decoder) {
470   const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
471   return addOperand(Inst, DAsm->decodeVersionImm(Imm));
472 }
473 
474 #include "AMDGPUGenDisassemblerTables.inc"
475 
476 //===----------------------------------------------------------------------===//
477 //
478 //===----------------------------------------------------------------------===//
479 
480 template <typename T> static inline T eatBytes(ArrayRef<uint8_t>& Bytes) {
481   assert(Bytes.size() >= sizeof(T));
482   const auto Res =
483       support::endian::read<T, llvm::endianness::little>(Bytes.data());
484   Bytes = Bytes.slice(sizeof(T));
485   return Res;
486 }
487 
488 static inline DecoderUInt128 eat12Bytes(ArrayRef<uint8_t> &Bytes) {
489   assert(Bytes.size() >= 12);
490   uint64_t Lo =
491       support::endian::read<uint64_t, llvm::endianness::little>(Bytes.data());
492   Bytes = Bytes.slice(8);
493   uint64_t Hi =
494       support::endian::read<uint32_t, llvm::endianness::little>(Bytes.data());
495   Bytes = Bytes.slice(4);
496   return DecoderUInt128(Lo, Hi);
497 }
498 
499 static inline DecoderUInt128 eat16Bytes(ArrayRef<uint8_t> &Bytes) {
500   assert(Bytes.size() >= 16);
501   uint64_t Lo =
502       support::endian::read<uint64_t, llvm::endianness::little>(Bytes.data());
503   Bytes = Bytes.slice(8);
504   uint64_t Hi =
505       support::endian::read<uint64_t, llvm::endianness::little>(Bytes.data());
506   Bytes = Bytes.slice(8);
507   return DecoderUInt128(Lo, Hi);
508 }
509 
510 DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
511                                                 ArrayRef<uint8_t> Bytes_,
512                                                 uint64_t Address,
513                                                 raw_ostream &CS) const {
514   unsigned MaxInstBytesNum = std::min((size_t)TargetMaxInstBytes, Bytes_.size());
515   Bytes = Bytes_.slice(0, MaxInstBytesNum);
516 
517   // In case the opcode is not recognized we'll assume a Size of 4 bytes (unless
518   // there are fewer bytes left). This will be overridden on success.
519   Size = std::min((size_t)4, Bytes_.size());
520 
521   do {
522     // ToDo: it would be better to switch the encoding length using some bit
523     // predicate, but it is unknown yet, so try everything we can.
524 
525     // Try to decode DPP and SDWA first to resolve the conflict with VOP1 and
526     // VOP2 encodings.
527     if (isGFX11Plus() && Bytes.size() >= 12) {
528       DecoderUInt128 DecW = eat12Bytes(Bytes);
529 
530       if (isGFX11() &&
531           tryDecodeInst(DecoderTableGFX1196, DecoderTableGFX11_FAKE1696, MI,
532                         DecW, Address, CS))
533         break;
534 
535       if (isGFX12() &&
536           tryDecodeInst(DecoderTableGFX1296, DecoderTableGFX12_FAKE1696, MI,
537                         DecW, Address, CS))
538         break;
539 
540       if (isGFX12() &&
541           tryDecodeInst(DecoderTableGFX12W6496, MI, DecW, Address, CS))
542         break;
543 
544       // Reinitialize Bytes
545       Bytes = Bytes_.slice(0, MaxInstBytesNum);
546 
547     } else if (Bytes.size() >= 16 &&
548                STI.hasFeature(AMDGPU::FeatureGFX950Insts)) {
549       DecoderUInt128 DecW = eat16Bytes(Bytes);
550       if (tryDecodeInst(DecoderTableGFX940128, MI, DecW, Address, CS))
551         break;
552 
553       // Reinitialize Bytes
554       Bytes = Bytes_.slice(0, MaxInstBytesNum);
555     }
556 
557     if (Bytes.size() >= 8) {
558       const uint64_t QW = eatBytes<uint64_t>(Bytes);
559 
560       if (STI.hasFeature(AMDGPU::FeatureGFX10_BEncoding) &&
561           tryDecodeInst(DecoderTableGFX10_B64, MI, QW, Address, CS))
562         break;
563 
564       if (STI.hasFeature(AMDGPU::FeatureUnpackedD16VMem) &&
565           tryDecodeInst(DecoderTableGFX80_UNPACKED64, MI, QW, Address, CS))
566         break;
567 
568       // Some GFX9 subtargets repurposed the v_mad_mix_f32, v_mad_mixlo_f16 and
569       // v_mad_mixhi_f16 for FMA variants. Try to decode using this special
570       // table first so we print the correct name.
571       if (STI.hasFeature(AMDGPU::FeatureFmaMixInsts) &&
572           tryDecodeInst(DecoderTableGFX9_DL64, MI, QW, Address, CS))
573         break;
574 
575       if (STI.hasFeature(AMDGPU::FeatureGFX940Insts) &&
576           tryDecodeInst(DecoderTableGFX94064, MI, QW, Address, CS))
577         break;
578 
579       if (STI.hasFeature(AMDGPU::FeatureGFX90AInsts) &&
580           tryDecodeInst(DecoderTableGFX90A64, MI, QW, Address, CS))
581         break;
582 
583       if ((isVI() || isGFX9()) &&
584           tryDecodeInst(DecoderTableGFX864, MI, QW, Address, CS))
585         break;
586 
587       if (isGFX9() && tryDecodeInst(DecoderTableGFX964, MI, QW, Address, CS))
588         break;
589 
590       if (isGFX10() && tryDecodeInst(DecoderTableGFX1064, MI, QW, Address, CS))
591         break;
592 
593       if (isGFX12() &&
594           tryDecodeInst(DecoderTableGFX1264, DecoderTableGFX12_FAKE1664, MI, QW,
595                         Address, CS))
596         break;
597 
598       if (isGFX11() &&
599           tryDecodeInst(DecoderTableGFX1164, DecoderTableGFX11_FAKE1664, MI, QW,
600                         Address, CS))
601         break;
602 
603       if (isGFX11() &&
604           tryDecodeInst(DecoderTableGFX11W6464, MI, QW, Address, CS))
605         break;
606 
607       if (isGFX12() &&
608           tryDecodeInst(DecoderTableGFX12W6464, MI, QW, Address, CS))
609         break;
610 
611       // Reinitialize Bytes
612       Bytes = Bytes_.slice(0, MaxInstBytesNum);
613     }
614 
615     // Try to decode a 32-bit instruction.
616     if (Bytes.size() >= 4) {
617       const uint32_t DW = eatBytes<uint32_t>(Bytes);
618 
619       if ((isVI() || isGFX9()) &&
620           tryDecodeInst(DecoderTableGFX832, MI, DW, Address, CS))
621         break;
622 
623       if (tryDecodeInst(DecoderTableAMDGPU32, MI, DW, Address, CS))
624         break;
625 
626       if (isGFX9() && tryDecodeInst(DecoderTableGFX932, MI, DW, Address, CS))
627         break;
628 
629       if (STI.hasFeature(AMDGPU::FeatureGFX90AInsts) &&
630           tryDecodeInst(DecoderTableGFX90A32, MI, DW, Address, CS))
631         break;
632 
633       if (STI.hasFeature(AMDGPU::FeatureGFX10_BEncoding) &&
634           tryDecodeInst(DecoderTableGFX10_B32, MI, DW, Address, CS))
635         break;
636 
637       if (isGFX10() && tryDecodeInst(DecoderTableGFX1032, MI, DW, Address, CS))
638         break;
639 
640       if (isGFX11() &&
641           tryDecodeInst(DecoderTableGFX1132, DecoderTableGFX11_FAKE1632, MI, DW,
642                         Address, CS))
643         break;
644 
645       if (isGFX12() &&
646           tryDecodeInst(DecoderTableGFX1232, DecoderTableGFX12_FAKE1632, MI, DW,
647                         Address, CS))
648         break;
649     }
650 
651     return MCDisassembler::Fail;
652   } while (false);
653 
654   if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::DPP) {
655     if (isMacDPP(MI))
656       convertMacDPPInst(MI);
657 
658     if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::VOP3P)
659       convertVOP3PDPPInst(MI);
660     else if ((MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::VOPC) ||
661              AMDGPU::isVOPC64DPP(MI.getOpcode()))
662       convertVOPCDPPInst(MI); // Special VOP3 case
663     else if (AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::dpp8) !=
664              -1)
665       convertDPP8Inst(MI);
666     else if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::VOP3)
667       convertVOP3DPPInst(MI); // Regular VOP3 case
668   }
669 
670   convertTrue16OpSel(MI);
671 
672   if (AMDGPU::isMAC(MI.getOpcode())) {
673     // Insert dummy unused src2_modifiers.
674     insertNamedMCOperand(MI, MCOperand::createImm(0),
675                          AMDGPU::OpName::src2_modifiers);
676   }
677 
678   if (MI.getOpcode() == AMDGPU::V_CVT_SR_BF8_F32_e64_dpp ||
679       MI.getOpcode() == AMDGPU::V_CVT_SR_FP8_F32_e64_dpp) {
680     // Insert dummy unused src2_modifiers.
681     insertNamedMCOperand(MI, MCOperand::createImm(0),
682                          AMDGPU::OpName::src2_modifiers);
683   }
684 
685   if ((MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::DS) &&
686       !AMDGPU::hasGDS(STI)) {
687     insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::gds);
688   }
689 
690   if (MCII->get(MI.getOpcode()).TSFlags &
691       (SIInstrFlags::MUBUF | SIInstrFlags::FLAT | SIInstrFlags::SMRD)) {
692     int CPolPos = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
693                                              AMDGPU::OpName::cpol);
694     if (CPolPos != -1) {
695       unsigned CPol =
696           (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::IsAtomicRet) ?
697               AMDGPU::CPol::GLC : 0;
698       if (MI.getNumOperands() <= (unsigned)CPolPos) {
699         insertNamedMCOperand(MI, MCOperand::createImm(CPol),
700                              AMDGPU::OpName::cpol);
701       } else if (CPol) {
702         MI.getOperand(CPolPos).setImm(MI.getOperand(CPolPos).getImm() | CPol);
703       }
704     }
705   }
706 
707   if ((MCII->get(MI.getOpcode()).TSFlags &
708        (SIInstrFlags::MTBUF | SIInstrFlags::MUBUF)) &&
709       (STI.hasFeature(AMDGPU::FeatureGFX90AInsts))) {
710     // GFX90A lost TFE, its place is occupied by ACC.
711     int TFEOpIdx =
712         AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::tfe);
713     if (TFEOpIdx != -1) {
714       auto *TFEIter = MI.begin();
715       std::advance(TFEIter, TFEOpIdx);
716       MI.insert(TFEIter, MCOperand::createImm(0));
717     }
718   }
719 
720   if (MCII->get(MI.getOpcode()).TSFlags &
721       (SIInstrFlags::MTBUF | SIInstrFlags::MUBUF)) {
722     int SWZOpIdx =
723         AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::swz);
724     if (SWZOpIdx != -1) {
725       auto *SWZIter = MI.begin();
726       std::advance(SWZIter, SWZOpIdx);
727       MI.insert(SWZIter, MCOperand::createImm(0));
728     }
729   }
730 
731   if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::MIMG) {
732     int VAddr0Idx =
733         AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0);
734     int RsrcIdx =
735         AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc);
736     unsigned NSAArgs = RsrcIdx - VAddr0Idx - 1;
737     if (VAddr0Idx >= 0 && NSAArgs > 0) {
738       unsigned NSAWords = (NSAArgs + 3) / 4;
739       if (Bytes.size() < 4 * NSAWords)
740         return MCDisassembler::Fail;
741       for (unsigned i = 0; i < NSAArgs; ++i) {
742         const unsigned VAddrIdx = VAddr0Idx + 1 + i;
743         auto VAddrRCID =
744             MCII->get(MI.getOpcode()).operands()[VAddrIdx].RegClass;
745         MI.insert(MI.begin() + VAddrIdx, createRegOperand(VAddrRCID, Bytes[i]));
746       }
747       Bytes = Bytes.slice(4 * NSAWords);
748     }
749 
750     convertMIMGInst(MI);
751   }
752 
753   if (MCII->get(MI.getOpcode()).TSFlags &
754       (SIInstrFlags::VIMAGE | SIInstrFlags::VSAMPLE))
755     convertMIMGInst(MI);
756 
757   if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::EXP)
758     convertEXPInst(MI);
759 
760   if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::VINTERP)
761     convertVINTERPInst(MI);
762 
763   if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::SDWA)
764     convertSDWAInst(MI);
765 
766   if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::IsMAI)
767     convertMAIInst(MI);
768 
769   int VDstIn_Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
770                                               AMDGPU::OpName::vdst_in);
771   if (VDstIn_Idx != -1) {
772     int Tied = MCII->get(MI.getOpcode()).getOperandConstraint(VDstIn_Idx,
773                            MCOI::OperandConstraint::TIED_TO);
774     if (Tied != -1 && (MI.getNumOperands() <= (unsigned)VDstIn_Idx ||
775          !MI.getOperand(VDstIn_Idx).isReg() ||
776          MI.getOperand(VDstIn_Idx).getReg() != MI.getOperand(Tied).getReg())) {
777       if (MI.getNumOperands() > (unsigned)VDstIn_Idx)
778         MI.erase(&MI.getOperand(VDstIn_Idx));
779       insertNamedMCOperand(MI,
780         MCOperand::createReg(MI.getOperand(Tied).getReg()),
781         AMDGPU::OpName::vdst_in);
782     }
783   }
784 
785   int ImmLitIdx =
786       AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::imm);
787   bool IsSOPK = MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::SOPK;
788   if (ImmLitIdx != -1 && !IsSOPK)
789     convertFMAanyK(MI, ImmLitIdx);
790 
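  // The instruction size is however many bytes were consumed from Bytes during
  // decoding.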
791   Size = MaxInstBytesNum - Bytes.size();
792   return MCDisassembler::Success;
793 }
794 
795 void AMDGPUDisassembler::convertEXPInst(MCInst &MI) const {
796   if (STI.hasFeature(AMDGPU::FeatureGFX11Insts)) {
797     // The MCInst still has these fields even though they are no longer encoded
798     // in the GFX11 instruction.
799     insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::vm);
800     insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::compr);
801   }
802 }
803 
804 void AMDGPUDisassembler::convertVINTERPInst(MCInst &MI) const {
805   convertTrue16OpSel(MI);
806   if (MI.getOpcode() == AMDGPU::V_INTERP_P10_F16_F32_inreg_t16_gfx11 ||
807       MI.getOpcode() == AMDGPU::V_INTERP_P10_F16_F32_inreg_fake16_gfx11 ||
808       MI.getOpcode() == AMDGPU::V_INTERP_P10_F16_F32_inreg_t16_gfx12 ||
809       MI.getOpcode() == AMDGPU::V_INTERP_P10_F16_F32_inreg_fake16_gfx12 ||
810       MI.getOpcode() == AMDGPU::V_INTERP_P10_RTZ_F16_F32_inreg_t16_gfx11 ||
811       MI.getOpcode() == AMDGPU::V_INTERP_P10_RTZ_F16_F32_inreg_fake16_gfx11 ||
812       MI.getOpcode() == AMDGPU::V_INTERP_P10_RTZ_F16_F32_inreg_t16_gfx12 ||
813       MI.getOpcode() == AMDGPU::V_INTERP_P10_RTZ_F16_F32_inreg_fake16_gfx12 ||
814       MI.getOpcode() == AMDGPU::V_INTERP_P2_F16_F32_inreg_t16_gfx11 ||
815       MI.getOpcode() == AMDGPU::V_INTERP_P2_F16_F32_inreg_fake16_gfx11 ||
816       MI.getOpcode() == AMDGPU::V_INTERP_P2_F16_F32_inreg_t16_gfx12 ||
817       MI.getOpcode() == AMDGPU::V_INTERP_P2_F16_F32_inreg_fake16_gfx12 ||
818       MI.getOpcode() == AMDGPU::V_INTERP_P2_RTZ_F16_F32_inreg_t16_gfx11 ||
819       MI.getOpcode() == AMDGPU::V_INTERP_P2_RTZ_F16_F32_inreg_fake16_gfx11 ||
820       MI.getOpcode() == AMDGPU::V_INTERP_P2_RTZ_F16_F32_inreg_t16_gfx12 ||
821       MI.getOpcode() == AMDGPU::V_INTERP_P2_RTZ_F16_F32_inreg_fake16_gfx12) {
822     // The MCInst has this field that is not directly encoded in the
823     // instruction.
824     insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::op_sel);
825   }
826 }
827 
828 void AMDGPUDisassembler::convertSDWAInst(MCInst &MI) const {
829   if (STI.hasFeature(AMDGPU::FeatureGFX9) ||
830       STI.hasFeature(AMDGPU::FeatureGFX10)) {
831     if (AMDGPU::hasNamedOperand(MI.getOpcode(), AMDGPU::OpName::sdst))
832       // VOPC - insert clamp
833       insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::clamp);
834   } else if (STI.hasFeature(AMDGPU::FeatureVolcanicIslands)) {
835     int SDst = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst);
836     if (SDst != -1) {
837       // VOPC - insert VCC register as sdst
838       insertNamedMCOperand(MI, createRegOperand(AMDGPU::VCC),
839                            AMDGPU::OpName::sdst);
840     } else {
841       // VOP1/2 - insert omod if present in instruction
842       insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::omod);
843     }
844   }
845 }
846 
847 /// Adjust the register values used by V_MFMA_F8F6F4_f8_f8 instructions to the
848 /// appropriate subregister for the format width used.
849 static void adjustMFMA_F8F6F4OpRegClass(const MCRegisterInfo &MRI,
850                                         MCOperand &MO, uint8_t NumRegs) {
851   switch (NumRegs) {
852   case 4:
853     return MO.setReg(MRI.getSubReg(MO.getReg(), AMDGPU::sub0_sub1_sub2_sub3));
854   case 6:
855     return MO.setReg(
856         MRI.getSubReg(MO.getReg(), AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5));
857   case 8:
858     // No-op in cases where one operand is still f8/bf8.
859     return;
860   default:
861     llvm_unreachable("Unexpected size for mfma f8f6f4 operand");
862   }
863 }
864 
865 /// f8f6f4 instructions have different pseudos depending on the formats used. In
866 /// the disassembler table, we only have the variants with the largest register
867 /// classes, which assume an fp8/bf8 format for both operands. The actual
868 /// register class depends on the format in the blgp and cbsz operands. Adjust
869 /// the register classes depending on the format used.
870 void AMDGPUDisassembler::convertMAIInst(MCInst &MI) const {
871   int BlgpIdx =
872       AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::blgp);
873   if (BlgpIdx == -1)
874     return;
875 
876   int CbszIdx =
877       AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::cbsz);
878 
879   unsigned CBSZ = MI.getOperand(CbszIdx).getImm();
880   unsigned BLGP = MI.getOperand(BlgpIdx).getImm();
881 
882   const AMDGPU::MFMA_F8F6F4_Info *AdjustedRegClassOpcode =
883       AMDGPU::getMFMA_F8F6F4_WithFormatArgs(CBSZ, BLGP, MI.getOpcode());
884   if (!AdjustedRegClassOpcode ||
885       AdjustedRegClassOpcode->Opcode == MI.getOpcode())
886     return;
887 
888   MI.setOpcode(AdjustedRegClassOpcode->Opcode);
889   int Src0Idx =
890       AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);
891   int Src1Idx =
892       AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src1);
893   adjustMFMA_F8F6F4OpRegClass(MRI, MI.getOperand(Src0Idx),
894                               AdjustedRegClassOpcode->NumRegsSrcA);
895   adjustMFMA_F8F6F4OpRegClass(MRI, MI.getOperand(Src1Idx),
896                               AdjustedRegClassOpcode->NumRegsSrcB);
897 }
898 
899 struct VOPModifiers {
900   unsigned OpSel = 0;
901   unsigned OpSelHi = 0;
902   unsigned NegLo = 0;
903   unsigned NegHi = 0;
904 };
905 
906 // Reconstruct values of VOP3/VOP3P operands such as op_sel.
907 // Note that these values do not affect disassembler output,
908 // so this is only necessary for consistency with src_modifiers.
909 static VOPModifiers collectVOPModifiers(const MCInst &MI,
910                                         bool IsVOP3P = false) {
911   VOPModifiers Modifiers;
912   unsigned Opc = MI.getOpcode();
913   const int ModOps[] = {AMDGPU::OpName::src0_modifiers,
914                         AMDGPU::OpName::src1_modifiers,
915                         AMDGPU::OpName::src2_modifiers};
916   for (int J = 0; J < 3; ++J) {
917     int OpIdx = AMDGPU::getNamedOperandIdx(Opc, ModOps[J]);
918     if (OpIdx == -1)
919       continue;
920 
921     unsigned Val = MI.getOperand(OpIdx).getImm();
922 
923     Modifiers.OpSel |= !!(Val & SISrcMods::OP_SEL_0) << J;
924     if (IsVOP3P) {
925       Modifiers.OpSelHi |= !!(Val & SISrcMods::OP_SEL_1) << J;
926       Modifiers.NegLo |= !!(Val & SISrcMods::NEG) << J;
927       Modifiers.NegHi |= !!(Val & SISrcMods::NEG_HI) << J;
928     } else if (J == 0) {
929       Modifiers.OpSel |= !!(Val & SISrcMods::DST_OP_SEL) << 3;
930     }
931   }
932 
933   return Modifiers;
934 }
935 
936 // Instructions decode the op_sel/suffix bits into the src_modifier
937 // operands. Copy those bits into the src operands for true16 VGPRs.
938 void AMDGPUDisassembler::convertTrue16OpSel(MCInst &MI) const {
939   const unsigned Opc = MI.getOpcode();
940   const MCRegisterClass &ConversionRC =
941       MRI.getRegClass(AMDGPU::VGPR_16RegClassID);
942   constexpr std::array<std::tuple<int, int, unsigned>, 4> OpAndOpMods = {
943       {{AMDGPU::OpName::src0, AMDGPU::OpName::src0_modifiers,
944         SISrcMods::OP_SEL_0},
945        {AMDGPU::OpName::src1, AMDGPU::OpName::src1_modifiers,
946         SISrcMods::OP_SEL_0},
947        {AMDGPU::OpName::src2, AMDGPU::OpName::src2_modifiers,
948         SISrcMods::OP_SEL_0},
949        {AMDGPU::OpName::vdst, AMDGPU::OpName::src0_modifiers,
950         SISrcMods::DST_OP_SEL}}};
951   for (const auto &[OpName, OpModsName, OpSelMask] : OpAndOpMods) {
952     int OpIdx = AMDGPU::getNamedOperandIdx(Opc, OpName);
953     int OpModsIdx = AMDGPU::getNamedOperandIdx(Opc, OpModsName);
954     if (OpIdx == -1 || OpModsIdx == -1)
955       continue;
956     MCOperand &Op = MI.getOperand(OpIdx);
957     if (!Op.isReg())
958       continue;
959     if (!ConversionRC.contains(Op.getReg()))
960       continue;
961     unsigned OpEnc = MRI.getEncodingValue(Op.getReg());
962     const MCOperand &OpMods = MI.getOperand(OpModsIdx);
963     unsigned ModVal = OpMods.getImm();
964     if (ModVal & OpSelMask) { // isHi
965       unsigned RegIdx = OpEnc & AMDGPU::HWEncoding::REG_IDX_MASK;
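      // VGPR_16 registers come in lo/hi pairs, so 2 * RegIdx + 1 is the hi
      // 16-bit half of the VGPR (see createVGPR16Operand).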
966       Op.setReg(ConversionRC.getRegister(RegIdx * 2 + 1));
967     }
968   }
969 }
970 
971 // MAC opcodes have special old and src2 operands.
972 // src2 is tied to dst, while old is not tied (but assumed to be).
973 bool AMDGPUDisassembler::isMacDPP(MCInst &MI) const {
974   constexpr int DST_IDX = 0;
975   auto Opcode = MI.getOpcode();
976   const auto &Desc = MCII->get(Opcode);
977   auto OldIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::old);
978 
979   if (OldIdx != -1 && Desc.getOperandConstraint(
980                           OldIdx, MCOI::OperandConstraint::TIED_TO) == -1) {
981     assert(AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::src2));
982     assert(Desc.getOperandConstraint(
983                AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2),
984                MCOI::OperandConstraint::TIED_TO) == DST_IDX);
985     (void)DST_IDX;
986     return true;
987   }
988 
989   return false;
990 }
991 
992 // Create dummy old operand and insert dummy unused src2_modifiers
993 void AMDGPUDisassembler::convertMacDPPInst(MCInst &MI) const {
994   assert(MI.getNumOperands() + 1 < MCII->get(MI.getOpcode()).getNumOperands());
995   insertNamedMCOperand(MI, MCOperand::createReg(0), AMDGPU::OpName::old);
996   insertNamedMCOperand(MI, MCOperand::createImm(0),
997                        AMDGPU::OpName::src2_modifiers);
998 }
999 
1000 void AMDGPUDisassembler::convertDPP8Inst(MCInst &MI) const {
1001   unsigned Opc = MI.getOpcode();
1002 
1003   int VDstInIdx =
1004       AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdst_in);
1005   if (VDstInIdx != -1)
1006     insertNamedMCOperand(MI, MI.getOperand(0), AMDGPU::OpName::vdst_in);
1007 
1008   unsigned DescNumOps = MCII->get(Opc).getNumOperands();
1009   if (MI.getNumOperands() < DescNumOps &&
1010       AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::op_sel)) {
1011     convertTrue16OpSel(MI);
1012     auto Mods = collectVOPModifiers(MI);
1013     insertNamedMCOperand(MI, MCOperand::createImm(Mods.OpSel),
1014                          AMDGPU::OpName::op_sel);
1015   } else {
1016     // Insert dummy unused src modifiers.
1017     if (MI.getNumOperands() < DescNumOps &&
1018         AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::src0_modifiers))
1019       insertNamedMCOperand(MI, MCOperand::createImm(0),
1020                            AMDGPU::OpName::src0_modifiers);
1021 
1022     if (MI.getNumOperands() < DescNumOps &&
1023         AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::src1_modifiers))
1024       insertNamedMCOperand(MI, MCOperand::createImm(0),
1025                            AMDGPU::OpName::src1_modifiers);
1026   }
1027 }
1028 
1029 void AMDGPUDisassembler::convertVOP3DPPInst(MCInst &MI) const {
1030   convertTrue16OpSel(MI);
1031 
1032   int VDstInIdx =
1033       AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdst_in);
1034   if (VDstInIdx != -1)
1035     insertNamedMCOperand(MI, MI.getOperand(0), AMDGPU::OpName::vdst_in);
1036 
1037   unsigned Opc = MI.getOpcode();
1038   unsigned DescNumOps = MCII->get(Opc).getNumOperands();
1039   if (MI.getNumOperands() < DescNumOps &&
1040       AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::op_sel)) {
1041     auto Mods = collectVOPModifiers(MI);
1042     insertNamedMCOperand(MI, MCOperand::createImm(Mods.OpSel),
1043                          AMDGPU::OpName::op_sel);
1044   }
1045 }
1046 
1047 // Note that before gfx10, the MIMG encoding provided no information about
1048 // VADDR size. Consequently, decoded instructions always show the address as if
1049 // it had 1 dword, which may not actually be the case.
1050 void AMDGPUDisassembler::convertMIMGInst(MCInst &MI) const {
1051   auto TSFlags = MCII->get(MI.getOpcode()).TSFlags;
1052 
1053   int VDstIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
1054                                            AMDGPU::OpName::vdst);
1055 
1056   int VDataIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
1057                                             AMDGPU::OpName::vdata);
1058   int VAddr0Idx =
1059       AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0);
1060   int RsrcOpName = (TSFlags & SIInstrFlags::MIMG) ? AMDGPU::OpName::srsrc
1061                                                   : AMDGPU::OpName::rsrc;
1062   int RsrcIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), RsrcOpName);
1063   int DMaskIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
1064                                             AMDGPU::OpName::dmask);
1065 
1066   int TFEIdx   = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
1067                                             AMDGPU::OpName::tfe);
1068   int D16Idx   = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
1069                                             AMDGPU::OpName::d16);
1070 
1071   const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(MI.getOpcode());
1072   const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
1073       AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode);
1074 
1075   assert(VDataIdx != -1);
1076   if (BaseOpcode->BVH) {
1077     // Add A16 operand for intersect_ray instructions
1078     addOperand(MI, MCOperand::createImm(BaseOpcode->A16));
1079     return;
1080   }
1081 
1082   bool IsAtomic = (VDstIdx != -1);
1083   bool IsGather4 = TSFlags & SIInstrFlags::Gather4;
1084   bool IsVSample = TSFlags & SIInstrFlags::VSAMPLE;
1085   bool IsNSA = false;
1086   bool IsPartialNSA = false;
1087   unsigned AddrSize = Info->VAddrDwords;
1088 
1089   if (isGFX10Plus()) {
1090     unsigned DimIdx =
1091         AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::dim);
1092     int A16Idx =
1093         AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::a16);
1094     const AMDGPU::MIMGDimInfo *Dim =
1095         AMDGPU::getMIMGDimInfoByEncoding(MI.getOperand(DimIdx).getImm());
1096     const bool IsA16 = (A16Idx != -1 && MI.getOperand(A16Idx).getImm());
1097 
1098     AddrSize =
1099         AMDGPU::getAddrSizeMIMGOp(BaseOpcode, Dim, IsA16, AMDGPU::hasG16(STI));
1100 
1101     // VSAMPLE insts that do not use vaddr3 behave the same as NSA forms.
1102     // VIMAGE insts other than BVH never use vaddr4.
1103     IsNSA = Info->MIMGEncoding == AMDGPU::MIMGEncGfx10NSA ||
1104             Info->MIMGEncoding == AMDGPU::MIMGEncGfx11NSA ||
1105             Info->MIMGEncoding == AMDGPU::MIMGEncGfx12;
1106     if (!IsNSA) {
1107       if (!IsVSample && AddrSize > 12)
1108         AddrSize = 16;
1109     } else {
1110       if (AddrSize > Info->VAddrDwords) {
1111         if (!STI.hasFeature(AMDGPU::FeaturePartialNSAEncoding)) {
1112           // The NSA encoding does not contain enough operands for the
1113           // combination of base opcode / dimension. Should this be an error?
1114           return;
1115         }
1116         IsPartialNSA = true;
1117       }
1118     }
1119   }
1120 
1121   unsigned DMask = MI.getOperand(DMaskIdx).getImm() & 0xf;
1122   unsigned DstSize = IsGather4 ? 4 : std::max(llvm::popcount(DMask), 1);
1123 
1124   bool D16 = D16Idx >= 0 && MI.getOperand(D16Idx).getImm();
1125   if (D16 && AMDGPU::hasPackedD16(STI)) {
1126     DstSize = (DstSize + 1) / 2;
1127   }
1128 
1129   if (TFEIdx != -1 && MI.getOperand(TFEIdx).getImm())
1130     DstSize += 1;
1131 
1132   if (DstSize == Info->VDataDwords && AddrSize == Info->VAddrDwords)
1133     return;
1134 
1135   int NewOpcode =
1136       AMDGPU::getMIMGOpcode(Info->BaseOpcode, Info->MIMGEncoding, DstSize, AddrSize);
1137   if (NewOpcode == -1)
1138     return;
1139 
1140   // Widen the register to the correct number of enabled channels.
1141   MCRegister NewVdata;
1142   if (DstSize != Info->VDataDwords) {
1143     auto DataRCID = MCII->get(NewOpcode).operands()[VDataIdx].RegClass;
1144 
1145     // Get first subregister of VData
1146     MCRegister Vdata0 = MI.getOperand(VDataIdx).getReg();
1147     MCRegister VdataSub0 = MRI.getSubReg(Vdata0, AMDGPU::sub0);
1148     Vdata0 = (VdataSub0 != 0)? VdataSub0 : Vdata0;
1149 
1150     NewVdata = MRI.getMatchingSuperReg(Vdata0, AMDGPU::sub0,
1151                                        &MRI.getRegClass(DataRCID));
1152     if (!NewVdata) {
1153       // It's possible to encode this such that the low register + enabled
1154       // components exceeds the register count.
1155       return;
1156     }
1157   }
1158 
1159   // If not using NSA on GFX10+, widen the vaddr0 address register to the correct
1160   // size. If using partial NSA on GFX11+, widen the last address register.
1161   int VAddrSAIdx = IsPartialNSA ? (RsrcIdx - 1) : VAddr0Idx;
1162   MCRegister NewVAddrSA;
1163   if (STI.hasFeature(AMDGPU::FeatureNSAEncoding) && (!IsNSA || IsPartialNSA) &&
1164       AddrSize != Info->VAddrDwords) {
1165     MCRegister VAddrSA = MI.getOperand(VAddrSAIdx).getReg();
1166     MCRegister VAddrSubSA = MRI.getSubReg(VAddrSA, AMDGPU::sub0);
1167     VAddrSA = VAddrSubSA ? VAddrSubSA : VAddrSA;
1168 
1169     auto AddrRCID = MCII->get(NewOpcode).operands()[VAddrSAIdx].RegClass;
1170     NewVAddrSA = MRI.getMatchingSuperReg(VAddrSA, AMDGPU::sub0,
1171                                         &MRI.getRegClass(AddrRCID));
1172     if (!NewVAddrSA)
1173       return;
1174   }
1175 
1176   MI.setOpcode(NewOpcode);
1177 
1178   if (NewVdata != AMDGPU::NoRegister) {
1179     MI.getOperand(VDataIdx) = MCOperand::createReg(NewVdata);
1180 
1181     if (IsAtomic) {
1182       // Atomic operations have an additional operand (a copy of data)
1183       MI.getOperand(VDstIdx) = MCOperand::createReg(NewVdata);
1184     }
1185   }
1186 
1187   if (NewVAddrSA) {
1188     MI.getOperand(VAddrSAIdx) = MCOperand::createReg(NewVAddrSA);
1189   } else if (IsNSA) {
1190     assert(AddrSize <= Info->VAddrDwords);
1191     MI.erase(MI.begin() + VAddr0Idx + AddrSize,
1192              MI.begin() + VAddr0Idx + Info->VAddrDwords);
1193   }
1194 }
1195 
1196 // Opsel and neg bits are used both in src_modifiers and in standalone operands.
1197 // The autogenerated decoder only adds them to src_modifiers, so manually add
1198 // the bits to the other operands.
1199 void AMDGPUDisassembler::convertVOP3PDPPInst(MCInst &MI) const {
1200   unsigned Opc = MI.getOpcode();
1201   unsigned DescNumOps = MCII->get(Opc).getNumOperands();
1202   auto Mods = collectVOPModifiers(MI, true);
1203 
1204   if (MI.getNumOperands() < DescNumOps &&
1205       AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::vdst_in))
1206     insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::vdst_in);
1207 
1208   if (MI.getNumOperands() < DescNumOps &&
1209       AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::op_sel))
1210     insertNamedMCOperand(MI, MCOperand::createImm(Mods.OpSel),
1211                          AMDGPU::OpName::op_sel);
1212   if (MI.getNumOperands() < DescNumOps &&
1213       AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::op_sel_hi))
1214     insertNamedMCOperand(MI, MCOperand::createImm(Mods.OpSelHi),
1215                          AMDGPU::OpName::op_sel_hi);
1216   if (MI.getNumOperands() < DescNumOps &&
1217       AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::neg_lo))
1218     insertNamedMCOperand(MI, MCOperand::createImm(Mods.NegLo),
1219                          AMDGPU::OpName::neg_lo);
1220   if (MI.getNumOperands() < DescNumOps &&
1221       AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::neg_hi))
1222     insertNamedMCOperand(MI, MCOperand::createImm(Mods.NegHi),
1223                          AMDGPU::OpName::neg_hi);
1224 }
1225 
1226 // Create dummy old operand and insert optional operands
1227 void AMDGPUDisassembler::convertVOPCDPPInst(MCInst &MI) const {
1228   unsigned Opc = MI.getOpcode();
1229   unsigned DescNumOps = MCII->get(Opc).getNumOperands();
1230 
1231   if (MI.getNumOperands() < DescNumOps &&
1232       AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::old))
1233     insertNamedMCOperand(MI, MCOperand::createReg(0), AMDGPU::OpName::old);
1234 
1235   if (MI.getNumOperands() < DescNumOps &&
1236       AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::src0_modifiers))
1237     insertNamedMCOperand(MI, MCOperand::createImm(0),
1238                          AMDGPU::OpName::src0_modifiers);
1239 
1240   if (MI.getNumOperands() < DescNumOps &&
1241       AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::src1_modifiers))
1242     insertNamedMCOperand(MI, MCOperand::createImm(0),
1243                          AMDGPU::OpName::src1_modifiers);
1244 }
1245 
1246 void AMDGPUDisassembler::convertFMAanyK(MCInst &MI, int ImmLitIdx) const {
1247   assert(HasLiteral && "Should have decoded a literal");
1248   const MCInstrDesc &Desc = MCII->get(MI.getOpcode());
1249   unsigned DescNumOps = Desc.getNumOperands();
1250   insertNamedMCOperand(MI, MCOperand::createImm(Literal),
1251                        AMDGPU::OpName::immDeferred);
1252   assert(DescNumOps == MI.getNumOperands());
1253   for (unsigned I = 0; I < DescNumOps; ++I) {
1254     auto &Op = MI.getOperand(I);
1255     auto OpType = Desc.operands()[I].OperandType;
1256     bool IsDeferredOp = (OpType == AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED ||
1257                          OpType == AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED);
1258     if (Op.isImm() && Op.getImm() == AMDGPU::EncValues::LITERAL_CONST &&
1259         IsDeferredOp)
1260       Op.setImm(Literal);
1261   }
1262 }
1263 
1264 const char* AMDGPUDisassembler::getRegClassName(unsigned RegClassID) const {
1265   return getContext().getRegisterInfo()->
1266     getRegClassName(&AMDGPUMCRegisterClasses[RegClassID]);
1267 }
1268 
1269 inline
1270 MCOperand AMDGPUDisassembler::errOperand(unsigned V,
1271                                          const Twine& ErrMsg) const {
1272   *CommentStream << "Error: " + ErrMsg;
1273 
1274   // ToDo: add support for error operands to MCInst.h
1275   // return MCOperand::createError(V);
1276   return MCOperand();
1277 }
1278 
1279 inline
1280 MCOperand AMDGPUDisassembler::createRegOperand(unsigned int RegId) const {
1281   return MCOperand::createReg(AMDGPU::getMCReg(RegId, STI));
1282 }
1283 
1284 inline
1285 MCOperand AMDGPUDisassembler::createRegOperand(unsigned RegClassID,
1286                                                unsigned Val) const {
1287   const auto& RegCl = AMDGPUMCRegisterClasses[RegClassID];
1288   if (Val >= RegCl.getNumRegs())
1289     return errOperand(Val, Twine(getRegClassName(RegClassID)) +
1290                            ": unknown register " + Twine(Val));
1291   return createRegOperand(RegCl.getRegister(Val));
1292 }
1293 
1294 inline
1295 MCOperand AMDGPUDisassembler::createSRegOperand(unsigned SRegClassID,
1296                                                 unsigned Val) const {
1297   // ToDo: SI/CI have 104 SGPRs, VI has 102.
1298   // We accept as much as we can here and let the assembler sort it out.
1299   int shift = 0;
1300   switch (SRegClassID) {
1301   case AMDGPU::SGPR_32RegClassID:
1302   case AMDGPU::TTMP_32RegClassID:
1303     break;
1304   case AMDGPU::SGPR_64RegClassID:
1305   case AMDGPU::TTMP_64RegClassID:
1306     shift = 1;
1307     break;
1308   case AMDGPU::SGPR_96RegClassID:
1309   case AMDGPU::TTMP_96RegClassID:
1310   case AMDGPU::SGPR_128RegClassID:
1311   case AMDGPU::TTMP_128RegClassID:
1312   // ToDo: unclear if s[100:104] is available on VI. Can we use VCC as SGPR in
1313   // this bundle?
1314   case AMDGPU::SGPR_256RegClassID:
1315   case AMDGPU::TTMP_256RegClassID:
1316   // ToDo: unclear if s[96:104] is available on VI. Can we use VCC as SGPR in
1317   // this bundle?
1318   case AMDGPU::SGPR_288RegClassID:
1319   case AMDGPU::TTMP_288RegClassID:
1320   case AMDGPU::SGPR_320RegClassID:
1321   case AMDGPU::TTMP_320RegClassID:
1322   case AMDGPU::SGPR_352RegClassID:
1323   case AMDGPU::TTMP_352RegClassID:
1324   case AMDGPU::SGPR_384RegClassID:
1325   case AMDGPU::TTMP_384RegClassID:
1326   case AMDGPU::SGPR_512RegClassID:
1327   case AMDGPU::TTMP_512RegClassID:
1328     shift = 2;
1329     break;
1330   // ToDo: unclear if s[88:104] is available on VI. Can we use VCC as SGPR in
1331   // this bundle?
1332   default:
1333     llvm_unreachable("unhandled register class");
1334   }
1335 
1336   if (Val % (1 << shift)) {
1337     *CommentStream << "Warning: " << getRegClassName(SRegClassID)
1338                    << ": scalar reg isn't aligned " << Val;
1339   }
1340 
1341   return createRegOperand(SRegClassID, Val >> shift);
1342 }
1343 
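     // True16 VGPR operands address 16-bit register halves: the VGPR_16 index
     // is twice the 32-bit VGPR index, plus one for the high half.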
1344 MCOperand AMDGPUDisassembler::createVGPR16Operand(unsigned RegIdx,
1345                                                   bool IsHi) const {
1346   unsigned RegIdxInVGPR16 = RegIdx * 2 + (IsHi ? 1 : 0);
1347   return createRegOperand(AMDGPU::VGPR_16RegClassID, RegIdxInVGPR16);
1348 }
1349 
1350 // Decode literals for instructions which always have a literal in the encoding.
1351 MCOperand
1352 AMDGPUDisassembler::decodeMandatoryLiteralConstant(unsigned Val) const {
1353   if (HasLiteral) {
1354     assert(
1355         AMDGPU::hasVOPD(STI) &&
1356         "Should only decode multiple kimm with VOPD, check VSrc operand types");
1357     if (Literal != Val)
1358       return errOperand(Val, "More than one unique literal is illegal");
1359   }
1360   HasLiteral = true;
1361   Literal = Val;
1362   return MCOperand::createImm(Literal);
1363 }
1364 
1365 MCOperand AMDGPUDisassembler::decodeLiteralConstant(bool ExtendFP64) const {
1366   // For now all literal constants are assumed to be unsigned integers.
1367   // ToDo: deal with signed/unsigned 64-bit integer constants
1368   // ToDo: deal with float/double constants
1369   if (!HasLiteral) {
1370     if (Bytes.size() < 4) {
1371       return errOperand(0, "cannot read literal, inst bytes left " +
1372                         Twine(Bytes.size()));
1373     }
1374     HasLiteral = true;
1375     Literal = Literal64 = eatBytes<uint32_t>(Bytes);
1376     if (ExtendFP64)
1377       Literal64 <<= 32;
1378   }
1379   return MCOperand::createImm(ExtendFP64 ? Literal64 : Literal);
1380 }
1381 
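     // Inline integer constants: within [INLINE_INTEGER_C_MIN,
     // INLINE_INTEGER_C_POSITIVE_MAX] the decoded value counts up from 0;
     // encodings above that count down from -1.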
1382 MCOperand AMDGPUDisassembler::decodeIntImmed(unsigned Imm) {
1383   using namespace AMDGPU::EncValues;
1384 
1385   assert(Imm >= INLINE_INTEGER_C_MIN && Imm <= INLINE_INTEGER_C_MAX);
1386   return MCOperand::createImm((Imm <= INLINE_INTEGER_C_POSITIVE_MAX) ?
1387     (static_cast<int64_t>(Imm) - INLINE_INTEGER_C_MIN) :
1388     (INLINE_INTEGER_C_POSITIVE_MAX - static_cast<int64_t>(Imm)));
1389       // Cast prevents negative overflow.
1390 }
1391 
1392 static int64_t getInlineImmVal32(unsigned Imm) {
1393   switch (Imm) {
1394   case 240:
1395     return llvm::bit_cast<uint32_t>(0.5f);
1396   case 241:
1397     return llvm::bit_cast<uint32_t>(-0.5f);
1398   case 242:
1399     return llvm::bit_cast<uint32_t>(1.0f);
1400   case 243:
1401     return llvm::bit_cast<uint32_t>(-1.0f);
1402   case 244:
1403     return llvm::bit_cast<uint32_t>(2.0f);
1404   case 245:
1405     return llvm::bit_cast<uint32_t>(-2.0f);
1406   case 246:
1407     return llvm::bit_cast<uint32_t>(4.0f);
1408   case 247:
1409     return llvm::bit_cast<uint32_t>(-4.0f);
1410   case 248: // 1 / (2 * PI)
1411     return 0x3e22f983;
1412   default:
1413     llvm_unreachable("invalid fp inline imm");
1414   }
1415 }
1416 
1417 static int64_t getInlineImmVal64(unsigned Imm) {
1418   switch (Imm) {
1419   case 240:
1420     return llvm::bit_cast<uint64_t>(0.5);
1421   case 241:
1422     return llvm::bit_cast<uint64_t>(-0.5);
1423   case 242:
1424     return llvm::bit_cast<uint64_t>(1.0);
1425   case 243:
1426     return llvm::bit_cast<uint64_t>(-1.0);
1427   case 244:
1428     return llvm::bit_cast<uint64_t>(2.0);
1429   case 245:
1430     return llvm::bit_cast<uint64_t>(-2.0);
1431   case 246:
1432     return llvm::bit_cast<uint64_t>(4.0);
1433   case 247:
1434     return llvm::bit_cast<uint64_t>(-4.0);
1435   case 248: // 1 / (2 * PI)
1436     return 0x3fc45f306dc9c882;
1437   default:
1438     llvm_unreachable("invalid fp inline imm");
1439   }
1440 }
1441 
1442 static int64_t getInlineImmValF16(unsigned Imm) {
1443   switch (Imm) {
1444   case 240:
1445     return 0x3800;
1446   case 241:
1447     return 0xB800;
1448   case 242:
1449     return 0x3C00;
1450   case 243:
1451     return 0xBC00;
1452   case 244:
1453     return 0x4000;
1454   case 245:
1455     return 0xC000;
1456   case 246:
1457     return 0x4400;
1458   case 247:
1459     return 0xC400;
1460   case 248: // 1 / (2 * PI)
1461     return 0x3118;
1462   default:
1463     llvm_unreachable("invalid fp inline imm");
1464   }
1465 }
1466 
1467 static int64_t getInlineImmValBF16(unsigned Imm) {
1468   switch (Imm) {
1469   case 240:
1470     return 0x3F00;
1471   case 241:
1472     return 0xBF00;
1473   case 242:
1474     return 0x3F80;
1475   case 243:
1476     return 0xBF80;
1477   case 244:
1478     return 0x4000;
1479   case 245:
1480     return 0xC000;
1481   case 246:
1482     return 0x4080;
1483   case 247:
1484     return 0xC080;
1485   case 248: // 1 / (2 * PI)
1486     return 0x3E22;
1487   default:
1488     llvm_unreachable("invalid fp inline imm");
1489   }
1490 }
1491 
1492 static int64_t getInlineImmVal16(unsigned Imm, AMDGPU::OperandSemantics Sema) {
1493   return (Sema == AMDGPU::OperandSemantics::BF16) ? getInlineImmValBF16(Imm)
1494                                                   : getInlineImmValF16(Imm);
1495 }
1496 
1497 MCOperand AMDGPUDisassembler::decodeFPImmed(unsigned ImmWidth, unsigned Imm,
1498                                             AMDGPU::OperandSemantics Sema) {
1499   assert(Imm >= AMDGPU::EncValues::INLINE_FLOATING_C_MIN &&
1500          Imm <= AMDGPU::EncValues::INLINE_FLOATING_C_MAX);
1501 
1502   // ToDo: case 248 (1/(2*PI)) is allowed only on VI.
1503   // ImmWidth 0 is a default case where the operand should not allow immediates.
1504   // The Imm value is still decoded into a 32-bit immediate operand; the
1505   // instruction printer will use it to print a verbose error message.
1506   switch (ImmWidth) {
1507   case 0:
1508   case 32:
1509     return MCOperand::createImm(getInlineImmVal32(Imm));
1510   case 64:
1511     return MCOperand::createImm(getInlineImmVal64(Imm));
1512   case 16:
1513     return MCOperand::createImm(getInlineImmVal16(Imm, Sema));
1514   default:
1515     llvm_unreachable("implement me");
1516   }
1517 }
1518 
1519 unsigned AMDGPUDisassembler::getVgprClassId(const OpWidthTy Width) const {
1520   using namespace AMDGPU;
1521 
1522   assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
1523   switch (Width) {
1524   default: // fall through
1525   case OPW32:
1526   case OPW16:
1527   case OPWV216:
1528     return VGPR_32RegClassID;
1529   case OPW64:
1530   case OPWV232: return VReg_64RegClassID;
1531   case OPW96: return VReg_96RegClassID;
1532   case OPW128: return VReg_128RegClassID;
1533   case OPW192: return VReg_192RegClassID;
1534   case OPW160: return VReg_160RegClassID;
1535   case OPW256: return VReg_256RegClassID;
1536   case OPW288: return VReg_288RegClassID;
1537   case OPW320: return VReg_320RegClassID;
1538   case OPW352: return VReg_352RegClassID;
1539   case OPW384: return VReg_384RegClassID;
1540   case OPW512: return VReg_512RegClassID;
1541   case OPW1024: return VReg_1024RegClassID;
1542   }
1543 }
1544 
1545 unsigned AMDGPUDisassembler::getAgprClassId(const OpWidthTy Width) const {
1546   using namespace AMDGPU;
1547 
1548   assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
1549   switch (Width) {
1550   default: // fall through
1551   case OPW32:
1552   case OPW16:
1553   case OPWV216:
1554     return AGPR_32RegClassID;
1555   case OPW64:
1556   case OPWV232: return AReg_64RegClassID;
1557   case OPW96: return AReg_96RegClassID;
1558   case OPW128: return AReg_128RegClassID;
1559   case OPW160: return AReg_160RegClassID;
1560   case OPW256: return AReg_256RegClassID;
1561   case OPW288: return AReg_288RegClassID;
1562   case OPW320: return AReg_320RegClassID;
1563   case OPW352: return AReg_352RegClassID;
1564   case OPW384: return AReg_384RegClassID;
1565   case OPW512: return AReg_512RegClassID;
1566   case OPW1024: return AReg_1024RegClassID;
1567   }
1568 }
1569 
1571 unsigned AMDGPUDisassembler::getSgprClassId(const OpWidthTy Width) const {
1572   using namespace AMDGPU;
1573 
1574   assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
1575   switch (Width) {
1576   default: // fall through
1577   case OPW32:
1578   case OPW16:
1579   case OPWV216:
1580     return SGPR_32RegClassID;
1581   case OPW64:
1582   case OPWV232: return SGPR_64RegClassID;
1583   case OPW96: return SGPR_96RegClassID;
1584   case OPW128: return SGPR_128RegClassID;
1585   case OPW160: return SGPR_160RegClassID;
1586   case OPW256: return SGPR_256RegClassID;
1587   case OPW288: return SGPR_288RegClassID;
1588   case OPW320: return SGPR_320RegClassID;
1589   case OPW352: return SGPR_352RegClassID;
1590   case OPW384: return SGPR_384RegClassID;
1591   case OPW512: return SGPR_512RegClassID;
1592   }
1593 }
1594 
1595 unsigned AMDGPUDisassembler::getTtmpClassId(const OpWidthTy Width) const {
1596   using namespace AMDGPU;
1597 
1598   assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
1599   switch (Width) {
1600   default: // fall through
1601   case OPW32:
1602   case OPW16:
1603   case OPWV216:
1604     return TTMP_32RegClassID;
1605   case OPW64:
1606   case OPWV232: return TTMP_64RegClassID;
1607   case OPW128: return TTMP_128RegClassID;
1608   case OPW256: return TTMP_256RegClassID;
1609   case OPW288: return TTMP_288RegClassID;
1610   case OPW320: return TTMP_320RegClassID;
1611   case OPW352: return TTMP_352RegClassID;
1612   case OPW384: return TTMP_384RegClassID;
1613   case OPW512: return TTMP_512RegClassID;
1614   }
1615 }
1616 
1617 int AMDGPUDisassembler::getTTmpIdx(unsigned Val) const {
1618   using namespace AMDGPU::EncValues;
1619 
1620   unsigned TTmpMin = isGFX9Plus() ? TTMP_GFX9PLUS_MIN : TTMP_VI_MIN;
1621   unsigned TTmpMax = isGFX9Plus() ? TTMP_GFX9PLUS_MAX : TTMP_VI_MAX;
1622 
1623   return (TTmpMin <= Val && Val <= TTmpMax)? Val - TTmpMin : -1;
1624 }
1625 
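     // Decode a 10-bit source operand encoding. Bit 9 selects AGPRs; within the
     // low 9 bits, values in [VGPR_MIN, VGPR_MAX] name VGPRs (or AGPRs), and
     // everything else (SGPRs, ttmps, inline constants, literals, special
     // registers) is handled by decodeNonVGPRSrcOp.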
1626 MCOperand AMDGPUDisassembler::decodeSrcOp(const OpWidthTy Width, unsigned Val,
1627                                           bool MandatoryLiteral,
1628                                           unsigned ImmWidth,
1629                                           AMDGPU::OperandSemantics Sema) const {
1630   using namespace AMDGPU::EncValues;
1631 
1632   assert(Val < 1024); // enum10
1633 
1634   bool IsAGPR = Val & 512;
1635   Val &= 511;
1636 
1637   if (VGPR_MIN <= Val && Val <= VGPR_MAX) {
1638     return createRegOperand(IsAGPR ? getAgprClassId(Width)
1639                                    : getVgprClassId(Width), Val - VGPR_MIN);
1640   }
1641   return decodeNonVGPRSrcOp(Width, Val & 0xFF, MandatoryLiteral, ImmWidth,
1642                             Sema);
1643 }
1644 
1645 MCOperand
1646 AMDGPUDisassembler::decodeNonVGPRSrcOp(const OpWidthTy Width, unsigned Val,
1647                                        bool MandatoryLiteral, unsigned ImmWidth,
1648                                        AMDGPU::OperandSemantics Sema) const {
1649   // Cases where Val{8} is 1 (VGPR, AGPR, or true16 VGPR) should have been
1650   // decoded earlier.
1651   assert(Val < (1 << 8) && "9-bit Src encoding when Val{8} is 0");
1652   using namespace AMDGPU::EncValues;
1653 
1654   if (Val <= SGPR_MAX) {
1655     // "SGPR_MIN <= Val" is always true and causes compilation warning.
1656     static_assert(SGPR_MIN == 0);
1657     return createSRegOperand(getSgprClassId(Width), Val - SGPR_MIN);
1658   }
1659 
1660   int TTmpIdx = getTTmpIdx(Val);
1661   if (TTmpIdx >= 0) {
1662     return createSRegOperand(getTtmpClassId(Width), TTmpIdx);
1663   }
1664 
1665   if (INLINE_INTEGER_C_MIN <= Val && Val <= INLINE_INTEGER_C_MAX)
1666     return decodeIntImmed(Val);
1667 
1668   if (INLINE_FLOATING_C_MIN <= Val && Val <= INLINE_FLOATING_C_MAX)
1669     return decodeFPImmed(ImmWidth, Val, Sema);
1670 
1671   if (Val == LITERAL_CONST) {
1672     if (MandatoryLiteral)
1673       // Keep a sentinel value for deferred setting
1674       return MCOperand::createImm(LITERAL_CONST);
1675     return decodeLiteralConstant(Sema == AMDGPU::OperandSemantics::FP64);
1676   }
1677 
1678   switch (Width) {
1679   case OPW32:
1680   case OPW16:
1681   case OPWV216:
1682     return decodeSpecialReg32(Val);
1683   case OPW64:
1684   case OPWV232:
1685     return decodeSpecialReg64(Val);
1686   default:
1687     llvm_unreachable("unexpected immediate type");
1688   }
1689 }
1690 
1691 // Bit 0 of DstY isn't stored in the instruction, because it's always the
1692 // opposite of bit 0 of DstX.
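     // For example, if DstX decoded to an even VGPR, the low bit of DstY is
     // forced to 1 here.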
1693 MCOperand AMDGPUDisassembler::decodeVOPDDstYOp(MCInst &Inst,
1694                                                unsigned Val) const {
1695   int VDstXInd =
1696       AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::vdstX);
1697   assert(VDstXInd != -1);
1698   assert(Inst.getOperand(VDstXInd).isReg());
1699   unsigned XDstReg = MRI.getEncodingValue(Inst.getOperand(VDstXInd).getReg());
1700   Val |= ~XDstReg & 1;
1701   auto Width = llvm::AMDGPUDisassembler::OPW32;
1702   return createRegOperand(getVgprClassId(Width), Val);
1703 }
1704 
1705 MCOperand AMDGPUDisassembler::decodeSpecialReg32(unsigned Val) const {
1706   using namespace AMDGPU;
1707 
1708   switch (Val) {
1709   // clang-format off
1710   case 102: return createRegOperand(FLAT_SCR_LO);
1711   case 103: return createRegOperand(FLAT_SCR_HI);
1712   case 104: return createRegOperand(XNACK_MASK_LO);
1713   case 105: return createRegOperand(XNACK_MASK_HI);
1714   case 106: return createRegOperand(VCC_LO);
1715   case 107: return createRegOperand(VCC_HI);
1716   case 108: return createRegOperand(TBA_LO);
1717   case 109: return createRegOperand(TBA_HI);
1718   case 110: return createRegOperand(TMA_LO);
1719   case 111: return createRegOperand(TMA_HI);
1720   case 124:
1721     return isGFX11Plus() ? createRegOperand(SGPR_NULL) : createRegOperand(M0);
1722   case 125:
1723     return isGFX11Plus() ? createRegOperand(M0) : createRegOperand(SGPR_NULL);
1724   case 126: return createRegOperand(EXEC_LO);
1725   case 127: return createRegOperand(EXEC_HI);
1726   case 235: return createRegOperand(SRC_SHARED_BASE_LO);
1727   case 236: return createRegOperand(SRC_SHARED_LIMIT_LO);
1728   case 237: return createRegOperand(SRC_PRIVATE_BASE_LO);
1729   case 238: return createRegOperand(SRC_PRIVATE_LIMIT_LO);
1730   case 239: return createRegOperand(SRC_POPS_EXITING_WAVE_ID);
1731   case 251: return createRegOperand(SRC_VCCZ);
1732   case 252: return createRegOperand(SRC_EXECZ);
1733   case 253: return createRegOperand(SRC_SCC);
1734   case 254: return createRegOperand(LDS_DIRECT);
1735   default: break;
1736     // clang-format on
1737   }
1738   return errOperand(Val, "unknown operand encoding " + Twine(Val));
1739 }
1740 
1741 MCOperand AMDGPUDisassembler::decodeSpecialReg64(unsigned Val) const {
1742   using namespace AMDGPU;
1743 
1744   switch (Val) {
1745   case 102: return createRegOperand(FLAT_SCR);
1746   case 104: return createRegOperand(XNACK_MASK);
1747   case 106: return createRegOperand(VCC);
1748   case 108: return createRegOperand(TBA);
1749   case 110: return createRegOperand(TMA);
1750   case 124:
1751     if (isGFX11Plus())
1752       return createRegOperand(SGPR_NULL);
1753     break;
1754   case 125:
1755     if (!isGFX11Plus())
1756       return createRegOperand(SGPR_NULL);
1757     break;
1758   case 126: return createRegOperand(EXEC);
1759   case 235: return createRegOperand(SRC_SHARED_BASE);
1760   case 236: return createRegOperand(SRC_SHARED_LIMIT);
1761   case 237: return createRegOperand(SRC_PRIVATE_BASE);
1762   case 238: return createRegOperand(SRC_PRIVATE_LIMIT);
1763   case 239: return createRegOperand(SRC_POPS_EXITING_WAVE_ID);
1764   case 251: return createRegOperand(SRC_VCCZ);
1765   case 252: return createRegOperand(SRC_EXECZ);
1766   case 253: return createRegOperand(SRC_SCC);
1767   default: break;
1768   }
1769   return errOperand(Val, "unknown operand encoding " + Twine(Val));
1770 }
1771 
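     // SDWA sources use their own operand encoding on GFX9/GFX10: VGPRs come
     // first, followed by SGPRs and ttmps, and the remaining values map onto the
     // regular inline-constant/special-register space offset by SRC_SGPR_MIN.
     // Pre-GFX9 SDWA sources can only be VGPRs.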
1772 MCOperand
1773 AMDGPUDisassembler::decodeSDWASrc(const OpWidthTy Width, const unsigned Val,
1774                                   unsigned ImmWidth,
1775                                   AMDGPU::OperandSemantics Sema) const {
1776   using namespace AMDGPU::SDWA;
1777   using namespace AMDGPU::EncValues;
1778 
1779   if (STI.hasFeature(AMDGPU::FeatureGFX9) ||
1780       STI.hasFeature(AMDGPU::FeatureGFX10)) {
1781     // XXX: the cast to int is needed to avoid a "comparison with unsigned
1782     // is always true" warning.
1783     if (int(SDWA9EncValues::SRC_VGPR_MIN) <= int(Val) &&
1784         Val <= SDWA9EncValues::SRC_VGPR_MAX) {
1785       return createRegOperand(getVgprClassId(Width),
1786                               Val - SDWA9EncValues::SRC_VGPR_MIN);
1787     }
1788     if (SDWA9EncValues::SRC_SGPR_MIN <= Val &&
1789         Val <= (isGFX10Plus() ? SDWA9EncValues::SRC_SGPR_MAX_GFX10
1790                               : SDWA9EncValues::SRC_SGPR_MAX_SI)) {
1791       return createSRegOperand(getSgprClassId(Width),
1792                                Val - SDWA9EncValues::SRC_SGPR_MIN);
1793     }
1794     if (SDWA9EncValues::SRC_TTMP_MIN <= Val &&
1795         Val <= SDWA9EncValues::SRC_TTMP_MAX) {
1796       return createSRegOperand(getTtmpClassId(Width),
1797                                Val - SDWA9EncValues::SRC_TTMP_MIN);
1798     }
1799 
1800     const unsigned SVal = Val - SDWA9EncValues::SRC_SGPR_MIN;
1801 
1802     if (INLINE_INTEGER_C_MIN <= SVal && SVal <= INLINE_INTEGER_C_MAX)
1803       return decodeIntImmed(SVal);
1804 
1805     if (INLINE_FLOATING_C_MIN <= SVal && SVal <= INLINE_FLOATING_C_MAX)
1806       return decodeFPImmed(ImmWidth, SVal, Sema);
1807 
1808     return decodeSpecialReg32(SVal);
1809   }
1810   if (STI.hasFeature(AMDGPU::FeatureVolcanicIslands))
1811     return createRegOperand(getVgprClassId(Width), Val);
1812   llvm_unreachable("unsupported target");
1813 }
1814 
1815 MCOperand AMDGPUDisassembler::decodeSDWASrc16(unsigned Val) const {
1816   return decodeSDWASrc(OPW16, Val, 16, AMDGPU::OperandSemantics::FP16);
1817 }
1818 
1819 MCOperand AMDGPUDisassembler::decodeSDWASrc32(unsigned Val) const {
1820   return decodeSDWASrc(OPW32, Val, 32, AMDGPU::OperandSemantics::FP32);
1821 }
1822 
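     // When VOPC_DST_VCC_MASK is set, the destination is an explicitly encoded
     // SGPR, ttmp, or special register; otherwise the destination is VCC
     // (VCC_LO in wave32).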
1823 MCOperand AMDGPUDisassembler::decodeSDWAVopcDst(unsigned Val) const {
1824   using namespace AMDGPU::SDWA;
1825 
1826   assert((STI.hasFeature(AMDGPU::FeatureGFX9) ||
1827           STI.hasFeature(AMDGPU::FeatureGFX10)) &&
1828          "SDWAVopcDst should be present only on GFX9+");
1829 
1830   bool IsWave32 = STI.hasFeature(AMDGPU::FeatureWavefrontSize32);
1831 
1832   if (Val & SDWA9EncValues::VOPC_DST_VCC_MASK) {
1833     Val &= SDWA9EncValues::VOPC_DST_SGPR_MASK;
1834 
1835     int TTmpIdx = getTTmpIdx(Val);
1836     if (TTmpIdx >= 0) {
1837       auto TTmpClsId = getTtmpClassId(IsWave32 ? OPW32 : OPW64);
1838       return createSRegOperand(TTmpClsId, TTmpIdx);
1839     }
1840     if (Val > SGPR_MAX) {
1841       return IsWave32 ? decodeSpecialReg32(Val) : decodeSpecialReg64(Val);
1842     }
1843     return createSRegOperand(getSgprClassId(IsWave32 ? OPW32 : OPW64), Val);
1844   }
1845   return createRegOperand(IsWave32 ? AMDGPU::VCC_LO : AMDGPU::VCC);
1846 }
1847 
1848 MCOperand AMDGPUDisassembler::decodeBoolReg(unsigned Val) const {
1849   return STI.hasFeature(AMDGPU::FeatureWavefrontSize32)
1850              ? decodeSrcOp(OPW32, Val)
1851              : decodeSrcOp(OPW64, Val);
1852 }
1853 
1854 MCOperand AMDGPUDisassembler::decodeSplitBarrier(unsigned Val) const {
1855   return decodeSrcOp(OPW32, Val);
1856 }
1857 
1858 MCOperand AMDGPUDisassembler::decodeDpp8FI(unsigned Val) const {
1859   if (Val != AMDGPU::DPP::DPP8_FI_0 && Val != AMDGPU::DPP::DPP8_FI_1)
1860     return MCOperand();
1861   return MCOperand::createImm(Val);
1862 }
1863 
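     // The version immediate packs a microcode version code in its low byte plus
     // the W64/W32/MDP flag bits; decode it into a symbolic expression whenever
     // no bits outside those fields are set.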
1864 MCOperand AMDGPUDisassembler::decodeVersionImm(unsigned Imm) const {
1865   using VersionField = AMDGPU::EncodingField<7, 0>;
1866   using W64Bit = AMDGPU::EncodingBit<13>;
1867   using W32Bit = AMDGPU::EncodingBit<14>;
1868   using MDPBit = AMDGPU::EncodingBit<15>;
1869   using Encoding = AMDGPU::EncodingFields<VersionField, W64Bit, W32Bit, MDPBit>;
1870 
1871   auto [Version, W64, W32, MDP] = Encoding::decode(Imm);
1872 
1873   // Decode into a plain immediate if any unused bits are set.
1874   if (Encoding::encode(Version, W64, W32, MDP) != Imm)
1875     return MCOperand::createImm(Imm);
1876 
1877   const auto &Versions = AMDGPU::UCVersion::getGFXVersions();
1878   const auto *I = find_if(
1879       Versions, [Version = Version](const AMDGPU::UCVersion::GFXVersion &V) {
1880         return V.Code == Version;
1881       });
1882   MCContext &Ctx = getContext();
1883   const MCExpr *E;
1884   if (I == Versions.end())
1885     E = MCConstantExpr::create(Version, Ctx);
1886   else
1887     E = MCSymbolRefExpr::create(Ctx.getOrCreateSymbol(I->Symbol), Ctx);
1888 
1889   if (W64)
1890     E = MCBinaryExpr::createOr(E, UCVersionW64Expr, Ctx);
1891   if (W32)
1892     E = MCBinaryExpr::createOr(E, UCVersionW32Expr, Ctx);
1893   if (MDP)
1894     E = MCBinaryExpr::createOr(E, UCVersionMDPExpr, Ctx);
1895 
1896   return MCOperand::createExpr(E);
1897 }
1898 
1899 bool AMDGPUDisassembler::isVI() const {
1900   return STI.hasFeature(AMDGPU::FeatureVolcanicIslands);
1901 }
1902 
1903 bool AMDGPUDisassembler::isGFX9() const { return AMDGPU::isGFX9(STI); }
1904 
1905 bool AMDGPUDisassembler::isGFX90A() const {
1906   return STI.hasFeature(AMDGPU::FeatureGFX90AInsts);
1907 }
1908 
1909 bool AMDGPUDisassembler::isGFX9Plus() const { return AMDGPU::isGFX9Plus(STI); }
1910 
1911 bool AMDGPUDisassembler::isGFX10() const { return AMDGPU::isGFX10(STI); }
1912 
1913 bool AMDGPUDisassembler::isGFX10Plus() const {
1914   return AMDGPU::isGFX10Plus(STI);
1915 }
1916 
1917 bool AMDGPUDisassembler::isGFX11() const {
1918   return STI.hasFeature(AMDGPU::FeatureGFX11);
1919 }
1920 
1921 bool AMDGPUDisassembler::isGFX11Plus() const {
1922   return AMDGPU::isGFX11Plus(STI);
1923 }
1924 
1925 bool AMDGPUDisassembler::isGFX12() const {
1926   return STI.hasFeature(AMDGPU::FeatureGFX12);
1927 }
1928 
1929 bool AMDGPUDisassembler::isGFX12Plus() const {
1930   return AMDGPU::isGFX12Plus(STI);
1931 }
1932 
1933 bool AMDGPUDisassembler::hasArchitectedFlatScratch() const {
1934   return STI.hasFeature(AMDGPU::FeatureArchitectedFlatScratch);
1935 }
1936 
1937 bool AMDGPUDisassembler::hasKernargPreload() const {
1938   return AMDGPU::hasKernargPreload(STI);
1939 }
1940 
1941 //===----------------------------------------------------------------------===//
1942 // AMDGPU specific symbol handling
1943 //===----------------------------------------------------------------------===//
1944 
1945 /// Print a string describing the reserved bit range specified by Mask with
1946 /// offset BaseBytes for use in error comments. Mask is a single continuous
1947 /// range of 1s surrounded by zeros. The format here is meant to align with the
1948 /// tables that describe these bits in llvm.org/docs/AMDGPUUsage.html.
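     /// For example, a Mask of 0x6 with a BaseBytes of 0 yields
     /// "bits in range (2:1)".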
1949 static SmallString<32> getBitRangeFromMask(uint32_t Mask, unsigned BaseBytes) {
1950   SmallString<32> Result;
1951   raw_svector_ostream S(Result);
1952 
1953   int TrailingZeros = llvm::countr_zero(Mask);
1954   int PopCount = llvm::popcount(Mask);
1955 
1956   if (PopCount == 1) {
1957     S << "bit (" << (TrailingZeros + BaseBytes * CHAR_BIT) << ')';
1958   } else {
1959     S << "bits in range ("
1960       << (TrailingZeros + PopCount - 1 + BaseBytes * CHAR_BIT) << ':'
1961       << (TrailingZeros + BaseBytes * CHAR_BIT) << ')';
1962   }
1963 
1964   return Result;
1965 }
1966 
1967 #define GET_FIELD(MASK) (AMDHSA_BITS_GET(FourByteBuffer, MASK))
1968 #define PRINT_DIRECTIVE(DIRECTIVE, MASK)                                       \
1969   do {                                                                         \
1970     KdStream << Indent << DIRECTIVE " " << GET_FIELD(MASK) << '\n';            \
1971   } while (0)
1972 #define PRINT_PSEUDO_DIRECTIVE_COMMENT(DIRECTIVE, MASK)                        \
1973   do {                                                                         \
1974     KdStream << Indent << MAI.getCommentString() << ' ' << DIRECTIVE " "       \
1975              << GET_FIELD(MASK) << '\n';                                       \
1976   } while (0)
1977 
1978 #define CHECK_RESERVED_BITS_IMPL(MASK, DESC, MSG)                              \
1979   do {                                                                         \
1980     if (FourByteBuffer & (MASK)) {                                             \
1981       return createStringError(std::errc::invalid_argument,                    \
1982                                "kernel descriptor " DESC                       \
1983                                " reserved %s set" MSG,                         \
1984                                getBitRangeFromMask((MASK), 0).c_str());        \
1985     }                                                                          \
1986   } while (0)
1987 
1988 #define CHECK_RESERVED_BITS(MASK) CHECK_RESERVED_BITS_IMPL(MASK, #MASK, "")
1989 #define CHECK_RESERVED_BITS_MSG(MASK, MSG)                                     \
1990   CHECK_RESERVED_BITS_IMPL(MASK, #MASK, ", " MSG)
1991 #define CHECK_RESERVED_BITS_DESC(MASK, DESC)                                   \
1992   CHECK_RESERVED_BITS_IMPL(MASK, DESC, "")
1993 #define CHECK_RESERVED_BITS_DESC_MSG(MASK, DESC, MSG)                          \
1994   CHECK_RESERVED_BITS_IMPL(MASK, DESC, ", " MSG)
1995 
1996 // NOLINTNEXTLINE(readability-identifier-naming)
1997 Expected<bool> AMDGPUDisassembler::decodeCOMPUTE_PGM_RSRC1(
1998     uint32_t FourByteBuffer, raw_string_ostream &KdStream) const {
1999   using namespace amdhsa;
2000   StringRef Indent = "\t";
2001 
2002   // We cannot accurately backward compute #VGPRs used from
2003   // GRANULATED_WORKITEM_VGPR_COUNT. But we are concerned with getting the same
2004   // value of GRANULATED_WORKITEM_VGPR_COUNT in the reassembled binary. So we
2005   // simply calculate the inverse of what the assembler does.
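       // For example, assuming a VGPR encoding granule of 4, a stored
       // GRANULATED_WORKITEM_VGPR_COUNT of 3 is printed back as
       // ".amdhsa_next_free_vgpr 16".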
2006 
2007   uint32_t GranulatedWorkitemVGPRCount =
2008       GET_FIELD(COMPUTE_PGM_RSRC1_GRANULATED_WORKITEM_VGPR_COUNT);
2009 
2010   uint32_t NextFreeVGPR =
2011       (GranulatedWorkitemVGPRCount + 1) *
2012       AMDGPU::IsaInfo::getVGPREncodingGranule(&STI, EnableWavefrontSize32);
2013 
2014   KdStream << Indent << ".amdhsa_next_free_vgpr " << NextFreeVGPR << '\n';
2015 
2016   // We cannot backward compute values used to calculate
2017   // GRANULATED_WAVEFRONT_SGPR_COUNT. Hence the original values for following
2018   // directives can't be computed:
2019   // .amdhsa_reserve_vcc
2020   // .amdhsa_reserve_flat_scratch
2021   // .amdhsa_reserve_xnack_mask
2022   // They take their respective default values if not specified in the assembly.
2023   //
2024   // GRANULATED_WAVEFRONT_SGPR_COUNT
2025   //    = f(NEXT_FREE_SGPR + VCC + FLAT_SCRATCH + XNACK_MASK)
2026   //
2027   // We compute the inverse as though all directives apart from NEXT_FREE_SGPR
2028   // are set to 0. So while disassembling we consider that:
2029   //
2030   // GRANULATED_WAVEFRONT_SGPR_COUNT
2031   //    = f(NEXT_FREE_SGPR + 0 + 0 + 0)
2032   //
2033   // The disassembler cannot recover the original values of those 3 directives.
2034 
2035   uint32_t GranulatedWavefrontSGPRCount =
2036       GET_FIELD(COMPUTE_PGM_RSRC1_GRANULATED_WAVEFRONT_SGPR_COUNT);
2037 
2038   if (isGFX10Plus())
2039     CHECK_RESERVED_BITS_MSG(COMPUTE_PGM_RSRC1_GRANULATED_WAVEFRONT_SGPR_COUNT,
2040                             "must be zero on gfx10+");
2041 
2042   uint32_t NextFreeSGPR = (GranulatedWavefrontSGPRCount + 1) *
2043                           AMDGPU::IsaInfo::getSGPREncodingGranule(&STI);
2044 
2045   KdStream << Indent << ".amdhsa_reserve_vcc " << 0 << '\n';
2046   if (!hasArchitectedFlatScratch())
2047     KdStream << Indent << ".amdhsa_reserve_flat_scratch " << 0 << '\n';
2048   KdStream << Indent << ".amdhsa_reserve_xnack_mask " << 0 << '\n';
2049   KdStream << Indent << ".amdhsa_next_free_sgpr " << NextFreeSGPR << '\n';
2050 
2051   CHECK_RESERVED_BITS(COMPUTE_PGM_RSRC1_PRIORITY);
2052 
2053   PRINT_DIRECTIVE(".amdhsa_float_round_mode_32",
2054                   COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_32);
2055   PRINT_DIRECTIVE(".amdhsa_float_round_mode_16_64",
2056                   COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_16_64);
2057   PRINT_DIRECTIVE(".amdhsa_float_denorm_mode_32",
2058                   COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_32);
2059   PRINT_DIRECTIVE(".amdhsa_float_denorm_mode_16_64",
2060                   COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64);
2061 
2062   CHECK_RESERVED_BITS(COMPUTE_PGM_RSRC1_PRIV);
2063 
2064   if (!isGFX12Plus())
2065     PRINT_DIRECTIVE(".amdhsa_dx10_clamp",
2066                     COMPUTE_PGM_RSRC1_GFX6_GFX11_ENABLE_DX10_CLAMP);
2067 
2068   CHECK_RESERVED_BITS(COMPUTE_PGM_RSRC1_DEBUG_MODE);
2069 
2070   if (!isGFX12Plus())
2071     PRINT_DIRECTIVE(".amdhsa_ieee_mode",
2072                     COMPUTE_PGM_RSRC1_GFX6_GFX11_ENABLE_IEEE_MODE);
2073 
2074   CHECK_RESERVED_BITS(COMPUTE_PGM_RSRC1_BULKY);
2075   CHECK_RESERVED_BITS(COMPUTE_PGM_RSRC1_CDBG_USER);
2076 
2077   if (isGFX9Plus())
2078     PRINT_DIRECTIVE(".amdhsa_fp16_overflow", COMPUTE_PGM_RSRC1_GFX9_PLUS_FP16_OVFL);
2079 
2080   if (!isGFX9Plus())
2081     CHECK_RESERVED_BITS_DESC_MSG(COMPUTE_PGM_RSRC1_GFX6_GFX8_RESERVED0,
2082                                  "COMPUTE_PGM_RSRC1", "must be zero pre-gfx9");
2083 
2084   CHECK_RESERVED_BITS_DESC(COMPUTE_PGM_RSRC1_RESERVED1, "COMPUTE_PGM_RSRC1");
2085 
2086   if (!isGFX10Plus())
2087     CHECK_RESERVED_BITS_DESC_MSG(COMPUTE_PGM_RSRC1_GFX6_GFX9_RESERVED2,
2088                                  "COMPUTE_PGM_RSRC1", "must be zero pre-gfx10");
2089 
2090   if (isGFX10Plus()) {
2091     PRINT_DIRECTIVE(".amdhsa_workgroup_processor_mode",
2092                     COMPUTE_PGM_RSRC1_GFX10_PLUS_WGP_MODE);
2093     PRINT_DIRECTIVE(".amdhsa_memory_ordered", COMPUTE_PGM_RSRC1_GFX10_PLUS_MEM_ORDERED);
2094     PRINT_DIRECTIVE(".amdhsa_forward_progress", COMPUTE_PGM_RSRC1_GFX10_PLUS_FWD_PROGRESS);
2095   }
2096 
2097   if (isGFX12Plus())
2098     PRINT_DIRECTIVE(".amdhsa_round_robin_scheduling",
2099                     COMPUTE_PGM_RSRC1_GFX12_PLUS_ENABLE_WG_RR_EN);
2100 
2101   return true;
2102 }
2103 
2104 // NOLINTNEXTLINE(readability-identifier-naming)
2105 Expected<bool> AMDGPUDisassembler::decodeCOMPUTE_PGM_RSRC2(
2106     uint32_t FourByteBuffer, raw_string_ostream &KdStream) const {
2107   using namespace amdhsa;
2108   StringRef Indent = "\t";
2109   if (hasArchitectedFlatScratch())
2110     PRINT_DIRECTIVE(".amdhsa_enable_private_segment",
2111                     COMPUTE_PGM_RSRC2_ENABLE_PRIVATE_SEGMENT);
2112   else
2113     PRINT_DIRECTIVE(".amdhsa_system_sgpr_private_segment_wavefront_offset",
2114                     COMPUTE_PGM_RSRC2_ENABLE_PRIVATE_SEGMENT);
2115   PRINT_DIRECTIVE(".amdhsa_system_sgpr_workgroup_id_x",
2116                   COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X);
2117   PRINT_DIRECTIVE(".amdhsa_system_sgpr_workgroup_id_y",
2118                   COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Y);
2119   PRINT_DIRECTIVE(".amdhsa_system_sgpr_workgroup_id_z",
2120                   COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Z);
2121   PRINT_DIRECTIVE(".amdhsa_system_sgpr_workgroup_info",
2122                   COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_INFO);
2123   PRINT_DIRECTIVE(".amdhsa_system_vgpr_workitem_id",
2124                   COMPUTE_PGM_RSRC2_ENABLE_VGPR_WORKITEM_ID);
2125 
2126   CHECK_RESERVED_BITS(COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_ADDRESS_WATCH);
2127   CHECK_RESERVED_BITS(COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_MEMORY);
2128   CHECK_RESERVED_BITS(COMPUTE_PGM_RSRC2_GRANULATED_LDS_SIZE);
2129 
2130   PRINT_DIRECTIVE(
2131       ".amdhsa_exception_fp_ieee_invalid_op",
2132       COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INVALID_OPERATION);
2133   PRINT_DIRECTIVE(".amdhsa_exception_fp_denorm_src",
2134                   COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_FP_DENORMAL_SOURCE);
2135   PRINT_DIRECTIVE(
2136       ".amdhsa_exception_fp_ieee_div_zero",
2137       COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_DIVISION_BY_ZERO);
2138   PRINT_DIRECTIVE(".amdhsa_exception_fp_ieee_overflow",
2139                   COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_OVERFLOW);
2140   PRINT_DIRECTIVE(".amdhsa_exception_fp_ieee_underflow",
2141                   COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_UNDERFLOW);
2142   PRINT_DIRECTIVE(".amdhsa_exception_fp_ieee_inexact",
2143                   COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INEXACT);
2144   PRINT_DIRECTIVE(".amdhsa_exception_int_div_zero",
2145                   COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_INT_DIVIDE_BY_ZERO);
2146 
2147   CHECK_RESERVED_BITS_DESC(COMPUTE_PGM_RSRC2_RESERVED0, "COMPUTE_PGM_RSRC2");
2148 
2149   return true;
2150 }
2151 
2152 // NOLINTNEXTLINE(readability-identifier-naming)
2153 Expected<bool> AMDGPUDisassembler::decodeCOMPUTE_PGM_RSRC3(
2154     uint32_t FourByteBuffer, raw_string_ostream &KdStream) const {
2155   using namespace amdhsa;
2156   StringRef Indent = "\t";
2157   if (isGFX90A()) {
2158     KdStream << Indent << ".amdhsa_accum_offset "
2159              << (GET_FIELD(COMPUTE_PGM_RSRC3_GFX90A_ACCUM_OFFSET) + 1) * 4
2160              << '\n';
2161 
2162     PRINT_DIRECTIVE(".amdhsa_tg_split", COMPUTE_PGM_RSRC3_GFX90A_TG_SPLIT);
2163 
2164     CHECK_RESERVED_BITS_DESC_MSG(COMPUTE_PGM_RSRC3_GFX90A_RESERVED0,
2165                                  "COMPUTE_PGM_RSRC3", "must be zero on gfx90a");
2166     CHECK_RESERVED_BITS_DESC_MSG(COMPUTE_PGM_RSRC3_GFX90A_RESERVED1,
2167                                  "COMPUTE_PGM_RSRC3", "must be zero on gfx90a");
2168   } else if (isGFX10Plus()) {
2169     // Bits [0-3].
2170     if (!isGFX12Plus()) {
2171       if (!EnableWavefrontSize32 || !*EnableWavefrontSize32) {
2172         PRINT_DIRECTIVE(".amdhsa_shared_vgpr_count",
2173                         COMPUTE_PGM_RSRC3_GFX10_GFX11_SHARED_VGPR_COUNT);
2174       } else {
2175         PRINT_PSEUDO_DIRECTIVE_COMMENT(
2176             "SHARED_VGPR_COUNT",
2177             COMPUTE_PGM_RSRC3_GFX10_GFX11_SHARED_VGPR_COUNT);
2178       }
2179     } else {
2180       CHECK_RESERVED_BITS_DESC_MSG(COMPUTE_PGM_RSRC3_GFX12_PLUS_RESERVED0,
2181                                    "COMPUTE_PGM_RSRC3",
2182                                    "must be zero on gfx12+");
2183     }
2184 
2185     // Bits [4-11].
2186     if (isGFX11()) {
2187       PRINT_PSEUDO_DIRECTIVE_COMMENT("INST_PREF_SIZE",
2188                                      COMPUTE_PGM_RSRC3_GFX11_INST_PREF_SIZE);
2189       PRINT_PSEUDO_DIRECTIVE_COMMENT("TRAP_ON_START",
2190                                      COMPUTE_PGM_RSRC3_GFX11_TRAP_ON_START);
2191       PRINT_PSEUDO_DIRECTIVE_COMMENT("TRAP_ON_END",
2192                                      COMPUTE_PGM_RSRC3_GFX11_TRAP_ON_END);
2193     } else if (isGFX12Plus()) {
2194       PRINT_PSEUDO_DIRECTIVE_COMMENT(
2195           "INST_PREF_SIZE", COMPUTE_PGM_RSRC3_GFX12_PLUS_INST_PREF_SIZE);
2196     } else {
2197       CHECK_RESERVED_BITS_DESC_MSG(COMPUTE_PGM_RSRC3_GFX10_RESERVED1,
2198                                    "COMPUTE_PGM_RSRC3",
2199                                    "must be zero on gfx10");
2200     }
2201 
2202     // Bit [12].
2203     CHECK_RESERVED_BITS_DESC_MSG(COMPUTE_PGM_RSRC3_GFX10_PLUS_RESERVED2,
2204                                  "COMPUTE_PGM_RSRC3", "must be zero on gfx10+");
2205 
2206     // Bit [13].
2207     if (isGFX12Plus()) {
2208       PRINT_PSEUDO_DIRECTIVE_COMMENT("GLG_EN",
2209                                      COMPUTE_PGM_RSRC3_GFX12_PLUS_GLG_EN);
2210     } else {
2211       CHECK_RESERVED_BITS_DESC_MSG(COMPUTE_PGM_RSRC3_GFX10_GFX11_RESERVED3,
2212                                    "COMPUTE_PGM_RSRC3",
2213                                    "must be zero on gfx10 or gfx11");
2214     }
2215 
2216     // Bits [14-30].
2217     CHECK_RESERVED_BITS_DESC_MSG(COMPUTE_PGM_RSRC3_GFX10_PLUS_RESERVED4,
2218                                  "COMPUTE_PGM_RSRC3", "must be zero on gfx10+");
2219 
2220     // Bit [31].
2221     if (isGFX11Plus()) {
2222       PRINT_PSEUDO_DIRECTIVE_COMMENT("IMAGE_OP",
2223                                      COMPUTE_PGM_RSRC3_GFX11_PLUS_IMAGE_OP);
2224     } else {
2225       CHECK_RESERVED_BITS_DESC_MSG(COMPUTE_PGM_RSRC3_GFX10_RESERVED5,
2226                                    "COMPUTE_PGM_RSRC3",
2227                                    "must be zero on gfx10");
2228     }
2229   } else if (FourByteBuffer) {
2230     return createStringError(
2231         std::errc::invalid_argument,
2232         "kernel descriptor COMPUTE_PGM_RSRC3 must be all zero before gfx9");
2233   }
2234   return true;
2235 }
2236 #undef PRINT_PSEUDO_DIRECTIVE_COMMENT
2237 #undef PRINT_DIRECTIVE
2238 #undef GET_FIELD
2239 #undef CHECK_RESERVED_BITS_IMPL
2240 #undef CHECK_RESERVED_BITS
2241 #undef CHECK_RESERVED_BITS_MSG
2242 #undef CHECK_RESERVED_BITS_DESC
2243 #undef CHECK_RESERVED_BITS_DESC_MSG
2244 
2245 /// Create an error object to return from onSymbolStart for reserved kernel
2246 /// descriptor bits being set.
2247 static Error createReservedKDBitsError(uint32_t Mask, unsigned BaseBytes,
2248                                        const char *Msg = "") {
2249   return createStringError(
2250       std::errc::invalid_argument, "kernel descriptor reserved %s set%s%s",
2251       getBitRangeFromMask(Mask, BaseBytes).c_str(), *Msg ? ", " : "", Msg);
2252 }
2253 
2254 /// Create an error object to return from onSymbolStart for reserved kernel
2255 /// descriptor bytes being set.
2256 static Error createReservedKDBytesError(unsigned BaseInBytes,
2257                                         unsigned WidthInBytes) {
2258   // Create an error comment in the same format as the "Kernel Descriptor"
2259   // table here: https://llvm.org/docs/AMDGPUUsage.html#kernel-descriptor .
2260   return createStringError(
2261       std::errc::invalid_argument,
2262       "kernel descriptor reserved bits in range (%u:%u) set",
2263       (BaseInBytes + WidthInBytes) * CHAR_BIT - 1, BaseInBytes * CHAR_BIT);
2264 }
2265 
2266 Expected<bool> AMDGPUDisassembler::decodeKernelDescriptorDirective(
2267     DataExtractor::Cursor &Cursor, ArrayRef<uint8_t> Bytes,
2268     raw_string_ostream &KdStream) const {
2269 #define PRINT_DIRECTIVE(DIRECTIVE, MASK)                                       \
2270   do {                                                                         \
2271     KdStream << Indent << DIRECTIVE " "                                        \
2272              << ((TwoByteBuffer & MASK) >> (MASK##_SHIFT)) << '\n';            \
2273   } while (0)
2274 
2275   uint16_t TwoByteBuffer = 0;
2276   uint32_t FourByteBuffer = 0;
2277 
2278   StringRef ReservedBytes;
2279   StringRef Indent = "\t";
2280 
2281   assert(Bytes.size() == 64);
2282   DataExtractor DE(Bytes, /*IsLittleEndian=*/true, /*AddressSize=*/8);
2283 
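       // The cursor position selects which kernel descriptor field to decode
       // next; each case consumes exactly the bytes belonging to that field.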
2284   switch (Cursor.tell()) {
2285   case amdhsa::GROUP_SEGMENT_FIXED_SIZE_OFFSET:
2286     FourByteBuffer = DE.getU32(Cursor);
2287     KdStream << Indent << ".amdhsa_group_segment_fixed_size " << FourByteBuffer
2288              << '\n';
2289     return true;
2290 
2291   case amdhsa::PRIVATE_SEGMENT_FIXED_SIZE_OFFSET:
2292     FourByteBuffer = DE.getU32(Cursor);
2293     KdStream << Indent << ".amdhsa_private_segment_fixed_size "
2294              << FourByteBuffer << '\n';
2295     return true;
2296 
2297   case amdhsa::KERNARG_SIZE_OFFSET:
2298     FourByteBuffer = DE.getU32(Cursor);
2299     KdStream << Indent << ".amdhsa_kernarg_size "
2300              << FourByteBuffer << '\n';
2301     return true;
2302 
2303   case amdhsa::RESERVED0_OFFSET:
2304     // 4 reserved bytes, must be 0.
2305     ReservedBytes = DE.getBytes(Cursor, 4);
2306     for (int I = 0; I < 4; ++I) {
2307       if (ReservedBytes[I] != 0)
2308         return createReservedKDBytesError(amdhsa::RESERVED0_OFFSET, 4);
2309     }
2310     return true;
2311 
2312   case amdhsa::KERNEL_CODE_ENTRY_BYTE_OFFSET_OFFSET:
2313     // KERNEL_CODE_ENTRY_BYTE_OFFSET
2314     // So far no directive controls this for Code Object V3, so simply skip for
2315     // disassembly.
2316     DE.skip(Cursor, 8);
2317     return true;
2318 
2319   case amdhsa::RESERVED1_OFFSET:
2320     // 20 reserved bytes, must be 0.
2321     ReservedBytes = DE.getBytes(Cursor, 20);
2322     for (int I = 0; I < 20; ++I) {
2323       if (ReservedBytes[I] != 0)
2324         return createReservedKDBytesError(amdhsa::RESERVED1_OFFSET, 20);
2325     }
2326     return true;
2327 
2328   case amdhsa::COMPUTE_PGM_RSRC3_OFFSET:
2329     FourByteBuffer = DE.getU32(Cursor);
2330     return decodeCOMPUTE_PGM_RSRC3(FourByteBuffer, KdStream);
2331 
2332   case amdhsa::COMPUTE_PGM_RSRC1_OFFSET:
2333     FourByteBuffer = DE.getU32(Cursor);
2334     return decodeCOMPUTE_PGM_RSRC1(FourByteBuffer, KdStream);
2335 
2336   case amdhsa::COMPUTE_PGM_RSRC2_OFFSET:
2337     FourByteBuffer = DE.getU32(Cursor);
2338     return decodeCOMPUTE_PGM_RSRC2(FourByteBuffer, KdStream);
2339 
2340   case amdhsa::KERNEL_CODE_PROPERTIES_OFFSET:
2341     using namespace amdhsa;
2342     TwoByteBuffer = DE.getU16(Cursor);
2343 
2344     if (!hasArchitectedFlatScratch())
2345       PRINT_DIRECTIVE(".amdhsa_user_sgpr_private_segment_buffer",
2346                       KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER);
2347     PRINT_DIRECTIVE(".amdhsa_user_sgpr_dispatch_ptr",
2348                     KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR);
2349     PRINT_DIRECTIVE(".amdhsa_user_sgpr_queue_ptr",
2350                     KERNEL_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR);
2351     PRINT_DIRECTIVE(".amdhsa_user_sgpr_kernarg_segment_ptr",
2352                     KERNEL_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR);
2353     PRINT_DIRECTIVE(".amdhsa_user_sgpr_dispatch_id",
2354                     KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID);
2355     if (!hasArchitectedFlatScratch())
2356       PRINT_DIRECTIVE(".amdhsa_user_sgpr_flat_scratch_init",
2357                       KERNEL_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT);
2358     PRINT_DIRECTIVE(".amdhsa_user_sgpr_private_segment_size",
2359                     KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE);
2360 
2361     if (TwoByteBuffer & KERNEL_CODE_PROPERTY_RESERVED0)
2362       return createReservedKDBitsError(KERNEL_CODE_PROPERTY_RESERVED0,
2363                                        amdhsa::KERNEL_CODE_PROPERTIES_OFFSET);
2364 
2365     // Reserved for GFX9
2366     if (isGFX9() &&
2367         (TwoByteBuffer & KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32)) {
2368       return createReservedKDBitsError(
2369           KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32,
2370           amdhsa::KERNEL_CODE_PROPERTIES_OFFSET, "must be zero on gfx9");
2371     }
2372     if (isGFX10Plus()) {
2373       PRINT_DIRECTIVE(".amdhsa_wavefront_size32",
2374                       KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32);
2375     }
2376 
2377     if (CodeObjectVersion >= AMDGPU::AMDHSA_COV5)
2378       PRINT_DIRECTIVE(".amdhsa_uses_dynamic_stack",
2379                       KERNEL_CODE_PROPERTY_USES_DYNAMIC_STACK);
2380 
2381     if (TwoByteBuffer & KERNEL_CODE_PROPERTY_RESERVED1) {
2382       return createReservedKDBitsError(KERNEL_CODE_PROPERTY_RESERVED1,
2383                                        amdhsa::KERNEL_CODE_PROPERTIES_OFFSET);
2384     }
2385 
2386     return true;
2387 
2388   case amdhsa::KERNARG_PRELOAD_OFFSET:
2389     using namespace amdhsa;
2390     TwoByteBuffer = DE.getU16(Cursor);
2391     if (TwoByteBuffer & KERNARG_PRELOAD_SPEC_LENGTH) {
2392       PRINT_DIRECTIVE(".amdhsa_user_sgpr_kernarg_preload_length",
2393                       KERNARG_PRELOAD_SPEC_LENGTH);
2394     }
2395 
2396     if (TwoByteBuffer & KERNARG_PRELOAD_SPEC_OFFSET) {
2397       PRINT_DIRECTIVE(".amdhsa_user_sgpr_kernarg_preload_offset",
2398                       KERNARG_PRELOAD_SPEC_OFFSET);
2399     }
2400     return true;
2401 
2402   case amdhsa::RESERVED3_OFFSET:
2403     // 4 bytes from here are reserved, must be 0.
2404     ReservedBytes = DE.getBytes(Cursor, 4);
2405     for (int I = 0; I < 4; ++I) {
2406       if (ReservedBytes[I] != 0)
2407         return createReservedKDBytesError(amdhsa::RESERVED3_OFFSET, 4);
2408     }
2409     return true;
2410 
2411   default:
2412     llvm_unreachable("Unhandled index. Case statements cover everything.");
2413     return true;
2414   }
2415 #undef PRINT_DIRECTIVE
2416 }
2417 
2418 Expected<bool> AMDGPUDisassembler::decodeKernelDescriptor(
2419     StringRef KdName, ArrayRef<uint8_t> Bytes, uint64_t KdAddress) const {
2420 
2421   // CP microcode requires the kernel descriptor to be 64-byte aligned.
2422   if (Bytes.size() != 64 || KdAddress % 64 != 0)
2423     return createStringError(std::errc::invalid_argument,
2424                              "kernel descriptor must be 64-byte aligned");
2425 
2426   // FIXME: We can't actually decode "in order" as is done below, as e.g. GFX10
2427   // requires us to know the setting of .amdhsa_wavefront_size32 in order to
2428   // accurately produce .amdhsa_next_free_vgpr, and they appear in the wrong
2429   // order. Workaround this by first looking up .amdhsa_wavefront_size32 here
2430   // when required.
2431   if (isGFX10Plus()) {
2432     uint16_t KernelCodeProperties =
2433         support::endian::read16(&Bytes[amdhsa::KERNEL_CODE_PROPERTIES_OFFSET],
2434                                 llvm::endianness::little);
2435     EnableWavefrontSize32 =
2436         AMDHSA_BITS_GET(KernelCodeProperties,
2437                         amdhsa::KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32);
2438   }
2439 
2440   std::string Kd;
2441   raw_string_ostream KdStream(Kd);
2442   KdStream << ".amdhsa_kernel " << KdName << '\n';
2443 
2444   DataExtractor::Cursor C(0);
2445   while (C && C.tell() < Bytes.size()) {
2446     Expected<bool> Res = decodeKernelDescriptorDirective(C, Bytes, KdStream);
2447 
2448     cantFail(C.takeError());
2449 
2450     if (!Res)
2451       return Res;
2452   }
2453   KdStream << ".end_amdhsa_kernel\n";
2454   outs() << KdStream.str();
2455   return true;
2456 }
2457 
2458 Expected<bool> AMDGPUDisassembler::onSymbolStart(SymbolInfoTy &Symbol,
2459                                                  uint64_t &Size,
2460                                                  ArrayRef<uint8_t> Bytes,
2461                                                  uint64_t Address) const {
2462   // Right now only the kernel descriptor needs to be handled.
2463   // We ignore all other symbols for target-specific handling.
2464   // TODO:
2465   // Fix the spurious symbol issue for AMDGPU kernels. Exists for both Code
2466   // Object V2 and V3 when symbols are marked protected.
2467 
2468   // amd_kernel_code_t for Code Object V2.
2469   if (Symbol.Type == ELF::STT_AMDGPU_HSA_KERNEL) {
2470     Size = 256;
2471     return createStringError(std::errc::invalid_argument,
2472                              "code object v2 is not supported");
2473   }
2474 
2475   // Code Object V3 kernel descriptors.
2476   StringRef Name = Symbol.Name;
2477   if (Symbol.Type == ELF::STT_OBJECT && Name.ends_with(StringRef(".kd"))) {
2478     Size = 64; // Size = 64 regardless of success or failure.
2479     return decodeKernelDescriptor(Name.drop_back(3), Bytes, Address);
2480   }
2481 
2482   return false;
2483 }
2484 
2485 const MCExpr *AMDGPUDisassembler::createConstantSymbolExpr(StringRef Id,
2486                                                            int64_t Val) {
2487   MCContext &Ctx = getContext();
2488   MCSymbol *Sym = Ctx.getOrCreateSymbol(Id);
2489   // Note: only set the value to Val for a new symbol, in case a disassembler
2490   // has already been initialized in this context.
2491   if (!Sym->isVariable()) {
2492     Sym->setVariableValue(MCConstantExpr::create(Val, Ctx));
2493   } else {
2494     int64_t Res = ~Val;
2495     bool Valid = Sym->getVariableValue()->evaluateAsAbsolute(Res);
2496     if (!Valid || Res != Val)
2497       Ctx.reportWarning(SMLoc(), "unsupported redefinition of " + Id);
2498   }
2499   return MCSymbolRefExpr::create(Sym, Ctx);
2500 }
2501 
2502 //===----------------------------------------------------------------------===//
2503 // AMDGPUSymbolizer
2504 //===----------------------------------------------------------------------===//
2505 
2506 // Try to find the symbol name for the specified label.
2507 bool AMDGPUSymbolizer::tryAddingSymbolicOperand(
2508     MCInst &Inst, raw_ostream & /*cStream*/, int64_t Value,
2509     uint64_t /*Address*/, bool IsBranch, uint64_t /*Offset*/,
2510     uint64_t /*OpSize*/, uint64_t /*InstSize*/) {
2511 
2512   if (!IsBranch) {
2513     return false;
2514   }
2515 
2516   auto *Symbols = static_cast<SectionSymbolsTy *>(DisInfo);
2517   if (!Symbols)
2518     return false;
2519 
2520   auto Result = llvm::find_if(*Symbols, [Value](const SymbolInfoTy &Val) {
2521     return Val.Addr == static_cast<uint64_t>(Value) &&
2522            Val.Type == ELF::STT_NOTYPE;
2523   });
2524   if (Result != Symbols->end()) {
2525     auto *Sym = Ctx.getOrCreateSymbol(Result->Name);
2526     const auto *Add = MCSymbolRefExpr::create(Sym, Ctx);
2527     Inst.addOperand(MCOperand::createExpr(Add));
2528     return true;
2529   }
2530   // Add to list of referenced addresses, so caller can synthesize a label.
2531   ReferencedAddresses.push_back(static_cast<uint64_t>(Value));
2532   return false;
2533 }
2534 
2535 void AMDGPUSymbolizer::tryAddingPcLoadReferenceComment(raw_ostream &cStream,
2536                                                        int64_t Value,
2537                                                        uint64_t Address) {
2538   llvm_unreachable("unimplemented");
2539 }
2540 
2541 //===----------------------------------------------------------------------===//
2542 // Initialization
2543 //===----------------------------------------------------------------------===//
2544 
2545 static MCSymbolizer *createAMDGPUSymbolizer(const Triple &/*TT*/,
2546                               LLVMOpInfoCallback /*GetOpInfo*/,
2547                               LLVMSymbolLookupCallback /*SymbolLookUp*/,
2548                               void *DisInfo,
2549                               MCContext *Ctx,
2550                               std::unique_ptr<MCRelocationInfo> &&RelInfo) {
2551   return new AMDGPUSymbolizer(*Ctx, std::move(RelInfo), DisInfo);
2552 }
2553 
2554 static MCDisassembler *createAMDGPUDisassembler(const Target &T,
2555                                                 const MCSubtargetInfo &STI,
2556                                                 MCContext &Ctx) {
2557   return new AMDGPUDisassembler(STI, Ctx, T.createMCInstrInfo());
2558 }
2559 
2560 extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUDisassembler() {
2561   TargetRegistry::RegisterMCDisassembler(getTheGCNTarget(),
2562                                          createAMDGPUDisassembler);
2563   TargetRegistry::RegisterMCSymbolizer(getTheGCNTarget(),
2564                                        createAMDGPUSymbolizer);
2565 }
2566