//===- AMDGPUDisassembler.cpp - Disassembler for AMDGPU ISA ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
///
/// This file contains the definition of the AMDGPU ISA disassembler.
//
//===----------------------------------------------------------------------===//

// ToDo: What to do with instruction suffixes (v_mov_b32 vs v_mov_b32_e32)?

#include "Disassembler/AMDGPUDisassembler.h"
#include "AMDGPU.h"
#include "AMDGPURegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIDefines.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm-c/Disassembler.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDisassembler/MCDisassembler.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixedLenDisassembler.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <tuple>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "amdgpu-disassembler"

#define SGPR_MAX (isGFX10() ? AMDGPU::EncValues::SGPR_MAX_GFX10 \
                            : AMDGPU::EncValues::SGPR_MAX_SI)

using DecodeStatus = llvm::MCDisassembler::DecodeStatus;

inline static MCDisassembler::DecodeStatus
addOperand(MCInst &Inst, const MCOperand &Opnd) {
  Inst.addOperand(Opnd);
  return Opnd.isValid() ?
    MCDisassembler::Success :
    MCDisassembler::SoftFail;
}

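// Insert Op at the position of the named operand NameIdx in MI, if the opcode
// defines such an operand. Returns the operand's index, or -1 if the opcode
// has no operand with that name.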
static int insertNamedMCOperand(MCInst &MI, const MCOperand &Op,
                                uint16_t NameIdx) {
  int OpIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), NameIdx);
  if (OpIdx != -1) {
    auto I = MI.begin();
    std::advance(I, OpIdx);
    MI.insert(I, Op);
  }
  return OpIdx;
}

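// Decode a SOPP branch target. The encoded simm16 counts dwords relative to
// the instruction following the branch (Addr + 4), hence the scaling by 4.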
static DecodeStatus decodeSoppBrTarget(MCInst &Inst, unsigned Imm,
                                       uint64_t Addr, const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);

  // Our branches take a simm16, but we need two extra bits to account for the
  // factor of 4.
  APInt SignedOffset(18, Imm * 4, true);
  int64_t Offset = (SignedOffset.sext(64) + 4 + Addr).getSExtValue();

  if (DAsm->tryAddingSymbolicOperand(Inst, Offset, Addr, true, 2, 2))
    return MCDisassembler::Success;
  return addOperand(Inst, MCOperand::createImm(Imm));
}

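// Generate a static trampoline with the signature the generated decoder
// tables expect; it forwards to the named member decoder on the
// AMDGPUDisassembler instance passed in as the opaque Decoder pointer.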
#define DECODE_OPERAND(StaticDecoderName, DecoderName) \
static DecodeStatus StaticDecoderName(MCInst &Inst, \
                                      unsigned Imm, \
                                      uint64_t /*Addr*/, \
                                      const void *Decoder) { \
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder); \
  return addOperand(Inst, DAsm->DecoderName(Imm)); \
}

#define DECODE_OPERAND_REG(RegClass) \
DECODE_OPERAND(Decode##RegClass##RegisterClass, decodeOperand_##RegClass)

DECODE_OPERAND_REG(VGPR_32)
DECODE_OPERAND_REG(VRegOrLds_32)
DECODE_OPERAND_REG(VS_32)
DECODE_OPERAND_REG(VS_64)
DECODE_OPERAND_REG(VS_128)

DECODE_OPERAND_REG(VReg_64)
DECODE_OPERAND_REG(VReg_96)
DECODE_OPERAND_REG(VReg_128)

DECODE_OPERAND_REG(SReg_32)
DECODE_OPERAND_REG(SReg_32_XM0_XEXEC)
DECODE_OPERAND_REG(SReg_32_XEXEC_HI)
DECODE_OPERAND_REG(SRegOrLds_32)
DECODE_OPERAND_REG(SReg_64)
DECODE_OPERAND_REG(SReg_64_XEXEC)
DECODE_OPERAND_REG(SReg_128)
DECODE_OPERAND_REG(SReg_256)
DECODE_OPERAND_REG(SReg_512)

static DecodeStatus decodeOperand_VSrc16(MCInst &Inst,
                                         unsigned Imm,
                                         uint64_t Addr,
                                         const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VSrc16(Imm));
}

static DecodeStatus decodeOperand_VSrcV216(MCInst &Inst,
                                           unsigned Imm,
                                           uint64_t Addr,
                                           const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VSrcV216(Imm));
}

#define DECODE_SDWA(DecName) \
DECODE_OPERAND(decodeSDWA##DecName, decodeSDWA##DecName)

DECODE_SDWA(Src32)
DECODE_SDWA(Src16)
DECODE_SDWA(VopcDst)

#include "AMDGPUGenDisassemblerTables.inc"

//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//

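// Read a little-endian value of type T from the front of Bytes and advance
// Bytes past it.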
template <typename T> static inline T eatBytes(ArrayRef<uint8_t> &Bytes) {
  assert(Bytes.size() >= sizeof(T));
  const auto Res =
      support::endian::read<T, support::endianness::little>(Bytes.data());
  Bytes = Bytes.slice(sizeof(T));
  return Res;
}

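// Try to decode Inst against a single generated decoder table. On failure,
// Bytes is restored so the caller can retry with another table, since a
// partially successful decode may already have consumed literal bytes.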
DecodeStatus AMDGPUDisassembler::tryDecodeInst(const uint8_t *Table,
                                               MCInst &MI,
                                               uint64_t Inst,
                                               uint64_t Address) const {
  assert(MI.getOpcode() == 0);
  assert(MI.getNumOperands() == 0);
  MCInst TmpInst;
  HasLiteral = false;
  const auto SavedBytes = Bytes;
  if (decodeInstruction(Table, TmpInst, Inst, Address, this, STI)) {
    MI = TmpInst;
    return MCDisassembler::Success;
  }
  Bytes = SavedBytes;
  return MCDisassembler::Fail;
}

DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
                                                ArrayRef<uint8_t> Bytes_,
                                                uint64_t Address,
                                                raw_ostream &WS,
                                                raw_ostream &CS) const {
  CommentStream = &CS;
  bool IsSDWA = false;

  // ToDo: AMDGPUDisassembler supports only VI ISA.
  if (!STI.getFeatureBits()[AMDGPU::FeatureGCN3Encoding] && !isGFX10())
    report_fatal_error("Disassembly not yet supported for subtarget");

  unsigned MaxInstBytesNum = (std::min)(
    STI.getFeatureBits()[AMDGPU::FeatureGFX10] ? (size_t)20 :
    STI.getFeatureBits()[AMDGPU::FeatureVOP3Literal] ? (size_t)12 : (size_t)8,
    Bytes_.size());
  Bytes = Bytes_.slice(0, MaxInstBytesNum);

  DecodeStatus Res = MCDisassembler::Fail;
  do {
    // ToDo: it would be better to switch on the encoding length using some
    // bit predicate, but none is known yet, so try every table we can.

    // Try to decode DPP and SDWA first to resolve the conflict with the VOP1
    // and VOP2 encodings.
    if (Bytes.size() >= 8) {
      const uint64_t QW = eatBytes<uint64_t>(Bytes);
      Res = tryDecodeInst(DecoderTableDPP64, MI, QW, Address);
      if (Res) break;

      Res = tryDecodeInst(DecoderTableSDWA64, MI, QW, Address);
      if (Res) { IsSDWA = true; break; }

      Res = tryDecodeInst(DecoderTableSDWA964, MI, QW, Address);
      if (Res) { IsSDWA = true; break; }

      Res = tryDecodeInst(DecoderTableSDWA1064, MI, QW, Address);
      if (Res) { IsSDWA = true; break; }

      if (STI.getFeatureBits()[AMDGPU::FeatureUnpackedD16VMem]) {
        Res = tryDecodeInst(DecoderTableGFX80_UNPACKED64, MI, QW, Address);
        if (Res)
          break;
      }

      // Some GFX9 subtargets repurposed the v_mad_mix_f32, v_mad_mixlo_f16 and
      // v_mad_mixhi_f16 for FMA variants. Try to decode using this special
      // table first so we print the correct name.
      if (STI.getFeatureBits()[AMDGPU::FeatureFmaMixInsts]) {
        Res = tryDecodeInst(DecoderTableGFX9_DL64, MI, QW, Address);
        if (Res)
          break;
      }
    }

    // Reinitialize Bytes as DPP64 could have eaten too much
    Bytes = Bytes_.slice(0, MaxInstBytesNum);

    // Try to decode a 32-bit instruction
    if (Bytes.size() < 4) break;
    const uint32_t DW = eatBytes<uint32_t>(Bytes);
    Res = tryDecodeInst(DecoderTableGFX832, MI, DW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableAMDGPU32, MI, DW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableGFX932, MI, DW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableGFX1032, MI, DW, Address);
    if (Res) break;

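    // Try the 64-bit tables, combining the next dword (if present) with the
    // one already read.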
    if (Bytes.size() < 4) break;
    const uint64_t QW = ((uint64_t)eatBytes<uint32_t>(Bytes) << 32) | DW;
    Res = tryDecodeInst(DecoderTableGFX864, MI, QW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableAMDGPU64, MI, QW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableGFX964, MI, QW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableGFX1064, MI, QW, Address);
  } while (false);

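  // If 12 bytes were consumed but the instruction does not actually carry a
  // VOP3 literal, back off to the 8-byte encoding size.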
  if (Res && (MaxInstBytesNum - Bytes.size()) == 12 && (!HasLiteral ||
        !(MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::VOP3))) {
    MaxInstBytesNum = 8;
    Bytes = Bytes_.slice(0, MaxInstBytesNum);
    eatBytes<uint64_t>(Bytes);
  }

  if (Res && (MI.getOpcode() == AMDGPU::V_MAC_F32_e64_vi ||
              MI.getOpcode() == AMDGPU::V_MAC_F32_e64_gfx6_gfx7 ||
              MI.getOpcode() == AMDGPU::V_MAC_F32_e64_gfx10 ||
              MI.getOpcode() == AMDGPU::V_MAC_F16_e64_vi ||
              MI.getOpcode() == AMDGPU::V_FMAC_F32_e64_vi ||
              MI.getOpcode() == AMDGPU::V_FMAC_F32_e64_gfx10 ||
              MI.getOpcode() == AMDGPU::V_FMAC_F16_e64_gfx10)) {
    // Insert dummy unused src2_modifiers.
    insertNamedMCOperand(MI, MCOperand::createImm(0),
                         AMDGPU::OpName::src2_modifiers);
  }

  if (Res && (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::MIMG)) {
    Res = convertMIMGInst(MI);
  }

  if (Res && IsSDWA)
    Res = convertSDWAInst(MI);

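  // Some instructions tie vdst_in to vdst. If the decoded vdst_in operand is
  // missing or does not match the operand it is tied to, rewrite it.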
  int VDstIn_Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                              AMDGPU::OpName::vdst_in);
  if (VDstIn_Idx != -1) {
    int Tied = MCII->get(MI.getOpcode()).getOperandConstraint(VDstIn_Idx,
                           MCOI::OperandConstraint::TIED_TO);
    if (Tied != -1 && (MI.getNumOperands() <= (unsigned)VDstIn_Idx ||
         !MI.getOperand(VDstIn_Idx).isReg() ||
         MI.getOperand(VDstIn_Idx).getReg() != MI.getOperand(Tied).getReg())) {
      if (MI.getNumOperands() > (unsigned)VDstIn_Idx)
        MI.erase(&MI.getOperand(VDstIn_Idx));
      insertNamedMCOperand(MI,
        MCOperand::createReg(MI.getOperand(Tied).getReg()),
        AMDGPU::OpName::vdst_in);
    }
  }

  // If the opcode was not recognized, we'll assume a Size of 4 bytes (unless
  // there are fewer bytes left).
  Size = Res ? (MaxInstBytesNum - Bytes.size())
             : std::min((size_t)4, Bytes_.size());
  return Res;
}

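// Fix up a decoded SDWA instruction: on GFX9/GFX10, VOPC forms carry an
// implicit clamp operand; on VI, VOPC forms write VCC as sdst, and VOP1/VOP2
// forms need a default omod operand.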
DecodeStatus AMDGPUDisassembler::convertSDWAInst(MCInst &MI) const {
  if (STI.getFeatureBits()[AMDGPU::FeatureGFX9] ||
      STI.getFeatureBits()[AMDGPU::FeatureGFX10]) {
    if (AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst) != -1)
      // VOPC - insert clamp
      insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::clamp);
  } else if (STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands]) {
    int SDst = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst);
    if (SDst != -1) {
      // VOPC - insert VCC register as sdst
      insertNamedMCOperand(MI, createRegOperand(AMDGPU::VCC),
                           AMDGPU::OpName::sdst);
    } else {
      // VOP1/2 - insert omod if present in instruction
      insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::omod);
    }
  }
  return MCDisassembler::Success;
}

// Note that the MIMG format provides no information about the VADDR size.
// Consequently, decoded instructions always show the address as if it were a
// single dword, which may not actually be the case.
DecodeStatus AMDGPUDisassembler::convertMIMGInst(MCInst &MI) const {
  int VDstIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                           AMDGPU::OpName::vdst);

  int VDataIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::vdata);

  int DMaskIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::dmask);

  int TFEIdx   = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::tfe);
  int D16Idx   = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::d16);

  assert(VDataIdx != -1);
  assert(DMaskIdx != -1);
  assert(TFEIdx != -1);

  bool IsAtomic = (VDstIdx != -1);
  bool IsGather4 = MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::Gather4;

  unsigned DMask = MI.getOperand(DMaskIdx).getImm() & 0xf;
  if (DMask == 0)
    return MCDisassembler::Success;

  unsigned DstSize = IsGather4 ? 4 : countPopulation(DMask);
  if (DstSize == 1)
    return MCDisassembler::Success;

  bool D16 = D16Idx >= 0 && MI.getOperand(D16Idx).getImm();
  if (D16 && AMDGPU::hasPackedD16(STI)) {
    DstSize = (DstSize + 1) / 2;
  }

  // FIXME: Add tfe support
  if (MI.getOperand(TFEIdx).getImm())
    return MCDisassembler::Success;

  int NewOpcode = -1;

  if (IsGather4) {
    if (D16 && AMDGPU::hasPackedD16(STI))
      NewOpcode = AMDGPU::getMaskedMIMGOp(MI.getOpcode(), 2);
    else
      return MCDisassembler::Success;
  } else {
    NewOpcode = AMDGPU::getMaskedMIMGOp(MI.getOpcode(), DstSize);
    if (NewOpcode == -1)
      return MCDisassembler::Success;
  }

  auto RCID = MCII->get(NewOpcode).OpInfo[VDataIdx].RegClass;

  // Get first subregister of VData
  unsigned Vdata0 = MI.getOperand(VDataIdx).getReg();
  unsigned VdataSub0 = MRI.getSubReg(Vdata0, AMDGPU::sub0);
  Vdata0 = (VdataSub0 != 0) ? VdataSub0 : Vdata0;

  // Widen the register to the correct number of enabled channels.
  auto NewVdata = MRI.getMatchingSuperReg(Vdata0, AMDGPU::sub0,
                                          &MRI.getRegClass(RCID));
  if (NewVdata == AMDGPU::NoRegister) {
    // It's possible to encode this such that the low register + enabled
    // components exceeds the register count.
    return MCDisassembler::Success;
  }

  MI.setOpcode(NewOpcode);
  // vaddr will always appear as a single VGPR. This will look different than
  // how it is usually emitted because the number of register components is
  // not in the instruction encoding.
  MI.getOperand(VDataIdx) = MCOperand::createReg(NewVdata);

  if (IsAtomic) {
    // Atomic operations have an additional operand (a copy of data)
    MI.getOperand(VDstIdx) = MCOperand::createReg(NewVdata);
  }

  return MCDisassembler::Success;
}

const char* AMDGPUDisassembler::getRegClassName(unsigned RegClassID) const {
  return getContext().getRegisterInfo()->
    getRegClassName(&AMDGPUMCRegisterClasses[RegClassID]);
}

inline
MCOperand AMDGPUDisassembler::errOperand(unsigned V,
                                         const Twine &ErrMsg) const {
  *CommentStream << "Error: " + ErrMsg;

  // ToDo: add support for error operands to MCInst.h
  // return MCOperand::createError(V);
  return MCOperand();
}

inline
MCOperand AMDGPUDisassembler::createRegOperand(unsigned int RegId) const {
  return MCOperand::createReg(AMDGPU::getMCReg(RegId, STI));
}

inline
MCOperand AMDGPUDisassembler::createRegOperand(unsigned RegClassID,
                                               unsigned Val) const {
  const auto &RegCl = AMDGPUMCRegisterClasses[RegClassID];
  if (Val >= RegCl.getNumRegs())
    return errOperand(Val, Twine(getRegClassName(RegClassID)) +
                           ": unknown register " + Twine(Val));
  return createRegOperand(RegCl.getRegister(Val));
}

inline
MCOperand AMDGPUDisassembler::createSRegOperand(unsigned SRegClassID,
                                                unsigned Val) const {
  // ToDo: SI/CI have 104 SGPRs, VI - 102
  // Valery: here we accept as much as we can; let the assembler sort it out
  int shift = 0;
  switch (SRegClassID) {
  case AMDGPU::SGPR_32RegClassID:
  case AMDGPU::TTMP_32RegClassID:
    break;
  case AMDGPU::SGPR_64RegClassID:
  case AMDGPU::TTMP_64RegClassID:
    shift = 1;
    break;
  case AMDGPU::SGPR_128RegClassID:
  case AMDGPU::TTMP_128RegClassID:
  // ToDo: unclear if s[100:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  case AMDGPU::SGPR_256RegClassID:
  case AMDGPU::TTMP_256RegClassID:
  // ToDo: unclear if s[96:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  case AMDGPU::SGPR_512RegClassID:
  case AMDGPU::TTMP_512RegClassID:
    shift = 2;
    break;
  // ToDo: unclear if s[88:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  default:
    llvm_unreachable("unhandled register class");
  }

  if (Val % (1 << shift)) {
    *CommentStream << "Warning: " << getRegClassName(SRegClassID)
                   << ": scalar reg isn't aligned " << Val;
  }

  return createRegOperand(SRegClassID, Val >> shift);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_32(unsigned Val) const {
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_128(unsigned Val) const {
  return decodeSrcOp(OPW128, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VSrc16(unsigned Val) const {
  return decodeSrcOp(OPW16, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VSrcV216(unsigned Val) const {
  return decodeSrcOp(OPWV216, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VGPR_32(unsigned Val) const {
  // Some instructions have operand restrictions beyond what the encoding
  // allows. Some ordinarily VSrc_32 operands are VGPR_32, so clear the extra
  // high bit.
  Val &= 255;

  return createRegOperand(AMDGPU::VGPR_32RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VRegOrLds_32(unsigned Val) const {
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_64(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_64RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_96(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_96RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_128(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_128RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32(unsigned Val) const {
  // The TableGen-generated disassembler doesn't care about operand types,
  // leaving only the register class, so an SSrc_32 operand turns into SReg_32.
  // Therefore we accept immediates and literals here as well.
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32_XM0_XEXEC(
  unsigned Val) const {
  // SReg_32_XM0 is SReg_32 without M0 or EXEC_LO/EXEC_HI
  return decodeOperand_SReg_32(Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32_XEXEC_HI(
  unsigned Val) const {
  // SReg_32_XEXEC_HI is SReg_32 without EXEC_HI
  return decodeOperand_SReg_32(Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SRegOrLds_32(unsigned Val) const {
  // The TableGen-generated disassembler doesn't care about operand types,
  // leaving only the register class, so an SSrc_32 operand turns into SReg_32.
  // Therefore we accept immediates and literals here as well.
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_64_XEXEC(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_128(unsigned Val) const {
  return decodeSrcOp(OPW128, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_256(unsigned Val) const {
  return decodeDstOp(OPW256, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_512(unsigned Val) const {
  return decodeDstOp(OPW512, Val);
}

MCOperand AMDGPUDisassembler::decodeLiteralConstant() const {
  // For now all literal constants are assumed to be unsigned integers.
  // ToDo: deal with signed/unsigned 64-bit integer constants
  // ToDo: deal with float/double constants
  if (!HasLiteral) {
    if (Bytes.size() < 4) {
      return errOperand(0, "cannot read literal, inst bytes left " +
                        Twine(Bytes.size()));
    }
    HasLiteral = true;
    Literal = eatBytes<uint32_t>(Bytes);
  }
  return MCOperand::createImm(Literal);
}

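// Decode an inline integer constant. Encodings from INLINE_INTEGER_C_MIN up
// to INLINE_INTEGER_C_POSITIVE_MAX map to 0..64; the remaining encodings up
// to INLINE_INTEGER_C_MAX map to -1..-16.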
MCOperand AMDGPUDisassembler::decodeIntImmed(unsigned Imm) {
  using namespace AMDGPU::EncValues;

  assert(Imm >= INLINE_INTEGER_C_MIN && Imm <= INLINE_INTEGER_C_MAX);
  // The cast to int64_t prevents negative overflow.
  return MCOperand::createImm((Imm <= INLINE_INTEGER_C_POSITIVE_MAX) ?
    (static_cast<int64_t>(Imm) - INLINE_INTEGER_C_MIN) :
    (INLINE_INTEGER_C_POSITIVE_MAX - static_cast<int64_t>(Imm)));
}

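// Map an inline floating-point constant encoding (240..248) to its 32-bit
// IEEE-754 bit pattern.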
static int64_t getInlineImmVal32(unsigned Imm) {
  switch (Imm) {
  case 240:
    return FloatToBits(0.5f);
  case 241:
    return FloatToBits(-0.5f);
  case 242:
    return FloatToBits(1.0f);
  case 243:
    return FloatToBits(-1.0f);
  case 244:
    return FloatToBits(2.0f);
  case 245:
    return FloatToBits(-2.0f);
  case 246:
    return FloatToBits(4.0f);
  case 247:
    return FloatToBits(-4.0f);
  case 248: // 1 / (2 * PI)
    return 0x3e22f983;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

static int64_t getInlineImmVal64(unsigned Imm) {
  switch (Imm) {
  case 240:
    return DoubleToBits(0.5);
  case 241:
    return DoubleToBits(-0.5);
  case 242:
    return DoubleToBits(1.0);
  case 243:
    return DoubleToBits(-1.0);
  case 244:
    return DoubleToBits(2.0);
  case 245:
    return DoubleToBits(-2.0);
  case 246:
    return DoubleToBits(4.0);
  case 247:
    return DoubleToBits(-4.0);
  case 248: // 1 / (2 * PI)
    return 0x3fc45f306dc9c882;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

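// Same mapping for 16-bit operands; the returned values are IEEE-754
// half-precision bit patterns (e.g. 0x3800 is 0.5, 0x3C00 is 1.0).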
static int64_t getInlineImmVal16(unsigned Imm) {
  switch (Imm) {
  case 240:
    return 0x3800;
  case 241:
    return 0xB800;
  case 242:
    return 0x3C00;
  case 243:
    return 0xBC00;
  case 244:
    return 0x4000;
  case 245:
    return 0xC000;
  case 246:
    return 0x4400;
  case 247:
    return 0xC400;
  case 248: // 1 / (2 * PI)
    return 0x3118;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

MCOperand AMDGPUDisassembler::decodeFPImmed(OpWidthTy Width, unsigned Imm) {
  assert(Imm >= AMDGPU::EncValues::INLINE_FLOATING_C_MIN
      && Imm <= AMDGPU::EncValues::INLINE_FLOATING_C_MAX);

  // ToDo: case 248: 1/(2*PI) - is allowed only on VI
  switch (Width) {
  case OPW32:
    return MCOperand::createImm(getInlineImmVal32(Imm));
  case OPW64:
    return MCOperand::createImm(getInlineImmVal64(Imm));
  case OPW16:
  case OPWV216:
    return MCOperand::createImm(getInlineImmVal16(Imm));
  default:
    llvm_unreachable("implement me");
  }
}

unsigned AMDGPUDisassembler::getVgprClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;

  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall through
  case OPW32:
  case OPW16:
  case OPWV216:
    return VGPR_32RegClassID;
  case OPW64: return VReg_64RegClassID;
  case OPW128: return VReg_128RegClassID;
  }
}

unsigned AMDGPUDisassembler::getSgprClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;

  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall through
  case OPW32:
  case OPW16:
  case OPWV216:
    return SGPR_32RegClassID;
  case OPW64: return SGPR_64RegClassID;
  case OPW128: return SGPR_128RegClassID;
  case OPW256: return SGPR_256RegClassID;
  case OPW512: return SGPR_512RegClassID;
  }
}

unsigned AMDGPUDisassembler::getTtmpClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;

  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall through
  case OPW32:
  case OPW16:
  case OPWV216:
    return TTMP_32RegClassID;
  case OPW64: return TTMP_64RegClassID;
  case OPW128: return TTMP_128RegClassID;
  case OPW256: return TTMP_256RegClassID;
  case OPW512: return TTMP_512RegClassID;
  }
}

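// Return the index of Val within the subtarget's trap temporary (TTMP)
// register range, or -1 if Val does not encode a TTMP register.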
int AMDGPUDisassembler::getTTmpIdx(unsigned Val) const {
  using namespace AMDGPU::EncValues;

  unsigned TTmpMin =
      (isGFX9() || isGFX10()) ? TTMP_GFX9_GFX10_MIN : TTMP_VI_MIN;
  unsigned TTmpMax =
      (isGFX9() || isGFX10()) ? TTMP_GFX9_GFX10_MAX : TTMP_VI_MAX;

  return (TTmpMin <= Val && Val <= TTmpMax) ? Val - TTmpMin : -1;
}

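// Decode a 9-bit source operand: a VGPR, SGPR, or TTMP register, an inline
// constant, the literal-constant marker, or one of the special registers
// handled by decodeSpecialReg32/64.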
MCOperand AMDGPUDisassembler::decodeSrcOp(const OpWidthTy Width,
                                          unsigned Val) const {
  using namespace AMDGPU::EncValues;

  assert(Val < 512); // enum9

  if (VGPR_MIN <= Val && Val <= VGPR_MAX) {
    return createRegOperand(getVgprClassId(Width), Val - VGPR_MIN);
  }
  if (Val <= SGPR_MAX) {
    // "SGPR_MIN <= Val" is always true and causes a compilation warning.
    assert(SGPR_MIN == 0);
    return createSRegOperand(getSgprClassId(Width), Val - SGPR_MIN);
  }

  int TTmpIdx = getTTmpIdx(Val);
  if (TTmpIdx >= 0) {
    return createSRegOperand(getTtmpClassId(Width), TTmpIdx);
  }

  if (INLINE_INTEGER_C_MIN <= Val && Val <= INLINE_INTEGER_C_MAX)
    return decodeIntImmed(Val);

  if (INLINE_FLOATING_C_MIN <= Val && Val <= INLINE_FLOATING_C_MAX)
    return decodeFPImmed(Width, Val);

  if (Val == LITERAL_CONST)
    return decodeLiteralConstant();

  switch (Width) {
  case OPW32:
  case OPW16:
  case OPWV216:
    return decodeSpecialReg32(Val);
  case OPW64:
    return decodeSpecialReg64(Val);
  default:
    llvm_unreachable("unexpected immediate type");
  }
}

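// Decode a wide (256- or 512-bit) scalar destination operand; only SGPR and
// TTMP tuples are valid here.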
MCOperand AMDGPUDisassembler::decodeDstOp(const OpWidthTy Width,
                                          unsigned Val) const {
  using namespace AMDGPU::EncValues;

  assert(Val < 128);
  assert(Width == OPW256 || Width == OPW512);

  if (Val <= SGPR_MAX) {
    // "SGPR_MIN <= Val" is always true and causes a compilation warning.
    assert(SGPR_MIN == 0);
    return createSRegOperand(getSgprClassId(Width), Val - SGPR_MIN);
  }

  int TTmpIdx = getTTmpIdx(Val);
  if (TTmpIdx >= 0) {
    return createSRegOperand(getTtmpClassId(Width), TTmpIdx);
  }

  llvm_unreachable("unknown dst register");
}

MCOperand AMDGPUDisassembler::decodeSpecialReg32(unsigned Val) const {
  using namespace AMDGPU;

  switch (Val) {
  case 102: return createRegOperand(FLAT_SCR_LO);
  case 103: return createRegOperand(FLAT_SCR_HI);
  case 104: return createRegOperand(XNACK_MASK_LO);
  case 105: return createRegOperand(XNACK_MASK_HI);
  case 106: return createRegOperand(VCC_LO);
  case 107: return createRegOperand(VCC_HI);
  case 108: return createRegOperand(TBA_LO);
  case 109: return createRegOperand(TBA_HI);
  case 110: return createRegOperand(TMA_LO);
  case 111: return createRegOperand(TMA_HI);
  case 124: return createRegOperand(M0);
  case 125: return createRegOperand(SGPR_NULL);
  case 126: return createRegOperand(EXEC_LO);
  case 127: return createRegOperand(EXEC_HI);
  case 235: return createRegOperand(SRC_SHARED_BASE);
  case 236: return createRegOperand(SRC_SHARED_LIMIT);
  case 237: return createRegOperand(SRC_PRIVATE_BASE);
  case 238: return createRegOperand(SRC_PRIVATE_LIMIT);
  case 239: return createRegOperand(SRC_POPS_EXITING_WAVE_ID);
  case 251: break; // ToDo: no support for vccz register
  case 252: break; // ToDo: no support for execz register
  case 253: return createRegOperand(SCC);
  case 254: return createRegOperand(LDS_DIRECT);
  default: break;
  }
  return errOperand(Val, "unknown operand encoding " + Twine(Val));
}

MCOperand AMDGPUDisassembler::decodeSpecialReg64(unsigned Val) const {
  using namespace AMDGPU;

  switch (Val) {
  case 102: return createRegOperand(FLAT_SCR);
  case 104: return createRegOperand(XNACK_MASK);
  case 106: return createRegOperand(VCC);
  case 108: return createRegOperand(TBA);
  case 110: return createRegOperand(TMA);
  case 126: return createRegOperand(EXEC);
  case 235: return createRegOperand(SRC_SHARED_BASE);
  case 236: return createRegOperand(SRC_SHARED_LIMIT);
  case 237: return createRegOperand(SRC_PRIVATE_BASE);
  case 238: return createRegOperand(SRC_PRIVATE_LIMIT);
  case 239: return createRegOperand(SRC_POPS_EXITING_WAVE_ID);
  default: break;
  }
  return errOperand(Val, "unknown operand encoding " + Twine(Val));
}

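// Decode an SDWA src operand. On GFX9/GFX10 the encoding covers VGPRs, SGPRs
// and TTMPs directly; any other value is rebased by SRC_SGPR_MIN and decoded
// as an inline constant or special register. On VI only VGPRs are encoded.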
MCOperand AMDGPUDisassembler::decodeSDWASrc(const OpWidthTy Width,
                                            const unsigned Val) const {
  using namespace AMDGPU::SDWA;
  using namespace AMDGPU::EncValues;

  if (STI.getFeatureBits()[AMDGPU::FeatureGFX9] ||
      STI.getFeatureBits()[AMDGPU::FeatureGFX10]) {
    // XXX: The int cast avoids a "comparison with unsigned is always true"
    // warning.
    if (int(SDWA9EncValues::SRC_VGPR_MIN) <= int(Val) &&
        Val <= SDWA9EncValues::SRC_VGPR_MAX) {
      return createRegOperand(getVgprClassId(Width),
                              Val - SDWA9EncValues::SRC_VGPR_MIN);
    }
    if (SDWA9EncValues::SRC_SGPR_MIN <= Val &&
        Val <= (isGFX10() ? SDWA9EncValues::SRC_SGPR_MAX_GFX10
                          : SDWA9EncValues::SRC_SGPR_MAX_SI)) {
      return createSRegOperand(getSgprClassId(Width),
                               Val - SDWA9EncValues::SRC_SGPR_MIN);
    }
    if (SDWA9EncValues::SRC_TTMP_MIN <= Val &&
        Val <= SDWA9EncValues::SRC_TTMP_MAX) {
      return createSRegOperand(getTtmpClassId(Width),
                               Val - SDWA9EncValues::SRC_TTMP_MIN);
    }

    const unsigned SVal = Val - SDWA9EncValues::SRC_SGPR_MIN;

    if (INLINE_INTEGER_C_MIN <= SVal && SVal <= INLINE_INTEGER_C_MAX)
      return decodeIntImmed(SVal);

    if (INLINE_FLOATING_C_MIN <= SVal && SVal <= INLINE_FLOATING_C_MAX)
      return decodeFPImmed(Width, SVal);

    return decodeSpecialReg32(SVal);
  } else if (STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands]) {
    return createRegOperand(getVgprClassId(Width), Val);
  }
  llvm_unreachable("unsupported target");
}

MCOperand AMDGPUDisassembler::decodeSDWASrc16(unsigned Val) const {
  return decodeSDWASrc(OPW16, Val);
}

MCOperand AMDGPUDisassembler::decodeSDWASrc32(unsigned Val) const {
  return decodeSDWASrc(OPW32, Val);
}

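// Decode the sdst operand of an SDWA VOPC instruction: if the
// VOPC_DST_VCC_MASK bit is set, the low bits select an SGPR, TTMP, or special
// 64-bit register destination; otherwise the destination is the implicit VCC.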
MCOperand AMDGPUDisassembler::decodeSDWAVopcDst(unsigned Val) const {
  using namespace AMDGPU::SDWA;

  assert((STI.getFeatureBits()[AMDGPU::FeatureGFX9] ||
          STI.getFeatureBits()[AMDGPU::FeatureGFX10]) &&
         "SDWAVopcDst should be present only on GFX9+");

  if (Val & SDWA9EncValues::VOPC_DST_VCC_MASK) {
    Val &= SDWA9EncValues::VOPC_DST_SGPR_MASK;

    int TTmpIdx = getTTmpIdx(Val);
    if (TTmpIdx >= 0) {
      return createSRegOperand(getTtmpClassId(OPW64), TTmpIdx);
    } else if (Val > SGPR_MAX) {
      return decodeSpecialReg64(Val);
    } else {
      return createSRegOperand(getSgprClassId(OPW64), Val);
    }
  } else {
    return createRegOperand(AMDGPU::VCC);
  }
}

bool AMDGPUDisassembler::isVI() const {
  return STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands];
}

bool AMDGPUDisassembler::isGFX9() const {
  return STI.getFeatureBits()[AMDGPU::FeatureGFX9];
}

bool AMDGPUDisassembler::isGFX10() const {
  return STI.getFeatureBits()[AMDGPU::FeatureGFX10];
}

//===----------------------------------------------------------------------===//
// AMDGPUSymbolizer
//===----------------------------------------------------------------------===//

// Try to find a symbol name for the specified label.
bool AMDGPUSymbolizer::tryAddingSymbolicOperand(MCInst &Inst,
                                raw_ostream &/*cStream*/, int64_t Value,
                                uint64_t /*Address*/, bool IsBranch,
                                uint64_t /*Offset*/, uint64_t /*InstSize*/) {
  using SymbolInfoTy = std::tuple<uint64_t, StringRef, uint8_t>;
  using SectionSymbolsTy = std::vector<SymbolInfoTy>;

  if (!IsBranch) {
    return false;
  }

  auto *Symbols = static_cast<SectionSymbolsTy *>(DisInfo);
  if (!Symbols)
    return false;

  auto Result = std::find_if(Symbols->begin(), Symbols->end(),
                             [Value](const SymbolInfoTy &Val) {
                               return std::get<0>(Val) ==
                                          static_cast<uint64_t>(Value) &&
                                      std::get<2>(Val) == ELF::STT_NOTYPE;
                             });
  if (Result != Symbols->end()) {
    auto *Sym = Ctx.getOrCreateSymbol(std::get<1>(*Result));
    const auto *Add = MCSymbolRefExpr::create(Sym, Ctx);
    Inst.addOperand(MCOperand::createExpr(Add));
    return true;
  }
  return false;
}

void AMDGPUSymbolizer::tryAddingPcLoadReferenceComment(raw_ostream &cStream,
                                                       int64_t Value,
                                                       uint64_t Address) {
  llvm_unreachable("unimplemented");
}

//===----------------------------------------------------------------------===//
// Initialization
//===----------------------------------------------------------------------===//

static MCSymbolizer *createAMDGPUSymbolizer(const Triple &/*TT*/,
                              LLVMOpInfoCallback /*GetOpInfo*/,
                              LLVMSymbolLookupCallback /*SymbolLookUp*/,
                              void *DisInfo,
                              MCContext *Ctx,
                              std::unique_ptr<MCRelocationInfo> &&RelInfo) {
  return new AMDGPUSymbolizer(*Ctx, std::move(RelInfo), DisInfo);
}

static MCDisassembler *createAMDGPUDisassembler(const Target &T,
                                                const MCSubtargetInfo &STI,
                                                MCContext &Ctx) {
  return new AMDGPUDisassembler(STI, Ctx, T.createMCInstrInfo());
}

extern "C" void LLVMInitializeAMDGPUDisassembler() {
  TargetRegistry::RegisterMCDisassembler(getTheGCNTarget(),
                                         createAMDGPUDisassembler);
  TargetRegistry::RegisterMCSymbolizer(getTheGCNTarget(),
                                       createAMDGPUSymbolizer);
}