1 //===- AMDGPUDisassembler.cpp - Disassembler for AMDGPU ISA ---------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 //===----------------------------------------------------------------------===//
10 //
11 /// \file
12 ///
13 /// This file contains definition for AMDGPU ISA disassembler
14 //
15 //===----------------------------------------------------------------------===//
16
17 // ToDo: What to do with instruction suffixes (v_mov_b32 vs v_mov_b32_e32)?
18
19 #include "Disassembler/AMDGPUDisassembler.h"
20 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
21 #include "TargetInfo/AMDGPUTargetInfo.h"
22 #include "Utils/AMDGPUBaseInfo.h"
23 #include "llvm-c/DisassemblerTypes.h"
24 #include "llvm/MC/MCAsmInfo.h"
25 #include "llvm/MC/MCContext.h"
26 #include "llvm/MC/MCExpr.h"
27 #include "llvm/MC/MCFixedLenDisassembler.h"
28 #include "llvm/Support/AMDHSAKernelDescriptor.h"
29 #include "llvm/Support/TargetRegistry.h"
30
31 using namespace llvm;
32
33 #define DEBUG_TYPE "amdgpu-disassembler"
34
35 #define SGPR_MAX \
36 (isGFX10Plus() ? AMDGPU::EncValues::SGPR_MAX_GFX10 \
37 : AMDGPU::EncValues::SGPR_MAX_SI)
38
39 using DecodeStatus = llvm::MCDisassembler::DecodeStatus;
40
AMDGPUDisassembler(const MCSubtargetInfo & STI,MCContext & Ctx,MCInstrInfo const * MCII)41 AMDGPUDisassembler::AMDGPUDisassembler(const MCSubtargetInfo &STI,
42 MCContext &Ctx,
43 MCInstrInfo const *MCII) :
44 MCDisassembler(STI, Ctx), MCII(MCII), MRI(*Ctx.getRegisterInfo()),
45 TargetMaxInstBytes(Ctx.getAsmInfo()->getMaxInstLength(&STI)) {
46
47 // ToDo: AMDGPUDisassembler supports only VI ISA.
48 if (!STI.getFeatureBits()[AMDGPU::FeatureGCN3Encoding] && !isGFX10Plus())
49 report_fatal_error("Disassembly not yet supported for subtarget");
50 }
51
52 inline static MCDisassembler::DecodeStatus
addOperand(MCInst & Inst,const MCOperand & Opnd)53 addOperand(MCInst &Inst, const MCOperand& Opnd) {
54 Inst.addOperand(Opnd);
55 return Opnd.isValid() ?
56 MCDisassembler::Success :
57 MCDisassembler::Fail;
58 }
59
insertNamedMCOperand(MCInst & MI,const MCOperand & Op,uint16_t NameIdx)60 static int insertNamedMCOperand(MCInst &MI, const MCOperand &Op,
61 uint16_t NameIdx) {
62 int OpIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), NameIdx);
63 if (OpIdx != -1) {
64 auto I = MI.begin();
65 std::advance(I, OpIdx);
66 MI.insert(I, Op);
67 }
68 return OpIdx;
69 }
70
decodeSoppBrTarget(MCInst & Inst,unsigned Imm,uint64_t Addr,const void * Decoder)71 static DecodeStatus decodeSoppBrTarget(MCInst &Inst, unsigned Imm,
72 uint64_t Addr, const void *Decoder) {
73 auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
74
75 // Our branches take a simm16, but we need two extra bits to account for the
76 // factor of 4.
77 APInt SignedOffset(18, Imm * 4, true);
78 int64_t Offset = (SignedOffset.sext(64) + 4 + Addr).getSExtValue();
79
80 if (DAsm->tryAddingSymbolicOperand(Inst, Offset, Addr, true, 2, 2))
81 return MCDisassembler::Success;
82 return addOperand(Inst, MCOperand::createImm(Imm));
83 }
84
decodeSMEMOffset(MCInst & Inst,unsigned Imm,uint64_t Addr,const void * Decoder)85 static DecodeStatus decodeSMEMOffset(MCInst &Inst, unsigned Imm,
86 uint64_t Addr, const void *Decoder) {
87 auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
88 int64_t Offset;
89 if (DAsm->isVI()) { // VI supports 20-bit unsigned offsets.
90 Offset = Imm & 0xFFFFF;
91 } else { // GFX9+ supports 21-bit signed offsets.
92 Offset = SignExtend64<21>(Imm);
93 }
94 return addOperand(Inst, MCOperand::createImm(Offset));
95 }
96
decodeBoolReg(MCInst & Inst,unsigned Val,uint64_t Addr,const void * Decoder)97 static DecodeStatus decodeBoolReg(MCInst &Inst, unsigned Val,
98 uint64_t Addr, const void *Decoder) {
99 auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
100 return addOperand(Inst, DAsm->decodeBoolReg(Val));
101 }
102
103 #define DECODE_OPERAND(StaticDecoderName, DecoderName) \
104 static DecodeStatus StaticDecoderName(MCInst &Inst, \
105 unsigned Imm, \
106 uint64_t /*Addr*/, \
107 const void *Decoder) { \
108 auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder); \
109 return addOperand(Inst, DAsm->DecoderName(Imm)); \
110 }
111
112 #define DECODE_OPERAND_REG(RegClass) \
113 DECODE_OPERAND(Decode##RegClass##RegisterClass, decodeOperand_##RegClass)
114
115 DECODE_OPERAND_REG(VGPR_32)
DECODE_OPERAND_REG(VRegOrLds_32)116 DECODE_OPERAND_REG(VRegOrLds_32)
117 DECODE_OPERAND_REG(VS_32)
118 DECODE_OPERAND_REG(VS_64)
119 DECODE_OPERAND_REG(VS_128)
120
121 DECODE_OPERAND_REG(VReg_64)
122 DECODE_OPERAND_REG(VReg_96)
123 DECODE_OPERAND_REG(VReg_128)
124 DECODE_OPERAND_REG(VReg_256)
125 DECODE_OPERAND_REG(VReg_512)
126 DECODE_OPERAND_REG(VReg_1024)
127
128 DECODE_OPERAND_REG(SReg_32)
129 DECODE_OPERAND_REG(SReg_32_XM0_XEXEC)
130 DECODE_OPERAND_REG(SReg_32_XEXEC_HI)
131 DECODE_OPERAND_REG(SRegOrLds_32)
132 DECODE_OPERAND_REG(SReg_64)
133 DECODE_OPERAND_REG(SReg_64_XEXEC)
134 DECODE_OPERAND_REG(SReg_128)
135 DECODE_OPERAND_REG(SReg_256)
136 DECODE_OPERAND_REG(SReg_512)
137
138 DECODE_OPERAND_REG(AGPR_32)
139 DECODE_OPERAND_REG(AReg_64)
140 DECODE_OPERAND_REG(AReg_128)
141 DECODE_OPERAND_REG(AReg_256)
142 DECODE_OPERAND_REG(AReg_512)
143 DECODE_OPERAND_REG(AReg_1024)
144 DECODE_OPERAND_REG(AV_32)
145 DECODE_OPERAND_REG(AV_64)
146
147 static DecodeStatus decodeOperand_VSrc16(MCInst &Inst,
148 unsigned Imm,
149 uint64_t Addr,
150 const void *Decoder) {
151 auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
152 return addOperand(Inst, DAsm->decodeOperand_VSrc16(Imm));
153 }
154
decodeOperand_VSrcV216(MCInst & Inst,unsigned Imm,uint64_t Addr,const void * Decoder)155 static DecodeStatus decodeOperand_VSrcV216(MCInst &Inst,
156 unsigned Imm,
157 uint64_t Addr,
158 const void *Decoder) {
159 auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
160 return addOperand(Inst, DAsm->decodeOperand_VSrcV216(Imm));
161 }
162
decodeOperand_VSrcV232(MCInst & Inst,unsigned Imm,uint64_t Addr,const void * Decoder)163 static DecodeStatus decodeOperand_VSrcV232(MCInst &Inst,
164 unsigned Imm,
165 uint64_t Addr,
166 const void *Decoder) {
167 auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
168 return addOperand(Inst, DAsm->decodeOperand_VSrcV232(Imm));
169 }
170
decodeOperand_VS_16(MCInst & Inst,unsigned Imm,uint64_t Addr,const void * Decoder)171 static DecodeStatus decodeOperand_VS_16(MCInst &Inst,
172 unsigned Imm,
173 uint64_t Addr,
174 const void *Decoder) {
175 auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
176 return addOperand(Inst, DAsm->decodeOperand_VSrc16(Imm));
177 }
178
decodeOperand_VS_32(MCInst & Inst,unsigned Imm,uint64_t Addr,const void * Decoder)179 static DecodeStatus decodeOperand_VS_32(MCInst &Inst,
180 unsigned Imm,
181 uint64_t Addr,
182 const void *Decoder) {
183 auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
184 return addOperand(Inst, DAsm->decodeOperand_VS_32(Imm));
185 }
186
decodeOperand_AReg_64(MCInst & Inst,unsigned Imm,uint64_t Addr,const void * Decoder)187 static DecodeStatus decodeOperand_AReg_64(MCInst &Inst,
188 unsigned Imm,
189 uint64_t Addr,
190 const void *Decoder) {
191 auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
192 return addOperand(Inst, DAsm->decodeSrcOp(AMDGPUDisassembler::OPW64, Imm | 512));
193 }
194
decodeOperand_AReg_128(MCInst & Inst,unsigned Imm,uint64_t Addr,const void * Decoder)195 static DecodeStatus decodeOperand_AReg_128(MCInst &Inst,
196 unsigned Imm,
197 uint64_t Addr,
198 const void *Decoder) {
199 auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
200 return addOperand(Inst, DAsm->decodeSrcOp(AMDGPUDisassembler::OPW128, Imm | 512));
201 }
202
decodeOperand_AReg_256(MCInst & Inst,unsigned Imm,uint64_t Addr,const void * Decoder)203 static DecodeStatus decodeOperand_AReg_256(MCInst &Inst,
204 unsigned Imm,
205 uint64_t Addr,
206 const void *Decoder) {
207 auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
208 return addOperand(Inst, DAsm->decodeSrcOp(AMDGPUDisassembler::OPW256, Imm | 512));
209 }
210
decodeOperand_AReg_512(MCInst & Inst,unsigned Imm,uint64_t Addr,const void * Decoder)211 static DecodeStatus decodeOperand_AReg_512(MCInst &Inst,
212 unsigned Imm,
213 uint64_t Addr,
214 const void *Decoder) {
215 auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
216 return addOperand(Inst, DAsm->decodeSrcOp(AMDGPUDisassembler::OPW512, Imm | 512));
217 }
218
decodeOperand_AReg_1024(MCInst & Inst,unsigned Imm,uint64_t Addr,const void * Decoder)219 static DecodeStatus decodeOperand_AReg_1024(MCInst &Inst,
220 unsigned Imm,
221 uint64_t Addr,
222 const void *Decoder) {
223 auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
224 return addOperand(Inst, DAsm->decodeSrcOp(AMDGPUDisassembler::OPW1024, Imm | 512));
225 }
226
decodeOperand_VReg_64(MCInst & Inst,unsigned Imm,uint64_t Addr,const void * Decoder)227 static DecodeStatus decodeOperand_VReg_64(MCInst &Inst,
228 unsigned Imm,
229 uint64_t Addr,
230 const void *Decoder) {
231 auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
232 return addOperand(Inst, DAsm->decodeSrcOp(AMDGPUDisassembler::OPW64, Imm));
233 }
234
decodeOperand_VReg_128(MCInst & Inst,unsigned Imm,uint64_t Addr,const void * Decoder)235 static DecodeStatus decodeOperand_VReg_128(MCInst &Inst,
236 unsigned Imm,
237 uint64_t Addr,
238 const void *Decoder) {
239 auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
240 return addOperand(Inst, DAsm->decodeSrcOp(AMDGPUDisassembler::OPW128, Imm));
241 }
242
decodeOperand_VReg_256(MCInst & Inst,unsigned Imm,uint64_t Addr,const void * Decoder)243 static DecodeStatus decodeOperand_VReg_256(MCInst &Inst,
244 unsigned Imm,
245 uint64_t Addr,
246 const void *Decoder) {
247 auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
248 return addOperand(Inst, DAsm->decodeSrcOp(AMDGPUDisassembler::OPW256, Imm));
249 }
250
decodeOperand_VReg_512(MCInst & Inst,unsigned Imm,uint64_t Addr,const void * Decoder)251 static DecodeStatus decodeOperand_VReg_512(MCInst &Inst,
252 unsigned Imm,
253 uint64_t Addr,
254 const void *Decoder) {
255 auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
256 return addOperand(Inst, DAsm->decodeSrcOp(AMDGPUDisassembler::OPW512, Imm));
257 }
258
decodeOperand_VReg_1024(MCInst & Inst,unsigned Imm,uint64_t Addr,const void * Decoder)259 static DecodeStatus decodeOperand_VReg_1024(MCInst &Inst,
260 unsigned Imm,
261 uint64_t Addr,
262 const void *Decoder) {
263 auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
264 return addOperand(Inst, DAsm->decodeSrcOp(AMDGPUDisassembler::OPW1024, Imm));
265 }
266
IsAGPROperand(const MCInst & Inst,int OpIdx,const MCRegisterInfo * MRI)267 static bool IsAGPROperand(const MCInst &Inst, int OpIdx,
268 const MCRegisterInfo *MRI) {
269 if (OpIdx < 0)
270 return false;
271
272 const MCOperand &Op = Inst.getOperand(OpIdx);
273 if (!Op.isReg())
274 return false;
275
276 unsigned Sub = MRI->getSubReg(Op.getReg(), AMDGPU::sub0);
277 auto Reg = Sub ? Sub : Op.getReg();
278 return Reg >= AMDGPU::AGPR0 && Reg <= AMDGPU::AGPR255;
279 }
280
decodeOperand_AVLdSt_Any(MCInst & Inst,unsigned Imm,AMDGPUDisassembler::OpWidthTy Opw,const void * Decoder)281 static DecodeStatus decodeOperand_AVLdSt_Any(MCInst &Inst,
282 unsigned Imm,
283 AMDGPUDisassembler::OpWidthTy Opw,
284 const void *Decoder) {
285 auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
286 if (!DAsm->isGFX90A()) {
287 Imm &= 511;
288 } else {
289 // If atomic has both vdata and vdst their register classes are tied.
290 // The bit is decoded along with the vdst, first operand. We need to
291 // change register class to AGPR if vdst was AGPR.
292 // If a DS instruction has both data0 and data1 their register classes
293 // are also tied.
294 unsigned Opc = Inst.getOpcode();
295 uint64_t TSFlags = DAsm->getMCII()->get(Opc).TSFlags;
296 uint16_t DataNameIdx = (TSFlags & SIInstrFlags::DS) ? AMDGPU::OpName::data0
297 : AMDGPU::OpName::vdata;
298 const MCRegisterInfo *MRI = DAsm->getContext().getRegisterInfo();
299 int DataIdx = AMDGPU::getNamedOperandIdx(Opc, DataNameIdx);
300 if ((int)Inst.getNumOperands() == DataIdx) {
301 int DstIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
302 if (IsAGPROperand(Inst, DstIdx, MRI))
303 Imm |= 512;
304 }
305
306 if (TSFlags & SIInstrFlags::DS) {
307 int Data2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data1);
308 if ((int)Inst.getNumOperands() == Data2Idx &&
309 IsAGPROperand(Inst, DataIdx, MRI))
310 Imm |= 512;
311 }
312 }
313 return addOperand(Inst, DAsm->decodeSrcOp(Opw, Imm | 256));
314 }
315
DecodeAVLdSt_32RegisterClass(MCInst & Inst,unsigned Imm,uint64_t Addr,const void * Decoder)316 static DecodeStatus DecodeAVLdSt_32RegisterClass(MCInst &Inst,
317 unsigned Imm,
318 uint64_t Addr,
319 const void *Decoder) {
320 return decodeOperand_AVLdSt_Any(Inst, Imm,
321 AMDGPUDisassembler::OPW32, Decoder);
322 }
323
DecodeAVLdSt_64RegisterClass(MCInst & Inst,unsigned Imm,uint64_t Addr,const void * Decoder)324 static DecodeStatus DecodeAVLdSt_64RegisterClass(MCInst &Inst,
325 unsigned Imm,
326 uint64_t Addr,
327 const void *Decoder) {
328 return decodeOperand_AVLdSt_Any(Inst, Imm,
329 AMDGPUDisassembler::OPW64, Decoder);
330 }
331
DecodeAVLdSt_96RegisterClass(MCInst & Inst,unsigned Imm,uint64_t Addr,const void * Decoder)332 static DecodeStatus DecodeAVLdSt_96RegisterClass(MCInst &Inst,
333 unsigned Imm,
334 uint64_t Addr,
335 const void *Decoder) {
336 return decodeOperand_AVLdSt_Any(Inst, Imm,
337 AMDGPUDisassembler::OPW96, Decoder);
338 }
339
DecodeAVLdSt_128RegisterClass(MCInst & Inst,unsigned Imm,uint64_t Addr,const void * Decoder)340 static DecodeStatus DecodeAVLdSt_128RegisterClass(MCInst &Inst,
341 unsigned Imm,
342 uint64_t Addr,
343 const void *Decoder) {
344 return decodeOperand_AVLdSt_Any(Inst, Imm,
345 AMDGPUDisassembler::OPW128, Decoder);
346 }
347
decodeOperand_SReg_32(MCInst & Inst,unsigned Imm,uint64_t Addr,const void * Decoder)348 static DecodeStatus decodeOperand_SReg_32(MCInst &Inst,
349 unsigned Imm,
350 uint64_t Addr,
351 const void *Decoder) {
352 auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
353 return addOperand(Inst, DAsm->decodeOperand_SReg_32(Imm));
354 }
355
decodeOperand_VGPR_32(MCInst & Inst,unsigned Imm,uint64_t Addr,const void * Decoder)356 static DecodeStatus decodeOperand_VGPR_32(MCInst &Inst,
357 unsigned Imm,
358 uint64_t Addr,
359 const void *Decoder) {
360 auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
361 return addOperand(Inst, DAsm->decodeSrcOp(AMDGPUDisassembler::OPW32, Imm));
362 }
363
364 #define DECODE_SDWA(DecName) \
365 DECODE_OPERAND(decodeSDWA##DecName, decodeSDWA##DecName)
366
367 DECODE_SDWA(Src32)
DECODE_SDWA(Src16)368 DECODE_SDWA(Src16)
369 DECODE_SDWA(VopcDst)
370
371 #include "AMDGPUGenDisassemblerTables.inc"
372
373 //===----------------------------------------------------------------------===//
374 //
375 //===----------------------------------------------------------------------===//
376
377 template <typename T> static inline T eatBytes(ArrayRef<uint8_t>& Bytes) {
378 assert(Bytes.size() >= sizeof(T));
379 const auto Res = support::endian::read<T, support::endianness::little>(Bytes.data());
380 Bytes = Bytes.slice(sizeof(T));
381 return Res;
382 }
383
tryDecodeInst(const uint8_t * Table,MCInst & MI,uint64_t Inst,uint64_t Address) const384 DecodeStatus AMDGPUDisassembler::tryDecodeInst(const uint8_t* Table,
385 MCInst &MI,
386 uint64_t Inst,
387 uint64_t Address) const {
388 assert(MI.getOpcode() == 0);
389 assert(MI.getNumOperands() == 0);
390 MCInst TmpInst;
391 HasLiteral = false;
392 const auto SavedBytes = Bytes;
393 if (decodeInstruction(Table, TmpInst, Inst, Address, this, STI)) {
394 MI = TmpInst;
395 return MCDisassembler::Success;
396 }
397 Bytes = SavedBytes;
398 return MCDisassembler::Fail;
399 }
400
401 // The disassembler is greedy, so we need to check FI operand value to
402 // not parse a dpp if the correct literal is not set. For dpp16 the
403 // autogenerated decoder checks the dpp literal
isValidDPP8(const MCInst & MI)404 static bool isValidDPP8(const MCInst &MI) {
405 using namespace llvm::AMDGPU::DPP;
406 int FiIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::fi);
407 assert(FiIdx != -1);
408 if ((unsigned)FiIdx >= MI.getNumOperands())
409 return false;
410 unsigned Fi = MI.getOperand(FiIdx).getImm();
411 return Fi == DPP8_FI_0 || Fi == DPP8_FI_1;
412 }
413
getInstruction(MCInst & MI,uint64_t & Size,ArrayRef<uint8_t> Bytes_,uint64_t Address,raw_ostream & CS) const414 DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
415 ArrayRef<uint8_t> Bytes_,
416 uint64_t Address,
417 raw_ostream &CS) const {
418 CommentStream = &CS;
419 bool IsSDWA = false;
420
421 unsigned MaxInstBytesNum = std::min((size_t)TargetMaxInstBytes, Bytes_.size());
422 Bytes = Bytes_.slice(0, MaxInstBytesNum);
423
424 DecodeStatus Res = MCDisassembler::Fail;
425 do {
426 // ToDo: better to switch encoding length using some bit predicate
427 // but it is unknown yet, so try all we can
428
429 // Try to decode DPP and SDWA first to solve conflict with VOP1 and VOP2
430 // encodings
431 if (Bytes.size() >= 8) {
432 const uint64_t QW = eatBytes<uint64_t>(Bytes);
433
434 if (STI.getFeatureBits()[AMDGPU::FeatureGFX10_BEncoding]) {
435 Res = tryDecodeInst(DecoderTableGFX10_B64, MI, QW, Address);
436 if (Res) {
437 if (AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::dpp8)
438 == -1)
439 break;
440 if (convertDPP8Inst(MI) == MCDisassembler::Success)
441 break;
442 MI = MCInst(); // clear
443 }
444 }
445
446 Res = tryDecodeInst(DecoderTableDPP864, MI, QW, Address);
447 if (Res && convertDPP8Inst(MI) == MCDisassembler::Success)
448 break;
449
450 MI = MCInst(); // clear
451
452 Res = tryDecodeInst(DecoderTableDPP64, MI, QW, Address);
453 if (Res) break;
454
455 Res = tryDecodeInst(DecoderTableSDWA64, MI, QW, Address);
456 if (Res) { IsSDWA = true; break; }
457
458 Res = tryDecodeInst(DecoderTableSDWA964, MI, QW, Address);
459 if (Res) { IsSDWA = true; break; }
460
461 Res = tryDecodeInst(DecoderTableSDWA1064, MI, QW, Address);
462 if (Res) { IsSDWA = true; break; }
463
464 if (STI.getFeatureBits()[AMDGPU::FeatureUnpackedD16VMem]) {
465 Res = tryDecodeInst(DecoderTableGFX80_UNPACKED64, MI, QW, Address);
466 if (Res)
467 break;
468 }
469
470 // Some GFX9 subtargets repurposed the v_mad_mix_f32, v_mad_mixlo_f16 and
471 // v_mad_mixhi_f16 for FMA variants. Try to decode using this special
472 // table first so we print the correct name.
473 if (STI.getFeatureBits()[AMDGPU::FeatureFmaMixInsts]) {
474 Res = tryDecodeInst(DecoderTableGFX9_DL64, MI, QW, Address);
475 if (Res)
476 break;
477 }
478 }
479
480 // Reinitialize Bytes as DPP64 could have eaten too much
481 Bytes = Bytes_.slice(0, MaxInstBytesNum);
482
483 // Try decode 32-bit instruction
484 if (Bytes.size() < 4) break;
485 const uint32_t DW = eatBytes<uint32_t>(Bytes);
486 Res = tryDecodeInst(DecoderTableGFX832, MI, DW, Address);
487 if (Res) break;
488
489 Res = tryDecodeInst(DecoderTableAMDGPU32, MI, DW, Address);
490 if (Res) break;
491
492 Res = tryDecodeInst(DecoderTableGFX932, MI, DW, Address);
493 if (Res) break;
494
495 if (STI.getFeatureBits()[AMDGPU::FeatureGFX90AInsts]) {
496 Res = tryDecodeInst(DecoderTableGFX90A32, MI, DW, Address);
497 if (Res)
498 break;
499 }
500
501 if (STI.getFeatureBits()[AMDGPU::FeatureGFX10_BEncoding]) {
502 Res = tryDecodeInst(DecoderTableGFX10_B32, MI, DW, Address);
503 if (Res) break;
504 }
505
506 Res = tryDecodeInst(DecoderTableGFX1032, MI, DW, Address);
507 if (Res) break;
508
509 if (Bytes.size() < 4) break;
510 const uint64_t QW = ((uint64_t)eatBytes<uint32_t>(Bytes) << 32) | DW;
511
512 if (STI.getFeatureBits()[AMDGPU::FeatureGFX90AInsts]) {
513 Res = tryDecodeInst(DecoderTableGFX90A64, MI, QW, Address);
514 if (Res)
515 break;
516 }
517
518 Res = tryDecodeInst(DecoderTableGFX864, MI, QW, Address);
519 if (Res) break;
520
521 Res = tryDecodeInst(DecoderTableAMDGPU64, MI, QW, Address);
522 if (Res) break;
523
524 Res = tryDecodeInst(DecoderTableGFX964, MI, QW, Address);
525 if (Res) break;
526
527 Res = tryDecodeInst(DecoderTableGFX1064, MI, QW, Address);
528 } while (false);
529
530 if (Res && (MI.getOpcode() == AMDGPU::V_MAC_F32_e64_vi ||
531 MI.getOpcode() == AMDGPU::V_MAC_F32_e64_gfx6_gfx7 ||
532 MI.getOpcode() == AMDGPU::V_MAC_F32_e64_gfx10 ||
533 MI.getOpcode() == AMDGPU::V_MAC_LEGACY_F32_e64_gfx6_gfx7 ||
534 MI.getOpcode() == AMDGPU::V_MAC_LEGACY_F32_e64_gfx10 ||
535 MI.getOpcode() == AMDGPU::V_MAC_F16_e64_vi ||
536 MI.getOpcode() == AMDGPU::V_FMAC_F64_e64_gfx90a ||
537 MI.getOpcode() == AMDGPU::V_FMAC_F32_e64_vi ||
538 MI.getOpcode() == AMDGPU::V_FMAC_F32_e64_gfx10 ||
539 MI.getOpcode() == AMDGPU::V_FMAC_LEGACY_F32_e64_gfx10 ||
540 MI.getOpcode() == AMDGPU::V_FMAC_F16_e64_gfx10)) {
541 // Insert dummy unused src2_modifiers.
542 insertNamedMCOperand(MI, MCOperand::createImm(0),
543 AMDGPU::OpName::src2_modifiers);
544 }
545
546 if (Res && (MCII->get(MI.getOpcode()).TSFlags &
547 (SIInstrFlags::MUBUF | SIInstrFlags::FLAT | SIInstrFlags::SMRD))) {
548 int CPolPos = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
549 AMDGPU::OpName::cpol);
550 if (CPolPos != -1) {
551 unsigned CPol =
552 (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::IsAtomicRet) ?
553 AMDGPU::CPol::GLC : 0;
554 if (MI.getNumOperands() <= (unsigned)CPolPos) {
555 insertNamedMCOperand(MI, MCOperand::createImm(CPol),
556 AMDGPU::OpName::cpol);
557 } else if (CPol) {
558 MI.getOperand(CPolPos).setImm(MI.getOperand(CPolPos).getImm() | CPol);
559 }
560 }
561 }
562
563 if (Res && (MCII->get(MI.getOpcode()).TSFlags &
564 (SIInstrFlags::MTBUF | SIInstrFlags::MUBUF)) &&
565 (STI.getFeatureBits()[AMDGPU::FeatureGFX90AInsts])) {
566 // GFX90A lost TFE, its place is occupied by ACC.
567 int TFEOpIdx =
568 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::tfe);
569 if (TFEOpIdx != -1) {
570 auto TFEIter = MI.begin();
571 std::advance(TFEIter, TFEOpIdx);
572 MI.insert(TFEIter, MCOperand::createImm(0));
573 }
574 }
575
576 if (Res && (MCII->get(MI.getOpcode()).TSFlags &
577 (SIInstrFlags::MTBUF | SIInstrFlags::MUBUF))) {
578 int SWZOpIdx =
579 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::swz);
580 if (SWZOpIdx != -1) {
581 auto SWZIter = MI.begin();
582 std::advance(SWZIter, SWZOpIdx);
583 MI.insert(SWZIter, MCOperand::createImm(0));
584 }
585 }
586
587 if (Res && (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::MIMG)) {
588 int VAddr0Idx =
589 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0);
590 int RsrcIdx =
591 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc);
592 unsigned NSAArgs = RsrcIdx - VAddr0Idx - 1;
593 if (VAddr0Idx >= 0 && NSAArgs > 0) {
594 unsigned NSAWords = (NSAArgs + 3) / 4;
595 if (Bytes.size() < 4 * NSAWords) {
596 Res = MCDisassembler::Fail;
597 } else {
598 for (unsigned i = 0; i < NSAArgs; ++i) {
599 MI.insert(MI.begin() + VAddr0Idx + 1 + i,
600 decodeOperand_VGPR_32(Bytes[i]));
601 }
602 Bytes = Bytes.slice(4 * NSAWords);
603 }
604 }
605
606 if (Res)
607 Res = convertMIMGInst(MI);
608 }
609
610 if (Res && IsSDWA)
611 Res = convertSDWAInst(MI);
612
613 int VDstIn_Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
614 AMDGPU::OpName::vdst_in);
615 if (VDstIn_Idx != -1) {
616 int Tied = MCII->get(MI.getOpcode()).getOperandConstraint(VDstIn_Idx,
617 MCOI::OperandConstraint::TIED_TO);
618 if (Tied != -1 && (MI.getNumOperands() <= (unsigned)VDstIn_Idx ||
619 !MI.getOperand(VDstIn_Idx).isReg() ||
620 MI.getOperand(VDstIn_Idx).getReg() != MI.getOperand(Tied).getReg())) {
621 if (MI.getNumOperands() > (unsigned)VDstIn_Idx)
622 MI.erase(&MI.getOperand(VDstIn_Idx));
623 insertNamedMCOperand(MI,
624 MCOperand::createReg(MI.getOperand(Tied).getReg()),
625 AMDGPU::OpName::vdst_in);
626 }
627 }
628
629 // if the opcode was not recognized we'll assume a Size of 4 bytes
630 // (unless there are fewer bytes left)
631 Size = Res ? (MaxInstBytesNum - Bytes.size())
632 : std::min((size_t)4, Bytes_.size());
633 return Res;
634 }
635
convertSDWAInst(MCInst & MI) const636 DecodeStatus AMDGPUDisassembler::convertSDWAInst(MCInst &MI) const {
637 if (STI.getFeatureBits()[AMDGPU::FeatureGFX9] ||
638 STI.getFeatureBits()[AMDGPU::FeatureGFX10]) {
639 if (AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst) != -1)
640 // VOPC - insert clamp
641 insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::clamp);
642 } else if (STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands]) {
643 int SDst = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst);
644 if (SDst != -1) {
645 // VOPC - insert VCC register as sdst
646 insertNamedMCOperand(MI, createRegOperand(AMDGPU::VCC),
647 AMDGPU::OpName::sdst);
648 } else {
649 // VOP1/2 - insert omod if present in instruction
650 insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::omod);
651 }
652 }
653 return MCDisassembler::Success;
654 }
655
656 // We must check FI == literal to reject not genuine dpp8 insts, and we must
657 // first add optional MI operands to check FI
convertDPP8Inst(MCInst & MI) const658 DecodeStatus AMDGPUDisassembler::convertDPP8Inst(MCInst &MI) const {
659 unsigned Opc = MI.getOpcode();
660 unsigned DescNumOps = MCII->get(Opc).getNumOperands();
661
662 // Insert dummy unused src modifiers.
663 if (MI.getNumOperands() < DescNumOps &&
664 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers) != -1)
665 insertNamedMCOperand(MI, MCOperand::createImm(0),
666 AMDGPU::OpName::src0_modifiers);
667
668 if (MI.getNumOperands() < DescNumOps &&
669 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1_modifiers) != -1)
670 insertNamedMCOperand(MI, MCOperand::createImm(0),
671 AMDGPU::OpName::src1_modifiers);
672
673 return isValidDPP8(MI) ? MCDisassembler::Success : MCDisassembler::SoftFail;
674 }
675
676 // Note that before gfx10, the MIMG encoding provided no information about
677 // VADDR size. Consequently, decoded instructions always show address as if it
678 // has 1 dword, which could be not really so.
convertMIMGInst(MCInst & MI) const679 DecodeStatus AMDGPUDisassembler::convertMIMGInst(MCInst &MI) const {
680
681 int VDstIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
682 AMDGPU::OpName::vdst);
683
684 int VDataIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
685 AMDGPU::OpName::vdata);
686 int VAddr0Idx =
687 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0);
688 int DMaskIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
689 AMDGPU::OpName::dmask);
690
691 int TFEIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
692 AMDGPU::OpName::tfe);
693 int D16Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
694 AMDGPU::OpName::d16);
695
696 assert(VDataIdx != -1);
697 if (DMaskIdx == -1 || TFEIdx == -1) {// intersect_ray
698 if (AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::a16) > -1) {
699 assert(MI.getOpcode() == AMDGPU::IMAGE_BVH_INTERSECT_RAY_a16_sa ||
700 MI.getOpcode() == AMDGPU::IMAGE_BVH_INTERSECT_RAY_a16_nsa ||
701 MI.getOpcode() == AMDGPU::IMAGE_BVH64_INTERSECT_RAY_a16_sa ||
702 MI.getOpcode() == AMDGPU::IMAGE_BVH64_INTERSECT_RAY_a16_nsa);
703 addOperand(MI, MCOperand::createImm(1));
704 }
705 return MCDisassembler::Success;
706 }
707
708 const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(MI.getOpcode());
709 bool IsAtomic = (VDstIdx != -1);
710 bool IsGather4 = MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::Gather4;
711
712 bool IsNSA = false;
713 unsigned AddrSize = Info->VAddrDwords;
714
715 if (STI.getFeatureBits()[AMDGPU::FeatureGFX10]) {
716 unsigned DimIdx =
717 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::dim);
718 int A16Idx =
719 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::a16);
720 const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
721 AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode);
722 const AMDGPU::MIMGDimInfo *Dim =
723 AMDGPU::getMIMGDimInfoByEncoding(MI.getOperand(DimIdx).getImm());
724 const bool IsA16 = (A16Idx != -1 && MI.getOperand(A16Idx).getImm());
725
726 AddrSize =
727 AMDGPU::getAddrSizeMIMGOp(BaseOpcode, Dim, IsA16, AMDGPU::hasG16(STI));
728
729 IsNSA = Info->MIMGEncoding == AMDGPU::MIMGEncGfx10NSA;
730 if (!IsNSA) {
731 if (AddrSize > 8)
732 AddrSize = 16;
733 else if (AddrSize > 4)
734 AddrSize = 8;
735 } else {
736 if (AddrSize > Info->VAddrDwords) {
737 // The NSA encoding does not contain enough operands for the combination
738 // of base opcode / dimension. Should this be an error?
739 return MCDisassembler::Success;
740 }
741 }
742 }
743
744 unsigned DMask = MI.getOperand(DMaskIdx).getImm() & 0xf;
745 unsigned DstSize = IsGather4 ? 4 : std::max(countPopulation(DMask), 1u);
746
747 bool D16 = D16Idx >= 0 && MI.getOperand(D16Idx).getImm();
748 if (D16 && AMDGPU::hasPackedD16(STI)) {
749 DstSize = (DstSize + 1) / 2;
750 }
751
752 if (TFEIdx != -1 && MI.getOperand(TFEIdx).getImm())
753 DstSize += 1;
754
755 if (DstSize == Info->VDataDwords && AddrSize == Info->VAddrDwords)
756 return MCDisassembler::Success;
757
758 int NewOpcode =
759 AMDGPU::getMIMGOpcode(Info->BaseOpcode, Info->MIMGEncoding, DstSize, AddrSize);
760 if (NewOpcode == -1)
761 return MCDisassembler::Success;
762
763 // Widen the register to the correct number of enabled channels.
764 unsigned NewVdata = AMDGPU::NoRegister;
765 if (DstSize != Info->VDataDwords) {
766 auto DataRCID = MCII->get(NewOpcode).OpInfo[VDataIdx].RegClass;
767
768 // Get first subregister of VData
769 unsigned Vdata0 = MI.getOperand(VDataIdx).getReg();
770 unsigned VdataSub0 = MRI.getSubReg(Vdata0, AMDGPU::sub0);
771 Vdata0 = (VdataSub0 != 0)? VdataSub0 : Vdata0;
772
773 NewVdata = MRI.getMatchingSuperReg(Vdata0, AMDGPU::sub0,
774 &MRI.getRegClass(DataRCID));
775 if (NewVdata == AMDGPU::NoRegister) {
776 // It's possible to encode this such that the low register + enabled
777 // components exceeds the register count.
778 return MCDisassembler::Success;
779 }
780 }
781
782 unsigned NewVAddr0 = AMDGPU::NoRegister;
783 if (STI.getFeatureBits()[AMDGPU::FeatureGFX10] && !IsNSA &&
784 AddrSize != Info->VAddrDwords) {
785 unsigned VAddr0 = MI.getOperand(VAddr0Idx).getReg();
786 unsigned VAddrSub0 = MRI.getSubReg(VAddr0, AMDGPU::sub0);
787 VAddr0 = (VAddrSub0 != 0) ? VAddrSub0 : VAddr0;
788
789 auto AddrRCID = MCII->get(NewOpcode).OpInfo[VAddr0Idx].RegClass;
790 NewVAddr0 = MRI.getMatchingSuperReg(VAddr0, AMDGPU::sub0,
791 &MRI.getRegClass(AddrRCID));
792 if (NewVAddr0 == AMDGPU::NoRegister)
793 return MCDisassembler::Success;
794 }
795
796 MI.setOpcode(NewOpcode);
797
798 if (NewVdata != AMDGPU::NoRegister) {
799 MI.getOperand(VDataIdx) = MCOperand::createReg(NewVdata);
800
801 if (IsAtomic) {
802 // Atomic operations have an additional operand (a copy of data)
803 MI.getOperand(VDstIdx) = MCOperand::createReg(NewVdata);
804 }
805 }
806
807 if (NewVAddr0 != AMDGPU::NoRegister) {
808 MI.getOperand(VAddr0Idx) = MCOperand::createReg(NewVAddr0);
809 } else if (IsNSA) {
810 assert(AddrSize <= Info->VAddrDwords);
811 MI.erase(MI.begin() + VAddr0Idx + AddrSize,
812 MI.begin() + VAddr0Idx + Info->VAddrDwords);
813 }
814
815 return MCDisassembler::Success;
816 }
817
getRegClassName(unsigned RegClassID) const818 const char* AMDGPUDisassembler::getRegClassName(unsigned RegClassID) const {
819 return getContext().getRegisterInfo()->
820 getRegClassName(&AMDGPUMCRegisterClasses[RegClassID]);
821 }
822
823 inline
errOperand(unsigned V,const Twine & ErrMsg) const824 MCOperand AMDGPUDisassembler::errOperand(unsigned V,
825 const Twine& ErrMsg) const {
826 *CommentStream << "Error: " + ErrMsg;
827
828 // ToDo: add support for error operands to MCInst.h
829 // return MCOperand::createError(V);
830 return MCOperand();
831 }
832
833 inline
createRegOperand(unsigned int RegId) const834 MCOperand AMDGPUDisassembler::createRegOperand(unsigned int RegId) const {
835 return MCOperand::createReg(AMDGPU::getMCReg(RegId, STI));
836 }
837
838 inline
createRegOperand(unsigned RegClassID,unsigned Val) const839 MCOperand AMDGPUDisassembler::createRegOperand(unsigned RegClassID,
840 unsigned Val) const {
841 const auto& RegCl = AMDGPUMCRegisterClasses[RegClassID];
842 if (Val >= RegCl.getNumRegs())
843 return errOperand(Val, Twine(getRegClassName(RegClassID)) +
844 ": unknown register " + Twine(Val));
845 return createRegOperand(RegCl.getRegister(Val));
846 }
847
848 inline
createSRegOperand(unsigned SRegClassID,unsigned Val) const849 MCOperand AMDGPUDisassembler::createSRegOperand(unsigned SRegClassID,
850 unsigned Val) const {
851 // ToDo: SI/CI have 104 SGPRs, VI - 102
852 // Valery: here we accepting as much as we can, let assembler sort it out
853 int shift = 0;
854 switch (SRegClassID) {
855 case AMDGPU::SGPR_32RegClassID:
856 case AMDGPU::TTMP_32RegClassID:
857 break;
858 case AMDGPU::SGPR_64RegClassID:
859 case AMDGPU::TTMP_64RegClassID:
860 shift = 1;
861 break;
862 case AMDGPU::SGPR_128RegClassID:
863 case AMDGPU::TTMP_128RegClassID:
864 // ToDo: unclear if s[100:104] is available on VI. Can we use VCC as SGPR in
865 // this bundle?
866 case AMDGPU::SGPR_256RegClassID:
867 case AMDGPU::TTMP_256RegClassID:
868 // ToDo: unclear if s[96:104] is available on VI. Can we use VCC as SGPR in
869 // this bundle?
870 case AMDGPU::SGPR_512RegClassID:
871 case AMDGPU::TTMP_512RegClassID:
872 shift = 2;
873 break;
874 // ToDo: unclear if s[88:104] is available on VI. Can we use VCC as SGPR in
875 // this bundle?
876 default:
877 llvm_unreachable("unhandled register class");
878 }
879
880 if (Val % (1 << shift)) {
881 *CommentStream << "Warning: " << getRegClassName(SRegClassID)
882 << ": scalar reg isn't aligned " << Val;
883 }
884
885 return createRegOperand(SRegClassID, Val >> shift);
886 }
887
decodeOperand_VS_32(unsigned Val) const888 MCOperand AMDGPUDisassembler::decodeOperand_VS_32(unsigned Val) const {
889 return decodeSrcOp(OPW32, Val);
890 }
891
decodeOperand_VS_64(unsigned Val) const892 MCOperand AMDGPUDisassembler::decodeOperand_VS_64(unsigned Val) const {
893 return decodeSrcOp(OPW64, Val);
894 }
895
decodeOperand_VS_128(unsigned Val) const896 MCOperand AMDGPUDisassembler::decodeOperand_VS_128(unsigned Val) const {
897 return decodeSrcOp(OPW128, Val);
898 }
899
decodeOperand_VSrc16(unsigned Val) const900 MCOperand AMDGPUDisassembler::decodeOperand_VSrc16(unsigned Val) const {
901 return decodeSrcOp(OPW16, Val);
902 }
903
decodeOperand_VSrcV216(unsigned Val) const904 MCOperand AMDGPUDisassembler::decodeOperand_VSrcV216(unsigned Val) const {
905 return decodeSrcOp(OPWV216, Val);
906 }
907
decodeOperand_VSrcV232(unsigned Val) const908 MCOperand AMDGPUDisassembler::decodeOperand_VSrcV232(unsigned Val) const {
909 return decodeSrcOp(OPWV232, Val);
910 }
911
decodeOperand_VGPR_32(unsigned Val) const912 MCOperand AMDGPUDisassembler::decodeOperand_VGPR_32(unsigned Val) const {
913 // Some instructions have operand restrictions beyond what the encoding
914 // allows. Some ordinarily VSrc_32 operands are VGPR_32, so clear the extra
915 // high bit.
916 Val &= 255;
917
918 return createRegOperand(AMDGPU::VGPR_32RegClassID, Val);
919 }
920
decodeOperand_VRegOrLds_32(unsigned Val) const921 MCOperand AMDGPUDisassembler::decodeOperand_VRegOrLds_32(unsigned Val) const {
922 return decodeSrcOp(OPW32, Val);
923 }
924
decodeOperand_AGPR_32(unsigned Val) const925 MCOperand AMDGPUDisassembler::decodeOperand_AGPR_32(unsigned Val) const {
926 return createRegOperand(AMDGPU::AGPR_32RegClassID, Val & 255);
927 }
928
decodeOperand_AReg_64(unsigned Val) const929 MCOperand AMDGPUDisassembler::decodeOperand_AReg_64(unsigned Val) const {
930 return createRegOperand(AMDGPU::AReg_64RegClassID, Val & 255);
931 }
932
decodeOperand_AReg_128(unsigned Val) const933 MCOperand AMDGPUDisassembler::decodeOperand_AReg_128(unsigned Val) const {
934 return createRegOperand(AMDGPU::AReg_128RegClassID, Val & 255);
935 }
936
decodeOperand_AReg_256(unsigned Val) const937 MCOperand AMDGPUDisassembler::decodeOperand_AReg_256(unsigned Val) const {
938 return createRegOperand(AMDGPU::AReg_256RegClassID, Val & 255);
939 }
940
decodeOperand_AReg_512(unsigned Val) const941 MCOperand AMDGPUDisassembler::decodeOperand_AReg_512(unsigned Val) const {
942 return createRegOperand(AMDGPU::AReg_512RegClassID, Val & 255);
943 }
944
decodeOperand_AReg_1024(unsigned Val) const945 MCOperand AMDGPUDisassembler::decodeOperand_AReg_1024(unsigned Val) const {
946 return createRegOperand(AMDGPU::AReg_1024RegClassID, Val & 255);
947 }
948
decodeOperand_AV_32(unsigned Val) const949 MCOperand AMDGPUDisassembler::decodeOperand_AV_32(unsigned Val) const {
950 return decodeSrcOp(OPW32, Val);
951 }
952
decodeOperand_AV_64(unsigned Val) const953 MCOperand AMDGPUDisassembler::decodeOperand_AV_64(unsigned Val) const {
954 return decodeSrcOp(OPW64, Val);
955 }
956
decodeOperand_VReg_64(unsigned Val) const957 MCOperand AMDGPUDisassembler::decodeOperand_VReg_64(unsigned Val) const {
958 return createRegOperand(AMDGPU::VReg_64RegClassID, Val);
959 }
960
decodeOperand_VReg_96(unsigned Val) const961 MCOperand AMDGPUDisassembler::decodeOperand_VReg_96(unsigned Val) const {
962 return createRegOperand(AMDGPU::VReg_96RegClassID, Val);
963 }
964
decodeOperand_VReg_128(unsigned Val) const965 MCOperand AMDGPUDisassembler::decodeOperand_VReg_128(unsigned Val) const {
966 return createRegOperand(AMDGPU::VReg_128RegClassID, Val);
967 }
968
decodeOperand_VReg_256(unsigned Val) const969 MCOperand AMDGPUDisassembler::decodeOperand_VReg_256(unsigned Val) const {
970 return createRegOperand(AMDGPU::VReg_256RegClassID, Val);
971 }
972
decodeOperand_VReg_512(unsigned Val) const973 MCOperand AMDGPUDisassembler::decodeOperand_VReg_512(unsigned Val) const {
974 return createRegOperand(AMDGPU::VReg_512RegClassID, Val);
975 }
976
decodeOperand_VReg_1024(unsigned Val) const977 MCOperand AMDGPUDisassembler::decodeOperand_VReg_1024(unsigned Val) const {
978 return createRegOperand(AMDGPU::VReg_1024RegClassID, Val);
979 }
980
decodeOperand_SReg_32(unsigned Val) const981 MCOperand AMDGPUDisassembler::decodeOperand_SReg_32(unsigned Val) const {
982 // table-gen generated disassembler doesn't care about operand types
983 // leaving only registry class so SSrc_32 operand turns into SReg_32
984 // and therefore we accept immediates and literals here as well
985 return decodeSrcOp(OPW32, Val);
986 }
987
decodeOperand_SReg_32_XM0_XEXEC(unsigned Val) const988 MCOperand AMDGPUDisassembler::decodeOperand_SReg_32_XM0_XEXEC(
989 unsigned Val) const {
990 // SReg_32_XM0 is SReg_32 without M0 or EXEC_LO/EXEC_HI
991 return decodeOperand_SReg_32(Val);
992 }
993
decodeOperand_SReg_32_XEXEC_HI(unsigned Val) const994 MCOperand AMDGPUDisassembler::decodeOperand_SReg_32_XEXEC_HI(
995 unsigned Val) const {
996 // SReg_32_XM0 is SReg_32 without EXEC_HI
997 return decodeOperand_SReg_32(Val);
998 }
999
decodeOperand_SRegOrLds_32(unsigned Val) const1000 MCOperand AMDGPUDisassembler::decodeOperand_SRegOrLds_32(unsigned Val) const {
1001 // table-gen generated disassembler doesn't care about operand types
1002 // leaving only registry class so SSrc_32 operand turns into SReg_32
1003 // and therefore we accept immediates and literals here as well
1004 return decodeSrcOp(OPW32, Val);
1005 }
1006
decodeOperand_SReg_64(unsigned Val) const1007 MCOperand AMDGPUDisassembler::decodeOperand_SReg_64(unsigned Val) const {
1008 return decodeSrcOp(OPW64, Val);
1009 }
1010
decodeOperand_SReg_64_XEXEC(unsigned Val) const1011 MCOperand AMDGPUDisassembler::decodeOperand_SReg_64_XEXEC(unsigned Val) const {
1012 return decodeSrcOp(OPW64, Val);
1013 }
1014
decodeOperand_SReg_128(unsigned Val) const1015 MCOperand AMDGPUDisassembler::decodeOperand_SReg_128(unsigned Val) const {
1016 return decodeSrcOp(OPW128, Val);
1017 }
1018
decodeOperand_SReg_256(unsigned Val) const1019 MCOperand AMDGPUDisassembler::decodeOperand_SReg_256(unsigned Val) const {
1020 return decodeDstOp(OPW256, Val);
1021 }
1022
decodeOperand_SReg_512(unsigned Val) const1023 MCOperand AMDGPUDisassembler::decodeOperand_SReg_512(unsigned Val) const {
1024 return decodeDstOp(OPW512, Val);
1025 }
1026
decodeLiteralConstant() const1027 MCOperand AMDGPUDisassembler::decodeLiteralConstant() const {
1028 // For now all literal constants are supposed to be unsigned integer
1029 // ToDo: deal with signed/unsigned 64-bit integer constants
1030 // ToDo: deal with float/double constants
1031 if (!HasLiteral) {
1032 if (Bytes.size() < 4) {
1033 return errOperand(0, "cannot read literal, inst bytes left " +
1034 Twine(Bytes.size()));
1035 }
1036 HasLiteral = true;
1037 Literal = eatBytes<uint32_t>(Bytes);
1038 }
1039 return MCOperand::createImm(Literal);
1040 }
1041
decodeIntImmed(unsigned Imm)1042 MCOperand AMDGPUDisassembler::decodeIntImmed(unsigned Imm) {
1043 using namespace AMDGPU::EncValues;
1044
1045 assert(Imm >= INLINE_INTEGER_C_MIN && Imm <= INLINE_INTEGER_C_MAX);
1046 return MCOperand::createImm((Imm <= INLINE_INTEGER_C_POSITIVE_MAX) ?
1047 (static_cast<int64_t>(Imm) - INLINE_INTEGER_C_MIN) :
1048 (INLINE_INTEGER_C_POSITIVE_MAX - static_cast<int64_t>(Imm)));
1049 // Cast prevents negative overflow.
1050 }
1051
getInlineImmVal32(unsigned Imm)1052 static int64_t getInlineImmVal32(unsigned Imm) {
1053 switch (Imm) {
1054 case 240:
1055 return FloatToBits(0.5f);
1056 case 241:
1057 return FloatToBits(-0.5f);
1058 case 242:
1059 return FloatToBits(1.0f);
1060 case 243:
1061 return FloatToBits(-1.0f);
1062 case 244:
1063 return FloatToBits(2.0f);
1064 case 245:
1065 return FloatToBits(-2.0f);
1066 case 246:
1067 return FloatToBits(4.0f);
1068 case 247:
1069 return FloatToBits(-4.0f);
1070 case 248: // 1 / (2 * PI)
1071 return 0x3e22f983;
1072 default:
1073 llvm_unreachable("invalid fp inline imm");
1074 }
1075 }
1076
getInlineImmVal64(unsigned Imm)1077 static int64_t getInlineImmVal64(unsigned Imm) {
1078 switch (Imm) {
1079 case 240:
1080 return DoubleToBits(0.5);
1081 case 241:
1082 return DoubleToBits(-0.5);
1083 case 242:
1084 return DoubleToBits(1.0);
1085 case 243:
1086 return DoubleToBits(-1.0);
1087 case 244:
1088 return DoubleToBits(2.0);
1089 case 245:
1090 return DoubleToBits(-2.0);
1091 case 246:
1092 return DoubleToBits(4.0);
1093 case 247:
1094 return DoubleToBits(-4.0);
1095 case 248: // 1 / (2 * PI)
1096 return 0x3fc45f306dc9c882;
1097 default:
1098 llvm_unreachable("invalid fp inline imm");
1099 }
1100 }
1101
getInlineImmVal16(unsigned Imm)1102 static int64_t getInlineImmVal16(unsigned Imm) {
1103 switch (Imm) {
1104 case 240:
1105 return 0x3800;
1106 case 241:
1107 return 0xB800;
1108 case 242:
1109 return 0x3C00;
1110 case 243:
1111 return 0xBC00;
1112 case 244:
1113 return 0x4000;
1114 case 245:
1115 return 0xC000;
1116 case 246:
1117 return 0x4400;
1118 case 247:
1119 return 0xC400;
1120 case 248: // 1 / (2 * PI)
1121 return 0x3118;
1122 default:
1123 llvm_unreachable("invalid fp inline imm");
1124 }
1125 }
1126
decodeFPImmed(OpWidthTy Width,unsigned Imm)1127 MCOperand AMDGPUDisassembler::decodeFPImmed(OpWidthTy Width, unsigned Imm) {
1128 assert(Imm >= AMDGPU::EncValues::INLINE_FLOATING_C_MIN
1129 && Imm <= AMDGPU::EncValues::INLINE_FLOATING_C_MAX);
1130
1131 // ToDo: case 248: 1/(2*PI) - is allowed only on VI
1132 switch (Width) {
1133 case OPW32:
1134 case OPW128: // splat constants
1135 case OPW512:
1136 case OPW1024:
1137 case OPWV232:
1138 return MCOperand::createImm(getInlineImmVal32(Imm));
1139 case OPW64:
1140 case OPW256:
1141 return MCOperand::createImm(getInlineImmVal64(Imm));
1142 case OPW16:
1143 case OPWV216:
1144 return MCOperand::createImm(getInlineImmVal16(Imm));
1145 default:
1146 llvm_unreachable("implement me");
1147 }
1148 }
1149
getVgprClassId(const OpWidthTy Width) const1150 unsigned AMDGPUDisassembler::getVgprClassId(const OpWidthTy Width) const {
1151 using namespace AMDGPU;
1152
1153 assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
1154 switch (Width) {
1155 default: // fall
1156 case OPW32:
1157 case OPW16:
1158 case OPWV216:
1159 return VGPR_32RegClassID;
1160 case OPW64:
1161 case OPWV232: return VReg_64RegClassID;
1162 case OPW96: return VReg_96RegClassID;
1163 case OPW128: return VReg_128RegClassID;
1164 case OPW160: return VReg_160RegClassID;
1165 case OPW256: return VReg_256RegClassID;
1166 case OPW512: return VReg_512RegClassID;
1167 case OPW1024: return VReg_1024RegClassID;
1168 }
1169 }
1170
getAgprClassId(const OpWidthTy Width) const1171 unsigned AMDGPUDisassembler::getAgprClassId(const OpWidthTy Width) const {
1172 using namespace AMDGPU;
1173
1174 assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
1175 switch (Width) {
1176 default: // fall
1177 case OPW32:
1178 case OPW16:
1179 case OPWV216:
1180 return AGPR_32RegClassID;
1181 case OPW64:
1182 case OPWV232: return AReg_64RegClassID;
1183 case OPW96: return AReg_96RegClassID;
1184 case OPW128: return AReg_128RegClassID;
1185 case OPW160: return AReg_160RegClassID;
1186 case OPW256: return AReg_256RegClassID;
1187 case OPW512: return AReg_512RegClassID;
1188 case OPW1024: return AReg_1024RegClassID;
1189 }
1190 }
1191
1192
getSgprClassId(const OpWidthTy Width) const1193 unsigned AMDGPUDisassembler::getSgprClassId(const OpWidthTy Width) const {
1194 using namespace AMDGPU;
1195
1196 assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
1197 switch (Width) {
1198 default: // fall
1199 case OPW32:
1200 case OPW16:
1201 case OPWV216:
1202 return SGPR_32RegClassID;
1203 case OPW64:
1204 case OPWV232: return SGPR_64RegClassID;
1205 case OPW96: return SGPR_96RegClassID;
1206 case OPW128: return SGPR_128RegClassID;
1207 case OPW160: return SGPR_160RegClassID;
1208 case OPW256: return SGPR_256RegClassID;
1209 case OPW512: return SGPR_512RegClassID;
1210 }
1211 }
1212
getTtmpClassId(const OpWidthTy Width) const1213 unsigned AMDGPUDisassembler::getTtmpClassId(const OpWidthTy Width) const {
1214 using namespace AMDGPU;
1215
1216 assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
1217 switch (Width) {
1218 default: // fall
1219 case OPW32:
1220 case OPW16:
1221 case OPWV216:
1222 return TTMP_32RegClassID;
1223 case OPW64:
1224 case OPWV232: return TTMP_64RegClassID;
1225 case OPW128: return TTMP_128RegClassID;
1226 case OPW256: return TTMP_256RegClassID;
1227 case OPW512: return TTMP_512RegClassID;
1228 }
1229 }
1230
getTTmpIdx(unsigned Val) const1231 int AMDGPUDisassembler::getTTmpIdx(unsigned Val) const {
1232 using namespace AMDGPU::EncValues;
1233
1234 unsigned TTmpMin = isGFX9Plus() ? TTMP_GFX9PLUS_MIN : TTMP_VI_MIN;
1235 unsigned TTmpMax = isGFX9Plus() ? TTMP_GFX9PLUS_MAX : TTMP_VI_MAX;
1236
1237 return (TTmpMin <= Val && Val <= TTmpMax)? Val - TTmpMin : -1;
1238 }
1239
decodeSrcOp(const OpWidthTy Width,unsigned Val) const1240 MCOperand AMDGPUDisassembler::decodeSrcOp(const OpWidthTy Width, unsigned Val) const {
1241 using namespace AMDGPU::EncValues;
1242
1243 assert(Val < 1024); // enum10
1244
1245 bool IsAGPR = Val & 512;
1246 Val &= 511;
1247
1248 if (VGPR_MIN <= Val && Val <= VGPR_MAX) {
1249 return createRegOperand(IsAGPR ? getAgprClassId(Width)
1250 : getVgprClassId(Width), Val - VGPR_MIN);
1251 }
1252 if (Val <= SGPR_MAX) {
1253 // "SGPR_MIN <= Val" is always true and causes compilation warning.
1254 static_assert(SGPR_MIN == 0, "");
1255 return createSRegOperand(getSgprClassId(Width), Val - SGPR_MIN);
1256 }
1257
1258 int TTmpIdx = getTTmpIdx(Val);
1259 if (TTmpIdx >= 0) {
1260 return createSRegOperand(getTtmpClassId(Width), TTmpIdx);
1261 }
1262
1263 if (INLINE_INTEGER_C_MIN <= Val && Val <= INLINE_INTEGER_C_MAX)
1264 return decodeIntImmed(Val);
1265
1266 if (INLINE_FLOATING_C_MIN <= Val && Val <= INLINE_FLOATING_C_MAX)
1267 return decodeFPImmed(Width, Val);
1268
1269 if (Val == LITERAL_CONST)
1270 return decodeLiteralConstant();
1271
1272 switch (Width) {
1273 case OPW32:
1274 case OPW16:
1275 case OPWV216:
1276 return decodeSpecialReg32(Val);
1277 case OPW64:
1278 case OPWV232:
1279 return decodeSpecialReg64(Val);
1280 default:
1281 llvm_unreachable("unexpected immediate type");
1282 }
1283 }
1284
decodeDstOp(const OpWidthTy Width,unsigned Val) const1285 MCOperand AMDGPUDisassembler::decodeDstOp(const OpWidthTy Width, unsigned Val) const {
1286 using namespace AMDGPU::EncValues;
1287
1288 assert(Val < 128);
1289 assert(Width == OPW256 || Width == OPW512);
1290
1291 if (Val <= SGPR_MAX) {
1292 // "SGPR_MIN <= Val" is always true and causes compilation warning.
1293 static_assert(SGPR_MIN == 0, "");
1294 return createSRegOperand(getSgprClassId(Width), Val - SGPR_MIN);
1295 }
1296
1297 int TTmpIdx = getTTmpIdx(Val);
1298 if (TTmpIdx >= 0) {
1299 return createSRegOperand(getTtmpClassId(Width), TTmpIdx);
1300 }
1301
1302 llvm_unreachable("unknown dst register");
1303 }
1304
decodeSpecialReg32(unsigned Val) const1305 MCOperand AMDGPUDisassembler::decodeSpecialReg32(unsigned Val) const {
1306 using namespace AMDGPU;
1307
1308 switch (Val) {
1309 case 102: return createRegOperand(FLAT_SCR_LO);
1310 case 103: return createRegOperand(FLAT_SCR_HI);
1311 case 104: return createRegOperand(XNACK_MASK_LO);
1312 case 105: return createRegOperand(XNACK_MASK_HI);
1313 case 106: return createRegOperand(VCC_LO);
1314 case 107: return createRegOperand(VCC_HI);
1315 case 108: return createRegOperand(TBA_LO);
1316 case 109: return createRegOperand(TBA_HI);
1317 case 110: return createRegOperand(TMA_LO);
1318 case 111: return createRegOperand(TMA_HI);
1319 case 124: return createRegOperand(M0);
1320 case 125: return createRegOperand(SGPR_NULL);
1321 case 126: return createRegOperand(EXEC_LO);
1322 case 127: return createRegOperand(EXEC_HI);
1323 case 235: return createRegOperand(SRC_SHARED_BASE);
1324 case 236: return createRegOperand(SRC_SHARED_LIMIT);
1325 case 237: return createRegOperand(SRC_PRIVATE_BASE);
1326 case 238: return createRegOperand(SRC_PRIVATE_LIMIT);
1327 case 239: return createRegOperand(SRC_POPS_EXITING_WAVE_ID);
1328 case 251: return createRegOperand(SRC_VCCZ);
1329 case 252: return createRegOperand(SRC_EXECZ);
1330 case 253: return createRegOperand(SRC_SCC);
1331 case 254: return createRegOperand(LDS_DIRECT);
1332 default: break;
1333 }
1334 return errOperand(Val, "unknown operand encoding " + Twine(Val));
1335 }
1336
decodeSpecialReg64(unsigned Val) const1337 MCOperand AMDGPUDisassembler::decodeSpecialReg64(unsigned Val) const {
1338 using namespace AMDGPU;
1339
1340 switch (Val) {
1341 case 102: return createRegOperand(FLAT_SCR);
1342 case 104: return createRegOperand(XNACK_MASK);
1343 case 106: return createRegOperand(VCC);
1344 case 108: return createRegOperand(TBA);
1345 case 110: return createRegOperand(TMA);
1346 case 125: return createRegOperand(SGPR_NULL);
1347 case 126: return createRegOperand(EXEC);
1348 case 235: return createRegOperand(SRC_SHARED_BASE);
1349 case 236: return createRegOperand(SRC_SHARED_LIMIT);
1350 case 237: return createRegOperand(SRC_PRIVATE_BASE);
1351 case 238: return createRegOperand(SRC_PRIVATE_LIMIT);
1352 case 239: return createRegOperand(SRC_POPS_EXITING_WAVE_ID);
1353 case 251: return createRegOperand(SRC_VCCZ);
1354 case 252: return createRegOperand(SRC_EXECZ);
1355 case 253: return createRegOperand(SRC_SCC);
1356 default: break;
1357 }
1358 return errOperand(Val, "unknown operand encoding " + Twine(Val));
1359 }
1360
decodeSDWASrc(const OpWidthTy Width,const unsigned Val) const1361 MCOperand AMDGPUDisassembler::decodeSDWASrc(const OpWidthTy Width,
1362 const unsigned Val) const {
1363 using namespace AMDGPU::SDWA;
1364 using namespace AMDGPU::EncValues;
1365
1366 if (STI.getFeatureBits()[AMDGPU::FeatureGFX9] ||
1367 STI.getFeatureBits()[AMDGPU::FeatureGFX10]) {
1368 // XXX: cast to int is needed to avoid stupid warning:
1369 // compare with unsigned is always true
1370 if (int(SDWA9EncValues::SRC_VGPR_MIN) <= int(Val) &&
1371 Val <= SDWA9EncValues::SRC_VGPR_MAX) {
1372 return createRegOperand(getVgprClassId(Width),
1373 Val - SDWA9EncValues::SRC_VGPR_MIN);
1374 }
1375 if (SDWA9EncValues::SRC_SGPR_MIN <= Val &&
1376 Val <= (isGFX10Plus() ? SDWA9EncValues::SRC_SGPR_MAX_GFX10
1377 : SDWA9EncValues::SRC_SGPR_MAX_SI)) {
1378 return createSRegOperand(getSgprClassId(Width),
1379 Val - SDWA9EncValues::SRC_SGPR_MIN);
1380 }
1381 if (SDWA9EncValues::SRC_TTMP_MIN <= Val &&
1382 Val <= SDWA9EncValues::SRC_TTMP_MAX) {
1383 return createSRegOperand(getTtmpClassId(Width),
1384 Val - SDWA9EncValues::SRC_TTMP_MIN);
1385 }
1386
1387 const unsigned SVal = Val - SDWA9EncValues::SRC_SGPR_MIN;
1388
1389 if (INLINE_INTEGER_C_MIN <= SVal && SVal <= INLINE_INTEGER_C_MAX)
1390 return decodeIntImmed(SVal);
1391
1392 if (INLINE_FLOATING_C_MIN <= SVal && SVal <= INLINE_FLOATING_C_MAX)
1393 return decodeFPImmed(Width, SVal);
1394
1395 return decodeSpecialReg32(SVal);
1396 } else if (STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands]) {
1397 return createRegOperand(getVgprClassId(Width), Val);
1398 }
1399 llvm_unreachable("unsupported target");
1400 }
1401
decodeSDWASrc16(unsigned Val) const1402 MCOperand AMDGPUDisassembler::decodeSDWASrc16(unsigned Val) const {
1403 return decodeSDWASrc(OPW16, Val);
1404 }
1405
decodeSDWASrc32(unsigned Val) const1406 MCOperand AMDGPUDisassembler::decodeSDWASrc32(unsigned Val) const {
1407 return decodeSDWASrc(OPW32, Val);
1408 }
1409
decodeSDWAVopcDst(unsigned Val) const1410 MCOperand AMDGPUDisassembler::decodeSDWAVopcDst(unsigned Val) const {
1411 using namespace AMDGPU::SDWA;
1412
1413 assert((STI.getFeatureBits()[AMDGPU::FeatureGFX9] ||
1414 STI.getFeatureBits()[AMDGPU::FeatureGFX10]) &&
1415 "SDWAVopcDst should be present only on GFX9+");
1416
1417 bool IsWave64 = STI.getFeatureBits()[AMDGPU::FeatureWavefrontSize64];
1418
1419 if (Val & SDWA9EncValues::VOPC_DST_VCC_MASK) {
1420 Val &= SDWA9EncValues::VOPC_DST_SGPR_MASK;
1421
1422 int TTmpIdx = getTTmpIdx(Val);
1423 if (TTmpIdx >= 0) {
1424 auto TTmpClsId = getTtmpClassId(IsWave64 ? OPW64 : OPW32);
1425 return createSRegOperand(TTmpClsId, TTmpIdx);
1426 } else if (Val > SGPR_MAX) {
1427 return IsWave64 ? decodeSpecialReg64(Val)
1428 : decodeSpecialReg32(Val);
1429 } else {
1430 return createSRegOperand(getSgprClassId(IsWave64 ? OPW64 : OPW32), Val);
1431 }
1432 } else {
1433 return createRegOperand(IsWave64 ? AMDGPU::VCC : AMDGPU::VCC_LO);
1434 }
1435 }
1436
decodeBoolReg(unsigned Val) const1437 MCOperand AMDGPUDisassembler::decodeBoolReg(unsigned Val) const {
1438 return STI.getFeatureBits()[AMDGPU::FeatureWavefrontSize64] ?
1439 decodeOperand_SReg_64(Val) : decodeOperand_SReg_32(Val);
1440 }
1441
isVI() const1442 bool AMDGPUDisassembler::isVI() const {
1443 return STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands];
1444 }
1445
isGFX9() const1446 bool AMDGPUDisassembler::isGFX9() const { return AMDGPU::isGFX9(STI); }
1447
isGFX90A() const1448 bool AMDGPUDisassembler::isGFX90A() const {
1449 return STI.getFeatureBits()[AMDGPU::FeatureGFX90AInsts];
1450 }
1451
isGFX9Plus() const1452 bool AMDGPUDisassembler::isGFX9Plus() const { return AMDGPU::isGFX9Plus(STI); }
1453
isGFX10() const1454 bool AMDGPUDisassembler::isGFX10() const { return AMDGPU::isGFX10(STI); }
1455
isGFX10Plus() const1456 bool AMDGPUDisassembler::isGFX10Plus() const {
1457 return AMDGPU::isGFX10Plus(STI);
1458 }
1459
hasArchitectedFlatScratch() const1460 bool AMDGPUDisassembler::hasArchitectedFlatScratch() const {
1461 return STI.getFeatureBits()[AMDGPU::FeatureArchitectedFlatScratch];
1462 }
1463
1464 //===----------------------------------------------------------------------===//
1465 // AMDGPU specific symbol handling
1466 //===----------------------------------------------------------------------===//
1467 #define PRINT_DIRECTIVE(DIRECTIVE, MASK) \
1468 do { \
1469 KdStream << Indent << DIRECTIVE " " \
1470 << ((FourByteBuffer & MASK) >> (MASK##_SHIFT)) << '\n'; \
1471 } while (0)
1472
1473 // NOLINTNEXTLINE(readability-identifier-naming)
decodeCOMPUTE_PGM_RSRC1(uint32_t FourByteBuffer,raw_string_ostream & KdStream) const1474 MCDisassembler::DecodeStatus AMDGPUDisassembler::decodeCOMPUTE_PGM_RSRC1(
1475 uint32_t FourByteBuffer, raw_string_ostream &KdStream) const {
1476 using namespace amdhsa;
1477 StringRef Indent = "\t";
1478
1479 // We cannot accurately backward compute #VGPRs used from
1480 // GRANULATED_WORKITEM_VGPR_COUNT. But we are concerned with getting the same
1481 // value of GRANULATED_WORKITEM_VGPR_COUNT in the reassembled binary. So we
1482 // simply calculate the inverse of what the assembler does.

  uint32_t GranulatedWorkitemVGPRCount =
      (FourByteBuffer & COMPUTE_PGM_RSRC1_GRANULATED_WORKITEM_VGPR_COUNT) >>
      COMPUTE_PGM_RSRC1_GRANULATED_WORKITEM_VGPR_COUNT_SHIFT;

  uint32_t NextFreeVGPR = (GranulatedWorkitemVGPRCount + 1) *
                          AMDGPU::IsaInfo::getVGPREncodingGranule(&STI);

  KdStream << Indent << ".amdhsa_next_free_vgpr " << NextFreeVGPR << '\n';

  // We cannot backward compute values used to calculate
  // GRANULATED_WAVEFRONT_SGPR_COUNT. Hence the original values for the
  // following directives can't be computed:
  //   .amdhsa_reserve_vcc
  //   .amdhsa_reserve_flat_scratch
  //   .amdhsa_reserve_xnack_mask
  // They take their respective default values if not specified in the
  // assembly.
  //
  // GRANULATED_WAVEFRONT_SGPR_COUNT
  //    = f(NEXT_FREE_SGPR + VCC + FLAT_SCRATCH + XNACK_MASK)
  //
  // We compute the inverse as though all directives apart from NEXT_FREE_SGPR
  // are set to 0. So while disassembling we consider that:
  //
  // GRANULATED_WAVEFRONT_SGPR_COUNT
  //    = f(NEXT_FREE_SGPR + 0 + 0 + 0)
  //
  // The disassembler cannot recover the original values of those 3 directives.
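  //
  // For example, with an SGPR encoding granule of 8, an encoded
  // GRANULATED_WAVEFRONT_SGPR_COUNT of 11 is printed back as
  // .amdhsa_next_free_sgpr 96, i.e. (11 + 1) * 8.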

  uint32_t GranulatedWavefrontSGPRCount =
      (FourByteBuffer & COMPUTE_PGM_RSRC1_GRANULATED_WAVEFRONT_SGPR_COUNT) >>
      COMPUTE_PGM_RSRC1_GRANULATED_WAVEFRONT_SGPR_COUNT_SHIFT;

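  // GFX10+ kernel descriptors are expected to encode this field as zero, so a
  // nonzero value cannot be round-tripped through any directive; reject it.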
  if (isGFX10Plus() && GranulatedWavefrontSGPRCount)
    return MCDisassembler::Fail;

  uint32_t NextFreeSGPR = (GranulatedWavefrontSGPRCount + 1) *
                          AMDGPU::IsaInfo::getSGPREncodingGranule(&STI);

  KdStream << Indent << ".amdhsa_reserve_vcc " << 0 << '\n';
  if (!hasArchitectedFlatScratch())
    KdStream << Indent << ".amdhsa_reserve_flat_scratch " << 0 << '\n';
  KdStream << Indent << ".amdhsa_reserve_xnack_mask " << 0 << '\n';
  KdStream << Indent << ".amdhsa_next_free_sgpr " << NextFreeSGPR << "\n";

  if (FourByteBuffer & COMPUTE_PGM_RSRC1_PRIORITY)
    return MCDisassembler::Fail;

  PRINT_DIRECTIVE(".amdhsa_float_round_mode_32",
                  COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_32);
  PRINT_DIRECTIVE(".amdhsa_float_round_mode_16_64",
                  COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_16_64);
  PRINT_DIRECTIVE(".amdhsa_float_denorm_mode_32",
                  COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_32);
  PRINT_DIRECTIVE(".amdhsa_float_denorm_mode_16_64",
                  COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64);

  if (FourByteBuffer & COMPUTE_PGM_RSRC1_PRIV)
    return MCDisassembler::Fail;

  PRINT_DIRECTIVE(".amdhsa_dx10_clamp", COMPUTE_PGM_RSRC1_ENABLE_DX10_CLAMP);

  if (FourByteBuffer & COMPUTE_PGM_RSRC1_DEBUG_MODE)
    return MCDisassembler::Fail;

  PRINT_DIRECTIVE(".amdhsa_ieee_mode", COMPUTE_PGM_RSRC1_ENABLE_IEEE_MODE);

  if (FourByteBuffer & COMPUTE_PGM_RSRC1_BULKY)
    return MCDisassembler::Fail;

  if (FourByteBuffer & COMPUTE_PGM_RSRC1_CDBG_USER)
    return MCDisassembler::Fail;

  PRINT_DIRECTIVE(".amdhsa_fp16_overflow", COMPUTE_PGM_RSRC1_FP16_OVFL);

  if (FourByteBuffer & COMPUTE_PGM_RSRC1_RESERVED0)
    return MCDisassembler::Fail;

  if (isGFX10Plus()) {
    PRINT_DIRECTIVE(".amdhsa_workgroup_processor_mode",
                    COMPUTE_PGM_RSRC1_WGP_MODE);
    PRINT_DIRECTIVE(".amdhsa_memory_ordered", COMPUTE_PGM_RSRC1_MEM_ORDERED);
    PRINT_DIRECTIVE(".amdhsa_forward_progress", COMPUTE_PGM_RSRC1_FWD_PROGRESS);
  }
  return MCDisassembler::Success;
}

// NOLINTNEXTLINE(readability-identifier-naming)
MCDisassembler::DecodeStatus AMDGPUDisassembler::decodeCOMPUTE_PGM_RSRC2(
    uint32_t FourByteBuffer, raw_string_ostream &KdStream) const {
  using namespace amdhsa;
  StringRef Indent = "\t";
  if (hasArchitectedFlatScratch())
    PRINT_DIRECTIVE(".amdhsa_enable_private_segment",
                    COMPUTE_PGM_RSRC2_ENABLE_PRIVATE_SEGMENT);
  else
    PRINT_DIRECTIVE(".amdhsa_system_sgpr_private_segment_wavefront_offset",
                    COMPUTE_PGM_RSRC2_ENABLE_PRIVATE_SEGMENT);
  PRINT_DIRECTIVE(".amdhsa_system_sgpr_workgroup_id_x",
                  COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X);
  PRINT_DIRECTIVE(".amdhsa_system_sgpr_workgroup_id_y",
                  COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Y);
  PRINT_DIRECTIVE(".amdhsa_system_sgpr_workgroup_id_z",
                  COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Z);
  PRINT_DIRECTIVE(".amdhsa_system_sgpr_workgroup_info",
                  COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_INFO);
  PRINT_DIRECTIVE(".amdhsa_system_vgpr_workitem_id",
                  COMPUTE_PGM_RSRC2_ENABLE_VGPR_WORKITEM_ID);

  if (FourByteBuffer & COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_ADDRESS_WATCH)
    return MCDisassembler::Fail;

  if (FourByteBuffer & COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_MEMORY)
    return MCDisassembler::Fail;

  if (FourByteBuffer & COMPUTE_PGM_RSRC2_GRANULATED_LDS_SIZE)
    return MCDisassembler::Fail;

  PRINT_DIRECTIVE(
      ".amdhsa_exception_fp_ieee_invalid_op",
      COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INVALID_OPERATION);
  PRINT_DIRECTIVE(".amdhsa_exception_fp_denorm_src",
                  COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_FP_DENORMAL_SOURCE);
  PRINT_DIRECTIVE(
      ".amdhsa_exception_fp_ieee_div_zero",
      COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_DIVISION_BY_ZERO);
  PRINT_DIRECTIVE(".amdhsa_exception_fp_ieee_overflow",
                  COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_OVERFLOW);
  PRINT_DIRECTIVE(".amdhsa_exception_fp_ieee_underflow",
                  COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_UNDERFLOW);
  PRINT_DIRECTIVE(".amdhsa_exception_fp_ieee_inexact",
                  COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INEXACT);
  PRINT_DIRECTIVE(".amdhsa_exception_int_div_zero",
                  COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_INT_DIVIDE_BY_ZERO);

  if (FourByteBuffer & COMPUTE_PGM_RSRC2_RESERVED0)
    return MCDisassembler::Fail;

  return MCDisassembler::Success;
}

#undef PRINT_DIRECTIVE

MCDisassembler::DecodeStatus
AMDGPUDisassembler::decodeKernelDescriptorDirective(
    DataExtractor::Cursor &Cursor, ArrayRef<uint8_t> Bytes,
    raw_string_ostream &KdStream) const {
#define PRINT_DIRECTIVE(DIRECTIVE, MASK)                                       \
  do {                                                                         \
    KdStream << Indent << DIRECTIVE " "                                        \
             << ((TwoByteBuffer & MASK) >> (MASK##_SHIFT)) << '\n';            \
  } while (0)

  uint16_t TwoByteBuffer = 0;
  uint32_t FourByteBuffer = 0;

  StringRef ReservedBytes;
  StringRef Indent = "\t";

  assert(Bytes.size() == 64);
  DataExtractor DE(Bytes, /*IsLittleEndian=*/true, /*AddressSize=*/8);

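  // The current cursor position selects which kernel descriptor field is
  // decoded next; each case consumes exactly the bytes of that field.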
  switch (Cursor.tell()) {
  case amdhsa::GROUP_SEGMENT_FIXED_SIZE_OFFSET:
    FourByteBuffer = DE.getU32(Cursor);
    KdStream << Indent << ".amdhsa_group_segment_fixed_size " << FourByteBuffer
             << '\n';
    return MCDisassembler::Success;

  case amdhsa::PRIVATE_SEGMENT_FIXED_SIZE_OFFSET:
    FourByteBuffer = DE.getU32(Cursor);
    KdStream << Indent << ".amdhsa_private_segment_fixed_size "
             << FourByteBuffer << '\n';
    return MCDisassembler::Success;

  case amdhsa::KERNARG_SIZE_OFFSET:
    FourByteBuffer = DE.getU32(Cursor);
    KdStream << Indent << ".amdhsa_kernarg_size "
             << FourByteBuffer << '\n';
    return MCDisassembler::Success;

  case amdhsa::RESERVED0_OFFSET:
    // 4 reserved bytes, must be 0.
    ReservedBytes = DE.getBytes(Cursor, 4);
    for (int I = 0; I < 4; ++I) {
      if (ReservedBytes[I] != 0) {
        return MCDisassembler::Fail;
      }
    }
    return MCDisassembler::Success;

  case amdhsa::KERNEL_CODE_ENTRY_BYTE_OFFSET_OFFSET:
    // KERNEL_CODE_ENTRY_BYTE_OFFSET
    // So far no directive controls this for Code Object V3, so simply skip it
    // during disassembly.
    DE.skip(Cursor, 8);
    return MCDisassembler::Success;

  case amdhsa::RESERVED1_OFFSET:
    // 20 reserved bytes, must be 0.
    ReservedBytes = DE.getBytes(Cursor, 20);
    for (int I = 0; I < 20; ++I) {
      if (ReservedBytes[I] != 0) {
        return MCDisassembler::Fail;
      }
    }
    return MCDisassembler::Success;

  case amdhsa::COMPUTE_PGM_RSRC3_OFFSET:
    // COMPUTE_PGM_RSRC3
    // - Only set for GFX10; it must be 0 for GFX6-9.
    // - Currently no directives directly control this.
    FourByteBuffer = DE.getU32(Cursor);
    if (!isGFX10Plus() && FourByteBuffer) {
      return MCDisassembler::Fail;
    }
    return MCDisassembler::Success;

  case amdhsa::COMPUTE_PGM_RSRC1_OFFSET:
    FourByteBuffer = DE.getU32(Cursor);
    if (decodeCOMPUTE_PGM_RSRC1(FourByteBuffer, KdStream) ==
        MCDisassembler::Fail) {
      return MCDisassembler::Fail;
    }
    return MCDisassembler::Success;

  case amdhsa::COMPUTE_PGM_RSRC2_OFFSET:
    FourByteBuffer = DE.getU32(Cursor);
    if (decodeCOMPUTE_PGM_RSRC2(FourByteBuffer, KdStream) ==
        MCDisassembler::Fail) {
      return MCDisassembler::Fail;
    }
    return MCDisassembler::Success;

  case amdhsa::KERNEL_CODE_PROPERTIES_OFFSET:
    using namespace amdhsa;
    TwoByteBuffer = DE.getU16(Cursor);

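    // Each enabled bit requests one user SGPR argument and is printed as the
    // corresponding .amdhsa_user_sgpr_* directive.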
    if (!hasArchitectedFlatScratch())
      PRINT_DIRECTIVE(".amdhsa_user_sgpr_private_segment_buffer",
                      KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER);
    PRINT_DIRECTIVE(".amdhsa_user_sgpr_dispatch_ptr",
                    KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR);
    PRINT_DIRECTIVE(".amdhsa_user_sgpr_queue_ptr",
                    KERNEL_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR);
    PRINT_DIRECTIVE(".amdhsa_user_sgpr_kernarg_segment_ptr",
                    KERNEL_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR);
    PRINT_DIRECTIVE(".amdhsa_user_sgpr_dispatch_id",
                    KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID);
    if (!hasArchitectedFlatScratch())
      PRINT_DIRECTIVE(".amdhsa_user_sgpr_flat_scratch_init",
                      KERNEL_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT);
    PRINT_DIRECTIVE(".amdhsa_user_sgpr_private_segment_size",
                    KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE);

    if (TwoByteBuffer & KERNEL_CODE_PROPERTY_RESERVED0)
      return MCDisassembler::Fail;

    // Reserved for GFX9.
    if (isGFX9() &&
        (TwoByteBuffer & KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32)) {
      return MCDisassembler::Fail;
    } else if (isGFX10Plus()) {
      PRINT_DIRECTIVE(".amdhsa_wavefront_size32",
                      KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32);
    }

    if (TwoByteBuffer & KERNEL_CODE_PROPERTY_RESERVED1)
      return MCDisassembler::Fail;

    return MCDisassembler::Success;

  case amdhsa::RESERVED2_OFFSET:
    // 6 bytes from here are reserved, must be 0.
    ReservedBytes = DE.getBytes(Cursor, 6);
    for (int I = 0; I < 6; ++I) {
      if (ReservedBytes[I] != 0)
        return MCDisassembler::Fail;
    }
    return MCDisassembler::Success;

  default:
    llvm_unreachable("Unhandled index. Case statements cover everything.");
    return MCDisassembler::Fail;
  }
#undef PRINT_DIRECTIVE
}

MCDisassembler::DecodeStatus AMDGPUDisassembler::decodeKernelDescriptor(
    StringRef KdName, ArrayRef<uint8_t> Bytes, uint64_t KdAddress) const {
  // CP microcode requires the kernel descriptor to be 64-byte aligned.
  if (Bytes.size() != 64 || KdAddress % 64 != 0)
    return MCDisassembler::Fail;

  std::string Kd;
  raw_string_ostream KdStream(Kd);
  KdStream << ".amdhsa_kernel " << KdName << '\n';

  DataExtractor::Cursor C(0);
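  // Walk the 64-byte descriptor field by field; each directive decode call
  // advances the cursor past the field it consumed.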
  while (C && C.tell() < Bytes.size()) {
    MCDisassembler::DecodeStatus Status =
        decodeKernelDescriptorDirective(C, Bytes, KdStream);

    cantFail(C.takeError());

    if (Status == MCDisassembler::Fail)
      return MCDisassembler::Fail;
  }
  KdStream << ".end_amdhsa_kernel\n";
  outs() << KdStream.str();
  return MCDisassembler::Success;
}

Optional<MCDisassembler::DecodeStatus>
AMDGPUDisassembler::onSymbolStart(SymbolInfoTy &Symbol, uint64_t &Size,
                                  ArrayRef<uint8_t> Bytes, uint64_t Address,
                                  raw_ostream &CStream) const {
  // Right now only the kernel descriptor needs to be handled.
  // We ignore all other symbols for target specific handling.
  // TODO:
  // Fix the spurious symbol issue for AMDGPU kernels. Exists for both Code
  // Object V2 and V3 when symbols are marked protected.

  // amd_kernel_code_t for Code Object V2.
  if (Symbol.Type == ELF::STT_AMDGPU_HSA_KERNEL) {
    Size = 256;
    return MCDisassembler::Fail;
  }

  // Code Object V3 kernel descriptors.
  StringRef Name = Symbol.Name;
  if (Symbol.Type == ELF::STT_OBJECT && Name.endswith(StringRef(".kd"))) {
    Size = 64; // Size = 64 regardless of success or failure.
    return decodeKernelDescriptor(Name.drop_back(3), Bytes, Address);
  }
  return None;
}

//===----------------------------------------------------------------------===//
// AMDGPUSymbolizer
//===----------------------------------------------------------------------===//

// Try to find a symbol name for the specified label.
bool AMDGPUSymbolizer::tryAddingSymbolicOperand(MCInst &Inst,
                                                raw_ostream &/*cStream*/,
                                                int64_t Value,
                                                uint64_t /*Address*/,
                                                bool IsBranch,
                                                uint64_t /*Offset*/,
                                                uint64_t /*InstSize*/) {

  if (!IsBranch) {
    return false;
  }

  auto *Symbols = static_cast<SectionSymbolsTy *>(DisInfo);
  if (!Symbols)
    return false;

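  // Look for an untyped symbol defined exactly at the branch target address.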
  auto Result = llvm::find_if(*Symbols, [Value](const SymbolInfoTy &Val) {
    return Val.Addr == static_cast<uint64_t>(Value) &&
           Val.Type == ELF::STT_NOTYPE;
  });
  if (Result != Symbols->end()) {
    auto *Sym = Ctx.getOrCreateSymbol(Result->Name);
    const auto *Add = MCSymbolRefExpr::create(Sym, Ctx);
    Inst.addOperand(MCOperand::createExpr(Add));
    return true;
  }
  // Add to the list of referenced addresses, so the caller can synthesize a
  // label.
  ReferencedAddresses.push_back(static_cast<uint64_t>(Value));
  return false;
}

void AMDGPUSymbolizer::tryAddingPcLoadReferenceComment(raw_ostream &cStream,
                                                       int64_t Value,
                                                       uint64_t Address) {
  llvm_unreachable("unimplemented");
}

//===----------------------------------------------------------------------===//
// Initialization
//===----------------------------------------------------------------------===//

static MCSymbolizer *createAMDGPUSymbolizer(const Triple &/*TT*/,
                                            LLVMOpInfoCallback /*GetOpInfo*/,
                                            LLVMSymbolLookupCallback /*SymbolLookUp*/,
                                            void *DisInfo,
                                            MCContext *Ctx,
                                            std::unique_ptr<MCRelocationInfo> &&RelInfo) {
  return new AMDGPUSymbolizer(*Ctx, std::move(RelInfo), DisInfo);
}

static MCDisassembler *createAMDGPUDisassembler(const Target &T,
                                                const MCSubtargetInfo &STI,
                                                MCContext &Ctx) {
  return new AMDGPUDisassembler(STI, Ctx, T.createMCInstrInfo());
}

extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUDisassembler() {
  TargetRegistry::RegisterMCDisassembler(getTheGCNTarget(),
                                         createAMDGPUDisassembler);
  TargetRegistry::RegisterMCSymbolizer(getTheGCNTarget(),
                                       createAMDGPUSymbolizer);
}