//===- AMDGPUDisassembler.cpp - Disassembler for AMDGPU ISA ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//
//
/// \file
///
/// This file contains the definition of the AMDGPU ISA disassembler.
//
//===----------------------------------------------------------------------===//

// ToDo: What to do with instruction suffixes (v_mov_b32 vs v_mov_b32_e32)?

#include "Disassembler/AMDGPUDisassembler.h"
#include "AMDGPU.h"
#include "AMDGPURegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIDefines.h"
#include "TargetInfo/AMDGPUTargetInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm-c/Disassembler.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDisassembler/MCDisassembler.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixedLenDisassembler.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <tuple>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "amdgpu-disassembler"

#define SGPR_MAX (isGFX10() ? AMDGPU::EncValues::SGPR_MAX_GFX10 \
                            : AMDGPU::EncValues::SGPR_MAX_SI)

using DecodeStatus = llvm::MCDisassembler::DecodeStatus;

AMDGPUDisassembler::AMDGPUDisassembler(const MCSubtargetInfo &STI,
                                       MCContext &Ctx,
                                       MCInstrInfo const *MCII) :
  MCDisassembler(STI, Ctx), MCII(MCII), MRI(*Ctx.getRegisterInfo()),
  TargetMaxInstBytes(Ctx.getAsmInfo()->getMaxInstLength(&STI)) {

  // ToDo: AMDGPUDisassembler supports only VI ISA.
  if (!STI.getFeatureBits()[AMDGPU::FeatureGCN3Encoding] && !isGFX10())
    report_fatal_error("Disassembly not yet supported for subtarget");
}

inline static MCDisassembler::DecodeStatus
addOperand(MCInst &Inst, const MCOperand& Opnd) {
  Inst.addOperand(Opnd);
  return Opnd.isValid() ?
    MCDisassembler::Success :
    MCDisassembler::SoftFail;
}

static int insertNamedMCOperand(MCInst &MI, const MCOperand &Op,
                                uint16_t NameIdx) {
  int OpIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), NameIdx);
  if (OpIdx != -1) {
    auto I = MI.begin();
    std::advance(I, OpIdx);
    MI.insert(I, Op);
  }
  return OpIdx;
}

static DecodeStatus decodeSoppBrTarget(MCInst &Inst, unsigned Imm,
                                       uint64_t Addr, const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);

  // Our branches take a simm16, but we need two extra bits to account for the
  // factor of 4.
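  // The encoded immediate counts dwords from the instruction following the
  // branch, so the byte offset is Imm * 4 relative to Addr + 4; the 18-bit
  // APInt below is just wide enough to hold that signed product.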
  APInt SignedOffset(18, Imm * 4, true);
  int64_t Offset = (SignedOffset.sext(64) + 4 + Addr).getSExtValue();

  if (DAsm->tryAddingSymbolicOperand(Inst, Offset, Addr, true, 2, 2))
    return MCDisassembler::Success;
  return addOperand(Inst, MCOperand::createImm(Imm));
}

static DecodeStatus decodeBoolReg(MCInst &Inst, unsigned Val,
                                  uint64_t Addr, const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeBoolReg(Val));
}

#define DECODE_OPERAND(StaticDecoderName, DecoderName) \
static DecodeStatus StaticDecoderName(MCInst &Inst, \
                                      unsigned Imm, \
                                      uint64_t /*Addr*/, \
                                      const void *Decoder) { \
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder); \
  return addOperand(Inst, DAsm->DecoderName(Imm)); \
}

#define DECODE_OPERAND_REG(RegClass) \
DECODE_OPERAND(Decode##RegClass##RegisterClass, decodeOperand_##RegClass)

DECODE_OPERAND_REG(VGPR_32)
DECODE_OPERAND_REG(VRegOrLds_32)
DECODE_OPERAND_REG(VS_32)
DECODE_OPERAND_REG(VS_64)
DECODE_OPERAND_REG(VS_128)

DECODE_OPERAND_REG(VReg_64)
DECODE_OPERAND_REG(VReg_96)
DECODE_OPERAND_REG(VReg_128)

DECODE_OPERAND_REG(SReg_32)
DECODE_OPERAND_REG(SReg_32_XM0_XEXEC)
DECODE_OPERAND_REG(SReg_32_XEXEC_HI)
DECODE_OPERAND_REG(SRegOrLds_32)
DECODE_OPERAND_REG(SReg_64)
DECODE_OPERAND_REG(SReg_64_XEXEC)
DECODE_OPERAND_REG(SReg_128)
DECODE_OPERAND_REG(SReg_256)
DECODE_OPERAND_REG(SReg_512)

static DecodeStatus decodeOperand_VSrc16(MCInst &Inst,
                                         unsigned Imm,
                                         uint64_t Addr,
                                         const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VSrc16(Imm));
}

static DecodeStatus decodeOperand_VSrcV216(MCInst &Inst,
                                           unsigned Imm,
                                           uint64_t Addr,
                                           const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VSrcV216(Imm));
}

static DecodeStatus decodeOperand_VS_16(MCInst &Inst,
                                        unsigned Imm,
                                        uint64_t Addr,
                                        const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VSrc16(Imm));
}

static DecodeStatus decodeOperand_VS_32(MCInst &Inst,
                                        unsigned Imm,
                                        uint64_t Addr,
                                        const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VS_32(Imm));
}

static DecodeStatus decodeOperand_SReg_32(MCInst &Inst,
                                          unsigned Imm,
                                          uint64_t Addr,
                                          const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_SReg_32(Imm));
}

#define DECODE_SDWA(DecName) \
DECODE_OPERAND(decodeSDWA##DecName, decodeSDWA##DecName)

DECODE_SDWA(Src32)
DECODE_SDWA(Src16)
DECODE_SDWA(VopcDst)

#include "AMDGPUGenDisassemblerTables.inc"

//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//

template <typename T> static inline T eatBytes(ArrayRef<uint8_t>& Bytes) {
  assert(Bytes.size() >= sizeof(T));
  const auto Res = support::endian::read<T, support::endianness::little>(Bytes.data());
  Bytes = Bytes.slice(sizeof(T));
  return Res;
}

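// Try to decode one candidate encoding against a single generated decoder
// table. Bytes is saved and restored so that a 32-bit literal consumed by a
// failed candidate decode does not leak into the next attempt.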
DecodeStatus AMDGPUDisassembler::tryDecodeInst(const uint8_t* Table,
                                               MCInst &MI,
                                               uint64_t Inst,
                                               uint64_t Address) const {
  assert(MI.getOpcode() == 0);
  assert(MI.getNumOperands() == 0);
  MCInst TmpInst;
  HasLiteral = false;
  const auto SavedBytes = Bytes;
  if (decodeInstruction(Table, TmpInst, Inst, Address, this, STI)) {
    MI = TmpInst;
    return MCDisassembler::Success;
  }
  Bytes = SavedBytes;
  return MCDisassembler::Fail;
}

static bool isValidDPP8(const MCInst &MI) {
  using namespace llvm::AMDGPU::DPP;
  int FiIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::fi);
  assert(FiIdx != -1);
  if ((unsigned)FiIdx >= MI.getNumOperands())
    return false;
  unsigned Fi = MI.getOperand(FiIdx).getImm();
  return Fi == DPP8_FI_0 || Fi == DPP8_FI_1;
}

DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
                                                ArrayRef<uint8_t> Bytes_,
                                                uint64_t Address,
                                                raw_ostream &WS,
                                                raw_ostream &CS) const {
  CommentStream = &CS;
  bool IsSDWA = false;

  unsigned MaxInstBytesNum = std::min((size_t)TargetMaxInstBytes, Bytes_.size());
  Bytes = Bytes_.slice(0, MaxInstBytesNum);

  DecodeStatus Res = MCDisassembler::Fail;
  do {
    // ToDo: better to switch encoding length using some bit predicate
    // but it is unknown yet, so try all we can

    // Try to decode DPP and SDWA first to solve conflict with VOP1 and VOP2
    // encodings
    if (Bytes.size() >= 8) {
      const uint64_t QW = eatBytes<uint64_t>(Bytes);

      Res = tryDecodeInst(DecoderTableDPP864, MI, QW, Address);
      if (Res && convertDPP8Inst(MI) == MCDisassembler::Success)
        break;

      MI = MCInst(); // clear

      Res = tryDecodeInst(DecoderTableDPP64, MI, QW, Address);
      if (Res) break;

      Res = tryDecodeInst(DecoderTableSDWA64, MI, QW, Address);
      if (Res) { IsSDWA = true;  break; }

      Res = tryDecodeInst(DecoderTableSDWA964, MI, QW, Address);
      if (Res) { IsSDWA = true;  break; }

      Res = tryDecodeInst(DecoderTableSDWA1064, MI, QW, Address);
      if (Res) { IsSDWA = true;  break; }

      if (STI.getFeatureBits()[AMDGPU::FeatureUnpackedD16VMem]) {
        Res = tryDecodeInst(DecoderTableGFX80_UNPACKED64, MI, QW, Address);
        if (Res)
          break;
      }

      // Some GFX9 subtargets repurposed the v_mad_mix_f32, v_mad_mixlo_f16 and
      // v_mad_mixhi_f16 for FMA variants. Try to decode using this special
      // table first so we print the correct name.
      if (STI.getFeatureBits()[AMDGPU::FeatureFmaMixInsts]) {
        Res = tryDecodeInst(DecoderTableGFX9_DL64, MI, QW, Address);
        if (Res)
          break;
      }
    }

    // Reinitialize Bytes as DPP64 could have eaten too much
    Bytes = Bytes_.slice(0, MaxInstBytesNum);

    // Try decode 32-bit instruction
    if (Bytes.size() < 4) break;
    const uint32_t DW = eatBytes<uint32_t>(Bytes);
    Res = tryDecodeInst(DecoderTableGFX832, MI, DW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableAMDGPU32, MI, DW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableGFX932, MI, DW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableGFX1032, MI, DW, Address);
    if (Res) break;

    if (Bytes.size() < 4) break;
    const uint64_t QW = ((uint64_t)eatBytes<uint32_t>(Bytes) << 32) | DW;
    Res = tryDecodeInst(DecoderTableGFX864, MI, QW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableAMDGPU64, MI, QW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableGFX964, MI, QW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableGFX1064, MI, QW, Address);
  } while (false);

  if (Res && (MaxInstBytesNum - Bytes.size()) == 12 && (!HasLiteral ||
        !(MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::VOP3))) {
    MaxInstBytesNum = 8;
    Bytes = Bytes_.slice(0, MaxInstBytesNum);
    eatBytes<uint64_t>(Bytes);
  }

  if (Res && (MI.getOpcode() == AMDGPU::V_MAC_F32_e64_vi ||
              MI.getOpcode() == AMDGPU::V_MAC_F32_e64_gfx6_gfx7 ||
              MI.getOpcode() == AMDGPU::V_MAC_F32_e64_gfx10 ||
              MI.getOpcode() == AMDGPU::V_MAC_F16_e64_vi ||
              MI.getOpcode() == AMDGPU::V_FMAC_F32_e64_vi ||
              MI.getOpcode() == AMDGPU::V_FMAC_F32_e64_gfx10 ||
              MI.getOpcode() == AMDGPU::V_FMAC_F16_e64_gfx10)) {
    // Insert dummy unused src2_modifiers.
    insertNamedMCOperand(MI, MCOperand::createImm(0),
                         AMDGPU::OpName::src2_modifiers);
  }

  if (Res && (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::MIMG)) {
    int VAddr0Idx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0);
    int RsrcIdx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc);
    unsigned NSAArgs = RsrcIdx - VAddr0Idx - 1;
    if (VAddr0Idx >= 0 && NSAArgs > 0) {
      unsigned NSAWords = (NSAArgs + 3) / 4;
      if (Bytes.size() < 4 * NSAWords) {
        Res = MCDisassembler::Fail;
      } else {
        for (unsigned i = 0; i < NSAArgs; ++i) {
          MI.insert(MI.begin() + VAddr0Idx + 1 + i,
                    decodeOperand_VGPR_32(Bytes[i]));
        }
        Bytes = Bytes.slice(4 * NSAWords);
      }
    }

    if (Res)
      Res = convertMIMGInst(MI);
  }

  if (Res && IsSDWA)
    Res = convertSDWAInst(MI);

  int VDstIn_Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                              AMDGPU::OpName::vdst_in);
  if (VDstIn_Idx != -1) {
    int Tied = MCII->get(MI.getOpcode()).getOperandConstraint(VDstIn_Idx,
                           MCOI::OperandConstraint::TIED_TO);
    if (Tied != -1 && (MI.getNumOperands() <= (unsigned)VDstIn_Idx ||
         !MI.getOperand(VDstIn_Idx).isReg() ||
         MI.getOperand(VDstIn_Idx).getReg() != MI.getOperand(Tied).getReg())) {
      if (MI.getNumOperands() > (unsigned)VDstIn_Idx)
        MI.erase(&MI.getOperand(VDstIn_Idx));
      insertNamedMCOperand(MI,
        MCOperand::createReg(MI.getOperand(Tied).getReg()),
        AMDGPU::OpName::vdst_in);
    }
  }

  // if the opcode was not recognized we'll assume a Size of 4 bytes
  // (unless there are fewer bytes left)
  Size = Res ? (MaxInstBytesNum - Bytes.size())
             : std::min((size_t)4, Bytes_.size());
  return Res;
}

DecodeStatus AMDGPUDisassembler::convertSDWAInst(MCInst &MI) const {
  if (STI.getFeatureBits()[AMDGPU::FeatureGFX9] ||
      STI.getFeatureBits()[AMDGPU::FeatureGFX10]) {
    if (AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst) != -1)
      // VOPC - insert clamp
      insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::clamp);
  } else if (STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands]) {
    int SDst = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst);
    if (SDst != -1) {
      // VOPC - insert VCC register as sdst
      insertNamedMCOperand(MI, createRegOperand(AMDGPU::VCC),
                           AMDGPU::OpName::sdst);
    } else {
      // VOP1/2 - insert omod if present in instruction
      insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::omod);
    }
  }
  return MCDisassembler::Success;
}

DecodeStatus AMDGPUDisassembler::convertDPP8Inst(MCInst &MI) const {
  unsigned Opc = MI.getOpcode();
  unsigned DescNumOps = MCII->get(Opc).getNumOperands();

  // Insert dummy unused src modifiers.
  if (MI.getNumOperands() < DescNumOps &&
      AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers) != -1)
    insertNamedMCOperand(MI, MCOperand::createImm(0),
                         AMDGPU::OpName::src0_modifiers);

  if (MI.getNumOperands() < DescNumOps &&
      AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1_modifiers) != -1)
    insertNamedMCOperand(MI, MCOperand::createImm(0),
                         AMDGPU::OpName::src1_modifiers);

  return isValidDPP8(MI) ? MCDisassembler::Success : MCDisassembler::SoftFail;
}

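// convertMIMGInst rewrites a decoded MIMG instruction to the opcode variant
// whose VData / VAddr register widths match the dmask and address size that
// were actually encoded.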
// Note that before gfx10, the MIMG encoding provided no information about
// VADDR size. Consequently, decoded instructions always show the address as
// if it were 1 dword, which may not actually be the case.
DecodeStatus AMDGPUDisassembler::convertMIMGInst(MCInst &MI) const {

  int VDstIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                           AMDGPU::OpName::vdst);

  int VDataIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::vdata);
  int VAddr0Idx =
      AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0);
  int DMaskIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::dmask);

  int TFEIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                          AMDGPU::OpName::tfe);
  int D16Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                          AMDGPU::OpName::d16);

  assert(VDataIdx != -1);
  assert(DMaskIdx != -1);
  assert(TFEIdx != -1);

  const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(MI.getOpcode());
  bool IsAtomic = (VDstIdx != -1);
  bool IsGather4 = MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::Gather4;

  bool IsNSA = false;
  unsigned AddrSize = Info->VAddrDwords;

  if (STI.getFeatureBits()[AMDGPU::FeatureGFX10]) {
    unsigned DimIdx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::dim);
    const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
        AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode);
    const AMDGPU::MIMGDimInfo *Dim =
        AMDGPU::getMIMGDimInfoByEncoding(MI.getOperand(DimIdx).getImm());

    AddrSize = BaseOpcode->NumExtraArgs +
               (BaseOpcode->Gradients ? Dim->NumGradients : 0) +
               (BaseOpcode->Coordinates ? Dim->NumCoords : 0) +
               (BaseOpcode->LodOrClampOrMip ? 1 : 0);
    IsNSA = Info->MIMGEncoding == AMDGPU::MIMGEncGfx10NSA;
    if (!IsNSA) {
      if (AddrSize > 8)
        AddrSize = 16;
      else if (AddrSize > 4)
        AddrSize = 8;
    } else {
      if (AddrSize > Info->VAddrDwords) {
        // The NSA encoding does not contain enough operands for the
        // combination of base opcode / dimension. Should this be an error?
        return MCDisassembler::Success;
      }
    }
  }

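  // The number of set dmask bits tells us how many VData dwords the
  // instruction actually reads or writes; gather4 always returns four.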
  unsigned DMask = MI.getOperand(DMaskIdx).getImm() & 0xf;
  unsigned DstSize = IsGather4 ? 4 : std::max(countPopulation(DMask), 1u);

  bool D16 = D16Idx >= 0 && MI.getOperand(D16Idx).getImm();
  if (D16 && AMDGPU::hasPackedD16(STI)) {
    DstSize = (DstSize + 1) / 2;
  }

  // FIXME: Add tfe support
  if (MI.getOperand(TFEIdx).getImm())
    return MCDisassembler::Success;

  if (DstSize == Info->VDataDwords && AddrSize == Info->VAddrDwords)
    return MCDisassembler::Success;

  int NewOpcode =
      AMDGPU::getMIMGOpcode(Info->BaseOpcode, Info->MIMGEncoding, DstSize, AddrSize);
  if (NewOpcode == -1)
    return MCDisassembler::Success;

  // Widen the register to the correct number of enabled channels.
  unsigned NewVdata = AMDGPU::NoRegister;
  if (DstSize != Info->VDataDwords) {
    auto DataRCID = MCII->get(NewOpcode).OpInfo[VDataIdx].RegClass;

    // Get first subregister of VData
    unsigned Vdata0 = MI.getOperand(VDataIdx).getReg();
    unsigned VdataSub0 = MRI.getSubReg(Vdata0, AMDGPU::sub0);
    Vdata0 = (VdataSub0 != 0)? VdataSub0 : Vdata0;

    NewVdata = MRI.getMatchingSuperReg(Vdata0, AMDGPU::sub0,
                                       &MRI.getRegClass(DataRCID));
    if (NewVdata == AMDGPU::NoRegister) {
      // It's possible to encode this such that the low register + enabled
      // components exceeds the register count.
      return MCDisassembler::Success;
    }
  }

  unsigned NewVAddr0 = AMDGPU::NoRegister;
  if (STI.getFeatureBits()[AMDGPU::FeatureGFX10] && !IsNSA &&
      AddrSize != Info->VAddrDwords) {
    unsigned VAddr0 = MI.getOperand(VAddr0Idx).getReg();
    unsigned VAddrSub0 = MRI.getSubReg(VAddr0, AMDGPU::sub0);
    VAddr0 = (VAddrSub0 != 0) ? VAddrSub0 : VAddr0;

    auto AddrRCID = MCII->get(NewOpcode).OpInfo[VAddr0Idx].RegClass;
    NewVAddr0 = MRI.getMatchingSuperReg(VAddr0, AMDGPU::sub0,
                                        &MRI.getRegClass(AddrRCID));
    if (NewVAddr0 == AMDGPU::NoRegister)
      return MCDisassembler::Success;
  }

  MI.setOpcode(NewOpcode);

  if (NewVdata != AMDGPU::NoRegister) {
    MI.getOperand(VDataIdx) = MCOperand::createReg(NewVdata);

    if (IsAtomic) {
      // Atomic operations have an additional operand (a copy of data)
      MI.getOperand(VDstIdx) = MCOperand::createReg(NewVdata);
    }
  }

  if (NewVAddr0 != AMDGPU::NoRegister) {
    MI.getOperand(VAddr0Idx) = MCOperand::createReg(NewVAddr0);
  } else if (IsNSA) {
    assert(AddrSize <= Info->VAddrDwords);
    MI.erase(MI.begin() + VAddr0Idx + AddrSize,
             MI.begin() + VAddr0Idx + Info->VAddrDwords);
  }

  return MCDisassembler::Success;
}

const char* AMDGPUDisassembler::getRegClassName(unsigned RegClassID) const {
  return getContext().getRegisterInfo()->
    getRegClassName(&AMDGPUMCRegisterClasses[RegClassID]);
}

inline
MCOperand AMDGPUDisassembler::errOperand(unsigned V,
                                         const Twine& ErrMsg) const {
  *CommentStream << "Error: " + ErrMsg;

  // ToDo: add support for error operands to MCInst.h
  // return MCOperand::createError(V);
  return MCOperand();
}

inline
MCOperand AMDGPUDisassembler::createRegOperand(unsigned int RegId) const {
  return MCOperand::createReg(AMDGPU::getMCReg(RegId, STI));
}

inline
MCOperand AMDGPUDisassembler::createRegOperand(unsigned RegClassID,
                                               unsigned Val) const {
  const auto& RegCl = AMDGPUMCRegisterClasses[RegClassID];
  if (Val >= RegCl.getNumRegs())
    return errOperand(Val, Twine(getRegClassName(RegClassID)) +
                           ": unknown register " + Twine(Val));
  return createRegOperand(RegCl.getRegister(Val));
}

inline
MCOperand AMDGPUDisassembler::createSRegOperand(unsigned SRegClassID,
                                                unsigned Val) const {
  // ToDo: SI/CI have 104 SGPRs, VI - 102
  // Valery: here we accept as much as we can, let the assembler sort it out
  int shift = 0;
  switch (SRegClassID) {
  case AMDGPU::SGPR_32RegClassID:
  case AMDGPU::TTMP_32RegClassID:
    break;
  case AMDGPU::SGPR_64RegClassID:
  case AMDGPU::TTMP_64RegClassID:
    shift = 1;
    break;
  case AMDGPU::SGPR_128RegClassID:
  case AMDGPU::TTMP_128RegClassID:
  // ToDo: unclear if s[100:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  case AMDGPU::SGPR_256RegClassID:
  case AMDGPU::TTMP_256RegClassID:
  // ToDo: unclear if s[96:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  case AMDGPU::SGPR_512RegClassID:
  case AMDGPU::TTMP_512RegClassID:
    shift = 2;
    break;
  // ToDo: unclear if s[88:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  default:
    llvm_unreachable("unhandled register class");
  }

  if (Val % (1 << shift)) {
    *CommentStream << "Warning: " << getRegClassName(SRegClassID)
                   << ": scalar reg isn't aligned " << Val;
  }

  return createRegOperand(SRegClassID, Val >> shift);
}

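// The decodeOperand_* wrappers below mostly just select an operand width and
// defer to decodeSrcOp; the VGPR/AGPR variants build a register operand from
// the encoding directly.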
MCOperand AMDGPUDisassembler::decodeOperand_VS_32(unsigned Val) const {
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_128(unsigned Val) const {
  return decodeSrcOp(OPW128, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VSrc16(unsigned Val) const {
  return decodeSrcOp(OPW16, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VSrcV216(unsigned Val) const {
  return decodeSrcOp(OPWV216, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VGPR_32(unsigned Val) const {
  // Some instructions have operand restrictions beyond what the encoding
  // allows. Some ordinarily VSrc_32 operands are VGPR_32, so clear the extra
  // high bit.
  Val &= 255;

  return createRegOperand(AMDGPU::VGPR_32RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VRegOrLds_32(unsigned Val) const {
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_AGPR_32(unsigned Val) const {
  return createRegOperand(AMDGPU::AGPR_32RegClassID, Val & 255);
}

MCOperand AMDGPUDisassembler::decodeOperand_AReg_128(unsigned Val) const {
  return createRegOperand(AMDGPU::AReg_128RegClassID, Val & 255);
}

MCOperand AMDGPUDisassembler::decodeOperand_AReg_512(unsigned Val) const {
  return createRegOperand(AMDGPU::AReg_512RegClassID, Val & 255);
}

MCOperand AMDGPUDisassembler::decodeOperand_AReg_1024(unsigned Val) const {
  return createRegOperand(AMDGPU::AReg_1024RegClassID, Val & 255);
}

MCOperand AMDGPUDisassembler::decodeOperand_AV_32(unsigned Val) const {
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_AV_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_64(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_64RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_96(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_96RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_128(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_128RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_256(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_256RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_512(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_512RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32(unsigned Val) const {
  // table-gen generated disassembler doesn't care about operand types
  // leaving only the register class, so an SSrc_32 operand turns into SReg_32
  // and therefore we accept immediates and literals here as well
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32_XM0_XEXEC(
  unsigned Val) const {
  // SReg_32_XM0 is SReg_32 without M0 or EXEC_LO/EXEC_HI
  return decodeOperand_SReg_32(Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32_XEXEC_HI(
  unsigned Val) const {
  // SReg_32_XEXEC_HI is SReg_32 without EXEC_HI
  return decodeOperand_SReg_32(Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SRegOrLds_32(unsigned Val) const {
  // table-gen generated disassembler doesn't care about operand types
  // leaving only the register class, so an SSrc_32 operand turns into SReg_32
  // and therefore we accept immediates and literals here as well
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_64_XEXEC(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_128(unsigned Val) const {
  return decodeSrcOp(OPW128, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_256(unsigned Val) const {
  return decodeDstOp(OPW256, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_512(unsigned Val) const {
  return decodeDstOp(OPW512, Val);
}

MCOperand AMDGPUDisassembler::decodeLiteralConstant() const {
  // For now all literal constants are supposed to be unsigned integer
  // ToDo: deal with signed/unsigned 64-bit integer constants
  // ToDo: deal with float/double constants
  if (!HasLiteral) {
    if (Bytes.size() < 4) {
      return errOperand(0, "cannot read literal, inst bytes left " +
                        Twine(Bytes.size()));
    }
    HasLiteral = true;
    Literal = eatBytes<uint32_t>(Bytes);
  }
  return MCOperand::createImm(Literal);
}

MCOperand AMDGPUDisassembler::decodeIntImmed(unsigned Imm) {
  using namespace AMDGPU::EncValues;

  assert(Imm >= INLINE_INTEGER_C_MIN && Imm <= INLINE_INTEGER_C_MAX);
  return MCOperand::createImm((Imm <= INLINE_INTEGER_C_POSITIVE_MAX) ?
    (static_cast<int64_t>(Imm) - INLINE_INTEGER_C_MIN) :
    (INLINE_INTEGER_C_POSITIVE_MAX - static_cast<int64_t>(Imm)));
      // Cast prevents negative overflow.
}

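// Encodings 240..248 are the hardware's inline floating-point constants
// (+-0.5, +-1.0, +-2.0, +-4.0 and 1/(2*pi)); the helpers below return the
// corresponding IEEE bit pattern for each operand width.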
static int64_t getInlineImmVal32(unsigned Imm) {
  switch (Imm) {
  case 240:
    return FloatToBits(0.5f);
  case 241:
    return FloatToBits(-0.5f);
  case 242:
    return FloatToBits(1.0f);
  case 243:
    return FloatToBits(-1.0f);
  case 244:
    return FloatToBits(2.0f);
  case 245:
    return FloatToBits(-2.0f);
  case 246:
    return FloatToBits(4.0f);
  case 247:
    return FloatToBits(-4.0f);
  case 248: // 1 / (2 * PI)
    return 0x3e22f983;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

static int64_t getInlineImmVal64(unsigned Imm) {
  switch (Imm) {
  case 240:
    return DoubleToBits(0.5);
  case 241:
    return DoubleToBits(-0.5);
  case 242:
    return DoubleToBits(1.0);
  case 243:
    return DoubleToBits(-1.0);
  case 244:
    return DoubleToBits(2.0);
  case 245:
    return DoubleToBits(-2.0);
  case 246:
    return DoubleToBits(4.0);
  case 247:
    return DoubleToBits(-4.0);
  case 248: // 1 / (2 * PI)
    return 0x3fc45f306dc9c882;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

static int64_t getInlineImmVal16(unsigned Imm) {
  switch (Imm) {
  case 240:
    return 0x3800;
  case 241:
    return 0xB800;
  case 242:
    return 0x3C00;
  case 243:
    return 0xBC00;
  case 244:
    return 0x4000;
  case 245:
    return 0xC000;
  case 246:
    return 0x4400;
  case 247:
    return 0xC400;
  case 248: // 1 / (2 * PI)
    return 0x3118;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

MCOperand AMDGPUDisassembler::decodeFPImmed(OpWidthTy Width, unsigned Imm) {
  assert(Imm >= AMDGPU::EncValues::INLINE_FLOATING_C_MIN
      && Imm <= AMDGPU::EncValues::INLINE_FLOATING_C_MAX);

  // ToDo: case 248: 1/(2*PI) - is allowed only on VI
  switch (Width) {
  case OPW32:
  case OPW128: // splat constants
  case OPW512:
  case OPW1024:
    return MCOperand::createImm(getInlineImmVal32(Imm));
  case OPW64:
    return MCOperand::createImm(getInlineImmVal64(Imm));
  case OPW16:
  case OPWV216:
    return MCOperand::createImm(getInlineImmVal16(Imm));
  default:
    llvm_unreachable("implement me");
  }
}

unsigned AMDGPUDisassembler::getVgprClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;

  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
  case OPWV216:
    return VGPR_32RegClassID;
  case OPW64: return VReg_64RegClassID;
  case OPW128: return VReg_128RegClassID;
  }
}

unsigned AMDGPUDisassembler::getAgprClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;

  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
  case OPWV216:
    return AGPR_32RegClassID;
  case OPW64: return AReg_64RegClassID;
  case OPW128: return AReg_128RegClassID;
  case OPW512: return AReg_512RegClassID;
  case OPW1024: return AReg_1024RegClassID;
  }
}

unsigned AMDGPUDisassembler::getSgprClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;

  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
  case OPWV216:
    return SGPR_32RegClassID;
  case OPW64: return SGPR_64RegClassID;
  case OPW128: return SGPR_128RegClassID;
  case OPW256: return SGPR_256RegClassID;
  case OPW512: return SGPR_512RegClassID;
  }
}

unsigned AMDGPUDisassembler::getTtmpClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;

  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
  case OPWV216:
    return TTMP_32RegClassID;
  case OPW64: return TTMP_64RegClassID;
  case OPW128: return TTMP_128RegClassID;
  case OPW256: return TTMP_256RegClassID;
  case OPW512: return TTMP_512RegClassID;
  }
}

int AMDGPUDisassembler::getTTmpIdx(unsigned Val) const {
  using namespace AMDGPU::EncValues;

  unsigned TTmpMin =
      (isGFX9() || isGFX10()) ? TTMP_GFX9_GFX10_MIN : TTMP_VI_MIN;
  unsigned TTmpMax =
      (isGFX9() || isGFX10()) ? TTMP_GFX9_GFX10_MAX : TTMP_VI_MAX;

  return (TTmpMin <= Val && Val <= TTmpMax)? Val - TTmpMin : -1;
}

MCOperand AMDGPUDisassembler::decodeSrcOp(const OpWidthTy Width, unsigned Val) const {
  using namespace AMDGPU::EncValues;

  assert(Val < 1024); // enum10

  bool IsAGPR = Val & 512;
  Val &= 511;

  if (VGPR_MIN <= Val && Val <= VGPR_MAX) {
    return createRegOperand(IsAGPR ? getAgprClassId(Width)
                                   : getVgprClassId(Width), Val - VGPR_MIN);
  }
  if (Val <= SGPR_MAX) {
    assert(SGPR_MIN == 0); // "SGPR_MIN <= Val" is always true and causes compilation warning.
    return createSRegOperand(getSgprClassId(Width), Val - SGPR_MIN);
  }

  int TTmpIdx = getTTmpIdx(Val);
  if (TTmpIdx >= 0) {
    return createSRegOperand(getTtmpClassId(Width), TTmpIdx);
  }

  if (INLINE_INTEGER_C_MIN <= Val && Val <= INLINE_INTEGER_C_MAX)
    return decodeIntImmed(Val);

  if (INLINE_FLOATING_C_MIN <= Val && Val <= INLINE_FLOATING_C_MAX)
    return decodeFPImmed(Width, Val);

  if (Val == LITERAL_CONST)
    return decodeLiteralConstant();

  switch (Width) {
  case OPW32:
  case OPW16:
  case OPWV216:
    return decodeSpecialReg32(Val);
  case OPW64:
    return decodeSpecialReg64(Val);
  default:
    llvm_unreachable("unexpected immediate type");
  }
}

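// decodeDstOp handles the wide (256/512-bit) SGPR and TTMP destination
// encodings; unlike decodeSrcOp it never yields immediates or special
// registers.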
MCOperand AMDGPUDisassembler::decodeDstOp(const OpWidthTy Width, unsigned Val) const {
  using namespace AMDGPU::EncValues;

  assert(Val < 128);
  assert(Width == OPW256 || Width == OPW512);

  if (Val <= SGPR_MAX) {
    assert(SGPR_MIN == 0); // "SGPR_MIN <= Val" is always true and causes compilation warning.
    return createSRegOperand(getSgprClassId(Width), Val - SGPR_MIN);
  }

  int TTmpIdx = getTTmpIdx(Val);
  if (TTmpIdx >= 0) {
    return createSRegOperand(getTtmpClassId(Width), TTmpIdx);
  }

  llvm_unreachable("unknown dst register");
}

MCOperand AMDGPUDisassembler::decodeSpecialReg32(unsigned Val) const {
  using namespace AMDGPU;

  switch (Val) {
  case 102: return createRegOperand(FLAT_SCR_LO);
  case 103: return createRegOperand(FLAT_SCR_HI);
  case 104: return createRegOperand(XNACK_MASK_LO);
  case 105: return createRegOperand(XNACK_MASK_HI);
  case 106: return createRegOperand(VCC_LO);
  case 107: return createRegOperand(VCC_HI);
  case 108: return createRegOperand(TBA_LO);
  case 109: return createRegOperand(TBA_HI);
  case 110: return createRegOperand(TMA_LO);
  case 111: return createRegOperand(TMA_HI);
  case 124: return createRegOperand(M0);
  case 125: return createRegOperand(SGPR_NULL);
  case 126: return createRegOperand(EXEC_LO);
  case 127: return createRegOperand(EXEC_HI);
  case 235: return createRegOperand(SRC_SHARED_BASE);
  case 236: return createRegOperand(SRC_SHARED_LIMIT);
  case 237: return createRegOperand(SRC_PRIVATE_BASE);
  case 238: return createRegOperand(SRC_PRIVATE_LIMIT);
  case 239: return createRegOperand(SRC_POPS_EXITING_WAVE_ID);
  case 251: return createRegOperand(SRC_VCCZ);
  case 252: return createRegOperand(SRC_EXECZ);
  case 253: return createRegOperand(SRC_SCC);
  case 254: return createRegOperand(LDS_DIRECT);
  default: break;
  }
  return errOperand(Val, "unknown operand encoding " + Twine(Val));
}

MCOperand AMDGPUDisassembler::decodeSpecialReg64(unsigned Val) const {
  using namespace AMDGPU;

  switch (Val) {
  case 102: return createRegOperand(FLAT_SCR);
  case 104: return createRegOperand(XNACK_MASK);
  case 106: return createRegOperand(VCC);
  case 108: return createRegOperand(TBA);
  case 110: return createRegOperand(TMA);
  case 126: return createRegOperand(EXEC);
  case 235: return createRegOperand(SRC_SHARED_BASE);
  case 236: return createRegOperand(SRC_SHARED_LIMIT);
  case 237: return createRegOperand(SRC_PRIVATE_BASE);
  case 238: return createRegOperand(SRC_PRIVATE_LIMIT);
  case 239: return createRegOperand(SRC_POPS_EXITING_WAVE_ID);
  case 251: return createRegOperand(SRC_VCCZ);
  case 252: return createRegOperand(SRC_EXECZ);
  case 253: return createRegOperand(SRC_SCC);
  default: break;
  }
  return errOperand(Val, "unknown operand encoding " + Twine(Val));
}

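// SDWA source operands use a 9-bit encoding on GFX9+: VGPRs come first,
// followed by SGPRs and TTMPs; anything beyond that range is an inline
// constant or special register, decoded after rebasing by SRC_SGPR_MIN.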
MCOperand AMDGPUDisassembler::decodeSDWASrc(const OpWidthTy Width,
                                            const unsigned Val) const {
  using namespace AMDGPU::SDWA;
  using namespace AMDGPU::EncValues;

  if (STI.getFeatureBits()[AMDGPU::FeatureGFX9] ||
      STI.getFeatureBits()[AMDGPU::FeatureGFX10]) {
    // XXX: cast to int is needed to avoid a "comparison with unsigned is
    // always true" warning.
    if (int(SDWA9EncValues::SRC_VGPR_MIN) <= int(Val) &&
        Val <= SDWA9EncValues::SRC_VGPR_MAX) {
      return createRegOperand(getVgprClassId(Width),
                              Val - SDWA9EncValues::SRC_VGPR_MIN);
    }
    if (SDWA9EncValues::SRC_SGPR_MIN <= Val &&
        Val <= (isGFX10() ? SDWA9EncValues::SRC_SGPR_MAX_GFX10
                          : SDWA9EncValues::SRC_SGPR_MAX_SI)) {
      return createSRegOperand(getSgprClassId(Width),
                               Val - SDWA9EncValues::SRC_SGPR_MIN);
    }
    if (SDWA9EncValues::SRC_TTMP_MIN <= Val &&
        Val <= SDWA9EncValues::SRC_TTMP_MAX) {
      return createSRegOperand(getTtmpClassId(Width),
                               Val - SDWA9EncValues::SRC_TTMP_MIN);
    }

    const unsigned SVal = Val - SDWA9EncValues::SRC_SGPR_MIN;

    if (INLINE_INTEGER_C_MIN <= SVal && SVal <= INLINE_INTEGER_C_MAX)
      return decodeIntImmed(SVal);

    if (INLINE_FLOATING_C_MIN <= SVal && SVal <= INLINE_FLOATING_C_MAX)
      return decodeFPImmed(Width, SVal);

    return decodeSpecialReg32(SVal);
  } else if (STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands]) {
    return createRegOperand(getVgprClassId(Width), Val);
  }
  llvm_unreachable("unsupported target");
}

MCOperand AMDGPUDisassembler::decodeSDWASrc16(unsigned Val) const {
  return decodeSDWASrc(OPW16, Val);
}

MCOperand AMDGPUDisassembler::decodeSDWASrc32(unsigned Val) const {
  return decodeSDWASrc(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeSDWAVopcDst(unsigned Val) const {
  using namespace AMDGPU::SDWA;

  assert((STI.getFeatureBits()[AMDGPU::FeatureGFX9] ||
          STI.getFeatureBits()[AMDGPU::FeatureGFX10]) &&
         "SDWAVopcDst should be present only on GFX9+");

  bool IsWave64 = STI.getFeatureBits()[AMDGPU::FeatureWavefrontSize64];

  if (Val & SDWA9EncValues::VOPC_DST_VCC_MASK) {
    Val &= SDWA9EncValues::VOPC_DST_SGPR_MASK;

    int TTmpIdx = getTTmpIdx(Val);
    if (TTmpIdx >= 0) {
      return createSRegOperand(getTtmpClassId(OPW64), TTmpIdx);
    } else if (Val > SGPR_MAX) {
      return IsWave64 ? decodeSpecialReg64(Val)
                      : decodeSpecialReg32(Val);
    } else {
      return createSRegOperand(getSgprClassId(IsWave64 ? OPW64 : OPW32), Val);
    }
  } else {
    return createRegOperand(IsWave64 ? AMDGPU::VCC : AMDGPU::VCC_LO);
  }
}

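// Boolean (lane-mask) operands are wave-size dependent: a 64-bit SGPR pair
// in wave64 mode, a single 32-bit SGPR in wave32 mode.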
MCOperand AMDGPUDisassembler::decodeBoolReg(unsigned Val) const {
  return STI.getFeatureBits()[AMDGPU::FeatureWavefrontSize64] ?
    decodeOperand_SReg_64(Val) : decodeOperand_SReg_32(Val);
}

bool AMDGPUDisassembler::isVI() const {
  return STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands];
}

bool AMDGPUDisassembler::isGFX9() const {
  return STI.getFeatureBits()[AMDGPU::FeatureGFX9];
}

bool AMDGPUDisassembler::isGFX10() const {
  return STI.getFeatureBits()[AMDGPU::FeatureGFX10];
}

//===----------------------------------------------------------------------===//
// AMDGPUSymbolizer
//===----------------------------------------------------------------------===//

// Try to find symbol name for specified label
bool AMDGPUSymbolizer::tryAddingSymbolicOperand(MCInst &Inst,
                                raw_ostream &/*cStream*/, int64_t Value,
                                uint64_t /*Address*/, bool IsBranch,
                                uint64_t /*Offset*/, uint64_t /*InstSize*/) {
  using SymbolInfoTy = std::tuple<uint64_t, StringRef, uint8_t>;
  using SectionSymbolsTy = std::vector<SymbolInfoTy>;

  if (!IsBranch) {
    return false;
  }

  auto *Symbols = static_cast<SectionSymbolsTy *>(DisInfo);
  if (!Symbols)
    return false;

  auto Result = std::find_if(Symbols->begin(), Symbols->end(),
                             [Value](const SymbolInfoTy& Val) {
                               return std::get<0>(Val) == static_cast<uint64_t>(Value)
                                   && std::get<2>(Val) == ELF::STT_NOTYPE;
                             });
  if (Result != Symbols->end()) {
    auto *Sym = Ctx.getOrCreateSymbol(std::get<1>(*Result));
    const auto *Add = MCSymbolRefExpr::create(Sym, Ctx);
    Inst.addOperand(MCOperand::createExpr(Add));
    return true;
  }
  return false;
}

void AMDGPUSymbolizer::tryAddingPcLoadReferenceComment(raw_ostream &cStream,
                                                       int64_t Value,
                                                       uint64_t Address) {
  llvm_unreachable("unimplemented");
}

//===----------------------------------------------------------------------===//
// Initialization
//===----------------------------------------------------------------------===//

static MCSymbolizer *createAMDGPUSymbolizer(const Triple &/*TT*/,
                              LLVMOpInfoCallback /*GetOpInfo*/,
                              LLVMSymbolLookupCallback /*SymbolLookUp*/,
                              void *DisInfo,
                              MCContext *Ctx,
                              std::unique_ptr<MCRelocationInfo> &&RelInfo) {
  return new AMDGPUSymbolizer(*Ctx, std::move(RelInfo), DisInfo);
}

static MCDisassembler *createAMDGPUDisassembler(const Target &T,
                                                const MCSubtargetInfo &STI,
                                                MCContext &Ctx) {
  return new AMDGPUDisassembler(STI, Ctx, T.createMCInstrInfo());
}

extern "C" void LLVMInitializeAMDGPUDisassembler() {
  TargetRegistry::RegisterMCDisassembler(getTheGCNTarget(),
                                         createAMDGPUDisassembler);
  TargetRegistry::RegisterMCSymbolizer(getTheGCNTarget(),
                                       createAMDGPUSymbolizer);
}