//===- AMDGPUDisassembler.cpp - Disassembler for AMDGPU ISA ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//
//
/// \file
///
/// This file contains the definition of the AMDGPU ISA disassembler.
//
//===----------------------------------------------------------------------===//

// ToDo: What to do with instruction suffixes (v_mov_b32 vs v_mov_b32_e32)?

#include "Disassembler/AMDGPUDisassembler.h"
#include "AMDGPU.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIDefines.h"
#include "TargetInfo/AMDGPUTargetInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm-c/Disassembler.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDisassembler/MCDisassembler.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixedLenDisassembler.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/AMDHSAKernelDescriptor.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <tuple>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "amdgpu-disassembler"

#define SGPR_MAX (isGFX10() ? AMDGPU::EncValues::SGPR_MAX_GFX10 \
                            : AMDGPU::EncValues::SGPR_MAX_SI)

using DecodeStatus = llvm::MCDisassembler::DecodeStatus;

AMDGPUDisassembler::AMDGPUDisassembler(const MCSubtargetInfo &STI,
                                       MCContext &Ctx,
                                       MCInstrInfo const *MCII) :
  MCDisassembler(STI, Ctx), MCII(MCII), MRI(*Ctx.getRegisterInfo()),
  TargetMaxInstBytes(Ctx.getAsmInfo()->getMaxInstLength(&STI)) {

  // ToDo: AMDGPUDisassembler supports only VI ISA.
  if (!STI.getFeatureBits()[AMDGPU::FeatureGCN3Encoding] && !isGFX10())
    report_fatal_error("Disassembly not yet supported for subtarget");
}

inline static MCDisassembler::DecodeStatus
addOperand(MCInst &Inst, const MCOperand& Opnd) {
  Inst.addOperand(Opnd);
  return Opnd.isValid() ?
    MCDisassembler::Success :
    MCDisassembler::Fail;
}

static int insertNamedMCOperand(MCInst &MI, const MCOperand &Op,
                                uint16_t NameIdx) {
  int OpIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), NameIdx);
  if (OpIdx != -1) {
    auto I = MI.begin();
    std::advance(I, OpIdx);
    MI.insert(I, Op);
  }
  return OpIdx;
}

static DecodeStatus decodeSoppBrTarget(MCInst &Inst, unsigned Imm,
                                       uint64_t Addr, const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);

  // Our branches take a simm16, but we need two extra bits to account for the
  // factor of 4.
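  // Illustrative example (added comment, not in the original source):
  // Imm = 0xFFFF encodes simm16 = -1, so Imm * 4 = 0x3FFFC, which as an
  // 18-bit signed value is -4; the target is then Addr + 4 + (-4) = Addr,
  // i.e. the branch jumps to its own address.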
  APInt SignedOffset(18, Imm * 4, true);
  int64_t Offset = (SignedOffset.sext(64) + 4 + Addr).getSExtValue();

  if (DAsm->tryAddingSymbolicOperand(Inst, Offset, Addr, true, 2, 2))
    return MCDisassembler::Success;
  return addOperand(Inst, MCOperand::createImm(Imm));
}

static DecodeStatus decodeSMEMOffset(MCInst &Inst, unsigned Imm,
                                     uint64_t Addr, const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  int64_t Offset;
  if (DAsm->isVI()) {   // VI supports 20-bit unsigned offsets.
    Offset = Imm & 0xFFFFF;
  } else {              // GFX9+ supports 21-bit signed offsets.
    Offset = SignExtend64<21>(Imm);
  }
  return addOperand(Inst, MCOperand::createImm(Offset));
}

static DecodeStatus decodeBoolReg(MCInst &Inst, unsigned Val,
                                  uint64_t Addr, const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeBoolReg(Val));
}

#define DECODE_OPERAND(StaticDecoderName, DecoderName) \
static DecodeStatus StaticDecoderName(MCInst &Inst, \
                                      unsigned Imm, \
                                      uint64_t /*Addr*/, \
                                      const void *Decoder) { \
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder); \
  return addOperand(Inst, DAsm->DecoderName(Imm)); \
}

#define DECODE_OPERAND_REG(RegClass) \
DECODE_OPERAND(Decode##RegClass##RegisterClass, decodeOperand_##RegClass)

DECODE_OPERAND_REG(VGPR_32)
DECODE_OPERAND_REG(VRegOrLds_32)
DECODE_OPERAND_REG(VS_32)
DECODE_OPERAND_REG(VS_64)
DECODE_OPERAND_REG(VS_128)

DECODE_OPERAND_REG(VReg_64)
DECODE_OPERAND_REG(VReg_96)
DECODE_OPERAND_REG(VReg_128)

DECODE_OPERAND_REG(SReg_32)
DECODE_OPERAND_REG(SReg_32_XM0_XEXEC)
DECODE_OPERAND_REG(SReg_32_XEXEC_HI)
DECODE_OPERAND_REG(SRegOrLds_32)
DECODE_OPERAND_REG(SReg_64)
DECODE_OPERAND_REG(SReg_64_XEXEC)
DECODE_OPERAND_REG(SReg_128)
DECODE_OPERAND_REG(SReg_256)
DECODE_OPERAND_REG(SReg_512)

DECODE_OPERAND_REG(AGPR_32)
DECODE_OPERAND_REG(AReg_128)
DECODE_OPERAND_REG(AReg_512)
DECODE_OPERAND_REG(AReg_1024)
DECODE_OPERAND_REG(AV_32)
DECODE_OPERAND_REG(AV_64)

static DecodeStatus decodeOperand_VSrc16(MCInst &Inst,
                                         unsigned Imm,
                                         uint64_t Addr,
                                         const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VSrc16(Imm));
}

static DecodeStatus decodeOperand_VSrcV216(MCInst &Inst,
                                           unsigned Imm,
                                           uint64_t Addr,
                                           const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VSrcV216(Imm));
}

static DecodeStatus decodeOperand_VS_16(MCInst &Inst,
                                        unsigned Imm,
                                        uint64_t Addr,
                                        const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VSrc16(Imm));
}

static DecodeStatus decodeOperand_VS_32(MCInst &Inst,
                                        unsigned Imm,
                                        uint64_t Addr,
                                        const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VS_32(Imm));
}

static DecodeStatus decodeOperand_AReg_128(MCInst &Inst,
                                           unsigned Imm,
                                           uint64_t Addr,
                                           const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeSrcOp(AMDGPUDisassembler::OPW128, Imm | 512));
}
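// (Added note): ORing the raw encoding with 512 sets bit 9 of the 10-bit
// source operand field, which decodeSrcOp() interprets as "AGPR rather than
// VGPR"; the low nine bits still select the register number.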
static DecodeStatus decodeOperand_AReg_512(MCInst &Inst,
                                           unsigned Imm,
                                           uint64_t Addr,
                                           const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeSrcOp(AMDGPUDisassembler::OPW512, Imm | 512));
}

static DecodeStatus decodeOperand_AReg_1024(MCInst &Inst,
                                            unsigned Imm,
                                            uint64_t Addr,
                                            const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeSrcOp(AMDGPUDisassembler::OPW1024, Imm | 512));
}

static DecodeStatus decodeOperand_SReg_32(MCInst &Inst,
                                          unsigned Imm,
                                          uint64_t Addr,
                                          const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_SReg_32(Imm));
}

static DecodeStatus decodeOperand_VGPR_32(MCInst &Inst,
                                          unsigned Imm,
                                          uint64_t Addr,
                                          const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeSrcOp(AMDGPUDisassembler::OPW32, Imm));
}

#define DECODE_SDWA(DecName) \
DECODE_OPERAND(decodeSDWA##DecName, decodeSDWA##DecName)

DECODE_SDWA(Src32)
DECODE_SDWA(Src16)
DECODE_SDWA(VopcDst)

#include "AMDGPUGenDisassemblerTables.inc"

//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//

template <typename T> static inline T eatBytes(ArrayRef<uint8_t>& Bytes) {
  assert(Bytes.size() >= sizeof(T));
  const auto Res = support::endian::read<T, support::endianness::little>(Bytes.data());
  Bytes = Bytes.slice(sizeof(T));
  return Res;
}

DecodeStatus AMDGPUDisassembler::tryDecodeInst(const uint8_t* Table,
                                               MCInst &MI,
                                               uint64_t Inst,
                                               uint64_t Address) const {
  assert(MI.getOpcode() == 0);
  assert(MI.getNumOperands() == 0);
  MCInst TmpInst;
  HasLiteral = false;
  const auto SavedBytes = Bytes;
  if (decodeInstruction(Table, TmpInst, Inst, Address, this, STI)) {
    MI = TmpInst;
    return MCDisassembler::Success;
  }
  Bytes = SavedBytes;
  return MCDisassembler::Fail;
}

static bool isValidDPP8(const MCInst &MI) {
  using namespace llvm::AMDGPU::DPP;
  int FiIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::fi);
  assert(FiIdx != -1);
  if ((unsigned)FiIdx >= MI.getNumOperands())
    return false;
  unsigned Fi = MI.getOperand(FiIdx).getImm();
  return Fi == DPP8_FI_0 || Fi == DPP8_FI_1;
}

DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
                                                ArrayRef<uint8_t> Bytes_,
                                                uint64_t Address,
                                                raw_ostream &CS) const {
  CommentStream = &CS;
  bool IsSDWA = false;

  unsigned MaxInstBytesNum = std::min((size_t)TargetMaxInstBytes, Bytes_.size());
  Bytes = Bytes_.slice(0, MaxInstBytesNum);

  DecodeStatus Res = MCDisassembler::Fail;
  do {
    // ToDo: it would be better to switch the encoding length using some bit
    // predicate, but it is not known yet, so try every encoding we have.

    // Try to decode DPP and SDWA first to resolve the conflict with VOP1 and
    // VOP2 encodings.
    if (Bytes.size() >= 8) {
      const uint64_t QW = eatBytes<uint64_t>(Bytes);

      if (STI.getFeatureBits()[AMDGPU::FeatureGFX10_BEncoding]) {
        Res = tryDecodeInst(DecoderTableGFX10_B64, MI, QW, Address);
        if (Res) {
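          // (Added note): a GFX10_B opcode without a dpp8 operand is complete
          // as decoded; one with dpp8 must also pass convertDPP8Inst, else MI
          // is cleared and the remaining tables are tried.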
          if (AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                         AMDGPU::OpName::dpp8) == -1)
            break;
          if (convertDPP8Inst(MI) == MCDisassembler::Success)
            break;
          MI = MCInst(); // clear
        }
      }

      Res = tryDecodeInst(DecoderTableDPP864, MI, QW, Address);
      if (Res && convertDPP8Inst(MI) == MCDisassembler::Success)
        break;

      MI = MCInst(); // clear

      Res = tryDecodeInst(DecoderTableDPP64, MI, QW, Address);
      if (Res) break;

      Res = tryDecodeInst(DecoderTableSDWA64, MI, QW, Address);
      if (Res) { IsSDWA = true; break; }

      Res = tryDecodeInst(DecoderTableSDWA964, MI, QW, Address);
      if (Res) { IsSDWA = true; break; }

      Res = tryDecodeInst(DecoderTableSDWA1064, MI, QW, Address);
      if (Res) { IsSDWA = true; break; }

      if (STI.getFeatureBits()[AMDGPU::FeatureUnpackedD16VMem]) {
        Res = tryDecodeInst(DecoderTableGFX80_UNPACKED64, MI, QW, Address);
        if (Res)
          break;
      }

      // Some GFX9 subtargets repurposed the v_mad_mix_f32, v_mad_mixlo_f16
      // and v_mad_mixhi_f16 encodings for FMA variants. Try to decode using
      // this special table first so we print the correct name.
      if (STI.getFeatureBits()[AMDGPU::FeatureFmaMixInsts]) {
        Res = tryDecodeInst(DecoderTableGFX9_DL64, MI, QW, Address);
        if (Res)
          break;
      }
    }

    // Reinitialize Bytes as DPP64 could have eaten too much.
    Bytes = Bytes_.slice(0, MaxInstBytesNum);

    // Try to decode a 32-bit instruction.
    if (Bytes.size() < 4) break;
    const uint32_t DW = eatBytes<uint32_t>(Bytes);
    Res = tryDecodeInst(DecoderTableGFX832, MI, DW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableAMDGPU32, MI, DW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableGFX932, MI, DW, Address);
    if (Res) break;

    if (STI.getFeatureBits()[AMDGPU::FeatureGFX10_BEncoding]) {
      Res = tryDecodeInst(DecoderTableGFX10_B32, MI, DW, Address);
      if (Res) break;
    }

    Res = tryDecodeInst(DecoderTableGFX1032, MI, DW, Address);
    if (Res) break;

    if (Bytes.size() < 4) break;
    const uint64_t QW = ((uint64_t)eatBytes<uint32_t>(Bytes) << 32) | DW;
    Res = tryDecodeInst(DecoderTableGFX864, MI, QW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableAMDGPU64, MI, QW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableGFX964, MI, QW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableGFX1064, MI, QW, Address);
  } while (false);

  if (Res && (MI.getOpcode() == AMDGPU::V_MAC_F32_e64_vi ||
              MI.getOpcode() == AMDGPU::V_MAC_F32_e64_gfx6_gfx7 ||
              MI.getOpcode() == AMDGPU::V_MAC_F32_e64_gfx10 ||
              MI.getOpcode() == AMDGPU::V_MAC_F16_e64_vi ||
              MI.getOpcode() == AMDGPU::V_FMAC_F32_e64_vi ||
              MI.getOpcode() == AMDGPU::V_FMAC_F32_e64_gfx10 ||
              MI.getOpcode() == AMDGPU::V_FMAC_F16_e64_gfx10)) {
    // Insert dummy unused src2_modifiers.
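    // (Added context): these e64 opcodes do not encode src2_modifiers, but
    // their MCInstrDesc expects the operand, so a zero immediate keeps the
    // operand indices consistent for the printer.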
    insertNamedMCOperand(MI, MCOperand::createImm(0),
                         AMDGPU::OpName::src2_modifiers);
  }

  if (Res && (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::MIMG)) {
    int VAddr0Idx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0);
    int RsrcIdx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc);
    unsigned NSAArgs = RsrcIdx - VAddr0Idx - 1;
    if (VAddr0Idx >= 0 && NSAArgs > 0) {
      unsigned NSAWords = (NSAArgs + 3) / 4;
      if (Bytes.size() < 4 * NSAWords) {
        Res = MCDisassembler::Fail;
      } else {
        for (unsigned i = 0; i < NSAArgs; ++i) {
          MI.insert(MI.begin() + VAddr0Idx + 1 + i,
                    decodeOperand_VGPR_32(Bytes[i]));
        }
        Bytes = Bytes.slice(4 * NSAWords);
      }
    }

    if (Res)
      Res = convertMIMGInst(MI);
  }

  if (Res && IsSDWA)
    Res = convertSDWAInst(MI);

  int VDstIn_Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                              AMDGPU::OpName::vdst_in);
  if (VDstIn_Idx != -1) {
    int Tied = MCII->get(MI.getOpcode()).getOperandConstraint(VDstIn_Idx,
                           MCOI::OperandConstraint::TIED_TO);
    if (Tied != -1 && (MI.getNumOperands() <= (unsigned)VDstIn_Idx ||
         !MI.getOperand(VDstIn_Idx).isReg() ||
         MI.getOperand(VDstIn_Idx).getReg() != MI.getOperand(Tied).getReg())) {
      if (MI.getNumOperands() > (unsigned)VDstIn_Idx)
        MI.erase(&MI.getOperand(VDstIn_Idx));
      insertNamedMCOperand(MI,
        MCOperand::createReg(MI.getOperand(Tied).getReg()),
        AMDGPU::OpName::vdst_in);
    }
  }

  // If the opcode was not recognized, we'll assume a size of 4 bytes
  // (unless there are fewer bytes left).
  Size = Res ? (MaxInstBytesNum - Bytes.size())
             : std::min((size_t)4, Bytes_.size());
  return Res;
}

DecodeStatus AMDGPUDisassembler::convertSDWAInst(MCInst &MI) const {
  if (STI.getFeatureBits()[AMDGPU::FeatureGFX9] ||
      STI.getFeatureBits()[AMDGPU::FeatureGFX10]) {
    if (AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst) != -1)
      // VOPC - insert clamp
      insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::clamp);
  } else if (STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands]) {
    int SDst = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst);
    if (SDst != -1) {
      // VOPC - insert VCC register as sdst
      insertNamedMCOperand(MI, createRegOperand(AMDGPU::VCC),
                           AMDGPU::OpName::sdst);
    } else {
      // VOP1/2 - insert omod if present in instruction
      insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::omod);
    }
  }
  return MCDisassembler::Success;
}

DecodeStatus AMDGPUDisassembler::convertDPP8Inst(MCInst &MI) const {
  unsigned Opc = MI.getOpcode();
  unsigned DescNumOps = MCII->get(Opc).getNumOperands();

  // Insert dummy unused src modifiers.
  if (MI.getNumOperands() < DescNumOps &&
      AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers) != -1)
    insertNamedMCOperand(MI, MCOperand::createImm(0),
                         AMDGPU::OpName::src0_modifiers);

  if (MI.getNumOperands() < DescNumOps &&
      AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1_modifiers) != -1)
    insertNamedMCOperand(MI, MCOperand::createImm(0),
                         AMDGPU::OpName::src1_modifiers);

  return isValidDPP8(MI) ? MCDisassembler::Success : MCDisassembler::SoftFail;
}

// Note that before gfx10, the MIMG encoding provided no information about
// VADDR size. Consequently, decoded instructions always show the address as
// if it has one dword, which may not really be the case.
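// Illustrative example (added, not from the original source): on gfx9 an
// IMAGE_SAMPLE with dmask = 0b0111 writes three dwords, so DstSize = 3; if
// the decoded opcode variant was declared with a different VDataDwords,
// convertMIMGInst switches to the matching variant and widens vdata
// accordingly.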
DecodeStatus AMDGPUDisassembler::convertMIMGInst(MCInst &MI) const {

  int VDstIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                           AMDGPU::OpName::vdst);

  int VDataIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::vdata);
  int VAddr0Idx =
      AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0);
  int DMaskIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::dmask);

  int TFEIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                          AMDGPU::OpName::tfe);
  int D16Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                          AMDGPU::OpName::d16);

  assert(VDataIdx != -1);
  assert(DMaskIdx != -1);
  assert(TFEIdx != -1);

  const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(MI.getOpcode());
  bool IsAtomic = (VDstIdx != -1);
  bool IsGather4 = MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::Gather4;

  bool IsNSA = false;
  unsigned AddrSize = Info->VAddrDwords;

  if (STI.getFeatureBits()[AMDGPU::FeatureGFX10]) {
    unsigned DimIdx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::dim);
    const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
        AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode);
    const AMDGPU::MIMGDimInfo *Dim =
        AMDGPU::getMIMGDimInfoByEncoding(MI.getOperand(DimIdx).getImm());

    AddrSize = BaseOpcode->NumExtraArgs +
               (BaseOpcode->Gradients ? Dim->NumGradients : 0) +
               (BaseOpcode->Coordinates ? Dim->NumCoords : 0) +
               (BaseOpcode->LodOrClampOrMip ? 1 : 0);
    IsNSA = Info->MIMGEncoding == AMDGPU::MIMGEncGfx10NSA;
    if (!IsNSA) {
      if (AddrSize > 8)
        AddrSize = 16;
      else if (AddrSize > 4)
        AddrSize = 8;
    } else {
      if (AddrSize > Info->VAddrDwords) {
        // The NSA encoding does not contain enough operands for the
        // combination of base opcode / dimension. Should this be an error?
        return MCDisassembler::Success;
      }
    }
  }

  unsigned DMask = MI.getOperand(DMaskIdx).getImm() & 0xf;
  unsigned DstSize = IsGather4 ? 4 : std::max(countPopulation(DMask), 1u);

  bool D16 = D16Idx >= 0 && MI.getOperand(D16Idx).getImm();
  if (D16 && AMDGPU::hasPackedD16(STI)) {
    DstSize = (DstSize + 1) / 2;
  }

  // FIXME: Add tfe support
  if (MI.getOperand(TFEIdx).getImm())
    return MCDisassembler::Success;

  if (DstSize == Info->VDataDwords && AddrSize == Info->VAddrDwords)
    return MCDisassembler::Success;

  int NewOpcode =
      AMDGPU::getMIMGOpcode(Info->BaseOpcode, Info->MIMGEncoding, DstSize, AddrSize);
  if (NewOpcode == -1)
    return MCDisassembler::Success;

  // Widen the register to the correct number of enabled channels.
  unsigned NewVdata = AMDGPU::NoRegister;
  if (DstSize != Info->VDataDwords) {
    auto DataRCID = MCII->get(NewOpcode).OpInfo[VDataIdx].RegClass;

    // Get first subregister of VData
    unsigned Vdata0 = MI.getOperand(VDataIdx).getReg();
    unsigned VdataSub0 = MRI.getSubReg(Vdata0, AMDGPU::sub0);
    Vdata0 = (VdataSub0 != 0)? VdataSub0 : Vdata0;

    NewVdata = MRI.getMatchingSuperReg(Vdata0, AMDGPU::sub0,
                                       &MRI.getRegClass(DataRCID));
    if (NewVdata == AMDGPU::NoRegister) {
      // It's possible to encode this such that the low register + enabled
      // components exceeds the register count.
      return MCDisassembler::Success;
    }
  }

  unsigned NewVAddr0 = AMDGPU::NoRegister;
  if (STI.getFeatureBits()[AMDGPU::FeatureGFX10] && !IsNSA &&
      AddrSize != Info->VAddrDwords) {
    unsigned VAddr0 = MI.getOperand(VAddr0Idx).getReg();
    unsigned VAddrSub0 = MRI.getSubReg(VAddr0, AMDGPU::sub0);
    VAddr0 = (VAddrSub0 != 0) ? VAddrSub0 : VAddr0;

    auto AddrRCID = MCII->get(NewOpcode).OpInfo[VAddr0Idx].RegClass;
    NewVAddr0 = MRI.getMatchingSuperReg(VAddr0, AMDGPU::sub0,
                                        &MRI.getRegClass(AddrRCID));
    if (NewVAddr0 == AMDGPU::NoRegister)
      return MCDisassembler::Success;
  }

  MI.setOpcode(NewOpcode);

  if (NewVdata != AMDGPU::NoRegister) {
    MI.getOperand(VDataIdx) = MCOperand::createReg(NewVdata);

    if (IsAtomic) {
      // Atomic operations have an additional operand (a copy of data)
      MI.getOperand(VDstIdx) = MCOperand::createReg(NewVdata);
    }
  }

  if (NewVAddr0 != AMDGPU::NoRegister) {
    MI.getOperand(VAddr0Idx) = MCOperand::createReg(NewVAddr0);
  } else if (IsNSA) {
    assert(AddrSize <= Info->VAddrDwords);
    MI.erase(MI.begin() + VAddr0Idx + AddrSize,
             MI.begin() + VAddr0Idx + Info->VAddrDwords);
  }

  return MCDisassembler::Success;
}

const char* AMDGPUDisassembler::getRegClassName(unsigned RegClassID) const {
  return getContext().getRegisterInfo()->
    getRegClassName(&AMDGPUMCRegisterClasses[RegClassID]);
}

inline
MCOperand AMDGPUDisassembler::errOperand(unsigned V,
                                         const Twine& ErrMsg) const {
  *CommentStream << "Error: " + ErrMsg;

  // ToDo: add support for error operands to MCInst.h
  // return MCOperand::createError(V);
  return MCOperand();
}

inline
MCOperand AMDGPUDisassembler::createRegOperand(unsigned int RegId) const {
  return MCOperand::createReg(AMDGPU::getMCReg(RegId, STI));
}

inline
MCOperand AMDGPUDisassembler::createRegOperand(unsigned RegClassID,
                                               unsigned Val) const {
  const auto& RegCl = AMDGPUMCRegisterClasses[RegClassID];
  if (Val >= RegCl.getNumRegs())
    return errOperand(Val, Twine(getRegClassName(RegClassID)) +
                           ": unknown register " + Twine(Val));
  return createRegOperand(RegCl.getRegister(Val));
}

inline
MCOperand AMDGPUDisassembler::createSRegOperand(unsigned SRegClassID,
                                                unsigned Val) const {
  // ToDo: SI/CI have 104 SGPRs, VI has 102.
  // Valery: here we accept as much as we can; let the assembler sort it out.
  int shift = 0;
  switch (SRegClassID) {
  case AMDGPU::SGPR_32RegClassID:
  case AMDGPU::TTMP_32RegClassID:
    break;
  case AMDGPU::SGPR_64RegClassID:
  case AMDGPU::TTMP_64RegClassID:
    shift = 1;
    break;
  case AMDGPU::SGPR_128RegClassID:
  case AMDGPU::TTMP_128RegClassID:
    // ToDo: unclear if s[100:104] is available on VI. Can we use VCC as SGPR
    // in this bundle?
  case AMDGPU::SGPR_256RegClassID:
  case AMDGPU::TTMP_256RegClassID:
    // ToDo: unclear if s[96:104] is available on VI. Can we use VCC as SGPR
    // in this bundle?
  case AMDGPU::SGPR_512RegClassID:
  case AMDGPU::TTMP_512RegClassID:
    shift = 2;
    break;
  // ToDo: unclear if s[88:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  default:
    llvm_unreachable("unhandled register class");
  }
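
  // Worked example (added comment): an SGPR_64 operand encoding s[2:3]
  // arrives with Val = 2; shift = 1 maps it to index 1 of the 64-bit class.
  // An odd Val would denote a misaligned register pair, which the check
  // below only warns about.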
  if (Val % (1 << shift)) {
    *CommentStream << "Warning: " << getRegClassName(SRegClassID)
                   << ": scalar reg isn't aligned " << Val;
  }

  return createRegOperand(SRegClassID, Val >> shift);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_32(unsigned Val) const {
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_128(unsigned Val) const {
  return decodeSrcOp(OPW128, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VSrc16(unsigned Val) const {
  return decodeSrcOp(OPW16, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VSrcV216(unsigned Val) const {
  return decodeSrcOp(OPWV216, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VGPR_32(unsigned Val) const {
  // Some instructions have operand restrictions beyond what the encoding
  // allows. Some ordinarily VSrc_32 operands are VGPR_32, so clear the extra
  // high bit.
  Val &= 255;

  return createRegOperand(AMDGPU::VGPR_32RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VRegOrLds_32(unsigned Val) const {
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_AGPR_32(unsigned Val) const {
  return createRegOperand(AMDGPU::AGPR_32RegClassID, Val & 255);
}

MCOperand AMDGPUDisassembler::decodeOperand_AReg_128(unsigned Val) const {
  return createRegOperand(AMDGPU::AReg_128RegClassID, Val & 255);
}

MCOperand AMDGPUDisassembler::decodeOperand_AReg_512(unsigned Val) const {
  return createRegOperand(AMDGPU::AReg_512RegClassID, Val & 255);
}

MCOperand AMDGPUDisassembler::decodeOperand_AReg_1024(unsigned Val) const {
  return createRegOperand(AMDGPU::AReg_1024RegClassID, Val & 255);
}

MCOperand AMDGPUDisassembler::decodeOperand_AV_32(unsigned Val) const {
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_AV_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_64(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_64RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_96(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_96RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_128(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_128RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_256(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_256RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_512(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_512RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32(unsigned Val) const {
  // The table-gen'd disassembler doesn't care about operand types, leaving
  // only the register class, so an SSrc_32 operand turns into SReg_32;
  // therefore we accept immediates and literals here as well.
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32_XM0_XEXEC(
  unsigned Val) const {
  // SReg_32_XM0 is SReg_32 without M0 or EXEC_LO/EXEC_HI
  return decodeOperand_SReg_32(Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32_XEXEC_HI(
  unsigned Val) const {
  // SReg_32_XEXEC_HI is SReg_32 without EXEC_HI.
  return decodeOperand_SReg_32(Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SRegOrLds_32(unsigned Val) const {
  // The table-gen'd disassembler doesn't care about operand types, leaving
  // only the register class, so an SSrc_32 operand turns into SReg_32;
  // therefore we accept immediates and literals here as well.
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_64_XEXEC(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_128(unsigned Val) const {
  return decodeSrcOp(OPW128, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_256(unsigned Val) const {
  return decodeDstOp(OPW256, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_512(unsigned Val) const {
  return decodeDstOp(OPW512, Val);
}

MCOperand AMDGPUDisassembler::decodeLiteralConstant() const {
  // For now all literal constants are assumed to be unsigned integers.
  // ToDo: deal with signed/unsigned 64-bit integer constants
  // ToDo: deal with float/double constants
  if (!HasLiteral) {
    if (Bytes.size() < 4) {
      return errOperand(0, "cannot read literal, inst bytes left " +
                        Twine(Bytes.size()));
    }
    HasLiteral = true;
    Literal = eatBytes<uint32_t>(Bytes);
  }
  return MCOperand::createImm(Literal);
}

MCOperand AMDGPUDisassembler::decodeIntImmed(unsigned Imm) {
  using namespace AMDGPU::EncValues;

  assert(Imm >= INLINE_INTEGER_C_MIN && Imm <= INLINE_INTEGER_C_MAX);
  return MCOperand::createImm((Imm <= INLINE_INTEGER_C_POSITIVE_MAX) ?
    (static_cast<int64_t>(Imm) - INLINE_INTEGER_C_MIN) :
    (INLINE_INTEGER_C_POSITIVE_MAX - static_cast<int64_t>(Imm)));
      // Cast prevents negative overflow.
}
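
// Worked example (added comment): assuming the usual inline-constant
// encoding (128 -> 0, 129..192 -> 1..64, 193..208 -> -1..-16), Imm = 129
// decodes to 129 - INLINE_INTEGER_C_MIN = 1, and Imm = 193 decodes to
// INLINE_INTEGER_C_POSITIVE_MAX - 193 = -1.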

static int64_t getInlineImmVal32(unsigned Imm) {
  switch (Imm) {
  case 240:
    return FloatToBits(0.5f);
  case 241:
    return FloatToBits(-0.5f);
  case 242:
    return FloatToBits(1.0f);
  case 243:
    return FloatToBits(-1.0f);
  case 244:
    return FloatToBits(2.0f);
  case 245:
    return FloatToBits(-2.0f);
  case 246:
    return FloatToBits(4.0f);
  case 247:
    return FloatToBits(-4.0f);
  case 248: // 1 / (2 * PI)
    return 0x3e22f983;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

static int64_t getInlineImmVal64(unsigned Imm) {
  switch (Imm) {
  case 240:
    return DoubleToBits(0.5);
  case 241:
    return DoubleToBits(-0.5);
  case 242:
    return DoubleToBits(1.0);
  case 243:
    return DoubleToBits(-1.0);
  case 244:
    return DoubleToBits(2.0);
  case 245:
    return DoubleToBits(-2.0);
  case 246:
    return DoubleToBits(4.0);
  case 247:
    return DoubleToBits(-4.0);
  case 248: // 1 / (2 * PI)
    return 0x3fc45f306dc9c882;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

static int64_t getInlineImmVal16(unsigned Imm) {
  switch (Imm) {
  case 240:
    return 0x3800;
  case 241:
    return 0xB800;
  case 242:
    return 0x3C00;
  case 243:
    return 0xBC00;
  case 244:
    return 0x4000;
  case 245:
    return 0xC000;
  case 246:
    return 0x4400;
  case 247:
    return 0xC400;
  case 248: // 1 / (2 * PI)
    return 0x3118;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

MCOperand AMDGPUDisassembler::decodeFPImmed(OpWidthTy Width, unsigned Imm) {
  assert(Imm >= AMDGPU::EncValues::INLINE_FLOATING_C_MIN
      && Imm <= AMDGPU::EncValues::INLINE_FLOATING_C_MAX);

  // ToDo: case 248: 1/(2*PI) - is allowed only on VI
  switch (Width) {
  case OPW32:
  case OPW128: // splat constants
  case OPW512:
  case OPW1024:
    return MCOperand::createImm(getInlineImmVal32(Imm));
  case OPW64:
    return MCOperand::createImm(getInlineImmVal64(Imm));
  case OPW16:
  case OPWV216:
    return MCOperand::createImm(getInlineImmVal16(Imm));
  default:
    llvm_unreachable("implement me");
  }
}

unsigned AMDGPUDisassembler::getVgprClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;

  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
  case OPWV216:
    return VGPR_32RegClassID;
  case OPW64: return VReg_64RegClassID;
  case OPW128: return VReg_128RegClassID;
  }
}

unsigned AMDGPUDisassembler::getAgprClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;

  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
  case OPWV216:
    return AGPR_32RegClassID;
  case OPW64: return AReg_64RegClassID;
  case OPW128: return AReg_128RegClassID;
  case OPW256: return AReg_256RegClassID;
  case OPW512: return AReg_512RegClassID;
  case OPW1024: return AReg_1024RegClassID;
  }
}


unsigned AMDGPUDisassembler::getSgprClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;

  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
  case OPWV216:
    return SGPR_32RegClassID;
  case OPW64: return SGPR_64RegClassID;
  case OPW128: return SGPR_128RegClassID;
  case OPW256: return SGPR_256RegClassID;
  case OPW512: return SGPR_512RegClassID;
  }
}

unsigned AMDGPUDisassembler::getTtmpClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;

  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
  case OPWV216:
    return TTMP_32RegClassID;
  case OPW64: return TTMP_64RegClassID;
  case OPW128: return TTMP_128RegClassID;
  case OPW256: return TTMP_256RegClassID;
  case OPW512: return TTMP_512RegClassID;
  }
}

int AMDGPUDisassembler::getTTmpIdx(unsigned Val) const {
  using namespace AMDGPU::EncValues;

  unsigned TTmpMin =
      (isGFX9() || isGFX10()) ? TTMP_GFX9_GFX10_MIN : TTMP_VI_MIN;
  unsigned TTmpMax =
      (isGFX9() || isGFX10()) ? TTMP_GFX9_GFX10_MAX : TTMP_VI_MAX;

  return (TTmpMin <= Val && Val <= TTmpMax)? Val - TTmpMin : -1;
}

MCOperand AMDGPUDisassembler::decodeSrcOp(const OpWidthTy Width, unsigned Val) const {
  using namespace AMDGPU::EncValues;

  assert(Val < 1024); // enum10

  bool IsAGPR = Val & 512;
  Val &= 511;

  if (VGPR_MIN <= Val && Val <= VGPR_MAX) {
    return createRegOperand(IsAGPR ? getAgprClassId(Width)
                                   : getVgprClassId(Width), Val - VGPR_MIN);
  }
  if (Val <= SGPR_MAX) {
    assert(SGPR_MIN == 0); // "SGPR_MIN <= Val" is always true and causes compilation warning.
    return createSRegOperand(getSgprClassId(Width), Val - SGPR_MIN);
  }

  int TTmpIdx = getTTmpIdx(Val);
  if (TTmpIdx >= 0) {
    return createSRegOperand(getTtmpClassId(Width), TTmpIdx);
  }

  if (INLINE_INTEGER_C_MIN <= Val && Val <= INLINE_INTEGER_C_MAX)
    return decodeIntImmed(Val);

  if (INLINE_FLOATING_C_MIN <= Val && Val <= INLINE_FLOATING_C_MAX)
    return decodeFPImmed(Width, Val);

  if (Val == LITERAL_CONST)
    return decodeLiteralConstant();

  switch (Width) {
  case OPW32:
  case OPW16:
  case OPWV216:
    return decodeSpecialReg32(Val);
  case OPW64:
    return decodeSpecialReg64(Val);
  default:
    llvm_unreachable("unexpected immediate type");
  }
}
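
// (Added note): unlike decodeSrcOp above, decodeDstOp handles only the 7-bit
// scalar destination fields of 256- and 512-bit operands, so just SGPRs and
// TTMPs are valid here; inline constants and VGPRs are not.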
MCOperand AMDGPUDisassembler::decodeDstOp(const OpWidthTy Width, unsigned Val) const {
  using namespace AMDGPU::EncValues;

  assert(Val < 128);
  assert(Width == OPW256 || Width == OPW512);

  if (Val <= SGPR_MAX) {
    assert(SGPR_MIN == 0); // "SGPR_MIN <= Val" is always true and causes compilation warning.
    return createSRegOperand(getSgprClassId(Width), Val - SGPR_MIN);
  }

  int TTmpIdx = getTTmpIdx(Val);
  if (TTmpIdx >= 0) {
    return createSRegOperand(getTtmpClassId(Width), TTmpIdx);
  }

  llvm_unreachable("unknown dst register");
}

MCOperand AMDGPUDisassembler::decodeSpecialReg32(unsigned Val) const {
  using namespace AMDGPU;

  switch (Val) {
  case 102: return createRegOperand(FLAT_SCR_LO);
  case 103: return createRegOperand(FLAT_SCR_HI);
  case 104: return createRegOperand(XNACK_MASK_LO);
  case 105: return createRegOperand(XNACK_MASK_HI);
  case 106: return createRegOperand(VCC_LO);
  case 107: return createRegOperand(VCC_HI);
  case 108: return createRegOperand(TBA_LO);
  case 109: return createRegOperand(TBA_HI);
  case 110: return createRegOperand(TMA_LO);
  case 111: return createRegOperand(TMA_HI);
  case 124: return createRegOperand(M0);
  case 125: return createRegOperand(SGPR_NULL);
  case 126: return createRegOperand(EXEC_LO);
  case 127: return createRegOperand(EXEC_HI);
  case 235: return createRegOperand(SRC_SHARED_BASE);
  case 236: return createRegOperand(SRC_SHARED_LIMIT);
  case 237: return createRegOperand(SRC_PRIVATE_BASE);
  case 238: return createRegOperand(SRC_PRIVATE_LIMIT);
  case 239: return createRegOperand(SRC_POPS_EXITING_WAVE_ID);
  case 251: return createRegOperand(SRC_VCCZ);
  case 252: return createRegOperand(SRC_EXECZ);
  case 253: return createRegOperand(SRC_SCC);
  case 254: return createRegOperand(LDS_DIRECT);
  default: break;
  }
  return errOperand(Val, "unknown operand encoding " + Twine(Val));
}

MCOperand AMDGPUDisassembler::decodeSpecialReg64(unsigned Val) const {
  using namespace AMDGPU;

  switch (Val) {
  case 102: return createRegOperand(FLAT_SCR);
  case 104: return createRegOperand(XNACK_MASK);
  case 106: return createRegOperand(VCC);
  case 108: return createRegOperand(TBA);
  case 110: return createRegOperand(TMA);
  case 125: return createRegOperand(SGPR_NULL);
  case 126: return createRegOperand(EXEC);
  case 235: return createRegOperand(SRC_SHARED_BASE);
  case 236: return createRegOperand(SRC_SHARED_LIMIT);
  case 237: return createRegOperand(SRC_PRIVATE_BASE);
  case 238: return createRegOperand(SRC_PRIVATE_LIMIT);
  case 239: return createRegOperand(SRC_POPS_EXITING_WAVE_ID);
  case 251: return createRegOperand(SRC_VCCZ);
  case 252: return createRegOperand(SRC_EXECZ);
  case 253: return createRegOperand(SRC_SCC);
  default: break;
  }
  return errOperand(Val, "unknown operand encoding " + Twine(Val));
}

MCOperand AMDGPUDisassembler::decodeSDWASrc(const OpWidthTy Width,
                                            const unsigned Val) const {
  using namespace AMDGPU::SDWA;
  using namespace AMDGPU::EncValues;

  if (STI.getFeatureBits()[AMDGPU::FeatureGFX9] ||
      STI.getFeatureBits()[AMDGPU::FeatureGFX10]) {
    // XXX: The int casts are needed to avoid a "comparison with unsigned is
    // always true" warning.
    if (int(SDWA9EncValues::SRC_VGPR_MIN) <= int(Val) &&
        Val <= SDWA9EncValues::SRC_VGPR_MAX) {
      return createRegOperand(getVgprClassId(Width),
                              Val - SDWA9EncValues::SRC_VGPR_MIN);
    }
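    // (Added note): in the SDWA9 encoding VGPRs occupy the low part of the
    // src field, with SGPRs and TTMPs rebased above them; any other value is
    // rebased to SVal below and decoded as an inline constant or a special
    // register.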
    if (SDWA9EncValues::SRC_SGPR_MIN <= Val &&
        Val <= (isGFX10() ? SDWA9EncValues::SRC_SGPR_MAX_GFX10
                          : SDWA9EncValues::SRC_SGPR_MAX_SI)) {
      return createSRegOperand(getSgprClassId(Width),
                               Val - SDWA9EncValues::SRC_SGPR_MIN);
    }
    if (SDWA9EncValues::SRC_TTMP_MIN <= Val &&
        Val <= SDWA9EncValues::SRC_TTMP_MAX) {
      return createSRegOperand(getTtmpClassId(Width),
                               Val - SDWA9EncValues::SRC_TTMP_MIN);
    }

    const unsigned SVal = Val - SDWA9EncValues::SRC_SGPR_MIN;

    if (INLINE_INTEGER_C_MIN <= SVal && SVal <= INLINE_INTEGER_C_MAX)
      return decodeIntImmed(SVal);

    if (INLINE_FLOATING_C_MIN <= SVal && SVal <= INLINE_FLOATING_C_MAX)
      return decodeFPImmed(Width, SVal);

    return decodeSpecialReg32(SVal);
  } else if (STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands]) {
    return createRegOperand(getVgprClassId(Width), Val);
  }
  llvm_unreachable("unsupported target");
}

MCOperand AMDGPUDisassembler::decodeSDWASrc16(unsigned Val) const {
  return decodeSDWASrc(OPW16, Val);
}

MCOperand AMDGPUDisassembler::decodeSDWASrc32(unsigned Val) const {
  return decodeSDWASrc(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeSDWAVopcDst(unsigned Val) const {
  using namespace AMDGPU::SDWA;

  assert((STI.getFeatureBits()[AMDGPU::FeatureGFX9] ||
          STI.getFeatureBits()[AMDGPU::FeatureGFX10]) &&
         "SDWAVopcDst should be present only on GFX9+");

  bool IsWave64 = STI.getFeatureBits()[AMDGPU::FeatureWavefrontSize64];

  if (Val & SDWA9EncValues::VOPC_DST_VCC_MASK) {
    Val &= SDWA9EncValues::VOPC_DST_SGPR_MASK;

    int TTmpIdx = getTTmpIdx(Val);
    if (TTmpIdx >= 0) {
      auto TTmpClsId = getTtmpClassId(IsWave64 ? OPW64 : OPW32);
      return createSRegOperand(TTmpClsId, TTmpIdx);
    } else if (Val > SGPR_MAX) {
      return IsWave64 ? decodeSpecialReg64(Val)
                      : decodeSpecialReg32(Val);
    } else {
      return createSRegOperand(getSgprClassId(IsWave64 ? OPW64 : OPW32), Val);
    }
  } else {
    return createRegOperand(IsWave64 ? AMDGPU::VCC : AMDGPU::VCC_LO);
  }
}

MCOperand AMDGPUDisassembler::decodeBoolReg(unsigned Val) const {
  return STI.getFeatureBits()[AMDGPU::FeatureWavefrontSize64] ?
    decodeOperand_SReg_64(Val) : decodeOperand_SReg_32(Val);
}

bool AMDGPUDisassembler::isVI() const {
  return STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands];
}

bool AMDGPUDisassembler::isGFX9() const {
  return STI.getFeatureBits()[AMDGPU::FeatureGFX9];
}

bool AMDGPUDisassembler::isGFX10() const {
  return STI.getFeatureBits()[AMDGPU::FeatureGFX10];
}

static void checkError(DataExtractor::Cursor &C) {
  // For some malformed KD cases the Cursor can end up holding
  // Error::success(); consume it here to prevent a runtime crash from an
  // unchecked Error.
  if (!C) {
    auto Err = C.takeError();
    assert(!Err);
  }
}

//===----------------------------------------------------------------------===//
// AMDGPU specific symbol handling
//===----------------------------------------------------------------------===//
#define PRINT_DIRECTIVE(DIRECTIVE, MASK)                                       \
  do {                                                                         \
    KdStream << Indent << DIRECTIVE " "                                        \
             << ((FourByteBuffer & MASK) >> (MASK##_SHIFT)) << '\n';           \
  } while (0)

// NOLINTNEXTLINE(readability-identifier-naming)
MCDisassembler::DecodeStatus AMDGPUDisassembler::decodeCOMPUTE_PGM_RSRC1(
    uint32_t FourByteBuffer, raw_string_ostream &KdStream) const {
  using namespace amdhsa;
  StringRef Indent = "\t";

  // We cannot accurately backward compute #VGPRs used from
  // GRANULATED_WORKITEM_VGPR_COUNT. But we are concerned with getting the
  // same value of GRANULATED_WORKITEM_VGPR_COUNT in the reassembled binary.
  // So we simply calculate the inverse of what the assembler does.

  uint32_t GranulatedWorkitemVGPRCount =
      (FourByteBuffer & COMPUTE_PGM_RSRC1_GRANULATED_WORKITEM_VGPR_COUNT) >>
      COMPUTE_PGM_RSRC1_GRANULATED_WORKITEM_VGPR_COUNT_SHIFT;

  uint32_t NextFreeVGPR = (GranulatedWorkitemVGPRCount + 1) *
                          AMDGPU::IsaInfo::getVGPREncodingGranule(&STI);

  KdStream << Indent << ".amdhsa_next_free_vgpr " << NextFreeVGPR << '\n';

  // We cannot backward compute values used to calculate
  // GRANULATED_WAVEFRONT_SGPR_COUNT. Hence the original values for the
  // following directives can't be computed:
  //   .amdhsa_reserve_vcc
  //   .amdhsa_reserve_flat_scratch
  //   .amdhsa_reserve_xnack_mask
  // They take their respective default values if not specified in the
  // assembly.
  //
  // GRANULATED_WAVEFRONT_SGPR_COUNT
  //    = f(NEXT_FREE_SGPR + VCC + FLAT_SCRATCH + XNACK_MASK)
  //
  // We compute the inverse as though all directives apart from NEXT_FREE_SGPR
  // are set to 0. So while disassembling we consider that:
  //
  // GRANULATED_WAVEFRONT_SGPR_COUNT
  //    = f(NEXT_FREE_SGPR + 0 + 0 + 0)
  //
  // The disassembler cannot recover the original values of those 3 directives.
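  //
  // Worked example (added): assuming a VGPR encoding granule of 4, a kernel
  // using 17 VGPRs assembles to GRANULATED_WORKITEM_VGPR_COUNT =
  // ceil(17 / 4) - 1 = 4; the ".amdhsa_next_free_vgpr 20" printed above
  // reassembles to the same granulated value. The SGPR computation below
  // round-trips the same way.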

  uint32_t GranulatedWavefrontSGPRCount =
      (FourByteBuffer & COMPUTE_PGM_RSRC1_GRANULATED_WAVEFRONT_SGPR_COUNT) >>
      COMPUTE_PGM_RSRC1_GRANULATED_WAVEFRONT_SGPR_COUNT_SHIFT;

  if (isGFX10() && GranulatedWavefrontSGPRCount)
    return MCDisassembler::Fail;

  uint32_t NextFreeSGPR = (GranulatedWavefrontSGPRCount + 1) *
                          AMDGPU::IsaInfo::getSGPREncodingGranule(&STI);

  KdStream << Indent << ".amdhsa_reserve_vcc " << 0 << '\n';
  KdStream << Indent << ".amdhsa_reserve_flat_scratch " << 0 << '\n';
  KdStream << Indent << ".amdhsa_reserve_xnack_mask " << 0 << '\n';
  KdStream << Indent << ".amdhsa_next_free_sgpr " << NextFreeSGPR << "\n";

  if (FourByteBuffer & COMPUTE_PGM_RSRC1_PRIORITY)
    return MCDisassembler::Fail;

  PRINT_DIRECTIVE(".amdhsa_float_round_mode_32",
                  COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_32);
  PRINT_DIRECTIVE(".amdhsa_float_round_mode_16_64",
                  COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_16_64);
  PRINT_DIRECTIVE(".amdhsa_float_denorm_mode_32",
                  COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_32);
  PRINT_DIRECTIVE(".amdhsa_float_denorm_mode_16_64",
                  COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64);

  if (FourByteBuffer & COMPUTE_PGM_RSRC1_PRIV)
    return MCDisassembler::Fail;

  PRINT_DIRECTIVE(".amdhsa_dx10_clamp", COMPUTE_PGM_RSRC1_ENABLE_DX10_CLAMP);

  if (FourByteBuffer & COMPUTE_PGM_RSRC1_DEBUG_MODE)
    return MCDisassembler::Fail;

  PRINT_DIRECTIVE(".amdhsa_ieee_mode", COMPUTE_PGM_RSRC1_ENABLE_IEEE_MODE);

  if (FourByteBuffer & COMPUTE_PGM_RSRC1_BULKY)
    return MCDisassembler::Fail;

  if (FourByteBuffer & COMPUTE_PGM_RSRC1_CDBG_USER)
    return MCDisassembler::Fail;

  PRINT_DIRECTIVE(".amdhsa_fp16_overflow", COMPUTE_PGM_RSRC1_FP16_OVFL);

  if (FourByteBuffer & COMPUTE_PGM_RSRC1_RESERVED0)
    return MCDisassembler::Fail;

  if (isGFX10()) {
    PRINT_DIRECTIVE(".amdhsa_workgroup_processor_mode",
                    COMPUTE_PGM_RSRC1_WGP_MODE);
    PRINT_DIRECTIVE(".amdhsa_memory_ordered", COMPUTE_PGM_RSRC1_MEM_ORDERED);
    PRINT_DIRECTIVE(".amdhsa_forward_progress", COMPUTE_PGM_RSRC1_FWD_PROGRESS);
  }
  return MCDisassembler::Success;
}

// NOLINTNEXTLINE(readability-identifier-naming)
MCDisassembler::DecodeStatus AMDGPUDisassembler::decodeCOMPUTE_PGM_RSRC2(
    uint32_t FourByteBuffer, raw_string_ostream &KdStream) const {
  using namespace amdhsa;
  StringRef Indent = "\t";
  PRINT_DIRECTIVE(
      ".amdhsa_system_sgpr_private_segment_wavefront_offset",
      COMPUTE_PGM_RSRC2_ENABLE_SGPR_PRIVATE_SEGMENT_WAVEFRONT_OFFSET);
  PRINT_DIRECTIVE(".amdhsa_system_sgpr_workgroup_id_x",
                  COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X);
  PRINT_DIRECTIVE(".amdhsa_system_sgpr_workgroup_id_y",
                  COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Y);
  PRINT_DIRECTIVE(".amdhsa_system_sgpr_workgroup_id_z",
                  COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Z);
  PRINT_DIRECTIVE(".amdhsa_system_sgpr_workgroup_info",
                  COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_INFO);
  PRINT_DIRECTIVE(".amdhsa_system_vgpr_workitem_id",
                  COMPUTE_PGM_RSRC2_ENABLE_VGPR_WORKITEM_ID);

  if (FourByteBuffer & COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_ADDRESS_WATCH)
    return MCDisassembler::Fail;

  if (FourByteBuffer & COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_MEMORY)
    return MCDisassembler::Fail;

  if (FourByteBuffer & COMPUTE_PGM_RSRC2_GRANULATED_LDS_SIZE)
    return MCDisassembler::Fail;
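
  // (Added note): bits that no .amdhsa_* directive can express, such as the
  // exception-watch and granulated-LDS-size bits checked above, cannot
  // survive a reassembly round trip, so their presence fails the decode.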
  PRINT_DIRECTIVE(
      ".amdhsa_exception_fp_ieee_invalid_op",
      COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INVALID_OPERATION);
  PRINT_DIRECTIVE(".amdhsa_exception_fp_denorm_src",
                  COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_FP_DENORMAL_SOURCE);
  PRINT_DIRECTIVE(
      ".amdhsa_exception_fp_ieee_div_zero",
      COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_DIVISION_BY_ZERO);
  PRINT_DIRECTIVE(".amdhsa_exception_fp_ieee_overflow",
                  COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_OVERFLOW);
  PRINT_DIRECTIVE(".amdhsa_exception_fp_ieee_underflow",
                  COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_UNDERFLOW);
  PRINT_DIRECTIVE(".amdhsa_exception_fp_ieee_inexact",
                  COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INEXACT);
  PRINT_DIRECTIVE(".amdhsa_exception_int_div_zero",
                  COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_INT_DIVIDE_BY_ZERO);

  if (FourByteBuffer & COMPUTE_PGM_RSRC2_RESERVED0)
    return MCDisassembler::Fail;

  return MCDisassembler::Success;
}

#undef PRINT_DIRECTIVE

MCDisassembler::DecodeStatus
AMDGPUDisassembler::decodeKernelDescriptorDirective(
    DataExtractor::Cursor &Cursor, ArrayRef<uint8_t> Bytes,
    raw_string_ostream &KdStream) const {
#define PRINT_DIRECTIVE(DIRECTIVE, MASK)                                       \
  do {                                                                         \
    KdStream << Indent << DIRECTIVE " "                                        \
             << ((TwoByteBuffer & MASK) >> (MASK##_SHIFT)) << '\n';            \
  } while (0)

  uint16_t TwoByteBuffer = 0;
  uint32_t FourByteBuffer = 0;
  uint64_t EightByteBuffer = 0;

  StringRef ReservedBytes;
  StringRef Indent = "\t";

  DataExtractor DE(Bytes, /*IsLittleEndian=*/true, /*AddressSize=*/8);

  switch (Cursor.tell()) {
  case amdhsa::GROUP_SEGMENT_FIXED_SIZE_OFFSET:
    FourByteBuffer = DE.getU32(Cursor);
    checkError(Cursor);
    KdStream << Indent << ".amdhsa_group_segment_fixed_size " << FourByteBuffer
             << '\n';
    return MCDisassembler::Success;

  case amdhsa::PRIVATE_SEGMENT_FIXED_SIZE_OFFSET:
    FourByteBuffer = DE.getU32(Cursor);
    checkError(Cursor);
    KdStream << Indent << ".amdhsa_private_segment_fixed_size "
             << FourByteBuffer << '\n';
    return MCDisassembler::Success;

  case amdhsa::RESERVED0_OFFSET:
    // 8 reserved bytes, must be 0.
    EightByteBuffer = DE.getU64(Cursor);
    checkError(Cursor);
    if (EightByteBuffer) {
      return MCDisassembler::Fail;
    }
    return MCDisassembler::Success;

  case amdhsa::KERNEL_CODE_ENTRY_BYTE_OFFSET_OFFSET:
    // KERNEL_CODE_ENTRY_BYTE_OFFSET
    // So far no directive controls this for Code Object V3, so simply skip it
    // for disassembly.
    DE.skip(Cursor, 8);
    checkError(Cursor);
    return MCDisassembler::Success;

  case amdhsa::RESERVED1_OFFSET:
    // 20 reserved bytes, must be 0.
    ReservedBytes = DE.getBytes(Cursor, 20);
    checkError(Cursor);
    for (int I = 0; I < 20; ++I) {
      if (ReservedBytes[I] != 0) {
        return MCDisassembler::Fail;
      }
    }
    return MCDisassembler::Success;

  case amdhsa::COMPUTE_PGM_RSRC3_OFFSET:
    // COMPUTE_PGM_RSRC3
    //  - Only set for GFX10; GFX6-9 require this to be 0.
    //  - Currently no directives directly control this.
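    //  - (Added note) Since no directive can produce a nonzero RSRC3, a
    //    nonzero value on a pre-GFX10 target cannot round-trip and is
    //    rejected below.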
    FourByteBuffer = DE.getU32(Cursor);
    checkError(Cursor);
    if (!isGFX10() && FourByteBuffer) {
      return MCDisassembler::Fail;
    }
    return MCDisassembler::Success;

  case amdhsa::COMPUTE_PGM_RSRC1_OFFSET:
    FourByteBuffer = DE.getU32(Cursor);
    checkError(Cursor);
    if (decodeCOMPUTE_PGM_RSRC1(FourByteBuffer, KdStream) ==
        MCDisassembler::Fail) {
      return MCDisassembler::Fail;
    }
    return MCDisassembler::Success;

  case amdhsa::COMPUTE_PGM_RSRC2_OFFSET:
    FourByteBuffer = DE.getU32(Cursor);
    checkError(Cursor);
    if (decodeCOMPUTE_PGM_RSRC2(FourByteBuffer, KdStream) ==
        MCDisassembler::Fail) {
      return MCDisassembler::Fail;
    }
    return MCDisassembler::Success;

  case amdhsa::KERNEL_CODE_PROPERTIES_OFFSET:
    using namespace amdhsa;
    TwoByteBuffer = DE.getU16(Cursor);
    checkError(Cursor);

    PRINT_DIRECTIVE(".amdhsa_user_sgpr_private_segment_buffer",
                    KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER);
    PRINT_DIRECTIVE(".amdhsa_user_sgpr_dispatch_ptr",
                    KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR);
    PRINT_DIRECTIVE(".amdhsa_user_sgpr_queue_ptr",
                    KERNEL_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR);
    PRINT_DIRECTIVE(".amdhsa_user_sgpr_kernarg_segment_ptr",
                    KERNEL_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR);
    PRINT_DIRECTIVE(".amdhsa_user_sgpr_dispatch_id",
                    KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID);
    PRINT_DIRECTIVE(".amdhsa_user_sgpr_flat_scratch_init",
                    KERNEL_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT);
    PRINT_DIRECTIVE(".amdhsa_user_sgpr_private_segment_size",
                    KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE);

    if (TwoByteBuffer & KERNEL_CODE_PROPERTY_RESERVED0)
      return MCDisassembler::Fail;

    // Reserved for GFX9
    if (isGFX9() &&
        (TwoByteBuffer & KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32)) {
      return MCDisassembler::Fail;
    } else if (isGFX10()) {
      PRINT_DIRECTIVE(".amdhsa_wavefront_size32",
                      KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32);
    }

    if (TwoByteBuffer & KERNEL_CODE_PROPERTY_RESERVED1)
      return MCDisassembler::Fail;

    return MCDisassembler::Success;

  case amdhsa::RESERVED2_OFFSET:
    // 6 bytes from here are reserved, must be 0.
    ReservedBytes = DE.getBytes(Cursor, 6);
    checkError(Cursor);
    for (int I = 0; I < 6; ++I) {
      if (ReservedBytes[I] != 0)
        return MCDisassembler::Fail;
    }
    return MCDisassembler::Success;

  default:
    llvm_unreachable("Unhandled index. Case statements cover everything.");
    return MCDisassembler::Fail;
  }
#undef PRINT_DIRECTIVE
}

MCDisassembler::DecodeStatus AMDGPUDisassembler::decodeKernelDescriptor(
    StringRef KdName, ArrayRef<uint8_t> Bytes, uint64_t KdAddress) const {
  // CP microcode requires the kernel descriptor to be 64-byte aligned.
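  // (Added context): each decodeKernelDescriptorDirective call advances the
  // cursor past the field it decodes, so the loop below visits every field
  // of the 64-byte descriptor exactly once.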
  if (Bytes.size() != 64 || KdAddress % 64 != 0)
    return MCDisassembler::Fail;

  std::string Kd;
  raw_string_ostream KdStream(Kd);
  KdStream << ".amdhsa_kernel " << KdName << '\n';

  DataExtractor::Cursor C(0);
  while (C && C.tell() < Bytes.size()) {
    MCDisassembler::DecodeStatus Status =
        decodeKernelDescriptorDirective(C, Bytes, KdStream);

    if (Status == MCDisassembler::Fail)
      return MCDisassembler::Fail;
  }
  KdStream << ".end_amdhsa_kernel\n";
  outs() << KdStream.str();
  return MCDisassembler::Success;
}

Optional<MCDisassembler::DecodeStatus>
AMDGPUDisassembler::onSymbolStart(SymbolInfoTy &Symbol, uint64_t &Size,
                                  ArrayRef<uint8_t> Bytes, uint64_t Address,
                                  raw_ostream &CStream) const {
  // Right now only the kernel descriptor needs to be handled.
  // We ignore all other symbols for target specific handling.
  // TODO:
  // Fix the spurious symbol issue for AMDGPU kernels. Exists for both Code
  // Object V2 and V3 when symbols are marked protected.

  // amd_kernel_code_t for Code Object V2.
  if (Symbol.Type == ELF::STT_AMDGPU_HSA_KERNEL) {
    Size = 256;
    return MCDisassembler::Fail;
  }

  // Code Object V3 kernel descriptors.
  StringRef Name = Symbol.Name;
  if (Symbol.Type == ELF::STT_OBJECT && Name.endswith(StringRef(".kd"))) {
    Size = 64; // Size = 64 regardless of success or failure.
    return decodeKernelDescriptor(Name.drop_back(3), Bytes, Address);
  }
  return None;
}

//===----------------------------------------------------------------------===//
// AMDGPUSymbolizer
//===----------------------------------------------------------------------===//

// Try to find a symbol name for the specified label.
bool AMDGPUSymbolizer::tryAddingSymbolicOperand(MCInst &Inst,
                                raw_ostream &/*cStream*/, int64_t Value,
                                uint64_t /*Address*/, bool IsBranch,
                                uint64_t /*Offset*/, uint64_t /*InstSize*/) {

  if (!IsBranch) {
    return false;
  }

  auto *Symbols = static_cast<SectionSymbolsTy *>(DisInfo);
  if (!Symbols)
    return false;

  auto Result = std::find_if(Symbols->begin(), Symbols->end(),
                             [Value](const SymbolInfoTy& Val) {
                                return Val.Addr == static_cast<uint64_t>(Value)
                                    && Val.Type == ELF::STT_NOTYPE;
                             });
  if (Result != Symbols->end()) {
    auto *Sym = Ctx.getOrCreateSymbol(Result->Name);
    const auto *Add = MCSymbolRefExpr::create(Sym, Ctx);
    Inst.addOperand(MCOperand::createExpr(Add));
    return true;
  }
  return false;
}

void AMDGPUSymbolizer::tryAddingPcLoadReferenceComment(raw_ostream &cStream,
                                                       int64_t Value,
                                                       uint64_t Address) {
  llvm_unreachable("unimplemented");
}

//===----------------------------------------------------------------------===//
// Initialization
//===----------------------------------------------------------------------===//

static MCSymbolizer *createAMDGPUSymbolizer(const Triple &/*TT*/,
                              LLVMOpInfoCallback /*GetOpInfo*/,
                              LLVMSymbolLookupCallback /*SymbolLookUp*/,
                              void *DisInfo,
                              MCContext *Ctx,
                              std::unique_ptr<MCRelocationInfo> &&RelInfo) {
  return new AMDGPUSymbolizer(*Ctx, std::move(RelInfo), DisInfo);
}

static MCDisassembler *createAMDGPUDisassembler(const Target &T,
                                                const MCSubtargetInfo &STI,
                                                MCContext &Ctx) {
  return new AMDGPUDisassembler(STI, Ctx,
                                T.createMCInstrInfo());
}

extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUDisassembler() {
  TargetRegistry::RegisterMCDisassembler(getTheGCNTarget(),
                                         createAMDGPUDisassembler);
  TargetRegistry::RegisterMCSymbolizer(getTheGCNTarget(),
                                       createAMDGPUSymbolizer);
}