//===- SIPeepholeSDWA.cpp - Peephole optimization for SDWA instructions ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file This pass tries to apply several peephole SDWA patterns.
///
/// E.g. original:
///   V_LSHRREV_B32_e32 %0, 16, %1
///   V_ADD_I32_e32 %2, %0, %3
///   V_LSHLREV_B32_e32 %4, 16, %2
///
/// Replace:
///   V_ADD_I32_sdwa %4, %1, %3
///       dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
///
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIDefines.h"
#include "SIInstrInfo.h"
#include "SIRegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <memory>
#include <unordered_map>

using namespace llvm;

#define DEBUG_TYPE "si-peephole-sdwa"

STATISTIC(NumSDWAPatternsFound, "Number of SDWA patterns found.");
STATISTIC(NumSDWAInstructionsPeepholed,
          "Number of instructions converted to SDWA.");

namespace {

class SDWAOperand;
class SDWADstOperand;

class SIPeepholeSDWA : public MachineFunctionPass {
public:
  using SDWAOperandsVector = SmallVector<SDWAOperand *, 4>;

private:
  MachineRegisterInfo *MRI;
  const SIRegisterInfo *TRI;
  const SIInstrInfo *TII;

  std::unordered_map<MachineInstr *, std::unique_ptr<SDWAOperand>> SDWAOperands;
  std::unordered_map<MachineInstr *, SDWAOperandsVector> PotentialMatches;
  SmallVector<MachineInstr *, 8> ConvertedInstructions;

  Optional<int64_t> foldToImm(const MachineOperand &Op) const;

public:
  static char ID;

  SIPeepholeSDWA() : MachineFunctionPass(ID) {
    initializeSIPeepholeSDWAPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;
  void matchSDWAOperands(MachineBasicBlock &MBB);
  std::unique_ptr<SDWAOperand> matchSDWAOperand(MachineInstr &MI);
  bool isConvertibleToSDWA(MachineInstr &MI, const GCNSubtarget &ST) const;
  void pseudoOpConvertToVOP2(MachineInstr &MI,
                             const GCNSubtarget &ST) const;
  bool convertToSDWA(MachineInstr &MI, const SDWAOperandsVector &SDWAOperands);
  void legalizeScalarOperands(MachineInstr &MI, const GCNSubtarget &ST) const;

  StringRef getPassName() const override { return "SI Peephole SDWA"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
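    // The pass only rewrites instructions within their basic blocks, so the
    // CFG is left untouched.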
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

class SDWAOperand {
private:
  MachineOperand *Target; // Operand that would be used in converted instruction
  MachineOperand *Replaced; // Operand that would be replaced by Target

public:
  SDWAOperand(MachineOperand *TargetOp, MachineOperand *ReplacedOp)
      : Target(TargetOp), Replaced(ReplacedOp) {
    assert(Target->isReg());
    assert(Replaced->isReg());
  }

  virtual ~SDWAOperand() = default;

  virtual MachineInstr *potentialToConvert(const SIInstrInfo *TII) = 0;
  virtual bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) = 0;

  MachineOperand *getTargetOperand() const { return Target; }
  MachineOperand *getReplacedOperand() const { return Replaced; }
  MachineInstr *getParentInst() const { return Target->getParent(); }

  MachineRegisterInfo *getMRI() const {
    return &getParentInst()->getParent()->getParent()->getRegInfo();
  }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  virtual void print(raw_ostream& OS) const = 0;
  void dump() const { print(dbgs()); }
#endif
};

using namespace AMDGPU::SDWA;

class SDWASrcOperand : public SDWAOperand {
private:
  SdwaSel SrcSel;
  bool Abs;
  bool Neg;
  bool Sext;

public:
  SDWASrcOperand(MachineOperand *TargetOp, MachineOperand *ReplacedOp,
                 SdwaSel SrcSel_ = DWORD, bool Abs_ = false, bool Neg_ = false,
                 bool Sext_ = false)
      : SDWAOperand(TargetOp, ReplacedOp),
        SrcSel(SrcSel_), Abs(Abs_), Neg(Neg_), Sext(Sext_) {}

  MachineInstr *potentialToConvert(const SIInstrInfo *TII) override;
  bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) override;

  SdwaSel getSrcSel() const { return SrcSel; }
  bool getAbs() const { return Abs; }
  bool getNeg() const { return Neg; }
  bool getSext() const { return Sext; }

  uint64_t getSrcMods(const SIInstrInfo *TII,
                      const MachineOperand *SrcOp) const;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void print(raw_ostream& OS) const override;
#endif
};

class SDWADstOperand : public SDWAOperand {
private:
  SdwaSel DstSel;
  DstUnused DstUn;

public:

  SDWADstOperand(MachineOperand *TargetOp, MachineOperand *ReplacedOp,
                 SdwaSel DstSel_ = DWORD, DstUnused DstUn_ = UNUSED_PAD)
      : SDWAOperand(TargetOp, ReplacedOp), DstSel(DstSel_), DstUn(DstUn_) {}

  MachineInstr *potentialToConvert(const SIInstrInfo *TII) override;
  bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) override;

  SdwaSel getDstSel() const { return DstSel; }
  DstUnused getDstUnused() const { return DstUn; }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void print(raw_ostream& OS) const override;
#endif
};

class SDWADstPreserveOperand : public SDWADstOperand {
private:
  MachineOperand *Preserve;

public:
  SDWADstPreserveOperand(MachineOperand *TargetOp, MachineOperand *ReplacedOp,
                         MachineOperand *PreserveOp, SdwaSel DstSel_ = DWORD)
      : SDWADstOperand(TargetOp, ReplacedOp, DstSel_, UNUSED_PRESERVE),
        Preserve(PreserveOp) {}

  bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) override;

  MachineOperand *getPreservedOperand() const { return Preserve; }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void print(raw_ostream& OS) const override;
#endif
};

} // end anonymous namespace

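// In outline: runOnMachineFunction processes each basic block, repeatedly
// matching shift/and/or/bfe idioms as SDWAOperands (matchSDWAOperands),
// shrinking V_{ADD|SUB}_I32_e64 pairs so they can become SDWA candidates
// (pseudoOpConvertToVOP2), folding the matched operands into an _sdwa form of
// the candidate instruction (convertToSDWA), and finally copying immediates
// and extra SGPR operands into VGPRs (legalizeScalarOperands), since SDWA
// encodings cannot take them directly.
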
INITIALIZE_PASS(SIPeepholeSDWA, DEBUG_TYPE, "SI Peephole SDWA", false, false)

char SIPeepholeSDWA::ID = 0;

char &llvm::SIPeepholeSDWAID = SIPeepholeSDWA::ID;

FunctionPass *llvm::createSIPeepholeSDWAPass() {
  return new SIPeepholeSDWA();
}


#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
static raw_ostream& operator<<(raw_ostream &OS, SdwaSel Sel) {
  switch(Sel) {
  case BYTE_0: OS << "BYTE_0"; break;
  case BYTE_1: OS << "BYTE_1"; break;
  case BYTE_2: OS << "BYTE_2"; break;
  case BYTE_3: OS << "BYTE_3"; break;
  case WORD_0: OS << "WORD_0"; break;
  case WORD_1: OS << "WORD_1"; break;
  case DWORD: OS << "DWORD"; break;
  }
  return OS;
}

static raw_ostream& operator<<(raw_ostream &OS, const DstUnused &Un) {
  switch(Un) {
  case UNUSED_PAD: OS << "UNUSED_PAD"; break;
  case UNUSED_SEXT: OS << "UNUSED_SEXT"; break;
  case UNUSED_PRESERVE: OS << "UNUSED_PRESERVE"; break;
  }
  return OS;
}

static raw_ostream& operator<<(raw_ostream &OS, const SDWAOperand &Operand) {
  Operand.print(OS);
  return OS;
}

LLVM_DUMP_METHOD
void SDWASrcOperand::print(raw_ostream& OS) const {
  OS << "SDWA src: " << *getTargetOperand()
     << " src_sel:" << getSrcSel()
     << " abs:" << getAbs() << " neg:" << getNeg()
     << " sext:" << getSext() << '\n';
}

LLVM_DUMP_METHOD
void SDWADstOperand::print(raw_ostream& OS) const {
  OS << "SDWA dst: " << *getTargetOperand()
     << " dst_sel:" << getDstSel()
     << " dst_unused:" << getDstUnused() << '\n';
}

LLVM_DUMP_METHOD
void SDWADstPreserveOperand::print(raw_ostream& OS) const {
  OS << "SDWA preserve dst: " << *getTargetOperand()
     << " dst_sel:" << getDstSel()
     << " preserve:" << *getPreservedOperand() << '\n';
}

#endif

static void copyRegOperand(MachineOperand &To, const MachineOperand &From) {
  assert(To.isReg() && From.isReg());
  To.setReg(From.getReg());
  To.setSubReg(From.getSubReg());
  To.setIsUndef(From.isUndef());
  if (To.isUse()) {
    To.setIsKill(From.isKill());
  } else {
    To.setIsDead(From.isDead());
  }
}

static bool isSameReg(const MachineOperand &LHS, const MachineOperand &RHS) {
  return LHS.isReg() &&
         RHS.isReg() &&
         LHS.getReg() == RHS.getReg() &&
         LHS.getSubReg() == RHS.getSubReg();
}

static MachineOperand *findSingleRegUse(const MachineOperand *Reg,
                                        const MachineRegisterInfo *MRI) {
  if (!Reg->isReg() || !Reg->isDef())
    return nullptr;

  MachineOperand *ResMO = nullptr;
  for (MachineOperand &UseMO : MRI->use_nodbg_operands(Reg->getReg())) {
    // If there exists a use of a subreg of Reg then return nullptr
    if (!isSameReg(UseMO, *Reg))
      return nullptr;

    // Check that there is only one instruction that uses Reg
    if (!ResMO) {
      ResMO = &UseMO;
    } else if (ResMO->getParent() != UseMO.getParent()) {
      return nullptr;
    }
  }

  return ResMO;
}

static MachineOperand *findSingleRegDef(const MachineOperand *Reg,
                                        const MachineRegisterInfo *MRI) {
  if (!Reg->isReg())
    return nullptr;

  MachineInstr *DefInstr = MRI->getUniqueVRegDef(Reg->getReg());
  if (!DefInstr)
    return nullptr;

  for (auto &DefMO : DefInstr->defs()) {
    if (DefMO.isReg() && DefMO.getReg() == Reg->getReg())
      return &DefMO;
  }

  // Ignore implicit defs.
  return nullptr;
}

uint64_t SDWASrcOperand::getSrcMods(const SIInstrInfo *TII,
                                    const MachineOperand *SrcOp) const {
  uint64_t Mods = 0;
  const auto *MI = SrcOp->getParent();
  if (TII->getNamedOperand(*MI, AMDGPU::OpName::src0) == SrcOp) {
    if (auto *Mod = TII->getNamedOperand(*MI, AMDGPU::OpName::src0_modifiers)) {
      Mods = Mod->getImm();
    }
  } else if (TII->getNamedOperand(*MI, AMDGPU::OpName::src1) == SrcOp) {
    if (auto *Mod = TII->getNamedOperand(*MI, AMDGPU::OpName::src1_modifiers)) {
      Mods = Mod->getImm();
    }
  }
  if (Abs || Neg) {
    assert(!Sext &&
           "Float and integer src modifiers can't be set simultaneously");
    Mods |= Abs ? SISrcMods::ABS : 0;
    Mods ^= Neg ? SISrcMods::NEG : 0;
  } else if (Sext) {
    Mods |= SISrcMods::SEXT;
  }

  return Mods;
}

MachineInstr *SDWASrcOperand::potentialToConvert(const SIInstrInfo *TII) {
  // For an SDWA src operand, the potential instruction is the one that uses
  // the register defined by the parent instruction.
  MachineOperand *PotentialMO = findSingleRegUse(getReplacedOperand(), getMRI());
  if (!PotentialMO)
    return nullptr;

  return PotentialMO->getParent();
}

bool SDWASrcOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) {
  // Find the operand in MI that matches the replaced operand and substitute
  // the target operand for it. Set the corresponding src_sel.
  bool IsPreserveSrc = false;
  MachineOperand *Src = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
  MachineOperand *SrcSel = TII->getNamedOperand(MI, AMDGPU::OpName::src0_sel);
  MachineOperand *SrcMods =
      TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers);
  assert(Src && (Src->isReg() || Src->isImm()));
  if (!isSameReg(*Src, *getReplacedOperand())) {
    // If this is not src0 then it could be src1
    Src = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    SrcSel = TII->getNamedOperand(MI, AMDGPU::OpName::src1_sel);
    SrcMods = TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers);

    if (!Src ||
        !isSameReg(*Src, *getReplacedOperand())) {
      // It's possible this Src is a tied operand for
      // UNUSED_PRESERVE, in which case we can either
      // abandon the peephole attempt, or if legal we can
      // copy the target operand into the tied slot
      // if the preserve operation will effectively cause the same
      // result by overwriting the rest of the dst.
      MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);
      MachineOperand *DstUnused =
          TII->getNamedOperand(MI, AMDGPU::OpName::dst_unused);

      if (Dst &&
          DstUnused->getImm() == AMDGPU::SDWA::DstUnused::UNUSED_PRESERVE) {
        // This will work if the tied src is accessing WORD_0, and the dst is
        // writing WORD_1. Modifiers don't matter because all the bits that
        // would be impacted are being overwritten by the dst.
        // Any other case will not work.
        SdwaSel DstSel = static_cast<SdwaSel>(
            TII->getNamedImmOperand(MI, AMDGPU::OpName::dst_sel));
        if (DstSel == AMDGPU::SDWA::SdwaSel::WORD_1 &&
            getSrcSel() == AMDGPU::SDWA::SdwaSel::WORD_0) {
          IsPreserveSrc = true;
          auto DstIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                                   AMDGPU::OpName::vdst);
          auto TiedIdx = MI.findTiedOperandIdx(DstIdx);
          Src = &MI.getOperand(TiedIdx);
          SrcSel = nullptr;
          SrcMods = nullptr;
        } else {
          // Not legal to convert this src
          return false;
        }
      }
    }
    assert(Src && Src->isReg());

    if ((MI.getOpcode() == AMDGPU::V_MAC_F16_sdwa ||
         MI.getOpcode() == AMDGPU::V_MAC_F32_sdwa) &&
        !isSameReg(*Src, *getReplacedOperand())) {
      // In case of v_mac_f16/32_sdwa this pass can try to apply src operand to
      // src2. This is not allowed.
      return false;
    }

    assert(isSameReg(*Src, *getReplacedOperand()) &&
           (IsPreserveSrc || (SrcSel && SrcMods)));
  }
  copyRegOperand(*Src, *getTargetOperand());
  if (!IsPreserveSrc) {
    SrcSel->setImm(getSrcSel());
    SrcMods->setImm(getSrcMods(TII, Src));
  }
  getTargetOperand()->setIsKill(false);
  return true;
}

MachineInstr *SDWADstOperand::potentialToConvert(const SIInstrInfo *TII) {
  // For an SDWA dst operand, the potential instruction is the one that defines
  // the register that this operand uses.
  MachineRegisterInfo *MRI = getMRI();
  MachineInstr *ParentMI = getParentInst();

  MachineOperand *PotentialMO = findSingleRegDef(getReplacedOperand(), MRI);
  if (!PotentialMO)
    return nullptr;

  // Check that ParentMI is the only instruction that uses the replaced register
  for (MachineInstr &UseInst : MRI->use_nodbg_instructions(PotentialMO->getReg())) {
    if (&UseInst != ParentMI)
      return nullptr;
  }

  return PotentialMO->getParent();
}

bool SDWADstOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) {
  // Replace the vdst operand in MI with the target operand. Set dst_sel and
  // dst_unused.

  if ((MI.getOpcode() == AMDGPU::V_MAC_F16_sdwa ||
       MI.getOpcode() == AMDGPU::V_MAC_F32_sdwa) &&
      getDstSel() != AMDGPU::SDWA::DWORD) {
    // v_mac_f16/32_sdwa allow dst_sel to be equal only to DWORD
    return false;
  }

  MachineOperand *Operand = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);
  assert(Operand &&
         Operand->isReg() &&
         isSameReg(*Operand, *getReplacedOperand()));
  copyRegOperand(*Operand, *getTargetOperand());
  MachineOperand *DstSel = TII->getNamedOperand(MI, AMDGPU::OpName::dst_sel);
  assert(DstSel);
  DstSel->setImm(getDstSel());
  MachineOperand *DstUnused = TII->getNamedOperand(MI, AMDGPU::OpName::dst_unused);
  assert(DstUnused);
  DstUnused->setImm(getDstUnused());

  // Remove the original instruction because it would conflict with our new
  // instruction by register definition.
  getParentInst()->eraseFromParent();
  return true;
}

bool SDWADstPreserveOperand::convertToSDWA(MachineInstr &MI,
                                           const SIInstrInfo *TII) {
  // MI should be moved right before v_or_b32.
  // For this we should clear all kill flags on uses of MI src-operands or else
  // we can encounter a problem with the use of a killed operand.
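  // The preserved value is later attached as an implicit use that is tied to
  // vdst; this is how UNUSED_PRESERVE's semantics (the bits of vdst the SDWA
  // write leaves untouched come from the tied source) are represented at the
  // MIR level.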
  for (MachineOperand &MO : MI.uses()) {
    if (!MO.isReg())
      continue;
    getMRI()->clearKillFlags(MO.getReg());
  }

  // Move MI before v_or_b32
  auto MBB = MI.getParent();
  MBB->remove(&MI);
  MBB->insert(getParentInst(), &MI);

  // Add an implicit use of the preserved register
  MachineInstrBuilder MIB(*MBB->getParent(), MI);
  MIB.addReg(getPreservedOperand()->getReg(),
             RegState::ImplicitKill,
             getPreservedOperand()->getSubReg());

  // Tie dst to the implicit use
  MI.tieOperands(AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdst),
                 MI.getNumOperands() - 1);

  // Convert MI as any other SDWADstOperand and remove v_or_b32
  return SDWADstOperand::convertToSDWA(MI, TII);
}

Optional<int64_t> SIPeepholeSDWA::foldToImm(const MachineOperand &Op) const {
  if (Op.isImm()) {
    return Op.getImm();
  }

  // If this is not an immediate then it can be a copy of an immediate value, e.g.:
  // %1 = S_MOV_B32 255;
  if (Op.isReg()) {
    for (const MachineOperand &Def : MRI->def_operands(Op.getReg())) {
      if (!isSameReg(Op, Def))
        continue;

      const MachineInstr *DefInst = Def.getParent();
      if (!TII->isFoldableCopy(*DefInst))
        return None;

      const MachineOperand &Copied = DefInst->getOperand(1);
      if (!Copied.isImm())
        return None;

      return Copied.getImm();
    }
  }

  return None;
}

std::unique_ptr<SDWAOperand>
SIPeepholeSDWA::matchSDWAOperand(MachineInstr &MI) {
  unsigned Opcode = MI.getOpcode();
  switch (Opcode) {
  case AMDGPU::V_LSHRREV_B32_e32:
  case AMDGPU::V_ASHRREV_I32_e32:
  case AMDGPU::V_LSHLREV_B32_e32:
  case AMDGPU::V_LSHRREV_B32_e64:
  case AMDGPU::V_ASHRREV_I32_e64:
  case AMDGPU::V_LSHLREV_B32_e64: {
    // from: v_lshrrev_b32_e32 v1, 16/24, v0
    // to SDWA src:v0 src_sel:WORD_1/BYTE_3

    // from: v_ashrrev_i32_e32 v1, 16/24, v0
    // to SDWA src:v0 src_sel:WORD_1/BYTE_3 sext:1

    // from: v_lshlrev_b32_e32 v1, 16/24, v0
    // to SDWA dst:v1 dst_sel:WORD_1/BYTE_3 dst_unused:UNUSED_PAD
    MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    auto Imm = foldToImm(*Src0);
    if (!Imm)
      break;

    if (*Imm != 16 && *Imm != 24)
      break;

    MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);
    if (TRI->isPhysicalRegister(Src1->getReg()) ||
        TRI->isPhysicalRegister(Dst->getReg()))
      break;

    if (Opcode == AMDGPU::V_LSHLREV_B32_e32 ||
        Opcode == AMDGPU::V_LSHLREV_B32_e64) {
      return make_unique<SDWADstOperand>(
          Dst, Src1, *Imm == 16 ? WORD_1 : BYTE_3, UNUSED_PAD);
    } else {
      return make_unique<SDWASrcOperand>(
          Src1, Dst, *Imm == 16 ? WORD_1 : BYTE_3, false, false,
          Opcode != AMDGPU::V_LSHRREV_B32_e32 &&
              Opcode != AMDGPU::V_LSHRREV_B32_e64);
    }
    break;
  }

  case AMDGPU::V_LSHRREV_B16_e32:
  case AMDGPU::V_ASHRREV_I16_e32:
  case AMDGPU::V_LSHLREV_B16_e32:
  case AMDGPU::V_LSHRREV_B16_e64:
  case AMDGPU::V_ASHRREV_I16_e64:
  case AMDGPU::V_LSHLREV_B16_e64: {
    // from: v_lshrrev_b16_e32 v1, 8, v0
    // to SDWA src:v0 src_sel:BYTE_1

    // from: v_ashrrev_i16_e32 v1, 8, v0
    // to SDWA src:v0 src_sel:BYTE_1 sext:1

    // from: v_lshlrev_b16_e32 v1, 8, v0
    // to SDWA dst:v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD
    MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    auto Imm = foldToImm(*Src0);
    if (!Imm || *Imm != 8)
      break;

    MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);

    if (TRI->isPhysicalRegister(Src1->getReg()) ||
        TRI->isPhysicalRegister(Dst->getReg()))
      break;

    if (Opcode == AMDGPU::V_LSHLREV_B16_e32 ||
        Opcode == AMDGPU::V_LSHLREV_B16_e64) {
      return make_unique<SDWADstOperand>(Dst, Src1, BYTE_1, UNUSED_PAD);
    } else {
      return make_unique<SDWASrcOperand>(
          Src1, Dst, BYTE_1, false, false,
          Opcode != AMDGPU::V_LSHRREV_B16_e32 &&
              Opcode != AMDGPU::V_LSHRREV_B16_e64);
    }
    break;
  }

  case AMDGPU::V_BFE_I32:
  case AMDGPU::V_BFE_U32: {
    // e.g.:
    // from: v_bfe_u32 v1, v0, 8, 8
    // to SDWA src:v0 src_sel:BYTE_1

    // offset | width | src_sel
    // ------------------------
    // 0      | 8     | BYTE_0
    // 0      | 16    | WORD_0
    // 0      | 32    | DWORD ?
    // 8      | 8     | BYTE_1
    // 16     | 8     | BYTE_2
    // 16     | 16    | WORD_1
    // 24     | 8     | BYTE_3

    MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    auto Offset = foldToImm(*Src1);
    if (!Offset)
      break;

    MachineOperand *Src2 = TII->getNamedOperand(MI, AMDGPU::OpName::src2);
    auto Width = foldToImm(*Src2);
    if (!Width)
      break;

    SdwaSel SrcSel = DWORD;

    if (*Offset == 0 && *Width == 8)
      SrcSel = BYTE_0;
    else if (*Offset == 0 && *Width == 16)
      SrcSel = WORD_0;
    else if (*Offset == 0 && *Width == 32)
      SrcSel = DWORD;
    else if (*Offset == 8 && *Width == 8)
      SrcSel = BYTE_1;
    else if (*Offset == 16 && *Width == 8)
      SrcSel = BYTE_2;
    else if (*Offset == 16 && *Width == 16)
      SrcSel = WORD_1;
    else if (*Offset == 24 && *Width == 8)
      SrcSel = BYTE_3;
    else
      break;

    MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);

    if (TRI->isPhysicalRegister(Src0->getReg()) ||
        TRI->isPhysicalRegister(Dst->getReg()))
      break;

    return make_unique<SDWASrcOperand>(
        Src0, Dst, SrcSel, false, false, Opcode != AMDGPU::V_BFE_U32);
  }

  case AMDGPU::V_AND_B32_e32:
  case AMDGPU::V_AND_B32_e64: {
    // e.g.:
    // from: v_and_b32_e32 v1, 0x0000ffff/0x000000ff, v0
    // to SDWA src:v0 src_sel:WORD_0/BYTE_0

    MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    auto ValSrc = Src1;
    auto Imm = foldToImm(*Src0);

    if (!Imm) {
      Imm = foldToImm(*Src1);
      ValSrc = Src0;
    }

    if (!Imm || (*Imm != 0x0000ffff && *Imm != 0x000000ff))
      break;
    MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);

    if (TRI->isPhysicalRegister(ValSrc->getReg()) ||
        TRI->isPhysicalRegister(Dst->getReg()))
      break;

    return make_unique<SDWASrcOperand>(
        ValSrc, Dst, *Imm == 0x0000ffff ? WORD_0 : BYTE_0);
  }

  case AMDGPU::V_OR_B32_e32:
  case AMDGPU::V_OR_B32_e64: {
    // Patterns for dst_unused:UNUSED_PRESERVE.
    // e.g., from:
    // v_add_f16_sdwa v0, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD
    //                           src0_sel:WORD_1 src1_sel:WORD_1
    // v_add_f16_e32 v3, v1, v2
    // v_or_b32_e32 v4, v0, v3
    // to SDWA preserve dst:v4 dst_sel:WORD_1 dst_unused:UNUSED_PRESERVE preserve:v3

    // Check if one of the operands of v_or_b32 is an SDWA instruction
    using CheckRetType = Optional<std::pair<MachineOperand *, MachineOperand *>>;
    auto CheckOROperandsForSDWA =
      [&](const MachineOperand *Op1, const MachineOperand *Op2) -> CheckRetType {
        if (!Op1 || !Op1->isReg() || !Op2 || !Op2->isReg())
          return CheckRetType(None);

        MachineOperand *Op1Def = findSingleRegDef(Op1, MRI);
        if (!Op1Def)
          return CheckRetType(None);

        MachineInstr *Op1Inst = Op1Def->getParent();
        if (!TII->isSDWA(*Op1Inst))
          return CheckRetType(None);

        MachineOperand *Op2Def = findSingleRegDef(Op2, MRI);
        if (!Op2Def)
          return CheckRetType(None);

        return CheckRetType(std::make_pair(Op1Def, Op2Def));
      };

    MachineOperand *OrSDWA = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    MachineOperand *OrOther = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    assert(OrSDWA && OrOther);
    auto Res = CheckOROperandsForSDWA(OrSDWA, OrOther);
    if (!Res) {
      OrSDWA = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
      OrOther = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
      assert(OrSDWA && OrOther);
      Res = CheckOROperandsForSDWA(OrSDWA, OrOther);
      if (!Res)
        break;
    }

    MachineOperand *OrSDWADef = Res->first;
    MachineOperand *OrOtherDef = Res->second;
    assert(OrSDWADef && OrOtherDef);

    MachineInstr *SDWAInst = OrSDWADef->getParent();
    MachineInstr *OtherInst = OrOtherDef->getParent();

    // Check that OtherInst is actually bitwise compatible with SDWAInst, i.e.
    // that their destination bit patterns don't overlap. A compatible
    // instruction can be either a regular instruction with compatible bitness
    // or an SDWA instruction with a correct dst_sel.
    // SDWAInst | OtherInst bitness      / OtherInst dst_sel
    // -----------------------------------------------------
    // DWORD    | no                     / no
    // WORD_0   | no                     / BYTE_2/3, WORD_1
    // WORD_1   | 8/16-bit instructions  / BYTE_0/1, WORD_0
    // BYTE_0   | no                     / BYTE_1/2/3, WORD_1
    // BYTE_1   | 8-bit                  / BYTE_0/2/3, WORD_1
    // BYTE_2   | 8/16-bit               / BYTE_0/1/3, WORD_0
    // BYTE_3   | 8/16/24-bit            / BYTE_0/1/2, WORD_0
    // E.g. if SDWAInst is v_add_f16_sdwa dst_sel:WORD_1 then v_add_f16 is OK
    // but v_add_f32 is not.

    // TODO: add support for non-SDWA instructions as OtherInst.
    // For now this only works with SDWA instructions. For regular instructions
    // there is no way to determine if the instruction writes only 8/16/24 bits
    // out of the full register size, and all registers are at minimum 32-bit wide.
    if (!TII->isSDWA(*OtherInst))
      break;

    SdwaSel DstSel = static_cast<SdwaSel>(
        TII->getNamedImmOperand(*SDWAInst, AMDGPU::OpName::dst_sel));
    SdwaSel OtherDstSel = static_cast<SdwaSel>(
        TII->getNamedImmOperand(*OtherInst, AMDGPU::OpName::dst_sel));

    bool DstSelAgree = false;
    switch (DstSel) {
    case WORD_0: DstSelAgree = ((OtherDstSel == BYTE_2) ||
                                (OtherDstSel == BYTE_3) ||
                                (OtherDstSel == WORD_1));
      break;
    case WORD_1: DstSelAgree = ((OtherDstSel == BYTE_0) ||
                                (OtherDstSel == BYTE_1) ||
                                (OtherDstSel == WORD_0));
      break;
    case BYTE_0: DstSelAgree = ((OtherDstSel == BYTE_1) ||
                                (OtherDstSel == BYTE_2) ||
                                (OtherDstSel == BYTE_3) ||
                                (OtherDstSel == WORD_1));
      break;
    case BYTE_1: DstSelAgree = ((OtherDstSel == BYTE_0) ||
                                (OtherDstSel == BYTE_2) ||
                                (OtherDstSel == BYTE_3) ||
                                (OtherDstSel == WORD_1));
      break;
    case BYTE_2: DstSelAgree = ((OtherDstSel == BYTE_0) ||
                                (OtherDstSel == BYTE_1) ||
                                (OtherDstSel == BYTE_3) ||
                                (OtherDstSel == WORD_0));
      break;
    case BYTE_3: DstSelAgree = ((OtherDstSel == BYTE_0) ||
                                (OtherDstSel == BYTE_1) ||
                                (OtherDstSel == BYTE_2) ||
                                (OtherDstSel == WORD_0));
      break;
    default: DstSelAgree = false;
    }

    if (!DstSelAgree)
      break;

    // Also OtherInst dst_unused should be UNUSED_PAD
    DstUnused OtherDstUnused = static_cast<DstUnused>(
        TII->getNamedImmOperand(*OtherInst, AMDGPU::OpName::dst_unused));
    if (OtherDstUnused != DstUnused::UNUSED_PAD)
      break;

    // Create DstPreserveOperand
    MachineOperand *OrDst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);
    assert(OrDst && OrDst->isReg());

    return make_unique<SDWADstPreserveOperand>(
        OrDst, OrSDWADef, OrOtherDef, DstSel);

  }
  }

  return std::unique_ptr<SDWAOperand>(nullptr);
}

void SIPeepholeSDWA::matchSDWAOperands(MachineBasicBlock &MBB) {
  for (MachineInstr &MI : MBB) {
    if (auto Operand = matchSDWAOperand(MI)) {
      LLVM_DEBUG(dbgs() << "Match: " << MI << "To: " << *Operand << '\n');
      SDWAOperands[&MI] = std::move(Operand);
      ++NumSDWAPatternsFound;
    }
  }
}

// Convert the V_ADDC_U32_e64 into V_ADDC_U32_e32, and
// V_ADD_I32_e64 into V_ADD_I32_e32. This allows isConvertibleToSDWA
// to perform its transformation on V_ADD_I32_e32 into V_ADD_I32_sdwa.
//
// We are transforming from a VOP3 into a VOP2 form of the instruction.
//   %19:vgpr_32 = V_AND_B32_e32 255,
//       killed %16:vgpr_32, implicit $exec
//   %47:vgpr_32, %49:sreg_64_xexec = V_ADD_I32_e64
//       %26.sub0:vreg_64, %19:vgpr_32, implicit $exec
//   %48:vgpr_32, dead %50:sreg_64_xexec = V_ADDC_U32_e64
//       %26.sub1:vreg_64, %54:vgpr_32, killed %49:sreg_64_xexec, implicit $exec
//
// becomes
//   %47:vgpr_32 = V_ADD_I32_sdwa
//       0, %26.sub0:vreg_64, 0, killed %16:vgpr_32, 0, 6, 0, 6, 0,
//       implicit-def $vcc, implicit $exec
//   %48:vgpr_32 = V_ADDC_U32_e32
//       0, %26.sub1:vreg_64, implicit-def $vcc, implicit $vcc, implicit $exec
void SIPeepholeSDWA::pseudoOpConvertToVOP2(MachineInstr &MI,
                                           const GCNSubtarget &ST) const {
  int Opc = MI.getOpcode();
  assert((Opc == AMDGPU::V_ADD_I32_e64 || Opc == AMDGPU::V_SUB_I32_e64) &&
         "Currently only handles V_ADD_I32_e64 or V_SUB_I32_e64");

  // Can the candidate MI be shrunk?
  if (!TII->canShrink(MI, *MRI))
    return;
  Opc = AMDGPU::getVOPe32(Opc);
  // Find the related ADD instruction.
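  // (The carry-out produced by MI through sdst should have a single user; that
  // user is the matching V_ADDC_U32/V_SUBB_U32 which gets paired with MI below.)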
  const MachineOperand *Sdst = TII->getNamedOperand(MI, AMDGPU::OpName::sdst);
  if (!Sdst)
    return;
  MachineOperand *NextOp = findSingleRegUse(Sdst, MRI);
  if (!NextOp)
    return;
  MachineInstr &MISucc = *NextOp->getParent();
  // Can the successor be shrunk?
  if (!TII->canShrink(MISucc, *MRI))
    return;
  int SuccOpc = AMDGPU::getVOPe32(MISucc.getOpcode());
  // Make sure the carry in/out are subsequently unused.
  MachineOperand *CarryIn = TII->getNamedOperand(MISucc, AMDGPU::OpName::src2);
  if (!CarryIn)
    return;
  MachineOperand *CarryOut = TII->getNamedOperand(MISucc, AMDGPU::OpName::sdst);
  if (!CarryOut)
    return;
  if (!MRI->hasOneUse(CarryIn->getReg()) || !MRI->use_empty(CarryOut->getReg()))
    return;
  // Make sure VCC or its subregs are dead before MI.
  MachineBasicBlock &MBB = *MI.getParent();
  auto Liveness = MBB.computeRegisterLiveness(TRI, AMDGPU::VCC, MI, 25);
  if (Liveness != MachineBasicBlock::LQR_Dead)
    return;
  // Check if VCC is referenced in range of (MI,MISucc].
  for (auto I = std::next(MI.getIterator()), E = MISucc.getIterator();
       I != E; ++I) {
    if (I->modifiesRegister(AMDGPU::VCC, TRI))
      return;
  }
  // Make the two new e32 instruction variants.
  // Replace MI with V_{SUB|ADD}_I32_e32
  auto NewMI = BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(Opc));
  NewMI.add(*TII->getNamedOperand(MI, AMDGPU::OpName::vdst));
  NewMI.add(*TII->getNamedOperand(MI, AMDGPU::OpName::src0));
  NewMI.add(*TII->getNamedOperand(MI, AMDGPU::OpName::src1));
  MI.eraseFromParent();
  // Replace MISucc with V_{SUBB|ADDC}_U32_e32
  auto NewInst = BuildMI(MBB, MISucc, MISucc.getDebugLoc(), TII->get(SuccOpc));
  NewInst.add(*TII->getNamedOperand(MISucc, AMDGPU::OpName::vdst));
  NewInst.add(*TII->getNamedOperand(MISucc, AMDGPU::OpName::src0));
  NewInst.add(*TII->getNamedOperand(MISucc, AMDGPU::OpName::src1));
  MISucc.eraseFromParent();
}

bool SIPeepholeSDWA::isConvertibleToSDWA(MachineInstr &MI,
                                         const GCNSubtarget &ST) const {
  // Check if this is already an SDWA instruction
  unsigned Opc = MI.getOpcode();
  if (TII->isSDWA(Opc))
    return true;

  // Check if this instruction has an opcode that supports SDWA
  if (AMDGPU::getSDWAOp(Opc) == -1)
    Opc = AMDGPU::getVOPe32(Opc);

  if (AMDGPU::getSDWAOp(Opc) == -1)
    return false;

  if (!ST.hasSDWAOmod() && TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
    return false;

  if (TII->isVOPC(Opc)) {
    if (!ST.hasSDWASdst()) {
      const MachineOperand *SDst = TII->getNamedOperand(MI, AMDGPU::OpName::sdst);
      if (SDst && SDst->getReg() != AMDGPU::VCC)
        return false;
    }

    if (!ST.hasSDWAOutModsVOPC() &&
        (TII->hasModifiersSet(MI, AMDGPU::OpName::clamp) ||
         TII->hasModifiersSet(MI, AMDGPU::OpName::omod)))
      return false;

  } else if (TII->getNamedOperand(MI, AMDGPU::OpName::sdst) ||
             !TII->getNamedOperand(MI, AMDGPU::OpName::vdst)) {
    return false;
  }

  if (!ST.hasSDWAMac() && (Opc == AMDGPU::V_MAC_F16_e32 ||
                           Opc == AMDGPU::V_MAC_F32_e32))
    return false;

  // FIXME: has SDWA but requires handling of the implicit VCC use
  if (Opc == AMDGPU::V_CNDMASK_B32_e32)
    return false;

  return true;
}

bool SIPeepholeSDWA::convertToSDWA(MachineInstr &MI,
                                   const SDWAOperandsVector &SDWAOperands) {

  LLVM_DEBUG(dbgs() << "Convert instruction:" << MI);

  // Convert to sdwa
  int SDWAOpcode;
  unsigned Opcode = MI.getOpcode();
  if (TII->isSDWA(Opcode)) {
    SDWAOpcode = Opcode;
  } else {
    SDWAOpcode = AMDGPU::getSDWAOp(Opcode);
    if (SDWAOpcode == -1)
      SDWAOpcode = AMDGPU::getSDWAOp(AMDGPU::getVOPe32(Opcode));
  }
  assert(SDWAOpcode != -1);

  const MCInstrDesc &SDWADesc = TII->get(SDWAOpcode);

  // Create SDWA version of instruction MI and initialize its operands
  MachineInstrBuilder SDWAInst =
      BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), SDWADesc);

  // Copy dst, if it is present in original then should also be present in SDWA
  MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);
  if (Dst) {
    assert(AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::vdst) != -1);
    SDWAInst.add(*Dst);
  } else if ((Dst = TII->getNamedOperand(MI, AMDGPU::OpName::sdst))) {
    assert(Dst &&
           AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::sdst) != -1);
    SDWAInst.add(*Dst);
  } else {
    assert(AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::sdst) != -1);
    SDWAInst.addReg(AMDGPU::VCC, RegState::Define);
  }

  // Copy src0, initialize src0_modifiers. All sdwa instructions have src0 and
  // src0_modifiers (except for v_nop_sdwa, but it can't get here)
  MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
  assert(
    Src0 &&
    AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::src0) != -1 &&
    AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::src0_modifiers) != -1);
  if (auto *Mod = TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers))
    SDWAInst.addImm(Mod->getImm());
  else
    SDWAInst.addImm(0);
  SDWAInst.add(*Src0);

  // Copy src1 if present, initialize src1_modifiers.
  MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
  if (Src1) {
    assert(
      AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::src1) != -1 &&
      AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::src1_modifiers) != -1);
    if (auto *Mod = TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers))
      SDWAInst.addImm(Mod->getImm());
    else
      SDWAInst.addImm(0);
    SDWAInst.add(*Src1);
  }

  if (SDWAOpcode == AMDGPU::V_MAC_F16_sdwa ||
      SDWAOpcode == AMDGPU::V_MAC_F32_sdwa) {
    // v_mac_f16/32 has additional src2 operand tied to vdst
    MachineOperand *Src2 = TII->getNamedOperand(MI, AMDGPU::OpName::src2);
    assert(Src2);
    SDWAInst.add(*Src2);
  }

  // Copy clamp if present, initialize otherwise
  assert(AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::clamp) != -1);
  MachineOperand *Clamp = TII->getNamedOperand(MI, AMDGPU::OpName::clamp);
  if (Clamp) {
    SDWAInst.add(*Clamp);
  } else {
    SDWAInst.addImm(0);
  }

  // Copy omod if present, initialize otherwise if needed
  if (AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::omod) != -1) {
    MachineOperand *OMod = TII->getNamedOperand(MI, AMDGPU::OpName::omod);
    if (OMod) {
      SDWAInst.add(*OMod);
    } else {
      SDWAInst.addImm(0);
    }
  }

  // Copy dst_sel if present, initialize otherwise if needed
  if (AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::dst_sel) != -1) {
    MachineOperand *DstSel = TII->getNamedOperand(MI, AMDGPU::OpName::dst_sel);
    if (DstSel) {
      SDWAInst.add(*DstSel);
    } else {
      SDWAInst.addImm(AMDGPU::SDWA::SdwaSel::DWORD);
    }
  }

  // Copy dst_unused if present, initialize otherwise if needed
  if (AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::dst_unused) != -1) {
    MachineOperand *DstUnused = TII->getNamedOperand(MI, AMDGPU::OpName::dst_unused);
    if (DstUnused) {
      SDWAInst.add(*DstUnused);
    } else {
      SDWAInst.addImm(AMDGPU::SDWA::DstUnused::UNUSED_PAD);
    }
  }

  // Copy src0_sel if present, initialize otherwise
  assert(AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::src0_sel) != -1);
  MachineOperand *Src0Sel = TII->getNamedOperand(MI, AMDGPU::OpName::src0_sel);
  if (Src0Sel) {
    SDWAInst.add(*Src0Sel);
  } else {
    SDWAInst.addImm(AMDGPU::SDWA::SdwaSel::DWORD);
  }

  // Copy src1_sel if present, initialize otherwise if needed
  if (Src1) {
    assert(AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::src1_sel) != -1);
    MachineOperand *Src1Sel = TII->getNamedOperand(MI, AMDGPU::OpName::src1_sel);
    if (Src1Sel) {
      SDWAInst.add(*Src1Sel);
    } else {
      SDWAInst.addImm(AMDGPU::SDWA::SdwaSel::DWORD);
    }
  }

  // Check for a preserved register that needs to be copied.
  auto DstUnused = TII->getNamedOperand(MI, AMDGPU::OpName::dst_unused);
  if (DstUnused &&
      DstUnused->getImm() == AMDGPU::SDWA::DstUnused::UNUSED_PRESERVE) {
    // We expect, if we are here, that the instruction was already in its SDWA
    // form, with a tied operand.
    assert(Dst && Dst->isTied());
    assert(Opcode == static_cast<unsigned int>(SDWAOpcode));
    // We also expect a vdst, since sdst can't preserve.
    auto PreserveDstIdx = AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::vdst);
    assert(PreserveDstIdx != -1);

    auto TiedIdx = MI.findTiedOperandIdx(PreserveDstIdx);
    auto Tied = MI.getOperand(TiedIdx);

    SDWAInst.add(Tied);
    SDWAInst->tieOperands(PreserveDstIdx, SDWAInst->getNumOperands() - 1);
  }

  // Apply all sdwa operand patterns.
  bool Converted = false;
  for (auto &Operand : SDWAOperands) {
    LLVM_DEBUG(dbgs() << *SDWAInst << "\nOperand: " << *Operand);
    // There should be no intersection between SDWA operands and potential MIs
    // e.g.:
    // v_and_b32 v0, 0xff, v1 -> src:v1 sel:BYTE_0
    // v_and_b32 v2, 0xff, v0 -> src:v0 sel:BYTE_0
    // v_add_u32 v3, v4, v2
    //
    // In that example it is possible that we would fold the 2nd instruction
    // into the 3rd (v_add_u32_sdwa) and then try to fold the 1st instruction
    // into the 2nd (that was already destroyed). So if an SDWAOperand is also
    // a potential MI then do not apply it.
    if (PotentialMatches.count(Operand->getParentInst()) == 0)
      Converted |= Operand->convertToSDWA(*SDWAInst, TII);
  }
  if (Converted) {
    ConvertedInstructions.push_back(SDWAInst);
  } else {
    SDWAInst->eraseFromParent();
    return false;
  }

  LLVM_DEBUG(dbgs() << "\nInto:" << *SDWAInst << '\n');
  ++NumSDWAInstructionsPeepholed;

  MI.eraseFromParent();
  return true;
}

// If an instruction was converted to SDWA it should not have immediates or SGPR
// operands (allowed one SGPR on GFX9). Copy its scalar operands into VGPRs.
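// For example (illustrative only; register names are made up): once the single
// SGPR allowed on subtargets with SDWA scalar support has been used up,
//   %5 = V_ADD_F16_sdwa ..., %sgpr0, ...
// is rewritten as
//   %6:vgpr_32 = V_MOV_B32_e32 %sgpr0
//   %5 = V_ADD_F16_sdwa ..., %6, ...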
void SIPeepholeSDWA::legalizeScalarOperands(MachineInstr &MI,
                                            const GCNSubtarget &ST) const {
  const MCInstrDesc &Desc = TII->get(MI.getOpcode());
  unsigned ConstantBusCount = 0;
  for (MachineOperand &Op : MI.explicit_uses()) {
    if (!Op.isImm() && !(Op.isReg() && !TRI->isVGPR(*MRI, Op.getReg())))
      continue;

    unsigned I = MI.getOperandNo(&Op);
    if (Desc.OpInfo[I].RegClass == -1 ||
        !TRI->hasVGPRs(TRI->getRegClass(Desc.OpInfo[I].RegClass)))
      continue;

    if (ST.hasSDWAScalar() && ConstantBusCount == 0 && Op.isReg() &&
        TRI->isSGPRReg(*MRI, Op.getReg())) {
      ++ConstantBusCount;
      continue;
    }

    unsigned VGPR = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    auto Copy = BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(),
                        TII->get(AMDGPU::V_MOV_B32_e32), VGPR);
    if (Op.isImm())
      Copy.addImm(Op.getImm());
    else if (Op.isReg())
      Copy.addReg(Op.getReg(), Op.isKill() ? RegState::Kill : 0,
                  Op.getSubReg());
    Op.ChangeToRegister(VGPR, false);
  }
}

bool SIPeepholeSDWA::runOnMachineFunction(MachineFunction &MF) {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();

  if (!ST.hasSDWA() || skipFunction(MF.getFunction()))
    return false;

  MRI = &MF.getRegInfo();
  TRI = ST.getRegisterInfo();
  TII = ST.getInstrInfo();

  // Find all SDWA operands in MF.
  bool Ret = false;
  for (MachineBasicBlock &MBB : MF) {
    bool Changed = false;
    do {
      // Preprocess the ADD/SUB pairs so they could be SDWA'ed.
      // Look for a possible ADD or SUB that resulted from a previously lowered
      // V_{ADD|SUB}_U64_PSEUDO. The function pseudoOpConvertToVOP2
      // lowers the pair of instructions into e32 form.
      matchSDWAOperands(MBB);
      for (const auto &OperandPair : SDWAOperands) {
        const auto &Operand = OperandPair.second;
        MachineInstr *PotentialMI = Operand->potentialToConvert(TII);
        if (PotentialMI &&
            (PotentialMI->getOpcode() == AMDGPU::V_ADD_I32_e64 ||
             PotentialMI->getOpcode() == AMDGPU::V_SUB_I32_e64))
          pseudoOpConvertToVOP2(*PotentialMI, ST);
      }
      SDWAOperands.clear();

      // Generate potential match list.
      matchSDWAOperands(MBB);

      for (const auto &OperandPair : SDWAOperands) {
        const auto &Operand = OperandPair.second;
        MachineInstr *PotentialMI = Operand->potentialToConvert(TII);
        if (PotentialMI && isConvertibleToSDWA(*PotentialMI, ST)) {
          PotentialMatches[PotentialMI].push_back(Operand.get());
        }
      }

      for (auto &PotentialPair : PotentialMatches) {
        MachineInstr &PotentialMI = *PotentialPair.first;
        convertToSDWA(PotentialMI, PotentialPair.second);
      }

      PotentialMatches.clear();
      SDWAOperands.clear();

      Changed = !ConvertedInstructions.empty();

      if (Changed)
        Ret = true;
      while (!ConvertedInstructions.empty())
        legalizeScalarOperands(*ConvertedInstructions.pop_back_val(), ST);
    } while (Changed);
  }

  return Ret;
}
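
// Note: the pass can be exercised in isolation on MIR input, e.g. with an
// (illustrative) command line such as
//   llc -march=amdgcn -mcpu=gfx900 -run-pass=si-peephole-sdwa -o - input.mir
// where input.mir is a hand-written or -stop-before produced MIR file.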