//===-- SIPeepholeSDWA.cpp - Peephole optimization for SDWA instructions --===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file This pass tries to apply several peephole SDWA patterns.
///
/// E.g. original:
///   V_LSHRREV_B32_e32 %vreg0, 16, %vreg1
///   V_ADD_I32_e32 %vreg2, %vreg0, %vreg3
///   V_LSHLREV_B32_e32 %vreg4, 16, %vreg2
///
/// Replace:
///   V_ADD_I32_sdwa %vreg4, %vreg1, %vreg3
///       dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
///
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIDefines.h"
#include "SIInstrInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include <unordered_map>
#include <unordered_set>

using namespace llvm;

#define DEBUG_TYPE "si-peephole-sdwa"

STATISTIC(NumSDWAPatternsFound, "Number of SDWA patterns found.");
STATISTIC(NumSDWAInstructionsPeepholed,
          "Number of instructions converted to SDWA.");

namespace {

class SDWAOperand;

class SIPeepholeSDWA : public MachineFunctionPass {
public:
  typedef SmallVector<SDWAOperand *, 4> SDWAOperandsVector;

private:
  MachineRegisterInfo *MRI;
  const SIRegisterInfo *TRI;
  const SIInstrInfo *TII;

  std::unordered_map<MachineInstr *, std::unique_ptr<SDWAOperand>> SDWAOperands;
  std::unordered_map<MachineInstr *, SDWAOperandsVector> PotentialMatches;
  SmallVector<MachineInstr *, 8> ConvertedInstructions;

  Optional<int64_t> foldToImm(const MachineOperand &Op) const;

public:
  static char ID;

  SIPeepholeSDWA() : MachineFunctionPass(ID) {
    initializeSIPeepholeSDWAPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;
  void matchSDWAOperands(MachineFunction &MF);
  bool isConvertibleToSDWA(const MachineInstr &MI, const SISubtarget &ST) const;
  bool convertToSDWA(MachineInstr &MI, const SDWAOperandsVector &SDWAOperands);
  void legalizeScalarOperands(MachineInstr &MI, const SISubtarget &ST) const;

  StringRef getPassName() const override { return "SI Peephole SDWA"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};
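// High-level flow of the pass (a summary of runOnMachineFunction below):
//  1. matchSDWAOperands records an SDWAOperand for every shift/bfe/and
//     instruction that reads or writes only a byte or word of a dword.
//  2. potentialToConvert finds, for each SDWAOperand, the single instruction
//     it could be folded into.
//  3. convertToSDWA builds the _sdwa form of that instruction and folds the
//     matched operands into it; legalizeScalarOperands then copies remaining
//     scalar operands of converted instructions into VGPRs.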
class SDWAOperand {
private:
  MachineOperand *Target; // Operand that would be used in converted instruction
  MachineOperand *Replaced; // Operand that would be replaced by Target

public:
  SDWAOperand(MachineOperand *TargetOp, MachineOperand *ReplacedOp)
      : Target(TargetOp), Replaced(ReplacedOp) {
    assert(Target->isReg());
    assert(Replaced->isReg());
  }

  virtual ~SDWAOperand() {}

  virtual MachineInstr *potentialToConvert(const SIInstrInfo *TII) = 0;
  virtual bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) = 0;

  MachineOperand *getTargetOperand() const { return Target; }
  MachineOperand *getReplacedOperand() const { return Replaced; }
  MachineInstr *getParentInst() const { return Target->getParent(); }
  MachineRegisterInfo *getMRI() const {
    return &getParentInst()->getParent()->getParent()->getRegInfo();
  }
};

using namespace AMDGPU::SDWA;

class SDWASrcOperand : public SDWAOperand {
private:
  SdwaSel SrcSel;
  bool Abs;
  bool Neg;
  bool Sext;

public:
  SDWASrcOperand(MachineOperand *TargetOp, MachineOperand *ReplacedOp,
                 SdwaSel SrcSel_ = DWORD, bool Abs_ = false, bool Neg_ = false,
                 bool Sext_ = false)
      : SDWAOperand(TargetOp, ReplacedOp), SrcSel(SrcSel_), Abs(Abs_),
        Neg(Neg_), Sext(Sext_) {}

  virtual MachineInstr *potentialToConvert(const SIInstrInfo *TII) override;
  virtual bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) override;

  SdwaSel getSrcSel() const { return SrcSel; }
  bool getAbs() const { return Abs; }
  bool getNeg() const { return Neg; }
  bool getSext() const { return Sext; }

  uint64_t getSrcMods(const SIInstrInfo *TII,
                      const MachineOperand *SrcOp) const;
};

class SDWADstOperand : public SDWAOperand {
private:
  SdwaSel DstSel;
  DstUnused DstUn;

public:
  SDWADstOperand(MachineOperand *TargetOp, MachineOperand *ReplacedOp,
                 SdwaSel DstSel_ = DWORD, DstUnused DstUn_ = UNUSED_PAD)
      : SDWAOperand(TargetOp, ReplacedOp), DstSel(DstSel_), DstUn(DstUn_) {}

  virtual MachineInstr *potentialToConvert(const SIInstrInfo *TII) override;
  virtual bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) override;

  SdwaSel getDstSel() const { return DstSel; }
  DstUnused getDstUnused() const { return DstUn; }
};

} // End anonymous namespace.

INITIALIZE_PASS(SIPeepholeSDWA, DEBUG_TYPE, "SI Peephole SDWA", false, false)

char SIPeepholeSDWA::ID = 0;

char &llvm::SIPeepholeSDWAID = SIPeepholeSDWA::ID;

FunctionPass *llvm::createSIPeepholeSDWAPass() {
  return new SIPeepholeSDWA();
}

#ifndef NDEBUG

static raw_ostream& operator<<(raw_ostream &OS, const SdwaSel &Sel) {
  switch(Sel) {
  case BYTE_0: OS << "BYTE_0"; break;
  case BYTE_1: OS << "BYTE_1"; break;
  case BYTE_2: OS << "BYTE_2"; break;
  case BYTE_3: OS << "BYTE_3"; break;
  case WORD_0: OS << "WORD_0"; break;
  case WORD_1: OS << "WORD_1"; break;
  case DWORD: OS << "DWORD"; break;
  }
  return OS;
}

static raw_ostream& operator<<(raw_ostream &OS, const DstUnused &Un) {
  switch(Un) {
  case UNUSED_PAD: OS << "UNUSED_PAD"; break;
  case UNUSED_SEXT: OS << "UNUSED_SEXT"; break;
  case UNUSED_PRESERVE: OS << "UNUSED_PRESERVE"; break;
  }
  return OS;
}

static raw_ostream& operator<<(raw_ostream &OS, const SDWASrcOperand &Src) {
  OS << "SDWA src: " << *Src.getTargetOperand()
     << " src_sel:" << Src.getSrcSel()
     << " abs:" << Src.getAbs() << " neg:" << Src.getNeg()
     << " sext:" << Src.getSext() << '\n';
  return OS;
}

static raw_ostream& operator<<(raw_ostream &OS, const SDWADstOperand &Dst) {
  OS << "SDWA dst: " << *Dst.getTargetOperand()
     << " dst_sel:" << Dst.getDstSel()
     << " dst_unused:" << Dst.getDstUnused() << '\n';
  return OS;
}

#endif

static void copyRegOperand(MachineOperand &To, const MachineOperand &From) {
  assert(To.isReg() && From.isReg());
  To.setReg(From.getReg());
  To.setSubReg(From.getSubReg());
  To.setIsUndef(From.isUndef());
  if (To.isUse()) {
    To.setIsKill(From.isKill());
  } else {
    To.setIsDead(From.isDead());
  }
}

static bool isSameReg(const MachineOperand &LHS, const MachineOperand &RHS) {
  return LHS.isReg() &&
         RHS.isReg() &&
         LHS.getReg() == RHS.getReg() &&
         LHS.getSubReg() == RHS.getSubReg();
}
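// Check that SubReg accesses a subset of the lanes that SuperReg accesses:
// SuperMask | ~SubMask is all ones exactly when SubMask is contained in
// SuperMask. E.g. %vreg0:sub0 is a subreg of %vreg0 (an empty subreg index
// covers all lanes), while %vreg0:sub0 is not a subreg of %vreg0:sub1.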
static bool isSubregOf(const MachineOperand &SubReg,
                       const MachineOperand &SuperReg,
                       const TargetRegisterInfo *TRI) {
  if (!SuperReg.isReg() || !SubReg.isReg())
    return false;

  if (isSameReg(SuperReg, SubReg))
    return true;

  if (SuperReg.getReg() != SubReg.getReg())
    return false;

  LaneBitmask SuperMask = TRI->getSubRegIndexLaneMask(SuperReg.getSubReg());
  LaneBitmask SubMask = TRI->getSubRegIndexLaneMask(SubReg.getSubReg());
  SuperMask |= ~SubMask;
  return SuperMask.all();
}
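// Combine the modifiers already present on SrcOp in its instruction with the
// modifiers of this SDWA operand. Note that NEG is xor'ed rather than or'ed:
// if the instruction already negates the source and the SDWA operand negates
// it again, the two negations cancel (-(-x) == x).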
uint64_t SDWASrcOperand::getSrcMods(const SIInstrInfo *TII,
                                    const MachineOperand *SrcOp) const {
  uint64_t Mods = 0;
  const auto *MI = SrcOp->getParent();
  if (TII->getNamedOperand(*MI, AMDGPU::OpName::src0) == SrcOp) {
    if (auto *Mod = TII->getNamedOperand(*MI, AMDGPU::OpName::src0_modifiers)) {
      Mods = Mod->getImm();
    }
  } else if (TII->getNamedOperand(*MI, AMDGPU::OpName::src1) == SrcOp) {
    if (auto *Mod = TII->getNamedOperand(*MI, AMDGPU::OpName::src1_modifiers)) {
      Mods = Mod->getImm();
    }
  }
  if (Abs || Neg) {
    assert(!Sext &&
           "Float and integer src modifiers can't be set simultaneously");
    Mods |= Abs ? SISrcMods::ABS : 0;
    Mods ^= Neg ? SISrcMods::NEG : 0;
  } else if (Sext) {
    Mods |= SISrcMods::SEXT;
  }

  return Mods;
}
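// e.g. for the pattern from the file header comment:
//   V_LSHRREV_B32_e32 %vreg0, 16, %vreg1
//   V_ADD_I32_e32 %vreg2, %vreg0, %vreg3
// the SDWA src operand built from the shift names the V_ADD as its potential
// instruction, but only if the V_ADD is the sole user of %vreg0.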
MachineInstr *SDWASrcOperand::potentialToConvert(const SIInstrInfo *TII) {
  // For an SDWA src operand the potential instruction is the one that uses
  // the register defined by the parent instruction.
  MachineRegisterInfo *MRI = getMRI();
  MachineOperand *Replaced = getReplacedOperand();
  assert(Replaced->isReg());

  MachineInstr *PotentialMI = nullptr;
  for (MachineOperand &PotentialMO : MRI->use_operands(Replaced->getReg())) {
    // If this is a use of another subreg of the dst reg then do nothing.
    if (!isSubregOf(*Replaced, PotentialMO, MRI->getTargetRegisterInfo()))
      continue;

    // If there exists a use of a superreg of dst then we should not combine
    // this operand.
    if (!isSameReg(PotentialMO, *Replaced))
      return nullptr;

    // Check that PotentialMI is the only instruction that uses the dst reg.
    if (PotentialMI == nullptr) {
      PotentialMI = PotentialMO.getParent();
    } else if (PotentialMI != PotentialMO.getParent()) {
      return nullptr;
    }
  }

  return PotentialMI;
}

bool SDWASrcOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) {
  // Find the operand in the instruction that matches the source operand and
  // replace it with the target operand. Set the corresponding src_sel.

  MachineOperand *Src = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
  MachineOperand *SrcSel = TII->getNamedOperand(MI, AMDGPU::OpName::src0_sel);
  MachineOperand *SrcMods =
      TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers);
  assert(Src && (Src->isReg() || Src->isImm()));
  if (!isSameReg(*Src, *getReplacedOperand())) {
    // If this is not src0 then it should be src1.
    Src = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    SrcSel = TII->getNamedOperand(MI, AMDGPU::OpName::src1_sel);
    SrcMods = TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers);

    assert(Src && Src->isReg());

    if ((MI.getOpcode() == AMDGPU::V_MAC_F16_sdwa ||
         MI.getOpcode() == AMDGPU::V_MAC_F32_sdwa) &&
        !isSameReg(*Src, *getReplacedOperand())) {
      // In case of v_mac_f16/32_sdwa this pass can try to apply the src
      // operand to src2. This is not allowed.
      return false;
    }

    assert(isSameReg(*Src, *getReplacedOperand()) && SrcSel && SrcMods);
  }
  copyRegOperand(*Src, *getTargetOperand());
  SrcSel->setImm(getSrcSel());
  SrcMods->setImm(getSrcMods(TII, Src));
  getTargetOperand()->setIsKill(false);
  return true;
}

MachineInstr *SDWADstOperand::potentialToConvert(const SIInstrInfo *TII) {
  // For an SDWA dst operand the potential instruction is the one that
  // defines the register that this operand uses.
  MachineRegisterInfo *MRI = getMRI();
  MachineInstr *ParentMI = getParentInst();
  MachineOperand *Replaced = getReplacedOperand();
  assert(Replaced->isReg());

  for (MachineOperand &PotentialMO : MRI->def_operands(Replaced->getReg())) {
    if (!isSubregOf(*Replaced, PotentialMO, MRI->getTargetRegisterInfo()))
      continue;

    if (!isSameReg(*Replaced, PotentialMO))
      return nullptr;

    // Check that ParentMI is the only instruction that uses the replaced
    // register.
    for (MachineOperand &UseMO : MRI->use_operands(PotentialMO.getReg())) {
      if (isSubregOf(UseMO, PotentialMO, MRI->getTargetRegisterInfo()) &&
          UseMO.getParent() != ParentMI) {
        return nullptr;
      }
    }

    // Due to SSA this should be the only def of the replaced register, so
    // return it.
    return PotentialMO.getParent();
  }

  return nullptr;
}

bool SDWADstOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) {
  // Replace the vdst operand in MI with the target operand. Set dst_sel and
  // dst_unused.

  if ((MI.getOpcode() == AMDGPU::V_MAC_F16_sdwa ||
       MI.getOpcode() == AMDGPU::V_MAC_F32_sdwa) &&
      getDstSel() != AMDGPU::SDWA::DWORD) {
    // v_mac_f16/32_sdwa allows dst_sel to be DWORD only.
    return false;
  }

  MachineOperand *Operand = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);
  assert(Operand &&
         Operand->isReg() &&
         isSameReg(*Operand, *getReplacedOperand()));
  copyRegOperand(*Operand, *getTargetOperand());
  MachineOperand *DstSel = TII->getNamedOperand(MI, AMDGPU::OpName::dst_sel);
  assert(DstSel);
  DstSel->setImm(getDstSel());
  MachineOperand *DstUnused = TII->getNamedOperand(MI, AMDGPU::OpName::dst_unused);
  assert(DstUnused);
  DstUnused->setImm(getDstUnused());

  // Remove the original instruction because it would conflict with our new
  // instruction by register definition.
  getParentInst()->eraseFromParent();
  return true;
}

Optional<int64_t> SIPeepholeSDWA::foldToImm(const MachineOperand &Op) const {
  if (Op.isImm()) {
    return Op.getImm();
  }

  // If this is not an immediate then it can be a copy of an immediate value,
  // e.g.:
  // %vreg1<def> = S_MOV_B32 255;
  if (Op.isReg()) {
    for (const MachineOperand &Def : MRI->def_operands(Op.getReg())) {
      if (!isSameReg(Op, Def))
        continue;

      const MachineInstr *DefInst = Def.getParent();
      if (!TII->isFoldableCopy(*DefInst))
        return None;

      const MachineOperand &Copied = DefInst->getOperand(1);
      if (!Copied.isImm())
        return None;

      return Copied.getImm();
    }
  }

  return None;
}
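// Both literal immediates and registers that foldToImm resolves to an
// immediate match the patterns below; e.g.
//   V_LSHRREV_B32_e32 %vreg2, 16, %vreg1      ; src0 is the immediate 16
// and
//   %vreg0 = S_MOV_B32 16
//   V_LSHRREV_B32_e64 %vreg2, %vreg0, %vreg1  ; src0 folds to 16
// are matched the same way.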
void SIPeepholeSDWA::matchSDWAOperands(MachineFunction &MF) {
  for (MachineBasicBlock &MBB : MF) {
    for (MachineInstr &MI : MBB) {
      unsigned Opcode = MI.getOpcode();
      switch (Opcode) {
      case AMDGPU::V_LSHRREV_B32_e32:
      case AMDGPU::V_ASHRREV_I32_e32:
      case AMDGPU::V_LSHLREV_B32_e32:
      case AMDGPU::V_LSHRREV_B32_e64:
      case AMDGPU::V_ASHRREV_I32_e64:
      case AMDGPU::V_LSHLREV_B32_e64: {
        // from: v_lshrrev_b32_e32 v1, 16/24, v0
        // to SDWA src:v0 src_sel:WORD_1/BYTE_3

        // from: v_ashrrev_i32_e32 v1, 16/24, v0
        // to SDWA src:v0 src_sel:WORD_1/BYTE_3 sext:1

        // from: v_lshlrev_b32_e32 v1, 16/24, v0
        // to SDWA dst:v1 dst_sel:WORD_1/BYTE_3 dst_unused:UNUSED_PAD
        MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
        auto Imm = foldToImm(*Src0);
        if (!Imm)
          break;

        if (*Imm != 16 && *Imm != 24)
          break;

        MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
        MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);
        if (TRI->isPhysicalRegister(Src1->getReg()) ||
            TRI->isPhysicalRegister(Dst->getReg()))
          break;

        if (Opcode == AMDGPU::V_LSHLREV_B32_e32 ||
            Opcode == AMDGPU::V_LSHLREV_B32_e64) {
          auto SDWADst = make_unique<SDWADstOperand>(
              Dst, Src1, *Imm == 16 ? WORD_1 : BYTE_3, UNUSED_PAD);
          DEBUG(dbgs() << "Match: " << MI << "To: " << *SDWADst << '\n');
          SDWAOperands[&MI] = std::move(SDWADst);
          ++NumSDWAPatternsFound;
        } else {
          auto SDWASrc = make_unique<SDWASrcOperand>(
              Src1, Dst, *Imm == 16 ? WORD_1 : BYTE_3, false, false,
              Opcode != AMDGPU::V_LSHRREV_B32_e32 &&
              Opcode != AMDGPU::V_LSHRREV_B32_e64);
          DEBUG(dbgs() << "Match: " << MI << "To: " << *SDWASrc << '\n');
          SDWAOperands[&MI] = std::move(SDWASrc);
          ++NumSDWAPatternsFound;
        }
        break;
      }

      case AMDGPU::V_LSHRREV_B16_e32:
      case AMDGPU::V_ASHRREV_I16_e32:
      case AMDGPU::V_LSHLREV_B16_e32:
      case AMDGPU::V_LSHRREV_B16_e64:
      case AMDGPU::V_ASHRREV_I16_e64:
      case AMDGPU::V_LSHLREV_B16_e64: {
        // from: v_lshrrev_b16_e32 v1, 8, v0
        // to SDWA src:v0 src_sel:BYTE_1

        // from: v_ashrrev_i16_e32 v1, 8, v0
        // to SDWA src:v0 src_sel:BYTE_1 sext:1

        // from: v_lshlrev_b16_e32 v1, 8, v0
        // to SDWA dst:v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD
        MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
        auto Imm = foldToImm(*Src0);
        if (!Imm || *Imm != 8)
          break;

        MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
        MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);

        if (TRI->isPhysicalRegister(Src1->getReg()) ||
            TRI->isPhysicalRegister(Dst->getReg()))
          break;

        if (Opcode == AMDGPU::V_LSHLREV_B16_e32 ||
            Opcode == AMDGPU::V_LSHLREV_B16_e64) {
          auto SDWADst =
              make_unique<SDWADstOperand>(Dst, Src1, BYTE_1, UNUSED_PAD);
          DEBUG(dbgs() << "Match: " << MI << "To: " << *SDWADst << '\n');
          SDWAOperands[&MI] = std::move(SDWADst);
          ++NumSDWAPatternsFound;
        } else {
          auto SDWASrc = make_unique<SDWASrcOperand>(
              Src1, Dst, BYTE_1, false, false,
              Opcode != AMDGPU::V_LSHRREV_B16_e32 &&
              Opcode != AMDGPU::V_LSHRREV_B16_e64);
          DEBUG(dbgs() << "Match: " << MI << "To: " << *SDWASrc << '\n');
          SDWAOperands[&MI] = std::move(SDWASrc);
          ++NumSDWAPatternsFound;
        }
        break;
      }

      case AMDGPU::V_BFE_I32:
      case AMDGPU::V_BFE_U32: {
        // e.g.:
        // from: v_bfe_u32 v1, v0, 8, 8
        // to SDWA src:v0 src_sel:BYTE_1

        // offset | width | src_sel
        // ------------------------
        //    0   |   8   | BYTE_0
        //    0   |  16   | WORD_0
        //    0   |  32   | DWORD ?
        //    8   |   8   | BYTE_1
        //   16   |   8   | BYTE_2
        //   16   |  16   | WORD_1
        //   24   |   8   | BYTE_3

        MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
        auto Offset = foldToImm(*Src1);
        if (!Offset)
          break;

        MachineOperand *Src2 = TII->getNamedOperand(MI, AMDGPU::OpName::src2);
        auto Width = foldToImm(*Src2);
        if (!Width)
          break;

        SdwaSel SrcSel = DWORD;

        if (*Offset == 0 && *Width == 8)
          SrcSel = BYTE_0;
        else if (*Offset == 0 && *Width == 16)
          SrcSel = WORD_0;
        else if (*Offset == 0 && *Width == 32)
          SrcSel = DWORD;
        else if (*Offset == 8 && *Width == 8)
          SrcSel = BYTE_1;
        else if (*Offset == 16 && *Width == 8)
          SrcSel = BYTE_2;
        else if (*Offset == 16 && *Width == 16)
          SrcSel = WORD_1;
        else if (*Offset == 24 && *Width == 8)
          SrcSel = BYTE_3;
        else
          break;

        MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
        MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);

        if (TRI->isPhysicalRegister(Src0->getReg()) ||
            TRI->isPhysicalRegister(Dst->getReg()))
          break;

        auto SDWASrc = make_unique<SDWASrcOperand>(
            Src0, Dst, SrcSel, false, false,
            Opcode == AMDGPU::V_BFE_U32 ? false : true);
        DEBUG(dbgs() << "Match: " << MI << "To: " << *SDWASrc << '\n');
        SDWAOperands[&MI] = std::move(SDWASrc);
        ++NumSDWAPatternsFound;
        break;
      }
      case AMDGPU::V_AND_B32_e32:
      case AMDGPU::V_AND_B32_e64: {
        // e.g.:
        // from: v_and_b32_e32 v1, 0x0000ffff/0x000000ff, v0
        // to SDWA src:v0 src_sel:WORD_0/BYTE_0

        MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
        MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
        auto ValSrc = Src1;
        auto Imm = foldToImm(*Src0);

        if (!Imm) {
          Imm = foldToImm(*Src1);
          ValSrc = Src0;
        }

        if (!Imm || (*Imm != 0x0000ffff && *Imm != 0x000000ff))
          break;

        MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);

        if (TRI->isPhysicalRegister(Src1->getReg()) ||
            TRI->isPhysicalRegister(Dst->getReg()))
          break;

        auto SDWASrc = make_unique<SDWASrcOperand>(
            ValSrc, Dst, *Imm == 0x0000ffff ? WORD_0 : BYTE_0);
        DEBUG(dbgs() << "Match: " << MI << "To: " << *SDWASrc << '\n');
        SDWAOperands[&MI] = std::move(SDWASrc);
        ++NumSDWAPatternsFound;
        break;
      }
      }
    }
  }
}
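// An opcode is convertible if it has a direct SDWA counterpart or if its
// VOP2/VOP1 e32 form has one (e.g. V_ADD_I32_e64 -> V_ADD_I32_e32 ->
// V_ADD_I32_sdwa), and if the subtarget supports the modifiers it uses.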
bool SIPeepholeSDWA::isConvertibleToSDWA(const MachineInstr &MI,
                                         const SISubtarget &ST) const {
  // Check if this instruction has an opcode that supports SDWA.
  int Opc = MI.getOpcode();
  if (AMDGPU::getSDWAOp(Opc) == -1)
    Opc = AMDGPU::getVOPe32(Opc);

  if (Opc == -1 || AMDGPU::getSDWAOp(Opc) == -1)
    return false;

  if (!ST.hasSDWAOmod() && TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
    return false;

  if (TII->isVOPC(Opc)) {
    if (!ST.hasSDWASdst()) {
      const MachineOperand *SDst = TII->getNamedOperand(MI, AMDGPU::OpName::sdst);
      if (SDst && SDst->getReg() != AMDGPU::VCC)
        return false;
    }

    if (!ST.hasSDWAOutModsVOPC() &&
        (TII->hasModifiersSet(MI, AMDGPU::OpName::clamp) ||
         TII->hasModifiersSet(MI, AMDGPU::OpName::omod)))
      return false;

  } else if (TII->getNamedOperand(MI, AMDGPU::OpName::sdst) ||
             !TII->getNamedOperand(MI, AMDGPU::OpName::vdst)) {
    return false;
  }

  if (!ST.hasSDWAMac() && (Opc == AMDGPU::V_MAC_F16_e32 ||
                           Opc == AMDGPU::V_MAC_F32_e32))
    return false;

  return true;
}
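// Build the SDWA instruction operand by operand, in the order the _sdwa
// opcodes expect: dst (vdst, or sdst/VCC for VOPC), src0_modifiers, src0,
// src1_modifiers, src1 (if present), src2 (v_mac only), clamp, omod (if the
// opcode has it), dst_sel, dst_unused, src0_sel, src1_sel.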
bool SIPeepholeSDWA::convertToSDWA(MachineInstr &MI,
                                   const SDWAOperandsVector &SDWAOperands) {
  // Convert to sdwa
  int SDWAOpcode = AMDGPU::getSDWAOp(MI.getOpcode());
  if (SDWAOpcode == -1)
    SDWAOpcode = AMDGPU::getSDWAOp(AMDGPU::getVOPe32(MI.getOpcode()));
  assert(SDWAOpcode != -1);

  const MCInstrDesc &SDWADesc = TII->get(SDWAOpcode);

  // Create SDWA version of instruction MI and initialize its operands
  MachineInstrBuilder SDWAInst =
      BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), SDWADesc);

  // Copy dst: if it is present in the original then it should also be
  // present in the SDWA instruction.
  MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);
  if (Dst) {
    assert(AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::vdst) != -1);
    SDWAInst.add(*Dst);
  } else if ((Dst = TII->getNamedOperand(MI, AMDGPU::OpName::sdst))) {
    assert(Dst &&
           AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::sdst) != -1);
    SDWAInst.add(*Dst);
  } else {
    assert(AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::sdst) != -1);
    SDWAInst.addReg(AMDGPU::VCC, RegState::Define);
  }

  // Copy src0 and initialize src0_modifiers. All SDWA instructions have src0
  // and src0_modifiers (except for v_nop_sdwa, but it can't get here).
  MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
  assert(
      Src0 &&
      AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::src0) != -1 &&
      AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::src0_modifiers) != -1);
  if (auto *Mod = TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers))
    SDWAInst.addImm(Mod->getImm());
  else
    SDWAInst.addImm(0);
  SDWAInst.add(*Src0);

  // Copy src1 if present, initialize src1_modifiers.
  MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
  if (Src1) {
    assert(
        AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::src1) != -1 &&
        AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::src1_modifiers) != -1);
    if (auto *Mod = TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers))
      SDWAInst.addImm(Mod->getImm());
    else
      SDWAInst.addImm(0);
    SDWAInst.add(*Src1);
  }

  if (SDWAOpcode == AMDGPU::V_MAC_F16_sdwa ||
      SDWAOpcode == AMDGPU::V_MAC_F32_sdwa) {
    // v_mac_f16/32 has an additional src2 operand tied to vdst.
    MachineOperand *Src2 = TII->getNamedOperand(MI, AMDGPU::OpName::src2);
    assert(Src2);
    SDWAInst.add(*Src2);
  }

  // Copy clamp if present, initialize otherwise.
  assert(AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::clamp) != -1);
  MachineOperand *Clamp = TII->getNamedOperand(MI, AMDGPU::OpName::clamp);
  if (Clamp) {
    SDWAInst.add(*Clamp);
  } else {
    SDWAInst.addImm(0);
  }

  // Copy omod if present, initialize otherwise if needed.
  if (AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::omod) != -1) {
    MachineOperand *OMod = TII->getNamedOperand(MI, AMDGPU::OpName::omod);
    if (OMod) {
      SDWAInst.add(*OMod);
    } else {
      SDWAInst.addImm(0);
    }
  }

  // Initialize dst_sel if present.
  if (AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::dst_sel) != -1) {
    SDWAInst.addImm(AMDGPU::SDWA::SdwaSel::DWORD);
  }

  // Initialize dst_unused if present.
  if (AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::dst_unused) != -1) {
    SDWAInst.addImm(AMDGPU::SDWA::DstUnused::UNUSED_PAD);
  }

  // Initialize src0_sel.
  assert(AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::src0_sel) != -1);
  SDWAInst.addImm(AMDGPU::SDWA::SdwaSel::DWORD);

  // Initialize src1_sel if present.
  if (Src1) {
    assert(AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::src1_sel) != -1);
    SDWAInst.addImm(AMDGPU::SDWA::SdwaSel::DWORD);
  }

  // Apply all SDWA operand patterns.
  bool Converted = false;
  for (auto &Operand : SDWAOperands) {
    // There should be no intersection between SDWA operands and potential
    // MIs, e.g.:
    // v_and_b32 v0, 0xff, v1 -> src:v1 sel:BYTE_0
    // v_and_b32 v2, 0xff, v0 -> src:v0 sel:BYTE_0
    // v_add_u32 v3, v4, v2
    //
    // In that example it is possible that we would fold the 2nd instruction
    // into the 3rd (v_add_u32_sdwa) and then try to fold the 1st instruction
    // into the 2nd (which was already destroyed). So if an SDWAOperand is
    // also a potential MI then do not apply it.
    if (PotentialMatches.count(Operand->getParentInst()) == 0)
      Converted |= Operand->convertToSDWA(*SDWAInst, TII);
  }
  if (Converted) {
    ConvertedInstructions.push_back(SDWAInst);
  } else {
    SDWAInst->eraseFromParent();
    return false;
  }

  DEBUG(dbgs() << "Convert instruction:" << MI
               << "Into:" << *SDWAInst << '\n');
  ++NumSDWAInstructionsPeepholed;

  MI.eraseFromParent();
  return true;
}
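// e.g. if a converted instruction still reads an SGPR, say
//   V_ADD_I32_sdwa %vreg0, vcc, %sgpr2, %vreg1, ...
// (illustrative registers), on a subtarget without an SDWA scalar bus the
// SGPR is first copied:
//   %vregN = V_MOV_B32_e32 %sgpr2
// and the operand is rewritten to use %vregN instead.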
// If an instruction was converted to SDWA it should not have immediates or
// SGPR operands (one SGPR operand is allowed on GFX9). Copy its scalar
// operands into VGPRs.
void SIPeepholeSDWA::legalizeScalarOperands(MachineInstr &MI, const SISubtarget &ST) const {
  const MCInstrDesc &Desc = TII->get(MI.getOpcode());
  unsigned ConstantBusCount = 0;
  for (MachineOperand &Op : MI.explicit_uses()) {
    if (!Op.isImm() && !(Op.isReg() && !TRI->isVGPR(*MRI, Op.getReg())))
      continue;

    unsigned I = MI.getOperandNo(&Op);
    if (Desc.OpInfo[I].RegClass == -1 ||
        !TRI->hasVGPRs(TRI->getRegClass(Desc.OpInfo[I].RegClass)))
      continue;

    if (ST.hasSDWAScalar() && ConstantBusCount == 0 && Op.isReg() &&
        TRI->isSGPRReg(*MRI, Op.getReg())) {
      ++ConstantBusCount;
      continue;
    }

    unsigned VGPR = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    auto Copy = BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(),
                        TII->get(AMDGPU::V_MOV_B32_e32), VGPR);
    if (Op.isImm())
      Copy.addImm(Op.getImm());
    else if (Op.isReg())
      Copy.addReg(Op.getReg(), Op.isKill() ? RegState::Kill : 0,
                  Op.getSubReg());
    Op.ChangeToRegister(VGPR, false);
  }
}

bool SIPeepholeSDWA::runOnMachineFunction(MachineFunction &MF) {
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();

  if (!ST.hasSDWA())
    return false;

  MRI = &MF.getRegInfo();
  TRI = ST.getRegisterInfo();
  TII = ST.getInstrInfo();

  // Find all SDWA operands in MF.
  matchSDWAOperands(MF);

  for (const auto &OperandPair : SDWAOperands) {
    const auto &Operand = OperandPair.second;
    MachineInstr *PotentialMI = Operand->potentialToConvert(TII);
    if (PotentialMI && isConvertibleToSDWA(*PotentialMI, ST)) {
      PotentialMatches[PotentialMI].push_back(Operand.get());
    }
  }

  for (auto &PotentialPair : PotentialMatches) {
    MachineInstr &PotentialMI = *PotentialPair.first;
    convertToSDWA(PotentialMI, PotentialPair.second);
  }

  PotentialMatches.clear();
  SDWAOperands.clear();

  bool Ret = !ConvertedInstructions.empty();
  while (!ConvertedInstructions.empty())
    legalizeScalarOperands(*ConvertedInstructions.pop_back_val(), ST);

  return Ret;
}