//===-- GCNHazardRecognizers.cpp - GCN Hazard Recognizer Impls ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements hazard recognizers for scheduling on GCN processors.
//
//===----------------------------------------------------------------------===//

#include "GCNHazardRecognizer.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/TargetParser/TargetParser.h"

using namespace llvm;

namespace {

struct MFMAPaddingRatioParser : public cl::parser<unsigned> {
  MFMAPaddingRatioParser(cl::Option &O) : cl::parser<unsigned>(O) {}

  bool parse(cl::Option &O, StringRef ArgName, StringRef Arg, unsigned &Value) {
    if (Arg.getAsInteger(0, Value))
      return O.error("'" + Arg + "' value invalid for uint argument!");

    if (Value > 100)
      return O.error("'" + Arg + "' value must be in the range [0, 100]!");

    return false;
  }
};

} // end anonymous namespace

static cl::opt<unsigned, false, MFMAPaddingRatioParser>
    MFMAPaddingRatio("amdgpu-mfma-padding-ratio", cl::init(0), cl::Hidden,
                     cl::desc("Fill a percentage of the latency between "
                              "neighboring MFMA with s_nops."));

//===----------------------------------------------------------------------===//
// Hazard Recognizer Implementation
//===----------------------------------------------------------------------===//

static bool shouldRunLdsBranchVmemWARHazardFixup(const MachineFunction &MF,
                                                 const GCNSubtarget &ST);

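// MaxLookAhead is sized for the deepest hazard window the function can
// require: use of AGPR0 is a cheap proxy for the presence of MFMA code,
// whose hazards need far more wait states (19) than ordinary code (5).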
GCNHazardRecognizer::GCNHazardRecognizer(const MachineFunction &MF) :
  IsHazardRecognizerMode(false),
  CurrCycleInstr(nullptr),
  MF(MF),
  ST(MF.getSubtarget<GCNSubtarget>()),
  TII(*ST.getInstrInfo()),
  TRI(TII.getRegisterInfo()),
  ClauseUses(TRI.getNumRegUnits()),
  ClauseDefs(TRI.getNumRegUnits()) {
  MaxLookAhead = MF.getRegInfo().isPhysRegUsed(AMDGPU::AGPR0) ? 19 : 5;
  TSchedModel.init(&ST);
  RunLdsBranchVmemWARHazardFixup = shouldRunLdsBranchVmemWARHazardFixup(MF, ST);
}

void GCNHazardRecognizer::Reset() {
  EmittedInstrs.clear();
}

void GCNHazardRecognizer::EmitInstruction(SUnit *SU) {
  EmitInstruction(SU->getInstr());
}

void GCNHazardRecognizer::EmitInstruction(MachineInstr *MI) {
  CurrCycleInstr = MI;
}

static bool isDivFMas(unsigned Opcode) {
  return Opcode == AMDGPU::V_DIV_FMAS_F32_e64 ||
         Opcode == AMDGPU::V_DIV_FMAS_F64_e64;
}

static bool isSGetReg(unsigned Opcode) {
  return Opcode == AMDGPU::S_GETREG_B32;
}

static bool isSSetReg(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::S_SETREG_B32:
  case AMDGPU::S_SETREG_B32_mode:
  case AMDGPU::S_SETREG_IMM32_B32:
  case AMDGPU::S_SETREG_IMM32_B32_mode:
    return true;
  }
  return false;
}

static bool isRWLane(unsigned Opcode) {
  return Opcode == AMDGPU::V_READLANE_B32 || Opcode == AMDGPU::V_WRITELANE_B32;
}

static bool isRFE(unsigned Opcode) {
  return Opcode == AMDGPU::S_RFE_B64;
}

static bool isSMovRel(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::S_MOVRELS_B32:
  case AMDGPU::S_MOVRELS_B64:
  case AMDGPU::S_MOVRELD_B32:
  case AMDGPU::S_MOVRELD_B64:
    return true;
  default:
    return false;
  }
}

static bool isDGEMM(unsigned Opcode) {
  return AMDGPU::getMAIIsDGEMM(Opcode);
}

static bool isXDL(const GCNSubtarget &ST, const MachineInstr &MI) {
  unsigned Opcode = MI.getOpcode();

  if (!SIInstrInfo::isMAI(MI) ||
      isDGEMM(Opcode) ||
      Opcode == AMDGPU::V_ACCVGPR_WRITE_B32_e64 ||
      Opcode == AMDGPU::V_ACCVGPR_READ_B32_e64)
    return false;

  if (!ST.hasGFX940Insts())
    return true;

  return AMDGPU::getMAIIsGFX940XDL(Opcode);
}

static bool isSendMsgTraceDataOrGDS(const SIInstrInfo &TII,
                                    const MachineInstr &MI) {
  if (TII.isAlwaysGDS(MI.getOpcode()))
    return true;

  switch (MI.getOpcode()) {
  case AMDGPU::S_SENDMSG:
  case AMDGPU::S_SENDMSGHALT:
  case AMDGPU::S_TTRACEDATA:
    return true;
  // These DS opcodes don't support GDS.
  case AMDGPU::DS_NOP:
  case AMDGPU::DS_PERMUTE_B32:
  case AMDGPU::DS_BPERMUTE_B32:
    return false;
  default:
    if (TII.isDS(MI.getOpcode())) {
      int GDS = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                           AMDGPU::OpName::gds);
      if (MI.getOperand(GDS).getImm())
        return true;
    }
    return false;
  }
}

static bool isPermlane(const MachineInstr &MI) {
  unsigned Opcode = MI.getOpcode();
  return Opcode == AMDGPU::V_PERMLANE16_B32_e64 ||
         Opcode == AMDGPU::V_PERMLANEX16_B32_e64;
}

static bool isLdsDma(const MachineInstr &MI) {
  return SIInstrInfo::isVALU(MI) &&
         (SIInstrInfo::isMUBUF(MI) || SIInstrInfo::isFLAT(MI));
}

static unsigned getHWReg(const SIInstrInfo *TII, const MachineInstr &RegInstr) {
  const MachineOperand *RegOp = TII->getNamedOperand(RegInstr,
                                                     AMDGPU::OpName::simm16);
  return RegOp->getImm() & AMDGPU::Hwreg::ID_MASK_;
}

ScheduleHazardRecognizer::HazardType
GCNHazardRecognizer::getHazardType(SUnit *SU, int Stalls) {
  MachineInstr *MI = SU->getInstr();
  // If we are being run from the scheduler (not in hazard recognizer mode),
  // report Hazard so possible stalls are tracked without inserting noops;
  // in hazard recognizer mode report NoopHazard so the noops get inserted.
  auto HazardType = IsHazardRecognizerMode ? NoopHazard : Hazard;

  if (MI->isBundle())
    return NoHazard;

  if (SIInstrInfo::isSMRD(*MI) && checkSMRDHazards(MI) > 0)
    return HazardType;

  if (ST.hasNSAtoVMEMBug() && checkNSAtoVMEMHazard(MI) > 0)
    return HazardType;

  if (checkFPAtomicToDenormModeHazard(MI) > 0)
    return HazardType;

  if (ST.hasNoDataDepHazard())
    return NoHazard;

  // FIXME: Should flat be considered vmem?
  if ((SIInstrInfo::isVMEM(*MI) ||
       SIInstrInfo::isFLAT(*MI))
      && checkVMEMHazards(MI) > 0)
    return HazardType;

  if (SIInstrInfo::isVALU(*MI) && checkVALUHazards(MI) > 0)
    return HazardType;

  if (SIInstrInfo::isDPP(*MI) && checkDPPHazards(MI) > 0)
    return HazardType;

  if (isDivFMas(MI->getOpcode()) && checkDivFMasHazards(MI) > 0)
    return HazardType;

  if (isRWLane(MI->getOpcode()) && checkRWLaneHazards(MI) > 0)
    return HazardType;

  if ((SIInstrInfo::isVALU(*MI) || SIInstrInfo::isVMEM(*MI) ||
       SIInstrInfo::isFLAT(*MI) || SIInstrInfo::isDS(*MI) ||
       SIInstrInfo::isEXP(*MI)) && checkMAIVALUHazards(MI) > 0)
    return HazardType;

  if (isSGetReg(MI->getOpcode()) && checkGetRegHazards(MI) > 0)
    return HazardType;

  if (isSSetReg(MI->getOpcode()) && checkSetRegHazards(MI) > 0)
    return HazardType;

  if (isRFE(MI->getOpcode()) && checkRFEHazards(MI) > 0)
    return HazardType;

  if (((ST.hasReadM0MovRelInterpHazard() &&
        (TII.isVINTRP(*MI) || isSMovRel(MI->getOpcode()) ||
         MI->getOpcode() == AMDGPU::DS_WRITE_ADDTID_B32 ||
         MI->getOpcode() == AMDGPU::DS_READ_ADDTID_B32)) ||
       (ST.hasReadM0SendMsgHazard() && isSendMsgTraceDataOrGDS(TII, *MI)) ||
       (ST.hasReadM0LdsDmaHazard() && isLdsDma(*MI)) ||
       (ST.hasReadM0LdsDirectHazard() &&
        MI->readsRegister(AMDGPU::LDS_DIRECT))) &&
      checkReadM0Hazards(MI) > 0)
    return HazardType;

  if (SIInstrInfo::isMAI(*MI) && checkMAIHazards(MI) > 0)
    return HazardType;

  if ((SIInstrInfo::isVMEM(*MI) ||
       SIInstrInfo::isFLAT(*MI) ||
       SIInstrInfo::isDS(*MI)) && checkMAILdStHazards(MI) > 0)
    return HazardType;

  if (MI->isInlineAsm() && checkInlineAsmHazards(MI) > 0)
    return HazardType;

  return NoHazard;
}

static void insertNoopsInBundle(MachineInstr *MI, const SIInstrInfo &TII,
                                unsigned Quantity) {
  while (Quantity > 0) {
    unsigned Arg = std::min(Quantity, 8u);
    Quantity -= Arg;
    BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), TII.get(AMDGPU::S_NOP))
        .addImm(Arg - 1);
  }
}

unsigned
GCNHazardRecognizer::getMFMAPipelineWaitStates(const MachineInstr &MI) const {
  const MCSchedClassDesc *SC = TSchedModel.resolveSchedClass(&MI);
  assert(TSchedModel.getWriteProcResBegin(SC) !=
         TSchedModel.getWriteProcResEnd(SC));
  return TSchedModel.getWriteProcResBegin(SC)->Cycles;
}

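// Hazards may also exist between instructions inside a bundle, so each
// bundled instruction is checked (and, in hazard recognizer mode, patched
// up) individually as the bundle is stepped through.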
void GCNHazardRecognizer::processBundle() {
  MachineBasicBlock::instr_iterator MI = std::next(CurrCycleInstr->getIterator());
  MachineBasicBlock::instr_iterator E = CurrCycleInstr->getParent()->instr_end();
  // Check bundled MachineInstrs for hazards.
  for (; MI != E && MI->isInsideBundle(); ++MI) {
    CurrCycleInstr = &*MI;
    unsigned WaitStates = PreEmitNoopsCommon(CurrCycleInstr);

    if (IsHazardRecognizerMode) {
      fixHazards(CurrCycleInstr);

      insertNoopsInBundle(CurrCycleInstr, TII, WaitStates);
    }

    // It’s unnecessary to track more than MaxLookAhead instructions. Since we
    // include the bundled MI directly after, only add a maximum of
    // (MaxLookAhead - 1) noops to EmittedInstrs.
    for (unsigned i = 0, e = std::min(WaitStates, MaxLookAhead - 1); i < e; ++i)
      EmittedInstrs.push_front(nullptr);

    EmittedInstrs.push_front(CurrCycleInstr);
    EmittedInstrs.resize(MaxLookAhead);
  }
  CurrCycleInstr = nullptr;
}

void GCNHazardRecognizer::runOnInstruction(MachineInstr *MI) {
  assert(IsHazardRecognizerMode);

  unsigned NumPreNoops = PreEmitNoops(MI);
  EmitNoops(NumPreNoops);
  if (MI->isInsideBundle())
    insertNoopsInBundle(MI, TII, NumPreNoops);
  else
    TII.insertNoops(*MI->getParent(), MachineBasicBlock::iterator(MI),
                    NumPreNoops);
  EmitInstruction(MI);
  AdvanceCycle();
}

unsigned GCNHazardRecognizer::PreEmitNoops(MachineInstr *MI) {
  IsHazardRecognizerMode = true;
  CurrCycleInstr = MI;
  unsigned W = PreEmitNoopsCommon(MI);
  fixHazards(MI);
  CurrCycleInstr = nullptr;
  return W;
}

unsigned GCNHazardRecognizer::PreEmitNoopsCommon(MachineInstr *MI) {
  if (MI->isBundle())
    return 0;

  int WaitStates = 0;

  if (SIInstrInfo::isSMRD(*MI))
    return std::max(WaitStates, checkSMRDHazards(MI));

  if (ST.hasNSAtoVMEMBug())
    WaitStates = std::max(WaitStates, checkNSAtoVMEMHazard(MI));

  WaitStates = std::max(WaitStates, checkFPAtomicToDenormModeHazard(MI));

  if (ST.hasNoDataDepHazard())
    return WaitStates;

  if (SIInstrInfo::isVMEM(*MI) || SIInstrInfo::isFLAT(*MI))
    WaitStates = std::max(WaitStates, checkVMEMHazards(MI));

  if (SIInstrInfo::isVALU(*MI))
    WaitStates = std::max(WaitStates, checkVALUHazards(MI));

  if (SIInstrInfo::isDPP(*MI))
    WaitStates = std::max(WaitStates, checkDPPHazards(MI));

  if (isDivFMas(MI->getOpcode()))
    WaitStates = std::max(WaitStates, checkDivFMasHazards(MI));

  if (isRWLane(MI->getOpcode()))
    WaitStates = std::max(WaitStates, checkRWLaneHazards(MI));

  if ((SIInstrInfo::isVALU(*MI) || SIInstrInfo::isVMEM(*MI) ||
       SIInstrInfo::isFLAT(*MI) || SIInstrInfo::isDS(*MI) ||
       SIInstrInfo::isEXP(*MI)) && checkMAIVALUHazards(MI) > 0)
    WaitStates = std::max(WaitStates, checkMAIVALUHazards(MI));

  if (MI->isInlineAsm())
    return std::max(WaitStates, checkInlineAsmHazards(MI));

  if (isSGetReg(MI->getOpcode()))
    return std::max(WaitStates, checkGetRegHazards(MI));

  if (isSSetReg(MI->getOpcode()))
    return std::max(WaitStates, checkSetRegHazards(MI));

  if (isRFE(MI->getOpcode()))
    return std::max(WaitStates, checkRFEHazards(MI));

  if ((ST.hasReadM0MovRelInterpHazard() &&
       (TII.isVINTRP(*MI) || isSMovRel(MI->getOpcode()) ||
        MI->getOpcode() == AMDGPU::DS_WRITE_ADDTID_B32 ||
        MI->getOpcode() == AMDGPU::DS_READ_ADDTID_B32)) ||
      (ST.hasReadM0SendMsgHazard() && isSendMsgTraceDataOrGDS(TII, *MI)) ||
      (ST.hasReadM0LdsDmaHazard() && isLdsDma(*MI)) ||
      (ST.hasReadM0LdsDirectHazard() && MI->readsRegister(AMDGPU::LDS_DIRECT)))
    return std::max(WaitStates, checkReadM0Hazards(MI));

  if (SIInstrInfo::isMAI(*MI))
    return std::max(WaitStates, checkMAIHazards(MI));

  if (SIInstrInfo::isVMEM(*MI) ||
      SIInstrInfo::isFLAT(*MI) ||
      SIInstrInfo::isDS(*MI))
    return std::max(WaitStates, checkMAILdStHazards(MI));

  return WaitStates;
}

void GCNHazardRecognizer::EmitNoop() {
  EmittedInstrs.push_front(nullptr);
}

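// EmittedInstrs models recent issue history as a deque of wait states: the
// front is the most recent slot, and a nullptr entry stands for one idle
// wait state. An instruction with N wait states occupies its own entry plus
// N - 1 leading nullptr entries, so (for example) an instruction with
// getNumWaitStates() == 3 leaves EmittedInstrs[0..2] covering the three
// cycles it takes to issue.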
void GCNHazardRecognizer::AdvanceCycle() {
  // When the scheduler detects a stall, it will call AdvanceCycle() without
  // emitting any instructions.
  if (!CurrCycleInstr) {
    EmittedInstrs.push_front(nullptr);
    return;
  }

  if (CurrCycleInstr->isBundle()) {
    processBundle();
    return;
  }

  unsigned NumWaitStates = TII.getNumWaitStates(*CurrCycleInstr);
  if (!NumWaitStates) {
    CurrCycleInstr = nullptr;
    return;
  }

  // Keep track of emitted instructions
  EmittedInstrs.push_front(CurrCycleInstr);

  // Add a nullptr for each additional wait state after the first. Make sure
  // not to add more than getMaxLookAhead() items to the list, since we
  // truncate the list to that size right after this loop.
  for (unsigned i = 1, e = std::min(NumWaitStates, getMaxLookAhead());
       i < e; ++i) {
    EmittedInstrs.push_front(nullptr);
  }

  // getMaxLookAhead() is the largest number of wait states we will ever need
  // to insert, so there is no point in keeping track of more than that many
  // wait states.
  EmittedInstrs.resize(getMaxLookAhead());

  CurrCycleInstr = nullptr;
}

void GCNHazardRecognizer::RecedeCycle() {
  llvm_unreachable("hazard recognizer does not support bottom-up scheduling.");
}

//===----------------------------------------------------------------------===//
// Helper Functions
//===----------------------------------------------------------------------===//

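// Result of a per-instruction hazard callback: HazardFound stops the search
// and reports a hazard, HazardExpired prunes the current search path (the
// window in which the hazard could matter has closed), and NoHazardFound
// continues scanning backwards.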
typedef enum { HazardFound, HazardExpired, NoHazardFound } HazardFnResult;

typedef function_ref<bool(const MachineInstr &, int WaitStates)> IsExpiredFn;
typedef function_ref<unsigned int(const MachineInstr &)> GetNumWaitStatesFn;

// Search for a hazard in a block and its predecessors.
template <typename StateT>
static bool
hasHazard(StateT State,
          function_ref<HazardFnResult(StateT &, const MachineInstr &)> IsHazard,
          function_ref<void(StateT &, const MachineInstr &)> UpdateState,
          const MachineBasicBlock *MBB,
          MachineBasicBlock::const_reverse_instr_iterator I,
          DenseSet<const MachineBasicBlock *> &Visited) {
  for (auto E = MBB->instr_rend(); I != E; ++I) {
    // No need to look at parent BUNDLE instructions.
    if (I->isBundle())
      continue;

    switch (IsHazard(State, *I)) {
    case HazardFound:
      return true;
    case HazardExpired:
      return false;
    default:
      // Continue search
      break;
    }

    if (I->isInlineAsm() || I->isMetaInstruction())
      continue;

    UpdateState(State, *I);
  }

  for (MachineBasicBlock *Pred : MBB->predecessors()) {
    if (!Visited.insert(Pred).second)
      continue;

    if (hasHazard(State, IsHazard, UpdateState, Pred, Pred->instr_rbegin(),
                  Visited))
      return true;
  }

  return false;
}

// Returns the minimum number of wait states since \p I, walking all
// predecessors. Only scans until \p IsExpired returns true.
// Can only be run in hazard recognizer mode.
static int getWaitStatesSince(
    GCNHazardRecognizer::IsHazardFn IsHazard, const MachineBasicBlock *MBB,
    MachineBasicBlock::const_reverse_instr_iterator I, int WaitStates,
    IsExpiredFn IsExpired, DenseSet<const MachineBasicBlock *> &Visited,
    GetNumWaitStatesFn GetNumWaitStates = SIInstrInfo::getNumWaitStates) {
  for (auto E = MBB->instr_rend(); I != E; ++I) {
    // Don't add WaitStates for parent BUNDLE instructions.
    if (I->isBundle())
      continue;

    if (IsHazard(*I))
      return WaitStates;

    if (I->isInlineAsm())
      continue;

    WaitStates += GetNumWaitStates(*I);

    if (IsExpired(*I, WaitStates))
      return std::numeric_limits<int>::max();
  }

  int MinWaitStates = std::numeric_limits<int>::max();
  for (MachineBasicBlock *Pred : MBB->predecessors()) {
    if (!Visited.insert(Pred).second)
      continue;

    int W = getWaitStatesSince(IsHazard, Pred, Pred->instr_rbegin(), WaitStates,
                               IsExpired, Visited, GetNumWaitStates);

    MinWaitStates = std::min(MinWaitStates, W);
  }

  return MinWaitStates;
}

static int getWaitStatesSince(GCNHazardRecognizer::IsHazardFn IsHazard,
                              const MachineInstr *MI, IsExpiredFn IsExpired) {
  DenseSet<const MachineBasicBlock *> Visited;
  return getWaitStatesSince(IsHazard, MI->getParent(),
                            std::next(MI->getReverseIterator()),
                            0, IsExpired, Visited);
}

int GCNHazardRecognizer::getWaitStatesSince(IsHazardFn IsHazard, int Limit) {
  if (IsHazardRecognizerMode) {
    auto IsExpiredFn = [Limit](const MachineInstr &, int WaitStates) {
      return WaitStates >= Limit;
    };
    return ::getWaitStatesSince(IsHazard, CurrCycleInstr, IsExpiredFn);
  }

  int WaitStates = 0;
  for (MachineInstr *MI : EmittedInstrs) {
    if (MI) {
      if (IsHazard(*MI))
        return WaitStates;

      if (MI->isInlineAsm())
        continue;
    }
    ++WaitStates;

    if (WaitStates >= Limit)
      break;
  }
  return std::numeric_limits<int>::max();
}

int GCNHazardRecognizer::getWaitStatesSinceDef(unsigned Reg,
                                               IsHazardFn IsHazardDef,
                                               int Limit) {
  const SIRegisterInfo *TRI = ST.getRegisterInfo();

  auto IsHazardFn = [IsHazardDef, TRI, Reg](const MachineInstr &MI) {
    return IsHazardDef(MI) && MI.modifiesRegister(Reg, TRI);
  };

  return getWaitStatesSince(IsHazardFn, Limit);
}

int GCNHazardRecognizer::getWaitStatesSinceSetReg(IsHazardFn IsHazard,
                                                  int Limit) {
  auto IsHazardFn = [IsHazard](const MachineInstr &MI) {
    return isSSetReg(MI.getOpcode()) && IsHazard(MI);
  };

  return getWaitStatesSince(IsHazardFn, Limit);
}

//===----------------------------------------------------------------------===//
// No-op Hazard Detection
//===----------------------------------------------------------------------===//

static void addRegUnits(const SIRegisterInfo &TRI, BitVector &BV,
                        MCRegister Reg) {
  for (MCRegUnit Unit : TRI.regunits(Reg))
    BV.set(Unit);
}

static void addRegsToSet(const SIRegisterInfo &TRI,
                         iterator_range<MachineInstr::const_mop_iterator> Ops,
                         BitVector &DefSet, BitVector &UseSet) {
  for (const MachineOperand &Op : Ops) {
    if (Op.isReg())
      addRegUnits(TRI, Op.isDef() ? DefSet : UseSet, Op.getReg().asMCReg());
  }
}

void GCNHazardRecognizer::addClauseInst(const MachineInstr &MI) {
  addRegsToSet(TRI, MI.operands(), ClauseDefs, ClauseUses);
}

static bool breaksSMEMSoftClause(MachineInstr *MI) {
  return !SIInstrInfo::isSMRD(*MI);
}

static bool breaksVMEMSoftClause(MachineInstr *MI) {
  return !SIInstrInfo::isVMEM(*MI) && !SIInstrInfo::isFLAT(*MI);
}

int GCNHazardRecognizer::checkSoftClauseHazards(MachineInstr *MEM) {
  // SMEM soft clauses are only present on VI+, and only matter if xnack is
  // enabled.
  if (!ST.isXNACKEnabled())
    return 0;

  bool IsSMRD = TII.isSMRD(*MEM);

  resetClause();

  // A soft-clause is any group of consecutive SMEM instructions. The
  // instructions in this group may return out of order and/or may be
  // replayed (i.e. the same instruction issued more than once).
  //
  // In order to handle these situations correctly we need to make sure that
  // when a clause has more than one instruction, no instruction in the clause
  // writes to a register that is read by another instruction in the clause
  // (including itself). If we encounter this situation, we need to break the
  // clause by inserting a non SMEM instruction.

  for (MachineInstr *MI : EmittedInstrs) {
    // When we hit a non-SMEM instruction then we have passed the start of the
    // clause and we can stop.
    if (!MI)
      break;

    if (IsSMRD ? breaksSMEMSoftClause(MI) : breaksVMEMSoftClause(MI))
      break;

    addClauseInst(*MI);
  }

  if (ClauseDefs.none())
    return 0;

  // We need to make sure not to put loads and stores in the same clause if they
  // use the same address. For now, just start a new clause whenever we see a
  // store.
  if (MEM->mayStore())
    return 1;

  addClauseInst(*MEM);

  // If the set of defs and uses intersect then we cannot add this instruction
  // to the clause, so we have a hazard.
  return ClauseDefs.anyCommon(ClauseUses) ? 1 : 0;
}

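// Each of the following check* routines returns the number of wait states
// that must still elapse before its instruction can issue safely; a result
// of zero or less means there is no hazard.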
int GCNHazardRecognizer::checkSMRDHazards(MachineInstr *SMRD) {
  int WaitStatesNeeded = 0;

  WaitStatesNeeded = checkSoftClauseHazards(SMRD);

  // This SMRD hazard only affects SI.
  if (!ST.hasSMRDReadVALUDefHazard())
    return WaitStatesNeeded;

  // A read of an SGPR by an SMRD instruction requires 4 wait states when the
  // SGPR was written by a VALU instruction.
  int SmrdSgprWaitStates = 4;
  auto IsHazardDefFn = [this](const MachineInstr &MI) {
    return TII.isVALU(MI);
  };
  auto IsBufferHazardDefFn = [this](const MachineInstr &MI) {
    return TII.isSALU(MI);
  };

  bool IsBufferSMRD = TII.isBufferSMRD(*SMRD);

  for (const MachineOperand &Use : SMRD->uses()) {
    if (!Use.isReg())
      continue;
    int WaitStatesNeededForUse =
        SmrdSgprWaitStates - getWaitStatesSinceDef(Use.getReg(), IsHazardDefFn,
                                                   SmrdSgprWaitStates);
    WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);

    // This fixes what appears to be undocumented hardware behavior in SI where
    // an s_mov writing a descriptor and an s_buffer_load_dword reading the
    // descriptor need some number of nops in between. We don't know how many
    // we need, but let's use 4. This wasn't discovered before probably because
    // the only case when this happens is when we expand a 64-bit pointer into
    // a full descriptor and use s_buffer_load_dword instead of s_load_dword,
    // which was probably never encountered in the closed-source land.
    if (IsBufferSMRD) {
      int WaitStatesNeededForUse =
          SmrdSgprWaitStates - getWaitStatesSinceDef(Use.getReg(),
                                                     IsBufferHazardDefFn,
                                                     SmrdSgprWaitStates);
      WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
    }
  }

  return WaitStatesNeeded;
}

int GCNHazardRecognizer::checkVMEMHazards(MachineInstr* VMEM) {
  if (!ST.hasVMEMReadSGPRVALUDefHazard())
    return 0;

  int WaitStatesNeeded = checkSoftClauseHazards(VMEM);

  // A read of an SGPR by a VMEM instruction requires 5 wait states when the
  // SGPR was written by a VALU instruction.
  const int VmemSgprWaitStates = 5;
  auto IsHazardDefFn = [this](const MachineInstr &MI) {
    return TII.isVALU(MI);
  };
  for (const MachineOperand &Use : VMEM->uses()) {
    if (!Use.isReg() || TRI.isVectorRegister(MF.getRegInfo(), Use.getReg()))
      continue;

    int WaitStatesNeededForUse =
        VmemSgprWaitStates - getWaitStatesSinceDef(Use.getReg(), IsHazardDefFn,
                                                   VmemSgprWaitStates);
    WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
  }
  return WaitStatesNeeded;
}

int GCNHazardRecognizer::checkDPPHazards(MachineInstr *DPP) {
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  const SIInstrInfo *TII = ST.getInstrInfo();

  // Check for DPP VGPR read after VALU VGPR write and EXEC write.
  int DppVgprWaitStates = 2;
  int DppExecWaitStates = 5;
  int WaitStatesNeeded = 0;
  auto IsHazardDefFn = [TII](const MachineInstr &MI) {
    return TII->isVALU(MI);
  };

  for (const MachineOperand &Use : DPP->uses()) {
    if (!Use.isReg() || !TRI->isVGPR(MF.getRegInfo(), Use.getReg()))
      continue;
    int WaitStatesNeededForUse =
        DppVgprWaitStates - getWaitStatesSinceDef(
                                Use.getReg(),
                                [](const MachineInstr &) { return true; },
                                DppVgprWaitStates);
    WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
  }

  WaitStatesNeeded = std::max(
      WaitStatesNeeded,
      DppExecWaitStates - getWaitStatesSinceDef(AMDGPU::EXEC, IsHazardDefFn,
                                                DppExecWaitStates));

  return WaitStatesNeeded;
}

int GCNHazardRecognizer::checkDivFMasHazards(MachineInstr *DivFMas) {
  const SIInstrInfo *TII = ST.getInstrInfo();

  // v_div_fmas requires 4 wait states after a write to vcc from a VALU
  // instruction.
  const int DivFMasWaitStates = 4;
  auto IsHazardDefFn = [TII](const MachineInstr &MI) {
    return TII->isVALU(MI);
  };
  int WaitStatesNeeded = getWaitStatesSinceDef(AMDGPU::VCC, IsHazardDefFn,
                                               DivFMasWaitStates);

  return DivFMasWaitStates - WaitStatesNeeded;
}

int GCNHazardRecognizer::checkGetRegHazards(MachineInstr *GetRegInstr) {
  const SIInstrInfo *TII = ST.getInstrInfo();
  unsigned GetRegHWReg = getHWReg(TII, *GetRegInstr);

  const int GetRegWaitStates = 2;
  auto IsHazardFn = [TII, GetRegHWReg](const MachineInstr &MI) {
    return GetRegHWReg == getHWReg(TII, MI);
  };
  int WaitStatesNeeded = getWaitStatesSinceSetReg(IsHazardFn, GetRegWaitStates);

  return GetRegWaitStates - WaitStatesNeeded;
}

int GCNHazardRecognizer::checkSetRegHazards(MachineInstr *SetRegInstr) {
  const SIInstrInfo *TII = ST.getInstrInfo();
  unsigned HWReg = getHWReg(TII, *SetRegInstr);

  const int SetRegWaitStates = ST.getSetRegWaitStates();
  auto IsHazardFn = [TII, HWReg](const MachineInstr &MI) {
    return HWReg == getHWReg(TII, MI);
  };
  int WaitStatesNeeded = getWaitStatesSinceSetReg(IsHazardFn, SetRegWaitStates);
  return SetRegWaitStates - WaitStatesNeeded;
}

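// Returns the index of the hazardous store-data operand if \p MI is a VMEM
// store whose data could be clobbered by a following VALU write, or -1 if
// the instruction cannot create such a hazard.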
int GCNHazardRecognizer::createsVALUHazard(const MachineInstr &MI) {
  if (!MI.mayStore())
    return -1;

  const SIInstrInfo *TII = ST.getInstrInfo();
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MI.getDesc();

  int VDataIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdata);
  int VDataRCID = -1;
  if (VDataIdx != -1)
    VDataRCID = Desc.operands()[VDataIdx].RegClass;

  if (TII->isMUBUF(MI) || TII->isMTBUF(MI)) {
    // There is no hazard if the instruction does not use vector regs
    // (like wbinvl1)
    if (VDataIdx == -1)
      return -1;
    // For MUBUF/MTBUF instructions this hazard only exists if the
    // instruction is not using a register in the soffset field.
    const MachineOperand *SOffset =
        TII->getNamedOperand(MI, AMDGPU::OpName::soffset);
    // If we have no soffset operand, then assume this field has been
    // hardcoded to zero.
    if (AMDGPU::getRegBitWidth(VDataRCID) > 64 &&
        (!SOffset || !SOffset->isReg()))
      return VDataIdx;
  }

  // MIMG instructions create a hazard if they don't use a 256-bit T# and
  // the store size is greater than 8 bytes and they have more than two bits
  // of their dmask set.
  // All our MIMG definitions use a 256-bit T#, so we can skip checking for them.
  if (TII->isMIMG(MI)) {
    int SRsrcIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::srsrc);
    assert(SRsrcIdx != -1 &&
           AMDGPU::getRegBitWidth(Desc.operands()[SRsrcIdx].RegClass) == 256);
    (void)SRsrcIdx;
  }

  if (TII->isFLAT(MI)) {
    int DataIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdata);
    if (AMDGPU::getRegBitWidth(Desc.operands()[DataIdx].RegClass) > 64)
      return DataIdx;
  }

  return -1;
}

int
GCNHazardRecognizer::checkVALUHazardsHelper(const MachineOperand &Def,
                                            const MachineRegisterInfo &MRI) {
  // Helper to check for the hazard where VMEM instructions that store more
  // than 8 bytes can have their store data overwritten by the next
  // instruction.
  const SIRegisterInfo *TRI = ST.getRegisterInfo();

  const int VALUWaitStates = ST.hasGFX940Insts() ? 2 : 1;
  int WaitStatesNeeded = 0;

  if (!TRI->isVectorRegister(MRI, Def.getReg()))
    return WaitStatesNeeded;
  Register Reg = Def.getReg();
  auto IsHazardFn = [this, Reg, TRI](const MachineInstr &MI) {
    int DataIdx = createsVALUHazard(MI);
    return DataIdx >= 0 &&
           TRI->regsOverlap(MI.getOperand(DataIdx).getReg(), Reg);
  };
  int WaitStatesNeededForDef =
      VALUWaitStates - getWaitStatesSince(IsHazardFn, VALUWaitStates);
  WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForDef);

  return WaitStatesNeeded;
}

int GCNHazardRecognizer::checkVALUHazards(MachineInstr *VALU) {
  int WaitStatesNeeded = 0;

  if (ST.hasTransForwardingHazard() && !SIInstrInfo::isTRANS(*VALU)) {
    const int TransDefWaitstates = 1;

    auto IsTransDefFn = [this, VALU](const MachineInstr &MI) {
      if (!SIInstrInfo::isTRANS(MI))
        return false;
      const SIRegisterInfo *TRI = ST.getRegisterInfo();
      const SIInstrInfo *TII = ST.getInstrInfo();
      Register Def = TII->getNamedOperand(MI, AMDGPU::OpName::vdst)->getReg();

      for (const MachineOperand &Use : VALU->explicit_uses()) {
        if (Use.isReg() && TRI->regsOverlap(Def, Use.getReg()))
          return true;
      }

      return false;
    };

    int WaitStatesNeededForDef =
        TransDefWaitstates -
        getWaitStatesSince(IsTransDefFn, TransDefWaitstates);
    WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForDef);
  }

  if (ST.hasDstSelForwardingHazard()) {
    const int Shift16DefWaitstates = 1;

    auto IsShift16BitDefFn = [this, VALU](const MachineInstr &MI) {
      if (!SIInstrInfo::isVALU(MI))
        return false;
      const SIInstrInfo *TII = ST.getInstrInfo();
      if (SIInstrInfo::isSDWA(MI)) {
        if (auto *DstSel = TII->getNamedOperand(MI, AMDGPU::OpName::dst_sel))
          if (DstSel->getImm() == AMDGPU::SDWA::DWORD)
            return false;
      } else {
        if (!AMDGPU::hasNamedOperand(MI.getOpcode(), AMDGPU::OpName::op_sel) ||
            !(TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)
                  ->getImm() &
              SISrcMods::DST_OP_SEL))
          return false;
      }
      const SIRegisterInfo *TRI = ST.getRegisterInfo();
      if (auto *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst)) {
        Register Def = Dst->getReg();

        for (const MachineOperand &Use : VALU->explicit_uses()) {
          if (Use.isReg() && TRI->regsOverlap(Def, Use.getReg()))
            return true;
        }
      }

      return false;
    };

    int WaitStatesNeededForDef =
        Shift16DefWaitstates -
        getWaitStatesSince(IsShift16BitDefFn, Shift16DefWaitstates);
    WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForDef);
  }

  if (ST.hasVDecCoExecHazard()) {
    const int VALUWriteSGPRVALUReadWaitstates = 2;
    const int VALUWriteEXECRWLane = 4;
    const int VALUWriteVGPRReadlaneRead = 1;

    const SIRegisterInfo *TRI = ST.getRegisterInfo();
    const MachineRegisterInfo &MRI = MF.getRegInfo();
    Register UseReg;
    auto IsVALUDefSGPRFn = [&UseReg, TRI](const MachineInstr &MI) {
      if (!SIInstrInfo::isVALU(MI))
        return false;
      return MI.modifiesRegister(UseReg, TRI);
    };

    for (const MachineOperand &Use : VALU->explicit_uses()) {
      if (!Use.isReg())
        continue;

      UseReg = Use.getReg();
      if (TRI->isSGPRReg(MRI, UseReg)) {
        int WaitStatesNeededForDef =
            VALUWriteSGPRVALUReadWaitstates -
            getWaitStatesSince(IsVALUDefSGPRFn,
                               VALUWriteSGPRVALUReadWaitstates);
        WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForDef);
      }
    }

    if (VALU->readsRegister(AMDGPU::VCC, TRI)) {
      UseReg = AMDGPU::VCC;
      int WaitStatesNeededForDef =
          VALUWriteSGPRVALUReadWaitstates -
          getWaitStatesSince(IsVALUDefSGPRFn, VALUWriteSGPRVALUReadWaitstates);
      WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForDef);
    }

    switch (VALU->getOpcode()) {
    case AMDGPU::V_READLANE_B32:
    case AMDGPU::V_READFIRSTLANE_B32: {
      MachineOperand *Src = TII.getNamedOperand(*VALU, AMDGPU::OpName::src0);
      UseReg = Src->getReg();
      int WaitStatesNeededForDef =
          VALUWriteVGPRReadlaneRead -
          getWaitStatesSince(IsVALUDefSGPRFn, VALUWriteVGPRReadlaneRead);
      WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForDef);
    }
      [[fallthrough]];
    case AMDGPU::V_WRITELANE_B32: {
      UseReg = AMDGPU::EXEC;
      int WaitStatesNeededForDef =
          VALUWriteEXECRWLane -
          getWaitStatesSince(IsVALUDefSGPRFn, VALUWriteEXECRWLane);
      WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForDef);
      break;
    }
    default:
      break;
    }
  }

  // This checks for the hazard where VMEM instructions that store more than
  // 8 bytes can have their store data overwritten by the next instruction.
  if (!ST.has12DWordStoreHazard())
    return WaitStatesNeeded;

  const MachineRegisterInfo &MRI = MF.getRegInfo();

  for (const MachineOperand &Def : VALU->defs()) {
    WaitStatesNeeded = std::max(WaitStatesNeeded, checkVALUHazardsHelper(Def, MRI));
  }

  return WaitStatesNeeded;
}

int GCNHazardRecognizer::checkInlineAsmHazards(MachineInstr *IA) {
  // This checks for hazards associated with inline asm statements.
  // Since inline asms can contain just about anything, we use this
  // to call/leverage other check*Hazard routines. Note that
  // this function doesn't attempt to address all possible inline asm
  // hazards (good luck), but is a collection of what has been
  // problematic thus far.

  // see checkVALUHazards()
  if (!ST.has12DWordStoreHazard())
    return 0;

  const MachineRegisterInfo &MRI = MF.getRegInfo();
  int WaitStatesNeeded = 0;

  for (const MachineOperand &Op :
       llvm::drop_begin(IA->operands(), InlineAsm::MIOp_FirstOperand)) {
    if (Op.isReg() && Op.isDef()) {
      WaitStatesNeeded =
          std::max(WaitStatesNeeded, checkVALUHazardsHelper(Op, MRI));
    }
  }

  return WaitStatesNeeded;
}

int GCNHazardRecognizer::checkRWLaneHazards(MachineInstr *RWLane) {
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  const MachineOperand *LaneSelectOp =
      TII->getNamedOperand(*RWLane, AMDGPU::OpName::src1);

  if (!LaneSelectOp->isReg() || !TRI->isSGPRReg(MRI, LaneSelectOp->getReg()))
    return 0;

  Register LaneSelectReg = LaneSelectOp->getReg();
  auto IsHazardFn = [TII](const MachineInstr &MI) { return TII->isVALU(MI); };

  const int RWLaneWaitStates = 4;
  int WaitStatesSince = getWaitStatesSinceDef(LaneSelectReg, IsHazardFn,
                                              RWLaneWaitStates);
  return RWLaneWaitStates - WaitStatesSince;
}

int GCNHazardRecognizer::checkRFEHazards(MachineInstr *RFE) {
  if (!ST.hasRFEHazards())
    return 0;

  const SIInstrInfo *TII = ST.getInstrInfo();

  const int RFEWaitStates = 1;

  auto IsHazardFn = [TII](const MachineInstr &MI) {
    return getHWReg(TII, MI) == AMDGPU::Hwreg::ID_TRAPSTS;
  };
  int WaitStatesNeeded = getWaitStatesSinceSetReg(IsHazardFn, RFEWaitStates);
  return RFEWaitStates - WaitStatesNeeded;
}

int GCNHazardRecognizer::checkReadM0Hazards(MachineInstr *MI) {
  const SIInstrInfo *TII = ST.getInstrInfo();
  const int ReadM0WaitStates = 1;
  auto IsHazardFn = [TII](const MachineInstr &MI) { return TII->isSALU(MI); };
  return ReadM0WaitStates -
         getWaitStatesSinceDef(AMDGPU::M0, IsHazardFn, ReadM0WaitStates);
}

void GCNHazardRecognizer::fixHazards(MachineInstr *MI) {
  fixVMEMtoScalarWriteHazards(MI);
  fixVcmpxPermlaneHazards(MI);
  fixSMEMtoVectorWriteHazards(MI);
  fixVcmpxExecWARHazard(MI);
  fixLdsBranchVmemWARHazard(MI);
  if (ST.hasLdsDirect()) {
    fixLdsDirectVALUHazard(MI);
    fixLdsDirectVMEMHazard(MI);
  }
  fixVALUPartialForwardingHazard(MI);
  fixVALUTransUseHazard(MI);
  fixWMMAHazards(MI);
  fixShift64HighRegBug(MI);
  fixVALUMaskWriteHazard(MI);
}

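// The fix* routines below run only in hazard recognizer mode (from
// fixHazards()). Each one scans backwards for a specific hazard pattern
// and, if found, inserts a mitigating instruction (s_waitcnt_depctr,
// v_nop, s_mov_b32, ...) before MI, returning true if it changed the code.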
bool GCNHazardRecognizer::fixVcmpxPermlaneHazards(MachineInstr *MI) {
  if (!ST.hasVcmpxPermlaneHazard() || !isPermlane(*MI))
    return false;

  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  auto IsHazardFn = [TII, TRI](const MachineInstr &MI) {
    return (TII->isVOPC(MI) ||
            ((TII->isVOP3(MI) || TII->isSDWA(MI)) && MI.isCompare())) &&
           MI.modifiesRegister(AMDGPU::EXEC, TRI);
  };

  auto IsExpiredFn = [](const MachineInstr &MI, int) {
    unsigned Opc = MI.getOpcode();
    return SIInstrInfo::isVALU(MI) && Opc != AMDGPU::V_NOP_e32 &&
           Opc != AMDGPU::V_NOP_e64 && Opc != AMDGPU::V_NOP_sdwa;
  };

  if (::getWaitStatesSince(IsHazardFn, MI, IsExpiredFn) ==
      std::numeric_limits<int>::max())
    return false;

  // V_NOP will be discarded by SQ.
  // Use V_MOV_B32 v?, v?. Register must be alive so use src0 of V_PERMLANE*
  // which is always a VGPR and available.
  auto *Src0 = TII->getNamedOperand(*MI, AMDGPU::OpName::src0);
  Register Reg = Src0->getReg();
  bool IsUndef = Src0->isUndef();
  BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
          TII->get(AMDGPU::V_MOV_B32_e32))
      .addReg(Reg, RegState::Define | (IsUndef ? RegState::Dead : 0))
      .addReg(Reg, IsUndef ? RegState::Undef : RegState::Kill);

  return true;
}

bool GCNHazardRecognizer::fixVMEMtoScalarWriteHazards(MachineInstr *MI) {
  if (!ST.hasVMEMtoScalarWriteHazard())
    return false;

  if (!SIInstrInfo::isSALU(*MI) && !SIInstrInfo::isSMRD(*MI))
    return false;

  if (MI->getNumDefs() == 0)
    return false;

  const SIRegisterInfo *TRI = ST.getRegisterInfo();

  auto IsHazardFn = [TRI, MI](const MachineInstr &I) {
    if (!SIInstrInfo::isVMEM(I) && !SIInstrInfo::isDS(I) &&
        !SIInstrInfo::isFLAT(I))
      return false;

    for (const MachineOperand &Def : MI->defs()) {
      const MachineOperand *Op =
          I.findRegisterUseOperand(Def.getReg(), false, TRI);
      if (!Op)
        continue;
      return true;
    }
    return false;
  };

  auto IsExpiredFn = [](const MachineInstr &MI, int) {
    return SIInstrInfo::isVALU(MI) ||
           (MI.getOpcode() == AMDGPU::S_WAITCNT &&
            !MI.getOperand(0).getImm()) ||
           (MI.getOpcode() == AMDGPU::S_WAITCNT_DEPCTR &&
            AMDGPU::DepCtr::decodeFieldVmVsrc(MI.getOperand(0).getImm()) == 0);
  };

  if (::getWaitStatesSince(IsHazardFn, MI, IsExpiredFn) ==
      std::numeric_limits<int>::max())
    return false;

  const SIInstrInfo *TII = ST.getInstrInfo();
  BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
          TII->get(AMDGPU::S_WAITCNT_DEPCTR))
      .addImm(AMDGPU::DepCtr::encodeFieldVmVsrc(0));
  return true;
}

bool GCNHazardRecognizer::fixSMEMtoVectorWriteHazards(MachineInstr *MI) {
  if (!ST.hasSMEMtoVectorWriteHazard())
    return false;

  if (!SIInstrInfo::isVALU(*MI))
    return false;

  unsigned SDSTName;
  switch (MI->getOpcode()) {
  case AMDGPU::V_READLANE_B32:
  case AMDGPU::V_READFIRSTLANE_B32:
    SDSTName = AMDGPU::OpName::vdst;
    break;
  default:
    SDSTName = AMDGPU::OpName::sdst;
    break;
  }

  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  const AMDGPU::IsaVersion IV = AMDGPU::getIsaVersion(ST.getCPU());
  const MachineOperand *SDST = TII->getNamedOperand(*MI, SDSTName);
  if (!SDST) {
    for (const auto &MO : MI->implicit_operands()) {
      if (MO.isDef() && TRI->isSGPRClass(TRI->getPhysRegBaseClass(MO.getReg()))) {
        SDST = &MO;
        break;
      }
    }
  }

  if (!SDST)
    return false;

  const Register SDSTReg = SDST->getReg();
  auto IsHazardFn = [SDSTReg, TRI](const MachineInstr &I) {
    return SIInstrInfo::isSMRD(I) && I.readsRegister(SDSTReg, TRI);
  };

  auto IsExpiredFn = [TII, IV](const MachineInstr &MI, int) {
    if (TII->isSALU(MI)) {
      switch (MI.getOpcode()) {
      case AMDGPU::S_SETVSKIP:
      case AMDGPU::S_VERSION:
      case AMDGPU::S_WAITCNT_VSCNT:
      case AMDGPU::S_WAITCNT_VMCNT:
      case AMDGPU::S_WAITCNT_EXPCNT:
        // These instructions cannot mitigate the hazard.
        return false;
      case AMDGPU::S_WAITCNT_LGKMCNT:
        // Reducing lgkmcnt count to 0 always mitigates the hazard.
        return (MI.getOperand(1).getImm() == 0) &&
               (MI.getOperand(0).getReg() == AMDGPU::SGPR_NULL);
      case AMDGPU::S_WAITCNT: {
        const int64_t Imm = MI.getOperand(0).getImm();
        AMDGPU::Waitcnt Decoded = AMDGPU::decodeWaitcnt(IV, Imm);
        return (Decoded.LgkmCnt == 0);
      }
      default:
        // SOPP instructions cannot mitigate the hazard.
        if (TII->isSOPP(MI))
          return false;
        // At this point the SALU can be assumed to mitigate the hazard
        // because either:
        // (a) it is independent of the at risk SMEM (breaking chain),
        // or
        // (b) it is dependent on the SMEM, in which case an appropriate
        //     s_waitcnt lgkmcnt _must_ exist between it and the at risk
        //     SMEM instruction.
        return true;
      }
    }
    return false;
  };

  if (::getWaitStatesSince(IsHazardFn, MI, IsExpiredFn) ==
      std::numeric_limits<int>::max())
    return false;

  BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
          TII->get(AMDGPU::S_MOV_B32), AMDGPU::SGPR_NULL)
      .addImm(0);
  return true;
}

bool GCNHazardRecognizer::fixVcmpxExecWARHazard(MachineInstr *MI) {
  if (!ST.hasVcmpxExecWARHazard() || !SIInstrInfo::isVALU(*MI))
    return false;

  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  if (!MI->modifiesRegister(AMDGPU::EXEC, TRI))
    return false;

  auto IsHazardFn = [TRI](const MachineInstr &I) {
    if (SIInstrInfo::isVALU(I))
      return false;
    return I.readsRegister(AMDGPU::EXEC, TRI);
  };

  const SIInstrInfo *TII = ST.getInstrInfo();
  auto IsExpiredFn = [TII, TRI](const MachineInstr &MI, int) {
    if (SIInstrInfo::isVALU(MI)) {
      if (TII->getNamedOperand(MI, AMDGPU::OpName::sdst))
        return true;
      for (auto MO : MI.implicit_operands())
        if (MO.isDef() && TRI->isSGPRClass(TRI->getPhysRegBaseClass(MO.getReg())))
          return true;
    }
    if (MI.getOpcode() == AMDGPU::S_WAITCNT_DEPCTR &&
        AMDGPU::DepCtr::encodeFieldSaSdst(MI.getOperand(0).getImm(), 0) ==
            0xfffe)
      return true;
    return false;
  };

  if (::getWaitStatesSince(IsHazardFn, MI, IsExpiredFn) ==
      std::numeric_limits<int>::max())
    return false;

  BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
          TII->get(AMDGPU::S_WAITCNT_DEPCTR))
      .addImm(AMDGPU::DepCtr::encodeFieldSaSdst(0));
  return true;
}

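// Scanning the whole function for the LDS/VMEM pair is done once up front;
// the result is cached in RunLdsBranchVmemWARHazardFixup by the constructor
// so that fixLdsBranchVmemWARHazard() can bail out cheaply per instruction.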
static bool shouldRunLdsBranchVmemWARHazardFixup(const MachineFunction &MF,
                                                 const GCNSubtarget &ST) {
  if (!ST.hasLdsBranchVmemWARHazard())
    return false;

  // Check if the necessary condition for the hazard is met: both LDS and VMEM
  // instructions need to appear in the same function.
  bool HasLds = false;
  bool HasVmem = false;
  for (auto &MBB : MF) {
    for (auto &MI : MBB) {
      HasLds |= SIInstrInfo::isDS(MI);
      HasVmem |=
          SIInstrInfo::isVMEM(MI) || SIInstrInfo::isSegmentSpecificFLAT(MI);
      if (HasLds && HasVmem)
        return true;
    }
  }
  return false;
}

static bool isStoreCountWaitZero(const MachineInstr &I) {
  return I.getOpcode() == AMDGPU::S_WAITCNT_VSCNT &&
         I.getOperand(0).getReg() == AMDGPU::SGPR_NULL &&
         !I.getOperand(1).getImm();
}

bool GCNHazardRecognizer::fixLdsBranchVmemWARHazard(MachineInstr *MI) {
  if (!RunLdsBranchVmemWARHazardFixup)
    return false;

  assert(ST.hasLdsBranchVmemWARHazard());

  auto IsHazardInst = [](const MachineInstr &MI) {
    if (SIInstrInfo::isDS(MI))
      return 1;
    if (SIInstrInfo::isVMEM(MI) || SIInstrInfo::isSegmentSpecificFLAT(MI))
      return 2;
    return 0;
  };

  auto InstType = IsHazardInst(*MI);
  if (!InstType)
    return false;

  auto IsExpiredFn = [&IsHazardInst](const MachineInstr &I, int) {
    return IsHazardInst(I) || isStoreCountWaitZero(I);
  };

  auto IsHazardFn = [InstType, &IsHazardInst](const MachineInstr &I) {
    if (!I.isBranch())
      return false;

    auto IsHazardFn = [InstType, IsHazardInst](const MachineInstr &I) {
      auto InstType2 = IsHazardInst(I);
      return InstType2 && InstType != InstType2;
    };

    auto IsExpiredFn = [InstType, &IsHazardInst](const MachineInstr &I, int) {
      auto InstType2 = IsHazardInst(I);
      if (InstType == InstType2)
        return true;

      return isStoreCountWaitZero(I);
    };

    return ::getWaitStatesSince(IsHazardFn, &I, IsExpiredFn) !=
           std::numeric_limits<int>::max();
  };

  if (::getWaitStatesSince(IsHazardFn, MI, IsExpiredFn) ==
      std::numeric_limits<int>::max())
    return false;

  const SIInstrInfo *TII = ST.getInstrInfo();
  BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
          TII->get(AMDGPU::S_WAITCNT_VSCNT))
      .addReg(AMDGPU::SGPR_NULL, RegState::Undef)
      .addImm(0);

  return true;
}

bool GCNHazardRecognizer::fixLdsDirectVALUHazard(MachineInstr *MI) {
  if (!SIInstrInfo::isLDSDIR(*MI))
    return false;

  const int NoHazardWaitStates = 15;
  const MachineOperand *VDST = TII.getNamedOperand(*MI, AMDGPU::OpName::vdst);
  const Register VDSTReg = VDST->getReg();

  bool VisitedTrans = false;
  auto IsHazardFn = [this, VDSTReg, &VisitedTrans](const MachineInstr &I) {
    if (!SIInstrInfo::isVALU(I))
      return false;
    VisitedTrans = VisitedTrans || SIInstrInfo::isTRANS(I);
    // Cover both WAR and WAW
    return I.readsRegister(VDSTReg, &TRI) || I.modifiesRegister(VDSTReg, &TRI);
  };
  auto IsExpiredFn = [&](const MachineInstr &I, int WaitStates) {
    if (WaitStates >= NoHazardWaitStates)
      return true;
    // Instructions which cause va_vdst==0 expire hazard
    return SIInstrInfo::isVMEM(I) || SIInstrInfo::isFLAT(I) ||
           SIInstrInfo::isDS(I) || SIInstrInfo::isEXP(I);
  };
  auto GetWaitStatesFn = [](const MachineInstr &MI) {
    return SIInstrInfo::isVALU(MI) ? 1 : 0;
  };

  DenseSet<const MachineBasicBlock *> Visited;
  auto Count = ::getWaitStatesSince(IsHazardFn, MI->getParent(),
                                    std::next(MI->getReverseIterator()), 0,
                                    IsExpiredFn, Visited, GetWaitStatesFn);

  // Transcendentals can execute in parallel to other VALUs.
  // This makes va_vdst count unusable with a mixture of VALU and TRANS.
  if (VisitedTrans)
    Count = 0;

  MachineOperand *WaitVdstOp =
      TII.getNamedOperand(*MI, AMDGPU::OpName::waitvdst);
  WaitVdstOp->setImm(std::min(Count, NoHazardWaitStates));

  return true;
}

bool GCNHazardRecognizer::fixLdsDirectVMEMHazard(MachineInstr *MI) {
  if (!SIInstrInfo::isLDSDIR(*MI))
    return false;

  const MachineOperand *VDST = TII.getNamedOperand(*MI, AMDGPU::OpName::vdst);
  const Register VDSTReg = VDST->getReg();

  auto IsHazardFn = [this, VDSTReg](const MachineInstr &I) {
    if (!SIInstrInfo::isVMEM(I) && !SIInstrInfo::isFLAT(I) &&
        !SIInstrInfo::isDS(I))
      return false;
    return I.readsRegister(VDSTReg, &TRI) || I.modifiesRegister(VDSTReg, &TRI);
  };
  auto IsExpiredFn = [](const MachineInstr &I, int) {
    return SIInstrInfo::isVALU(I) || SIInstrInfo::isEXP(I) ||
           (I.getOpcode() == AMDGPU::S_WAITCNT && !I.getOperand(0).getImm()) ||
           (I.getOpcode() == AMDGPU::S_WAITCNT_DEPCTR &&
            AMDGPU::DepCtr::decodeFieldVmVsrc(I.getOperand(0).getImm()) == 0);
  };

  if (::getWaitStatesSince(IsHazardFn, MI, IsExpiredFn) ==
      std::numeric_limits<int>::max())
    return false;

  BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
          TII.get(AMDGPU::S_WAITCNT_DEPCTR))
      .addImm(AMDGPU::DepCtr::encodeFieldVmVsrc(0));

  return true;
}

bool GCNHazardRecognizer::fixVALUPartialForwardingHazard(MachineInstr *MI) {
  if (!ST.isWave64())
    return false;
  if (!ST.hasVALUPartialForwardingHazard())
    return false;
  if (!SIInstrInfo::isVALU(*MI))
    return false;

  SmallSetVector<Register, 4> SrcVGPRs;

  for (const MachineOperand &Use : MI->explicit_uses()) {
    if (Use.isReg() && TRI.isVGPR(MF.getRegInfo(), Use.getReg()))
      SrcVGPRs.insert(Use.getReg());
  }

  // Only applies with >= 2 unique VGPR sources
  if (SrcVGPRs.size() <= 1)
    return false;

  // Look for the following pattern:
  //   Va <- VALU [PreExecPos]
  //   intv1
  //   Exec <- SALU [ExecPos]
  //   intv2
  //   Vb <- VALU [PostExecPos]
  //   intv3
  //   MI Va, Vb (WaitState = 0)
  //
  // Where:
  //   intv1 + intv2 <= 2 VALUs
  //   intv3 <= 4 VALUs
  //
  // If found, insert an appropriate S_WAITCNT_DEPCTR before MI.
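  // Note: the search below walks backwards from MI, so all positions are
  // measured in VALUs seen so far during the walk; larger values are earlier
  // in program order. A def at a position >= ExecPos therefore executed
  // before the EXEC write (a candidate Va), and one below ExecPos executed
  // after it (a candidate Vb).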

  const int Intv1plus2MaxVALUs = 2;
  const int Intv3MaxVALUs = 4;
  const int IntvMaxVALUs = 6;
  const int NoHazardVALUWaitStates = IntvMaxVALUs + 2;

  struct StateType {
    SmallDenseMap<Register, int, 4> DefPos;
    int ExecPos = std::numeric_limits<int>::max();
    int VALUs = 0;
  };

  StateType State;

  // This overloads expiry testing with all the hazard detection
  auto IsHazardFn = [&, this](StateType &State, const MachineInstr &I) {
    // Too many VALU states have passed
    if (State.VALUs > NoHazardVALUWaitStates)
      return HazardExpired;

    // Instructions which cause va_vdst==0 expire hazard
    if (SIInstrInfo::isVMEM(I) || SIInstrInfo::isFLAT(I) ||
        SIInstrInfo::isDS(I) || SIInstrInfo::isEXP(I) ||
        (I.getOpcode() == AMDGPU::S_WAITCNT_DEPCTR &&
         AMDGPU::DepCtr::decodeFieldVaVdst(I.getOperand(0).getImm()) == 0))
      return HazardExpired;

    // Track register writes
    bool Changed = false;
    if (SIInstrInfo::isVALU(I)) {
      for (Register Src : SrcVGPRs) {
        if (!State.DefPos.count(Src) && I.modifiesRegister(Src, &TRI)) {
          State.DefPos[Src] = State.VALUs;
          Changed = true;
        }
      }
    } else if (SIInstrInfo::isSALU(I)) {
      if (State.ExecPos == std::numeric_limits<int>::max()) {
        if (!State.DefPos.empty() && I.modifiesRegister(AMDGPU::EXEC, &TRI)) {
          State.ExecPos = State.VALUs;
          Changed = true;
        }
      }
    }

    // Early expiration: too many VALUs in intv3
    if (State.VALUs > Intv3MaxVALUs && State.DefPos.empty())
      return HazardExpired;

    // Only evaluate state if something changed
    if (!Changed)
      return NoHazardFound;

    // Determine positions of VALUs pre/post exec change
    if (State.ExecPos == std::numeric_limits<int>::max())
      return NoHazardFound;

    int PreExecPos = std::numeric_limits<int>::max();
    int PostExecPos = std::numeric_limits<int>::max();

    for (auto Entry : State.DefPos) {
      int DefVALUs = Entry.second;
      if (DefVALUs != std::numeric_limits<int>::max()) {
        if (DefVALUs >= State.ExecPos)
          PreExecPos = std::min(PreExecPos, DefVALUs);
        else if (DefVALUs < State.ExecPos)
          PostExecPos = std::min(PostExecPos, DefVALUs);
      }
    }

    // Need a VALU def after the exec change
    if (PostExecPos == std::numeric_limits<int>::max())
      return NoHazardFound;

    // Too many VALUs in intv3?
    int Intv3VALUs = PostExecPos;
    if (Intv3VALUs > Intv3MaxVALUs)
      return HazardExpired;

    // Too many VALUs in intv2?
    int Intv2VALUs = (State.ExecPos - PostExecPos) - 1;
    if (Intv2VALUs > Intv1plus2MaxVALUs)
      return HazardExpired;

    // Need a VALU def before the exec change
    if (PreExecPos == std::numeric_limits<int>::max())
      return NoHazardFound;

    // Too many VALUs in intv1?
    int Intv1VALUs = PreExecPos - State.ExecPos;
    if (Intv1VALUs > Intv1plus2MaxVALUs)
      return HazardExpired;

    // Too many VALUs in intv1 + intv2
    if (Intv1VALUs + Intv2VALUs > Intv1plus2MaxVALUs)
      return HazardExpired;

    return HazardFound;
  };
  auto UpdateStateFn = [](StateType &State, const MachineInstr &MI) {
    if (SIInstrInfo::isVALU(MI))
      State.VALUs += 1;
  };

  DenseSet<const MachineBasicBlock *> Visited;
  if (!hasHazard<StateType>(State, IsHazardFn, UpdateStateFn, MI->getParent(),
                            std::next(MI->getReverseIterator()), Visited))
    return false;

  BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
          TII.get(AMDGPU::S_WAITCNT_DEPCTR))
      .addImm(0x0fff);

  return true;
}

bool GCNHazardRecognizer::fixVALUTransUseHazard(MachineInstr *MI) {
  if (!ST.hasVALUTransUseHazard())
    return false;
  if (!SIInstrInfo::isVALU(*MI))
    return false;

  SmallSet<Register, 4> SrcVGPRs;

  for (const MachineOperand &Use : MI->explicit_uses()) {
    if (Use.isReg() && TRI.isVGPR(MF.getRegInfo(), Use.getReg()))
      SrcVGPRs.insert(Use.getReg());
  }

  // Look for the following pattern:
  //   Va <- TRANS VALU
  //   intv
  //   MI Va (WaitState = 0)
  //
  // Where:
  //   intv <= 5 VALUs / 1 TRANS
  //
  // If found, insert an appropriate S_WAITCNT_DEPCTR before MI.

  const int IntvMaxVALUs = 5;
  const int IntvMaxTRANS = 1;

  struct StateType {
    int VALUs = 0;
    int TRANS = 0;
  };

  StateType State;

  // This overloads expiry testing with all the hazard detection
  auto IsHazardFn = [&, this](StateType &State, const MachineInstr &I) {
    // Too many VALU states have passed
    if (State.VALUs > IntvMaxVALUs || State.TRANS > IntvMaxTRANS)
      return HazardExpired;

    // Instructions which cause va_vdst==0 expire hazard
    if (SIInstrInfo::isVMEM(I) || SIInstrInfo::isFLAT(I) ||
        SIInstrInfo::isDS(I) || SIInstrInfo::isEXP(I) ||
        (I.getOpcode() == AMDGPU::S_WAITCNT_DEPCTR &&
         I.getOperand(0).getImm() == 0x0fff))
      return HazardExpired;

    // Track register writes
    if (SIInstrInfo::isTRANS(I)) {
      for (Register Src : SrcVGPRs) {
        if (I.modifiesRegister(Src, &TRI)) {
          return HazardFound;
        }
      }
    }

    return NoHazardFound;
  };
  auto UpdateStateFn = [](StateType &State, const MachineInstr &MI) {
    if (SIInstrInfo::isVALU(MI))
      State.VALUs += 1;
    if (SIInstrInfo::isTRANS(MI))
      State.TRANS += 1;
  };

  DenseSet<const MachineBasicBlock *> Visited;
  if (!hasHazard<StateType>(State, IsHazardFn, UpdateStateFn, MI->getParent(),
                            std::next(MI->getReverseIterator()), Visited))
    return false;

  // Hazard is observed - insert a wait on the va_vdst counter to ensure the
  // hazard is avoided.
  BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
          TII.get(AMDGPU::S_WAITCNT_DEPCTR))
      .addImm(AMDGPU::DepCtr::encodeFieldVaVdst(0));

  return true;
}

bool GCNHazardRecognizer::fixWMMAHazards(MachineInstr *MI) {
  if (!SIInstrInfo::isWMMA(*MI))
    return false;

  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();

  auto IsHazardFn = [MI, TII, TRI](const MachineInstr &I) {
    if (!SIInstrInfo::isWMMA(I))
      return false;

    // Src0 or Src1 of the current wmma instruction overlaps with the dest of
    // the previous wmma.
    const Register CurSrc0Reg =
        TII->getNamedOperand(*MI, AMDGPU::OpName::src0)->getReg();
    const Register CurSrc1Reg =
        TII->getNamedOperand(*MI, AMDGPU::OpName::src1)->getReg();

    const Register PrevDstReg =
        TII->getNamedOperand(I, AMDGPU::OpName::vdst)->getReg();

    if (TRI->regsOverlap(PrevDstReg, CurSrc0Reg) ||
        TRI->regsOverlap(PrevDstReg, CurSrc1Reg)) {
      return true;
    }

    // Src2 of the current wmma instruction overlaps with the dest of the
    // previous wmma.
    const MachineOperand *Src2 =
        TII->getNamedOperand(*MI, AMDGPU::OpName::src2);
    const Register CurSrc2Reg = Src2->isReg() ? Src2->getReg() : Register();

    if (CurSrc2Reg != AMDGPU::NoRegister &&
        TRI->regsOverlap(PrevDstReg, CurSrc2Reg)) {

      const MachineOperand *Src2Mods =
          TII->getNamedOperand(*MI, AMDGPU::OpName::src2_modifiers);
      const bool NoSrc2Mods =
          (Src2Mods->getImm() & (SISrcMods::NEG | SISrcMods::NEG_HI)) == 0;
      // Exception: there is no hazard if the wmma instructions are of the same
      // type and there is no input modifier on src2 of the current instruction.
      return !(NoSrc2Mods && (TII->pseudoToMCOpcode(I.getOpcode()) ==
                              TII->pseudoToMCOpcode(MI->getOpcode())));
    }

    return false;
  };

  auto IsExpiredFn = [](const MachineInstr &I, int) {
    return SIInstrInfo::isVALU(I);
  };

  if (::getWaitStatesSince(IsHazardFn, MI, IsExpiredFn) ==
      std::numeric_limits<int>::max())
    return false;

  BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), TII->get(AMDGPU::V_NOP_e32));

  return true;
}

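// Workaround sketch for the 64-bit shift bug on affected subtargets: when
// the shift amount lives in the last VGPR of an 8-register allocation
// granule (and the next VGPR is unused), v_swap_b32 moves the amount into
// a free VGPR (a free aligned pair when the amount overlaps src1/dst)
// before the shift, then swaps it back afterwards.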
  if (!TRI.isVGPR(MRI, AmtReg) || ((AmtReg - AMDGPU::VGPR0) & 7) != 7)
    return false;

  if (AmtReg != AMDGPU::VGPR255 && MRI.isPhysRegUsed(AmtReg + 1))
    return false;

  MachineOperand *Src1 = TII.getNamedOperand(*MI, AMDGPU::OpName::src1);
  bool OverlappedSrc = Src1->isReg() && TRI.regsOverlap(Src1->getReg(), AmtReg);
  bool OverlappedDst = MI->modifiesRegister(AmtReg, &TRI);
  bool Overlapped = OverlappedSrc || OverlappedDst;

  assert(!OverlappedDst || !OverlappedSrc ||
         Src1->getReg() == MI->getOperand(0).getReg());
  assert(ST.needsAlignedVGPRs());
  static_assert(AMDGPU::VGPR0 + 1 == AMDGPU::VGPR1);

  Register NewReg;
  for (MCRegister Reg : Overlapped ? AMDGPU::VReg_64_Align2RegClass
                                   : AMDGPU::VGPR_32RegClass) {
    if (!MI->modifiesRegister(Reg, &TRI) && !MI->readsRegister(Reg, &TRI)) {
      NewReg = Reg;
      break;
    }
  }

  Register NewAmt = Overlapped ? (Register)TRI.getSubReg(NewReg, AMDGPU::sub1)
                               : NewReg;
  Register NewAmtLo;

  if (Overlapped)
    NewAmtLo = TRI.getSubReg(NewReg, AMDGPU::sub0);

  DebugLoc DL = MI->getDebugLoc();
  MachineBasicBlock *MBB = MI->getParent();
  // Insert a full wait count because the found register may have a pending
  // wait.
  BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_WAITCNT))
      .addImm(0);

  // Insert V_SWAP_B32 instruction(s) and run the hazard recognizer on them.
  if (Overlapped)
    runOnInstruction(
        BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_SWAP_B32), NewAmtLo)
            .addDef(AmtReg - 1)
            .addReg(AmtReg - 1, RegState::Undef)
            .addReg(NewAmtLo, RegState::Undef));
  runOnInstruction(BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_SWAP_B32), NewAmt)
                       .addDef(AmtReg)
                       .addReg(AmtReg, RegState::Undef)
                       .addReg(NewAmt, RegState::Undef));

  // Instructions emitted after the current instruction will be processed by
  // the parent loop of the hazard recognizer in a natural way.
  BuildMI(*MBB, std::next(MI->getIterator()), DL, TII.get(AMDGPU::V_SWAP_B32),
          AmtReg)
      .addDef(NewAmt)
      .addReg(NewAmt)
      .addReg(AmtReg);
  if (Overlapped)
    BuildMI(*MBB, std::next(MI->getIterator()), DL, TII.get(AMDGPU::V_SWAP_B32),
            AmtReg - 1)
        .addDef(NewAmtLo)
        .addReg(NewAmtLo)
        .addReg(AmtReg - 1);

  // Re-running the hazard recognizer on the modified instruction is not
  // necessary: the inserted V_SWAP_B32s have already both read and written the
  // new registers, so hazards related to these registers have already been
  // handled.
  Amt->setReg(NewAmt);
  Amt->setIsKill(false);
  // We do not update liveness, so the verifier may see it as undef.
  Amt->setIsUndef();
  if (OverlappedDst)
    MI->getOperand(0).setReg(NewReg);
  if (OverlappedSrc) {
    Src1->setReg(NewReg);
    Src1->setIsKill(false);
    Src1->setIsUndef();
  }

  return true;
}

int GCNHazardRecognizer::checkNSAtoVMEMHazard(MachineInstr *MI) {
  int NSAtoVMEMWaitStates = 1;

  if (!ST.hasNSAtoVMEMBug())
    return 0;

  if (!SIInstrInfo::isMUBUF(*MI) && !SIInstrInfo::isMTBUF(*MI))
    return 0;

  const SIInstrInfo *TII = ST.getInstrInfo();
  const auto *Offset = TII->getNamedOperand(*MI, AMDGPU::OpName::offset);
  if (!Offset || (Offset->getImm() & 6) == 0)
    return 0;

  auto IsHazardFn = [TII](const MachineInstr &I) {
    if (!SIInstrInfo::isMIMG(I))
      return false;
    const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(I.getOpcode());
    return Info->MIMGEncoding == AMDGPU::MIMGEncGfx10NSA &&
           TII->getInstSizeInBytes(I) >= 16;
  };

  return NSAtoVMEMWaitStates - getWaitStatesSince(IsHazardFn, 1);
}

int GCNHazardRecognizer::checkFPAtomicToDenormModeHazard(MachineInstr *MI) {
  int FPAtomicToDenormModeWaitStates = 3;

  if (!ST.hasFPAtomicToDenormModeHazard())
    return 0;

  if (MI->getOpcode() != AMDGPU::S_DENORM_MODE)
    return 0;

  auto IsHazardFn = [](const MachineInstr &I) {
    if (!SIInstrInfo::isVMEM(I) && !SIInstrInfo::isFLAT(I))
      return false;
    return SIInstrInfo::isFPAtomic(I);
  };

  auto IsExpiredFn = [](const MachineInstr &MI, int WaitStates) {
    if (WaitStates >= 3 || SIInstrInfo::isVALU(MI))
      return true;

    switch (MI.getOpcode()) {
    case AMDGPU::S_WAITCNT:
    case AMDGPU::S_WAITCNT_VSCNT:
    case AMDGPU::S_WAITCNT_VMCNT:
    case AMDGPU::S_WAITCNT_EXPCNT:
    case AMDGPU::S_WAITCNT_LGKMCNT:
    case AMDGPU::S_WAIT_IDLE:
      return true;
    default:
      break;
    }

    return false;
  };

  return FPAtomicToDenormModeWaitStates -
         ::getWaitStatesSince(IsHazardFn, MI, IsExpiredFn);
}

int GCNHazardRecognizer::checkMAIHazards(MachineInstr *MI) {
  assert(SIInstrInfo::isMAI(*MI));

  return ST.hasGFX90AInsts() ? checkMAIHazards90A(MI) : checkMAIHazards908(MI);
}

int GCNHazardRecognizer::checkMFMAPadding(MachineInstr *MI) {
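  // Worked example (hypothetical numbers): with amdgpu-mfma-padding-ratio=50
  // and a neighboring MFMA latency of 16 wait states, this requests
  // 16 * 50 / 100 = 8 wait states, less however many wait states already
  // separate the two MFMAs.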
  // Early exit if no padding is requested.
  if (MFMAPaddingRatio == 0)
    return 0;

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  if (!SIInstrInfo::isMFMA(*MI) || MFI->getOccupancy() < 2)
    return 0;

  int NeighborMFMALatency = 0;
  auto IsNeighboringMFMA = [&NeighborMFMALatency,
                            this](const MachineInstr &MI) {
    if (!SIInstrInfo::isMFMA(MI))
      return false;

    NeighborMFMALatency = this->getMFMAPipelineWaitStates(MI);
    return true;
  };

  const int MaxMFMAPipelineWaitStates = 16;
  int WaitStatesSinceNeighborMFMA =
      getWaitStatesSince(IsNeighboringMFMA, MaxMFMAPipelineWaitStates);

  int NeighborMFMAPaddingNeeded =
      (NeighborMFMALatency * MFMAPaddingRatio / 100) -
      WaitStatesSinceNeighborMFMA;

  return std::max(0, NeighborMFMAPaddingNeeded);
}

int GCNHazardRecognizer::checkMAIHazards908(MachineInstr *MI) {
  int WaitStatesNeeded = 0;
  unsigned Opc = MI->getOpcode();

  auto IsVALUFn = [](const MachineInstr &MI) {
    return SIInstrInfo::isVALU(MI) || MI.isInlineAsm();
  };

  if (Opc != AMDGPU::V_ACCVGPR_READ_B32_e64) { // MFMA or v_accvgpr_write
    const int LegacyVALUWritesVGPRWaitStates = 2;
    const int VALUWritesExecWaitStates = 4;
    const int MaxWaitStates = 4;

    int WaitStatesNeededForUse = VALUWritesExecWaitStates -
      getWaitStatesSinceDef(AMDGPU::EXEC, IsVALUFn, MaxWaitStates);
    WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);

    if (WaitStatesNeeded < MaxWaitStates) {
      for (const MachineOperand &Use : MI->explicit_uses()) {
        const int MaxWaitStates = 2;

        if (!Use.isReg() || !TRI.isVGPR(MF.getRegInfo(), Use.getReg()))
          continue;

        int WaitStatesNeededForUse = LegacyVALUWritesVGPRWaitStates -
          getWaitStatesSinceDef(Use.getReg(), IsVALUFn, MaxWaitStates);
        WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);

        if (WaitStatesNeeded == MaxWaitStates)
          break;
      }
    }
  }

  for (const MachineOperand &Op : MI->explicit_operands()) {
    if (!Op.isReg() || !TRI.isAGPR(MF.getRegInfo(), Op.getReg()))
      continue;

    if (Op.isDef() && Opc != AMDGPU::V_ACCVGPR_WRITE_B32_e64)
      continue;

    const int MFMAWritesAGPROverlappedSrcABWaitStates = 4;
    const int MFMAWritesAGPROverlappedSrcCWaitStates = 2;
    const int MFMA4x4WritesAGPRAccVgprReadWaitStates = 4;
    const int MFMA16x16WritesAGPRAccVgprReadWaitStates = 10;
    const int MFMA32x32WritesAGPRAccVgprReadWaitStates = 18;
    const int MFMA4x4WritesAGPRAccVgprWriteWaitStates = 1;
    const int MFMA16x16WritesAGPRAccVgprWriteWaitStates = 7;
    const int MFMA32x32WritesAGPRAccVgprWriteWaitStates = 15;
    const int MaxWaitStates = 18;
    Register Reg = Op.getReg();
    unsigned HazardDefLatency = 0;

    auto IsOverlappedMFMAFn = [Reg, &HazardDefLatency,
                               this](const MachineInstr &MI) {
      if (!SIInstrInfo::isMFMA(MI))
        return false;
      Register DstReg = MI.getOperand(0).getReg();
      if (DstReg == Reg)
        return false;
      HazardDefLatency =
          std::max(HazardDefLatency, TSchedModel.computeInstrLatency(&MI));
      return TRI.regsOverlap(DstReg, Reg);
    };

    int WaitStatesSinceDef = getWaitStatesSinceDef(Reg, IsOverlappedMFMAFn,
                                                   MaxWaitStates);
    int NeedWaitStates = MFMAWritesAGPROverlappedSrcABWaitStates;
    int SrcCIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
    int OpNo = Op.getOperandNo();
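    // (The latency cases in the switches below follow the constant names
    //  above: a 2-cycle MFMA is the 4x4 variant, 8 cycles is 16x16, and
    //  16 cycles is 32x32.)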
    if (OpNo == SrcCIdx) {
      NeedWaitStates = MFMAWritesAGPROverlappedSrcCWaitStates;
    } else if (Opc == AMDGPU::V_ACCVGPR_READ_B32_e64) {
      switch (HazardDefLatency) {
      case 2:  NeedWaitStates = MFMA4x4WritesAGPRAccVgprReadWaitStates;
               break;
      case 8:  NeedWaitStates = MFMA16x16WritesAGPRAccVgprReadWaitStates;
               break;
      case 16: [[fallthrough]];
      default: NeedWaitStates = MFMA32x32WritesAGPRAccVgprReadWaitStates;
               break;
      }
    } else if (Opc == AMDGPU::V_ACCVGPR_WRITE_B32_e64) {
      switch (HazardDefLatency) {
      case 2:  NeedWaitStates = MFMA4x4WritesAGPRAccVgprWriteWaitStates;
               break;
      case 8:  NeedWaitStates = MFMA16x16WritesAGPRAccVgprWriteWaitStates;
               break;
      case 16: [[fallthrough]];
      default: NeedWaitStates = MFMA32x32WritesAGPRAccVgprWriteWaitStates;
               break;
      }
    }

    int WaitStatesNeededForUse = NeedWaitStates - WaitStatesSinceDef;
    WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);

    if (WaitStatesNeeded == MaxWaitStates)
      return WaitStatesNeeded; // Early exit.

    auto IsAccVgprWriteFn = [Reg, this](const MachineInstr &MI) {
      if (MI.getOpcode() != AMDGPU::V_ACCVGPR_WRITE_B32_e64)
        return false;
      Register DstReg = MI.getOperand(0).getReg();
      return TRI.regsOverlap(Reg, DstReg);
    };

    const int AccVGPRWriteMFMAReadSrcCWaitStates = 1;
    const int AccVGPRWriteMFMAReadSrcABWaitStates = 3;
    const int AccVGPRWriteAccVgprReadWaitStates = 3;
    NeedWaitStates = AccVGPRWriteMFMAReadSrcABWaitStates;
    if (OpNo == SrcCIdx)
      NeedWaitStates = AccVGPRWriteMFMAReadSrcCWaitStates;
    else if (Opc == AMDGPU::V_ACCVGPR_READ_B32_e64)
      NeedWaitStates = AccVGPRWriteAccVgprReadWaitStates;

    WaitStatesNeededForUse = NeedWaitStates -
      getWaitStatesSinceDef(Reg, IsAccVgprWriteFn, MaxWaitStates);
    WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);

    if (WaitStatesNeeded == MaxWaitStates)
      return WaitStatesNeeded; // Early exit.
  }

  if (Opc == AMDGPU::V_ACCVGPR_WRITE_B32_e64) {
    const int MFMA4x4ReadSrcCAccVgprWriteWaitStates = 0;
    const int MFMA16x16ReadSrcCAccVgprWriteWaitStates = 5;
    const int MFMA32x32ReadSrcCAccVgprWriteWaitStates = 13;
    const int MaxWaitStates = 13;
    Register DstReg = MI->getOperand(0).getReg();
    unsigned HazardDefLatency = 0;

    auto IsSrcCMFMAFn = [DstReg, &HazardDefLatency,
                         this](const MachineInstr &MI) {
      if (!SIInstrInfo::isMFMA(MI))
        return false;
      Register Reg = TII.getNamedOperand(MI, AMDGPU::OpName::src2)->getReg();
      HazardDefLatency =
          std::max(HazardDefLatency, TSchedModel.computeInstrLatency(&MI));
      return TRI.regsOverlap(Reg, DstReg);
    };

    int WaitStatesSince = getWaitStatesSince(IsSrcCMFMAFn, MaxWaitStates);
    int NeedWaitStates;
    switch (HazardDefLatency) {
    case 2:  NeedWaitStates = MFMA4x4ReadSrcCAccVgprWriteWaitStates;
             break;
    case 8:  NeedWaitStates = MFMA16x16ReadSrcCAccVgprWriteWaitStates;
             break;
    case 16: [[fallthrough]];
    default: NeedWaitStates = MFMA32x32ReadSrcCAccVgprWriteWaitStates;
             break;
    }

    int WaitStatesNeededForUse = NeedWaitStates - WaitStatesSince;
    WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
  }

  // Pad neighboring MFMA with noops for better inter-wave performance.
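  // (The extra wait states requested here are materialized as s_nop
  //  instructions by the hazard recognizer's noop-insertion machinery, per
  //  the amdgpu-mfma-padding-ratio option.)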
  WaitStatesNeeded = std::max(WaitStatesNeeded, checkMFMAPadding(MI));

  return WaitStatesNeeded;
}

int GCNHazardRecognizer::checkMAIHazards90A(MachineInstr *MI) {
  int WaitStatesNeeded = 0;
  unsigned Opc = MI->getOpcode();

  auto IsLegacyVALUFn = [](const MachineInstr &MI) {
    return SIInstrInfo::isVALU(MI) && !SIInstrInfo::isMFMA(MI);
  };

  auto IsLegacyVALUNotDotFn = [](const MachineInstr &MI) {
    return SIInstrInfo::isVALU(MI) && !SIInstrInfo::isMFMA(MI) &&
           !SIInstrInfo::isDOT(MI);
  };

  if (!SIInstrInfo::isMFMA(*MI))
    return WaitStatesNeeded;

  const int VALUWritesExecWaitStates = 4;
  int WaitStatesNeededForUse = VALUWritesExecWaitStates -
    getWaitStatesSinceDef(AMDGPU::EXEC, IsLegacyVALUFn,
                          VALUWritesExecWaitStates);
  WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);

  int SrcCIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);

  // Loop over the uses; this covers both the DGEMM and the S/HGEMM cases for
  // the second instruction of a pair.
  for (const MachineOperand &Use : MI->explicit_uses()) {
    const int LegacyVALUNotDotWritesVGPRWaitStates = 2;
    const int SMFMA4x4WritesVGPROverlappedSMFMASrcCWaitStates = 2;
    const int GFX940_XDL2PassWritesVGPROverlappedSMFMASrcCWaitStates = 3;
    const int GFX940_XDL4PassWritesVGPROverlappedSMFMASrcCWaitStates = 5;
    const int GFX940_SMFMA4PassWritesVGPROverlappedSMFMASrcCWaitStates = 4;
    const int GFX940_XDL8PassWritesVGPROverlappedSMFMASrcCWaitStates = 9;
    const int GFX940_SMFMA8PassWritesVGPROverlappedSMFMASrcCWaitStates = 8;
    const int GFX940_XDL16PassWritesVGPROverlappedSMFMASrcCWaitStates = 17;
    const int GFX940_SMFMA16PassWritesVGPROverlappedSMFMASrcCWaitStates = 16;
    const int SMFMA16x16WritesVGPROverlappedSMFMASrcCWaitStates = 8;
    const int SMFMA32x32WritesVGPROverlappedSMFMASrcCWaitStates = 16;
    const int SMFMA4x4WritesVGPROverlappedDMFMASrcCWaitStates = 3;
    const int SMFMA16x16WritesVGPROverlappedDMFMASrcCWaitStates = 9;
    const int SMFMA32x32WritesVGPROverlappedDMFMASrcCWaitStates = 17;
    const int DMFMA16x16WritesVGPROverlappedSrcCWaitStates = 9;
    const int DMFMA4x4WritesVGPROverlappedSrcCWaitStates = 4;
    const int SMFMA4x4WritesVGPROverlappedSrcABWaitStates = 5;
    const int SMFMA16x16WritesVGPROverlappedSrcABWaitStates = 11;
    const int SMFMA32x32WritesVGPROverlappedSrcABWaitStates = 19;
    const int GFX940_SMFMA2PassWritesVGPROverlappedSrcABWaitStates = 4;
    const int GFX940_SMFMA4PassWritesVGPROverlappedSrcABWaitStates = 6;
    const int GFX940_SMFMA8PassWritesVGPROverlappedSrcABWaitStates = 10;
    const int GFX940_SMFMA16PassWritesVGPROverlappedSrcABWaitStates = 18;
    const int GFX940_XDL2PassWritesVGPROverlappedSrcABWaitStates = 5;
    const int GFX940_XDL4PassWritesVGPROverlappedSrcABWaitStates = 7;
    const int GFX940_XDL8PassWritesVGPROverlappedSrcABWaitStates = 11;
    const int GFX940_XDL16PassWritesVGPROverlappedSrcABWaitStates = 19;
    const int DMFMA4x4WritesVGPROverlappedMFMASrcABWaitStates = 6;
    const int DMFMA16x16WritesVGPROverlappedMFMASrcABWaitStates = 11;
    const int DMFMA4x4WritesVGPRFullSrcCWaitStates = 4;
    const int GFX940_SMFMA4x4WritesVGPRFullSrcCWaitStates = 2;
    const int MaxWaitStates = 19;

    if (!Use.isReg())
      continue;
    Register Reg = Use.getReg();
    bool FullReg;
    const MachineInstr *MI1;

    auto IsOverlappedMFMAFn = [Reg, &FullReg, &MI1,
                               this](const MachineInstr &MI) {
      if (!SIInstrInfo::isMFMA(MI))
        return false;
      Register DstReg = MI.getOperand(0).getReg();
      FullReg = (DstReg == Reg);
      MI1 = &MI;
      return TRI.regsOverlap(DstReg, Reg);
    };

    WaitStatesNeededForUse = LegacyVALUNotDotWritesVGPRWaitStates -
      getWaitStatesSinceDef(Reg, IsLegacyVALUNotDotFn, MaxWaitStates);
    WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);

    int NumWaitStates =
        getWaitStatesSinceDef(Reg, IsOverlappedMFMAFn, MaxWaitStates);
    if (NumWaitStates == std::numeric_limits<int>::max())
      continue;

    int OpNo = Use.getOperandNo();
    unsigned Opc1 = MI1->getOpcode();
    int NeedWaitStates = 0;
    if (OpNo == SrcCIdx) {
      if (!isDGEMM(Opc) && (!ST.hasGFX940Insts() && isDGEMM(Opc1))) {
        NeedWaitStates = 0;
      } else if (FullReg) {
        if ((Opc == AMDGPU::V_MFMA_F64_4X4X4F64_e64 ||
             Opc == AMDGPU::V_MFMA_F64_4X4X4F64_vgprcd_e64) &&
            (Opc1 == AMDGPU::V_MFMA_F64_4X4X4F64_e64 ||
             Opc1 == AMDGPU::V_MFMA_F64_4X4X4F64_vgprcd_e64))
          NeedWaitStates = DMFMA4x4WritesVGPRFullSrcCWaitStates;
        else if (ST.hasGFX940Insts() &&
                 TSchedModel.computeInstrLatency(MI1) == 2)
          NeedWaitStates = GFX940_SMFMA4x4WritesVGPRFullSrcCWaitStates;
      } else {
        switch (Opc1) {
        case AMDGPU::V_MFMA_F64_16X16X4F64_e64:
        case AMDGPU::V_MFMA_F64_16X16X4F64_vgprcd_e64:
        case AMDGPU::V_MFMA_F64_16X16X4F64_mac_e64:
        case AMDGPU::V_MFMA_F64_16X16X4F64_mac_vgprcd_e64:
          if (!isXDL(ST, *MI))
            NeedWaitStates = DMFMA16x16WritesVGPROverlappedSrcCWaitStates;
          break;
        case AMDGPU::V_MFMA_F64_4X4X4F64_e64:
        case AMDGPU::V_MFMA_F64_4X4X4F64_vgprcd_e64:
          if (!isXDL(ST, *MI))
            NeedWaitStates = DMFMA4x4WritesVGPROverlappedSrcCWaitStates;
          break;
        default:
          if (ST.hasGFX940Insts() && isXDL(ST, *MI) && !isXDL(ST, *MI1))
            break;
          switch (TSchedModel.computeInstrLatency(MI1)) {
          case 2:
            NeedWaitStates = ST.hasGFX940Insts()
              ? isXDL(ST, *MI1)
                ? GFX940_XDL2PassWritesVGPROverlappedSMFMASrcCWaitStates
                : SMFMA4x4WritesVGPROverlappedSMFMASrcCWaitStates
              : isDGEMM(Opc)
                ? SMFMA4x4WritesVGPROverlappedDMFMASrcCWaitStates
                : SMFMA4x4WritesVGPROverlappedSMFMASrcCWaitStates;
            break;
          case 4:
            assert(ST.hasGFX940Insts());
            NeedWaitStates = isXDL(ST, *MI1)
              ? GFX940_XDL4PassWritesVGPROverlappedSMFMASrcCWaitStates
              : GFX940_SMFMA4PassWritesVGPROverlappedSMFMASrcCWaitStates;
            break;
          case 8:
            NeedWaitStates = ST.hasGFX940Insts()
              ? isXDL(ST, *MI1)
                ? GFX940_XDL8PassWritesVGPROverlappedSMFMASrcCWaitStates
                : GFX940_SMFMA8PassWritesVGPROverlappedSMFMASrcCWaitStates
              : isDGEMM(Opc)
                ? SMFMA16x16WritesVGPROverlappedDMFMASrcCWaitStates
                : SMFMA16x16WritesVGPROverlappedSMFMASrcCWaitStates;
            break;
          case 16: [[fallthrough]];
          default:
            NeedWaitStates = ST.hasGFX940Insts()
              ? isXDL(ST, *MI1)
                ? GFX940_XDL16PassWritesVGPROverlappedSMFMASrcCWaitStates
                : GFX940_SMFMA16PassWritesVGPROverlappedSMFMASrcCWaitStates
              : isDGEMM(Opc)
                ? SMFMA32x32WritesVGPROverlappedDMFMASrcCWaitStates
                : SMFMA32x32WritesVGPROverlappedSMFMASrcCWaitStates;
          }
        }
      }
    } else {
      switch (Opc1) {
      case AMDGPU::V_MFMA_F64_16X16X4F64_e64:
      case AMDGPU::V_MFMA_F64_16X16X4F64_vgprcd_e64:
      case AMDGPU::V_MFMA_F64_16X16X4F64_mac_e64:
      case AMDGPU::V_MFMA_F64_16X16X4F64_mac_vgprcd_e64:
        NeedWaitStates = DMFMA16x16WritesVGPROverlappedMFMASrcABWaitStates;
        break;
      case AMDGPU::V_MFMA_F64_4X4X4F64_e64:
      case AMDGPU::V_MFMA_F64_4X4X4F64_vgprcd_e64:
        NeedWaitStates = DMFMA4x4WritesVGPROverlappedMFMASrcABWaitStates;
        break;
      default:
        switch (TSchedModel.computeInstrLatency(MI1)) {
        case 2:
          NeedWaitStates = ST.hasGFX940Insts()
            ? isXDL(ST, *MI1)
              ? GFX940_XDL2PassWritesVGPROverlappedSrcABWaitStates
              : GFX940_SMFMA2PassWritesVGPROverlappedSrcABWaitStates
            : SMFMA4x4WritesVGPROverlappedSrcABWaitStates;
          break;
        case 4:
          assert(ST.hasGFX940Insts());
          NeedWaitStates = isXDL(ST, *MI1)
            ? GFX940_XDL4PassWritesVGPROverlappedSrcABWaitStates
            : GFX940_SMFMA4PassWritesVGPROverlappedSrcABWaitStates;
          break;
        case 8:
          NeedWaitStates = ST.hasGFX940Insts()
            ? isXDL(ST, *MI1)
              ? GFX940_XDL8PassWritesVGPROverlappedSrcABWaitStates
              : GFX940_SMFMA8PassWritesVGPROverlappedSrcABWaitStates
            : SMFMA16x16WritesVGPROverlappedSrcABWaitStates;
          break;
        case 16: [[fallthrough]];
        default:
          NeedWaitStates = ST.hasGFX940Insts()
            ? isXDL(ST, *MI1)
              ? GFX940_XDL16PassWritesVGPROverlappedSrcABWaitStates
              : GFX940_SMFMA16PassWritesVGPROverlappedSrcABWaitStates
            : SMFMA32x32WritesVGPROverlappedSrcABWaitStates;
        }
      }
    }
    if (WaitStatesNeeded >= NeedWaitStates)
      continue;

    WaitStatesNeededForUse = NeedWaitStates - NumWaitStates;
    WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);

    if (WaitStatesNeeded == MaxWaitStates)
      break;
  }

  return WaitStatesNeeded;
}

int GCNHazardRecognizer::checkMAILdStHazards(MachineInstr *MI) {
  // On gfx90a+ the relevant hazards are checked in checkMAIVALUHazards().
  if (!ST.hasMAIInsts() || ST.hasGFX90AInsts())
    return 0;

  int WaitStatesNeeded = 0;

  auto IsAccVgprReadFn = [](const MachineInstr &MI) {
    return MI.getOpcode() == AMDGPU::V_ACCVGPR_READ_B32_e64;
  };

  for (const MachineOperand &Op : MI->explicit_uses()) {
    if (!Op.isReg() || !TRI.isVGPR(MF.getRegInfo(), Op.getReg()))
      continue;

    Register Reg = Op.getReg();

    const int AccVgprReadLdStWaitStates = 2;
    const int VALUWriteAccVgprRdWrLdStDepVALUWaitStates = 1;
    const int MaxWaitStates = 2;

    int WaitStatesNeededForUse = AccVgprReadLdStWaitStates -
      getWaitStatesSinceDef(Reg, IsAccVgprReadFn, MaxWaitStates);
    WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);

    if (WaitStatesNeeded == MaxWaitStates)
      return WaitStatesNeeded; // Early exit.

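    // The next check looks, scanning backwards, for the rough sequence
    // (register names hypothetical): a VALU writes the VGPR, a
    // v_accvgpr_read/write follows closely, and this load/store then uses
    // the VGPR.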
    auto IsVALUAccVgprRdWrCheckFn = [Reg, this](const MachineInstr &MI) {
      if (MI.getOpcode() != AMDGPU::V_ACCVGPR_READ_B32_e64 &&
          MI.getOpcode() != AMDGPU::V_ACCVGPR_WRITE_B32_e64)
        return false;
      auto IsVALUFn = [](const MachineInstr &MI) {
        return SIInstrInfo::isVALU(MI) && !SIInstrInfo::isMAI(MI);
      };
      return getWaitStatesSinceDef(Reg, IsVALUFn, 2 /*MaxWaitStates*/) <
             std::numeric_limits<int>::max();
    };

    WaitStatesNeededForUse = VALUWriteAccVgprRdWrLdStDepVALUWaitStates -
      getWaitStatesSince(IsVALUAccVgprRdWrCheckFn, MaxWaitStates);
    WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
  }

  return WaitStatesNeeded;
}

int GCNHazardRecognizer::checkMAIVALUHazards(MachineInstr *MI) {
  if (!ST.hasGFX90AInsts())
    return 0;

  auto IsDGEMMFn = [](const MachineInstr &MI) -> bool {
    return isDGEMM(MI.getOpcode());
  };

  // This is checked in checkMAIHazards90A().
  if (SIInstrInfo::isMFMA(*MI))
    return 0;

  const MachineRegisterInfo &MRI = MF.getRegInfo();

  int WaitStatesNeeded = 0;

  bool IsMem = SIInstrInfo::isVMEM(*MI) ||
               SIInstrInfo::isFLAT(*MI) ||
               SIInstrInfo::isDS(*MI);
  bool IsMemOrExport = IsMem || SIInstrInfo::isEXP(*MI);
  bool IsVALU = SIInstrInfo::isVALU(*MI);

  const MachineInstr *MFMA = nullptr;
  unsigned Reg;
  auto IsMFMAWriteFn = [&Reg, &MFMA, this](const MachineInstr &MI) {
    if (!SIInstrInfo::isMFMA(MI) ||
        !TRI.regsOverlap(MI.getOperand(0).getReg(), Reg))
      return false;
    MFMA = &MI;
    return true;
  };

  const MachineInstr *DOT = nullptr;
  auto IsDotWriteFn = [&Reg, &DOT, this](const MachineInstr &MI) {
    if (!SIInstrInfo::isDOT(MI) ||
        !TRI.regsOverlap(MI.getOperand(0).getReg(), Reg))
      return false;
    DOT = &MI;
    return true;
  };

  bool DGEMMAfterVALUWrite = false;
  auto IsDGEMMHazard = [&DGEMMAfterVALUWrite, this](const MachineInstr &MI) {
    // Found a DGEMM on the reverse traversal to the def.
    if (isDGEMM(MI.getOpcode()))
      DGEMMAfterVALUWrite = true;

    // Only a hazard if the register is defined by a VALU and a DGEMM is
    // found after the def.
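    // (Scanning backwards from the memory instruction: once a DGEMM has been
    // seen, the first VALU def encountered completes the hazardous
    // VALU-write -> DGEMM -> VMEM-read pattern.)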
    if (!TII.isVALU(MI) || !DGEMMAfterVALUWrite)
      return false;

    return true;
  };

  int SrcCIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                           AMDGPU::OpName::src2);

  if (IsMemOrExport || IsVALU) {
    const int SMFMA4x4WriteVgprVALUMemExpReadWaitStates = 5;
    const int SMFMA16x16WriteVgprVALUMemExpReadWaitStates = 11;
    const int SMFMA32x32WriteVgprVALUMemExpReadWaitStates = 19;
    const int GFX940_SMFMA2PassWriteVgprVALUMemExpReadWaitStates = 4;
    const int GFX940_SMFMA4PassWriteVgprVALUMemExpReadWaitStates = 6;
    const int GFX940_SMFMA8PassWriteVgprVALUMemExpReadWaitStates = 10;
    const int GFX940_SMFMA16PassWriteVgprVALUMemExpReadWaitStates = 18;
    const int GFX940_XDL2PassWriteVgprVALUMemExpReadWaitStates = 5;
    const int GFX940_XDL4PassWriteVgprVALUMemExpReadWaitStates = 7;
    const int GFX940_XDL8PassWriteVgprVALUMemExpReadWaitStates = 11;
    const int GFX940_XDL16PassWriteVgprVALUMemExpReadWaitStates = 19;
    const int DMFMA4x4WriteVgprMemExpReadWaitStates = 9;
    const int DMFMA16x16WriteVgprMemExpReadWaitStates = 18;
    const int DMFMA4x4WriteVgprVALUReadWaitStates = 6;
    const int DMFMA16x16WriteVgprVALUReadWaitStates = 11;
    const int DotWriteSameDotReadSrcAB = 3;
    const int DotWriteDifferentVALURead = 3;
    const int DMFMABetweenVALUWriteVMEMRead = 2;
    const int MaxWaitStates = 19;

    for (const MachineOperand &Use : MI->explicit_uses()) {
      if (!Use.isReg())
        continue;
      Reg = Use.getReg();

      DOT = nullptr;
      int WaitStatesSinceDef = getWaitStatesSinceDef(Reg, IsDotWriteFn,
                                                     MaxWaitStates);
      if (DOT) {
        int NeedWaitStates = 0;
        if (DOT->getOpcode() == MI->getOpcode()) {
          if (&Use - &MI->getOperand(0) != SrcCIdx)
            NeedWaitStates = DotWriteSameDotReadSrcAB;
        } else {
          NeedWaitStates = DotWriteDifferentVALURead;
        }

        int WaitStatesNeededForUse = NeedWaitStates - WaitStatesSinceDef;
        WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
      }

      // Workaround for a HW data hazard bug observed only on GFX90A. When
      // there is a DGEMM instruction in-between a VALU and a VMEM instruction,
      // it causes the SQ to incorrectly omit the two wait states between the
      // two instructions that are needed to avoid the data hazard.
      if (IsMem && ST.hasGFX90AInsts() && !ST.hasGFX940Insts()) {
        DGEMMAfterVALUWrite = false;
        if (TRI.isVectorRegister(MRI, Reg)) {
          int WaitStatesNeededForUse =
              DMFMABetweenVALUWriteVMEMRead -
              getWaitStatesSinceDef(Reg, IsDGEMMHazard,
                                    DMFMABetweenVALUWriteVMEMRead);

          WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
        }
      }

      MFMA = nullptr;
      WaitStatesSinceDef =
          getWaitStatesSinceDef(Reg, IsMFMAWriteFn, MaxWaitStates);
      if (!MFMA)
        continue;

      unsigned HazardDefLatency = TSchedModel.computeInstrLatency(MFMA);
      int NeedWaitStates = MaxWaitStates;
      switch (HazardDefLatency) {
      case 2:
        NeedWaitStates =
            ST.hasGFX940Insts()
                ? isXDL(ST, *MFMA)
                      ? GFX940_XDL2PassWriteVgprVALUMemExpReadWaitStates
                      : GFX940_SMFMA2PassWriteVgprVALUMemExpReadWaitStates
                : SMFMA4x4WriteVgprVALUMemExpReadWaitStates;
        break;
      case 4:
        assert(isDGEMM(MFMA->getOpcode()) || ST.hasGFX940Insts());
        NeedWaitStates =
            isDGEMM(MFMA->getOpcode())
                ? IsMemOrExport ? DMFMA4x4WriteVgprMemExpReadWaitStates
                                : DMFMA4x4WriteVgprVALUReadWaitStates
                : isXDL(ST, *MFMA)
                      ? GFX940_XDL4PassWriteVgprVALUMemExpReadWaitStates
                      : GFX940_SMFMA4PassWriteVgprVALUMemExpReadWaitStates;
        break;
      case 8:
        NeedWaitStates =
            ST.hasGFX940Insts()
                ? isXDL(ST, *MFMA)
                      ? GFX940_XDL8PassWriteVgprVALUMemExpReadWaitStates
                      : GFX940_SMFMA8PassWriteVgprVALUMemExpReadWaitStates
                : SMFMA16x16WriteVgprVALUMemExpReadWaitStates;
        break;
      case 16: [[fallthrough]];
      default:
        NeedWaitStates =
            isDGEMM(MFMA->getOpcode())
                ? IsMemOrExport ? DMFMA16x16WriteVgprMemExpReadWaitStates
                                : DMFMA16x16WriteVgprVALUReadWaitStates
                : ST.hasGFX940Insts()
                      ? isXDL(ST, *MFMA)
                            ? GFX940_XDL16PassWriteVgprVALUMemExpReadWaitStates
                            : GFX940_SMFMA16PassWriteVgprVALUMemExpReadWaitStates
                      : SMFMA32x32WriteVgprVALUMemExpReadWaitStates;
        break;
      }

      int WaitStatesNeededForUse = NeedWaitStates - WaitStatesSinceDef;
      WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);

      if (WaitStatesNeeded == MaxWaitStates)
        break;
    }
  }

  unsigned Opc = MI->getOpcode();
  const int DMFMAToFMA64WaitStates = 2;
  if ((Opc == AMDGPU::V_FMA_F64_e64 ||
       Opc == AMDGPU::V_FMAC_F64_e32 || Opc == AMDGPU::V_FMAC_F64_e64 ||
       Opc == AMDGPU::V_FMAC_F64_dpp) &&
      WaitStatesNeeded < DMFMAToFMA64WaitStates) {
    int WaitStatesNeededForUse = DMFMAToFMA64WaitStates -
      getWaitStatesSince(IsDGEMMFn, DMFMAToFMA64WaitStates);
    WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
  }

  if (!IsVALU && !IsMemOrExport)
    return WaitStatesNeeded;

  for (const MachineOperand &Def : MI->defs()) {
    const int SMFMA4x4WriteVgprVALUWawWaitStates = 5;
    const int SMFMA16x16WriteVgprVALUWawWaitStates = 11;
    const int SMFMA32x32WriteVgprVALUWawWaitStates = 19;
    const int GFX940_SMFMA2PassWriteVgprVALUWawWaitStates = 4;
    const int GFX940_SMFMA4PassWriteVgprVALUWawWaitStates = 6;
    const int GFX940_SMFMA8PassWriteVgprVALUWawWaitStates = 10;
    const int GFX940_SMFMA16PassWriteVgprVALUWawWaitStates = 18;
    const int GFX940_XDL2PassWriteVgprVALUWawWaitStates = 5;
    const int GFX940_XDL4PassWriteVgprVALUWawWaitStates = 7;
    const int GFX940_XDL8PassWriteVgprVALUWawWaitStates = 11;
    const int GFX940_XDL16PassWriteVgprVALUWawWaitStates = 19;
    const int SMFMA4x4ReadVgprVALUWarWaitStates = 1;
    const int GFX940_XDL4PassReadVgprVALUWarWaitStates = 3;
    const int SMFMA16x16ReadVgprVALUWarWaitStates = 7;
    const int SMFMA32x32ReadVgprVALUWarWaitStates = 15;
    const int DMFMA4x4WriteVgprVALUWriteWaitStates = 6;
    const int DMFMA16x16WriteVgprVALUWriteWaitStates = 11;
    const int DotWriteDifferentVALUWrite = 3;
    const int MaxWaitStates = 19;
    const int MaxWarWaitStates = 15;

    Reg = Def.getReg();

    DOT = nullptr;
    int WaitStatesSinceDef = getWaitStatesSinceDef(Reg, IsDotWriteFn,
                                                   MaxWaitStates);
    if (DOT && DOT->getOpcode() != MI->getOpcode())
      WaitStatesNeeded = std::max(WaitStatesNeeded, DotWriteDifferentVALUWrite -
                                                    WaitStatesSinceDef);

    MFMA = nullptr;
    WaitStatesSinceDef =
        getWaitStatesSinceDef(Reg, IsMFMAWriteFn, MaxWaitStates);
    if (MFMA) {
      int NeedWaitStates = MaxWaitStates;
      switch (TSchedModel.computeInstrLatency(MFMA)) {
      case 2:
        NeedWaitStates = ST.hasGFX940Insts()
            ? isXDL(ST, *MFMA)
                  ? GFX940_XDL2PassWriteVgprVALUWawWaitStates
                  : GFX940_SMFMA2PassWriteVgprVALUWawWaitStates
            : SMFMA4x4WriteVgprVALUWawWaitStates;
        break;
      case 4:
        assert(isDGEMM(MFMA->getOpcode()) || ST.hasGFX940Insts());
        NeedWaitStates = isDGEMM(MFMA->getOpcode())
            ? DMFMA4x4WriteVgprVALUWriteWaitStates
            : isXDL(ST, *MFMA)
                  ? GFX940_XDL4PassWriteVgprVALUWawWaitStates
                  : GFX940_SMFMA4PassWriteVgprVALUWawWaitStates;
        break;
      case 8:
        NeedWaitStates = ST.hasGFX940Insts()
            ? isXDL(ST, *MFMA)
                  ? GFX940_XDL8PassWriteVgprVALUWawWaitStates
                  : GFX940_SMFMA8PassWriteVgprVALUWawWaitStates
            : SMFMA16x16WriteVgprVALUWawWaitStates;
        break;
      case 16: [[fallthrough]];
      default:
        NeedWaitStates = isDGEMM(MFMA->getOpcode())
            ? DMFMA16x16WriteVgprVALUWriteWaitStates
            : ST.hasGFX940Insts()
                  ? isXDL(ST, *MFMA)
                        ? GFX940_XDL16PassWriteVgprVALUWawWaitStates
                        : GFX940_SMFMA16PassWriteVgprVALUWawWaitStates
                  : SMFMA32x32WriteVgprVALUWawWaitStates;
        break;
      }

      int WaitStatesNeededForUse = NeedWaitStates - WaitStatesSinceDef;
      WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);

      if (WaitStatesNeeded == MaxWaitStates)
        break;
    }

    auto IsSMFMAReadAsCFn = [&Reg, &MFMA, this](const MachineInstr &MI) {
      if (!SIInstrInfo::isMFMA(MI) || isDGEMM(MI.getOpcode()) ||
          !MI.readsRegister(Reg, &TRI))
        return false;

      if (ST.hasGFX940Insts() && !isXDL(ST, MI))
        return false;

      const MachineOperand *SrcC =
          TII.getNamedOperand(MI, AMDGPU::OpName::src2);
      assert(SrcC);
      if (!SrcC->isReg() || !TRI.regsOverlap(SrcC->getReg(), Reg))
        return false;

      MFMA = &MI;
      return true;
    };

    MFMA = nullptr;
    int WaitStatesSinceUse = getWaitStatesSince(IsSMFMAReadAsCFn,
                                                MaxWarWaitStates);
    if (!MFMA)
      continue;

    unsigned HazardDefLatency = TSchedModel.computeInstrLatency(MFMA);
    int NeedWaitStates = MaxWaitStates;
    switch (HazardDefLatency) {
    case 2:  NeedWaitStates = SMFMA4x4ReadVgprVALUWarWaitStates;
             break;
    case 4:  assert(ST.hasGFX940Insts());
             NeedWaitStates = GFX940_XDL4PassReadVgprVALUWarWaitStates;
             break;
    case 8:  NeedWaitStates = SMFMA16x16ReadVgprVALUWarWaitStates;
             break;
    case 16: [[fallthrough]];
    default: NeedWaitStates = SMFMA32x32ReadVgprVALUWarWaitStates;
             break;
    }

    int WaitStatesNeededForUse = NeedWaitStates - WaitStatesSinceUse;
    WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
  }

  return WaitStatesNeeded;
}

bool GCNHazardRecognizer::ShouldPreferAnother(SUnit *SU) {
  if (!SU->isInstr())
    return false;

  const MachineInstr *MAI = nullptr;

  auto IsMFMAFn = [&MAI](const MachineInstr &MI) {
    MAI = nullptr;
    if (SIInstrInfo::isMFMA(MI))
      MAI = &MI;
    return MAI != nullptr;
  };

  MachineInstr *MI = SU->getInstr();
  if (IsMFMAFn(*MI)) {
    int W = getWaitStatesSince(IsMFMAFn, 16);
    if (MAI)
      return W < (int)TSchedModel.computeInstrLatency(MAI);
  }

  return false;
}

bool GCNHazardRecognizer::fixVALUMaskWriteHazard(MachineInstr *MI) {
  if (!ST.isWave64())
    return false;
  if (!ST.hasVALUMaskWriteHazard())
    return false;
  if (!SIInstrInfo::isSALU(*MI))
    return false;

  // The hazard sequence is three instructions:
  //   1. VALU reads SGPR as mask
  //   2. SALU writes SGPR
  //   3. SALU reads SGPR
  // The hazard can expire if the distance between 2 and 3 is sufficient.
  // In practice this happens <10% of the time, hence this always assumes
  // the hazard exists if 1 and 2 are present to avoid searching.

  const MachineOperand *SDSTOp = TII.getNamedOperand(*MI, AMDGPU::OpName::sdst);
  if (!SDSTOp || !SDSTOp->isReg())
    return false;

  const Register HazardReg = SDSTOp->getReg();
  if (HazardReg == AMDGPU::EXEC ||
      HazardReg == AMDGPU::EXEC_LO ||
      HazardReg == AMDGPU::EXEC_HI ||
      HazardReg == AMDGPU::M0)
    return false;

  auto IsHazardFn = [HazardReg, this](const MachineInstr &I) {
    switch (I.getOpcode()) {
    case AMDGPU::V_ADDC_U32_e32:
    case AMDGPU::V_ADDC_U32_dpp:
    case AMDGPU::V_CNDMASK_B16_e32:
    case AMDGPU::V_CNDMASK_B16_dpp:
    case AMDGPU::V_CNDMASK_B32_e32:
    case AMDGPU::V_CNDMASK_B32_dpp:
    case AMDGPU::V_DIV_FMAS_F32_e64:
    case AMDGPU::V_DIV_FMAS_F64_e64:
    case AMDGPU::V_SUBB_U32_e32:
    case AMDGPU::V_SUBB_U32_dpp:
    case AMDGPU::V_SUBBREV_U32_e32:
    case AMDGPU::V_SUBBREV_U32_dpp:
      // These implicitly read VCC as mask source.
      return HazardReg == AMDGPU::VCC ||
             HazardReg == AMDGPU::VCC_LO ||
             HazardReg == AMDGPU::VCC_HI;
    case AMDGPU::V_ADDC_U32_e64:
    case AMDGPU::V_ADDC_U32_e64_dpp:
    case AMDGPU::V_CNDMASK_B16_e64:
    case AMDGPU::V_CNDMASK_B16_e64_dpp:
    case AMDGPU::V_CNDMASK_B32_e64:
    case AMDGPU::V_CNDMASK_B32_e64_dpp:
    case AMDGPU::V_SUBB_U32_e64:
    case AMDGPU::V_SUBB_U32_e64_dpp:
    case AMDGPU::V_SUBBREV_U32_e64:
    case AMDGPU::V_SUBBREV_U32_e64_dpp: {
      // Only check mask register overlaps.
      const MachineOperand *SSRCOp =
          TII.getNamedOperand(I, AMDGPU::OpName::src2);
      assert(SSRCOp);
      return TRI.regsOverlap(SSRCOp->getReg(), HazardReg);
    }
    default:
      return false;
    }
  };

  const MachineRegisterInfo &MRI = MF.getRegInfo();
  auto IsExpiredFn = [&MRI, this](const MachineInstr &I, int) {
    // s_waitcnt_depctr sa_sdst(0) mitigates the hazard.
    if (I.getOpcode() == AMDGPU::S_WAITCNT_DEPCTR &&
        AMDGPU::DepCtr::decodeFieldSaSdst(I.getOperand(0).getImm()) == 0)
      return true;

    // A VALU access to any SGPR or literal constant other than HazardReg
    // mitigates the hazard. There is no need to check HazardReg here as this
    // will only be called when !IsHazardFn.
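    // For example (hypothetical instruction), a "v_cndmask_b32 v0, v1, v2,
    // s[2:3]" with s[2:3] != HazardReg would resolve the hazard before the
    // SALU read occurs.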
    if (!SIInstrInfo::isVALU(I))
      return false;
    for (int OpNo = 0, End = I.getNumOperands(); OpNo < End; ++OpNo) {
      const MachineOperand &Op = I.getOperand(OpNo);
      if (Op.isReg()) {
        Register OpReg = Op.getReg();
        // Only consider uses.
        if (!Op.isUse())
          continue;
        // Ignore EXEC.
        if (OpReg == AMDGPU::EXEC ||
            OpReg == AMDGPU::EXEC_LO ||
            OpReg == AMDGPU::EXEC_HI)
          continue;
        // Ignore all implicit uses except VCC.
        if (Op.isImplicit()) {
          if (OpReg == AMDGPU::VCC ||
              OpReg == AMDGPU::VCC_LO ||
              OpReg == AMDGPU::VCC_HI)
            return true;
          continue;
        }
        if (TRI.isSGPRReg(MRI, OpReg))
          return true;
      } else {
        const MCInstrDesc &InstDesc = I.getDesc();
        const MCOperandInfo &OpInfo = InstDesc.operands()[OpNo];
        if (!TII.isInlineConstant(Op, OpInfo))
          return true;
      }
    }
    return false;
  };

  // Check for the hazard.
  if (::getWaitStatesSince(IsHazardFn, MI, IsExpiredFn) ==
      std::numeric_limits<int>::max())
    return false;

  auto NextMI = std::next(MI->getIterator());

  // Add s_waitcnt_depctr sa_sdst(0) after the SALU write.
  BuildMI(*MI->getParent(), NextMI, MI->getDebugLoc(),
          TII.get(AMDGPU::S_WAITCNT_DEPCTR))
      .addImm(AMDGPU::DepCtr::encodeFieldSaSdst(0));

  // The SALU write may be an s_getpc in a bundle.
  if (MI->getOpcode() == AMDGPU::S_GETPC_B64) {
    // Update the offsets of any references in the bundle; the inserted
    // 4-byte s_waitcnt_depctr shifts everything after the s_getpc by 4.
    while (NextMI != MI->getParent()->end() &&
           NextMI->isBundledWithPred()) {
      for (auto &Operand : NextMI->operands()) {
        if (Operand.isGlobal())
          Operand.setOffset(Operand.getOffset() + 4);
      }
      NextMI++;
    }
  }

  return true;
}