//===-- SIWholeQuadMode.cpp - enter and suspend whole quad mode ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief This pass adds instructions to enable whole quad mode for pixel
/// shaders.
///
/// Whole quad mode is required for derivative computations, but it interferes
/// with shader side effects (stores and atomics). This pass is run on the
/// scheduled machine IR but before register coalescing, so that machine SSA is
/// available for analysis. It ensures that WQM is enabled when necessary, but
/// disabled around stores and atomics.
///
/// When necessary, this pass creates a function prolog
///
///   S_MOV_B64 LiveMask, EXEC
///   S_WQM_B64 EXEC, EXEC
///
/// to enter WQM at the top of the function and surrounds blocks of Exact
/// instructions by
///
///   S_AND_SAVEEXEC_B64 Tmp, LiveMask
///   ...
///   S_MOV_B64 EXEC, Tmp
///
/// In order to avoid excessive switching during sequences of Exact
/// instructions, the pass first analyzes which instructions must be run in WQM
/// (aka which instructions produce values that lead to derivative
/// computations).
///
/// Basic blocks are always exited in WQM as long as some successor needs WQM.
///
/// There is room for improvement given better control flow analysis:
///
///  (1) at the top level (outside of control flow statements, and as long as
///      kill hasn't been used), one SGPR can be saved by recovering WQM from
///      the LiveMask (this is implemented for the entry block).
///
///  (2) when entire regions (e.g. if-else blocks or entire loops) only
///      consist of exact and don't-care instructions, the switch only has to
///      be done at the entry and exit points rather than potentially in each
///      block of the region.
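///
/// For illustration, consider a hypothetical pixel shader (this exact code is
/// not produced verbatim by the pass) that samples a texture, which requires
/// derivatives and hence WQM, and then stores the result to memory:
///
///   S_MOV_B64 LiveMask, EXEC         ; prolog: remember the live mask
///   S_WQM_B64 EXEC, EXEC             ; enter WQM for the sampling code
///   ...                              ; interpolation, IMAGE_SAMPLE, etc.
///   S_AND_B64 EXEC, EXEC, LiveMask   ; disable helper pixels for the store
///   BUFFER_STORE_DWORD ...           ; Exact-only side effect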
///
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineDominanceFrontier.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Constants.h"

using namespace llvm;

#define DEBUG_TYPE "si-wqm"

namespace {

enum {
  StateWQM = 0x1,
  StateExact = 0x2,
};

struct InstrInfo {
  char Needs = 0;
  char OutNeeds = 0;
};

struct BlockInfo {
  char Needs = 0;
  char InNeeds = 0;
  char OutNeeds = 0;
};

struct WorkItem {
  const MachineBasicBlock *MBB = nullptr;
  const MachineInstr *MI = nullptr;

  WorkItem() {}
  WorkItem(const MachineBasicBlock *MBB) : MBB(MBB) {}
  WorkItem(const MachineInstr *MI) : MI(MI) {}
};

class SIWholeQuadMode : public MachineFunctionPass {
private:
  const SIInstrInfo *TII;
  const SIRegisterInfo *TRI;
  MachineRegisterInfo *MRI;

  DenseMap<const MachineInstr *, InstrInfo> Instructions;
  DenseMap<const MachineBasicBlock *, BlockInfo> Blocks;
  SmallVector<const MachineInstr *, 2> ExecExports;
  SmallVector<MachineInstr *, 1> LiveMaskQueries;

  char scanInstructions(MachineFunction &MF, std::vector<WorkItem> &Worklist);
  void propagateInstruction(const MachineInstr &MI,
                            std::vector<WorkItem> &Worklist);
  void propagateBlock(const MachineBasicBlock &MBB,
                      std::vector<WorkItem> &Worklist);
  char analyzeFunction(MachineFunction &MF);

  void toExact(MachineBasicBlock &MBB, MachineBasicBlock::iterator Before,
               unsigned SaveWQM, unsigned LiveMaskReg);
  void toWQM(MachineBasicBlock &MBB, MachineBasicBlock::iterator Before,
             unsigned SavedWQM);
  void processBlock(MachineBasicBlock &MBB, unsigned LiveMaskReg,
                    bool isEntry);

  void lowerLiveMaskQueries(unsigned LiveMaskReg);

public:
  static char ID;

  SIWholeQuadMode() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override;

  const char *getPassName() const override { return "SI Whole Quad Mode"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace

char SIWholeQuadMode::ID = 0;

INITIALIZE_PASS_BEGIN(SIWholeQuadMode, DEBUG_TYPE,
                      "SI Whole Quad Mode", false, false)
INITIALIZE_PASS_END(SIWholeQuadMode, DEBUG_TYPE,
                    "SI Whole Quad Mode", false, false)

char &llvm::SIWholeQuadModeID = SIWholeQuadMode::ID;

FunctionPass *llvm::createSIWholeQuadModePass() {
  return new SIWholeQuadMode;
}

// Scan instructions to determine which ones require an Exact execmask and
// which ones seed WQM requirements.
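//
// Returns the union of all states (StateWQM and/or StateExact) that occur
// anywhere in the function. Flagged instructions (and, for prolog shaders,
// final blocks) are pushed onto the worklist so that their requirements can
// be propagated by analyzeFunction below.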
char SIWholeQuadMode::scanInstructions(MachineFunction &MF,
                                       std::vector<WorkItem> &Worklist) {
  char GlobalFlags = 0;
  bool WQMOutputs = MF.getFunction()->hasFnAttribute("amdgpu-ps-wqm-outputs");

  for (auto BI = MF.begin(), BE = MF.end(); BI != BE; ++BI) {
    MachineBasicBlock &MBB = *BI;

    for (auto II = MBB.begin(), IE = MBB.end(); II != IE; ++II) {
      MachineInstr &MI = *II;
      unsigned Opcode = MI.getOpcode();
      char Flags = 0;

      if (TII->isWQM(Opcode) || TII->isDS(Opcode)) {
        Flags = StateWQM;
      } else if (TII->get(Opcode).mayStore() &&
                 (MI.getDesc().TSFlags & SIInstrFlags::VM_CNT)) {
        Flags = StateExact;
      } else {
        // Handle export instructions with the exec mask valid flag set
        if (Opcode == AMDGPU::EXP) {
          if (MI.getOperand(4).getImm() != 0)
            ExecExports.push_back(&MI);
        } else if (Opcode == AMDGPU::SI_PS_LIVE) {
          LiveMaskQueries.push_back(&MI);
        } else if (WQMOutputs) {
          // The function is in machine SSA form, which means that physical
          // VGPRs correspond to shader inputs and outputs. Inputs are
          // only used, outputs are only defined.
          for (const MachineOperand &MO : MI.defs()) {
            if (!MO.isReg())
              continue;

            unsigned Reg = MO.getReg();

            if (!TRI->isVirtualRegister(Reg) &&
                TRI->hasVGPRs(TRI->getPhysRegClass(Reg))) {
              Flags = StateWQM;
              break;
            }
          }
        }

        if (!Flags)
          continue;
      }

      Instructions[&MI].Needs = Flags;
      Worklist.push_back(&MI);
      GlobalFlags |= Flags;
    }

    if (WQMOutputs && MBB.succ_empty()) {
      // This is a prolog shader. Make sure we go back to exact mode at the
      // end.
      Blocks[&MBB].OutNeeds = StateExact;
      Worklist.push_back(&MBB);
      GlobalFlags |= StateExact;
    }
  }

  return GlobalFlags;
}
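
// Propagate the requirements of a single flagged instruction: to the
// definitions of its inputs (a value that feeds a derivative computation must
// itself be computed in WQM), backwards to the previous instruction in the
// block, and up into the summary information of the enclosing block.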
void SIWholeQuadMode::propagateInstruction(const MachineInstr &MI,
                                           std::vector<WorkItem> &Worklist) {
  const MachineBasicBlock &MBB = *MI.getParent();
  InstrInfo II = Instructions[&MI]; // take a copy to prevent dangling references
  BlockInfo &BI = Blocks[&MBB];

  // Control flow-type instructions that are followed by WQM computations
  // must themselves be in WQM.
  if ((II.OutNeeds & StateWQM) && !(II.Needs & StateWQM) &&
      (MI.isBranch() || MI.isTerminator() ||
       MI.getOpcode() == AMDGPU::SI_KILL)) {
    Instructions[&MI].Needs = StateWQM;
    II.Needs = StateWQM;
  }

  // Propagate to block level
  BI.Needs |= II.Needs;
  if ((BI.InNeeds | II.Needs) != BI.InNeeds) {
    BI.InNeeds |= II.Needs;
    Worklist.push_back(&MBB);
  }

  // Propagate backwards within block
  if (const MachineInstr *PrevMI = MI.getPrevNode()) {
    char InNeeds = II.Needs | II.OutNeeds;
    if (!PrevMI->isPHI()) {
      InstrInfo &PrevII = Instructions[PrevMI];
      if ((PrevII.OutNeeds | InNeeds) != PrevII.OutNeeds) {
        PrevII.OutNeeds |= InNeeds;
        Worklist.push_back(PrevMI);
      }
    }
  }

  // Propagate WQM flag to instruction inputs
  assert(II.Needs != (StateWQM | StateExact));
  if (II.Needs != StateWQM)
    return;

  for (const MachineOperand &Use : MI.uses()) {
    if (!Use.isReg() || !Use.isUse())
      continue;

    // At this point, physical registers appear as inputs or outputs
    // and following them makes no sense (and would in fact be incorrect
    // when the same VGPR is used as both an output and an input that leads
    // to a NeedsWQM instruction).
    //
    // Note: VCC appears e.g. in 64-bit addition with carry; in theory we
    // would have to trace it, but in practice it only occurs for 64-bit
    // computations such as pointers, where both dwords are already followed
    // anyway.
    if (!TargetRegisterInfo::isVirtualRegister(Use.getReg()))
      continue;

    for (const MachineOperand &Def : MRI->def_operands(Use.getReg())) {
      const MachineInstr *DefMI = Def.getParent();
      InstrInfo &DefII = Instructions[DefMI];

      // Obviously skip if DefMI is already flagged as NeedWQM.
      //
      // The instruction might also be flagged as NeedExact. This happens when
      // the result of an atomic is used in a WQM computation. In this case,
      // the atomic must not run for helper pixels and the WQM result is
      // undefined.
      if (DefII.Needs != 0)
        continue;

      DefII.Needs = StateWQM;
      Worklist.push_back(DefMI);
    }
  }
}
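
// Propagate a block's summarized needs across the CFG: the block's last
// instruction inherits OutNeeds, predecessors must exit in the states this
// block needs on entry, and successors must be prepared to be entered in the
// states this block may exit in.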
void SIWholeQuadMode::propagateBlock(const MachineBasicBlock &MBB,
                                     std::vector<WorkItem> &Worklist) {
  BlockInfo BI = Blocks[&MBB]; // take a copy to prevent dangling references

  // Propagate through instructions
  if (!MBB.empty()) {
    const MachineInstr *LastMI = &*MBB.rbegin();
    InstrInfo &LastII = Instructions[LastMI];
    if ((LastII.OutNeeds | BI.OutNeeds) != LastII.OutNeeds) {
      LastII.OutNeeds |= BI.OutNeeds;
      Worklist.push_back(LastMI);
    }
  }

  // Predecessor blocks must provide for our WQM/Exact needs.
  for (const MachineBasicBlock *Pred : MBB.predecessors()) {
    BlockInfo &PredBI = Blocks[Pred];
    if ((PredBI.OutNeeds | BI.InNeeds) == PredBI.OutNeeds)
      continue;

    PredBI.OutNeeds |= BI.InNeeds;
    PredBI.InNeeds |= BI.InNeeds;
    Worklist.push_back(Pred);
  }

  // All successors must be prepared to accept the same set of WQM/Exact data.
  for (const MachineBasicBlock *Succ : MBB.successors()) {
    BlockInfo &SuccBI = Blocks[Succ];
    if ((SuccBI.InNeeds | BI.OutNeeds) == SuccBI.InNeeds)
      continue;

    SuccBI.InNeeds |= BI.OutNeeds;
    Worklist.push_back(Succ);
  }
}

char SIWholeQuadMode::analyzeFunction(MachineFunction &MF) {
  std::vector<WorkItem> Worklist;
  char GlobalFlags = scanInstructions(MF, Worklist);

  while (!Worklist.empty()) {
    WorkItem WI = Worklist.back();
    Worklist.pop_back();

    if (WI.MI)
      propagateInstruction(*WI.MI, Worklist);
    else
      propagateBlock(*WI.MBB, Worklist);
  }

  return GlobalFlags;
}

// Switch to Exact mode by ANDing EXEC with the live mask; if SaveWQM is
// non-zero, the WQM-enabled EXEC is saved there so it can be restored later.
void SIWholeQuadMode::toExact(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator Before,
                              unsigned SaveWQM, unsigned LiveMaskReg) {
  if (SaveWQM) {
    BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::S_AND_SAVEEXEC_B64),
            SaveWQM)
        .addReg(LiveMaskReg);
  } else {
    BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::S_AND_B64),
            AMDGPU::EXEC)
        .addReg(AMDGPU::EXEC)
        .addReg(LiveMaskReg);
  }
}

// Re-enter WQM, either by restoring a previously saved EXEC or by recomputing
// WQM from the current (Exact) EXEC.
void SIWholeQuadMode::toWQM(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator Before,
                            unsigned SavedWQM) {
  if (SavedWQM) {
    BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::COPY), AMDGPU::EXEC)
        .addReg(SavedWQM);
  } else {
    BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::S_WQM_B64),
            AMDGPU::EXEC)
        .addReg(AMDGPU::EXEC);
  }
}
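
// Rewrite a single basic block: walk its instructions and insert the EXEC
// manipulations (via toExact/toWQM) required to satisfy the analysis results,
// making sure the block is exited in the state its successors expect.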
void SIWholeQuadMode::processBlock(MachineBasicBlock &MBB,
                                   unsigned LiveMaskReg, bool isEntry) {
  auto BII = Blocks.find(&MBB);
  if (BII == Blocks.end())
    return;

  const BlockInfo &BI = BII->second;

  if (!(BI.InNeeds & StateWQM))
    return;

  // This is a non-entry block that is WQM throughout, so no need to do
  // anything.
  if (!isEntry && !(BI.Needs & StateExact) && BI.OutNeeds != StateExact)
    return;

  unsigned SavedWQMReg = 0;
  bool WQMFromExec = isEntry;
  char State = isEntry ? StateExact : StateWQM;

  auto II = MBB.getFirstNonPHI(), IE = MBB.end();
  while (II != IE) {
    MachineInstr &MI = *II;
    ++II;

    // Skip instructions that are not affected by EXEC
    if (MI.getDesc().TSFlags & (SIInstrFlags::SALU | SIInstrFlags::SMRD) &&
        !MI.isBranch() && !MI.isTerminator())
      continue;

    // Generic instructions such as COPY will either disappear by register
    // coalescing or be lowered to SALU or VALU instructions.
    if (TargetInstrInfo::isGenericOpcode(MI.getOpcode())) {
      if (MI.getNumExplicitOperands() >= 1) {
        const MachineOperand &Op = MI.getOperand(0);
        if (Op.isReg()) {
          if (TRI->isSGPRReg(*MRI, Op.getReg())) {
            // SGPR instructions are not affected by EXEC
            continue;
          }
        }
      }
    }

    char Needs = 0;
    char OutNeeds = 0;
    auto InstrInfoIt = Instructions.find(&MI);
    if (InstrInfoIt != Instructions.end()) {
      Needs = InstrInfoIt->second.Needs;
      OutNeeds = InstrInfoIt->second.OutNeeds;

      // Make sure to switch to Exact mode before the end of the block when
      // Exact and only Exact is needed further downstream.
      if (OutNeeds == StateExact && (MI.isBranch() || MI.isTerminator())) {
        assert(Needs == 0);
        Needs = StateExact;
      }
    }

    // State switching
    if (Needs && State != Needs) {
      if (Needs == StateExact) {
        assert(!SavedWQMReg);

        if (!WQMFromExec && (OutNeeds & StateWQM))
          SavedWQMReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);

        toExact(MBB, &MI, SavedWQMReg, LiveMaskReg);
      } else {
        assert(WQMFromExec == (SavedWQMReg == 0));
        toWQM(MBB, &MI, SavedWQMReg);
        SavedWQMReg = 0;
      }

      State = Needs;
    }

    if (MI.getOpcode() == AMDGPU::SI_KILL)
      WQMFromExec = false;
  }

  if ((BI.OutNeeds & StateWQM) && State != StateWQM) {
    assert(WQMFromExec == (SavedWQMReg == 0));
    toWQM(MBB, MBB.end(), SavedWQMReg);
  } else if (BI.OutNeeds == StateExact && State != StateExact) {
    toExact(MBB, MBB.end(), 0, LiveMaskReg);
  }
}

// Replace live mask queries (SI_PS_LIVE) with copies of the computed live
// mask register.
void SIWholeQuadMode::lowerLiveMaskQueries(unsigned LiveMaskReg) {
  for (MachineInstr *MI : LiveMaskQueries) {
    DebugLoc DL = MI->getDebugLoc();
    unsigned Dest = MI->getOperand(0).getReg();
    BuildMI(*MI->getParent(), MI, DL, TII->get(AMDGPU::COPY), Dest)
        .addReg(LiveMaskReg);
    MI->eraseFromParent();
  }
}

bool SIWholeQuadMode::runOnMachineFunction(MachineFunction &MF) {
  if (MF.getFunction()->getCallingConv() != CallingConv::AMDGPU_PS)
    return false;

  Instructions.clear();
  Blocks.clear();
  ExecExports.clear();
  LiveMaskQueries.clear();

  TII = static_cast<const SIInstrInfo *>(MF.getSubtarget().getInstrInfo());
  TRI = static_cast<const SIRegisterInfo *>(
      MF.getSubtarget().getRegisterInfo());
  MRI = &MF.getRegInfo();

  char GlobalFlags = analyzeFunction(MF);
  if (!(GlobalFlags & StateWQM)) {
    lowerLiveMaskQueries(AMDGPU::EXEC);
    return !LiveMaskQueries.empty();
  }

  // Store a copy of the original live mask when required
  MachineBasicBlock &Entry = MF.front();
  MachineInstr *EntryMI = Entry.getFirstNonPHI();
  unsigned LiveMaskReg = 0;

  if (GlobalFlags & StateExact || !LiveMaskQueries.empty()) {
    LiveMaskReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
    BuildMI(Entry, EntryMI, DebugLoc(), TII->get(AMDGPU::COPY), LiveMaskReg)
        .addReg(AMDGPU::EXEC);
  }

  if (GlobalFlags == StateWQM) {
    // For a shader that needs only WQM, we can just set it once.
    BuildMI(Entry, EntryMI, DebugLoc(), TII->get(AMDGPU::S_WQM_B64),
            AMDGPU::EXEC)
        .addReg(AMDGPU::EXEC);

    lowerLiveMaskQueries(LiveMaskReg);
    // EntryMI may become invalid here
    return true;
  }

  lowerLiveMaskQueries(LiveMaskReg);
  EntryMI = nullptr;

  // Handle the general case
  for (const auto &BII : Blocks)
    processBlock(const_cast<MachineBasicBlock &>(*BII.first), LiveMaskReg,
                 BII.first == &*MF.begin());

  return true;
}