//===-- SIWholeQuadMode.cpp - enter and suspend whole quad mode ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief This pass adds instructions to enable whole quad mode for pixel
/// shaders.
///
/// Whole quad mode is required for derivative computations, but it interferes
/// with shader side effects (stores and atomics). This pass is run on the
/// scheduled machine IR but before register coalescing, so that machine SSA is
/// available for analysis. It ensures that WQM is enabled when necessary, but
/// disabled around stores and atomics.
///
/// When necessary, this pass creates a function prolog
///
///   S_MOV_B64 LiveMask, EXEC
///   S_WQM_B64 EXEC, EXEC
///
/// to enter WQM at the top of the function and surrounds blocks of Exact
/// instructions with
///
///   S_AND_SAVEEXEC_B64 Tmp, LiveMask
///   ...
///   S_MOV_B64 EXEC, Tmp
///
/// In order to avoid excessive switching during sequences of Exact
/// instructions, the pass first analyzes which instructions must be run in WQM
/// (aka which instructions produce values that lead to derivative
/// computations).
///
/// Basic blocks are always exited in WQM as long as some successor needs WQM.
///
/// There is room for improvement given better control flow analysis:
///
///  (1) at the top level (outside of control flow statements, and as long as
///      kill hasn't been used), one SGPR can be saved by recovering WQM from
///      the LiveMask (this is implemented for the entry block).
///
///  (2) when entire regions (e.g. if-else blocks or entire loops) only
///      consist of exact and don't-care instructions, the switch only has to
///      be done at the entry and exit points rather than potentially in each
///      block of the region.
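///
/// The analysis is implemented as a worklist-driven propagation of
/// per-instruction and per-block Needs/InNeeds/OutNeeds flags (see
/// analyzeFunction below).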
///
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineDominanceFrontier.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Constants.h"

using namespace llvm;

#define DEBUG_TYPE "si-wqm"

namespace {

enum {
  StateWQM = 0x1,
  StateExact = 0x2,
};

struct InstrInfo {
  char Needs = 0;
  char OutNeeds = 0;
};

struct BlockInfo {
  char Needs = 0;
  char InNeeds = 0;
  char OutNeeds = 0;
};

struct WorkItem {
  const MachineBasicBlock *MBB = nullptr;
  const MachineInstr *MI = nullptr;

  WorkItem() {}
  WorkItem(const MachineBasicBlock *MBB) : MBB(MBB) {}
  WorkItem(const MachineInstr *MI) : MI(MI) {}
};

class SIWholeQuadMode : public MachineFunctionPass {
private:
  const SIInstrInfo *TII;
  const SIRegisterInfo *TRI;
  MachineRegisterInfo *MRI;

  DenseMap<const MachineInstr *, InstrInfo> Instructions;
  DenseMap<const MachineBasicBlock *, BlockInfo> Blocks;
  SmallVector<const MachineInstr *, 2> ExecExports;

  char scanInstructions(const MachineFunction &MF,
                        std::vector<WorkItem> &Worklist);
  void propagateInstruction(const MachineInstr &MI,
                            std::vector<WorkItem> &Worklist);
  void propagateBlock(const MachineBasicBlock &MBB,
                      std::vector<WorkItem> &Worklist);
  char analyzeFunction(const MachineFunction &MF);

  void toExact(MachineBasicBlock &MBB, MachineBasicBlock::iterator Before,
               unsigned SaveWQM, unsigned LiveMaskReg);
  void toWQM(MachineBasicBlock &MBB, MachineBasicBlock::iterator Before,
             unsigned SavedWQM);
  void processBlock(MachineBasicBlock &MBB, unsigned LiveMaskReg,
                    bool isEntry);

public:
  static char ID;

  SIWholeQuadMode() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override;

  const char *getPassName() const override {
    return "SI Whole Quad Mode";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace

char SIWholeQuadMode::ID = 0;

INITIALIZE_PASS_BEGIN(SIWholeQuadMode, DEBUG_TYPE,
                      "SI Whole Quad Mode", false, false)
INITIALIZE_PASS_END(SIWholeQuadMode, DEBUG_TYPE,
                    "SI Whole Quad Mode", false, false)

char &llvm::SIWholeQuadModeID = SIWholeQuadMode::ID;

FunctionPass *llvm::createSIWholeQuadModePass() {
  return new SIWholeQuadMode;
}

// Scan instructions to determine which ones require an Exact execmask and
// which ones seed WQM requirements.
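//
// Returns a bitmask of the states (StateWQM/StateExact) that were seeded
// anywhere in the function; each seed instruction is pushed onto Worklist.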
char SIWholeQuadMode::scanInstructions(const MachineFunction &MF,
                                       std::vector<WorkItem> &Worklist) {
  char GlobalFlags = 0;

  for (auto BI = MF.begin(), BE = MF.end(); BI != BE; ++BI) {
    const MachineBasicBlock &MBB = *BI;

    for (auto II = MBB.begin(), IE = MBB.end(); II != IE; ++II) {
      const MachineInstr &MI = *II;
      unsigned Opcode = MI.getOpcode();
      char Flags;

      if (TII->isWQM(Opcode) || TII->isDS(Opcode)) {
        Flags = StateWQM;
      } else if (TII->get(Opcode).mayStore() &&
                 (MI.getDesc().TSFlags & SIInstrFlags::VM_CNT)) {
        Flags = StateExact;
      } else {
        // Handle export instructions with the exec mask valid flag set
        if (Opcode == AMDGPU::EXP && MI.getOperand(4).getImm() != 0)
          ExecExports.push_back(&MI);
        continue;
      }

      Instructions[&MI].Needs = Flags;
      Worklist.push_back(&MI);
      GlobalFlags |= Flags;
    }
  }

  return GlobalFlags;
}

void SIWholeQuadMode::propagateInstruction(const MachineInstr &MI,
                                           std::vector<WorkItem> &Worklist) {
  const MachineBasicBlock &MBB = *MI.getParent();
  InstrInfo &II = Instructions[&MI];
  BlockInfo &BI = Blocks[&MBB];

  // Control flow-type instructions that are followed by WQM computations
  // must themselves be in WQM.
  if ((II.OutNeeds & StateWQM) && !(II.Needs & StateWQM) &&
      (MI.isBranch() || MI.isTerminator() ||
       MI.getOpcode() == AMDGPU::SI_KILL))
    II.Needs = StateWQM;

  // Propagate to block level
  BI.Needs |= II.Needs;
  if ((BI.InNeeds | II.Needs) != BI.InNeeds) {
    BI.InNeeds |= II.Needs;
    Worklist.push_back(&MBB);
  }

  // Propagate backwards within block
  if (const MachineInstr *PrevMI = MI.getPrevNode()) {
    char InNeeds = II.Needs | II.OutNeeds;
    if (!PrevMI->isPHI()) {
      InstrInfo &PrevII = Instructions[PrevMI];
      if ((PrevII.OutNeeds | InNeeds) != PrevII.OutNeeds) {
        PrevII.OutNeeds |= InNeeds;
        Worklist.push_back(PrevMI);
      }
    }
  }

  // Propagate WQM flag to instruction inputs
  assert(II.Needs != (StateWQM | StateExact));
  if (II.Needs != StateWQM)
    return;

  for (const MachineOperand &Use : MI.uses()) {
    if (!Use.isReg() || !Use.isUse())
      continue;

    // At this point, physical registers appear as inputs or outputs
    // and following them makes no sense (and would in fact be incorrect
    // when the same VGPR is used as both an output and an input that leads
    // to an instruction that needs WQM).
    //
    // Note: VCC appears e.g. in 64-bit addition with carry. Theoretically we
    // would have to trace this; in practice it only happens for 64-bit
    // computations like pointers, where both dwords are followed already
    // anyway.
    if (!TargetRegisterInfo::isVirtualRegister(Use.getReg()))
      continue;

    for (const MachineOperand &Def : MRI->def_operands(Use.getReg())) {
      const MachineInstr *DefMI = Def.getParent();
      InstrInfo &DefII = Instructions[DefMI];

      // Obviously skip if DefMI is already flagged as needing WQM.
      //
      // The instruction might also be flagged as needing Exact. This happens
      // when the result of an atomic is used in a WQM computation. In this
      // case, the atomic must not run for helper pixels and the WQM result is
      // undefined.
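      //
      // In either case there is nothing further to propagate from DefMI.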
      if (DefII.Needs != 0)
        continue;

      DefII.Needs = StateWQM;
      Worklist.push_back(DefMI);
    }
  }
}

void SIWholeQuadMode::propagateBlock(const MachineBasicBlock &MBB,
                                     std::vector<WorkItem> &Worklist) {
  BlockInfo &BI = Blocks[&MBB];

  // Propagate through instructions
  if (!MBB.empty()) {
    const MachineInstr *LastMI = &*MBB.rbegin();
    InstrInfo &LastII = Instructions[LastMI];
    if ((LastII.OutNeeds | BI.OutNeeds) != LastII.OutNeeds) {
      LastII.OutNeeds |= BI.OutNeeds;
      Worklist.push_back(LastMI);
    }
  }

  // Predecessor blocks must provide for our WQM/Exact needs.
  for (const MachineBasicBlock *Pred : MBB.predecessors()) {
    BlockInfo &PredBI = Blocks[Pred];
    if ((PredBI.OutNeeds | BI.InNeeds) == PredBI.OutNeeds)
      continue;

    PredBI.OutNeeds |= BI.InNeeds;
    PredBI.InNeeds |= BI.InNeeds;
    Worklist.push_back(Pred);
  }

  // All successors must be prepared to accept the same set of WQM/Exact
  // data.
  for (const MachineBasicBlock *Succ : MBB.successors()) {
    BlockInfo &SuccBI = Blocks[Succ];
    if ((SuccBI.InNeeds | BI.OutNeeds) == SuccBI.InNeeds)
      continue;

    SuccBI.InNeeds |= BI.OutNeeds;
    Worklist.push_back(Succ);
  }
}

char SIWholeQuadMode::analyzeFunction(const MachineFunction &MF) {
  std::vector<WorkItem> Worklist;
  char GlobalFlags = scanInstructions(MF, Worklist);

  while (!Worklist.empty()) {
    WorkItem WI = Worklist.back();
    Worklist.pop_back();

    if (WI.MI)
      propagateInstruction(*WI.MI, Worklist);
    else
      propagateBlock(*WI.MBB, Worklist);
  }

  return GlobalFlags;
}

void SIWholeQuadMode::toExact(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator Before,
                              unsigned SaveWQM, unsigned LiveMaskReg) {
  if (SaveWQM) {
    BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::S_AND_SAVEEXEC_B64),
            SaveWQM)
        .addReg(LiveMaskReg);
  } else {
    BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::S_AND_B64),
            AMDGPU::EXEC)
        .addReg(AMDGPU::EXEC)
        .addReg(LiveMaskReg);
  }
}

void SIWholeQuadMode::toWQM(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator Before,
                            unsigned SavedWQM) {
  if (SavedWQM) {
    BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::COPY), AMDGPU::EXEC)
        .addReg(SavedWQM);
  } else {
    BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::S_WQM_B64),
            AMDGPU::EXEC)
        .addReg(AMDGPU::EXEC);
  }
}

void SIWholeQuadMode::processBlock(MachineBasicBlock &MBB,
                                   unsigned LiveMaskReg, bool isEntry) {
  auto BII = Blocks.find(&MBB);
  if (BII == Blocks.end())
    return;

  const BlockInfo &BI = BII->second;

  if (!(BI.InNeeds & StateWQM))
    return;

  // This is a non-entry block that is WQM throughout, so no need to do
  // anything.
  if (!isEntry && !(BI.Needs & StateExact) && BI.OutNeeds != StateExact)
    return;

  unsigned SavedWQMReg = 0;
  bool WQMFromExec = isEntry;
  char State = isEntry ? StateExact : StateWQM;

  auto II = MBB.getFirstNonPHI(), IE = MBB.end();
  while (II != IE) {
    MachineInstr &MI = *II;
    ++II;

    // Skip instructions that are not affected by EXEC
    if ((MI.getDesc().TSFlags & (SIInstrFlags::SALU | SIInstrFlags::SMRD)) &&
        !MI.isBranch() && !MI.isTerminator())
      continue;

    // Generic instructions such as COPY will either disappear by register
    // coalescing or be lowered to SALU or VALU instructions.
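    // If the first (destination) operand is an SGPR, the instruction cannot
    // be affected by EXEC and can be skipped here as well.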
    if (TargetInstrInfo::isGenericOpcode(MI.getOpcode())) {
      if (MI.getNumExplicitOperands() >= 1) {
        const MachineOperand &Op = MI.getOperand(0);
        if (Op.isReg()) {
          if (TRI->isSGPRReg(*MRI, Op.getReg())) {
            // SGPR instructions are not affected by EXEC
            continue;
          }
        }
      }
    }

    char Needs = 0;
    char OutNeeds = 0;
    auto InstrInfoIt = Instructions.find(&MI);
    if (InstrInfoIt != Instructions.end()) {
      Needs = InstrInfoIt->second.Needs;
      OutNeeds = InstrInfoIt->second.OutNeeds;

      // Make sure to switch to Exact mode before the end of the block when
      // Exact and only Exact is needed further downstream.
      if (OutNeeds == StateExact && (MI.isBranch() || MI.isTerminator())) {
        assert(Needs == 0);
        Needs = StateExact;
      }
    }

    // State switching
    if (Needs && State != Needs) {
      if (Needs == StateExact) {
        assert(!SavedWQMReg);

        if (!WQMFromExec && (OutNeeds & StateWQM))
          SavedWQMReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);

        toExact(MBB, &MI, SavedWQMReg, LiveMaskReg);
      } else {
        assert(WQMFromExec == (SavedWQMReg == 0));
        toWQM(MBB, &MI, SavedWQMReg);
        SavedWQMReg = 0;
      }

      State = Needs;
    }

    if (MI.getOpcode() == AMDGPU::SI_KILL)
      WQMFromExec = false;
  }

  if ((BI.OutNeeds & StateWQM) && State != StateWQM) {
    assert(WQMFromExec == (SavedWQMReg == 0));
    toWQM(MBB, MBB.end(), SavedWQMReg);
  } else if (BI.OutNeeds == StateExact && State != StateExact) {
    toExact(MBB, MBB.end(), 0, LiveMaskReg);
  }
}

bool SIWholeQuadMode::runOnMachineFunction(MachineFunction &MF) {
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  if (MFI->getShaderType() != ShaderType::PIXEL)
    return false;

  Instructions.clear();
  Blocks.clear();
  ExecExports.clear();

  TII = static_cast<const SIInstrInfo *>(MF.getSubtarget().getInstrInfo());
  TRI = static_cast<const SIRegisterInfo *>(
      MF.getSubtarget().getRegisterInfo());
  MRI = &MF.getRegInfo();

  char GlobalFlags = analyzeFunction(MF);
  if (!(GlobalFlags & StateWQM))
    return false;

  MachineBasicBlock &Entry = MF.front();
  MachineInstr *EntryMI = Entry.getFirstNonPHI();

  if (GlobalFlags == StateWQM) {
    // For a shader that needs only WQM, we can just set it once.
    BuildMI(Entry, EntryMI, DebugLoc(), TII->get(AMDGPU::S_WQM_B64),
            AMDGPU::EXEC)
        .addReg(AMDGPU::EXEC);
    return true;
  }

  // Handle the general case
  unsigned LiveMaskReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
  BuildMI(Entry, EntryMI, DebugLoc(), TII->get(AMDGPU::COPY), LiveMaskReg)
      .addReg(AMDGPU::EXEC);

  for (const auto &BII : Blocks)
    processBlock(const_cast<MachineBasicBlock &>(*BII.first), LiveMaskReg,
                 BII.first == &*MF.begin());

  return true;
}