//===-- SILowerControlFlow.cpp - Use predicates for control flow ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief This pass lowers the pseudo control flow instructions to real
/// machine instructions.
///
/// All control flow is handled using predicated instructions and
/// a predicate stack. Each Scalar ALU controls the operations of 64 Vector
/// ALUs. The Scalar ALU can update the predicate for any of the Vector ALUs
/// by writing to the 64-bit EXEC register (each bit corresponds to a
/// single vector ALU). Typically, for predicates, a vector ALU will write
/// to its bit of the VCC register (like EXEC, VCC is 64 bits, one for each
/// Vector ALU) and then the Scalar ALU will AND the VCC register with the
/// EXEC to update the predicates.
///
/// For example:
/// %VCC = V_CMP_GT_F32 %VGPR1, %VGPR2
/// %SGPR0 = SI_IF %VCC
/// %VGPR0 = V_ADD_F32 %VGPR0, %VGPR0
/// %SGPR0 = SI_ELSE %SGPR0
/// %VGPR0 = V_SUB_F32 %VGPR0, %VGPR0
/// SI_END_CF %SGPR0
///
/// becomes:
///
/// %SGPR0 = S_AND_SAVEEXEC_B64 %VCC  // Save and update the exec mask
/// %SGPR0 = S_XOR_B64 %SGPR0, %EXEC  // Clear live bits from saved exec mask
/// S_CBRANCH_EXECZ label0            // This instruction is an optional
///                                   // optimization which allows us to
///                                   // branch if all the bits of
///                                   // EXEC are zero.
/// %VGPR0 = V_ADD_F32 %VGPR0, %VGPR0 // Do the IF block of the branch
///
/// label0:
/// %SGPR0 = S_OR_SAVEEXEC_B64 %EXEC  // Restore the exec mask for the Then block
/// %EXEC = S_XOR_B64 %SGPR0, %EXEC   // Clear live bits from saved exec mask
/// S_CBRANCH_EXECZ label1            // Use our branch optimization
///                                   // instruction again.
/// %VGPR0 = V_SUB_F32 %VGPR0, %VGPR0 // Do the THEN block
/// label1:
/// %EXEC = S_OR_B64 %EXEC, %SGPR0    // Re-enable saved exec mask bits
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/CommandLine.h"

using namespace llvm;

#define DEBUG_TYPE "si-lower-control-flow"

namespace {

static cl::opt<unsigned> SkipThresholdFlag(
  "amdgpu-skip-threshold",
  cl::desc("Number of instructions before jumping over divergent control flow"),
  cl::init(12), cl::Hidden);

class SILowerControlFlow : public MachineFunctionPass {
private:
  const SIRegisterInfo *TRI;
  const SIInstrInfo *TII;
  unsigned SkipThreshold;

  bool shouldSkip(MachineBasicBlock *From, MachineBasicBlock *To);

  MachineInstr *Skip(MachineInstr &From, MachineOperand &To);
  bool skipIfDead(MachineInstr &MI, MachineBasicBlock &NextBB);

  void If(MachineInstr &MI);
  void Else(MachineInstr &MI);
  void Break(MachineInstr &MI);
  void IfBreak(MachineInstr &MI);
  void ElseBreak(MachineInstr &MI);
  void Loop(MachineInstr &MI);
  void EndCf(MachineInstr &MI);

  void Kill(MachineInstr &MI);
  void Branch(MachineInstr &MI);

  MachineBasicBlock *insertSkipBlock(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator I) const;
public:
  static char ID;

  SILowerControlFlow() :
    MachineFunctionPass(ID), TRI(nullptr), TII(nullptr), SkipThreshold(0) { }

  bool runOnMachineFunction(MachineFunction &MF) override;

  const char *getPassName() const override {
    return "SI Lower control flow pseudo instructions";
  }
};

} // End anonymous namespace

char SILowerControlFlow::ID = 0;

INITIALIZE_PASS(SILowerControlFlow, DEBUG_TYPE,
                "SI lower control flow", false, false)

char &llvm::SILowerControlFlowPassID = SILowerControlFlow::ID;

FunctionPass *llvm::createSILowerControlFlowPass() {
  return new SILowerControlFlow();
}

static bool opcodeEmitsNoInsts(unsigned Opc) {
  switch (Opc) {
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
  case TargetOpcode::BUNDLE:
  case TargetOpcode::CFI_INSTRUCTION:
  case TargetOpcode::EH_LABEL:
  case TargetOpcode::GC_LABEL:
  case TargetOpcode::DBG_VALUE:
    return true;
  default:
    return false;
  }
}

bool SILowerControlFlow::shouldSkip(MachineBasicBlock *From,
                                    MachineBasicBlock *To) {
  if (From->succ_empty())
    return false;

  unsigned NumInstr = 0;
  MachineFunction *MF = From->getParent();

  for (MachineFunction::iterator MBBI(From), ToI(To), End = MF->end();
       MBBI != End && MBBI != ToI; ++MBBI) {
    MachineBasicBlock &MBB = *MBBI;

    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
         NumInstr < SkipThreshold && I != E; ++I) {
      if (opcodeEmitsNoInsts(I->getOpcode()))
        continue;

      // When a uniform loop is inside non-uniform control flow, the branch
      // leaving the loop might be an S_CBRANCH_VCCNZ, which is never taken
      // when EXEC = 0.
      // We should skip the loop lest it become infinite.
      if (I->getOpcode() == AMDGPU::S_CBRANCH_VCCNZ ||
          I->getOpcode() == AMDGPU::S_CBRANCH_VCCZ)
        return true;

      if (I->isInlineAsm()) {
        const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();
        const char *AsmStr = I->getOperand(0).getSymbolName();

        // The inline asm length estimate is the number of bytes assuming
        // the longest instruction.
        uint64_t MaxAsmSize = TII->getInlineAsmLength(AsmStr, *MAI);
        NumInstr += MaxAsmSize / MAI->getMaxInstLength();
      } else {
        ++NumInstr;
      }

      if (NumInstr >= SkipThreshold)
        return true;
    }
  }

  return false;
}

MachineInstr *SILowerControlFlow::Skip(MachineInstr &From, MachineOperand &To) {
  if (!shouldSkip(*From.getParent()->succ_begin(), To.getMBB()))
    return nullptr;

  const DebugLoc &DL = From.getDebugLoc();
  MachineInstr *Skip =
    BuildMI(*From.getParent(), &From, DL, TII->get(AMDGPU::S_CBRANCH_EXECZ))
    .addOperand(To);
  return Skip;
}

bool SILowerControlFlow::skipIfDead(MachineInstr &MI, MachineBasicBlock &NextBB) {
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction *MF = MBB.getParent();

  if (MF->getFunction()->getCallingConv() != CallingConv::AMDGPU_PS ||
      !shouldSkip(&MBB, &MBB.getParent()->back()))
    return false;

  MachineBasicBlock *SkipBB = insertSkipBlock(MBB, MI.getIterator());
  MBB.addSuccessor(SkipBB);

  const DebugLoc &DL = MI.getDebugLoc();

  // If the exec mask is non-zero, skip the next two instructions.
  BuildMI(&MBB, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    .addMBB(&NextBB);

  MachineBasicBlock::iterator Insert = SkipBB->begin();

  // Exec mask is zero: Export to NULL target...
  BuildMI(*SkipBB, Insert, DL, TII->get(AMDGPU::EXP))
    .addImm(0)
    .addImm(0x09) // V_008DFC_SQ_EXP_NULL
    .addImm(0)
    .addImm(1)
    .addImm(1)
    .addReg(AMDGPU::VGPR0, RegState::Undef)
    .addReg(AMDGPU::VGPR0, RegState::Undef)
    .addReg(AMDGPU::VGPR0, RegState::Undef)
    .addReg(AMDGPU::VGPR0, RegState::Undef);

  // ... and terminate wavefront.
  BuildMI(*SkipBB, Insert, DL, TII->get(AMDGPU::S_ENDPGM));

  return true;
}

void SILowerControlFlow::If(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Reg = MI.getOperand(0).getReg();
  unsigned Vcc = MI.getOperand(1).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), Reg)
    .addReg(Vcc);

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), Reg)
    .addReg(AMDGPU::EXEC)
    .addReg(Reg);

  MachineInstr *SkipInst = Skip(MI, MI.getOperand(2));

  // Insert before the new branch instruction.
  MachineInstr *InsPt = SkipInst ? SkipInst : &MI;

  // Insert a pseudo terminator to help keep the verifier happy.
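  // SI_MASK_BRANCH is emitted unconditionally, so the block still ends in a
  // terminator even when Skip() decided the S_CBRANCH_EXECZ was not
  // worthwhile and returned nullptr.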
  BuildMI(MBB, InsPt, DL, TII->get(AMDGPU::SI_MASK_BRANCH))
    .addOperand(MI.getOperand(2))
    .addReg(Reg);

  MI.eraseFromParent();
}

void SILowerControlFlow::Else(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Src = MI.getOperand(1).getReg();

  BuildMI(MBB, MBB.getFirstNonPHI(), DL,
          TII->get(AMDGPU::S_OR_SAVEEXEC_B64), Dst)
    .addReg(Src); // Saved EXEC

  if (MI.getOperand(3).getImm() != 0) {
    // Adjust the saved exec to account for the modifications during the flow
    // block that contains the ELSE. This can happen when WQM mode is switched
    // off.
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_B64), Dst)
      .addReg(AMDGPU::EXEC)
      .addReg(Dst);
  }

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(Dst);

  MachineInstr *SkipInst = Skip(MI, MI.getOperand(2));

  // Insert before the new branch instruction.
  MachineInstr *InsPt = SkipInst ? SkipInst : &MI;

  // Insert a pseudo terminator to help keep the verifier happy.
  BuildMI(MBB, InsPt, DL, TII->get(AMDGPU::SI_MASK_BRANCH))
    .addOperand(MI.getOperand(2))
    .addReg(Dst);

  MI.eraseFromParent();
}

void SILowerControlFlow::Break(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Src = MI.getOperand(1).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
    .addReg(AMDGPU::EXEC)
    .addReg(Src);

  MI.eraseFromParent();
}

void SILowerControlFlow::IfBreak(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Vcc = MI.getOperand(1).getReg();
  unsigned Src = MI.getOperand(2).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
    .addReg(Vcc)
    .addReg(Src);

  MI.eraseFromParent();
}

void SILowerControlFlow::ElseBreak(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Saved = MI.getOperand(1).getReg();
  unsigned Src = MI.getOperand(2).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
    .addReg(Saved)
    .addReg(Src);

  MI.eraseFromParent();
}

void SILowerControlFlow::Loop(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Src = MI.getOperand(0).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ANDN2_B64), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(Src);

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    .addOperand(MI.getOperand(1));

  MI.eraseFromParent();
}

void SILowerControlFlow::EndCf(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Reg = MI.getOperand(0).getReg();

  BuildMI(MBB, MBB.getFirstNonPHI(), DL,
          TII->get(AMDGPU::S_OR_B64), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(Reg);

  MI.eraseFromParent();
}

void SILowerControlFlow::Branch(MachineInstr &MI) {
  MachineBasicBlock *MBB = MI.getOperand(0).getMBB();
  if (MBB == MI.getParent()->getNextNode())
    MI.eraseFromParent();

  // If these aren't equal, this is probably an infinite loop.
}

void SILowerControlFlow::Kill(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  const MachineOperand &Op = MI.getOperand(0);

#ifndef NDEBUG
  CallingConv::ID CallConv = MBB.getParent()->getFunction()->getCallingConv();
  // Kill is only allowed in pixel / geometry shaders.
  assert(CallConv == CallingConv::AMDGPU_PS ||
         CallConv == CallingConv::AMDGPU_GS);
#endif

  // Clear this thread from the exec mask if the operand is negative.
  if (Op.isImm()) {
    // Constant operand: Set exec mask to 0 or do nothing.
    if (Op.getImm() & 0x80000000) {
      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
        .addImm(0);
    }
  } else {
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CMPX_LE_F32_e32))
      .addImm(0)
      .addOperand(Op);
  }

  MI.eraseFromParent();
}

MachineBasicBlock *SILowerControlFlow::insertSkipBlock(
  MachineBasicBlock &MBB, MachineBasicBlock::iterator I) const {
  MachineFunction *MF = MBB.getParent();

  MachineBasicBlock *SkipBB = MF->CreateMachineBasicBlock();
  MachineFunction::iterator MBBI(MBB);
  ++MBBI;

  MF->insert(MBBI, SkipBB);

  return SkipBB;
}

bool SILowerControlFlow::runOnMachineFunction(MachineFunction &MF) {
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  TII = ST.getInstrInfo();
  TRI = &TII->getRegisterInfo();
  SkipThreshold = SkipThresholdFlag;

  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  bool HaveKill = false;
  bool NeedFlat = false;
  unsigned Depth = 0;

  MachineFunction::iterator NextBB;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; BI = NextBB) {
    NextBB = std::next(BI);
    MachineBasicBlock &MBB = *BI;

    MachineBasicBlock *EmptyMBBAtEnd = nullptr;
    MachineBasicBlock::iterator I, Next;

    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);

      MachineInstr &MI = *I;

      // Flat uses m0 in case it needs to access LDS.
      if (TII->isFLAT(MI))
        NeedFlat = true;

      switch (MI.getOpcode()) {
      default: break;
      case AMDGPU::SI_IF:
        ++Depth;
        If(MI);
        break;

      case AMDGPU::SI_ELSE:
        Else(MI);
        break;

      case AMDGPU::SI_BREAK:
        Break(MI);
        break;

      case AMDGPU::SI_IF_BREAK:
        IfBreak(MI);
        break;

      case AMDGPU::SI_ELSE_BREAK:
        ElseBreak(MI);
        break;

      case AMDGPU::SI_LOOP:
        ++Depth;
        Loop(MI);
        break;

      case AMDGPU::SI_END_CF:
        if (--Depth == 0 && HaveKill) {
          HaveKill = false;
          // TODO: Insert skip if exec is 0?
        }

        EndCf(MI);
        break;

      case AMDGPU::SI_KILL_TERMINATOR:
        if (Depth == 0) {
          if (skipIfDead(MI, *NextBB)) {
            NextBB = std::next(BI);
            BE = MF.end();
          }
        } else
          HaveKill = true;
        Kill(MI);
        break;

      case AMDGPU::S_BRANCH:
        Branch(MI);
        break;

      case AMDGPU::SI_RETURN: {
        assert(!MF.getInfo<SIMachineFunctionInfo>()->returnsVoid());

        // Graphics shaders returning non-void shouldn't contain S_ENDPGM,
        // because external bytecode will be appended at the end.
        if (BI != --MF.end() || I != MBB.getFirstTerminator()) {
          // SI_RETURN is not the last instruction. Add an empty block at
          // the end and jump there.
          if (!EmptyMBBAtEnd) {
            EmptyMBBAtEnd = MF.CreateMachineBasicBlock();
            MF.insert(MF.end(), EmptyMBBAtEnd);
          }

          MBB.addSuccessor(EmptyMBBAtEnd);
          BuildMI(*BI, I, MI.getDebugLoc(), TII->get(AMDGPU::S_BRANCH))
            .addMBB(EmptyMBBAtEnd);
          I->eraseFromParent();
        }
        break;
      }
      }
    }
  }

  if (NeedFlat && MFI->isKernel()) {
    // TODO: What to use with function calls?
    // We will need to initialize the flat scratch register pair.
    MFI->setHasFlatInstructions(true);
  }

  return true;
}