//===-- SILateBranchLowering.cpp - Final preparation of branches ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass mainly lowers early terminate pseudo instructions into branches
/// to a shared early-exit block. It also expands chain-call pseudos into an
/// EXEC update plus a tail call, ensures SI_RETURN_TO_EPILOG only occurs at
/// the end of the function, and removes branches to layout successors.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineDominators.h"

using namespace llvm;

#define DEBUG_TYPE "si-late-branch-lowering"

namespace {

class SILateBranchLowering : public MachineFunctionPass {
private:
  const SIRegisterInfo *TRI = nullptr;
  const SIInstrInfo *TII = nullptr;
  MachineDominatorTree *MDT = nullptr;

  void expandChainCall(MachineInstr &MI);
  void earlyTerm(MachineInstr &MI, MachineBasicBlock *EarlyExitBlock);

public:
  static char ID;

  unsigned MovOpc;
  Register ExecReg;

  SILateBranchLowering() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override {
    return "SI Final Branch Preparation";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<MachineDominatorTreeWrapperPass>();
    AU.addPreserved<MachineDominatorTreeWrapperPass>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace

char SILateBranchLowering::ID = 0;

INITIALIZE_PASS_BEGIN(SILateBranchLowering, DEBUG_TYPE,
                      "SI insert s_cbranch_execz instructions", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTreeWrapperPass)
INITIALIZE_PASS_END(SILateBranchLowering, DEBUG_TYPE,
                    "SI insert s_cbranch_execz instructions", false, false)

char &llvm::SILateBranchLoweringPassID = SILateBranchLowering::ID;

static void generateEndPgm(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator I, DebugLoc DL,
                           const SIInstrInfo *TII, MachineFunction &MF) {
  const Function &F = MF.getFunction();
  bool IsPS = F.getCallingConv() == CallingConv::AMDGPU_PS;

  // Check if hardware has been configured to expect color or depth exports.
  bool HasColorExports = AMDGPU::getHasColorExport(F);
  bool HasDepthExports = AMDGPU::getHasDepthExport(F);
  bool HasExports = HasColorExports || HasDepthExports;

  // Prior to GFX10, hardware always expects at least one export for PS.
  bool MustExport = !AMDGPU::isGFX10Plus(TII->getSubtarget());

  if (IsPS && (HasExports || MustExport)) {
    // Generate "null export" if hardware is expecting PS to export.
    const GCNSubtarget &ST = MBB.getParent()->getSubtarget<GCNSubtarget>();
    int Target =
        ST.hasNullExportTarget()
            ? AMDGPU::Exp::ET_NULL
            : (HasColorExports ? AMDGPU::Exp::ET_MRT0 : AMDGPU::Exp::ET_MRTZ);
    BuildMI(MBB, I, DL, TII->get(AMDGPU::EXP_DONE))
        .addImm(Target)
        .addReg(AMDGPU::VGPR0, RegState::Undef)
        .addReg(AMDGPU::VGPR0, RegState::Undef)
        .addReg(AMDGPU::VGPR0, RegState::Undef)
        .addReg(AMDGPU::VGPR0, RegState::Undef)
        .addImm(1)  // vm
        .addImm(0)  // compr
        .addImm(0); // en
  }

  // s_endpgm
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ENDPGM)).addImm(0);
}

static void splitBlock(MachineBasicBlock &MBB, MachineInstr &MI,
                       MachineDominatorTree *MDT) {
  MachineBasicBlock *SplitBB = MBB.splitAt(MI, /*UpdateLiveIns*/ true);

  // Update dominator tree
  using DomTreeT = DomTreeBase<MachineBasicBlock>;
  SmallVector<DomTreeT::UpdateType, 16> DTUpdates;
  for (MachineBasicBlock *Succ : SplitBB->successors()) {
    DTUpdates.push_back({DomTreeT::Insert, SplitBB, Succ});
    DTUpdates.push_back({DomTreeT::Delete, &MBB, Succ});
  }
  DTUpdates.push_back({DomTreeT::Insert, &MBB, SplitBB});
  MDT->applyUpdates(DTUpdates);
}

void SILateBranchLowering::expandChainCall(MachineInstr &MI) {
  // This is a tail call that needs to be expanded into at least
  // 2 instructions, one for setting EXEC and one for the actual tail call.
  constexpr unsigned ExecIdx = 3;

  BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), TII->get(MovOpc), ExecReg)
      ->addOperand(MI.getOperand(ExecIdx));
  MI.removeOperand(ExecIdx);

  MI.setDesc(TII->get(AMDGPU::SI_TCRETURN));
}

void SILateBranchLowering::earlyTerm(MachineInstr &MI,
                                     MachineBasicBlock *EarlyExitBlock) {
  MachineBasicBlock &MBB = *MI.getParent();
  const DebugLoc DL = MI.getDebugLoc();

  auto BranchMI = BuildMI(MBB, MI, DL, TII->get(AMDGPU::S_CBRANCH_SCC0))
                      .addMBB(EarlyExitBlock);
  auto Next = std::next(MI.getIterator());

  if (Next != MBB.end() && !Next->isTerminator())
    splitBlock(MBB, *BranchMI, MDT);

  MBB.addSuccessor(EarlyExitBlock);
  MDT->insertEdge(&MBB, EarlyExitBlock);
}

bool SILateBranchLowering::runOnMachineFunction(MachineFunction &MF) {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  TII = ST.getInstrInfo();
  TRI = &TII->getRegisterInfo();
  MDT = &getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree();

  MovOpc = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
  ExecReg = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;

  SmallVector<MachineInstr *, 4> EarlyTermInstrs;
  SmallVector<MachineInstr *, 1> EpilogInstrs;
  bool MadeChange = false;

  for (MachineBasicBlock &MBB : MF) {
    for (MachineInstr &MI : llvm::make_early_inc_range(MBB)) {
      switch (MI.getOpcode()) {
      case AMDGPU::S_BRANCH:
        // Optimize out branches to the next block.
        // This only occurs in -O0 when BranchFolding is not executed.
        if (MBB.isLayoutSuccessor(MI.getOperand(0).getMBB())) {
          assert(&MI == &MBB.back());
          MI.eraseFromParent();
          MadeChange = true;
        }
        break;

      case AMDGPU::SI_CS_CHAIN_TC_W32:
      case AMDGPU::SI_CS_CHAIN_TC_W64:
        expandChainCall(MI);
        MadeChange = true;
        break;

      case AMDGPU::SI_EARLY_TERMINATE_SCC0:
        EarlyTermInstrs.push_back(&MI);
        break;

      case AMDGPU::SI_RETURN_TO_EPILOG:
        EpilogInstrs.push_back(&MI);
        break;

      default:
        break;
      }
    }
  }

  // Lower any early exit branches first
  if (!EarlyTermInstrs.empty()) {
    MachineBasicBlock *EarlyExitBlock = MF.CreateMachineBasicBlock();
    DebugLoc DL;

    MF.insert(MF.end(), EarlyExitBlock);
    BuildMI(*EarlyExitBlock, EarlyExitBlock->end(), DL, TII->get(MovOpc),
            ExecReg)
        .addImm(0);
    generateEndPgm(*EarlyExitBlock, EarlyExitBlock->end(), DL, TII, MF);

    for (MachineInstr *Instr : EarlyTermInstrs) {
      // Early termination in GS does nothing
      if (MF.getFunction().getCallingConv() != CallingConv::AMDGPU_GS)
        earlyTerm(*Instr, EarlyExitBlock);
      Instr->eraseFromParent();
    }

    EarlyTermInstrs.clear();
    MadeChange = true;
  }

  // Now check return to epilog instructions occur at function end
  if (!EpilogInstrs.empty()) {
    MachineBasicBlock *EmptyMBBAtEnd = nullptr;
    assert(!MF.getInfo<SIMachineFunctionInfo>()->returnsVoid());

    // If there are multiple returns to epilog then all will
    // become jumps to new empty end block.
    if (EpilogInstrs.size() > 1) {
      EmptyMBBAtEnd = MF.CreateMachineBasicBlock();
      MF.insert(MF.end(), EmptyMBBAtEnd);
    }

    for (auto *MI : EpilogInstrs) {
      auto *MBB = MI->getParent();
      if (MBB == &MF.back() && MI == &MBB->back())
        continue;

      // SI_RETURN_TO_EPILOG is not the last instruction.
      // Jump to empty block at function end.
      if (!EmptyMBBAtEnd) {
        EmptyMBBAtEnd = MF.CreateMachineBasicBlock();
        MF.insert(MF.end(), EmptyMBBAtEnd);
      }

      MBB->addSuccessor(EmptyMBBAtEnd);
      MDT->insertEdge(MBB, EmptyMBBAtEnd);
      BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(AMDGPU::S_BRANCH))
          .addMBB(EmptyMBBAtEnd);
      MI->eraseFromParent();
      MadeChange = true;
    }

    EpilogInstrs.clear();
  }

  return MadeChange;
}
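
// A convenient way to exercise this pass in isolation is to run it on MIR
// input via llc's -run-pass option, using the pass name registered above
// (DEBUG_TYPE). An illustrative invocation might look like the following,
// where the target, CPU, and input file are placeholders:
//
//   llc -mtriple=amdgcn -mcpu=gfx1010 -run-pass=si-late-branch-lowering \
//     -verify-machineinstrs -o - early-term.mir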