//===-- SIOptimizeExecMaskingPreRA.cpp ------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass removes redundant S_OR_B64 instructions that re-enable lanes in
/// the exec mask. If two SI_END_CF pseudos (lowered as S_OR_B64) occur with no
/// vector instructions between them, only the outer SI_END_CF has to be kept:
/// because the CFG is structured, the exec bits restored by the outer end-cf
/// are always a superset of the exec bits restored by the inner one.
///
/// This needs to run before register allocation so the saved exec mask
/// registers can be eliminated, but after the register coalescer so that no
/// vector register copies remain between the different end-cf statements.
///
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIInstrInfo.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineFunctionPass.h"

using namespace llvm;

#define DEBUG_TYPE "si-optimize-exec-masking-pre-ra"

namespace {

class SIOptimizeExecMaskingPreRA : public MachineFunctionPass {
private:
  const SIRegisterInfo *TRI;
  const SIInstrInfo *TII;
  MachineRegisterInfo *MRI;

public:
  MachineBasicBlock::iterator skipIgnoreExecInsts(
    MachineBasicBlock::iterator I, MachineBasicBlock::iterator E) const;

  MachineBasicBlock::iterator skipIgnoreExecInstsTrivialSucc(
    MachineBasicBlock *&MBB, MachineBasicBlock::iterator It) const;

  static char ID;

  SIOptimizeExecMaskingPreRA() : MachineFunctionPass(ID) {
    initializeSIOptimizeExecMaskingPreRAPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override {
    return "SI optimize exec mask operations pre-RA";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<LiveIntervals>();
    AU.setPreservesAll();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS_BEGIN(SIOptimizeExecMaskingPreRA, DEBUG_TYPE,
                      "SI optimize exec mask operations pre-RA", false, false)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(SIOptimizeExecMaskingPreRA, DEBUG_TYPE,
                    "SI optimize exec mask operations pre-RA", false, false)

char SIOptimizeExecMaskingPreRA::ID = 0;

char &llvm::SIOptimizeExecMaskingPreRAID = SIOptimizeExecMaskingPreRA::ID;

FunctionPass *llvm::createSIOptimizeExecMaskingPreRAPass() {
  return new SIOptimizeExecMaskingPreRA();
}

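// Match the S_OR_B64_term produced by lowering SI_END_CF: an exec-modifying
// terminator that re-enables the lanes saved at the start of the region.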
static bool isEndCF(const MachineInstr &MI, const SIRegisterInfo *TRI) {
  return MI.getOpcode() == AMDGPU::S_OR_B64_term &&
         MI.modifiesRegister(AMDGPU::EXEC, TRI);
}

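// Match a full copy of exec, i.e. the save-exec copy that an end-cf later
// feeds back into the exec mask.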
static bool isFullExecCopy(const MachineInstr &MI) {
  if (MI.isCopy() && MI.getOperand(1).getReg() == AMDGPU::EXEC) {
    assert(MI.isFullCopy());
    return true;
  }

  return false;
}

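// Return the source operand of the S_OR_B64 that is not exec, i.e. the saved
// exec register, or NoRegister if neither source qualifies.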
static unsigned getOrNonExecReg(const MachineInstr &MI,
                                const SIInstrInfo &TII) {
  auto Op = TII.getNamedOperand(MI, AMDGPU::OpName::src1);
  if (Op->isReg() && Op->getReg() != AMDGPU::EXEC)
    return Op->getReg();
  Op = TII.getNamedOperand(MI, AMDGPU::OpName::src0);
  if (Op->isReg() && Op->getReg() != AMDGPU::EXEC)
    return Op->getReg();
  return AMDGPU::NoRegister;
}

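// Return the instruction defining the saved-exec operand of the given
// S_OR_B64 if it is a full copy of exec, or nullptr otherwise.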
static MachineInstr *getOrExecSource(const MachineInstr &MI,
                                     const SIInstrInfo &TII,
                                     const MachineRegisterInfo &MRI) {
  auto SavedExec = getOrNonExecReg(MI, TII);
  if (SavedExec == AMDGPU::NoRegister)
    return nullptr;
  auto SaveExecInst = MRI.getUniqueVRegDef(SavedExec);
  if (!SaveExecInst || !isFullExecCopy(*SaveExecInst))
    return nullptr;
  return SaveExecInst;
}

/// Skip over instructions that don't care about the exec mask.
MachineBasicBlock::iterator SIOptimizeExecMaskingPreRA::skipIgnoreExecInsts(
  MachineBasicBlock::iterator I, MachineBasicBlock::iterator E) const {
  for ( ; I != E; ++I) {
    if (TII->mayReadEXEC(*MRI, *I))
      break;
  }

  return I;
}

// Skip to the next instruction, ignoring debug instructions and trivial block
// boundaries (blocks that have one (typically fallthrough) successor, where
// the successor has one predecessor).
MachineBasicBlock::iterator
SIOptimizeExecMaskingPreRA::skipIgnoreExecInstsTrivialSucc(
  MachineBasicBlock *&MBB,
  MachineBasicBlock::iterator It) const {

  do {
    It = skipIgnoreExecInsts(It, MBB->end());
    if (It != MBB->end() || MBB->succ_size() != 1)
      break;

    // If there is one trivial successor, advance to the next block.
    MachineBasicBlock *Succ = *MBB->succ_begin();

    // TODO: Is this really necessary?
    if (!MBB->isLayoutSuccessor(Succ))
      break;

    It = Succ->begin();
    MBB = Succ;
  } while (true);

  return It;
}

// Optimize sequence
//    %sel = V_CNDMASK_B32_e64 0, 1, %cc
//    %cmp = V_CMP_NE_U32 1, %sel
//    $vcc = S_AND_B64 $exec, %cmp
//    S_CBRANCH_VCC[N]Z
// =>
//    $vcc = S_ANDN2_B64 $exec, %cc
//    S_CBRANCH_VCC[N]Z
//
// This is the negation pattern inserted by DAGCombiner::visitBRCOND() via
// rebuildSetCC(). We start the search at the S_CBRANCH to avoid an exhaustive
// scan, but only the first three instructions are really needed. The S_AND_B64
// with exec is a required part of the pattern since V_CNDMASK_B32 writes
// zeroes for inactive lanes.
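//
// Lane-wise sketch of why the rewrite is sound: for an active lane,
// %sel = %cc ? 1 : 0, so %cmp = (%sel != 1) = !%cc, and therefore
// exec & %cmp == exec & ~%cc, which is exactly what S_ANDN2_B64 produces.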
//
// Returns %cc register on success.
static unsigned optimizeVcndVcmpPair(MachineBasicBlock &MBB,
                                     const GCNSubtarget &ST,
                                     MachineRegisterInfo &MRI,
                                     LiveIntervals *LIS) {
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const unsigned AndOpc = AMDGPU::S_AND_B64;
  const unsigned Andn2Opc = AMDGPU::S_ANDN2_B64;
  const unsigned CondReg = AMDGPU::VCC;
  const unsigned ExecReg = AMDGPU::EXEC;

  auto I = llvm::find_if(MBB.terminators(), [](const MachineInstr &MI) {
                           unsigned Opc = MI.getOpcode();
                           return Opc == AMDGPU::S_CBRANCH_VCCZ ||
                                  Opc == AMDGPU::S_CBRANCH_VCCNZ; });
  if (I == MBB.terminators().end())
    return AMDGPU::NoRegister;

  auto *And = TRI->findReachingDef(CondReg, AMDGPU::NoSubRegister,
                                   *I, MRI, LIS);
  if (!And || And->getOpcode() != AndOpc ||
      !And->getOperand(1).isReg() || !And->getOperand(2).isReg())
    return AMDGPU::NoRegister;

  MachineOperand *AndCC = &And->getOperand(1);
  unsigned CmpReg = AndCC->getReg();
  unsigned CmpSubReg = AndCC->getSubReg();
  if (CmpReg == ExecReg) {
    AndCC = &And->getOperand(2);
    CmpReg = AndCC->getReg();
    CmpSubReg = AndCC->getSubReg();
  } else if (And->getOperand(2).getReg() != ExecReg) {
    return AMDGPU::NoRegister;
  }

  auto *Cmp = TRI->findReachingDef(CmpReg, CmpSubReg, *And, MRI, LIS);
  if (!Cmp || !(Cmp->getOpcode() == AMDGPU::V_CMP_NE_U32_e32 ||
                Cmp->getOpcode() == AMDGPU::V_CMP_NE_U32_e64) ||
      Cmp->getParent() != And->getParent())
    return AMDGPU::NoRegister;

  MachineOperand *Op1 = TII->getNamedOperand(*Cmp, AMDGPU::OpName::src0);
  MachineOperand *Op2 = TII->getNamedOperand(*Cmp, AMDGPU::OpName::src1);
  if (Op1->isImm() && Op2->isReg())
    std::swap(Op1, Op2);
  if (!Op1->isReg() || !Op2->isImm() || Op2->getImm() != 1)
    return AMDGPU::NoRegister;

  unsigned SelReg = Op1->getReg();
  auto *Sel = TRI->findReachingDef(SelReg, Op1->getSubReg(), *Cmp, MRI, LIS);
  if (!Sel || Sel->getOpcode() != AMDGPU::V_CNDMASK_B32_e64)
    return AMDGPU::NoRegister;

  if (TII->hasModifiersSet(*Sel, AMDGPU::OpName::src0_modifiers) ||
      TII->hasModifiersSet(*Sel, AMDGPU::OpName::src1_modifiers))
    return AMDGPU::NoRegister;

  Op1 = TII->getNamedOperand(*Sel, AMDGPU::OpName::src0);
  Op2 = TII->getNamedOperand(*Sel, AMDGPU::OpName::src1);
  MachineOperand *CC = TII->getNamedOperand(*Sel, AMDGPU::OpName::src2);
  if (!Op1->isImm() || !Op2->isImm() || !CC->isReg() ||
      Op1->getImm() != 0 || Op2->getImm() != 1)
    return AMDGPU::NoRegister;

  LLVM_DEBUG(dbgs() << "Folding sequence:\n\t" << *Sel << '\t'
                    << *Cmp << '\t' << *And);

  unsigned CCReg = CC->getReg();
  LIS->RemoveMachineInstrFromMaps(*And);
  MachineInstr *Andn2 = BuildMI(MBB, *And, And->getDebugLoc(),
                                TII->get(Andn2Opc), And->getOperand(0).getReg())
                            .addReg(ExecReg)
                            .addReg(CCReg, 0, CC->getSubReg());
  And->eraseFromParent();
  LIS->InsertMachineInstrInMaps(*Andn2);

  LLVM_DEBUG(dbgs() << "=>\n\t" << *Andn2 << '\n');

  // Try to remove the compare. The Cmp value must not be used between the
  // compare and the S_AND_B64 if it is VCC, and must be entirely unused if it
  // is any other register.
  if ((TargetRegisterInfo::isVirtualRegister(CmpReg) &&
       MRI.use_nodbg_empty(CmpReg)) ||
      (CmpReg == CondReg &&
       std::none_of(std::next(Cmp->getIterator()), Andn2->getIterator(),
                    [&](const MachineInstr &MI) {
                      return MI.readsRegister(CondReg, TRI); }))) {
    LLVM_DEBUG(dbgs() << "Erasing: " << *Cmp << '\n');

    LIS->RemoveMachineInstrFromMaps(*Cmp);
    Cmp->eraseFromParent();

    // Try to remove v_cndmask_b32.
    if (TargetRegisterInfo::isVirtualRegister(SelReg) &&
        MRI.use_nodbg_empty(SelReg)) {
      LLVM_DEBUG(dbgs() << "Erasing: " << *Sel << '\n');

      LIS->RemoveMachineInstrFromMaps(*Sel);
      Sel->eraseFromParent();
    }
  }

  return CCReg;
}

bool SIOptimizeExecMaskingPreRA::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  TRI = ST.getRegisterInfo();
  TII = ST.getInstrInfo();
  MRI = &MF.getRegInfo();

  MachineRegisterInfo &MRI = MF.getRegInfo();
  LiveIntervals *LIS = &getAnalysis<LiveIntervals>();
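  // Registers whose live ranges must be recomputed after the rewrites below.
  // The exec halves are seeded unconditionally since the rewrites can add or
  // remove reads and writes of exec.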
  DenseSet<unsigned> RecalcRegs({AMDGPU::EXEC_LO, AMDGPU::EXEC_HI});
  bool Changed = false;

  for (MachineBasicBlock &MBB : MF) {

    if (unsigned Reg = optimizeVcndVcmpPair(MBB, ST, MRI, LIS)) {
      RecalcRegs.insert(Reg);
      RecalcRegs.insert(AMDGPU::VCC_LO);
      RecalcRegs.insert(AMDGPU::VCC_HI);
      RecalcRegs.insert(AMDGPU::SCC);
      Changed = true;
    }

    // Try to remove unneeded instructions before s_endpgm.
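    // E.g. a trailing 'dead %v = V_MOV_B32 ...' feeding nothing can be
    // deleted, walking backwards from the S_ENDPGM and ascending into
    // single-successor predecessors.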
    if (MBB.succ_empty()) {
      if (MBB.empty())
        continue;

      // Skip this if the endpgm has any implicit uses; otherwise we would need
      // to be careful to update or remove them.
      // S_ENDPGM always has a single immediate operand that is not used other
      // than to end up in the encoding.
      MachineInstr &Term = MBB.back();
      if (Term.getOpcode() != AMDGPU::S_ENDPGM || Term.getNumOperands() != 1)
        continue;

      SmallVector<MachineBasicBlock*, 4> Blocks({&MBB});

      while (!Blocks.empty()) {
        auto CurBB = Blocks.pop_back_val();
        auto I = CurBB->rbegin(), E = CurBB->rend();
        if (I != E) {
          if (I->isUnconditionalBranch() || I->getOpcode() == AMDGPU::S_ENDPGM)
            ++I;
          else if (I->isBranch())
            continue;
        }

        while (I != E) {
          if (I->isDebugInstr()) {
            I = std::next(I);
            continue;
          }

          if (I->mayStore() || I->isBarrier() || I->isCall() ||
              I->hasUnmodeledSideEffects() || I->hasOrderedMemoryRef())
            break;

          LLVM_DEBUG(dbgs()
                     << "Removing no-effect instruction: " << *I << '\n');

          for (auto &Op : I->operands()) {
            if (Op.isReg())
              RecalcRegs.insert(Op.getReg());
          }

          auto Next = std::next(I);
          LIS->RemoveMachineInstrFromMaps(*I);
          I->eraseFromParent();
          I = Next;

          Changed = true;
        }

        if (I != E)
          continue;

        // Try to ascend predecessors.
        for (auto *Pred : CurBB->predecessors()) {
          if (Pred->succ_size() == 1)
            Blocks.push_back(Pred);
        }
      }
      continue;
    }

    // Try to collapse adjacent endifs.
    auto E = MBB.end();
    auto Lead = MBB.getFirstTerminator();
    if (MBB.succ_size() != 1 || Lead == E || !isEndCF(*Lead, TRI))
      continue;

    MachineBasicBlock *TmpMBB = &MBB;
    auto NextLead = skipIgnoreExecInstsTrivialSucc(TmpMBB, std::next(Lead));
    if (NextLead == TmpMBB->end() || !isEndCF(*NextLead, TRI) ||
        !getOrExecSource(*NextLead, *TII, MRI))
      continue;

    LLVM_DEBUG(dbgs() << "Redundant EXEC = S_OR_B64 found: " << *Lead << '\n');

    auto SaveExec = getOrExecSource(*Lead, *TII, MRI);
    unsigned SaveExecReg = getOrNonExecReg(*Lead, *TII);
    for (auto &Op : Lead->operands()) {
      if (Op.isReg())
        RecalcRegs.insert(Op.getReg());
    }

    LIS->RemoveMachineInstrFromMaps(*Lead);
    Lead->eraseFromParent();
    if (SaveExecReg) {
      LIS->removeInterval(SaveExecReg);
      LIS->createAndComputeVirtRegInterval(SaveExecReg);
    }

    Changed = true;

    // If the saved exec value from the erased S_OR_B64 is used only inside the
    // block of its defining COPY (e.g. by an S_AND_B64), fold the copy away
    // and use exec directly.
    if (!SaveExec || !SaveExec->isFullCopy())
      continue;

    unsigned SavedExec = SaveExec->getOperand(0).getReg();
    bool SafeToReplace = true;
    for (auto &U : MRI.use_nodbg_instructions(SavedExec)) {
      if (U.getParent() != SaveExec->getParent()) {
        SafeToReplace = false;
        break;
      }

      LLVM_DEBUG(dbgs() << "Redundant EXEC COPY: " << *SaveExec << '\n');
    }

    if (SafeToReplace) {
      LIS->RemoveMachineInstrFromMaps(*SaveExec);
      SaveExec->eraseFromParent();
      MRI.replaceRegWith(SavedExec, AMDGPU::EXEC);
      LIS->removeInterval(SavedExec);
    }
  }

  if (Changed) {
    for (auto Reg : RecalcRegs) {
      if (TargetRegisterInfo::isVirtualRegister(Reg)) {
        LIS->removeInterval(Reg);
        if (!MRI.reg_empty(Reg))
          LIS->createAndComputeVirtRegInterval(Reg);
      } else {
        LIS->removeAllRegUnitsForPhysReg(Reg);
      }
    }
  }

  return Changed;
}
436