//===-- SIOptimizeExecMaskingPreRA.cpp ------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass performs exec mask handling peephole optimizations which need
/// to be done before register allocation to reduce register pressure.
///
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIInstrInfo.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/InitializePasses.h"

using namespace llvm;

#define DEBUG_TYPE "si-optimize-exec-masking-pre-ra"

namespace {

class SIOptimizeExecMaskingPreRA : public MachineFunctionPass {
private:
  const SIRegisterInfo *TRI;
  const SIInstrInfo *TII;
  MachineRegisterInfo *MRI;

public:
  static char ID;

  SIOptimizeExecMaskingPreRA() : MachineFunctionPass(ID) {
    initializeSIOptimizeExecMaskingPreRAPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override {
    return "SI optimize exec mask operations pre-RA";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<LiveIntervals>();
    AU.setPreservesAll();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS_BEGIN(SIOptimizeExecMaskingPreRA, DEBUG_TYPE,
                      "SI optimize exec mask operations pre-RA", false, false)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(SIOptimizeExecMaskingPreRA, DEBUG_TYPE,
                    "SI optimize exec mask operations pre-RA", false, false)

char SIOptimizeExecMaskingPreRA::ID = 0;

char &llvm::SIOptimizeExecMaskingPreRAID = SIOptimizeExecMaskingPreRA::ID;

FunctionPass *llvm::createSIOptimizeExecMaskingPreRAPass() {
  return new SIOptimizeExecMaskingPreRA();
}

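// Returns true if \p MI is a full copy reading exec (EXEC_LO in wave32 mode).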
static bool isFullExecCopy(const MachineInstr &MI, const GCNSubtarget &ST) {
  unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
  return MI.isFullCopy() && MI.getOperand(1).getReg() == Exec;
}

// See if there is a def between \p AndIdx and \p SelIdx that needs to live
// beyond \p AndIdx.
static bool isDefBetween(const LiveRange &LR, SlotIndex AndIdx,
                         SlotIndex SelIdx) {
  LiveQueryResult AndLRQ = LR.Query(AndIdx);
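  // There is an intervening def that must live past \p AndIdx if the and does
  // not kill its incoming value and that value is not the one live out of the
  // select.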
  return (!AndLRQ.isKill() && AndLRQ.valueIn() != LR.Query(SelIdx).valueOut());
}

// FIXME: Why do we bother trying to handle physical registers here?
static bool isDefBetween(const SIRegisterInfo &TRI,
                         LiveIntervals *LIS, Register Reg,
                         const MachineInstr &Sel, const MachineInstr &And) {
  SlotIndex AndIdx = LIS->getInstructionIndex(And);
  SlotIndex SelIdx = LIS->getInstructionIndex(Sel);

  if (Reg.isVirtual())
    return isDefBetween(LIS->getInterval(Reg), AndIdx, SelIdx);

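  // A physical register may cover several register units; check each of them.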
  for (MCRegUnitIterator UI(Reg, &TRI); UI.isValid(); ++UI) {
    if (isDefBetween(LIS->getRegUnit(*UI), AndIdx, SelIdx))
      return true;
  }

  return false;
}

// Optimize sequence
//    %sel = V_CNDMASK_B32_e64 0, 1, %cc
//    %cmp = V_CMP_NE_U32 1, %sel
//    $vcc = S_AND_B64 $exec, %cmp
//    S_CBRANCH_VCC[N]Z
// =>
//    $vcc = S_ANDN2_B64 $exec, %cc
//    S_CBRANCH_VCC[N]Z
//
// This is the negation pattern inserted by DAGCombiner::visitBRCOND() in
// rebuildSetCC(). The match starts at the S_CBRANCH to avoid an exhaustive
// search, but only the first three instructions are really needed. The
// S_AND_B64 with exec is a required part of the pattern since V_CNDMASK_B32
// writes zeroes for inactive lanes.
//
// Returns the %cc register on success.
static unsigned optimizeVcndVcmpPair(MachineBasicBlock &MBB,
                                     const GCNSubtarget &ST,
                                     MachineRegisterInfo &MRI,
                                     LiveIntervals *LIS) {
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  const SIInstrInfo *TII = ST.getInstrInfo();
  bool Wave32 = ST.isWave32();
  const unsigned AndOpc = Wave32 ? AMDGPU::S_AND_B32 : AMDGPU::S_AND_B64;
  const unsigned Andn2Opc = Wave32 ? AMDGPU::S_ANDN2_B32 : AMDGPU::S_ANDN2_B64;
  const unsigned CondReg = Wave32 ? AMDGPU::VCC_LO : AMDGPU::VCC;
  const unsigned ExecReg = Wave32 ? AMDGPU::EXEC_LO : AMDGPU::EXEC;

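  // Start the match at the conditional VCC branch that ends the pattern.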
  auto I = llvm::find_if(MBB.terminators(), [](const MachineInstr &MI) {
                           unsigned Opc = MI.getOpcode();
                           return Opc == AMDGPU::S_CBRANCH_VCCZ ||
                                  Opc == AMDGPU::S_CBRANCH_VCCNZ; });
  if (I == MBB.terminators().end())
    return AMDGPU::NoRegister;

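  // VCC at the branch must be defined by an S_AND of exec with the compare
  // result.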
  auto *And = TRI->findReachingDef(CondReg, AMDGPU::NoSubRegister,
                                   *I, MRI, LIS);
  if (!And || And->getOpcode() != AndOpc ||
      !And->getOperand(1).isReg() || !And->getOperand(2).isReg())
    return AMDGPU::NoRegister;

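  // S_AND is commutative, so figure out which operand is exec and take the
  // other one as the compare result.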
  MachineOperand *AndCC = &And->getOperand(1);
  Register CmpReg = AndCC->getReg();
  unsigned CmpSubReg = AndCC->getSubReg();
  if (CmpReg == ExecReg) {
    AndCC = &And->getOperand(2);
    CmpReg = AndCC->getReg();
    CmpSubReg = AndCC->getSubReg();
  } else if (And->getOperand(2).getReg() != ExecReg) {
    return AMDGPU::NoRegister;
  }

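  // The compare result must come from a V_CMP_NE_U32 in the same block as
  // the and.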
  auto *Cmp = TRI->findReachingDef(CmpReg, CmpSubReg, *And, MRI, LIS);
  if (!Cmp || !(Cmp->getOpcode() == AMDGPU::V_CMP_NE_U32_e32 ||
                Cmp->getOpcode() == AMDGPU::V_CMP_NE_U32_e64) ||
      Cmp->getParent() != And->getParent())
    return AMDGPU::NoRegister;

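  // Order the compare operands so that Op1 is the register; the pattern
  // requires a comparison against the immediate 1.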
  MachineOperand *Op1 = TII->getNamedOperand(*Cmp, AMDGPU::OpName::src0);
  MachineOperand *Op2 = TII->getNamedOperand(*Cmp, AMDGPU::OpName::src1);
  if (Op1->isImm() && Op2->isReg())
    std::swap(Op1, Op2);
  if (!Op1->isReg() || !Op2->isImm() || Op2->getImm() != 1)
    return AMDGPU::NoRegister;

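  // The compared register must be produced by a V_CNDMASK_B32_e64 without
  // source modifiers.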
  Register SelReg = Op1->getReg();
  auto *Sel = TRI->findReachingDef(SelReg, Op1->getSubReg(), *Cmp, MRI, LIS);
  if (!Sel || Sel->getOpcode() != AMDGPU::V_CNDMASK_B32_e64)
    return AMDGPU::NoRegister;

  if (TII->hasModifiersSet(*Sel, AMDGPU::OpName::src0_modifiers) ||
      TII->hasModifiersSet(*Sel, AMDGPU::OpName::src1_modifiers))
    return AMDGPU::NoRegister;

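  // Match the exact select of 0 and 1: V_CNDMASK_B32_e64 0, 1, %cc.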
  Op1 = TII->getNamedOperand(*Sel, AMDGPU::OpName::src0);
  Op2 = TII->getNamedOperand(*Sel, AMDGPU::OpName::src1);
  MachineOperand *CC = TII->getNamedOperand(*Sel, AMDGPU::OpName::src2);
  if (!Op1->isImm() || !Op2->isImm() || !CC->isReg() ||
      Op1->getImm() != 0 || Op2->getImm() != 1)
    return AMDGPU::NoRegister;

  Register CCReg = CC->getReg();

  // If there was a def between the select and the and, we would need to move
  // it to fold this.
  if (isDefBetween(*TRI, LIS, CCReg, *Sel, *And))
    return AMDGPU::NoRegister;

  LLVM_DEBUG(dbgs() << "Folding sequence:\n\t" << *Sel << '\t' << *Cmp << '\t'
                    << *And);

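  // Rewrite the s_and as an s_andn2 of exec and the original condition,
  // transferring the dead flag of the implicit SCC def.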
  LIS->RemoveMachineInstrFromMaps(*And);
  MachineInstr *Andn2 =
      BuildMI(MBB, *And, And->getDebugLoc(), TII->get(Andn2Opc),
              And->getOperand(0).getReg())
          .addReg(ExecReg)
          .addReg(CCReg, getUndefRegState(CC->isUndef()), CC->getSubReg());
  MachineOperand &AndSCC = And->getOperand(3);
  assert(AndSCC.getReg() == AMDGPU::SCC);
  MachineOperand &Andn2SCC = Andn2->getOperand(3);
  assert(Andn2SCC.getReg() == AMDGPU::SCC);
  Andn2SCC.setIsDead(AndSCC.isDead());
  And->eraseFromParent();
  LIS->InsertMachineInstrInMaps(*Andn2);

  LLVM_DEBUG(dbgs() << "=>\n\t" << *Andn2 << '\n');

  // Try to remove the compare. If the compare result is VCC, it must not be
  // read between the compare and the new s_andn2; any other register must
  // simply be unused.
  if ((Register::isVirtualRegister(CmpReg) && MRI.use_nodbg_empty(CmpReg)) ||
      (CmpReg == CondReg &&
       std::none_of(std::next(Cmp->getIterator()), Andn2->getIterator(),
                    [&](const MachineInstr &MI) {
                      return MI.readsRegister(CondReg, TRI);
                    }))) {
    LLVM_DEBUG(dbgs() << "Erasing: " << *Cmp << '\n');

    LIS->RemoveMachineInstrFromMaps(*Cmp);
    Cmp->eraseFromParent();

    // Try to remove the v_cndmask_b32 as well if the compare was its only
    // user.
    if (Register::isVirtualRegister(SelReg) && MRI.use_nodbg_empty(SelReg)) {
      LLVM_DEBUG(dbgs() << "Erasing: " << *Sel << '\n');

      LIS->RemoveMachineInstrFromMaps(*Sel);
      Sel->eraseFromParent();
    }
  }

  return CCReg;
}

bool SIOptimizeExecMaskingPreRA::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  TRI = ST.getRegisterInfo();
  TII = ST.getInstrInfo();
  MRI = &MF.getRegInfo();

  MachineRegisterInfo &MRI = MF.getRegInfo();
  LiveIntervals *LIS = &getAnalysis<LiveIntervals>();
  DenseSet<unsigned> RecalcRegs({AMDGPU::EXEC_LO, AMDGPU::EXEC_HI});
  unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
  bool Changed = false;

  for (MachineBasicBlock &MBB : MF) {

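    // Fold the v_cndmask/v_cmp negation pattern feeding this block's VCC
    // branch, if present.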
    if (unsigned Reg = optimizeVcndVcmpPair(MBB, ST, MRI, LIS)) {
      RecalcRegs.insert(Reg);
      RecalcRegs.insert(AMDGPU::VCC_LO);
      RecalcRegs.insert(AMDGPU::VCC_HI);
      RecalcRegs.insert(AMDGPU::SCC);
      Changed = true;
    }

    // Try to remove unneeded instructions before s_endpgm.
    if (MBB.succ_empty()) {
      if (MBB.empty())
        continue;

      // Skip this if the endpgm has any implicit uses, otherwise we would
      // need to be careful to update / remove them. S_ENDPGM always has a
      // single imm operand that is not used other than to end up in the
      // encoding.
      MachineInstr &Term = MBB.back();
      if (Term.getOpcode() != AMDGPU::S_ENDPGM || Term.getNumOperands() != 1)
        continue;

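      // Scan backward from the end of the program: instructions without
      // observable side effects ahead of the s_endpgm are dead. Once a block
      // scans clean, ascend into predecessors whose only successor it is.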
      SmallVector<MachineBasicBlock*, 4> Blocks({&MBB});

      while (!Blocks.empty()) {
        auto CurBB = Blocks.pop_back_val();
        auto I = CurBB->rbegin(), E = CurBB->rend();
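        // Keep a trailing unconditional branch or s_endpgm in place; a
        // conditional branch means the tail of the block is live, so give up
        // on this block.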
        if (I != E) {
          if (I->isUnconditionalBranch() || I->getOpcode() == AMDGPU::S_ENDPGM)
            ++I;
          else if (I->isBranch())
            continue;
        }

        while (I != E) {
          if (I->isDebugInstr()) {
            I = std::next(I);
            continue;
          }

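          // Stop at the first instruction whose effects could be observed.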
          if (I->mayStore() || I->isBarrier() || I->isCall() ||
              I->hasUnmodeledSideEffects() || I->hasOrderedMemoryRef())
            break;

          LLVM_DEBUG(dbgs()
                     << "Removing no effect instruction: " << *I << '\n');

          for (auto &Op : I->operands()) {
            if (Op.isReg())
              RecalcRegs.insert(Op.getReg());
          }

          auto Next = std::next(I);
          LIS->RemoveMachineInstrFromMaps(*I);
          I->eraseFromParent();
          I = Next;

          Changed = true;
        }

        if (I != E)
          continue;

        // The whole block was scanned; try to ascend into predecessors for
        // which this block is the only successor.
        for (auto *Pred : CurBB->predecessors()) {
          if (Pred->succ_size() == 1)
            Blocks.push_back(Pred);
        }
      }
      continue;
    }

    // If the only user of a full exec copy is a logical operation, fold the
    // copy into its user now to prevent a saveexec from being formed. I.e.:
    //
    //    %0:sreg_64 = COPY $exec
    //    %1:sreg_64 = S_AND_B64 %0:sreg_64, %2:sreg_64
    // =>
    //    %1 = S_AND_B64 $exec, %2:sreg_64
    unsigned ScanThreshold = 10;
    for (auto I = MBB.rbegin(), E = MBB.rend(); I != E && ScanThreshold--;
         ++I) {
      if (!isFullExecCopy(*I, ST))
        continue;

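      // Replace the copied register with exec directly when its single
      // non-debug use is in this same block.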
      Register SavedExec = I->getOperand(0).getReg();
      if (SavedExec.isVirtual() && MRI.hasOneNonDBGUse(SavedExec) &&
          MRI.use_instr_nodbg_begin(SavedExec)->getParent() == I->getParent()) {
        LLVM_DEBUG(dbgs() << "Redundant EXEC COPY: " << *I << '\n');
        LIS->RemoveMachineInstrFromMaps(*I);
        I->eraseFromParent();
        MRI.replaceRegWith(SavedExec, Exec);
        LIS->removeInterval(SavedExec);
        Changed = true;
      }
      break;
    }
  }

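  // Liveness of every register touched by the rewrites above is stale;
  // recompute it.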
  if (Changed) {
    for (auto Reg : RecalcRegs) {
      if (Register::isVirtualRegister(Reg)) {
        LIS->removeInterval(Reg);
        if (!MRI.reg_empty(Reg))
          LIS->createAndComputeVirtRegInterval(Reg);
      } else {
        LIS->removeAllRegUnitsForPhysReg(Reg);
      }
    }
  }

  return Changed;
}