//===-- SIOptimizeExecMaskingPreRA.cpp ------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass performs exec mask handling peephole optimizations which need
/// to be done before register allocation to reduce register pressure.
///
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIInstrInfo.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/InitializePasses.h"

using namespace llvm;

#define DEBUG_TYPE "si-optimize-exec-masking-pre-ra"

namespace {

class SIOptimizeExecMaskingPreRA : public MachineFunctionPass {
private:
  const SIRegisterInfo *TRI;
  const SIInstrInfo *TII;
  MachineRegisterInfo *MRI;
  LiveIntervals *LIS;

  unsigned AndOpc;
  unsigned Andn2Opc;
  Register CondReg;
  Register ExecReg;

  Register optimizeVcndVcmpPair(MachineBasicBlock &MBB);

public:
  static char ID;

  SIOptimizeExecMaskingPreRA() : MachineFunctionPass(ID) {
    initializeSIOptimizeExecMaskingPreRAPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override {
    return "SI optimize exec mask operations pre-RA";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<LiveIntervals>();
    AU.setPreservesAll();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS_BEGIN(SIOptimizeExecMaskingPreRA, DEBUG_TYPE,
                      "SI optimize exec mask operations pre-RA", false, false)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(SIOptimizeExecMaskingPreRA, DEBUG_TYPE,
                    "SI optimize exec mask operations pre-RA", false, false)

char SIOptimizeExecMaskingPreRA::ID = 0;

char &llvm::SIOptimizeExecMaskingPreRAID = SIOptimizeExecMaskingPreRA::ID;

FunctionPass *llvm::createSIOptimizeExecMaskingPreRAPass() {
  return new SIOptimizeExecMaskingPreRA();
}

// See if there is a def between \p AndIdx and \p SelIdx that needs to live
// beyond \p AndIdx.
static bool isDefBetween(const LiveRange &LR, SlotIndex AndIdx,
                         SlotIndex SelIdx) {
  LiveQueryResult AndLRQ = LR.Query(AndIdx);
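  // A def lies between Sel and And if the value live into And differs from
  // the value leaving Sel; such a def needs to live beyond And when And does
  // not kill the register.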
  return (!AndLRQ.isKill() && AndLRQ.valueIn() != LR.Query(SelIdx).valueOut());
}

// FIXME: Why do we bother trying to handle physical registers here?
static bool isDefBetween(const SIRegisterInfo &TRI,
                         LiveIntervals *LIS, Register Reg,
                         const MachineInstr &Sel, const MachineInstr &And) {
  SlotIndex AndIdx = LIS->getInstructionIndex(And);
  SlotIndex SelIdx = LIS->getInstructionIndex(Sel);

  if (Reg.isVirtual())
    return isDefBetween(LIS->getInterval(Reg), AndIdx, SelIdx);

  for (MCRegUnitIterator UI(Reg, &TRI); UI.isValid(); ++UI) {
    if (isDefBetween(LIS->getRegUnit(*UI), AndIdx, SelIdx))
      return true;
  }

  return false;
}

// Optimize sequence
//    %sel = V_CNDMASK_B32_e64 0, 1, %cc
//    %cmp = V_CMP_NE_U32 1, %sel
//    $vcc = S_AND_B64 $exec, %cmp
//    S_CBRANCH_VCC[N]Z
// =>
//    $vcc = S_ANDN2_B64 $exec, %cc
//    S_CBRANCH_VCC[N]Z
//
// This is the negation pattern inserted by DAGCombiner::visitBRCOND() in
// rebuildSetCC(). We start the search from the S_CBRANCH to avoid an
// exhaustive search, but only the first 3 instructions are really needed.
// S_AND_B64 with exec is a required part of the pattern since V_CNDMASK_B32
// writes zeroes for inactive lanes.
//
// Returns %cc register on success.
Register
SIOptimizeExecMaskingPreRA::optimizeVcndVcmpPair(MachineBasicBlock &MBB) {
  auto I = llvm::find_if(MBB.terminators(), [](const MachineInstr &MI) {
                           unsigned Opc = MI.getOpcode();
                           return Opc == AMDGPU::S_CBRANCH_VCCZ ||
                                  Opc == AMDGPU::S_CBRANCH_VCCNZ; });
  if (I == MBB.terminators().end())
    return Register();

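  // The branch reads the condition register (vcc); its reaching definition
  // must be an S_AND of the exec mask with the compare result for the
  // pattern to match.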
  auto *And =
      TRI->findReachingDef(CondReg, AMDGPU::NoSubRegister, *I, *MRI, LIS);
  if (!And || And->getOpcode() != AndOpc ||
      !And->getOperand(1).isReg() || !And->getOperand(2).isReg())
    return Register();

  MachineOperand *AndCC = &And->getOperand(1);
  Register CmpReg = AndCC->getReg();
  unsigned CmpSubReg = AndCC->getSubReg();
  if (CmpReg == ExecReg) {
    AndCC = &And->getOperand(2);
    CmpReg = AndCC->getReg();
    CmpSubReg = AndCC->getSubReg();
  } else if (And->getOperand(2).getReg() != ExecReg) {
    return Register();
  }

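  // One AND operand must be exec; follow the other operand back to its
  // defining V_CMP_NE_U32, which must live in the same block as the AND.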
  auto *Cmp = TRI->findReachingDef(CmpReg, CmpSubReg, *And, *MRI, LIS);
  if (!Cmp || !(Cmp->getOpcode() == AMDGPU::V_CMP_NE_U32_e32 ||
                Cmp->getOpcode() == AMDGPU::V_CMP_NE_U32_e64) ||
      Cmp->getParent() != And->getParent())
    return Register();

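  // Normalize the operand order locally so the register ends up in Op1 and
  // the immediate in Op2; the immediate must be 1 for the negation pattern.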
  MachineOperand *Op1 = TII->getNamedOperand(*Cmp, AMDGPU::OpName::src0);
  MachineOperand *Op2 = TII->getNamedOperand(*Cmp, AMDGPU::OpName::src1);
  if (Op1->isImm() && Op2->isReg())
    std::swap(Op1, Op2);
  if (!Op1->isReg() || !Op2->isImm() || Op2->getImm() != 1)
    return Register();

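  // The compared value must come from a V_CNDMASK_B32 selecting between 0
  // and 1 with no source modifiers; only then is the compare a pure negation
  // of the select's condition.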
  Register SelReg = Op1->getReg();
  auto *Sel = TRI->findReachingDef(SelReg, Op1->getSubReg(), *Cmp, *MRI, LIS);
  if (!Sel || Sel->getOpcode() != AMDGPU::V_CNDMASK_B32_e64)
    return Register();

  if (TII->hasModifiersSet(*Sel, AMDGPU::OpName::src0_modifiers) ||
      TII->hasModifiersSet(*Sel, AMDGPU::OpName::src1_modifiers))
    return Register();

  Op1 = TII->getNamedOperand(*Sel, AMDGPU::OpName::src0);
  Op2 = TII->getNamedOperand(*Sel, AMDGPU::OpName::src1);
  MachineOperand *CC = TII->getNamedOperand(*Sel, AMDGPU::OpName::src2);
  if (!Op1->isImm() || !Op2->isImm() || !CC->isReg() ||
      Op1->getImm() != 0 || Op2->getImm() != 1)
    return Register();

  Register CCReg = CC->getReg();

  // If there was a def between the select and the and, we would need to move it
  // to fold this.
  if (isDefBetween(*TRI, LIS, CCReg, *Sel, *And))
    return Register();

  LLVM_DEBUG(dbgs() << "Folding sequence:\n\t" << *Sel << '\t' << *Cmp << '\t'
                    << *And);

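  // Rewrite the S_AND of exec with the compare result as an S_ANDN2 of exec
  // with the select's original condition, transferring the SCC dead flag
  // from the old instruction to the new one.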
  LIS->RemoveMachineInstrFromMaps(*And);
  MachineInstr *Andn2 =
      BuildMI(MBB, *And, And->getDebugLoc(), TII->get(Andn2Opc),
              And->getOperand(0).getReg())
          .addReg(ExecReg)
          .addReg(CCReg, getUndefRegState(CC->isUndef()), CC->getSubReg());
  MachineOperand &AndSCC = And->getOperand(3);
  assert(AndSCC.getReg() == AMDGPU::SCC);
  MachineOperand &Andn2SCC = Andn2->getOperand(3);
  assert(Andn2SCC.getReg() == AMDGPU::SCC);
  Andn2SCC.setIsDead(AndSCC.isDead());
  And->eraseFromParent();
  LIS->InsertMachineInstrInMaps(*Andn2);

  LLVM_DEBUG(dbgs() << "=>\n\t" << *Andn2 << '\n');

  // Try to remove the compare. The Cmp value must not be used between the cmp
  // and the s_and_b64 if it is VCC, and must be entirely unused if it is any
  // other register.
  if ((CmpReg.isVirtual() && MRI->use_nodbg_empty(CmpReg)) ||
      (CmpReg == CondReg &&
       std::none_of(std::next(Cmp->getIterator()), Andn2->getIterator(),
                    [&](const MachineInstr &MI) {
                      return MI.readsRegister(CondReg, TRI);
                    }))) {
    LLVM_DEBUG(dbgs() << "Erasing: " << *Cmp << '\n');

    LIS->RemoveMachineInstrFromMaps(*Cmp);
    Cmp->eraseFromParent();

    // Try to remove v_cndmask_b32.
    if (SelReg.isVirtual() && MRI->use_nodbg_empty(SelReg)) {
      LLVM_DEBUG(dbgs() << "Erasing: " << *Sel << '\n');

      LIS->RemoveMachineInstrFromMaps(*Sel);
      Sel->eraseFromParent();
    }
  }

  return CCReg;
}

bool SIOptimizeExecMaskingPreRA::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  TRI = ST.getRegisterInfo();
  TII = ST.getInstrInfo();
  MRI = &MF.getRegInfo();
  LIS = &getAnalysis<LiveIntervals>();

  const bool Wave32 = ST.isWave32();
  AndOpc = Wave32 ? AMDGPU::S_AND_B32 : AMDGPU::S_AND_B64;
  Andn2Opc = Wave32 ? AMDGPU::S_ANDN2_B32 : AMDGPU::S_ANDN2_B64;
  CondReg = Wave32 ? AMDGPU::VCC_LO : AMDGPU::VCC;
  ExecReg = Wave32 ? AMDGPU::EXEC_LO : AMDGPU::EXEC;

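  // Track registers whose live intervals become stale during the rewrites
  // below so they can be recomputed once all blocks have been processed.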
  DenseSet<Register> RecalcRegs({AMDGPU::EXEC_LO, AMDGPU::EXEC_HI});
  bool Changed = false;

  for (MachineBasicBlock &MBB : MF) {

    if (Register Reg = optimizeVcndVcmpPair(MBB)) {
      RecalcRegs.insert(Reg);
      RecalcRegs.insert(AMDGPU::VCC_LO);
      RecalcRegs.insert(AMDGPU::VCC_HI);
      RecalcRegs.insert(AMDGPU::SCC);
      Changed = true;
    }

    // Try to remove unneeded instructions before s_endpgm.
    if (MBB.succ_empty()) {
      if (MBB.empty())
        continue;

      // Skip this if the endpgm has any implicit uses; otherwise we would
      // need to be careful to update/remove them.
      // S_ENDPGM always has a single imm operand that is not used other than
      // to end up in the encoding.
      MachineInstr &Term = MBB.back();
      if (Term.getOpcode() != AMDGPU::S_ENDPGM || Term.getNumOperands() != 1)
        continue;

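      // Walk backwards from the endpgm, deleting side-effect-free
      // instructions whose results cannot be observed, and ascend into
      // predecessors that have this block as their only successor.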
      SmallVector<MachineBasicBlock*, 4> Blocks({&MBB});

      while (!Blocks.empty()) {
        auto CurBB = Blocks.pop_back_val();
        auto I = CurBB->rbegin(), E = CurBB->rend();
        if (I != E) {
          if (I->isUnconditionalBranch() || I->getOpcode() == AMDGPU::S_ENDPGM)
            ++I;
          else if (I->isBranch())
            continue;
        }

        while (I != E) {
          if (I->isDebugInstr()) {
            I = std::next(I);
            continue;
          }

          if (I->mayStore() || I->isBarrier() || I->isCall() ||
              I->hasUnmodeledSideEffects() || I->hasOrderedMemoryRef())
            break;

          LLVM_DEBUG(dbgs()
                     << "Removing no effect instruction: " << *I << '\n');

          for (auto &Op : I->operands()) {
            if (Op.isReg())
              RecalcRegs.insert(Op.getReg());
          }

          auto Next = std::next(I);
          LIS->RemoveMachineInstrFromMaps(*I);
          I->eraseFromParent();
          I = Next;

          Changed = true;
        }

        if (I != E)
          continue;

        // Try to ascend predecessors.
        for (auto *Pred : CurBB->predecessors()) {
          if (Pred->succ_size() == 1)
            Blocks.push_back(Pred);
        }
      }
      continue;
    }

    // If the only user of a logical operation is a move to exec, fold it now
    // to prevent forming a saveexec. I.e.:
    //
    //    %0:sreg_64 = COPY $exec
    //    %1:sreg_64 = S_AND_B64 %0:sreg_64, %2:sreg_64
    // =>
    //    %1 = S_AND_B64 $exec, %2:sreg_64
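    // Scan at most a few instructions back from the end of the block for a
    // full copy of exec whose single use is in this same block.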
    unsigned ScanThreshold = 10;
    for (auto I = MBB.rbegin(), E = MBB.rend(); I != E && ScanThreshold--;
         ++I) {
      // Continue scanning if this is not a full exec copy.
      if (!(I->isFullCopy() && I->getOperand(1).getReg() == ExecReg))
        continue;

      Register SavedExec = I->getOperand(0).getReg();
      if (SavedExec.isVirtual() && MRI->hasOneNonDBGUse(SavedExec) &&
          MRI->use_instr_nodbg_begin(SavedExec)->getParent() ==
              I->getParent()) {
        LLVM_DEBUG(dbgs() << "Redundant EXEC COPY: " << *I << '\n');
        LIS->RemoveMachineInstrFromMaps(*I);
        I->eraseFromParent();
        MRI->replaceRegWith(SavedExec, ExecReg);
        LIS->removeInterval(SavedExec);
        Changed = true;
      }
      break;
    }
  }

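  // Any interval touched by the rewrites above is stale: recompute virtual
  // register intervals that still have uses, and drop the reg-unit liveness
  // for physical registers.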
  if (Changed) {
    for (auto Reg : RecalcRegs) {
      if (Reg.isVirtual()) {
        LIS->removeInterval(Reg);
        if (!MRI->reg_empty(Reg))
          LIS->createAndComputeVirtRegInterval(Reg);
      } else {
        LIS->removeAllRegUnitsForPhysReg(Reg);
      }
    }
  }

  return Changed;
}
362