//===-- SIOptimizeExecMasking.cpp -----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

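/// \file
/// Folds the exec-copy sequences produced by control flow lowering,
///   x = COPY exec
///   z = S_<op>_B64 x, y
///   exec = COPY z
/// into a single x = S_<op>_SAVEEXEC_B64 y, and turns the *_term terminator
/// pseudos used for spill placement during register allocation back into
/// their ordinary opcodes.
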
#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/InitializePasses.h"

using namespace llvm;

#define DEBUG_TYPE "si-optimize-exec-masking"

namespace {

class SIOptimizeExecMasking : public MachineFunctionPass {
public:
  static char ID;

public:
  SIOptimizeExecMasking() : MachineFunctionPass(ID) {
    initializeSIOptimizeExecMaskingPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override {
    return "SI optimize exec mask operations";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS_BEGIN(SIOptimizeExecMasking, DEBUG_TYPE,
                      "SI optimize exec mask operations", false, false)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(SIOptimizeExecMasking, DEBUG_TYPE,
                    "SI optimize exec mask operations", false, false)

char SIOptimizeExecMasking::ID = 0;

char &llvm::SIOptimizeExecMaskingID = SIOptimizeExecMasking::ID;

/// If \p MI is a copy from exec, return the register copied to.
static Register isCopyFromExec(const MachineInstr &MI, const GCNSubtarget &ST) {
  switch (MI.getOpcode()) {
  case AMDGPU::COPY:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::S_MOV_B64_term:
  case AMDGPU::S_MOV_B32:
  case AMDGPU::S_MOV_B32_term: {
    const MachineOperand &Src = MI.getOperand(1);
    if (Src.isReg() &&
        Src.getReg() == (ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC))
      return MI.getOperand(0).getReg();
  }
  }

  return AMDGPU::NoRegister;
}

/// If \p MI is a copy to exec, return the register copied from.
static Register isCopyToExec(const MachineInstr &MI, const GCNSubtarget &ST) {
  switch (MI.getOpcode()) {
  case AMDGPU::COPY:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::S_MOV_B32: {
    const MachineOperand &Dst = MI.getOperand(0);
    if (Dst.isReg() &&
        Dst.getReg() == (ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC) &&
        MI.getOperand(1).isReg())
      return MI.getOperand(1).getReg();
    break;
  }
  case AMDGPU::S_MOV_B64_term:
  case AMDGPU::S_MOV_B32_term:
    llvm_unreachable("should have been replaced");
  }

  return Register();
}

/// If \p MI is a logical operation on an exec value,
/// return the register copied to.
static Register isLogicalOpOnExec(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  case AMDGPU::S_AND_B64:
  case AMDGPU::S_OR_B64:
  case AMDGPU::S_XOR_B64:
  case AMDGPU::S_ANDN2_B64:
  case AMDGPU::S_ORN2_B64:
  case AMDGPU::S_NAND_B64:
  case AMDGPU::S_NOR_B64:
  case AMDGPU::S_XNOR_B64: {
    const MachineOperand &Src1 = MI.getOperand(1);
    if (Src1.isReg() && Src1.getReg() == AMDGPU::EXEC)
      return MI.getOperand(0).getReg();
    const MachineOperand &Src2 = MI.getOperand(2);
    if (Src2.isReg() && Src2.getReg() == AMDGPU::EXEC)
      return MI.getOperand(0).getReg();
    break;
  }
  case AMDGPU::S_AND_B32:
  case AMDGPU::S_OR_B32:
  case AMDGPU::S_XOR_B32:
  case AMDGPU::S_ANDN2_B32:
  case AMDGPU::S_ORN2_B32:
  case AMDGPU::S_NAND_B32:
  case AMDGPU::S_NOR_B32:
  case AMDGPU::S_XNOR_B32: {
    const MachineOperand &Src1 = MI.getOperand(1);
    if (Src1.isReg() && Src1.getReg() == AMDGPU::EXEC_LO)
      return MI.getOperand(0).getReg();
    const MachineOperand &Src2 = MI.getOperand(2);
    if (Src2.isReg() && Src2.getReg() == AMDGPU::EXEC_LO)
      return MI.getOperand(0).getReg();
    break;
  }
  }

  return AMDGPU::NoRegister;
}

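/// Map a scalar logical opcode to its corresponding *_SAVEEXEC form, or
/// INSTRUCTION_LIST_END if no such form exists.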
134   switch (Opc) {
135   case AMDGPU::S_AND_B64:
136     return AMDGPU::S_AND_SAVEEXEC_B64;
137   case AMDGPU::S_OR_B64:
138     return AMDGPU::S_OR_SAVEEXEC_B64;
139   case AMDGPU::S_XOR_B64:
140     return AMDGPU::S_XOR_SAVEEXEC_B64;
141   case AMDGPU::S_ANDN2_B64:
142     return AMDGPU::S_ANDN2_SAVEEXEC_B64;
143   case AMDGPU::S_ORN2_B64:
144     return AMDGPU::S_ORN2_SAVEEXEC_B64;
145   case AMDGPU::S_NAND_B64:
146     return AMDGPU::S_NAND_SAVEEXEC_B64;
147   case AMDGPU::S_NOR_B64:
148     return AMDGPU::S_NOR_SAVEEXEC_B64;
149   case AMDGPU::S_XNOR_B64:
150     return AMDGPU::S_XNOR_SAVEEXEC_B64;
151   case AMDGPU::S_AND_B32:
152     return AMDGPU::S_AND_SAVEEXEC_B32;
153   case AMDGPU::S_OR_B32:
154     return AMDGPU::S_OR_SAVEEXEC_B32;
155   case AMDGPU::S_XOR_B32:
156     return AMDGPU::S_XOR_SAVEEXEC_B32;
157   case AMDGPU::S_ANDN2_B32:
158     return AMDGPU::S_ANDN2_SAVEEXEC_B32;
159   case AMDGPU::S_ORN2_B32:
160     return AMDGPU::S_ORN2_SAVEEXEC_B32;
161   case AMDGPU::S_NAND_B32:
162     return AMDGPU::S_NAND_SAVEEXEC_B32;
163   case AMDGPU::S_NOR_B32:
164     return AMDGPU::S_NOR_SAVEEXEC_B32;
165   case AMDGPU::S_XNOR_B32:
166     return AMDGPU::S_XNOR_SAVEEXEC_B32;
167   default:
168     return AMDGPU::INSTRUCTION_LIST_END;
169   }
170 }
171 
172 // These are only terminators to get correct spill code placement during
173 // register allocation, so turn them back into normal instructions.
174 static bool removeTerminatorBit(const SIInstrInfo &TII, MachineInstr &MI) {
175   switch (MI.getOpcode()) {
176   case AMDGPU::S_MOV_B32_term: {
177     bool RegSrc = MI.getOperand(1).isReg();
178     MI.setDesc(TII.get(RegSrc ? AMDGPU::COPY : AMDGPU::S_MOV_B32));
179     return true;
180   }
181   case AMDGPU::S_MOV_B64_term: {
182     bool RegSrc = MI.getOperand(1).isReg();
183     MI.setDesc(TII.get(RegSrc ? AMDGPU::COPY : AMDGPU::S_MOV_B64));
184     return true;
185   }
186   case AMDGPU::S_XOR_B64_term: {
187     // This is only a terminator to get the correct spill code placement during
188     // register allocation.
189     MI.setDesc(TII.get(AMDGPU::S_XOR_B64));
190     return true;
191   }
192   case AMDGPU::S_XOR_B32_term: {
193     // This is only a terminator to get the correct spill code placement during
194     // register allocation.
195     MI.setDesc(TII.get(AMDGPU::S_XOR_B32));
196     return true;
197   }
198   case AMDGPU::S_OR_B64_term: {
199     // This is only a terminator to get the correct spill code placement during
200     // register allocation.
201     MI.setDesc(TII.get(AMDGPU::S_OR_B64));
202     return true;
203   }
204   case AMDGPU::S_OR_B32_term: {
205     // This is only a terminator to get the correct spill code placement during
206     // register allocation.
207     MI.setDesc(TII.get(AMDGPU::S_OR_B32));
208     return true;
209   }
210   case AMDGPU::S_ANDN2_B64_term: {
211     // This is only a terminator to get the correct spill code placement during
212     // register allocation.
213     MI.setDesc(TII.get(AMDGPU::S_ANDN2_B64));
214     return true;
215   }
216   case AMDGPU::S_ANDN2_B32_term: {
217     // This is only a terminator to get the correct spill code placement during
218     // register allocation.
219     MI.setDesc(TII.get(AMDGPU::S_ANDN2_B32));
220     return true;
221   }
222   default:
223     return false;
224   }
225 }
226 
227 // Turn all pseudoterminators in the block into their equivalent non-terminator
228 // instructions. Returns the reverse iterator to the first non-terminator
229 // instruction in the block.
230 static MachineBasicBlock::reverse_iterator fixTerminators(
231   const SIInstrInfo &TII,
232   MachineBasicBlock &MBB) {
233   MachineBasicBlock::reverse_iterator I = MBB.rbegin(), E = MBB.rend();
234 
235   bool Seen = false;
236   MachineBasicBlock::reverse_iterator FirstNonTerm = I;
237   for (; I != E; ++I) {
238     if (!I->isTerminator())
239       return Seen ? FirstNonTerm : I;
240 
241     if (removeTerminatorBit(TII, *I)) {
242       if (!Seen) {
243         FirstNonTerm = I;
244         Seen = true;
245       }
246     }
247   }
248 
249   return FirstNonTerm;
250 }
251 
252 static MachineBasicBlock::reverse_iterator findExecCopy(
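/// Starting from \p I, scan backwards through the block within a small
/// instruction window for a copy from exec; returns MBB.rend() if none is
/// found within the limit.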
253   const SIInstrInfo &TII,
254   const GCNSubtarget &ST,
255   MachineBasicBlock &MBB,
256   MachineBasicBlock::reverse_iterator I,
257   unsigned CopyToExec) {
258   const unsigned InstLimit = 25;
259 
260   auto E = MBB.rend();
261   for (unsigned N = 0; N <= InstLimit && I != E; ++I, ++N) {
262     Register CopyFromExec = isCopyFromExec(*I, ST);
263     if (CopyFromExec.isValid())
264       return I;
265   }
266 
267   return E;
268 }
269 
270 // XXX - Seems LivePhysRegs doesn't work correctly since it will incorrectly
271 // report the register as unavailable because a super-register with a lane mask
272 // is unavailable.
273 static bool isLiveOut(const MachineBasicBlock &MBB, unsigned Reg) {
274   for (MachineBasicBlock *Succ : MBB.successors()) {
275     if (Succ->isLiveIn(Reg))
276       return true;
277   }
278 
279   return false;
280 }
281 
282 bool SIOptimizeExecMasking::runOnMachineFunction(MachineFunction &MF) {
283   if (skipFunction(MF.getFunction()))
284     return false;
285 
286   const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
287   const SIRegisterInfo *TRI = ST.getRegisterInfo();
288   const SIInstrInfo *TII = ST.getInstrInfo();
289   MCRegister Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
290 
  // Optimize sequences emitted for control flow lowering. They are originally
  // emitted as separate operations because spill code may need to be
  // inserted for the saved copy of exec.
  //
  //     x = copy exec
  //     z = s_<op>_b64 x, y
  //     exec = copy z
  // =>
  //     x = s_<op>_saveexec_b64 y
  //

  for (MachineBasicBlock &MBB : MF) {
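    // Lower the terminator pseudos in this block back to ordinary
    // instructions, and get the position from which to scan backwards for a
    // copy into exec.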
    MachineBasicBlock::reverse_iterator I = fixTerminators(*TII, MBB);
    MachineBasicBlock::reverse_iterator E = MBB.rend();
    if (I == E)
      continue;

    // It's possible to see other terminator copies after the exec copy. This
    // can happen if control flow pseudos had their outputs used by phis.
    Register CopyToExec;

    unsigned SearchCount = 0;
    const unsigned SearchLimit = 5;
    while (I != E && SearchCount++ < SearchLimit) {
      CopyToExec = isCopyToExec(*I, ST);
      if (CopyToExec)
        break;
      ++I;
    }

    if (!CopyToExec)
      continue;

    // Scan backwards to find the def.
    auto CopyToExecInst = &*I;
    auto CopyFromExecInst = findExecCopy(*TII, ST, MBB, I, CopyToExec);
    if (CopyFromExecInst == E) {
      auto PrepareExecInst = std::next(I);
      if (PrepareExecInst == E)
        continue;
      // Fold exec = COPY (S_AND_B64 reg, exec) -> exec = S_AND_B64 reg, exec
      if (CopyToExecInst->getOperand(1).isKill() &&
          isLogicalOpOnExec(*PrepareExecInst) == CopyToExec) {
        LLVM_DEBUG(dbgs() << "Fold exec copy: " << *PrepareExecInst);

        PrepareExecInst->getOperand(0).setReg(Exec);

        LLVM_DEBUG(dbgs() << "into: " << *PrepareExecInst << '\n');

        CopyToExecInst->eraseFromParent();
      }

      continue;
    }

    if (isLiveOut(MBB, CopyToExec)) {
      // The copied register is live out and has a second use in another block.
      LLVM_DEBUG(dbgs() << "Exec copy source register is live out\n");
      continue;
    }

    Register CopyFromExec = CopyFromExecInst->getOperand(0).getReg();
    MachineInstr *SaveExecInst = nullptr;
    SmallVector<MachineInstr *, 4> OtherUseInsts;

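    // Walk forward from the copy from exec toward the copy back into exec,
    // looking for a single logical op that reads the copied exec value and
    // defines the register later written to exec. Give up if exec is read
    // after that op, if the copy has another use before it, or if the
    // destination is written more than once. Other readers of the destination
    // are remembered so they can be rewritten to read exec directly.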
    for (MachineBasicBlock::iterator J
           = std::next(CopyFromExecInst->getIterator()), JE = I->getIterator();
         J != JE; ++J) {
      if (SaveExecInst && J->readsRegister(Exec, TRI)) {
        LLVM_DEBUG(dbgs() << "exec read prevents saveexec: " << *J << '\n');
        // Make sure this is inserted after any VALU ops that may have been
        // scheduled in between.
        SaveExecInst = nullptr;
        break;
      }

      bool ReadsCopyFromExec = J->readsRegister(CopyFromExec, TRI);

      if (J->modifiesRegister(CopyToExec, TRI)) {
        if (SaveExecInst) {
          LLVM_DEBUG(dbgs() << "Multiple instructions modify "
                            << printReg(CopyToExec, TRI) << '\n');
          SaveExecInst = nullptr;
          break;
        }

        unsigned SaveExecOp = getSaveExecOp(J->getOpcode());
        if (SaveExecOp == AMDGPU::INSTRUCTION_LIST_END)
          break;

        if (ReadsCopyFromExec) {
          SaveExecInst = &*J;
          LLVM_DEBUG(dbgs() << "Found save exec op: " << *SaveExecInst << '\n');
          continue;
        } else {
          LLVM_DEBUG(dbgs()
                     << "Instruction does not read exec copy: " << *J << '\n');
          break;
        }
      } else if (ReadsCopyFromExec && !SaveExecInst) {
        // Make sure no other instruction is trying to use this copy, before it
        // will be rewritten by the saveexec, i.e. hasOneUse. There may have
        // been another use, such as an inserted spill. For example:
        //
        // %sgpr0_sgpr1 = COPY %exec
        // spill %sgpr0_sgpr1
        // %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1
        //
        LLVM_DEBUG(dbgs() << "Found second use of save inst candidate: " << *J
                          << '\n');
        break;
      }

      if (SaveExecInst && J->readsRegister(CopyToExec, TRI)) {
        assert(SaveExecInst != &*J);
        OtherUseInsts.push_back(&*J);
      }
    }

    if (!SaveExecInst)
      continue;

    LLVM_DEBUG(dbgs() << "Insert save exec op: " << *SaveExecInst << '\n');

    MachineOperand &Src0 = SaveExecInst->getOperand(1);
    MachineOperand &Src1 = SaveExecInst->getOperand(2);

    MachineOperand *OtherOp = nullptr;

    if (Src0.isReg() && Src0.getReg() == CopyFromExec) {
      OtherOp = &Src1;
    } else if (Src1.isReg() && Src1.getReg() == CopyFromExec) {
      if (!SaveExecInst->isCommutable())
        break;

      OtherOp = &Src0;
    } else
      llvm_unreachable("unexpected");

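    // Perform the rewrite: erase the copy from exec, emit the fused saveexec
    // at the logical op's position (defining the old copy destination), erase
    // the original op and the copy back into exec, then redirect any remaining
    // readers of the old destination to read exec directly.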
431 
432     auto InsPt = SaveExecInst->getIterator();
433     const DebugLoc &DL = SaveExecInst->getDebugLoc();
434 
435     BuildMI(MBB, InsPt, DL, TII->get(getSaveExecOp(SaveExecInst->getOpcode())),
436             CopyFromExec)
437       .addReg(OtherOp->getReg());
438     SaveExecInst->eraseFromParent();
439 
440     CopyToExecInst->eraseFromParent();
441 
442     for (MachineInstr *OtherInst : OtherUseInsts) {
443       OtherInst->substituteRegister(CopyToExec, Exec,
444                                     AMDGPU::NoSubRegister, *TRI);
445     }
446   }
447 
448   return true;
449 
450 }
451