//=- AArch64ConditionOptimizer.cpp - Remove useless comparisons for AArch64 -=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass tries to make consecutive compares of values use the same
// operands, so that the CSE pass can remove duplicated instructions.  For this
// it analyzes branches and adjusts comparisons with immediate values by
// converting:
//  * GE -> GT
//  * GT -> GE
//  * LT -> LE
//  * LE -> LT
// and adjusting the immediate values appropriately.  It essentially corrects
// the two immediate values towards each other until they are equal.
//
// Consider the following example in C:
//
//   if ((a < 5 && ...) || (a > 5 && ...)) {
//        ~~~~~             ~~~~~
//          ^                 ^
//          x                 y
//
// Here both the "x" and "y" expressions compare "a" with "5".  When "x"
// evaluates to "false", "y" can just check the flags set by the first
// comparison.  As a result of the canonicalization employed by
// SelectionDAGBuilder::visitSwitchCase, DAGCombine, and other target-specific
// code, the assembly ends up in a form that is not CSE friendly:
//
//     ...
//     cmp      w8, #4
//     b.gt     .LBB0_3
//     ...
//   .LBB0_3:
//     cmp      w8, #6
//     b.lt     .LBB0_6
//     ...
//
// Same assembly after the pass:
//
//     ...
//     cmp      w8, #5
//     b.ge     .LBB0_3
//     ...
//   .LBB0_3:
//     cmp      w8, #5     // <-- CSE pass removes this instruction
//     b.le     .LBB0_6
//     ...
//
// Currently only SUBS and ADDS followed by b.?? are supported.
//
// TODO: maybe handle TBNZ/TBZ the same way as CMP when they are used in place
//       of a CMP for "a < 0"
// TODO: handle other conditional instructions (e.g. CSET)
// TODO: allow the second branch to be anything if it doesn't require adjusting
//
//===----------------------------------------------------------------------===//

#include "AArch64.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdlib>
#include <tuple>

using namespace llvm;

#define DEBUG_TYPE "aarch64-condopt"

STATISTIC(NumConditionsAdjusted, "Number of conditions adjusted");

namespace {

class AArch64ConditionOptimizer : public MachineFunctionPass {
  const TargetInstrInfo *TII;
  MachineDominatorTree *DomTree;
  const MachineRegisterInfo *MRI;

public:
  // Stores the immediate, the compare instruction opcode, and the branch
  // condition (in this order) of an adjusted comparison.
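  // For example, {5, AArch64::SUBSWri, AArch64CC::GE} describes the adjusted
  // "cmp ..., #5" / "b.ge" pair from the example in the file header.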
  using CmpInfo = std::tuple<int, unsigned, AArch64CC::CondCode>;

  static char ID;

  AArch64ConditionOptimizer() : MachineFunctionPass(ID) {
    initializeAArch64ConditionOptimizerPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override;
  MachineInstr *findSuitableCompare(MachineBasicBlock *MBB);
  CmpInfo adjustCmp(MachineInstr *CmpMI, AArch64CC::CondCode Cmp);
  void modifyCmp(MachineInstr *CmpMI, const CmpInfo &Info);
  bool adjustTo(MachineInstr *CmpMI, AArch64CC::CondCode Cmp, MachineInstr *To,
                int ToImm);
  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override {
    return "AArch64 Condition Optimizer";
  }
};

} // end anonymous namespace

char AArch64ConditionOptimizer::ID = 0;

INITIALIZE_PASS_BEGIN(AArch64ConditionOptimizer, "aarch64-condopt",
                      "AArch64 CondOpt Pass", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_END(AArch64ConditionOptimizer, "aarch64-condopt",
                    "AArch64 CondOpt Pass", false, false)

FunctionPass *llvm::createAArch64ConditionOptimizerPass() {
  return new AArch64ConditionOptimizer();
}

void AArch64ConditionOptimizer::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<MachineDominatorTree>();
  AU.addPreserved<MachineDominatorTree>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

// Finds the compare instruction that corresponds to the supported kinds of
// branching.  Returns the instruction, or nullptr on failure or when an
// unsupported instruction is detected.
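// For example, in a block ending with
//     cmp      w8, #4
//     b.gt     .LBB0_3
// the cmp (a SUBSWri with a dead destination) is returned, provided NZCV is
// not live out of the block and nothing between the cmp and the branch reads
// NZCV.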
MachineInstr *AArch64ConditionOptimizer::findSuitableCompare(
    MachineBasicBlock *MBB) {
  MachineBasicBlock::iterator I = MBB->getFirstTerminator();
  if (I == MBB->end())
    return nullptr;

  if (I->getOpcode() != AArch64::Bcc)
    return nullptr;

  // Since we may modify the cmp of this MBB, make sure NZCV is not live out.
  for (auto SuccBB : MBB->successors())
    if (SuccBB->isLiveIn(AArch64::NZCV))
      return nullptr;

  // Now find the instruction controlling the terminator.
  for (MachineBasicBlock::iterator B = MBB->begin(); I != B;) {
    --I;
    assert(!I->isTerminator() && "Spurious terminator");
    // Check if there is any use of NZCV between CMP and Bcc.
    if (I->readsRegister(AArch64::NZCV))
      return nullptr;
    switch (I->getOpcode()) {
    // cmp is an alias for subs with a dead destination register.
    case AArch64::SUBSWri:
    case AArch64::SUBSXri:
    // cmn is an alias for adds with a dead destination register.
    case AArch64::ADDSWri:
    case AArch64::ADDSXri: {
      unsigned ShiftAmt = AArch64_AM::getShiftValue(I->getOperand(3).getImm());
      if (!I->getOperand(2).isImm()) {
        DEBUG(dbgs() << "Immediate of cmp is symbolic, " << *I << '\n');
        return nullptr;
      } else if (I->getOperand(2).getImm() << ShiftAmt >= 0xfff) {
        DEBUG(dbgs() << "Immediate of cmp may be out of range, " << *I << '\n');
        return nullptr;
      } else if (!MRI->use_empty(I->getOperand(0).getReg())) {
        DEBUG(dbgs() << "Destination of cmp is not dead, " << *I << '\n');
        return nullptr;
      }
      return &*I;
    }
    // Prevent false-positive cases like:
    // cmp      w19, #0
    // cinc     w0, w19, gt
    // ...
    // fcmp     d8, #0.0
    // b.gt     .LBB0_5
    case AArch64::FCMPDri:
    case AArch64::FCMPSri:
    case AArch64::FCMPESri:
    case AArch64::FCMPEDri:

    case AArch64::SUBSWrr:
    case AArch64::SUBSXrr:
    case AArch64::ADDSWrr:
    case AArch64::ADDSXrr:
    case AArch64::FCMPSrr:
    case AArch64::FCMPDrr:
    case AArch64::FCMPESrr:
    case AArch64::FCMPEDrr:
      // Skip comparison instructions without immediate operands.
      return nullptr;
    }
  }
  DEBUG(dbgs() << "Flags not defined in " << printMBBReference(*MBB) << '\n');
  return nullptr;
}

// Changes the opcode adds <-> subs, preserving the register operand width.
static int getComplementOpc(int Opc) {
  switch (Opc) {
  case AArch64::ADDSWri: return AArch64::SUBSWri;
  case AArch64::ADDSXri: return AArch64::SUBSXri;
  case AArch64::SUBSWri: return AArch64::ADDSWri;
  case AArch64::SUBSXri: return AArch64::ADDSXri;
  default:
    llvm_unreachable("Unexpected opcode");
  }
}

// Changes the form of the comparison between inclusive and exclusive.
static AArch64CC::CondCode getAdjustedCmp(AArch64CC::CondCode Cmp) {
  switch (Cmp) {
  case AArch64CC::GT: return AArch64CC::GE;
  case AArch64CC::GE: return AArch64CC::GT;
  case AArch64CC::LT: return AArch64CC::LE;
  case AArch64CC::LE: return AArch64CC::LT;
  default:
    llvm_unreachable("Unexpected condition code");
  }
}

// Transforms GT -> GE, GE -> GT, LT -> LE, LE -> LT by updating the compare
// instruction's immediate and the branch condition code.
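// For the example in the file header, this turns "cmp w8, #4" taken on GT
// into immediate 5 with condition GE, and "cmp w8, #6" taken on LT into
// immediate 5 with condition LE, so both compares end up identical.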
AArch64ConditionOptimizer::CmpInfo AArch64ConditionOptimizer::adjustCmp(
    MachineInstr *CmpMI, AArch64CC::CondCode Cmp) {
  unsigned Opc = CmpMI->getOpcode();

  // CMN (compare with negative immediate) is an alias of ADDS (as
  // "operand - negative" == "operand + positive").
  bool Negative = (Opc == AArch64::ADDSWri || Opc == AArch64::ADDSXri);

  int Correction = (Cmp == AArch64CC::GT) ? 1 : -1;
  // Negate Correction value for comparison with negative immediate (CMN).
  if (Negative) {
    Correction = -Correction;
  }
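  // For example, "cmn w8, #4" compares w8 with -4, so adjusting GT yields a
  // new immediate of 3 with condition GE (w8 > -4 becomes w8 >= -3).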

  const int OldImm = (int)CmpMI->getOperand(2).getImm();
  const int NewImm = std::abs(OldImm + Correction);

  // Handle +0 -> -1 and -0 -> +1 (CMN with 0 immediate) transitions by
  // adjusting compare instruction opcode.
  if (OldImm == 0 && ((Negative && Correction == 1) ||
                      (!Negative && Correction == -1))) {
    Opc = getComplementOpc(Opc);
  }

  return CmpInfo(NewImm, Opc, getAdjustedCmp(Cmp));
}

// Applies the changes suggested by adjustCmp() to the comparison instruction
// and to the branch that uses its flags.
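// For the example in the file header this rewrites
//     cmp      w8, #4
//     b.gt     .LBB0_3
// into
//     cmp      w8, #5
//     b.ge     .LBB0_3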
void AArch64ConditionOptimizer::modifyCmp(MachineInstr *CmpMI,
    const CmpInfo &Info) {
  int Imm;
  unsigned Opc;
  AArch64CC::CondCode Cmp;
  std::tie(Imm, Opc, Cmp) = Info;

  MachineBasicBlock *const MBB = CmpMI->getParent();

  // Change immediate in comparison instruction (ADDS or SUBS).
  BuildMI(*MBB, CmpMI, CmpMI->getDebugLoc(), TII->get(Opc))
      .add(CmpMI->getOperand(0))
      .add(CmpMI->getOperand(1))
      .addImm(Imm)
      .add(CmpMI->getOperand(3));
  CmpMI->eraseFromParent();

  // The fact that this comparison was picked ensures that it's related to the
  // first terminator instruction.
  MachineInstr &BrMI = *MBB->getFirstTerminator();

  // Change condition in branch instruction.
  BuildMI(*MBB, BrMI, BrMI.getDebugLoc(), TII->get(AArch64::Bcc))
      .addImm(Cmp)
      .add(BrMI.getOperand(1));
  BrMI.eraseFromParent();

  MBB->updateTerminator();

  ++NumConditionsAdjusted;
}

// Parse a condition code returned by analyzeBranch, and compute the CondCode
// corresponding to TBB.
// Returns true if parsing was successful, false otherwise.
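// (analyzeBranch describes compare-and-branch forms such as CBZ/CBNZ and
// TBZ/TBNZ with a leading -1 in the Cond array; those are rejected here.)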
static bool parseCond(ArrayRef<MachineOperand> Cond, AArch64CC::CondCode &CC) {
  // A normal br.cond simply has the condition code.
  if (Cond[0].getImm() != -1) {
    assert(Cond.size() == 1 && "Unknown Cond array format");
    CC = (AArch64CC::CondCode)(int)Cond[0].getImm();
    return true;
  }
  return false;
}

// Adjusts one cmp instruction to another one if the result of the adjustment
// will allow CSE.  Returns true if the compare instruction was changed,
// false otherwise.
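// For example, given a head block "cmp w8, #1; b.gt" and a true block
// "cmp w8, #2; b.gt", adjusting the head compare from GT to GE gives
// immediate 2 with the same SUBS opcode as the true block's compare, so the
// head compare is rewritten and the duplicate can later be removed by CSE.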
bool AArch64ConditionOptimizer::adjustTo(MachineInstr *CmpMI,
  AArch64CC::CondCode Cmp, MachineInstr *To, int ToImm)
{
  CmpInfo Info = adjustCmp(CmpMI, Cmp);
  if (std::get<0>(Info) == ToImm && std::get<1>(Info) == To->getOpcode()) {
    modifyCmp(CmpMI, Info);
    return true;
  }
  return false;
}

bool AArch64ConditionOptimizer::runOnMachineFunction(MachineFunction &MF) {
  DEBUG(dbgs() << "********** AArch64 Conditional Compares **********\n"
               << "********** Function: " << MF.getName() << '\n');
  if (skipFunction(*MF.getFunction()))
    return false;

  TII = MF.getSubtarget().getInstrInfo();
  DomTree = &getAnalysis<MachineDominatorTree>();
  MRI = &MF.getRegInfo();

  bool Changed = false;

  // Visit blocks in dominator tree pre-order. The pre-order enables multiple
  // cmp-conversions from the same head block.
  // Note that updateDomTree() modifies the children of the DomTree node
  // currently being visited. The df_iterator supports that; it doesn't look at
  // child_begin() / child_end() until after a node has been visited.
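  // For the example in the file header, the head block HBB ends in
  // "cmp w8, #4; b.gt .LBB0_3" and its taken successor TBB (.LBB0_3) ends in
  // "cmp w8, #6; b.lt .LBB0_6"; both compares are found and adjusted below.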
  for (MachineDomTreeNode *I : depth_first(DomTree)) {
    MachineBasicBlock *HBB = I->getBlock();

    SmallVector<MachineOperand, 4> HeadCond;
    MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
    if (TII->analyzeBranch(*HBB, TBB, FBB, HeadCond)) {
      continue;
    }

    // The equivalence check skips single-block loops.
    if (!TBB || TBB == HBB) {
      continue;
    }

    SmallVector<MachineOperand, 4> TrueCond;
    MachineBasicBlock *TBB_TBB = nullptr, *TBB_FBB = nullptr;
    if (TII->analyzeBranch(*TBB, TBB_TBB, TBB_FBB, TrueCond)) {
      continue;
    }

    MachineInstr *HeadCmpMI = findSuitableCompare(HBB);
    if (!HeadCmpMI) {
      continue;
    }

    MachineInstr *TrueCmpMI = findSuitableCompare(TBB);
    if (!TrueCmpMI) {
      continue;
    }

    AArch64CC::CondCode HeadCmp;
    if (HeadCond.empty() || !parseCond(HeadCond, HeadCmp)) {
      continue;
    }

    AArch64CC::CondCode TrueCmp;
    if (TrueCond.empty() || !parseCond(TrueCond, TrueCmp)) {
      continue;
    }

    const int HeadImm = (int)HeadCmpMI->getOperand(2).getImm();
    const int TrueImm = (int)TrueCmpMI->getOperand(2).getImm();

    DEBUG(dbgs() << "Head branch:\n");
    DEBUG(dbgs() << "\tcondition: "
          << AArch64CC::getCondCodeName(HeadCmp) << '\n');
    DEBUG(dbgs() << "\timmediate: " << HeadImm << '\n');

    DEBUG(dbgs() << "True branch:\n");
    DEBUG(dbgs() << "\tcondition: "
          << AArch64CC::getCondCodeName(TrueCmp) << '\n');
    DEBUG(dbgs() << "\timmediate: " << TrueImm << '\n');

    if (((HeadCmp == AArch64CC::GT && TrueCmp == AArch64CC::LT) ||
         (HeadCmp == AArch64CC::LT && TrueCmp == AArch64CC::GT)) &&
        std::abs(TrueImm - HeadImm) == 2) {
      // This branch transforms machine instructions that correspond to
      //
      // 1) (a > {TrueImm} && ...) || (a < {HeadImm} && ...)
      // 2) (a < {TrueImm} && ...) || (a > {HeadImm} && ...)
      //
      // into
      //
      // 1) (a >= {NewImm} && ...) || (a <= {NewImm} && ...)
      // 2) (a <= {NewImm} && ...) || (a >= {NewImm} && ...)
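      //
      // For the example in the file header, HeadImm = 4 with GT and
      // TrueImm = 6 with LT (|6 - 4| == 2); both adjust to immediate 5 with
      // GE and LE respectively, so the compares become identical and one of
      // them can later be removed by CSE.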

      CmpInfo HeadCmpInfo = adjustCmp(HeadCmpMI, HeadCmp);
      CmpInfo TrueCmpInfo = adjustCmp(TrueCmpMI, TrueCmp);
      if (std::get<0>(HeadCmpInfo) == std::get<0>(TrueCmpInfo) &&
          std::get<1>(HeadCmpInfo) == std::get<1>(TrueCmpInfo)) {
        modifyCmp(HeadCmpMI, HeadCmpInfo);
        modifyCmp(TrueCmpMI, TrueCmpInfo);
        Changed = true;
      }
    } else if (((HeadCmp == AArch64CC::GT && TrueCmp == AArch64CC::GT) ||
                (HeadCmp == AArch64CC::LT && TrueCmp == AArch64CC::LT)) &&
                std::abs(TrueImm - HeadImm) == 1) {
      // This branch transforms machine instructions that correspond to
      //
      // 1) (a > {TrueImm} && ...) || (a > {HeadImm} && ...)
      // 2) (a < {TrueImm} && ...) || (a < {HeadImm} && ...)
      //
      // into
      //
      // 1) (a <= {NewImm} && ...) || (a >  {NewImm} && ...)
      // 2) (a <  {NewImm} && ...) || (a >= {NewImm} && ...)

      // The GT -> GE transformation increases the immediate value, so pick
      // the compare with the smaller one; LT -> LE decreases the immediate
      // value, so invert the choice.
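      // For example, with a head block "cmp w8, #1; b.gt" and a true block
      // "cmp w8, #2; b.gt", the head compare is adjusted (1 < 2): GT with
      // immediate 1 becomes GE with immediate 2, matching the true block's
      // compare so that CSE can later remove the duplicate.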
      bool adjustHeadCond = (HeadImm < TrueImm);
      if (HeadCmp == AArch64CC::LT) {
        adjustHeadCond = !adjustHeadCond;
      }

      if (adjustHeadCond) {
        Changed |= adjustTo(HeadCmpMI, HeadCmp, TrueCmpMI, TrueImm);
      } else {
        Changed |= adjustTo(TrueCmpMI, TrueCmp, HeadCmpMI, HeadImm);
      }
    }
    // Other transformation cases almost never occur because codegen emits
    // < or > comparisons instead of <= and >=.
  }

  return Changed;
}