1 //===- HexagonLoopIdiomRecognition.cpp ------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "HexagonLoopIdiomRecognition.h"
10 #include "llvm/ADT/APInt.h"
11 #include "llvm/ADT/DenseMap.h"
12 #include "llvm/ADT/SetVector.h"
13 #include "llvm/ADT/SmallPtrSet.h"
14 #include "llvm/ADT/SmallSet.h"
15 #include "llvm/ADT/SmallVector.h"
16 #include "llvm/ADT/StringRef.h"
17 #include "llvm/Analysis/AliasAnalysis.h"
18 #include "llvm/Analysis/InstructionSimplify.h"
19 #include "llvm/Analysis/LoopAnalysisManager.h"
20 #include "llvm/Analysis/LoopInfo.h"
21 #include "llvm/Analysis/LoopPass.h"
22 #include "llvm/Analysis/MemoryLocation.h"
23 #include "llvm/Analysis/ScalarEvolution.h"
24 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
25 #include "llvm/Analysis/TargetLibraryInfo.h"
26 #include "llvm/Analysis/ValueTracking.h"
27 #include "llvm/IR/Attributes.h"
28 #include "llvm/IR/BasicBlock.h"
29 #include "llvm/IR/Constant.h"
30 #include "llvm/IR/Constants.h"
31 #include "llvm/IR/DataLayout.h"
32 #include "llvm/IR/DebugLoc.h"
33 #include "llvm/IR/DerivedTypes.h"
34 #include "llvm/IR/Dominators.h"
35 #include "llvm/IR/Function.h"
36 #include "llvm/IR/IRBuilder.h"
37 #include "llvm/IR/InstrTypes.h"
38 #include "llvm/IR/Instruction.h"
39 #include "llvm/IR/Instructions.h"
40 #include "llvm/IR/IntrinsicInst.h"
41 #include "llvm/IR/Intrinsics.h"
42 #include "llvm/IR/IntrinsicsHexagon.h"
43 #include "llvm/IR/Module.h"
44 #include "llvm/IR/PassManager.h"
45 #include "llvm/IR/PatternMatch.h"
46 #include "llvm/IR/Type.h"
47 #include "llvm/IR/User.h"
48 #include "llvm/IR/Value.h"
49 #include "llvm/InitializePasses.h"
50 #include "llvm/Pass.h"
51 #include "llvm/Support/Casting.h"
52 #include "llvm/Support/CommandLine.h"
53 #include "llvm/Support/Compiler.h"
54 #include "llvm/Support/Debug.h"
55 #include "llvm/Support/ErrorHandling.h"
56 #include "llvm/Support/KnownBits.h"
57 #include "llvm/Support/raw_ostream.h"
58 #include "llvm/TargetParser/Triple.h"
59 #include "llvm/Transforms/Scalar.h"
60 #include "llvm/Transforms/Utils.h"
61 #include "llvm/Transforms/Utils/Local.h"
62 #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
63 #include <algorithm>
64 #include <array>
65 #include <cassert>
66 #include <cstdint>
67 #include <cstdlib>
68 #include <deque>
69 #include <functional>
70 #include <iterator>
71 #include <map>
72 #include <set>
73 #include <utility>
74 #include <vector>
75 
76 #define DEBUG_TYPE "hexagon-lir"
77 
78 using namespace llvm;
79 
80 static cl::opt<bool> DisableMemcpyIdiom("disable-memcpy-idiom",
81   cl::Hidden, cl::init(false),
82   cl::desc("Disable generation of memcpy in loop idiom recognition"));
83 
84 static cl::opt<bool> DisableMemmoveIdiom("disable-memmove-idiom",
85   cl::Hidden, cl::init(false),
86   cl::desc("Disable generation of memmove in loop idiom recognition"));
87 
88 static cl::opt<unsigned> RuntimeMemSizeThreshold("runtime-mem-idiom-threshold",
89   cl::Hidden, cl::init(0), cl::desc("Threshold (in bytes) for the runtime "
90   "check guarding the memmove."));
91 
92 static cl::opt<unsigned> CompileTimeMemSizeThreshold(
93   "compile-time-mem-idiom-threshold", cl::Hidden, cl::init(64),
94   cl::desc("Threshold (in bytes) to perform the transformation, if the "
95     "runtime loop count (mem transfer size) is known at compile-time."));
96 
97 static cl::opt<bool> OnlyNonNestedMemmove("only-nonnested-memmove-idiom",
98   cl::Hidden, cl::init(true),
99   cl::desc("Only enable generating memmove in non-nested loops"));
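
// As an illustration of the kind of loop the memcpy/memmove idiom flags above
// govern (a sketch, not code taken from this file or its tests):
//   void copy(int *dst, const int *src, unsigned n) {
//     for (unsigned i = 0; i != n; ++i)
//       dst[i] = src[i];
//   }
// Such a loop can be rewritten into a single call to memcpy, or to memmove
// when the source and destination ranges may overlap, subject to the
// thresholds configured here.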
100 
101 static cl::opt<bool> HexagonVolatileMemcpy(
102     "disable-hexagon-volatile-memcpy", cl::Hidden, cl::init(false),
103     cl::desc("Enable Hexagon-specific memcpy for volatile destination."));
104 
105 static cl::opt<unsigned> SimplifyLimit("hlir-simplify-limit", cl::init(10000),
106   cl::Hidden, cl::desc("Maximum number of simplification steps in HLIR"));
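
// All of the above are ordinary cl::opt flags, so when experimenting they can
// be passed to a tool that runs the Hexagon backend, for example (an
// illustrative invocation, not a documented interface):
//   llc -march=hexagon -disable-memcpy-idiom -hlir-simplify-limit=1000 foo.ll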
107 
108 static const char *HexagonVolatileMemcpyName
109   = "hexagon_memcpy_forward_vp4cp4n2";
110 
111 
112 namespace llvm {
113 
114 void initializeHexagonLoopIdiomRecognizeLegacyPassPass(PassRegistry &);
115 Pass *createHexagonLoopIdiomPass();
116 
117 } // end namespace llvm
118 
119 namespace {
120 
121 class HexagonLoopIdiomRecognize {
122 public:
123   explicit HexagonLoopIdiomRecognize(AliasAnalysis *AA, DominatorTree *DT,
124                                      LoopInfo *LF, const TargetLibraryInfo *TLI,
125                                      ScalarEvolution *SE)
126       : AA(AA), DT(DT), LF(LF), TLI(TLI), SE(SE) {}
127 
128   bool run(Loop *L);
129 
130 private:
131   int getSCEVStride(const SCEVAddRecExpr *StoreEv);
132   bool isLegalStore(Loop *CurLoop, StoreInst *SI);
133   void collectStores(Loop *CurLoop, BasicBlock *BB,
134                      SmallVectorImpl<StoreInst *> &Stores);
135   bool processCopyingStore(Loop *CurLoop, StoreInst *SI, const SCEV *BECount);
136   bool coverLoop(Loop *L, SmallVectorImpl<Instruction *> &Insts) const;
137   bool runOnLoopBlock(Loop *CurLoop, BasicBlock *BB, const SCEV *BECount,
138                       SmallVectorImpl<BasicBlock *> &ExitBlocks);
139   bool runOnCountableLoop(Loop *L);
140 
141   AliasAnalysis *AA;
142   const DataLayout *DL;
143   DominatorTree *DT;
144   LoopInfo *LF;
145   const TargetLibraryInfo *TLI;
146   ScalarEvolution *SE;
147   bool HasMemcpy, HasMemmove;
148 };
149 
150 class HexagonLoopIdiomRecognizeLegacyPass : public LoopPass {
151 public:
152   static char ID;
153 
154   explicit HexagonLoopIdiomRecognizeLegacyPass() : LoopPass(ID) {
155     initializeHexagonLoopIdiomRecognizeLegacyPassPass(
156         *PassRegistry::getPassRegistry());
157   }
158 
159   StringRef getPassName() const override {
160     return "Recognize Hexagon-specific loop idioms";
161   }
162 
163   void getAnalysisUsage(AnalysisUsage &AU) const override {
164     AU.addRequired<LoopInfoWrapperPass>();
165     AU.addRequiredID(LoopSimplifyID);
166     AU.addRequiredID(LCSSAID);
167     AU.addRequired<AAResultsWrapperPass>();
168     AU.addRequired<ScalarEvolutionWrapperPass>();
169     AU.addRequired<DominatorTreeWrapperPass>();
170     AU.addRequired<TargetLibraryInfoWrapperPass>();
171     AU.addPreserved<TargetLibraryInfoWrapperPass>();
172   }
173 
174   bool runOnLoop(Loop *L, LPPassManager &LPM) override;
175 };
176 
177 struct Simplifier {
178   struct Rule {
179     using FuncType = std::function<Value *(Instruction *, LLVMContext &)>;
180     Rule(StringRef N, FuncType F) : Name(N), Fn(F) {}
181     StringRef Name; // For debugging.
182     FuncType Fn;
183   };
184 
185   void addRule(StringRef N, const Rule::FuncType &F) {
186     Rules.push_back(Rule(N, F));
187   }
188 
189 private:
190   struct WorkListType {
191     WorkListType() = default;
192 
193     void push_back(Value *V) {
194       // Do not push back duplicates.
195       if (S.insert(V).second)
196         Q.push_back(V);
197     }
198 
199     Value *pop_front_val() {
200       Value *V = Q.front();
201       Q.pop_front();
202       S.erase(V);
203       return V;
204     }
205 
206     bool empty() const { return Q.empty(); }
207 
208   private:
209     std::deque<Value *> Q;
210     std::set<Value *> S;
211   };
212 
213   using ValueSetType = std::set<Value *>;
214 
215   std::vector<Rule> Rules;
216 
217 public:
218   struct Context {
219     using ValueMapType = DenseMap<Value *, Value *>;
220 
221     Value *Root;
222     ValueSetType Used;   // The set of all cloned values used by Root.
223     ValueSetType Clones; // The set of all cloned values.
224     LLVMContext &Ctx;
225 
226     Context(Instruction *Exp)
227         : Ctx(Exp->getParent()->getParent()->getContext()) {
228       initialize(Exp);
229     }
230 
231     ~Context() { cleanup(); }
232 
233     void print(raw_ostream &OS, const Value *V) const;
234     Value *materialize(BasicBlock *B, BasicBlock::iterator At);
235 
236   private:
237     friend struct Simplifier;
238 
239     void initialize(Instruction *Exp);
240     void cleanup();
241 
242     template <typename FuncT> void traverse(Value *V, FuncT F);
243     void record(Value *V);
244     void use(Value *V);
245     void unuse(Value *V);
246 
247     bool equal(const Instruction *I, const Instruction *J) const;
248     Value *find(Value *Tree, Value *Sub) const;
249     Value *subst(Value *Tree, Value *OldV, Value *NewV);
250     void replace(Value *OldV, Value *NewV);
251     void link(Instruction *I, BasicBlock *B, BasicBlock::iterator At);
252   };
253 
254   Value *simplify(Context &C);
255 };
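
// A minimal usage sketch for Simplifier (illustrative only; `SomeInst` and the
// rewrite rule are hypothetical, not taken from this file):
//
//   Simplifier S;
//   S.addRule("fold-xor-zero",   // (xor x 0) -> x
//     [](Instruction *I, LLVMContext &Ctx) -> Value * {
//       using namespace PatternMatch;
//       Value *X;
//       if (match(I, m_Xor(m_Value(X), m_Zero())))
//         return X;
//       return nullptr;
//     });
//
//   Simplifier::Context C(SomeInst);   // deep-clones the expression tree
//   if (S.simplify(C)) {               // returns nullptr if the limit is hit
//     Value *V = C.materialize(SomeInst->getParent(), SomeInst->getIterator());
//     // ... V can now be used in place of the original expression ...
//   }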
256 
257   struct PE {
258     PE(const Simplifier::Context &c, Value *v = nullptr) : C(c), V(v) {}
259 
260     const Simplifier::Context &C;
261     const Value *V;
262   };
263 
264   LLVM_ATTRIBUTE_USED
265   raw_ostream &operator<<(raw_ostream &OS, const PE &P) {
266     P.C.print(OS, P.V ? P.V : P.C.Root);
267     return OS;
268   }
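  // PE is a debug-printing helper; a typical (illustrative) use is:
  //   LLVM_DEBUG(dbgs() << "expr: " << PE(C) << '\n');   // C: Simplifier::Context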
269 
270 } // end anonymous namespace
271 
272 char HexagonLoopIdiomRecognizeLegacyPass::ID = 0;
273 
274 INITIALIZE_PASS_BEGIN(HexagonLoopIdiomRecognizeLegacyPass, "hexagon-loop-idiom",
275                       "Recognize Hexagon-specific loop idioms", false, false)
276 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
277 INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
278 INITIALIZE_PASS_DEPENDENCY(LCSSAWrapperPass)
279 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
280 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
281 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
282 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
283 INITIALIZE_PASS_END(HexagonLoopIdiomRecognizeLegacyPass, "hexagon-loop-idiom",
284                     "Recognize Hexagon-specific loop idioms", false, false)
285 
286 template <typename FuncT>
287 void Simplifier::Context::traverse(Value *V, FuncT F) {
288   WorkListType Q;
289   Q.push_back(V);
290 
291   while (!Q.empty()) {
292     Instruction *U = dyn_cast<Instruction>(Q.pop_front_val());
293     if (!U || U->getParent())
294       continue;
295     if (!F(U))
296       continue;
297     for (Value *Op : U->operands())
298       Q.push_back(Op);
299   }
300 }
301 
302 void Simplifier::Context::print(raw_ostream &OS, const Value *V) const {
303   const auto *U = dyn_cast<const Instruction>(V);
304   if (!U) {
305     OS << V << '(' << *V << ')';
306     return;
307   }
308 
309   if (U->getParent()) {
310     OS << U << '(';
311     U->printAsOperand(OS, true);
312     OS << ')';
313     return;
314   }
315 
316   unsigned N = U->getNumOperands();
317   if (N != 0)
318     OS << U << '(';
319   OS << U->getOpcodeName();
320   for (const Value *Op : U->operands()) {
321     OS << ' ';
322     print(OS, Op);
323   }
324   if (N != 0)
325     OS << ')';
326 }
327 
328 void Simplifier::Context::initialize(Instruction *Exp) {
329   // Perform a deep clone of the expression, set Root to the root
330   // of the clone, and build a map from the cloned values to the
331   // original ones.
332   ValueMapType M;
333   BasicBlock *Block = Exp->getParent();
334   WorkListType Q;
335   Q.push_back(Exp);
336 
337   while (!Q.empty()) {
338     Value *V = Q.pop_front_val();
339     if (M.contains(V))
340       continue;
341     if (Instruction *U = dyn_cast<Instruction>(V)) {
342       if (isa<PHINode>(U) || U->getParent() != Block)
343         continue;
344       for (Value *Op : U->operands())
345         Q.push_back(Op);
346       M.insert({U, U->clone()});
347     }
348   }
349 
350   for (std::pair<Value*,Value*> P : M) {
351     Instruction *U = cast<Instruction>(P.second);
352     for (unsigned i = 0, n = U->getNumOperands(); i != n; ++i) {
353       auto F = M.find(U->getOperand(i));
354       if (F != M.end())
355         U->setOperand(i, F->second);
356     }
357   }
358 
359   auto R = M.find(Exp);
360   assert(R != M.end());
361   Root = R->second;
362 
363   record(Root);
364   use(Root);
365 }
366 
367 void Simplifier::Context::record(Value *V) {
368   auto Record = [this](Instruction *U) -> bool {
369     Clones.insert(U);
370     return true;
371   };
372   traverse(V, Record);
373 }
374 
375 void Simplifier::Context::use(Value *V) {
376   auto Use = [this](Instruction *U) -> bool {
377     Used.insert(U);
378     return true;
379   };
380   traverse(V, Use);
381 }
382 
383 void Simplifier::Context::unuse(Value *V) {
384   if (!isa<Instruction>(V) || cast<Instruction>(V)->getParent() != nullptr)
385     return;
386 
387   auto Unuse = [this](Instruction *U) -> bool {
388     if (!U->use_empty())
389       return false;
390     Used.erase(U);
391     return true;
392   };
393   traverse(V, Unuse);
394 }
395 
396 Value *Simplifier::Context::subst(Value *Tree, Value *OldV, Value *NewV) {
397   if (Tree == OldV)
398     return NewV;
399   if (OldV == NewV)
400     return Tree;
401 
402   WorkListType Q;
403   Q.push_back(Tree);
404   while (!Q.empty()) {
405     Instruction *U = dyn_cast<Instruction>(Q.pop_front_val());
406     // If U is not an instruction, or it's not a clone, skip it.
407     if (!U || U->getParent())
408       continue;
409     for (unsigned i = 0, n = U->getNumOperands(); i != n; ++i) {
410       Value *Op = U->getOperand(i);
411       if (Op == OldV) {
412         U->setOperand(i, NewV);
413         unuse(OldV);
414       } else {
415         Q.push_back(Op);
416       }
417     }
418   }
419   return Tree;
420 }
421 
422 void Simplifier::Context::replace(Value *OldV, Value *NewV) {
423   if (Root == OldV) {
424     Root = NewV;
425     use(Root);
426     return;
427   }
428 
429   // NewV may be a complex tree that has just been created by one of the
430   // transformation rules. We need to make sure that it is commoned with
431   // the existing Root to the maximum extent possible.
432   // Identify all subtrees of NewV (including NewV itself) that have
433   // equivalent counterparts in Root, and replace those subtrees with
434   // these counterparts.
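  // For instance (an illustration): if NewV is (or (and x y) z) and Root
  // already contains a clone equivalent to (and x y), that subtree of NewV
  // is replaced with Root's existing node so that the final tree shares it.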
435   WorkListType Q;
436   Q.push_back(NewV);
437   while (!Q.empty()) {
438     Value *V = Q.pop_front_val();
439     Instruction *U = dyn_cast<Instruction>(V);
440     if (!U || U->getParent())
441       continue;
442     if (Value *DupV = find(Root, V)) {
443       if (DupV != V)
444         NewV = subst(NewV, V, DupV);
445     } else {
446       for (Value *Op : U->operands())
447         Q.push_back(Op);
448     }
449   }
450 
451   // Now, simply replace OldV with NewV in Root.
452   Root = subst(Root, OldV, NewV);
453   use(Root);
454 }
455 
456 void Simplifier::Context::cleanup() {
457   for (Value *V : Clones) {
458     Instruction *U = cast<Instruction>(V);
459     if (!U->getParent())
460       U->dropAllReferences();
461   }
462 
463   for (Value *V : Clones) {
464     Instruction *U = cast<Instruction>(V);
465     if (!U->getParent())
466       U->deleteValue();
467   }
468 }
469 
470 bool Simplifier::Context::equal(const Instruction *I,
471                                 const Instruction *J) const {
472   if (I == J)
473     return true;
474   if (!I->isSameOperationAs(J))
475     return false;
476   if (isa<PHINode>(I))
477     return I->isIdenticalTo(J);
478 
479   for (unsigned i = 0, n = I->getNumOperands(); i != n; ++i) {
480     Value *OpI = I->getOperand(i), *OpJ = J->getOperand(i);
481     if (OpI == OpJ)
482       continue;
483     auto *InI = dyn_cast<const Instruction>(OpI);
484     auto *InJ = dyn_cast<const Instruction>(OpJ);
485     if (InI && InJ) {
486       if (!equal(InI, InJ))
487         return false;
488     } else if (InI != InJ || !InI)
489       return false;
490   }
491   return true;
492 }
493 
494 Value *Simplifier::Context::find(Value *Tree, Value *Sub) const {
495   Instruction *SubI = dyn_cast<Instruction>(Sub);
496   WorkListType Q;
497   Q.push_back(Tree);
498 
499   while (!Q.empty()) {
500     Value *V = Q.pop_front_val();
501     if (V == Sub)
502       return V;
503     Instruction *U = dyn_cast<Instruction>(V);
504     if (!U || U->getParent())
505       continue;
506     if (SubI && equal(SubI, U))
507       return U;
508     assert(!isa<PHINode>(U));
509     for (Value *Op : U->operands())
510       Q.push_back(Op);
511   }
512   return nullptr;
513 }
514 
515 void Simplifier::Context::link(Instruction *I, BasicBlock *B,
516       BasicBlock::iterator At) {
517   if (I->getParent())
518     return;
519 
520   for (Value *Op : I->operands()) {
521     if (Instruction *OpI = dyn_cast<Instruction>(Op))
522       link(OpI, B, At);
523   }
524 
525   I->insertInto(B, At);
526 }
527 
528 Value *Simplifier::Context::materialize(BasicBlock *B,
529       BasicBlock::iterator At) {
530   if (Instruction *RootI = dyn_cast<Instruction>(Root))
531     link(RootI, B, At);
532   return Root;
533 }
534 
535 Value *Simplifier::simplify(Context &C) {
536   WorkListType Q;
537   Q.push_back(C.Root);
538   unsigned Count = 0;
539   const unsigned Limit = SimplifyLimit;
540 
541   while (!Q.empty()) {
542     if (Count++ >= Limit)
543       break;
544     Instruction *U = dyn_cast<Instruction>(Q.pop_front_val());
545     if (!U || U->getParent() || !C.Used.count(U))
546       continue;
547     bool Changed = false;
548     for (Rule &R : Rules) {
549       Value *W = R.Fn(U, C.Ctx);
550       if (!W)
551         continue;
552       Changed = true;
553       C.record(W);
554       C.replace(U, W);
555       Q.push_back(C.Root);
556       break;
557     }
558     if (!Changed) {
559       for (Value *Op : U->operands())
560         Q.push_back(Op);
561     }
562   }
563   return Count < Limit ? C.Root : nullptr;
564 }
565 
566 //===----------------------------------------------------------------------===//
567 //
568 //          Implementation of PolynomialMultiplyRecognize
569 //
570 //===----------------------------------------------------------------------===//
571 
572 namespace {
573 
574   class PolynomialMultiplyRecognize {
575   public:
576     explicit PolynomialMultiplyRecognize(Loop *loop, const DataLayout &dl,
577         const DominatorTree &dt, const TargetLibraryInfo &tli,
578         ScalarEvolution &se)
579       : CurLoop(loop), DL(dl), DT(dt), TLI(tli), SE(se) {}
580 
581     bool recognize();
582 
583   private:
584     using ValueSeq = SetVector<Value *>;
585 
586     IntegerType *getPmpyType() const {
587       LLVMContext &Ctx = CurLoop->getHeader()->getParent()->getContext();
588       return IntegerType::get(Ctx, 32);
589     }
590 
591     bool isPromotableTo(Value *V, IntegerType *Ty);
592     void promoteTo(Instruction *In, IntegerType *DestTy, BasicBlock *LoopB);
593     bool promoteTypes(BasicBlock *LoopB, BasicBlock *ExitB);
594 
595     Value *getCountIV(BasicBlock *BB);
596     bool findCycle(Value *Out, Value *In, ValueSeq &Cycle);
597     void classifyCycle(Instruction *DivI, ValueSeq &Cycle, ValueSeq &Early,
598           ValueSeq &Late);
599     bool classifyInst(Instruction *UseI, ValueSeq &Early, ValueSeq &Late);
600     bool commutesWithShift(Instruction *I);
601     bool highBitsAreZero(Value *V, unsigned IterCount);
602     bool keepsHighBitsZero(Value *V, unsigned IterCount);
603     bool isOperandShifted(Instruction *I, Value *Op);
604     bool convertShiftsToLeft(BasicBlock *LoopB, BasicBlock *ExitB,
605           unsigned IterCount);
606     void cleanupLoopBody(BasicBlock *LoopB);
607 
608     struct ParsedValues {
609       ParsedValues() = default;
610 
611       Value *M = nullptr;
612       Value *P = nullptr;
613       Value *Q = nullptr;
614       Value *R = nullptr;
615       Value *X = nullptr;
616       Instruction *Res = nullptr;
617       unsigned IterCount = 0;
618       bool Left = false;
619       bool Inv = false;
620     };
621 
622     bool matchLeftShift(SelectInst *SelI, Value *CIV, ParsedValues &PV);
623     bool matchRightShift(SelectInst *SelI, ParsedValues &PV);
624     bool scanSelect(SelectInst *SI, BasicBlock *LoopB, BasicBlock *PrehB,
625           Value *CIV, ParsedValues &PV, bool PreScan);
626     unsigned getInverseMxN(unsigned QP);
627     Value *generate(BasicBlock::iterator At, ParsedValues &PV);
628 
629     void setupPreSimplifier(Simplifier &S);
630     void setupPostSimplifier(Simplifier &S);
631 
632     Loop *CurLoop;
633     const DataLayout &DL;
634     const DominatorTree &DT;
635     const TargetLibraryInfo &TLI;
636     ScalarEvolution &SE;
637   };
638 
639 } // end anonymous namespace
640 
641 Value *PolynomialMultiplyRecognize::getCountIV(BasicBlock *BB) {
642   pred_iterator PI = pred_begin(BB), PE = pred_end(BB);
643   if (std::distance(PI, PE) != 2)
644     return nullptr;
645   BasicBlock *PB = (*PI == BB) ? *std::next(PI) : *PI;
646 
647   for (auto I = BB->begin(), E = BB->end(); I != E && isa<PHINode>(I); ++I) {
648     auto *PN = cast<PHINode>(I);
649     Value *InitV = PN->getIncomingValueForBlock(PB);
650     if (!isa<ConstantInt>(InitV) || !cast<ConstantInt>(InitV)->isZero())
651       continue;
652     Value *IterV = PN->getIncomingValueForBlock(BB);
653     auto *BO = dyn_cast<BinaryOperator>(IterV);
654     if (!BO)
655       continue;
656     if (BO->getOpcode() != Instruction::Add)
657       continue;
658     Value *IncV = nullptr;
659     if (BO->getOperand(0) == PN)
660       IncV = BO->getOperand(1);
661     else if (BO->getOperand(1) == PN)
662       IncV = BO->getOperand(0);
663     if (IncV == nullptr)
664       continue;
665 
666     if (auto *T = dyn_cast<ConstantInt>(IncV))
667       if (T->isOne())
668         return PN;
669   }
670   return nullptr;
671 }
672 
673 static void replaceAllUsesOfWithIn(Value *I, Value *J, BasicBlock *BB) {
674   for (auto UI = I->user_begin(), UE = I->user_end(); UI != UE;) {
675     Use &TheUse = UI.getUse();
676     ++UI;
677     if (auto *II = dyn_cast<Instruction>(TheUse.getUser()))
678       if (BB == II->getParent())
679         II->replaceUsesOfWith(I, J);
680   }
681 }
682 
683 bool PolynomialMultiplyRecognize::matchLeftShift(SelectInst *SelI,
684       Value *CIV, ParsedValues &PV) {
685   // Match the following:
686   //   select (X & (1 << i)) != 0 ? R ^ (Q << i) : R
687   //   select (X & (1 << i)) == 0 ? R : R ^ (Q << i)
688   // The condition may also check for equality with the masked value, i.e.
689   //   select (X & (1 << i)) == (1 << i) ? R ^ (Q << i) : R
690   //   select (X & (1 << i)) != (1 << i) ? R : R ^ (Q << i);
691 
692   Value *CondV = SelI->getCondition();
693   Value *TrueV = SelI->getTrueValue();
694   Value *FalseV = SelI->getFalseValue();
695 
696   using namespace PatternMatch;
697 
698   CmpPredicate P;
699   Value *A = nullptr, *B = nullptr, *C = nullptr;
700 
701   if (!match(CondV, m_ICmp(P, m_And(m_Value(A), m_Value(B)), m_Value(C))) &&
702       !match(CondV, m_ICmp(P, m_Value(C), m_And(m_Value(A), m_Value(B)))))
703     return false;
704   if (P != CmpInst::ICMP_EQ && P != CmpInst::ICMP_NE)
705     return false;
706   // Matched: select (A & B) == C ? ... : ...
707   //          select (A & B) != C ? ... : ...
708 
709   Value *X = nullptr, *Sh1 = nullptr;
710   // Check (A & B) for (X & (1 << i)):
711   if (match(A, m_Shl(m_One(), m_Specific(CIV)))) {
712     Sh1 = A;
713     X = B;
714   } else if (match(B, m_Shl(m_One(), m_Specific(CIV)))) {
715     Sh1 = B;
716     X = A;
717   } else {
718     // TODO: Could also check for an induction variable containing single
719     // bit shifted left by 1 in each iteration.
720     return false;
721   }
722 
723   bool TrueIfZero;
724 
725   // Check C against the possible values for comparison: 0 and (1 << i):
726   if (match(C, m_Zero()))
727     TrueIfZero = (P == CmpInst::ICMP_EQ);
728   else if (C == Sh1)
729     TrueIfZero = (P == CmpInst::ICMP_NE);
730   else
731     return false;
732 
733   // So far, matched:
734   //   select (X & (1 << i)) ? ... : ...
735   // including variations of the check against zero/non-zero value.
736 
737   Value *ShouldSameV = nullptr, *ShouldXoredV = nullptr;
738   if (TrueIfZero) {
739     ShouldSameV = TrueV;
740     ShouldXoredV = FalseV;
741   } else {
742     ShouldSameV = FalseV;
743     ShouldXoredV = TrueV;
744   }
745 
746   Value *Q = nullptr, *R = nullptr, *Y = nullptr, *Z = nullptr;
747   Value *T = nullptr;
748   if (match(ShouldXoredV, m_Xor(m_Value(Y), m_Value(Z)))) {
749     // Matched: select +++ ? ... : Y ^ Z
750     //          select +++ ? Y ^ Z : ...
751     // where +++ denotes previously checked matches.
752     if (ShouldSameV == Y)
753       T = Z;
754     else if (ShouldSameV == Z)
755       T = Y;
756     else
757       return false;
758     R = ShouldSameV;
759     // Matched: select +++ ? R : R ^ T
760     //          select +++ ? R ^ T : R
761     // depending on TrueIfZero.
762 
763   } else if (match(ShouldSameV, m_Zero())) {
764     // Matched: select +++ ? 0 : ...
765     //          select +++ ? ... : 0
766     if (!SelI->hasOneUse())
767       return false;
768     T = ShouldXoredV;
769     // Matched: select +++ ? 0 : T
770     //          select +++ ? T : 0
771 
772     Value *U = *SelI->user_begin();
773     if (!match(U, m_c_Xor(m_Specific(SelI), m_Value(R))))
774       return false;
775     // Matched: xor (select +++ ? 0 : T), R
776     //          xor (select +++ ? T : 0), R
777   } else
778     return false;
779 
780   // The xor input value T is isolated into its own match so that it could
781   // be checked against an induction variable containing a shifted bit
782   // (todo).
783   // For now, check against (Q << i).
784   if (!match(T, m_Shl(m_Value(Q), m_Specific(CIV))) &&
785       !match(T, m_Shl(m_ZExt(m_Value(Q)), m_ZExt(m_Specific(CIV)))))
786     return false;
787   // Matched: select +++ ? R : R ^ (Q << i)
788   //          select +++ ? R ^ (Q << i) : R
789 
790   PV.X = X;
791   PV.Q = Q;
792   PV.R = R;
793   PV.Left = true;
794   return true;
795 }
796 
797 bool PolynomialMultiplyRecognize::matchRightShift(SelectInst *SelI,
798       ParsedValues &PV) {
799   // Match the following:
800   //   select (X & 1) != 0 ? (R >> 1) ^ Q : (R >> 1)
801   //   select (X & 1) == 0 ? (R >> 1) : (R >> 1) ^ Q
802   // The condition may also check for equality with the masked value, i.e.
803   //   select (X & 1) == 1 ? (R >> 1) ^ Q : (R >> 1)
804   //   select (X & 1) != 1 ? (R >> 1) : (R >> 1) ^ Q
805 
806   Value *CondV = SelI->getCondition();
807   Value *TrueV = SelI->getTrueValue();
808   Value *FalseV = SelI->getFalseValue();
809 
810   using namespace PatternMatch;
811 
812   Value *C = nullptr;
813   CmpPredicate P;
814   bool TrueIfZero;
815 
816   if (match(CondV, m_c_ICmp(P, m_Value(C), m_Zero()))) {
817     if (P != CmpInst::ICMP_EQ && P != CmpInst::ICMP_NE)
818       return false;
819     // Matched: select C == 0 ? ... : ...
820     //          select C != 0 ? ... : ...
821     TrueIfZero = (P == CmpInst::ICMP_EQ);
822   } else if (match(CondV, m_c_ICmp(P, m_Value(C), m_One()))) {
823     if (P != CmpInst::ICMP_EQ && P != CmpInst::ICMP_NE)
824       return false;
825     // Matched: select C == 1 ? ... : ...
826     //          select C != 1 ? ... : ...
827     TrueIfZero = (P == CmpInst::ICMP_NE);
828   } else
829     return false;
830 
831   Value *X = nullptr;
832   if (!match(C, m_And(m_Value(X), m_One())))
833     return false;
834   // Matched: select (X & 1) == +++ ? ... : ...
835   //          select (X & 1) != +++ ? ... : ...
836 
837   Value *R = nullptr, *Q = nullptr;
838   if (TrueIfZero) {
839     // The select's condition is true if the tested bit is 0.
840     // TrueV must be the shift, FalseV must be the xor.
841     if (!match(TrueV, m_LShr(m_Value(R), m_One())))
842       return false;
843     // Matched: select +++ ? (R >> 1) : ...
844     if (!match(FalseV, m_c_Xor(m_Specific(TrueV), m_Value(Q))))
845       return false;
846     // Matched: select +++ ? (R >> 1) : (R >> 1) ^ Q
847     // with commuting ^.
848   } else {
849     // The select's condition is true if the tested bit is 1.
850     // TrueV must be the xor, FalseV must be the shift.
851     if (!match(FalseV, m_LShr(m_Value(R), m_One())))
852       return false;
853     // Matched: select +++ ? ... : (R >> 1)
854     if (!match(TrueV, m_c_Xor(m_Specific(FalseV), m_Value(Q))))
855       return false;
856     // Matched: select +++ ? (R >> 1) ^ Q : (R >> 1)
857     // with commuting ^.
858   }
859 
860   PV.X = X;
861   PV.Q = Q;
862   PV.R = R;
863   PV.Left = false;
864   return true;
865 }
866 
867 bool PolynomialMultiplyRecognize::scanSelect(SelectInst *SelI,
868       BasicBlock *LoopB, BasicBlock *PrehB, Value *CIV, ParsedValues &PV,
869       bool PreScan) {
870   using namespace PatternMatch;
871 
872   // The basic pattern for R = P.Q is:
873   // for i = 0..31
874   //   R = phi (0, R')
875   //   if (P & (1 << i))        ; test-bit(P, i)
876   //     R' = R ^ (Q << i)
877   //
878   // Similarly, the basic pattern for R = (P/Q).Q - P
879   // for i = 0..31
880   //   R = phi(P, R')
881   //   if (R & (1 << i))
882   //     R' = R ^ (Q << i)
883 
884   // There exist idioms where, instead of Q being shifted left, P is shifted
885   // right. This produces a result that is shifted right by 32 bits (the
886   // non-shifted result is 64-bit).
887   //
888   // For R = P.Q, this would be:
889   // for i = 0..31
890   //   R = phi (0, R')
891   //   if ((P >> i) & 1)
892   //     R' = (R >> 1) ^ Q      ; R is cycled through the loop, so it must
893   //   else                     ; be shifted by 1, not i.
894   //     R' = R >> 1
895   //
896   // And for the inverse:
897   // for i = 0..31
898   //   R = phi (P, R')
899   //   if (R & 1)
900   //     R' = (R >> 1) ^ Q
901   //   else
902   //     R' = R >> 1
903 
904   // The left-shifting idioms share the same pattern:
905   //   select (X & (1 << i)) ? R ^ (Q << i) : R
906   // Similarly for right-shifting idioms:
907   //   select (X & 1) ? (R >> 1) ^ Q : (R >> 1)
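  //
  // As a concrete illustration (a sketch, not code from this file), the
  // left-shifting form typically comes from C source such as
  //   uint32_t r = 0;
  //   for (unsigned i = 0; i != 32; ++i)
  //     if (p & (1u << i))
  //       r ^= q << i;
  // once the branch has been converted into a select.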
908 
909   if (matchLeftShift(SelI, CIV, PV)) {
910     // If this is a pre-scan, getting this far is sufficient.
911     if (PreScan)
912       return true;
913 
914     // Need to make sure that the SelI goes back into R.
915     auto *RPhi = dyn_cast<PHINode>(PV.R);
916     if (!RPhi)
917       return false;
918     if (SelI != RPhi->getIncomingValueForBlock(LoopB))
919       return false;
920     PV.Res = SelI;
921 
922     // If X is loop invariant, it must be the input polynomial, and the
923     // idiom is the basic polynomial multiply.
924     if (CurLoop->isLoopInvariant(PV.X)) {
925       PV.P = PV.X;
926       PV.Inv = false;
927     } else {
928       // X is not loop invariant. If X == R, this is the inverse pmpy.
929       // Otherwise, check for an xor with an invariant value. If the
930       // variable argument to the xor is R, then this is still a valid
931       // inverse pmpy.
932       PV.Inv = true;
933       if (PV.X != PV.R) {
934         Value *Var = nullptr, *Inv = nullptr, *X1 = nullptr, *X2 = nullptr;
935         if (!match(PV.X, m_Xor(m_Value(X1), m_Value(X2))))
936           return false;
937         auto *I1 = dyn_cast<Instruction>(X1);
938         auto *I2 = dyn_cast<Instruction>(X2);
939         if (!I1 || I1->getParent() != LoopB) {
940           Var = X2;
941           Inv = X1;
942         } else if (!I2 || I2->getParent() != LoopB) {
943           Var = X1;
944           Inv = X2;
945         } else
946           return false;
947         if (Var != PV.R)
948           return false;
949         PV.M = Inv;
950       }
951       // The input polynomial P still needs to be determined. It will be
952       // the entry value of R.
953       Value *EntryP = RPhi->getIncomingValueForBlock(PrehB);
954       PV.P = EntryP;
955     }
956 
957     return true;
958   }
959 
960   if (matchRightShift(SelI, PV)) {
961     // If this is an inverse pattern, the Q polynomial must be known at
962     // compile time.
963     if (PV.Inv && !isa<ConstantInt>(PV.Q))
964       return false;
965     if (PreScan)
966       return true;
967     // There is no exact matching of right-shift pmpy.
968     return false;
969   }
970 
971   return false;
972 }
973 
974 bool PolynomialMultiplyRecognize::isPromotableTo(Value *Val,
975       IntegerType *DestTy) {
976   IntegerType *T = dyn_cast<IntegerType>(Val->getType());
977   if (!T || T->getBitWidth() > DestTy->getBitWidth())
978     return false;
979   if (T->getBitWidth() == DestTy->getBitWidth())
980     return true;
981   // Non-instructions are promotable. The reason why an instruction may not
982   // be promotable is that it may produce a different result if its operands
983   // and the result are promoted, for example, it may produce more non-zero
984   // bits. While it would still be possible to represent the proper result
985   // in a wider type, it may require adding additional instructions (which
986   // we don't want to do).
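  // For example (an illustration): an i8 'add' that wraps, e.g.
  // add i8 255, 1 == 0, would instead produce 256 after promotion to i32,
  // which is why 'add' is only accepted below with both nuw and nsw.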
987   Instruction *In = dyn_cast<Instruction>(Val);
988   if (!In)
989     return true;
990   // The bitwidth of the source type is smaller than the destination.
991   // Check if the individual operation can be promoted.
992   switch (In->getOpcode()) {
993     case Instruction::PHI:
994     case Instruction::ZExt:
995     case Instruction::And:
996     case Instruction::Or:
997     case Instruction::Xor:
998     case Instruction::LShr: // Shift right is ok.
999     case Instruction::Select:
1000     case Instruction::Trunc:
1001       return true;
1002     case Instruction::ICmp:
1003       if (CmpInst *CI = cast<CmpInst>(In))
1004         return CI->isEquality() || CI->isUnsigned();
1005       llvm_unreachable("Cast failed unexpectedly");
1006     case Instruction::Add:
1007       return In->hasNoSignedWrap() && In->hasNoUnsignedWrap();
1008   }
1009   return false;
1010 }
1011 
1012 void PolynomialMultiplyRecognize::promoteTo(Instruction *In,
1013       IntegerType *DestTy, BasicBlock *LoopB) {
1014   Type *OrigTy = In->getType();
1015   assert(!OrigTy->isVoidTy() && "Invalid instruction to promote");
1016 
1017   // Leave boolean values alone.
1018   if (!In->getType()->isIntegerTy(1))
1019     In->mutateType(DestTy);
1020   unsigned DestBW = DestTy->getBitWidth();
1021 
1022   // Handle PHIs.
1023   if (PHINode *P = dyn_cast<PHINode>(In)) {
1024     unsigned N = P->getNumIncomingValues();
1025     for (unsigned i = 0; i != N; ++i) {
1026       BasicBlock *InB = P->getIncomingBlock(i);
1027       if (InB == LoopB)
1028         continue;
1029       Value *InV = P->getIncomingValue(i);
1030       IntegerType *Ty = cast<IntegerType>(InV->getType());
1031       // Do not promote values in PHI nodes of type i1.
1032       if (Ty != P->getType()) {
1033         // If the value type does not match the PHI type, the PHI type
1034         // must have been promoted.
1035         assert(Ty->getBitWidth() < DestBW);
1036         InV = IRBuilder<>(InB->getTerminator()).CreateZExt(InV, DestTy);
1037         P->setIncomingValue(i, InV);
1038       }
1039     }
1040   } else if (ZExtInst *Z = dyn_cast<ZExtInst>(In)) {
1041     Value *Op = Z->getOperand(0);
1042     if (Op->getType() == Z->getType())
1043       Z->replaceAllUsesWith(Op);
1044     Z->eraseFromParent();
1045     return;
1046   }
1047   if (TruncInst *T = dyn_cast<TruncInst>(In)) {
1048     IntegerType *TruncTy = cast<IntegerType>(OrigTy);
1049     Value *Mask = ConstantInt::get(DestTy, (1u << TruncTy->getBitWidth()) - 1);
1050     Value *And = IRBuilder<>(In).CreateAnd(T->getOperand(0), Mask);
1051     T->replaceAllUsesWith(And);
1052     T->eraseFromParent();
1053     return;
1054   }
1055 
1056   // Promote immediates.
1057   for (unsigned i = 0, n = In->getNumOperands(); i != n; ++i) {
1058     if (ConstantInt *CI = dyn_cast<ConstantInt>(In->getOperand(i)))
1059       if (CI->getBitWidth() < DestBW)
1060         In->setOperand(i, ConstantInt::get(DestTy, CI->getZExtValue()));
1061   }
1062 }
1063 
1064 bool PolynomialMultiplyRecognize::promoteTypes(BasicBlock *LoopB,
1065       BasicBlock *ExitB) {
1066   assert(LoopB);
1067   // Skip loops where the exit block has more than one predecessor. The values
1068   // coming from the loop block will be promoted to another type, and so the
1069   // values coming into the exit block from other predecessors would also have
1070   // to be promoted.
1071   if (!ExitB || (ExitB->getSinglePredecessor() != LoopB))
1072     return false;
1073   IntegerType *DestTy = getPmpyType();
1074   // Check if the exit values have types that are no wider than the type
1075   // that we want to promote to.
1076   unsigned DestBW = DestTy->getBitWidth();
1077   for (PHINode &P : ExitB->phis()) {
1078     if (P.getNumIncomingValues() != 1)
1079       return false;
1080     assert(P.getIncomingBlock(0) == LoopB);
1081     IntegerType *T = dyn_cast<IntegerType>(P.getType());
1082     if (!T || T->getBitWidth() > DestBW)
1083       return false;
1084   }
1085 
1086   // Check all instructions in the loop.
1087   for (Instruction &In : *LoopB)
1088     if (!In.isTerminator() && !isPromotableTo(&In, DestTy))
1089       return false;
1090 
1091   // Perform the promotion.
1092   std::vector<Instruction*> LoopIns;
1093   std::transform(LoopB->begin(), LoopB->end(), std::back_inserter(LoopIns),
1094                  [](Instruction &In) { return &In; });
1095   for (Instruction *In : LoopIns)
1096     if (!In->isTerminator())
1097       promoteTo(In, DestTy, LoopB);
1098 
1099   // Fix up the PHI nodes in the exit block.
1100   BasicBlock::iterator End = ExitB->getFirstNonPHIIt();
1101   for (auto I = ExitB->begin(); I != End; ++I) {
1102     PHINode *P = dyn_cast<PHINode>(I);
1103     if (!P)
1104       break;
1105     Type *Ty0 = P->getIncomingValue(0)->getType();
1106     Type *PTy = P->getType();
1107     if (PTy != Ty0) {
1108       assert(Ty0 == DestTy);
1109       // In order to create the trunc, P must have the promoted type.
1110       P->mutateType(Ty0);
1111       Value *T = IRBuilder<>(ExitB, End).CreateTrunc(P, PTy);
1112       // In order for the RAUW to work, the types of P and T must match.
1113       P->mutateType(PTy);
1114       P->replaceAllUsesWith(T);
1115       // Final update of the P's type.
1116       P->mutateType(Ty0);
1117       cast<Instruction>(T)->setOperand(0, P);
1118     }
1119   }
1120 
1121   return true;
1122 }
1123 
1124 bool PolynomialMultiplyRecognize::findCycle(Value *Out, Value *In,
1125       ValueSeq &Cycle) {
1126   // Out = ..., In, ...
1127   if (Out == In)
1128     return true;
1129 
1130   auto *BB = cast<Instruction>(Out)->getParent();
1131   bool HadPhi = false;
1132 
1133   for (auto *U : Out->users()) {
1134     auto *I = dyn_cast<Instruction>(&*U);
1135     if (I == nullptr || I->getParent() != BB)
1136       continue;
1137     // Make sure that there are no multi-iteration cycles, e.g.
1138     //   p1 = phi(p2)
1139     //   p2 = phi(p1)
1140     // The cycle p1->p2->p1 would span two loop iterations.
1141     // Check that there is only one phi in the cycle.
1142     bool IsPhi = isa<PHINode>(I);
1143     if (IsPhi && HadPhi)
1144       return false;
1145     HadPhi |= IsPhi;
1146     if (!Cycle.insert(I))
1147       return false;
1148     if (findCycle(I, In, Cycle))
1149       break;
1150     Cycle.remove(I);
1151   }
1152   return !Cycle.empty();
1153 }
1154 
1155 void PolynomialMultiplyRecognize::classifyCycle(Instruction *DivI,
1156       ValueSeq &Cycle, ValueSeq &Early, ValueSeq &Late) {
1157   // All the values in the cycle that are between the phi node and the
1158   // divider instruction will be classified as "early", all other values
1159   // will be "late".
1160 
1161   bool IsE = true;
1162   unsigned I, N = Cycle.size();
1163   for (I = 0; I < N; ++I) {
1164     Value *V = Cycle[I];
1165     if (DivI == V)
1166       IsE = false;
1167     else if (!isa<PHINode>(V))
1168       continue;
1169     // Stop if found either.
1170     break;
1171   }
1172   // "I" is the index of either DivI or the phi node, whichever was first.
1173   // "IsE" is "false" or "true" respectively.
1174   ValueSeq &First = !IsE ? Early : Late;
1175   for (unsigned J = 0; J < I; ++J)
1176     First.insert(Cycle[J]);
1177 
1178   ValueSeq &Second = IsE ? Early : Late;
1179   Second.insert(Cycle[I]);
1180   for (++I; I < N; ++I) {
1181     Value *V = Cycle[I];
1182     if (DivI == V || isa<PHINode>(V))
1183       break;
1184     Second.insert(V);
1185   }
1186 
1187   for (; I < N; ++I)
1188     First.insert(Cycle[I]);
1189 }
1190 
1191 bool PolynomialMultiplyRecognize::classifyInst(Instruction *UseI,
1192       ValueSeq &Early, ValueSeq &Late) {
1193   // Select is an exception, since the condition value does not have to be
1194   // classified in the same way as the true/false values. The true/false
1195   // values do have to be both early or both late.
1196   if (UseI->getOpcode() == Instruction::Select) {
1197     Value *TV = UseI->getOperand(1), *FV = UseI->getOperand(2);
1198     if (Early.count(TV) || Early.count(FV)) {
1199       if (Late.count(TV) || Late.count(FV))
1200         return false;
1201       Early.insert(UseI);
1202     } else if (Late.count(TV) || Late.count(FV)) {
1203       if (Early.count(TV) || Early.count(FV))
1204         return false;
1205       Late.insert(UseI);
1206     }
1207     return true;
1208   }
1209 
1210   // It is not clear what an example of this would be, but the code below
1211   // relies on having at least one operand.
1212   if (UseI->getNumOperands() == 0)
1213     return true;
1214 
1215   bool AE = true, AL = true;
1216   for (auto &I : UseI->operands()) {
1217     if (Early.count(&*I))
1218       AL = false;
1219     else if (Late.count(&*I))
1220       AE = false;
1221   }
1222   // If the operands appear "all early" and "all late" at the same time,
1223   // then it means that none of them are actually classified as either.
1224   // This is harmless.
1225   if (AE && AL)
1226     return true;
1227   // Conversely, if they are neither "all early" nor "all late", then
1228   // we have a mixture of early and late operands that is not a known
1229   // exception.
1230   if (!AE && !AL)
1231     return false;
1232 
1233   // Check that we have covered the two special cases.
1234   assert(AE != AL);
1235 
1236   if (AE)
1237     Early.insert(UseI);
1238   else
1239     Late.insert(UseI);
1240   return true;
1241 }
1242 
1243 bool PolynomialMultiplyRecognize::commutesWithShift(Instruction *I) {
1244   switch (I->getOpcode()) {
1245     case Instruction::And:
1246     case Instruction::Or:
1247     case Instruction::Xor:
1248     case Instruction::LShr:
1249     case Instruction::Shl:
1250     case Instruction::Select:
1251     case Instruction::ICmp:
1252     case Instruction::PHI:
1253       break;
1254     default:
1255       return false;
1256   }
1257   return true;
1258 }
1259 
1260 bool PolynomialMultiplyRecognize::highBitsAreZero(Value *V,
1261       unsigned IterCount) {
1262   auto *T = dyn_cast<IntegerType>(V->getType());
1263   if (!T)
1264     return false;
1265 
1266   KnownBits Known(T->getBitWidth());
1267   computeKnownBits(V, Known, DL);
1268   return Known.countMinLeadingZeros() >= IterCount;
1269 }
1270 
1271 bool PolynomialMultiplyRecognize::keepsHighBitsZero(Value *V,
1272       unsigned IterCount) {
1273   // Assume that all inputs to the value have the high bits zero.
1274   // Check if the value itself preserves the zeros in the high bits.
1275   if (auto *C = dyn_cast<ConstantInt>(V))
1276     return C->getValue().countl_zero() >= IterCount;
1277 
1278   if (auto *I = dyn_cast<Instruction>(V)) {
1279     switch (I->getOpcode()) {
1280       case Instruction::And:
1281       case Instruction::Or:
1282       case Instruction::Xor:
1283       case Instruction::LShr:
1284       case Instruction::Select:
1285       case Instruction::ICmp:
1286       case Instruction::PHI:
1287       case Instruction::ZExt:
1288         return true;
1289     }
1290   }
1291 
1292   return false;
1293 }
1294 
1295 bool PolynomialMultiplyRecognize::isOperandShifted(Instruction *I, Value *Op) {
1296   unsigned Opc = I->getOpcode();
1297   if (Opc == Instruction::Shl || Opc == Instruction::LShr)
1298     return Op != I->getOperand(1);
1299   return true;
1300 }
1301 
1302 bool PolynomialMultiplyRecognize::convertShiftsToLeft(BasicBlock *LoopB,
1303       BasicBlock *ExitB, unsigned IterCount) {
1304   Value *CIV = getCountIV(LoopB);
1305   if (CIV == nullptr)
1306     return false;
1307   auto *CIVTy = dyn_cast<IntegerType>(CIV->getType());
1308   if (CIVTy == nullptr)
1309     return false;
1310 
1311   ValueSeq RShifts;
1312   ValueSeq Early, Late, Cycled;
1313 
1314   // Find all value cycles that contain logical right shifts by 1.
1315   for (Instruction &I : *LoopB) {
1316     using namespace PatternMatch;
1317 
1318     Value *V = nullptr;
1319     if (!match(&I, m_LShr(m_Value(V), m_One())))
1320       continue;
1321     ValueSeq C;
1322     if (!findCycle(&I, V, C))
1323       continue;
1324 
1325     // Found a cycle.
1326     C.insert(&I);
1327     classifyCycle(&I, C, Early, Late);
1328     Cycled.insert(C.begin(), C.end());
1329     RShifts.insert(&I);
1330   }
1331 
1332   // Find the set of all values affected by the shift cycles, i.e. all
1333   // cycled values, and (recursively) all their users.
1334   ValueSeq Users(Cycled.begin(), Cycled.end());
1335   for (unsigned i = 0; i < Users.size(); ++i) {
1336     Value *V = Users[i];
1337     if (!isa<IntegerType>(V->getType()))
1338       return false;
1339     auto *R = cast<Instruction>(V);
1340     // If the instruction does not commute with shifts, the loop cannot
1341     // be unshifted.
1342     if (!commutesWithShift(R))
1343       return false;
1344     for (User *U : R->users()) {
1345       auto *T = cast<Instruction>(U);
1346       // Skip users from outside of the loop. They will be handled later.
1347       // Also, skip the right-shifts and phi nodes, since they mix early
1348       // and late values.
1349       if (T->getParent() != LoopB || RShifts.count(T) || isa<PHINode>(T))
1350         continue;
1351 
1352       Users.insert(T);
1353       if (!classifyInst(T, Early, Late))
1354         return false;
1355     }
1356   }
1357 
1358   if (Users.empty())
1359     return false;
1360 
1361   // Verify that high bits remain zero.
1362   ValueSeq Internal(Users.begin(), Users.end());
1363   ValueSeq Inputs;
1364   for (unsigned i = 0; i < Internal.size(); ++i) {
1365     auto *R = dyn_cast<Instruction>(Internal[i]);
1366     if (!R)
1367       continue;
1368     for (Value *Op : R->operands()) {
1369       auto *T = dyn_cast<Instruction>(Op);
1370       if (T && T->getParent() != LoopB)
1371         Inputs.insert(Op);
1372       else
1373         Internal.insert(Op);
1374     }
1375   }
1376   for (Value *V : Inputs)
1377     if (!highBitsAreZero(V, IterCount))
1378       return false;
1379   for (Value *V : Internal)
1380     if (!keepsHighBitsZero(V, IterCount))
1381       return false;
1382 
1383   // Finally, the work can be done. Unshift each user.
1384   IRBuilder<> IRB(LoopB);
1385   std::map<Value*,Value*> ShiftMap;
1386 
1387   using CastMapType = std::map<std::pair<Value *, Type *>, Value *>;
1388 
1389   CastMapType CastMap;
1390 
1391   auto upcast = [] (CastMapType &CM, IRBuilder<> &IRB, Value *V,
1392         IntegerType *Ty) -> Value* {
1393     auto H = CM.find(std::make_pair(V, Ty));
1394     if (H != CM.end())
1395       return H->second;
1396     Value *CV = IRB.CreateIntCast(V, Ty, false);
1397     CM.insert(std::make_pair(std::make_pair(V, Ty), CV));
1398     return CV;
1399   };
1400 
1401   for (auto I = LoopB->begin(), E = LoopB->end(); I != E; ++I) {
1402     using namespace PatternMatch;
1403 
1404     if (isa<PHINode>(I) || !Users.count(&*I))
1405       continue;
1406 
1407     // Match lshr x, 1.
1408     Value *V = nullptr;
1409     if (match(&*I, m_LShr(m_Value(V), m_One()))) {
1410       replaceAllUsesOfWithIn(&*I, V, LoopB);
1411       continue;
1412     }
1413     // For each non-cycled operand, replace it with the corresponding
1414     // value shifted left.
1415     for (auto &J : I->operands()) {
1416       Value *Op = J.get();
1417       if (!isOperandShifted(&*I, Op))
1418         continue;
1419       if (Users.count(Op))
1420         continue;
1421       // Skip shifting zeros.
1422       if (isa<ConstantInt>(Op) && cast<ConstantInt>(Op)->isZero())
1423         continue;
1424       // Check if we have already generated a shift for this value.
1425       auto F = ShiftMap.find(Op);
1426       Value *W = (F != ShiftMap.end()) ? F->second : nullptr;
1427       if (W == nullptr) {
1428         IRB.SetInsertPoint(&*I);
1429         // First, the shift amount will be CIV or CIV+1, depending on
1430         // whether the value is early or late. Instead of creating CIV+1,
1431         // do a single shift of the value.
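        // (That is, shl Op, CIV+1 is emitted below as shl (shl Op, 1), CIV.)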
1432         Value *ShAmt = CIV, *ShVal = Op;
1433         auto *VTy = cast<IntegerType>(ShVal->getType());
1434         auto *ATy = cast<IntegerType>(ShAmt->getType());
1435         if (Late.count(&*I))
1436           ShVal = IRB.CreateShl(Op, ConstantInt::get(VTy, 1));
1437         // Second, the types of the shifted value and the shift amount
1438         // must match.
1439         if (VTy != ATy) {
1440           if (VTy->getBitWidth() < ATy->getBitWidth())
1441             ShVal = upcast(CastMap, IRB, ShVal, ATy);
1442           else
1443             ShAmt = upcast(CastMap, IRB, ShAmt, VTy);
1444         }
1445         // Ready to generate the shift and memoize it.
1446         W = IRB.CreateShl(ShVal, ShAmt);
1447         ShiftMap.insert(std::make_pair(Op, W));
1448       }
1449       I->replaceUsesOfWith(Op, W);
1450     }
1451   }
1452 
1453   // Update the users outside of the loop to account for the values now
1454   // being shifted left. They would normally be shifted right in the loop,
1455   // so shift them right after the loop exit.
1456   // Take advantage of the loop-closed SSA form, which has all the post-
1457   // loop values in phi nodes.
1458   IRB.SetInsertPoint(ExitB, ExitB->getFirstInsertionPt());
1459   for (auto P = ExitB->begin(), Q = ExitB->end(); P != Q; ++P) {
1460     if (!isa<PHINode>(P))
1461       break;
1462     auto *PN = cast<PHINode>(P);
1463     Value *U = PN->getIncomingValueForBlock(LoopB);
1464     if (!Users.count(U))
1465       continue;
1466     Value *S = IRB.CreateLShr(PN, ConstantInt::get(PN->getType(), IterCount));
1467     PN->replaceAllUsesWith(S);
1468     // The above RAUW will create
1469     //   S = lshr S, IterCount
1470     // so we need to fix it back into
1471     //   S = lshr PN, IterCount
1472     cast<User>(S)->replaceUsesOfWith(S, PN);
1473   }
1474 
1475   return true;
1476 }
1477 
1478 void PolynomialMultiplyRecognize::cleanupLoopBody(BasicBlock *LoopB) {
1479   for (auto &I : *LoopB)
1480     if (Value *SV = simplifyInstruction(&I, {DL, &TLI, &DT}))
1481       I.replaceAllUsesWith(SV);
1482 
1483   for (Instruction &I : llvm::make_early_inc_range(*LoopB))
1484     RecursivelyDeleteTriviallyDeadInstructions(&I, &TLI);
1485 }
1486 
1487 unsigned PolynomialMultiplyRecognize::getInverseMxN(unsigned QP) {
1488   // Arrays of coefficients of Q and the inverse, C.
1489   // Q[i] = coefficient at x^i.
1490   std::array<char,32> Q, C;
1491 
1492   for (unsigned i = 0; i < 32; ++i) {
1493     Q[i] = QP & 1;
1494     QP >>= 1;
1495   }
1496   assert(Q[0] == 1);
1497 
1498   // Find C, such that
1499   // (Q[n]*x^n + ... + Q[1]*x + Q[0]) * (C[n]*x^n + ... + C[1]*x + C[0]) = 1
1500   //
1501   // For it to have a solution, Q[0] must be 1. Since this is Z2[x], the
1502   // operations * and + are & and ^ respectively.
1503   //
1504   // Find C[i] recursively, by comparing i-th coefficient in the product
1505   // with 0 (or 1 for i=0).
1506   //
1507   // C[0] = 1, since C[0] = Q[0], and Q[0] = 1.
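  //
  // A small worked example (illustrative): for Q(x) = x + 1 (QP = 0b11),
  // the recurrence gives C[i] = C[i-1] & Q[1] = 1 for every i >= 1, so the
  // inverse is 1 + x + ... + x^31 (0xFFFFFFFF); indeed
  // (x+1)*(1+x+...+x^31) = 1 + x^32 = 1 in Z2[x] mod x^32.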
1508   C[0] = 1;
1509   for (unsigned i = 1; i < 32; ++i) {
1510     // Solve for C[i] in:
1511     //   C[0]Q[i] ^ C[1]Q[i-1] ^ ... ^ C[i-1]Q[1] ^ C[i]Q[0] = 0
1512     // This is equivalent to
1513     //   C[0]Q[i] ^ C[1]Q[i-1] ^ ... ^ C[i-1]Q[1] ^ C[i] = 0
1514     // which is
1515     //   C[0]Q[i] ^ C[1]Q[i-1] ^ ... ^ C[i-1]Q[1] = C[i]
1516     unsigned T = 0;
1517     for (unsigned j = 0; j < i; ++j)
1518       T = T ^ (C[j] & Q[i-j]);
1519     C[i] = T;
1520   }
1521 
1522   unsigned QV = 0;
1523   for (unsigned i = 0; i < 32; ++i)
1524     if (C[i])
1525       QV |= (1 << i);
1526 
1527   return QV;
1528 }
1529 
1530 Value *PolynomialMultiplyRecognize::generate(BasicBlock::iterator At,
1531       ParsedValues &PV) {
1532   IRBuilder<> B(&*At);
1533   Module *M = At->getParent()->getParent()->getParent();
1534   Function *PMF =
1535       Intrinsic::getOrInsertDeclaration(M, Intrinsic::hexagon_M4_pmpyw);
1536 
1537   Value *P = PV.P, *Q = PV.Q, *P0 = P;
1538   unsigned IC = PV.IterCount;
1539 
1540   if (PV.M != nullptr)
1541     P0 = P = B.CreateXor(P, PV.M);
1542 
1543   // Create a bit mask to clear the high bits beyond IterCount.
1544   auto *BMI = ConstantInt::get(P->getType(), APInt::getLowBitsSet(32, IC));
1545 
1546   if (PV.IterCount != 32)
1547     P = B.CreateAnd(P, BMI);
1548 
1549   if (PV.Inv) {
1550     auto *QI = dyn_cast<ConstantInt>(PV.Q);
1551     assert(QI && QI->getBitWidth() <= 32);
1552 
1553     // Again, clearing bits beyond IterCount.
1554     unsigned M = (1 << PV.IterCount) - 1;
1555     unsigned Tmp = (QI->getZExtValue() | 1) & M;
1556     unsigned QV = getInverseMxN(Tmp) & M;
1557     auto *QVI = ConstantInt::get(QI->getType(), QV);
1558     P = B.CreateCall(PMF, {P, QVI});
1559     P = B.CreateTrunc(P, QI->getType());
1560     if (IC != 32)
1561       P = B.CreateAnd(P, BMI);
1562   }
1563 
1564   Value *R = B.CreateCall(PMF, {P, Q});
1565 
1566   if (PV.M != nullptr)
1567     R = B.CreateXor(R, B.CreateIntCast(P0, R->getType(), false));
1568 
1569   return R;
1570 }
1571 
1572 static bool hasZeroSignBit(const Value *V) {
1573   if (const auto *CI = dyn_cast<const ConstantInt>(V))
1574     return CI->getValue().isNonNegative();
1575   const Instruction *I = dyn_cast<const Instruction>(V);
1576   if (!I)
1577     return false;
1578   switch (I->getOpcode()) {
1579     case Instruction::LShr:
1580       if (const auto SI = dyn_cast<const ConstantInt>(I->getOperand(1)))
1581         return SI->getZExtValue() > 0;
1582       return false;
1583     case Instruction::Or:
1584     case Instruction::Xor:
1585       return hasZeroSignBit(I->getOperand(0)) &&
1586              hasZeroSignBit(I->getOperand(1));
1587     case Instruction::And:
1588       return hasZeroSignBit(I->getOperand(0)) ||
1589              hasZeroSignBit(I->getOperand(1));
1590   }
1591   return false;
1592 }
1593 
1594 void PolynomialMultiplyRecognize::setupPreSimplifier(Simplifier &S) {
1595   S.addRule("sink-zext",
1596     // Sink zext past bitwise operations.
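    // e.g. (zext (and x y)) -> (and (zext x) (zext y))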
1597     [](Instruction *I, LLVMContext &Ctx) -> Value* {
1598       if (I->getOpcode() != Instruction::ZExt)
1599         return nullptr;
1600       Instruction *T = dyn_cast<Instruction>(I->getOperand(0));
1601       if (!T)
1602         return nullptr;
1603       switch (T->getOpcode()) {
1604         case Instruction::And:
1605         case Instruction::Or:
1606         case Instruction::Xor:
1607           break;
1608         default:
1609           return nullptr;
1610       }
1611       IRBuilder<> B(Ctx);
1612       return B.CreateBinOp(cast<BinaryOperator>(T)->getOpcode(),
1613                            B.CreateZExt(T->getOperand(0), I->getType()),
1614                            B.CreateZExt(T->getOperand(1), I->getType()));
1615     });
1616   S.addRule("xor/and -> and/xor",
1617     // (xor (and x a) (and y a)) -> (and (xor x y) a)
1618     [](Instruction *I, LLVMContext &Ctx) -> Value* {
1619       if (I->getOpcode() != Instruction::Xor)
1620         return nullptr;
1621       Instruction *And0 = dyn_cast<Instruction>(I->getOperand(0));
1622       Instruction *And1 = dyn_cast<Instruction>(I->getOperand(1));
1623       if (!And0 || !And1)
1624         return nullptr;
1625       if (And0->getOpcode() != Instruction::And ||
1626           And1->getOpcode() != Instruction::And)
1627         return nullptr;
1628       if (And0->getOperand(1) != And1->getOperand(1))
1629         return nullptr;
1630       IRBuilder<> B(Ctx);
1631       return B.CreateAnd(B.CreateXor(And0->getOperand(0), And1->getOperand(0)),
1632                          And0->getOperand(1));
1633     });
1634   S.addRule("sink binop into select",
1635     // (Op (select c x y) z) -> (select c (Op x z) (Op y z))
1636     // (Op x (select c y z)) -> (select c (Op x y) (Op x z))
1637     [](Instruction *I, LLVMContext &Ctx) -> Value* {
1638       BinaryOperator *BO = dyn_cast<BinaryOperator>(I);
1639       if (!BO)
1640         return nullptr;
1641       Instruction::BinaryOps Op = BO->getOpcode();
1642       if (SelectInst *Sel = dyn_cast<SelectInst>(BO->getOperand(0))) {
1643         IRBuilder<> B(Ctx);
1644         Value *X = Sel->getTrueValue(), *Y = Sel->getFalseValue();
1645         Value *Z = BO->getOperand(1);
1646         return B.CreateSelect(Sel->getCondition(),
1647                               B.CreateBinOp(Op, X, Z),
1648                               B.CreateBinOp(Op, Y, Z));
1649       }
1650       if (SelectInst *Sel = dyn_cast<SelectInst>(BO->getOperand(1))) {
1651         IRBuilder<> B(Ctx);
1652         Value *X = BO->getOperand(0);
1653         Value *Y = Sel->getTrueValue(), *Z = Sel->getFalseValue();
1654         return B.CreateSelect(Sel->getCondition(),
1655                               B.CreateBinOp(Op, X, Y),
1656                               B.CreateBinOp(Op, X, Z));
1657       }
1658       return nullptr;
1659     });
1660   S.addRule("fold select-select",
1661     // (select c (select c x y) z) -> (select c x z)
1662     // (select c x (select c y z)) -> (select c x z)
1663     [](Instruction *I, LLVMContext &Ctx) -> Value* {
1664       SelectInst *Sel = dyn_cast<SelectInst>(I);
1665       if (!Sel)
1666         return nullptr;
1667       IRBuilder<> B(Ctx);
1668       Value *C = Sel->getCondition();
1669       if (SelectInst *Sel0 = dyn_cast<SelectInst>(Sel->getTrueValue())) {
1670         if (Sel0->getCondition() == C)
1671           return B.CreateSelect(C, Sel0->getTrueValue(), Sel->getFalseValue());
1672       }
1673       if (SelectInst *Sel1 = dyn_cast<SelectInst>(Sel->getFalseValue())) {
1674         if (Sel1->getCondition() == C)
1675           return B.CreateSelect(C, Sel->getTrueValue(), Sel1->getFalseValue());
1676       }
1677       return nullptr;
1678     });
1679   S.addRule("or-signbit -> xor-signbit",
1680     // (or (lshr x 1) 0x800.0) -> (xor (lshr x 1) 0x800.0)
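         // Valid because hasZeroSignBit guarantees the sign bit of the other
         // operand is clear, so setting it with 'or' and flipping it with 'xor'
         // produce the same value.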
1681     [](Instruction *I, LLVMContext &Ctx) -> Value* {
1682       if (I->getOpcode() != Instruction::Or)
1683         return nullptr;
1684       ConstantInt *Msb = dyn_cast<ConstantInt>(I->getOperand(1));
1685       if (!Msb || !Msb->getValue().isSignMask())
1686         return nullptr;
1687       if (!hasZeroSignBit(I->getOperand(0)))
1688         return nullptr;
1689       return IRBuilder<>(Ctx).CreateXor(I->getOperand(0), Msb);
1690     });
1691   S.addRule("sink lshr into binop",
1692     // (lshr (BitOp x y) c) -> (BitOp (lshr x c) (lshr y c))
1693     [](Instruction *I, LLVMContext &Ctx) -> Value* {
1694       if (I->getOpcode() != Instruction::LShr)
1695         return nullptr;
1696       BinaryOperator *BitOp = dyn_cast<BinaryOperator>(I->getOperand(0));
1697       if (!BitOp)
1698         return nullptr;
1699       switch (BitOp->getOpcode()) {
1700         case Instruction::And:
1701         case Instruction::Or:
1702         case Instruction::Xor:
1703           break;
1704         default:
1705           return nullptr;
1706       }
1707       IRBuilder<> B(Ctx);
1708       Value *S = I->getOperand(1);
1709       return B.CreateBinOp(BitOp->getOpcode(),
1710                 B.CreateLShr(BitOp->getOperand(0), S),
1711                 B.CreateLShr(BitOp->getOperand(1), S));
1712     });
1713   S.addRule("expose bitop-const",
1714     // (BitOp1 (BitOp2 x a) b) -> (BitOp2 x (BitOp1 a b))
1715     [](Instruction *I, LLVMContext &Ctx) -> Value* {
1716       auto IsBitOp = [](unsigned Op) -> bool {
1717         switch (Op) {
1718           case Instruction::And:
1719           case Instruction::Or:
1720           case Instruction::Xor:
1721             return true;
1722         }
1723         return false;
1724       };
1725       BinaryOperator *BitOp1 = dyn_cast<BinaryOperator>(I);
1726       if (!BitOp1 || !IsBitOp(BitOp1->getOpcode()))
1727         return nullptr;
1728       BinaryOperator *BitOp2 = dyn_cast<BinaryOperator>(BitOp1->getOperand(0));
1729       if (!BitOp2 || !IsBitOp(BitOp2->getOpcode()))
1730         return nullptr;
1731       ConstantInt *CA = dyn_cast<ConstantInt>(BitOp2->getOperand(1));
1732       ConstantInt *CB = dyn_cast<ConstantInt>(BitOp1->getOperand(1));
1733       if (!CA || !CB)
1734         return nullptr;
1735       IRBuilder<> B(Ctx);
1736       Value *X = BitOp2->getOperand(0);
1737       return B.CreateBinOp(BitOp2->getOpcode(), X,
1738                 B.CreateBinOp(BitOp1->getOpcode(), CA, CB));
1739     });
1740 }
1741 
1742 void PolynomialMultiplyRecognize::setupPostSimplifier(Simplifier &S) {
1743   S.addRule("(and (xor (and x a) y) b) -> (and (xor x y) b), if b == b&a",
1744     [](Instruction *I, LLVMContext &Ctx) -> Value* {
1745       if (I->getOpcode() != Instruction::And)
1746         return nullptr;
1747       Instruction *Xor = dyn_cast<Instruction>(I->getOperand(0));
1748       ConstantInt *C0 = dyn_cast<ConstantInt>(I->getOperand(1));
1749       if (!Xor || !C0)
1750         return nullptr;
1751       if (Xor->getOpcode() != Instruction::Xor)
1752         return nullptr;
1753       Instruction *And0 = dyn_cast<Instruction>(Xor->getOperand(0));
1754       Instruction *And1 = dyn_cast<Instruction>(Xor->getOperand(1));
1755       // Pick the operand that is an 'and'.
1756       if (!And0 || And0->getOpcode() != Instruction::And)
1757         std::swap(And0, And1);
           // Bail out if neither operand of the xor is an 'and'; otherwise the
           // uses of And0 below would not be valid.
           if (!And0 || And0->getOpcode() != Instruction::And)
             return nullptr;
1758       ConstantInt *C1 = dyn_cast<ConstantInt>(And0->getOperand(1));
1759       if (!C1)
1760         return nullptr;
1761       uint32_t V0 = C0->getZExtValue();
1762       uint32_t V1 = C1->getZExtValue();
1763       if (V0 != (V0 & V1))
1764         return nullptr;
1765       IRBuilder<> B(Ctx);
1766       return B.CreateAnd(B.CreateXor(And0->getOperand(0), And1), C0);
1767     });
1768 }
1769 
1770 bool PolynomialMultiplyRecognize::recognize() {
1771   LLVM_DEBUG(dbgs() << "Starting PolynomialMultiplyRecognize on loop\n"
1772                     << *CurLoop << '\n');
1773   // Restrictions:
1774   // - The loop must consist of a single block.
1775   // - The iteration count must be known at compile-time.
1776   // - The loop must have an induction variable starting from 0, and
1777   //   incremented in each iteration of the loop.
1778   BasicBlock *LoopB = CurLoop->getHeader();
1779   LLVM_DEBUG(dbgs() << "Loop header:\n" << *LoopB);
1780 
1781   if (LoopB != CurLoop->getLoopLatch())
1782     return false;
1783   BasicBlock *ExitB = CurLoop->getExitBlock();
1784   if (ExitB == nullptr)
1785     return false;
1786   BasicBlock *EntryB = CurLoop->getLoopPreheader();
1787   if (EntryB == nullptr)
1788     return false;
1789 
1790   unsigned IterCount = 0;
1791   const SCEV *CT = SE.getBackedgeTakenCount(CurLoop);
1792   if (isa<SCEVCouldNotCompute>(CT))
1793     return false;
1794   if (auto *CV = dyn_cast<SCEVConstant>(CT))
1795     IterCount = CV->getValue()->getZExtValue() + 1;
1796 
1797   Value *CIV = getCountIV(LoopB);
1798   if (CIV == nullptr)
1799     return false;
1800   ParsedValues PV;
1801   Simplifier PreSimp;
1802   PV.IterCount = IterCount;
1803   LLVM_DEBUG(dbgs() << "Loop IV: " << *CIV << "\nIterCount: " << IterCount
1804                     << '\n');
1805 
1806   setupPreSimplifier(PreSimp);
1807 
1808   // Perform a preliminary scan of select instructions to see if any of them
1809   // looks like a generator of the polynomial multiply steps. Assume that a
1810   // loop can only contain a single transformable operation, so stop the
1811   // traversal after the first reasonable candidate has been found.
1812   // XXX: Currently this approach can modify the loop before being 100% sure
1813   // that the transformation can be carried out.
1814   bool FoundPreScan = false;
1815   auto FeedsPHI = [LoopB](const Value *V) -> bool {
1816     for (const Value *U : V->users()) {
1817       if (const auto *P = dyn_cast<const PHINode>(U))
1818         if (P->getParent() == LoopB)
1819           return true;
1820     }
1821     return false;
1822   };
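       // Only a select whose value feeds a PHI node in the loop header (i.e. is
       // carried into the next iteration) can implement the per-iteration step
       // of the pmpy idiom, so the scan below only considers such selects.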
1823   for (Instruction &In : *LoopB) {
1824     SelectInst *SI = dyn_cast<SelectInst>(&In);
1825     if (!SI || !FeedsPHI(SI))
1826       continue;
1827 
1828     Simplifier::Context C(SI);
1829     Value *T = PreSimp.simplify(C);
1830     SelectInst *SelI = (T && isa<SelectInst>(T)) ? cast<SelectInst>(T) : SI;
1831     LLVM_DEBUG(dbgs() << "scanSelect(pre-scan): " << PE(C, SelI) << '\n');
1832     if (scanSelect(SelI, LoopB, EntryB, CIV, PV, true)) {
1833       FoundPreScan = true;
1834       if (SelI != SI) {
1835         Value *NewSel = C.materialize(LoopB, SI->getIterator());
1836         SI->replaceAllUsesWith(NewSel);
1837         RecursivelyDeleteTriviallyDeadInstructions(SI, &TLI);
1838       }
1839       break;
1840     }
1841   }
1842 
1843   if (!FoundPreScan) {
1844     LLVM_DEBUG(dbgs() << "Have not found candidates for pmpy\n");
1845     return false;
1846   }
1847 
1848   if (!PV.Left) {
1849     // The right shift version actually only returns the higher bits of
1850     // the result (each iteration discards the LSB). If we want to convert it
1851     // to a left-shifting loop, the working data type must be at least as
1852     // wide as the target's pmpy instruction.
1853     if (!promoteTypes(LoopB, ExitB))
1854       return false;
1855     // Run post-promotion simplifications.
1856     Simplifier PostSimp;
1857     setupPostSimplifier(PostSimp);
1858     for (Instruction &In : *LoopB) {
1859       SelectInst *SI = dyn_cast<SelectInst>(&In);
1860       if (!SI || !FeedsPHI(SI))
1861         continue;
1862       Simplifier::Context C(SI);
1863       Value *T = PostSimp.simplify(C);
1864       SelectInst *SelI = dyn_cast_or_null<SelectInst>(T);
1865       if (SelI != SI) {
1866         Value *NewSel = C.materialize(LoopB, SI->getIterator());
1867         SI->replaceAllUsesWith(NewSel);
1868         RecursivelyDeleteTriviallyDeadInstructions(SI, &TLI);
1869       }
1870       break;
1871     }
1872 
1873     if (!convertShiftsToLeft(LoopB, ExitB, IterCount))
1874       return false;
1875     cleanupLoopBody(LoopB);
1876   }
1877 
1878   // Scan the loop again, find the generating select instruction.
1879   bool FoundScan = false;
1880   for (Instruction &In : *LoopB) {
1881     SelectInst *SelI = dyn_cast<SelectInst>(&In);
1882     if (!SelI)
1883       continue;
1884     LLVM_DEBUG(dbgs() << "scanSelect: " << *SelI << '\n');
1885     FoundScan = scanSelect(SelI, LoopB, EntryB, CIV, PV, false);
1886     if (FoundScan)
1887       break;
1888   }
1889   assert(FoundScan);
1890 
1891   LLVM_DEBUG({
1892     StringRef PP = (PV.M ? "(P+M)" : "P");
1893     if (!PV.Inv)
1894       dbgs() << "Found pmpy idiom: R = " << PP << ".Q\n";
1895     else
1896       dbgs() << "Found inverse pmpy idiom: R = (" << PP << "/Q).Q) + "
1897              << PP << "\n";
1898     dbgs() << "  Res:" << *PV.Res << "\n  P:" << *PV.P << "\n";
1899     if (PV.M)
1900       dbgs() << "  M:" << *PV.M << "\n";
1901     dbgs() << "  Q:" << *PV.Q << "\n";
1902     dbgs() << "  Iteration count:" << PV.IterCount << "\n";
1903   });
1904 
1905   BasicBlock::iterator At(EntryB->getTerminator());
1906   Value *PM = generate(At, PV);
1907   if (PM == nullptr)
1908     return false;
1909 
1910   if (PM->getType() != PV.Res->getType())
1911     PM = IRBuilder<>(&*At).CreateIntCast(PM, PV.Res->getType(), false);
1912 
1913   PV.Res->replaceAllUsesWith(PM);
1914   PV.Res->eraseFromParent();
1915   return true;
1916 }
1917 
1918 int HexagonLoopIdiomRecognize::getSCEVStride(const SCEVAddRecExpr *S) {
1919   if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getOperand(1)))
1920     return SC->getAPInt().getSExtValue();
1921   return 0;
1922 }
1923 
1924 bool HexagonLoopIdiomRecognize::isLegalStore(Loop *CurLoop, StoreInst *SI) {
1925   // Allow volatile stores if HexagonVolatileMemcpy is enabled.
1926   if (!(SI->isVolatile() && HexagonVolatileMemcpy) && !SI->isSimple())
1927     return false;
1928 
1929   Value *StoredVal = SI->getValueOperand();
1930   Value *StorePtr = SI->getPointerOperand();
1931 
1932   // Reject stores that are not byte-sized, or so large that their size
       // overflows an unsigned.
1933   uint64_t SizeInBits = DL->getTypeSizeInBits(StoredVal->getType());
1934   if ((SizeInBits & 7) || (SizeInBits >> 32) != 0)
1935     return false;
1936 
1937   // See if the pointer expression is an AddRec like {base,+,1} on the current
1938   // loop, which indicates a strided store.  If we have something else, it's a
1939   // random store we can't handle.
1940   auto *StoreEv = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
1941   if (!StoreEv || StoreEv->getLoop() != CurLoop || !StoreEv->isAffine())
1942     return false;
1943 
1944   // Check to see if the stride matches the size of the store.  If so, then we
1945   // know that every byte is touched in the loop.
1946   int Stride = getSCEVStride(StoreEv);
1947   if (Stride == 0)
1948     return false;
1949   unsigned StoreSize = DL->getTypeStoreSize(SI->getValueOperand()->getType());
1950   if (StoreSize != unsigned(std::abs(Stride)))
1951     return false;
1952 
1953   // The store must be feeding a simple (non-volatile, non-atomic) load.
1954   LoadInst *LI = dyn_cast<LoadInst>(SI->getValueOperand());
1955   if (!LI || !LI->isSimple())
1956     return false;
1957 
1958   // See if the pointer expression is an AddRec like {base,+,1} on the current
1959   // loop, which indicates a strided load.  If we have something else, it's a
1960   // random load we can't handle.
1961   Value *LoadPtr = LI->getPointerOperand();
1962   auto *LoadEv = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(LoadPtr));
1963   if (!LoadEv || LoadEv->getLoop() != CurLoop || !LoadEv->isAffine())
1964     return false;
1965 
1966   // The store and load must share the same stride.
1967   if (StoreEv->getOperand(1) != LoadEv->getOperand(1))
1968     return false;
1969 
1970   // Success.  This store can be converted into a memcpy.
1971   return true;
1972 }
1973 
1974 /// mayLoopAccessLocation - Return true if the specified loop might access the
1975 /// specified pointer location, which is a loop-strided access.  The 'Access'
1976 /// argument specifies what the verboten forms of access are (read or write).
1977 static bool
1978 mayLoopAccessLocation(Value *Ptr, ModRefInfo Access, Loop *L,
1979                       const SCEV *BECount, unsigned StoreSize,
1980                       AliasAnalysis &AA,
1981                       SmallPtrSetImpl<Instruction *> &Ignored) {
1982   // Get the location that may be stored across the loop.  Since the access
1983   // is strided positively through memory, we say that the modified location
1984   // starts at the pointer and has infinite size.
1985   LocationSize AccessSize = LocationSize::afterPointer();
1986 
1987   // If the loop iterates a fixed number of times, we can refine the access
1988   // size to be exactly the size of the accessed region, (BECount+1)*StoreSize.
1989   if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount))
1990     AccessSize = LocationSize::precise((BECst->getValue()->getZExtValue() + 1) *
1991                                        StoreSize);
1992 
1993   // TODO: For this to be really effective, we have to dive into the pointer
1994   // operand in the store.  Store to &A[i] of 100 will always return may alias
1995   // with store of &A[100], we need to StoreLoc to be "A" with size of 100,
1996   // which will then no-alias a store to &A[100].
1997   MemoryLocation StoreLoc(Ptr, AccessSize);
1998 
1999   for (auto *B : L->blocks())
2000     for (auto &I : *B)
2001       if (Ignored.count(&I) == 0 &&
2002           isModOrRefSet(AA.getModRefInfo(&I, StoreLoc) & Access))
2003         return true;
2004 
2005   return false;
2006 }
2007 
2008 void HexagonLoopIdiomRecognize::collectStores(Loop *CurLoop, BasicBlock *BB,
2009       SmallVectorImpl<StoreInst*> &Stores) {
2010   Stores.clear();
2011   for (Instruction &I : *BB)
2012     if (StoreInst *SI = dyn_cast<StoreInst>(&I))
2013       if (isLegalStore(CurLoop, SI))
2014         Stores.push_back(SI);
2015 }
2016 
2017 bool HexagonLoopIdiomRecognize::processCopyingStore(Loop *CurLoop,
2018       StoreInst *SI, const SCEV *BECount) {
2019   assert((SI->isSimple() || (SI->isVolatile() && HexagonVolatileMemcpy)) &&
2020          "Expected only non-volatile stores, or Hexagon-specific memcpy"
2021          "to volatile destination.");
2022 
2023   Value *StorePtr = SI->getPointerOperand();
2024   auto *StoreEv = cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
2025   unsigned Stride = getSCEVStride(StoreEv);
2026   unsigned StoreSize = DL->getTypeStoreSize(SI->getValueOperand()->getType());
2027   if (Stride != StoreSize)
2028     return false;
2029 
2030   // The stored value is known (from isLegalStore) to be a simple load whose
2031   // pointer expression is an affine AddRec on the current loop, i.e. a
2032   // strided load with the same stride as the store.
2033   auto *LI = cast<LoadInst>(SI->getValueOperand());
2034   auto *LoadEv = cast<SCEVAddRecExpr>(SE->getSCEV(LI->getPointerOperand()));
2035 
2036   // The trip count of the loop and the base pointer of the addrec SCEV are
2037   // guaranteed to be loop invariant, which means that they should dominate
2038   // the header.  This allows us to insert code for them in the preheader.
2039   BasicBlock *Preheader = CurLoop->getLoopPreheader();
2040   Instruction *ExpPt = Preheader->getTerminator();
2041   IRBuilder<> Builder(ExpPt);
2042   SCEVExpander Expander(*SE, *DL, "hexagon-loop-idiom");
2043 
2044   Type *IntPtrTy = Builder.getIntPtrTy(*DL, SI->getPointerAddressSpace());
2045 
2046   // Okay, we have a strided store "p[i]" of a loaded value.  We can turn
2047   // this into a memcpy/memmove in the loop preheader now if we want.  However,
2048   // this would be unsafe to do if there is anything else in the loop that may
2049   // read or write the memory region we're storing to.  For memcpy, this
2050   // includes the load that feeds the stores.  Check for an alias by generating
2051   // the base address and checking everything.
2052   Value *StoreBasePtr = Expander.expandCodeFor(StoreEv->getStart(),
2053       Builder.getPtrTy(SI->getPointerAddressSpace()), ExpPt);
2054   Value *LoadBasePtr = nullptr;
2055 
2056   bool Overlap = false;
2057   bool DestVolatile = SI->isVolatile();
2058   Type *BECountTy = BECount->getType();
2059 
2060   if (DestVolatile) {
2061     // The trip count must fit in i32, since it is the type of the "num_words"
2062     // argument to hexagon_memcpy_forward_vp4cp4n2.
2063     if (StoreSize != 4 || DL->getTypeSizeInBits(BECountTy) > 32) {
2064 CleanupAndExit:
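           // Note: although this label sits inside the 'if (DestVolatile)'
           // block, it is also the common bail-out target reached via 'goto'
           // from the checks below.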
2065       // If we generated new code for the base pointer, clean up.
2066       Expander.clear();
2067       if (StoreBasePtr && (LoadBasePtr != StoreBasePtr)) {
2068         RecursivelyDeleteTriviallyDeadInstructions(StoreBasePtr, TLI);
2069         StoreBasePtr = nullptr;
2070       }
2071       if (LoadBasePtr) {
2072         RecursivelyDeleteTriviallyDeadInstructions(LoadBasePtr, TLI);
2073         LoadBasePtr = nullptr;
2074       }
2075       return false;
2076     }
2077   }
2078 
2079   SmallPtrSet<Instruction*, 2> Ignore1;
2080   Ignore1.insert(SI);
2081   if (mayLoopAccessLocation(StoreBasePtr, ModRefInfo::ModRef, CurLoop, BECount,
2082                             StoreSize, *AA, Ignore1)) {
2083     // Check if the load is the offending instruction.
2084     Ignore1.insert(LI);
2085     if (mayLoopAccessLocation(StoreBasePtr, ModRefInfo::ModRef, CurLoop,
2086                               BECount, StoreSize, *AA, Ignore1)) {
2087       // Still bad. Nothing we can do.
2088       goto CleanupAndExit;
2089     }
2090     // It worked with the load ignored.
2091     Overlap = true;
2092   }
2093 
2094   if (!Overlap) {
2095     if (DisableMemcpyIdiom || !HasMemcpy)
2096       goto CleanupAndExit;
2097   } else {
2098     // Don't generate memmove if this function will be inlined. This is
2099     // because the caller will undergo this transformation after inlining.
2100     Function *Func = CurLoop->getHeader()->getParent();
2101     if (Func->hasFnAttribute(Attribute::AlwaysInline))
2102       goto CleanupAndExit;
2103 
2104     // In case of a memmove, the call to memmove will be executed instead
2105     // of the loop, so we need to make sure that there is nothing in the
2106     // loop other than the load, the store, and the instructions that these
2107     // two depend on.
2108     SmallVector<Instruction*,2> Insts;
2109     Insts.push_back(SI);
2110     Insts.push_back(LI);
2111     if (!coverLoop(CurLoop, Insts))
2112       goto CleanupAndExit;
2113 
2114     if (DisableMemmoveIdiom || !HasMemmove)
2115       goto CleanupAndExit;
2116     bool IsNested = CurLoop->getParentLoop() != nullptr;
2117     if (IsNested && OnlyNonNestedMemmove)
2118       goto CleanupAndExit;
2119   }
2120 
2121   // For a memcpy, we have to make sure that the input array is not being
2122   // mutated by the loop.
2123   LoadBasePtr = Expander.expandCodeFor(LoadEv->getStart(),
2124       Builder.getPtrTy(LI->getPointerAddressSpace()), ExpPt);
2125 
2126   SmallPtrSet<Instruction*, 2> Ignore2;
2127   Ignore2.insert(SI);
2128   if (mayLoopAccessLocation(LoadBasePtr, ModRefInfo::Mod, CurLoop, BECount,
2129                             StoreSize, *AA, Ignore2))
2130     goto CleanupAndExit;
2131 
2132   // Check the stride.
2133   bool StridePos = getSCEVStride(LoadEv) >= 0;
2134 
2135   // Currently, the volatile memcpy only emulates traversing memory forward.
2136   if (!StridePos && DestVolatile)
2137     goto CleanupAndExit;
2138 
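       // A runtime guard is needed when the source and destination may overlap
       // (the memmove case) or when the volatile-memcpy helper is used: in
       // either case the original loop is kept and is only bypassed when the
       // guard below succeeds.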
2139   bool RuntimeCheck = (Overlap || DestVolatile);
2140 
2141   BasicBlock *ExitB;
2142   if (RuntimeCheck) {
2143     // The runtime check needs a single exit block.
2144     SmallVector<BasicBlock*, 8> ExitBlocks;
2145     CurLoop->getUniqueExitBlocks(ExitBlocks);
2146     if (ExitBlocks.size() != 1)
2147       goto CleanupAndExit;
2148     ExitB = ExitBlocks[0];
2149   }
2150 
2151   // The # stored bytes is (BECount+1)*Size.  Expand the trip count out to
2152   // pointer size if it isn't already.
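       // For example, a loop copying 100 i32 elements has BECount == 99 and
       // StoreSize == 4, giving NumBytes == (99+1)*4 == 400.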
2153   LLVMContext &Ctx = SI->getContext();
2154   BECount = SE->getTruncateOrZeroExtend(BECount, IntPtrTy);
2155   DebugLoc DLoc = SI->getDebugLoc();
2156 
2157   const SCEV *NumBytesS =
2158       SE->getAddExpr(BECount, SE->getOne(IntPtrTy), SCEV::FlagNUW);
2159   if (StoreSize != 1)
2160     NumBytesS = SE->getMulExpr(NumBytesS, SE->getConstant(IntPtrTy, StoreSize),
2161                                SCEV::FlagNUW);
2162   Value *NumBytes = Expander.expandCodeFor(NumBytesS, IntPtrTy, ExpPt);
2163   if (Instruction *In = dyn_cast<Instruction>(NumBytes))
2164     if (Value *Simp = simplifyInstruction(In, {*DL, TLI, DT}))
2165       NumBytes = Simp;
2166 
2167   CallInst *NewCall;
2168 
2169   if (RuntimeCheck) {
2170     unsigned Threshold = RuntimeMemSizeThreshold;
2171     if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes)) {
2172       uint64_t C = CI->getZExtValue();
2173       if (Threshold != 0 && C < Threshold)
2174         goto CleanupAndExit;
2175       if (C < CompileTimeMemSizeThreshold)
2176         goto CleanupAndExit;
2177     }
2178 
2179     BasicBlock *Header = CurLoop->getHeader();
2180     Function *Func = Header->getParent();
2181     Loop *ParentL = LF->getLoopFor(Preheader);
2182     StringRef HeaderName = Header->getName();
2183 
2184     // Create a new (empty) preheader, and update the PHI nodes in the
2185     // header to use the new preheader.
2186     BasicBlock *NewPreheader = BasicBlock::Create(Ctx, HeaderName+".rtli.ph",
2187                                                   Func, Header);
2188     if (ParentL)
2189       ParentL->addBasicBlockToLoop(NewPreheader, *LF);
2190     IRBuilder<>(NewPreheader).CreateBr(Header);
2191     for (auto &In : *Header) {
2192       PHINode *PN = dyn_cast<PHINode>(&In);
2193       if (!PN)
2194         break;
2195       int bx = PN->getBasicBlockIndex(Preheader);
2196       if (bx >= 0)
2197         PN->setIncomingBlock(bx, NewPreheader);
2198     }
2199     DT->addNewBlock(NewPreheader, Preheader);
2200     DT->changeImmediateDominator(Header, NewPreheader);
2201 
2202     // Check for safe conditions to execute memmove.
2203     // If stride is positive, copying things from higher to lower addresses
2204     // is equivalent to memmove.  For negative stride, it's the other way
2205     // around.  Copying forward in memory with positive stride may not be
2206     // same as memmove since we may be copying values that we just stored
2207     // in some previous iteration.
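         // The guard built below takes the memmove path when either
         // LowA < HighA, or NumBytes <= LowA - HighA (the regions do not
         // overlap), and, if a runtime threshold is set, only when NumBytes
         // also exceeds that threshold; otherwise control falls through to
         // the original loop.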
2208     Value *LA = Builder.CreatePtrToInt(LoadBasePtr, IntPtrTy);
2209     Value *SA = Builder.CreatePtrToInt(StoreBasePtr, IntPtrTy);
2210     Value *LowA = StridePos ? SA : LA;
2211     Value *HighA = StridePos ? LA : SA;
2212     Value *CmpA = Builder.CreateICmpULT(LowA, HighA);
2213     Value *Cond = CmpA;
2214 
2215     // Check for distance between pointers. Since the case LowA < HighA
2216     // is checked for above, assume LowA >= HighA.
2217     Value *Dist = Builder.CreateSub(LowA, HighA);
2218     Value *CmpD = Builder.CreateICmpSLE(NumBytes, Dist);
2219     Value *CmpEither = Builder.CreateOr(Cond, CmpD);
2220     Cond = CmpEither;
2221 
2222     if (Threshold != 0) {
2223       Type *Ty = NumBytes->getType();
2224       Value *Thr = ConstantInt::get(Ty, Threshold);
2225       Value *CmpB = Builder.CreateICmpULT(Thr, NumBytes);
2226       Value *CmpBoth = Builder.CreateAnd(Cond, CmpB);
2227       Cond = CmpBoth;
2228     }
2229     BasicBlock *MemmoveB = BasicBlock::Create(Ctx, Header->getName()+".rtli",
2230                                               Func, NewPreheader);
2231     if (ParentL)
2232       ParentL->addBasicBlockToLoop(MemmoveB, *LF);
2233     Instruction *OldT = Preheader->getTerminator();
2234     Builder.CreateCondBr(Cond, MemmoveB, NewPreheader);
2235     OldT->eraseFromParent();
2236     Preheader->setName(Preheader->getName()+".old");
2237     DT->addNewBlock(MemmoveB, Preheader);
2238     // Find the new immediate dominator of the exit block.
2239     BasicBlock *ExitD = Preheader;
2240     for (BasicBlock *PB : predecessors(ExitB)) {
2241       ExitD = DT->findNearestCommonDominator(ExitD, PB);
2242       if (!ExitD)
2243         break;
2244     }
2245     // If the prior immediate dominator of ExitB was dominated by the
2246     // old preheader, then the old preheader becomes the new immediate
2247     // dominator.  Otherwise don't change anything (because the newly
2248     // added blocks are dominated by the old preheader).
2249     if (ExitD && DT->dominates(Preheader, ExitD)) {
2250       DomTreeNode *BN = DT->getNode(ExitB);
2251       DomTreeNode *DN = DT->getNode(ExitD);
2252       BN->setIDom(DN);
2253     }
2254 
2255     // Add a call to memmove to the conditional block.
2256     IRBuilder<> CondBuilder(MemmoveB);
2257     CondBuilder.CreateBr(ExitB);
2258     CondBuilder.SetInsertPoint(MemmoveB->getTerminator());
2259 
2260     if (DestVolatile) {
2261       Type *Int32Ty = Type::getInt32Ty(Ctx);
2262       Type *PtrTy = PointerType::get(Ctx, 0);
2263       Type *VoidTy = Type::getVoidTy(Ctx);
2264       Module *M = Func->getParent();
2265       FunctionCallee Fn = M->getOrInsertFunction(
2266           HexagonVolatileMemcpyName, VoidTy, PtrTy, PtrTy, Int32Ty);
2267 
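           // StoreSize == 4 was verified above, so the i32 trip count
           // (BECount+1) is exactly the number of 4-byte words to copy.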
2268       const SCEV *OneS = SE->getConstant(Int32Ty, 1);
2269       const SCEV *BECount32 = SE->getTruncateOrZeroExtend(BECount, Int32Ty);
2270       const SCEV *NumWordsS = SE->getAddExpr(BECount32, OneS, SCEV::FlagNUW);
2271       Value *NumWords = Expander.expandCodeFor(NumWordsS, Int32Ty,
2272                                                MemmoveB->getTerminator());
2273       if (Instruction *In = dyn_cast<Instruction>(NumWords))
2274         if (Value *Simp = simplifyInstruction(In, {*DL, TLI, DT}))
2275           NumWords = Simp;
2276 
2277       NewCall = CondBuilder.CreateCall(Fn,
2278                                        {StoreBasePtr, LoadBasePtr, NumWords});
2279     } else {
2280       NewCall = CondBuilder.CreateMemMove(
2281           StoreBasePtr, SI->getAlign(), LoadBasePtr, LI->getAlign(), NumBytes);
2282     }
2283   } else {
2284     NewCall = Builder.CreateMemCpy(StoreBasePtr, SI->getAlign(), LoadBasePtr,
2285                                    LI->getAlign(), NumBytes);
2286     // Okay, the memcpy has been formed.  Zap the original store and
2287     // anything that feeds into it.
2288     RecursivelyDeleteTriviallyDeadInstructions(SI, TLI);
2289   }
2290 
2291   NewCall->setDebugLoc(DLoc);
2292 
2293   LLVM_DEBUG(dbgs() << "  Formed " << (Overlap ? "memmove: " : "memcpy: ")
2294                     << *NewCall << "\n"
2295                     << "    from load ptr=" << *LoadEv << " at: " << *LI << "\n"
2296                     << "    from store ptr=" << *StoreEv << " at: " << *SI
2297                     << "\n");
2298 
2299   return true;
2300 }
2301 
2302 // Check if the instructions in Insts, together with their dependencies,
2303 // cover the loop in the sense that the loop could be safely eliminated once
2304 // the instructions in Insts are removed.
2305 bool HexagonLoopIdiomRecognize::coverLoop(Loop *L,
2306       SmallVectorImpl<Instruction*> &Insts) const {
2307   SmallSet<BasicBlock*,8> LoopBlocks;
2308   for (auto *B : L->blocks())
2309     LoopBlocks.insert(B);
2310 
2311   SetVector<Instruction*> Worklist(Insts.begin(), Insts.end());
2312 
2313   // Collect all instructions from the loop that the instructions in Insts
2314   // depend on (plus their dependencies, etc.).  These instructions will
2315   // constitute the expression trees that feed those in Insts, but the trees
2316   // will be limited only to instructions contained in the loop.
2317   for (unsigned i = 0; i < Worklist.size(); ++i) {
2318     Instruction *In = Worklist[i];
2319     for (auto I = In->op_begin(), E = In->op_end(); I != E; ++I) {
2320       Instruction *OpI = dyn_cast<Instruction>(I);
2321       if (!OpI)
2322         continue;
2323       BasicBlock *PB = OpI->getParent();
2324       if (!LoopBlocks.count(PB))
2325         continue;
2326       Worklist.insert(OpI);
2327     }
2328   }
2329 
2330   // Scan all instructions in the loop.  If any of them has a user outside
2331   // of the loop, or outside of the expressions collected above, then either
2332   // the loop has a side-effect visible outside of it, or there are
2333   // instructions in it that are not involved in the original set Insts.
2334   for (auto *B : L->blocks()) {
2335     for (auto &In : *B) {
2336       if (isa<BranchInst>(In) || isa<DbgInfoIntrinsic>(In))
2337         continue;
2338       if (!Worklist.count(&In) && In.mayHaveSideEffects())
2339         return false;
2340       for (auto *K : In.users()) {
2341         Instruction *UseI = dyn_cast<Instruction>(K);
2342         if (!UseI)
2343           continue;
2344         BasicBlock *UseB = UseI->getParent();
2345         if (LF->getLoopFor(UseB) != L)
2346           return false;
2347       }
2348     }
2349   }
2350 
2351   return true;
2352 }
2353 
2354 /// runOnLoopBlock - Process the specified block, which lives in a counted loop
2355 /// with the specified backedge count.  This block is known to be in the current
2356 /// loop and not in any subloops.
2357 bool HexagonLoopIdiomRecognize::runOnLoopBlock(Loop *CurLoop, BasicBlock *BB,
2358       const SCEV *BECount, SmallVectorImpl<BasicBlock*> &ExitBlocks) {
2359   // We can only promote stores in this block if they are unconditionally
2360   // executed in the loop.  For a block to be unconditionally executed, it has
2361   // to dominate all the exit blocks of the loop.  Verify this now.
2362   auto DominatedByBB = [this,BB] (BasicBlock *EB) -> bool {
2363     return DT->dominates(BB, EB);
2364   };
2365   if (!all_of(ExitBlocks, DominatedByBB))
2366     return false;
2367 
2368   bool MadeChange = false;
2369   // Look for store instructions, which may be optimized to memcpy/memmove.
2370   SmallVector<StoreInst*,8> Stores;
2371   collectStores(CurLoop, BB, Stores);
2372 
2373   // Optimize the store into a memcpy, if it feeds a similarly strided load.
2374   for (auto &SI : Stores)
2375     MadeChange |= processCopyingStore(CurLoop, SI, BECount);
2376 
2377   return MadeChange;
2378 }
2379 
2380 bool HexagonLoopIdiomRecognize::runOnCountableLoop(Loop *L) {
2381   PolynomialMultiplyRecognize PMR(L, *DL, *DT, *TLI, *SE);
2382   if (PMR.recognize())
2383     return true;
2384 
2385   if (!HasMemcpy && !HasMemmove)
2386     return false;
2387 
2388   const SCEV *BECount = SE->getBackedgeTakenCount(L);
2389   assert(!isa<SCEVCouldNotCompute>(BECount) &&
2390          "runOnCountableLoop() called on a loop without a predictable"
2391          "backedge-taken count");
2392 
2393   SmallVector<BasicBlock *, 8> ExitBlocks;
2394   L->getUniqueExitBlocks(ExitBlocks);
2395 
2396   bool Changed = false;
2397 
2398   // Scan all the blocks in the loop that are not in subloops.
2399   for (auto *BB : L->getBlocks()) {
2400     // Ignore blocks in subloops.
2401     if (LF->getLoopFor(BB) != L)
2402       continue;
2403     Changed |= runOnLoopBlock(L, BB, BECount, ExitBlocks);
2404   }
2405 
2406   return Changed;
2407 }
2408 
2409 bool HexagonLoopIdiomRecognize::run(Loop *L) {
2410   const Module &M = *L->getHeader()->getParent()->getParent();
2411   if (Triple(M.getTargetTriple()).getArch() != Triple::hexagon)
2412     return false;
2413 
2414   // If the loop could not be converted to canonical form, it must have an
2415   // indirectbr in it, just give up.
2416   if (!L->getLoopPreheader())
2417     return false;
2418 
2419   // Disable loop idiom recognition if the function's name is a common idiom,
       // so that we do not replace the body of one of these routines with a
       // (potentially recursive) call to itself.
2420   StringRef Name = L->getHeader()->getParent()->getName();
2421   if (Name == "memset" || Name == "memcpy" || Name == "memmove")
2422     return false;
2423 
2424   DL = &L->getHeader()->getDataLayout();
2425 
2426   HasMemcpy = TLI->has(LibFunc_memcpy);
2427   HasMemmove = TLI->has(LibFunc_memmove);
2428 
2429   if (SE->hasLoopInvariantBackedgeTakenCount(L))
2430     return runOnCountableLoop(L);
2431   return false;
2432 }
2433 
2434 bool HexagonLoopIdiomRecognizeLegacyPass::runOnLoop(Loop *L,
2435                                                     LPPassManager &LPM) {
2436   if (skipLoop(L))
2437     return false;
2438 
2439   auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
2440   auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2441   auto *LF = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
2442   auto *TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(
2443       *L->getHeader()->getParent());
2444   auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
2445   return HexagonLoopIdiomRecognize(AA, DT, LF, TLI, SE).run(L);
2446 }
2447 
2448 Pass *llvm::createHexagonLoopIdiomPass() {
2449   return new HexagonLoopIdiomRecognizeLegacyPass();
2450 }
2451 
2452 PreservedAnalyses
2453 HexagonLoopIdiomRecognitionPass::run(Loop &L, LoopAnalysisManager &AM,
2454                                      LoopStandardAnalysisResults &AR,
2455                                      LPMUpdater &U) {
2456   return HexagonLoopIdiomRecognize(&AR.AA, &AR.DT, &AR.LI, &AR.TLI, &AR.SE)
2457                  .run(&L)
2458              ? getLoopPassPreservedAnalyses()
2459              : PreservedAnalyses::all();
2460 }
2461