//===- LoopCacheAnalysis.cpp - Loop Cache Analysis -------------------------==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines the implementation for the loop cache analysis.
/// The implementation is largely based on the following paper:
///
///       Compiler Optimizations for Improving Data Locality
///       By: Steve Carr, Katherine S. McKinley, Chau-Wen Tseng
///       http://www.cs.utexas.edu/users/mckinley/papers/asplos-1994.pdf
///
/// The general approach taken to estimate the number of cache lines used by
/// the memory references in an inner loop is:
///    1. Partition memory references that exhibit temporal or spatial reuse
///       into reference groups.
///    2. For each loop L in the loop nest LN:
///       a. Compute the cost of each reference group.
///       b. Compute the loop cost by summing up the reference group costs.
/// A small illustrative sketch follows this header.
//===----------------------------------------------------------------------===//
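
// A rough illustration of the approach (loop bounds, an 8-byte element type
// and a 64-byte cache line are assumed only for the sake of the example): in
//
//   for (i = 0; i < 100; ++i)
//     for (j = 0; j < 100; ++j)
//       A[i][j] += B[j][i];
//
// the two A[i][j] accesses reference the same location (temporal reuse) and
// form one reference group, while B[j][i] forms its own group. With the
// j-loop innermost, A[i][j] is a consecutive access costing roughly
// (100 * 8) / 64 = 12 cache lines per traversal of the j-loop, whereas
// B[j][i] strides by a whole row and touches about 100 distinct cache lines.
// The loop cost sums the group costs, scaled by the trip counts of the
// enclosing loops, and is used to rank the loops of the nest.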

#include "llvm/Analysis/LoopCacheAnalysis.h"
#include "llvm/ADT/BreadthFirstIterator.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/Delinearization.h"
#include "llvm/Analysis/DependenceAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

#define DEBUG_TYPE "loop-cache-cost"

static cl::opt<unsigned> DefaultTripCount(
    "default-trip-count", cl::init(100), cl::Hidden,
    cl::desc("Use this to specify the default trip count of a loop"));

// In this analysis two array references are considered to exhibit temporal
// reuse if they access either the same memory location, or a memory location
// with distance smaller than a configurable threshold.
static cl::opt<unsigned> TemporalReuseThreshold(
    "temporal-reuse-threshold", cl::init(2), cl::Hidden,
    cl::desc("Use this to specify the max. distance between array elements "
             "accessed in a loop so that the elements are classified to have "
             "temporal reuse"));
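
// For instance, with the default threshold of 2, the accesses A[i] and
// A[i + 2] in the (purely illustrative) loop below are classified as having
// temporal reuse, whereas A[i] and A[i + 3] would not be:
//
//   for (i = 0; i < n; ++i)
//     A[i] = A[i + 2];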

/// Retrieve the innermost loop in the given loop nest \p Loops. Return nullptr
/// if any loop in the supplied loop vector has more than one sibling. The loop
/// vector is expected to contain loops collected in breadth-first order.
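///
/// For example (names are illustrative only): for a nest collected
/// breadth-first as {L1, L2, L3}, where L2 is nested in L1 and L3 in L2, the
/// innermost loop L3 is returned; if L1 instead contained two sibling loops,
/// nullptr would be returned.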
static Loop *getInnerMostLoop(const LoopVectorTy &Loops) {
  assert(!Loops.empty() && "Expecting a non-empty loop vector");

  Loop *LastLoop = Loops.back();
  Loop *ParentLoop = LastLoop->getParentLoop();

  if (ParentLoop == nullptr) {
    assert(Loops.size() == 1 && "Expecting a single loop");
    return LastLoop;
  }

  return (llvm::is_sorted(Loops,
                          [](const Loop *L1, const Loop *L2) {
                            return L1->getLoopDepth() < L2->getLoopDepth();
                          }))
             ? LastLoop
             : nullptr;
}

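// An illustrative case (types and names assumed): a load of A[i] from an
// array of 4-byte elements has the affine access function {%A,+,4}<%loop>;
// its step matches the element size, so the reference is treated as a
// one-dimensional array access.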
static bool isOneDimensionalArray(const SCEV &AccessFn, const SCEV &ElemSize,
                                  const Loop &L, ScalarEvolution &SE) {
  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(&AccessFn);
  if (!AR || !AR->isAffine())
    return false;

  assert(AR->getLoop() && "AR should have a loop");

  // Check that start and increment are not add recurrences.
  const SCEV *Start = AR->getStart();
  const SCEV *Step = AR->getStepRecurrence(SE);
  if (isa<SCEVAddRecExpr>(Start) || isa<SCEVAddRecExpr>(Step))
    return false;

  // Check that start and increment are both invariant in the loop.
  if (!SE.isLoopInvariant(Start, &L) || !SE.isLoopInvariant(Step, &L))
    return false;

  const SCEV *StepRec = AR->getStepRecurrence(SE);
  if (StepRec && SE.isKnownNegative(StepRec))
    StepRec = SE.getNegativeSCEV(StepRec);

  return StepRec == &ElemSize;
}

/// Compute the trip count for the given loop \p L or assume a default value if
/// it is not a compile time constant. Return the SCEV expression for the trip
/// count.
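///
/// For instance, a loop of the form 'for (i = 0; i < 8; ++i)' has a constant
/// backedge-taken count of 7 and hence a trip count of 8, while a loop whose
/// bound is unknown at compile time falls back to DefaultTripCount (100
/// unless overridden on the command line).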
static const SCEV *computeTripCount(const Loop &L, const SCEV &ElemSize,
                                    ScalarEvolution &SE) {
  const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(&L);
  const SCEV *TripCount = (!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
                           isa<SCEVConstant>(BackedgeTakenCount))
                              ? SE.getTripCountFromExitCount(BackedgeTakenCount)
                              : nullptr;

  if (!TripCount) {
    LLVM_DEBUG(dbgs() << "Trip count of loop " << L.getName()
                      << " could not be computed, using DefaultTripCount\n");
    TripCount = SE.getConstant(ElemSize.getType(), DefaultTripCount);
  }

  return TripCount;
}

//===----------------------------------------------------------------------===//
// IndexedReference implementation
//
raw_ostream &llvm::operator<<(raw_ostream &OS, const IndexedReference &R) {
  if (!R.IsValid) {
    OS << R.StoreOrLoadInst;
    OS << ", IsValid=false.";
    return OS;
  }

  OS << *R.BasePointer;
  for (const SCEV *Subscript : R.Subscripts)
    OS << "[" << *Subscript << "]";

  OS << ", Sizes: ";
  for (const SCEV *Size : R.Sizes)
    OS << "[" << *Size << "]";

  return OS;
}

IndexedReference::IndexedReference(Instruction &StoreOrLoadInst,
                                   const LoopInfo &LI, ScalarEvolution &SE)
    : StoreOrLoadInst(StoreOrLoadInst), SE(SE) {
  assert((isa<StoreInst>(StoreOrLoadInst) || isa<LoadInst>(StoreOrLoadInst)) &&
         "Expecting a load or store instruction");

  IsValid = delinearize(LI);
  if (IsValid)
    LLVM_DEBUG(dbgs().indent(2) << "Successfully delinearized: " << *this
                                << "\n");
}

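// An illustrative pair (element size and cache line size assumed): for
// B[i][j] and B[i][j + 1] all subscripts but the last are identical, and the
// difference between the last subscripts is the constant 1, which is smaller
// than a 64-byte cache line, so the two references are classified as having
// spatial reuse.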
Optional<bool> IndexedReference::hasSpacialReuse(const IndexedReference &Other,
                                                 unsigned CLS,
                                                 AAResults &AA) const {
  assert(IsValid && "Expecting a valid reference");

  if (BasePointer != Other.getBasePointer() && !isAliased(Other, AA)) {
    LLVM_DEBUG(dbgs().indent(2)
               << "No spatial reuse: different base pointers\n");
    return false;
  }

  unsigned NumSubscripts = getNumSubscripts();
  if (NumSubscripts != Other.getNumSubscripts()) {
    LLVM_DEBUG(dbgs().indent(2)
               << "No spatial reuse: different number of subscripts\n");
    return false;
  }

  // All subscripts must be equal, except the last one (the subscript for the
  // innermost dimension).
  for (auto SubNum : seq<unsigned>(0, NumSubscripts - 1)) {
    if (getSubscript(SubNum) != Other.getSubscript(SubNum)) {
      LLVM_DEBUG(dbgs().indent(2) << "No spatial reuse, different subscripts: "
                                  << "\n\t" << *getSubscript(SubNum) << "\n\t"
                                  << *Other.getSubscript(SubNum) << "\n");
      return false;
    }
  }

  // The difference between the last subscripts must be less than the cache
  // line size.
  const SCEV *LastSubscript = getLastSubscript();
  const SCEV *OtherLastSubscript = Other.getLastSubscript();
  const SCEVConstant *Diff = dyn_cast<SCEVConstant>(
      SE.getMinusSCEV(LastSubscript, OtherLastSubscript));

  if (Diff == nullptr) {
    LLVM_DEBUG(dbgs().indent(2)
               << "No spatial reuse, difference between subscripts:\n\t"
               << *LastSubscript << "\n\t" << *OtherLastSubscript
               << "\nis not constant.\n");
    return None;
  }

  bool InSameCacheLine = (Diff->getValue()->getSExtValue() < CLS);

  LLVM_DEBUG({
    if (InSameCacheLine)
      dbgs().indent(2) << "Found spatial reuse.\n";
    else
      dbgs().indent(2) << "No spatial reuse.\n";
  });

  return InSameCacheLine;
}

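// An illustrative pair (loop structure assumed): for A[i][j] and A[i][j - 2]
// with the j-loop as the loop of interest and MaxDistance = 2, the dependence
// distance is 0 at the i level and 2 at the j level, so the pair is
// classified as having temporal reuse; A[i][j] and A[i + 1][j] would not be,
// because the distance at the i level is not zero.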
Optional<bool> IndexedReference::hasTemporalReuse(const IndexedReference &Other,
                                                  unsigned MaxDistance,
                                                  const Loop &L,
                                                  DependenceInfo &DI,
                                                  AAResults &AA) const {
  assert(IsValid && "Expecting a valid reference");

  if (BasePointer != Other.getBasePointer() && !isAliased(Other, AA)) {
    LLVM_DEBUG(dbgs().indent(2)
               << "No temporal reuse: different base pointer\n");
    return false;
  }

  std::unique_ptr<Dependence> D =
      DI.depends(&StoreOrLoadInst, &Other.StoreOrLoadInst, true);

  if (D == nullptr) {
    LLVM_DEBUG(dbgs().indent(2) << "No temporal reuse: no dependence\n");
    return false;
  }

  if (D->isLoopIndependent()) {
    LLVM_DEBUG(dbgs().indent(2) << "Found temporal reuse\n");
    return true;
  }

  // Check the dependence distance at every loop level. There is temporal reuse
  // if the distance at the given loop's depth is small (|d| <= MaxDistance) and
  // it is zero at every other loop level.
  int LoopDepth = L.getLoopDepth();
  int Levels = D->getLevels();
  for (int Level = 1; Level <= Levels; ++Level) {
    const SCEV *Distance = D->getDistance(Level);
    const SCEVConstant *SCEVConst = dyn_cast_or_null<SCEVConstant>(Distance);

    if (SCEVConst == nullptr) {
      LLVM_DEBUG(dbgs().indent(2) << "No temporal reuse: distance unknown\n");
      return None;
    }

    const ConstantInt &CI = *SCEVConst->getValue();
    if (Level != LoopDepth && !CI.isZero()) {
      LLVM_DEBUG(dbgs().indent(2)
                 << "No temporal reuse: distance is not zero at depth=" << Level
                 << "\n");
      return false;
    } else if (Level == LoopDepth && CI.getSExtValue() > MaxDistance) {
      LLVM_DEBUG(
          dbgs().indent(2)
          << "No temporal reuse: distance is greater than MaxDistance at depth="
          << Level << "\n");
      return false;
    }
  }

  LLVM_DEBUG(dbgs().indent(2) << "Found temporal reuse\n");
  return true;
}

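// A small worked example (trip counts, element size and cache line size are
// assumed): for a reference A[i][j] with 4-byte elements, a 64-byte cache
// line and both trip counts equal to 100, the cost with respect to the j-loop
// (consecutive access) is (100 * 4) / 64 = 6 cache lines, whereas the cost
// with respect to the i-loop (non-consecutive access) is the i-loop trip
// count, i.e. 100, since each i iteration starts on a different cache line.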
CacheCostTy IndexedReference::computeRefCost(const Loop &L,
                                             unsigned CLS) const {
  assert(IsValid && "Expecting a valid reference");
  LLVM_DEBUG({
    dbgs().indent(2) << "Computing cache cost for:\n";
    dbgs().indent(4) << *this << "\n";
  });

  // If the indexed reference is loop invariant the cost is one.
  if (isLoopInvariant(L)) {
    LLVM_DEBUG(dbgs().indent(4) << "Reference is loop invariant: RefCost=1\n");
    return 1;
  }

  const SCEV *TripCount = computeTripCount(L, *Sizes.back(), SE);
  assert(TripCount && "Expecting valid TripCount");
  LLVM_DEBUG(dbgs() << "TripCount=" << *TripCount << "\n");

  const SCEV *RefCost = nullptr;
  if (isConsecutive(L, CLS)) {
    // If the indexed reference is 'consecutive' the cost is
    // (TripCount*Stride)/CLS.
    const SCEV *Coeff = getLastCoefficient();
    const SCEV *ElemSize = Sizes.back();
    assert(Coeff->getType() == ElemSize->getType() &&
           "Expecting the same type");
    const SCEV *Stride = SE.getMulExpr(Coeff, ElemSize);
    Type *WiderType = SE.getWiderType(Stride->getType(), TripCount->getType());
    const SCEV *CacheLineSize = SE.getConstant(WiderType, CLS);
    if (SE.isKnownNegative(Stride))
      Stride = SE.getNegativeSCEV(Stride);
    Stride = SE.getNoopOrAnyExtend(Stride, WiderType);
    TripCount = SE.getNoopOrAnyExtend(TripCount, WiderType);
    const SCEV *Numerator = SE.getMulExpr(Stride, TripCount);
    RefCost = SE.getUDivExpr(Numerator, CacheLineSize);

    LLVM_DEBUG(dbgs().indent(4)
               << "Access is consecutive: RefCost=(TripCount*Stride)/CLS="
               << *RefCost << "\n");
  } else {
    // If the indexed reference is not 'consecutive' the cost is proportional
    // to the trip count and to the depth of the dimension that the subject
    // loop's subscript is accessing. We estimate this by multiplying the cost
    // by the trip counts of the loops corresponding to the inner dimensions.
    // For example, given the indexed reference 'A[i][j][k]', and assuming the
    // i-loop is in the innermost position, the cost would be equal to the
    // iterations of the i-loop multiplied by the iterations of the j-loop.
    RefCost = TripCount;

    unsigned Index = getSubscriptIndex(L);
    assert(Index != unsigned(-1) && "Could not locate a valid Index");

    for (unsigned I = Index + 1; I < getNumSubscripts() - 1; ++I) {
      const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(getSubscript(I));
      assert(AR && AR->getLoop() && "Expecting valid loop");
      const SCEV *TripCount =
          computeTripCount(*AR->getLoop(), *Sizes.back(), SE);
      Type *WiderType =
          SE.getWiderType(RefCost->getType(), TripCount->getType());
      RefCost = SE.getMulExpr(SE.getNoopOrAnyExtend(RefCost, WiderType),
                              SE.getNoopOrAnyExtend(TripCount, WiderType));
    }

    LLVM_DEBUG(dbgs().indent(4)
               << "Access is not consecutive: RefCost=" << *RefCost << "\n");
  }
  assert(RefCost && "Expecting a valid RefCost");

  // Attempt to fold RefCost into a constant.
  if (auto ConstantCost = dyn_cast<SCEVConstant>(RefCost))
    return ConstantCost->getValue()->getSExtValue();

  LLVM_DEBUG(dbgs().indent(4)
             << "RefCost is not a constant! Setting to RefCost=InvalidCost "
                "(invalid value).\n");

  return CacheCost::InvalidCost;
}

bool IndexedReference::tryDelinearizeFixedSize(
    ScalarEvolution *SE, Instruction *Src, const SCEV *SrcAccessFn,
    SmallVectorImpl<const SCEV *> &SrcSubscripts) {
  Value *SrcPtr = getLoadStorePointerOperand(Src);
  const SCEVUnknown *SrcBase =
      dyn_cast<SCEVUnknown>(SE->getPointerBase(SrcAccessFn));

  // Check the simple case where the array dimensions are fixed size.
  auto *SrcGEP = dyn_cast<GetElementPtrInst>(SrcPtr);
  if (!SrcGEP)
    return false;

  SmallVector<int, 4> SrcSizes;
  getIndexExpressionsFromGEP(*SE, SrcGEP, SrcSubscripts, SrcSizes);

  // Check that we extracted some constant dimension sizes and that the access
  // has more than one subscript.
  if (SrcSizes.empty() || SrcSubscripts.size() <= 1) {
    SrcSubscripts.clear();
    return false;
  }

  Value *SrcBasePtr = SrcGEP->getOperand(0)->stripPointerCasts();

  // Check that for identical base pointers we do not miss index offsets
  // that have been added before this GEP is applied.
  if (SrcBasePtr != SrcBase->getValue()) {
    SrcSubscripts.clear();
    return false;
  }

  assert(SrcSubscripts.size() == SrcSizes.size() + 1 &&
         "Expected one more subscript than the number of dimension sizes");

  for (auto Idx : seq<unsigned>(1, Subscripts.size()))
    Sizes.push_back(
        SE->getConstant(Subscripts[Idx]->getType(), SrcSizes[Idx - 1]));

  LLVM_DEBUG({
    dbgs() << "Delinearized subscripts of fixed-size array\n"
           << "SrcGEP:" << *SrcGEP << "\n";
  });
  return true;
}

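// For illustration (array shape and names assumed): an access A[i][j] into a
// parametric array of doubles with 'm' columns, i.e. an address computation
// of the form A + (i * m + j) * 8, would ideally delinearize into
// Subscripts = {i, j} and Sizes = {m, 8}, the last entry of Sizes being the
// element size.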
bool IndexedReference::delinearize(const LoopInfo &LI) {
  assert(Subscripts.empty() && "Subscripts should be empty");
  assert(Sizes.empty() && "Sizes should be empty");
  assert(!IsValid && "Should be called once from the constructor");
  LLVM_DEBUG(dbgs() << "Delinearizing: " << StoreOrLoadInst << "\n");

  const SCEV *ElemSize = SE.getElementSize(&StoreOrLoadInst);
  const BasicBlock *BB = StoreOrLoadInst.getParent();

  if (Loop *L = LI.getLoopFor(BB)) {
    const SCEV *AccessFn =
        SE.getSCEVAtScope(getPointerOperand(&StoreOrLoadInst), L);

    BasePointer = dyn_cast<SCEVUnknown>(SE.getPointerBase(AccessFn));
    if (BasePointer == nullptr) {
      LLVM_DEBUG(
          dbgs().indent(2)
          << "ERROR: failed to delinearize, can't identify base pointer\n");
      return false;
    }

    bool IsFixedSize = false;
    // Try to delinearize fixed-size arrays.
    if (tryDelinearizeFixedSize(&SE, &StoreOrLoadInst, AccessFn, Subscripts)) {
      IsFixedSize = true;
      // The last element of Sizes is the element size.
      Sizes.push_back(ElemSize);
      LLVM_DEBUG(dbgs().indent(2) << "In Loop '" << L->getName()
                                  << "', AccessFn: " << *AccessFn << "\n");
    }

    AccessFn = SE.getMinusSCEV(AccessFn, BasePointer);

    // Try to delinearize parametric-size arrays.
    if (!IsFixedSize) {
      LLVM_DEBUG(dbgs().indent(2) << "In Loop '" << L->getName()
                                  << "', AccessFn: " << *AccessFn << "\n");
      llvm::delinearize(SE, AccessFn, Subscripts, Sizes,
                        SE.getElementSize(&StoreOrLoadInst));
    }

    if (Subscripts.empty() || Sizes.empty() ||
        Subscripts.size() != Sizes.size()) {
      // Attempt to determine whether we have a single-dimensional array
      // access before giving up.
      if (!isOneDimensionalArray(*AccessFn, *ElemSize, *L, SE)) {
        LLVM_DEBUG(dbgs().indent(2)
                   << "ERROR: failed to delinearize reference\n");
        Subscripts.clear();
        Sizes.clear();
        return false;
      }

      // The array may be accessed in reverse, for example:
      //   for (i = N; i > 0; i--)
      //     A[i] = 0;
      // In this case, reconstruct the access function using the absolute value
      // of the step recurrence.
      const SCEVAddRecExpr *AccessFnAR = dyn_cast<SCEVAddRecExpr>(AccessFn);
      const SCEV *StepRec =
          AccessFnAR ? AccessFnAR->getStepRecurrence(SE) : nullptr;

      if (StepRec && SE.isKnownNegative(StepRec))
        AccessFn = SE.getAddRecExpr(AccessFnAR->getStart(),
                                    SE.getNegativeSCEV(StepRec),
                                    AccessFnAR->getLoop(),
                                    AccessFnAR->getNoWrapFlags());
      const SCEV *Div = SE.getUDivExactExpr(AccessFn, ElemSize);
      Subscripts.push_back(Div);
      Sizes.push_back(ElemSize);
    }

    return all_of(Subscripts, [&](const SCEV *Subscript) {
      return isSimpleAddRecurrence(*Subscript, *L);
    });
  }

  return false;
}

bool IndexedReference::isLoopInvariant(const Loop &L) const {
  Value *Addr = getPointerOperand(&StoreOrLoadInst);
  assert(Addr != nullptr && "Expecting either a load or a store instruction");
  assert(SE.isSCEVable(Addr->getType()) && "Addr should be SCEVable");

  if (SE.isLoopInvariant(SE.getSCEV(Addr), &L))
    return true;

  // The indexed reference is loop invariant if none of the coefficients use
  // the loop induction variable.
  bool allCoeffForLoopAreZero = all_of(Subscripts, [&](const SCEV *Subscript) {
    return isCoeffForLoopZeroOrInvariant(*Subscript, L);
  });

  return allCoeffForLoopAreZero;
}

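// For example (element size assumed smaller than the cache line size): the
// reference A[i][j] is consecutive with respect to the j-loop, because only
// the last subscript depends on j and the resulting stride is one element;
// it is not consecutive with respect to the i-loop, because the first
// subscript depends on i.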
bool IndexedReference::isConsecutive(const Loop &L, unsigned CLS) const {
  // The indexed reference is 'consecutive' if the only coefficient that uses
  // the loop induction variable is the last one...
  const SCEV *LastSubscript = Subscripts.back();
  for (const SCEV *Subscript : Subscripts) {
    if (Subscript == LastSubscript)
      continue;
    if (!isCoeffForLoopZeroOrInvariant(*Subscript, L))
      return false;
  }

  // ...and the access stride is less than the cache line size.
  const SCEV *Coeff = getLastCoefficient();
  const SCEV *ElemSize = Sizes.back();
  const SCEV *Stride = SE.getMulExpr(Coeff, ElemSize);
  const SCEV *CacheLineSize = SE.getConstant(Stride->getType(), CLS);

  Stride = SE.isKnownNegative(Stride) ? SE.getNegativeSCEV(Stride) : Stride;
  return SE.isKnownPredicate(ICmpInst::ICMP_ULT, Stride, CacheLineSize);
}

unsigned IndexedReference::getSubscriptIndex(const Loop &L) const {
  for (auto Idx : seq<unsigned>(0, getNumSubscripts())) {
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(getSubscript(Idx));
    if (AR && AR->getLoop() == &L) {
      return Idx;
    }
  }
  return -1;
}

const SCEV *IndexedReference::getLastCoefficient() const {
  const SCEV *LastSubscript = getLastSubscript();
  auto *AR = cast<SCEVAddRecExpr>(LastSubscript);
  return AR->getStepRecurrence(SE);
}

bool IndexedReference::isCoeffForLoopZeroOrInvariant(const SCEV &Subscript,
                                                     const Loop &L) const {
  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(&Subscript);
  return (AR != nullptr) ? AR->getLoop() != &L
                         : SE.isLoopInvariant(&Subscript, &L);
}

bool IndexedReference::isSimpleAddRecurrence(const SCEV &Subscript,
                                             const Loop &L) const {
  if (!isa<SCEVAddRecExpr>(Subscript))
    return false;

  const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(&Subscript);
  assert(AR->getLoop() && "AR should have a loop");

  if (!AR->isAffine())
    return false;

  const SCEV *Start = AR->getStart();
  const SCEV *Step = AR->getStepRecurrence(SE);

  if (!SE.isLoopInvariant(Start, &L) || !SE.isLoopInvariant(Step, &L))
    return false;

  return true;
}

bool IndexedReference::isAliased(const IndexedReference &Other,
                                 AAResults &AA) const {
  const auto &Loc1 = MemoryLocation::get(&StoreOrLoadInst);
  const auto &Loc2 = MemoryLocation::get(&Other.StoreOrLoadInst);
  return AA.isMustAlias(Loc1, Loc2);
}

//===----------------------------------------------------------------------===//
// CacheCost implementation
//
raw_ostream &llvm::operator<<(raw_ostream &OS, const CacheCost &CC) {
  for (const auto &LC : CC.LoopCosts) {
    const Loop *L = LC.first;
    OS << "Loop '" << L->getName() << "' has cost = " << LC.second << "\n";
  }
  return OS;
}

CacheCost::CacheCost(const LoopVectorTy &Loops, const LoopInfo &LI,
                     ScalarEvolution &SE, TargetTransformInfo &TTI,
                     AAResults &AA, DependenceInfo &DI, Optional<unsigned> TRT)
    : Loops(Loops),
      TRT((TRT == None) ? Optional<unsigned>(TemporalReuseThreshold) : TRT),
      LI(LI), SE(SE), TTI(TTI), AA(AA), DI(DI) {
  assert(!Loops.empty() && "Expecting a non-empty loop vector.");

  for (const Loop *L : Loops) {
    unsigned TripCount = SE.getSmallConstantTripCount(L);
    TripCount = (TripCount == 0) ? DefaultTripCount : TripCount;
    TripCounts.push_back({L, TripCount});
  }

  calculateCacheFootprint();
}

std::unique_ptr<CacheCost>
CacheCost::getCacheCost(Loop &Root, LoopStandardAnalysisResults &AR,
                        DependenceInfo &DI, Optional<unsigned> TRT) {
  if (!Root.isOutermost()) {
    LLVM_DEBUG(dbgs() << "Expecting the outermost loop in a loop nest\n");
    return nullptr;
  }

  LoopVectorTy Loops;
  append_range(Loops, breadth_first(&Root));

  if (!getInnerMostLoop(Loops)) {
    LLVM_DEBUG(dbgs() << "Cannot compute cache cost of loop nest with more "
                         "than one innermost loop\n");
    return nullptr;
  }

  return std::make_unique<CacheCost>(Loops, AR.LI, AR.SE, AR.TTI, AR.AA, DI,
                                     TRT);
}

void CacheCost::calculateCacheFootprint() {
  LLVM_DEBUG(dbgs() << "POPULATING REFERENCE GROUPS\n");
  ReferenceGroupsTy RefGroups;
  if (!populateReferenceGroups(RefGroups))
    return;

  LLVM_DEBUG(dbgs() << "COMPUTING LOOP CACHE COSTS\n");
  for (const Loop *L : Loops) {
    assert(llvm::none_of(
               LoopCosts,
               [L](const LoopCacheCostTy &LCC) { return LCC.first == L; }) &&
           "Should not add duplicate element");
    CacheCostTy LoopCost = computeLoopCacheCost(*L, RefGroups);
    LoopCosts.push_back(std::make_pair(L, LoopCost));
  }

  sortLoopCosts();
  RefGroups.clear();
}

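// As a sketch of the grouping (array shapes and sizes assumed): in an
// innermost loop containing the accesses A[i][j], A[i][j + 1] and B[i][j],
// the first two typically land in the same reference group (spatial reuse
// within one cache line), while B[i][j] starts a new group because it has a
// different base pointer; each group is later costed through its first
// member, which acts as the representative.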
bool CacheCost::populateReferenceGroups(ReferenceGroupsTy &RefGroups) const {
  assert(RefGroups.empty() && "Reference groups should be empty");

  unsigned CLS = TTI.getCacheLineSize();
  Loop *InnerMostLoop = getInnerMostLoop(Loops);
  assert(InnerMostLoop != nullptr && "Expecting a valid innermost loop");

  for (BasicBlock *BB : InnerMostLoop->getBlocks()) {
    for (Instruction &I : *BB) {
      if (!isa<StoreInst>(I) && !isa<LoadInst>(I))
        continue;

      std::unique_ptr<IndexedReference> R(new IndexedReference(I, LI, SE));
      if (!R->isValid())
        continue;

      bool Added = false;
      for (ReferenceGroupTy &RefGroup : RefGroups) {
        const IndexedReference &Representative = *RefGroup.front();
        LLVM_DEBUG({
          dbgs() << "References:\n";
          dbgs().indent(2) << *R << "\n";
          dbgs().indent(2) << Representative << "\n";
        });

        // FIXME: Both positive and negative access functions will be placed
        // into the same reference group, resulting in a bi-directional array
        // access such as:
        //   for (i = N; i > 0; i--)
        //     A[i] = A[N - i];
        // having the same cost calculation as a single-direction access
        // pattern:
        //   for (i = 0; i < N; i++)
        //     A[i] = A[i];
        // when in actuality, depending on the array size, the first example
        // should have a cost closer to 2x the second due to the two cache
        // accesses per iteration from opposite ends of the array.
        Optional<bool> HasTemporalReuse =
            R->hasTemporalReuse(Representative, *TRT, *InnerMostLoop, DI, AA);
        Optional<bool> HasSpacialReuse =
            R->hasSpacialReuse(Representative, CLS, AA);

        if ((HasTemporalReuse.hasValue() && *HasTemporalReuse) ||
            (HasSpacialReuse.hasValue() && *HasSpacialReuse)) {
          RefGroup.push_back(std::move(R));
          Added = true;
          break;
        }
      }

      if (!Added) {
        ReferenceGroupTy RG;
        RG.push_back(std::move(R));
        RefGroups.push_back(std::move(RG));
      }
    }
  }

  if (RefGroups.empty())
    return false;

  LLVM_DEBUG({
    dbgs() << "\nIDENTIFIED REFERENCE GROUPS:\n";
    int n = 1;
    for (const ReferenceGroupTy &RG : RefGroups) {
      dbgs().indent(2) << "RefGroup " << n << ":\n";
      for (const auto &IR : RG)
        dbgs().indent(4) << *IR << "\n";
      n++;
    }
    dbgs() << "\n";
  });

  return true;
}

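// For instance (trip counts assumed): in a two-deep nest with trip counts
// TC_i = 100 and TC_j = 50, the cost of treating the j-loop as the innermost
// loop is the sum of the reference group costs computed for the j-loop, each
// multiplied by the product of the remaining trip counts, here 100; a smaller
// cost indicates better locality for that loop order.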
CacheCostTy
CacheCost::computeLoopCacheCost(const Loop &L,
                                const ReferenceGroupsTy &RefGroups) const {
  if (!L.isLoopSimplifyForm())
    return InvalidCost;

  LLVM_DEBUG(dbgs() << "Considering loop '" << L.getName()
                    << "' as innermost loop.\n");

  // Compute the product of the trip counts of each other loop in the nest.
  CacheCostTy TripCountsProduct = 1;
  for (const auto &TC : TripCounts) {
    if (TC.first == &L)
      continue;
    TripCountsProduct *= TC.second;
  }

  CacheCostTy LoopCost = 0;
  for (const ReferenceGroupTy &RG : RefGroups) {
    CacheCostTy RefGroupCost = computeRefGroupCacheCost(RG, L);
    LoopCost += RefGroupCost * TripCountsProduct;
  }

  LLVM_DEBUG(dbgs().indent(2) << "Loop '" << L.getName()
                              << "' has cost=" << LoopCost << "\n");

  return LoopCost;
}

CacheCostTy CacheCost::computeRefGroupCacheCost(const ReferenceGroupTy &RG,
                                                const Loop &L) const {
  assert(!RG.empty() && "Reference group should have at least one member.");

  const IndexedReference *Representative = RG.front().get();
  return Representative->computeRefCost(L, TTI.getCacheLineSize());
}

//===----------------------------------------------------------------------===//
// LoopCachePrinterPass implementation
//
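// The printer is typically exercised from the command line, e.g. with
// something like 'opt -passes="print<loop-cache-cost>" -disable-output'
// (the exact pass name depends on how it is registered in the pass registry);
// in debug builds, -debug-only=loop-cache-cost additionally prints the
// reference classification steps.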
PreservedAnalyses LoopCachePrinterPass::run(Loop &L, LoopAnalysisManager &AM,
                                            LoopStandardAnalysisResults &AR,
                                            LPMUpdater &U) {
  Function *F = L.getHeader()->getParent();
  DependenceInfo DI(F, &AR.AA, &AR.SE, &AR.LI);

  if (auto CC = CacheCost::getCacheCost(L, AR, DI))
    OS << *CC;

  return PreservedAnalyses::all();
}
758