xref: /llvm-project/llvm/lib/Transforms/Scalar/LoopDataPrefetch.cpp (revision 50271f787e99a35010ddbaafaa8fe711da5899f8)
1 //===-------- LoopDataPrefetch.cpp - Loop Data Prefetching Pass -----------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements a Loop Data Prefetching Pass.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #define DEBUG_TYPE "loop-data-prefetch"
15 #include "llvm/Transforms/Scalar.h"
16 #include "llvm/ADT/DepthFirstIterator.h"
17 #include "llvm/ADT/Statistic.h"
18 #include "llvm/Analysis/AssumptionCache.h"
19 #include "llvm/Analysis/CodeMetrics.h"
20 #include "llvm/Analysis/InstructionSimplify.h"
21 #include "llvm/Analysis/LoopInfo.h"
22 #include "llvm/Analysis/ScalarEvolution.h"
23 #include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
24 #include "llvm/Analysis/ScalarEvolutionExpander.h"
25 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
26 #include "llvm/Analysis/TargetTransformInfo.h"
27 #include "llvm/Analysis/ValueTracking.h"
28 #include "llvm/IR/CFG.h"
29 #include "llvm/IR/Dominators.h"
30 #include "llvm/IR/Function.h"
31 #include "llvm/IR/IntrinsicInst.h"
32 #include "llvm/IR/Module.h"
33 #include "llvm/Support/CommandLine.h"
34 #include "llvm/Support/Debug.h"
35 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
36 #include "llvm/Transforms/Utils/Local.h"
37 #include "llvm/Transforms/Utils/ValueMapper.h"
38 using namespace llvm;
39 
// Command-line knobs for this pass.  The three unsigned options, when present
// on the command line, override the corresponding TargetTransformInfo hooks
// (see the getters in the LoopDataPrefetch pass below); PrefetchWrites
// additionally enables prefetching of store addresses, which is off by
// default.
static cl::opt<bool>
PrefetchWrites("loop-prefetch-writes", cl::Hidden, cl::init(false),
               cl::desc("Prefetch write addresses"));

static cl::opt<unsigned>
    PrefetchDistance("prefetch-distance",
                     cl::desc("Number of instructions to prefetch ahead"),
                     cl::Hidden);

static cl::opt<unsigned>
    MinPrefetchStride("min-prefetch-stride",
                      cl::desc("Min stride to add prefetches"), cl::Hidden);

static cl::opt<unsigned> MaxPrefetchIterationsAhead(
    "max-prefetch-iters-ahead",
    cl::desc("Max number of iterations to prefetch ahead"), cl::Hidden);

STATISTIC(NumPrefetches, "Number of prefetches inserted");
60 
namespace llvm {
  // Forward declaration of the registration hook (defined by the
  // INITIALIZE_PASS machinery below) called from the pass constructor.
  void initializeLoopDataPrefetchPass(PassRegistry&);
}
64 
namespace {

  /// \brief Inserts llvm.prefetch intrinsics ahead of strided loads (and,
  /// under -loop-prefetch-writes, stores) in innermost loops.  Prefetching
  /// parameters come from TargetTransformInfo unless overridden by the
  /// command-line options above.
  class LoopDataPrefetch : public FunctionPass {
  public:
    static char ID; // Pass ID, replacement for typeid
    LoopDataPrefetch() : FunctionPass(ID) {
      initializeLoopDataPrefetchPass(*PassRegistry::getPassRegistry());
    }

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.addRequired<AssumptionCacheTracker>();
      AU.addPreserved<DominatorTreeWrapperPass>();
      AU.addRequired<LoopInfoWrapperPass>();
      AU.addPreserved<LoopInfoWrapperPass>();
      AU.addRequired<ScalarEvolutionWrapperPass>();
      // FIXME: For some reason, preserving SE here breaks LSR (even if
      // this pass changes nothing).
      // AU.addPreserved<ScalarEvolutionWrapperPass>();
      AU.addRequired<TargetTransformInfoWrapperPass>();
    }

    bool runOnFunction(Function &F) override;

  private:
    /// \brief Insert prefetches into a single innermost loop; returns true
    /// if the loop was modified.
    bool runOnLoop(Loop *L);

    /// \brief Check if the stride of the accesses is large enough to
    /// warrant a prefetch.
    bool isStrideLargeEnough(const SCEVAddRecExpr *AR);

    /// Min stride to prefetch: -min-prefetch-stride if given, else the TTI
    /// value.
    unsigned getMinPrefetchStride() {
      if (MinPrefetchStride.getNumOccurrences() > 0)
        return MinPrefetchStride;
      return TTI->getMinPrefetchStride();
    }

    /// Instructions to prefetch ahead: -prefetch-distance if given, else the
    /// TTI value.
    unsigned getPrefetchDistance() {
      if (PrefetchDistance.getNumOccurrences() > 0)
        return PrefetchDistance;
      return TTI->getPrefetchDistance();
    }

    /// Max iterations to prefetch ahead: -max-prefetch-iters-ahead if given,
    /// else the TTI value.
    unsigned getMaxPrefetchIterationsAhead() {
      if (MaxPrefetchIterationsAhead.getNumOccurrences() > 0)
        return MaxPrefetchIterationsAhead;
      return TTI->getMaxPrefetchIterationsAhead();
    }

    // Cached analysis results; set up at the top of runOnFunction.
    AssumptionCache *AC;
    LoopInfo *LI;
    ScalarEvolution *SE;
    const TargetTransformInfo *TTI;
    const DataLayout *DL;
  };
}
120 
char LoopDataPrefetch::ID = 0;
// Register the pass (and its analysis dependencies) with the legacy pass
// manager under the command-line name "loop-data-prefetch".
INITIALIZE_PASS_BEGIN(LoopDataPrefetch, "loop-data-prefetch",
                      "Loop Data Prefetch", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_END(LoopDataPrefetch, "loop-data-prefetch",
                    "Loop Data Prefetch", false, false)
131 FunctionPass *llvm::createLoopDataPrefetchPass() { return new LoopDataPrefetch(); }
132 
133 bool LoopDataPrefetch::isStrideLargeEnough(const SCEVAddRecExpr *AR) {
134   unsigned TargetMinStride = getMinPrefetchStride();
135   // No need to check if any stride goes.
136   if (TargetMinStride <= 1)
137     return true;
138 
139   const auto *ConstStride = dyn_cast<SCEVConstant>(AR->getStepRecurrence(*SE));
140   // If MinStride is set, don't prefetch unless we can ensure that stride is
141   // larger.
142   if (!ConstStride)
143     return false;
144 
145   unsigned AbsStride = std::abs(ConstStride->getAPInt().getSExtValue());
146   return TargetMinStride <= AbsStride;
147 }
148 
149 bool LoopDataPrefetch::runOnFunction(Function &F) {
150   if (skipFunction(F))
151     return false;
152 
153   LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
154   SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
155   DL = &F.getParent()->getDataLayout();
156   AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
157   TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
158 
159   // If PrefetchDistance is not set, don't run the pass.  This gives an
160   // opportunity for targets to run this pass for selected subtargets only
161   // (whose TTI sets PrefetchDistance).
162   if (getPrefetchDistance() == 0)
163     return false;
164   assert(TTI->getCacheLineSize() && "Cache line size is not set for target");
165 
166   bool MadeChange = false;
167 
168   for (auto I = LI->begin(), IE = LI->end(); I != IE; ++I)
169     for (auto L = df_begin(*I), LE = df_end(*I); L != LE; ++L)
170       MadeChange |= runOnLoop(*L);
171 
172   return MadeChange;
173 }
174 
/// Insert llvm.prefetch calls into a single innermost loop.  Returns true if
/// the loop was modified.
bool LoopDataPrefetch::runOnLoop(Loop *L) {
  bool MadeChange = false;

  // Only prefetch in the inner-most loop
  if (!L->empty())
    return MadeChange;

  // Ephemeral values (only feeding assumes) must not inflate the loop-size
  // estimate below.
  SmallPtrSet<const Value *, 32> EphValues;
  CodeMetrics::collectEphemeralValues(L, AC, EphValues);

  // Calculate the number of iterations ahead to prefetch
  CodeMetrics Metrics;
  for (Loop::block_iterator I = L->block_begin(), IE = L->block_end();
       I != IE; ++I) {

    // If the loop already has prefetches, then assume that the user knows
    // what they are doing and don't add any more.
    for (BasicBlock::iterator J = (*I)->begin(), JE = (*I)->end();
         J != JE; ++J)
      if (CallInst *CI = dyn_cast<CallInst>(J))
        if (Function *F = CI->getCalledFunction())
          if (F->getIntrinsicID() == Intrinsic::prefetch)
            return MadeChange;

    Metrics.analyzeBasicBlock(*I, *TTI, EphValues);
  }
  unsigned LoopSize = Metrics.NumInsts;
  if (!LoopSize)
    LoopSize = 1;

  // Convert the instruction-count prefetch distance into whole loop
  // iterations (at least one).
  unsigned ItersAhead = getPrefetchDistance() / LoopSize;
  if (!ItersAhead)
    ItersAhead = 1;

  // The loop body is so small that the required lookahead exceeds the
  // target's limit; bail out.
  if (ItersAhead > getMaxPrefetchIterationsAhead())
    return MadeChange;

  DEBUG(dbgs() << "Prefetching " << ItersAhead
               << " iterations ahead (loop size: " << LoopSize << ") in "
               << L->getHeader()->getParent()->getName() << ": " << *L);

  // Accesses we have already emitted a prefetch for, kept so we can suppress
  // duplicate prefetches of the same cache line below.
  SmallVector<std::pair<Instruction *, const SCEVAddRecExpr *>, 16> PrefLoads;
  for (Loop::block_iterator I = L->block_begin(), IE = L->block_end();
       I != IE; ++I) {
    for (BasicBlock::iterator J = (*I)->begin(), JE = (*I)->end();
        J != JE; ++J) {
      Value *PtrValue;
      Instruction *MemI;

      // Candidates are loads and, only under -loop-prefetch-writes, stores.
      if (LoadInst *LMemI = dyn_cast<LoadInst>(J)) {
        MemI = LMemI;
        PtrValue = LMemI->getPointerOperand();
      } else if (StoreInst *SMemI = dyn_cast<StoreInst>(J)) {
        if (!PrefetchWrites) continue;
        MemI = SMemI;
        PtrValue = SMemI->getPointerOperand();
      } else continue;

      // Only consider pointers in the default address space (0).
      unsigned PtrAddrSpace = PtrValue->getType()->getPointerAddressSpace();
      if (PtrAddrSpace)
        continue;

      // A loop-invariant address gains nothing from per-iteration prefetches.
      if (L->isLoopInvariant(PtrValue))
        continue;

      // Only strided accesses, i.e. addresses SCEV models as an
      // add-recurrence, are prefetched.
      const SCEV *LSCEV = SE->getSCEV(PtrValue);
      const SCEVAddRecExpr *LSCEVAddRec = dyn_cast<SCEVAddRecExpr>(LSCEV);
      if (!LSCEVAddRec)
        continue;

      // Check if the stride of the accesses is large enough to warrant a
      // prefetch.
      if (!isStrideLargeEnough(LSCEVAddRec))
        continue;

      // We don't want to double prefetch individual cache lines. If this load
      // is known to be within one cache line of some other load that has
      // already been prefetched, then don't prefetch this one as well.
      bool DupPref = false;
      for (SmallVector<std::pair<Instruction *, const SCEVAddRecExpr *>,
             16>::iterator K = PrefLoads.begin(), KE = PrefLoads.end();
           K != KE; ++K) {
        const SCEV *PtrDiff = SE->getMinusSCEV(LSCEVAddRec, K->second);
        if (const SCEVConstant *ConstPtrDiff =
            dyn_cast<SCEVConstant>(PtrDiff)) {
          int64_t PD = std::abs(ConstPtrDiff->getValue()->getSExtValue());
          if (PD < (int64_t) TTI->getCacheLineSize()) {
            DupPref = true;
            break;
          }
        }
      }
      if (DupPref)
        continue;

      // The address to prefetch is the current recurrence advanced by
      // ItersAhead iterations: LSCEVAddRec + ItersAhead * Step.
      const SCEV *NextLSCEV = SE->getAddExpr(LSCEVAddRec, SE->getMulExpr(
        SE->getConstant(LSCEVAddRec->getType(), ItersAhead),
        LSCEVAddRec->getStepRecurrence(*SE)));
      if (!isSafeToExpand(NextLSCEV, *SE))
        continue;

      PrefLoads.push_back(std::make_pair(MemI, LSCEVAddRec));

      // Materialize the prefetch address as an i8* right before the memory
      // instruction.
      Type *I8Ptr = Type::getInt8PtrTy((*I)->getContext(), PtrAddrSpace);
      SCEVExpander SCEVE(*SE, J->getModule()->getDataLayout(), "prefaddr");
      Value *PrefPtrValue = SCEVE.expandCodeFor(NextLSCEV, I8Ptr, MemI);

      IRBuilder<> Builder(MemI);
      Module *M = (*I)->getParent()->getParent();
      Type *I32 = Type::getInt32Ty((*I)->getContext());
      Value *PrefetchFunc = Intrinsic::getDeclaration(M, Intrinsic::prefetch);
      // llvm.prefetch(addr, rw, locality, cache-type): rw is 0 for a read and
      // 1 for a write access, locality 3 is maximally temporal, and 1 selects
      // the data cache (per the LangRef description of llvm.prefetch).
      Builder.CreateCall(
          PrefetchFunc,
          {PrefPtrValue,
           ConstantInt::get(I32, MemI->mayReadFromMemory() ? 0 : 1),
           ConstantInt::get(I32, 3), ConstantInt::get(I32, 1)});
      ++NumPrefetches;
      DEBUG(dbgs() << "  Access: " << *PtrValue << ", SCEV: " << *LSCEV
                   << "\n");

      MadeChange = true;
    }
  }

  return MadeChange;
}
301 
302