xref: /llvm-project/llvm/lib/Transforms/Scalar/LoopDataPrefetch.cpp (revision eea7c267b9af73bec73949f99c43c4177a14a2f5)
1 //===-------- LoopDataPrefetch.cpp - Loop Data Prefetching Pass -----------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements a Loop Data Prefetching Pass.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #define DEBUG_TYPE "loop-data-prefetch"
15 #include "llvm/ADT/DepthFirstIterator.h"
16 #include "llvm/ADT/Statistic.h"
17 #include "llvm/Analysis/AssumptionCache.h"
18 #include "llvm/Analysis/CodeMetrics.h"
19 #include "llvm/Analysis/InstructionSimplify.h"
20 #include "llvm/Analysis/LoopInfo.h"
21 #include "llvm/Analysis/OptimizationDiagnosticInfo.h"
22 #include "llvm/Analysis/ScalarEvolution.h"
23 #include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
24 #include "llvm/Analysis/ScalarEvolutionExpander.h"
25 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
26 #include "llvm/Analysis/TargetTransformInfo.h"
27 #include "llvm/Analysis/ValueTracking.h"
28 #include "llvm/IR/CFG.h"
29 #include "llvm/IR/Dominators.h"
30 #include "llvm/IR/Function.h"
31 #include "llvm/IR/IntrinsicInst.h"
32 #include "llvm/IR/Module.h"
33 #include "llvm/Support/CommandLine.h"
34 #include "llvm/Support/Debug.h"
35 #include "llvm/Transforms/Scalar.h"
36 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
37 #include "llvm/Transforms/Utils/Local.h"
38 #include "llvm/Transforms/Utils/ValueMapper.h"
39 using namespace llvm;
40 
// Command-line overrides for the prefetching heuristics. Each knob is Hidden
// and, when left unset, the pass falls back to the target's
// TargetTransformInfo defaults (see the getters in LoopDataPrefetch below).

// Prefetching of store addresses is disabled by default; enable with
// -loop-prefetch-writes.
static cl::opt<bool>
PrefetchWrites("loop-prefetch-writes", cl::Hidden, cl::init(false),
               cl::desc("Prefetch write addresses"));

// Distance, in instructions, to prefetch ahead of the access being issued.
static cl::opt<unsigned>
    PrefetchDistance("prefetch-distance",
                     cl::desc("Number of instructions to prefetch ahead"),
                     cl::Hidden);

// Accesses with a constant stride smaller than this are not prefetched.
static cl::opt<unsigned>
    MinPrefetchStride("min-prefetch-stride",
                      cl::desc("Min stride to add prefetches"), cl::Hidden);

// Upper bound on how many loop iterations ahead a prefetch may reach.
static cl::opt<unsigned> MaxPrefetchIterationsAhead(
    "max-prefetch-iters-ahead",
    cl::desc("Max number of iterations to prefetch ahead"), cl::Hidden);

STATISTIC(NumPrefetches, "Number of prefetches inserted");

namespace llvm {
  // Forward declaration of the initializer emitted by INITIALIZE_PASS below.
  void initializeLoopDataPrefetchPass(PassRegistry&);
}
65 
66 namespace {
67 
68   class LoopDataPrefetch : public FunctionPass {
69   public:
70     static char ID; // Pass ID, replacement for typeid
71     LoopDataPrefetch() : FunctionPass(ID) {
72       initializeLoopDataPrefetchPass(*PassRegistry::getPassRegistry());
73     }
74 
75     void getAnalysisUsage(AnalysisUsage &AU) const override {
76       AU.addRequired<AssumptionCacheTracker>();
77       AU.addPreserved<DominatorTreeWrapperPass>();
78       AU.addRequired<LoopInfoWrapperPass>();
79       AU.addPreserved<LoopInfoWrapperPass>();
80       AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
81       AU.addRequired<ScalarEvolutionWrapperPass>();
82       // FIXME: For some reason, preserving SE here breaks LSR (even if
83       // this pass changes nothing).
84       // AU.addPreserved<ScalarEvolutionWrapperPass>();
85       AU.addRequired<TargetTransformInfoWrapperPass>();
86     }
87 
88     bool runOnFunction(Function &F) override;
89 
90   private:
91     bool runOnLoop(Loop *L);
92 
93     /// \brief Check if the the stride of the accesses is large enough to
94     /// warrant a prefetch.
95     bool isStrideLargeEnough(const SCEVAddRecExpr *AR);
96 
97     unsigned getMinPrefetchStride() {
98       if (MinPrefetchStride.getNumOccurrences() > 0)
99         return MinPrefetchStride;
100       return TTI->getMinPrefetchStride();
101     }
102 
103     unsigned getPrefetchDistance() {
104       if (PrefetchDistance.getNumOccurrences() > 0)
105         return PrefetchDistance;
106       return TTI->getPrefetchDistance();
107     }
108 
109     unsigned getMaxPrefetchIterationsAhead() {
110       if (MaxPrefetchIterationsAhead.getNumOccurrences() > 0)
111         return MaxPrefetchIterationsAhead;
112       return TTI->getMaxPrefetchIterationsAhead();
113     }
114 
115     AssumptionCache *AC;
116     LoopInfo *LI;
117     ScalarEvolution *SE;
118     const TargetTransformInfo *TTI;
119     const DataLayout *DL;
120     OptimizationRemarkEmitter *ORE;
121   };
122 }
123 
// Static pass ID (its address, not its value, identifies the pass) and the
// registration boilerplate wiring up the analyses this pass depends on.
char LoopDataPrefetch::ID = 0;
INITIALIZE_PASS_BEGIN(LoopDataPrefetch, "loop-data-prefetch",
                      "Loop Data Prefetch", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_END(LoopDataPrefetch, "loop-data-prefetch",
                    "Loop Data Prefetch", false, false)
134 
135 FunctionPass *llvm::createLoopDataPrefetchPass() { return new LoopDataPrefetch(); }
136 
137 bool LoopDataPrefetch::isStrideLargeEnough(const SCEVAddRecExpr *AR) {
138   unsigned TargetMinStride = getMinPrefetchStride();
139   // No need to check if any stride goes.
140   if (TargetMinStride <= 1)
141     return true;
142 
143   const auto *ConstStride = dyn_cast<SCEVConstant>(AR->getStepRecurrence(*SE));
144   // If MinStride is set, don't prefetch unless we can ensure that stride is
145   // larger.
146   if (!ConstStride)
147     return false;
148 
149   unsigned AbsStride = std::abs(ConstStride->getAPInt().getSExtValue());
150   return TargetMinStride <= AbsStride;
151 }
152 
153 bool LoopDataPrefetch::runOnFunction(Function &F) {
154   if (skipFunction(F))
155     return false;
156 
157   LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
158   SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
159   DL = &F.getParent()->getDataLayout();
160   AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
161   ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
162   TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
163 
164   // If PrefetchDistance is not set, don't run the pass.  This gives an
165   // opportunity for targets to run this pass for selected subtargets only
166   // (whose TTI sets PrefetchDistance).
167   if (getPrefetchDistance() == 0)
168     return false;
169   assert(TTI->getCacheLineSize() && "Cache line size is not set for target");
170 
171   bool MadeChange = false;
172 
173   for (Loop *I : *LI)
174     for (auto L = df_begin(I), LE = df_end(I); L != LE; ++L)
175       MadeChange |= runOnLoop(*L);
176 
177   return MadeChange;
178 }
179 
/// Insert llvm.prefetch calls for strided loads (and, optionally, stores) in
/// the innermost loop \p L. Returns true if the IR was changed.
///
/// Heuristic: estimate the loop body size, derive how many iterations ahead
/// the prefetch distance reaches, and emit one prefetch per cache line of
/// strided accesses at AccessAddr + ItersAhead * Stride.
bool LoopDataPrefetch::runOnLoop(Loop *L) {
  bool MadeChange = false;

  // Only prefetch in the inner-most loop
  if (!L->empty())
    return MadeChange;

  // Ephemeral values (only feeding llvm.assume etc.) must not count toward
  // the loop-size estimate below.
  SmallPtrSet<const Value *, 32> EphValues;
  CodeMetrics::collectEphemeralValues(L, AC, EphValues);

  // Calculate the number of iterations ahead to prefetch
  CodeMetrics Metrics;
  for (Loop::block_iterator I = L->block_begin(), IE = L->block_end();
       I != IE; ++I) {

    // If the loop already has prefetches, then assume that the user knows
    // what they are doing and don't add any more.
    for (BasicBlock::iterator J = (*I)->begin(), JE = (*I)->end();
         J != JE; ++J)
      if (CallInst *CI = dyn_cast<CallInst>(J))
        if (Function *F = CI->getCalledFunction())
          if (F->getIntrinsicID() == Intrinsic::prefetch)
            return MadeChange;

    Metrics.analyzeBasicBlock(*I, *TTI, EphValues);
  }
  unsigned LoopSize = Metrics.NumInsts;
  if (!LoopSize)
    LoopSize = 1; // Avoid dividing by zero for a degenerate empty body.

  // Prefetch far enough ahead that the data arrives by the time the loop
  // gets there: distance (in instructions) divided by body size.
  unsigned ItersAhead = getPrefetchDistance() / LoopSize;
  if (!ItersAhead)
    ItersAhead = 1;

  // A very small loop would need to reach absurdly far ahead; give up.
  if (ItersAhead > getMaxPrefetchIterationsAhead())
    return MadeChange;

  DEBUG(dbgs() << "Prefetching " << ItersAhead
               << " iterations ahead (loop size: " << LoopSize << ") in "
               << L->getHeader()->getParent()->getName() << ": " << *L);

  // Accesses already covered by an emitted prefetch, used to avoid issuing
  // two prefetches for the same cache line.
  SmallVector<std::pair<Instruction *, const SCEVAddRecExpr *>, 16> PrefLoads;
  for (Loop::block_iterator I = L->block_begin(), IE = L->block_end();
       I != IE; ++I) {
    for (BasicBlock::iterator J = (*I)->begin(), JE = (*I)->end();
        J != JE; ++J) {
      Value *PtrValue;
      Instruction *MemI;

      // Candidate accesses: loads always; stores only under
      // -loop-prefetch-writes.
      if (LoadInst *LMemI = dyn_cast<LoadInst>(J)) {
        MemI = LMemI;
        PtrValue = LMemI->getPointerOperand();
      } else if (StoreInst *SMemI = dyn_cast<StoreInst>(J)) {
        if (!PrefetchWrites) continue;
        MemI = SMemI;
        PtrValue = SMemI->getPointerOperand();
      } else continue;

      // Only the default address space (0) is prefetched here.
      unsigned PtrAddrSpace = PtrValue->getType()->getPointerAddressSpace();
      if (PtrAddrSpace)
        continue;

      // A loop-invariant address is touched once; prefetching buys nothing.
      if (L->isLoopInvariant(PtrValue))
        continue;

      // The address must be an add-recurrence in this loop (i.e. strided).
      const SCEV *LSCEV = SE->getSCEV(PtrValue);
      const SCEVAddRecExpr *LSCEVAddRec = dyn_cast<SCEVAddRecExpr>(LSCEV);
      if (!LSCEVAddRec)
        continue;

      // Check if the stride of the accesses is large enough to warrant a
      // prefetch.
      if (!isStrideLargeEnough(LSCEVAddRec))
        continue;

      // We don't want to double prefetch individual cache lines. If this load
      // is known to be within one cache line of some other load that has
      // already been prefetched, then don't prefetch this one as well.
      bool DupPref = false;
      for (const auto &PrefLoad : PrefLoads) {
        const SCEV *PtrDiff = SE->getMinusSCEV(LSCEVAddRec, PrefLoad.second);
        if (const SCEVConstant *ConstPtrDiff =
            dyn_cast<SCEVConstant>(PtrDiff)) {
          int64_t PD = std::abs(ConstPtrDiff->getValue()->getSExtValue());
          if (PD < (int64_t) TTI->getCacheLineSize()) {
            DupPref = true;
            break;
          }
        }
      }
      if (DupPref)
        continue;

      // Address to prefetch: current recurrence plus ItersAhead strides.
      const SCEV *NextLSCEV = SE->getAddExpr(LSCEVAddRec, SE->getMulExpr(
        SE->getConstant(LSCEVAddRec->getType(), ItersAhead),
        LSCEVAddRec->getStepRecurrence(*SE)));
      if (!isSafeToExpand(NextLSCEV, *SE))
        continue;

      PrefLoads.push_back(std::make_pair(MemI, LSCEVAddRec));

      // Materialize the future address as an i8* immediately before the
      // access, then emit the prefetch call there.
      Type *I8Ptr = Type::getInt8PtrTy((*I)->getContext(), PtrAddrSpace);
      SCEVExpander SCEVE(*SE, J->getModule()->getDataLayout(), "prefaddr");
      Value *PrefPtrValue = SCEVE.expandCodeFor(NextLSCEV, I8Ptr, MemI);

      IRBuilder<> Builder(MemI);
      Module *M = (*I)->getParent()->getParent();
      Type *I32 = Type::getInt32Ty((*I)->getContext());
      Value *PrefetchFunc = Intrinsic::getDeclaration(M, Intrinsic::prefetch);
      // llvm.prefetch(addr, rw, locality, cache-type): rw is 0 for a read /
      // 1 for a write, locality 3 = keep in all cache levels, 1 = data cache.
      Builder.CreateCall(
          PrefetchFunc,
          {PrefPtrValue,
           ConstantInt::get(I32, MemI->mayReadFromMemory() ? 0 : 1),
           ConstantInt::get(I32, 3), ConstantInt::get(I32, 1)});
      ++NumPrefetches;
      DEBUG(dbgs() << "  Access: " << *PtrValue << ", SCEV: " << *LSCEV
                   << "\n");
      ORE->emitOptimizationRemark(DEBUG_TYPE, MemI, "prefetched memory access");

      MadeChange = true;
    }
  }

  return MadeChange;
}
305 
306