//===-------- LoopDataPrefetch.cpp - Loop Data Prefetching Pass -----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a Loop Data Prefetching Pass.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/LoopDataPrefetch.h"

#define DEBUG_TYPE "loop-data-prefetch"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationDiagnosticInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
using namespace llvm;

// NOTE(review): a comment here used to say "we limit this to creating 16 PHIs
// (a little over half of the allocatable register set)", but this pass creates
// no PHIs; that text appears to be inherited from the PowerPC loop
// pre-increment preparation pass this code was derived from. The cl::opt flags
// that follow simply let the command line override the target's default
// prefetching parameters.
// Prefetching store addresses is off by default; loads are typically the
// latency-critical accesses.
static cl::opt<bool>
PrefetchWrites("loop-prefetch-writes", cl::Hidden, cl::init(false),
               cl::desc("Prefetch write addresses"));

// Overrides TTI->getPrefetchDistance() when given on the command line.
static cl::opt<unsigned>
    PrefetchDistance("prefetch-distance",
                     cl::desc("Number of instructions to prefetch ahead"),
                     cl::Hidden);

// Overrides TTI->getMinPrefetchStride() when given on the command line.
static cl::opt<unsigned>
    MinPrefetchStride("min-prefetch-stride",
                      cl::desc("Min stride to add prefetches"), cl::Hidden);

// Overrides TTI->getMaxPrefetchIterationsAhead() when given on the command
// line.
static cl::opt<unsigned> MaxPrefetchIterationsAhead(
    "max-prefetch-iters-ahead",
    cl::desc("Max number of iterations to prefetch ahead"), cl::Hidden);

STATISTIC(NumPrefetches, "Number of prefetches inserted");

namespace {

/// Loop prefetch implementation class.
class LoopDataPrefetch {
public:
  LoopDataPrefetch(LoopInfo *LI, ScalarEvolution *SE,
                   const TargetTransformInfo *TTI,
                   OptimizationRemarkEmitter *ORE)
      : LI(LI), SE(SE), TTI(TTI), ORE(ORE) {}

  /// Insert prefetches in all loops of the function; returns true if the IR
  /// was changed.
  bool run();

private:
  /// Insert prefetches in a single (innermost) loop; returns true if the IR
  /// was changed.
  bool runOnLoop(Loop *L);

  /// \brief Check if the stride of the accesses is large enough to
  /// warrant a prefetch.
  bool isStrideLargeEnough(const SCEVAddRecExpr *AR);

  // The three getters below prefer an explicit command-line override
  // (detected via getNumOccurrences) and otherwise fall back to the
  // target-provided TTI default.
  unsigned getMinPrefetchStride() {
    if (MinPrefetchStride.getNumOccurrences() > 0)
      return MinPrefetchStride;
    return TTI->getMinPrefetchStride();
  }

  unsigned getPrefetchDistance() {
    if (PrefetchDistance.getNumOccurrences() > 0)
      return PrefetchDistance;
    return TTI->getPrefetchDistance();
  }

  unsigned getMaxPrefetchIterationsAhead() {
    if (MaxPrefetchIterationsAhead.getNumOccurrences() > 0)
      return MaxPrefetchIterationsAhead;
    return TTI->getMaxPrefetchIterationsAhead();
  }

  LoopInfo *LI;
  ScalarEvolution *SE;
  const TargetTransformInfo *TTI;
  OptimizationRemarkEmitter *ORE;
};

/// Legacy class for inserting loop data prefetches.
107 class LoopDataPrefetchLegacyPass : public FunctionPass { 108 public: 109 static char ID; // Pass ID, replacement for typeid 110 LoopDataPrefetchLegacyPass() : FunctionPass(ID) { 111 initializeLoopDataPrefetchLegacyPassPass(*PassRegistry::getPassRegistry()); 112 } 113 114 void getAnalysisUsage(AnalysisUsage &AU) const override { 115 AU.addPreserved<DominatorTreeWrapperPass>(); 116 AU.addRequired<LoopInfoWrapperPass>(); 117 AU.addPreserved<LoopInfoWrapperPass>(); 118 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 119 AU.addRequired<ScalarEvolutionWrapperPass>(); 120 // FIXME: For some reason, preserving SE here breaks LSR (even if 121 // this pass changes nothing). 122 // AU.addPreserved<ScalarEvolutionWrapperPass>(); 123 AU.addRequired<TargetTransformInfoWrapperPass>(); 124 } 125 126 bool runOnFunction(Function &F) override; 127 }; 128 } 129 130 char LoopDataPrefetchLegacyPass::ID = 0; 131 INITIALIZE_PASS_BEGIN(LoopDataPrefetchLegacyPass, "loop-data-prefetch", 132 "Loop Data Prefetch", false, false) 133 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 134 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 135 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 136 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 137 INITIALIZE_PASS_END(LoopDataPrefetchLegacyPass, "loop-data-prefetch", 138 "Loop Data Prefetch", false, false) 139 140 FunctionPass *llvm::createLoopDataPrefetchPass() { 141 return new LoopDataPrefetchLegacyPass(); 142 } 143 144 bool LoopDataPrefetch::isStrideLargeEnough(const SCEVAddRecExpr *AR) { 145 unsigned TargetMinStride = getMinPrefetchStride(); 146 // No need to check if any stride goes. 147 if (TargetMinStride <= 1) 148 return true; 149 150 const auto *ConstStride = dyn_cast<SCEVConstant>(AR->getStepRecurrence(*SE)); 151 // If MinStride is set, don't prefetch unless we can ensure that stride is 152 // larger. 
153 if (!ConstStride) 154 return false; 155 156 unsigned AbsStride = std::abs(ConstStride->getAPInt().getSExtValue()); 157 return TargetMinStride <= AbsStride; 158 } 159 160 PreservedAnalyses LoopDataPrefetchPass::run(Function &F, 161 FunctionAnalysisManager &AM) { 162 LoopInfo *LI = &AM.getResult<LoopAnalysis>(F); 163 ScalarEvolution *SE = &AM.getResult<ScalarEvolutionAnalysis>(F); 164 OptimizationRemarkEmitter *ORE = 165 &AM.getResult<OptimizationRemarkEmitterAnalysis>(F); 166 const TargetTransformInfo *TTI = &AM.getResult<TargetIRAnalysis>(F); 167 168 LoopDataPrefetch LDP(LI, SE, TTI, ORE); 169 bool Changed = LDP.run(); 170 171 if (Changed) { 172 PreservedAnalyses PA; 173 PA.preserve<DominatorTreeAnalysis>(); 174 PA.preserve<LoopAnalysis>(); 175 return PA; 176 } 177 178 return PreservedAnalyses::all(); 179 } 180 181 bool LoopDataPrefetchLegacyPass::runOnFunction(Function &F) { 182 if (skipFunction(F)) 183 return false; 184 185 LoopInfo *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 186 ScalarEvolution *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 187 OptimizationRemarkEmitter *ORE = 188 &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); 189 const TargetTransformInfo *TTI = 190 &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 191 192 LoopDataPrefetch LDP(LI, SE, TTI, ORE); 193 return LDP.run(); 194 } 195 196 bool LoopDataPrefetch::run() { 197 // If PrefetchDistance is not set, don't run the pass. This gives an 198 // opportunity for targets to run this pass for selected subtargets only 199 // (whose TTI sets PrefetchDistance). 
200 if (getPrefetchDistance() == 0) 201 return false; 202 assert(TTI->getCacheLineSize() && "Cache line size is not set for target"); 203 204 bool MadeChange = false; 205 206 for (Loop *I : *LI) 207 for (auto L = df_begin(I), LE = df_end(I); L != LE; ++L) 208 MadeChange |= runOnLoop(*L); 209 210 return MadeChange; 211 } 212 213 bool LoopDataPrefetch::runOnLoop(Loop *L) { 214 bool MadeChange = false; 215 216 // Only prefetch in the inner-most loop 217 if (!L->empty()) 218 return MadeChange; 219 220 SmallPtrSet<const Value *, 32> EphValues; 221 CodeMetrics::collectEphemeralValues(L, EphValues); 222 223 // Calculate the number of iterations ahead to prefetch 224 CodeMetrics Metrics; 225 for (const auto BB : L->blocks()) { 226 // If the loop already has prefetches, then assume that the user knows 227 // what they are doing and don't add any more. 228 for (auto &I : *BB) 229 if (CallInst *CI = dyn_cast<CallInst>(&I)) 230 if (Function *F = CI->getCalledFunction()) 231 if (F->getIntrinsicID() == Intrinsic::prefetch) 232 return MadeChange; 233 234 Metrics.analyzeBasicBlock(BB, *TTI, EphValues); 235 } 236 unsigned LoopSize = Metrics.NumInsts; 237 if (!LoopSize) 238 LoopSize = 1; 239 240 unsigned ItersAhead = getPrefetchDistance() / LoopSize; 241 if (!ItersAhead) 242 ItersAhead = 1; 243 244 if (ItersAhead > getMaxPrefetchIterationsAhead()) 245 return MadeChange; 246 247 DEBUG(dbgs() << "Prefetching " << ItersAhead 248 << " iterations ahead (loop size: " << LoopSize << ") in " 249 << L->getHeader()->getParent()->getName() << ": " << *L); 250 251 SmallVector<std::pair<Instruction *, const SCEVAddRecExpr *>, 16> PrefLoads; 252 for (const auto BB : L->blocks()) { 253 for (auto &I : *BB) { 254 Value *PtrValue; 255 Instruction *MemI; 256 257 if (LoadInst *LMemI = dyn_cast<LoadInst>(&I)) { 258 MemI = LMemI; 259 PtrValue = LMemI->getPointerOperand(); 260 } else if (StoreInst *SMemI = dyn_cast<StoreInst>(&I)) { 261 if (!PrefetchWrites) continue; 262 MemI = SMemI; 263 PtrValue = 
SMemI->getPointerOperand(); 264 } else continue; 265 266 unsigned PtrAddrSpace = PtrValue->getType()->getPointerAddressSpace(); 267 if (PtrAddrSpace) 268 continue; 269 270 if (L->isLoopInvariant(PtrValue)) 271 continue; 272 273 const SCEV *LSCEV = SE->getSCEV(PtrValue); 274 const SCEVAddRecExpr *LSCEVAddRec = dyn_cast<SCEVAddRecExpr>(LSCEV); 275 if (!LSCEVAddRec) 276 continue; 277 278 // Check if the the stride of the accesses is large enough to warrant a 279 // prefetch. 280 if (!isStrideLargeEnough(LSCEVAddRec)) 281 continue; 282 283 // We don't want to double prefetch individual cache lines. If this load 284 // is known to be within one cache line of some other load that has 285 // already been prefetched, then don't prefetch this one as well. 286 bool DupPref = false; 287 for (const auto &PrefLoad : PrefLoads) { 288 const SCEV *PtrDiff = SE->getMinusSCEV(LSCEVAddRec, PrefLoad.second); 289 if (const SCEVConstant *ConstPtrDiff = 290 dyn_cast<SCEVConstant>(PtrDiff)) { 291 int64_t PD = std::abs(ConstPtrDiff->getValue()->getSExtValue()); 292 if (PD < (int64_t) TTI->getCacheLineSize()) { 293 DupPref = true; 294 break; 295 } 296 } 297 } 298 if (DupPref) 299 continue; 300 301 const SCEV *NextLSCEV = SE->getAddExpr(LSCEVAddRec, SE->getMulExpr( 302 SE->getConstant(LSCEVAddRec->getType(), ItersAhead), 303 LSCEVAddRec->getStepRecurrence(*SE))); 304 if (!isSafeToExpand(NextLSCEV, *SE)) 305 continue; 306 307 PrefLoads.push_back(std::make_pair(MemI, LSCEVAddRec)); 308 309 Type *I8Ptr = Type::getInt8PtrTy(BB->getContext(), PtrAddrSpace); 310 SCEVExpander SCEVE(*SE, I.getModule()->getDataLayout(), "prefaddr"); 311 Value *PrefPtrValue = SCEVE.expandCodeFor(NextLSCEV, I8Ptr, MemI); 312 313 IRBuilder<> Builder(MemI); 314 Module *M = BB->getParent()->getParent(); 315 Type *I32 = Type::getInt32Ty(BB->getContext()); 316 Value *PrefetchFunc = Intrinsic::getDeclaration(M, Intrinsic::prefetch); 317 Builder.CreateCall( 318 PrefetchFunc, 319 {PrefPtrValue, 320 ConstantInt::get(I32, 
MemI->mayReadFromMemory() ? 0 : 1), 321 ConstantInt::get(I32, 3), ConstantInt::get(I32, 1)}); 322 ++NumPrefetches; 323 DEBUG(dbgs() << " Access: " << *PtrValue << ", SCEV: " << *LSCEV 324 << "\n"); 325 ORE->emit(OptimizationRemark(DEBUG_TYPE, "Prefetched", MemI) 326 << "prefetched memory access"); 327 328 MadeChange = true; 329 } 330 } 331 332 return MadeChange; 333 } 334 335