//===-------- LoopDataPrefetch.cpp - Loop Data Prefetching Pass -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a Loop Data Prefetching Pass.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/LoopDataPrefetch.h"
#include "llvm/InitializePasses.h"

#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/ValueMapper.h"

#define DEBUG_TYPE "loop-data-prefetch"

using namespace llvm;

// Command-line knobs for prefetch insertion. Each flag, when explicitly
// given on the command line, overrides the corresponding default supplied
// by the target's TargetTransformInfo.
43 static cl::opt<bool> 44 PrefetchWrites("loop-prefetch-writes", cl::Hidden, cl::init(false), 45 cl::desc("Prefetch write addresses")); 46 47 static cl::opt<unsigned> 48 PrefetchDistance("prefetch-distance", 49 cl::desc("Number of instructions to prefetch ahead"), 50 cl::Hidden); 51 52 static cl::opt<unsigned> 53 MinPrefetchStride("min-prefetch-stride", 54 cl::desc("Min stride to add prefetches"), cl::Hidden); 55 56 static cl::opt<unsigned> MaxPrefetchIterationsAhead( 57 "max-prefetch-iters-ahead", 58 cl::desc("Max number of iterations to prefetch ahead"), cl::Hidden); 59 60 STATISTIC(NumPrefetches, "Number of prefetches inserted"); 61 62 namespace { 63 64 /// Loop prefetch implementation class. 65 class LoopDataPrefetch { 66 public: 67 LoopDataPrefetch(AssumptionCache *AC, DominatorTree *DT, LoopInfo *LI, 68 ScalarEvolution *SE, const TargetTransformInfo *TTI, 69 OptimizationRemarkEmitter *ORE) 70 : AC(AC), DT(DT), LI(LI), SE(SE), TTI(TTI), ORE(ORE) {} 71 72 bool run(); 73 74 private: 75 bool runOnLoop(Loop *L); 76 77 /// Check if the stride of the accesses is large enough to 78 /// warrant a prefetch. 
79 bool isStrideLargeEnough(const SCEVAddRecExpr *AR, unsigned TargetMinStride); 80 81 unsigned getMinPrefetchStride(unsigned NumMemAccesses, 82 unsigned NumStridedMemAccesses, 83 unsigned NumPrefetches, 84 bool HasCall) { 85 if (MinPrefetchStride.getNumOccurrences() > 0) 86 return MinPrefetchStride; 87 return TTI->getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses, 88 NumPrefetches, HasCall); 89 } 90 91 unsigned getPrefetchDistance() { 92 if (PrefetchDistance.getNumOccurrences() > 0) 93 return PrefetchDistance; 94 return TTI->getPrefetchDistance(); 95 } 96 97 unsigned getMaxPrefetchIterationsAhead() { 98 if (MaxPrefetchIterationsAhead.getNumOccurrences() > 0) 99 return MaxPrefetchIterationsAhead; 100 return TTI->getMaxPrefetchIterationsAhead(); 101 } 102 103 bool doPrefetchWrites() { 104 if (PrefetchWrites.getNumOccurrences() > 0) 105 return PrefetchWrites; 106 return TTI->enableWritePrefetching(); 107 } 108 109 AssumptionCache *AC; 110 DominatorTree *DT; 111 LoopInfo *LI; 112 ScalarEvolution *SE; 113 const TargetTransformInfo *TTI; 114 OptimizationRemarkEmitter *ORE; 115 }; 116 117 /// Legacy class for inserting loop data prefetches. 
118 class LoopDataPrefetchLegacyPass : public FunctionPass { 119 public: 120 static char ID; // Pass ID, replacement for typeid 121 LoopDataPrefetchLegacyPass() : FunctionPass(ID) { 122 initializeLoopDataPrefetchLegacyPassPass(*PassRegistry::getPassRegistry()); 123 } 124 125 void getAnalysisUsage(AnalysisUsage &AU) const override { 126 AU.addRequired<AssumptionCacheTracker>(); 127 AU.addRequired<DominatorTreeWrapperPass>(); 128 AU.addPreserved<DominatorTreeWrapperPass>(); 129 AU.addRequired<LoopInfoWrapperPass>(); 130 AU.addPreserved<LoopInfoWrapperPass>(); 131 AU.addRequiredID(LoopSimplifyID); 132 AU.addPreservedID(LoopSimplifyID); 133 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 134 AU.addRequired<ScalarEvolutionWrapperPass>(); 135 AU.addPreserved<ScalarEvolutionWrapperPass>(); 136 AU.addRequired<TargetTransformInfoWrapperPass>(); 137 } 138 139 bool runOnFunction(Function &F) override; 140 }; 141 } 142 143 char LoopDataPrefetchLegacyPass::ID = 0; 144 INITIALIZE_PASS_BEGIN(LoopDataPrefetchLegacyPass, "loop-data-prefetch", 145 "Loop Data Prefetch", false, false) 146 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 147 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 148 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 149 INITIALIZE_PASS_DEPENDENCY(LoopSimplify) 150 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 151 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 152 INITIALIZE_PASS_END(LoopDataPrefetchLegacyPass, "loop-data-prefetch", 153 "Loop Data Prefetch", false, false) 154 155 FunctionPass *llvm::createLoopDataPrefetchPass() { 156 return new LoopDataPrefetchLegacyPass(); 157 } 158 159 bool LoopDataPrefetch::isStrideLargeEnough(const SCEVAddRecExpr *AR, 160 unsigned TargetMinStride) { 161 // No need to check if any stride goes. 
162 if (TargetMinStride <= 1) 163 return true; 164 165 const auto *ConstStride = dyn_cast<SCEVConstant>(AR->getStepRecurrence(*SE)); 166 // If MinStride is set, don't prefetch unless we can ensure that stride is 167 // larger. 168 if (!ConstStride) 169 return false; 170 171 unsigned AbsStride = std::abs(ConstStride->getAPInt().getSExtValue()); 172 return TargetMinStride <= AbsStride; 173 } 174 175 PreservedAnalyses LoopDataPrefetchPass::run(Function &F, 176 FunctionAnalysisManager &AM) { 177 DominatorTree *DT = &AM.getResult<DominatorTreeAnalysis>(F); 178 LoopInfo *LI = &AM.getResult<LoopAnalysis>(F); 179 ScalarEvolution *SE = &AM.getResult<ScalarEvolutionAnalysis>(F); 180 AssumptionCache *AC = &AM.getResult<AssumptionAnalysis>(F); 181 OptimizationRemarkEmitter *ORE = 182 &AM.getResult<OptimizationRemarkEmitterAnalysis>(F); 183 const TargetTransformInfo *TTI = &AM.getResult<TargetIRAnalysis>(F); 184 185 LoopDataPrefetch LDP(AC, DT, LI, SE, TTI, ORE); 186 bool Changed = LDP.run(); 187 188 if (Changed) { 189 PreservedAnalyses PA; 190 PA.preserve<DominatorTreeAnalysis>(); 191 PA.preserve<LoopAnalysis>(); 192 return PA; 193 } 194 195 return PreservedAnalyses::all(); 196 } 197 198 bool LoopDataPrefetchLegacyPass::runOnFunction(Function &F) { 199 if (skipFunction(F)) 200 return false; 201 202 DominatorTree *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 203 LoopInfo *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 204 ScalarEvolution *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 205 AssumptionCache *AC = 206 &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 207 OptimizationRemarkEmitter *ORE = 208 &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); 209 const TargetTransformInfo *TTI = 210 &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 211 212 LoopDataPrefetch LDP(AC, DT, LI, SE, TTI, ORE); 213 return LDP.run(); 214 } 215 216 bool LoopDataPrefetch::run() { 217 // If PrefetchDistance is not set, don't run 
the pass. This gives an 218 // opportunity for targets to run this pass for selected subtargets only 219 // (whose TTI sets PrefetchDistance). 220 if (getPrefetchDistance() == 0) 221 return false; 222 assert(TTI->getCacheLineSize() && "Cache line size is not set for target"); 223 224 bool MadeChange = false; 225 226 for (Loop *I : *LI) 227 for (Loop *L : depth_first(I)) 228 MadeChange |= runOnLoop(L); 229 230 return MadeChange; 231 } 232 233 /// A record for a potential prefetch made during the initial scan of the 234 /// loop. This is used to let a single prefetch target multiple memory accesses. 235 struct Prefetch { 236 /// The address formula for this prefetch as returned by ScalarEvolution. 237 const SCEVAddRecExpr *LSCEVAddRec; 238 /// The point of insertion for the prefetch instruction. 239 Instruction *InsertPt = nullptr; 240 /// True if targeting a write memory access. 241 bool Writes = false; 242 /// The (first seen) prefetched instruction. 243 Instruction *MemI = nullptr; 244 245 /// Constructor to create a new Prefetch for \p I. 246 Prefetch(const SCEVAddRecExpr *L, Instruction *I) : LSCEVAddRec(L) { 247 addInstruction(I); 248 }; 249 250 /// Add the instruction \param I to this prefetch. If it's not the first 251 /// one, 'InsertPt' and 'Writes' will be updated as required. 252 /// \param PtrDiff the known constant address difference to the first added 253 /// instruction. 
254 void addInstruction(Instruction *I, DominatorTree *DT = nullptr, 255 int64_t PtrDiff = 0) { 256 if (!InsertPt) { 257 MemI = I; 258 InsertPt = I; 259 Writes = isa<StoreInst>(I); 260 } else { 261 BasicBlock *PrefBB = InsertPt->getParent(); 262 BasicBlock *InsBB = I->getParent(); 263 if (PrefBB != InsBB) { 264 BasicBlock *DomBB = DT->findNearestCommonDominator(PrefBB, InsBB); 265 if (DomBB != PrefBB) 266 InsertPt = DomBB->getTerminator(); 267 } 268 269 if (isa<StoreInst>(I) && PtrDiff == 0) 270 Writes = true; 271 } 272 } 273 }; 274 275 bool LoopDataPrefetch::runOnLoop(Loop *L) { 276 bool MadeChange = false; 277 278 // Only prefetch in the inner-most loop 279 if (!L->isInnermost()) 280 return MadeChange; 281 282 SmallPtrSet<const Value *, 32> EphValues; 283 CodeMetrics::collectEphemeralValues(L, AC, EphValues); 284 285 // Calculate the number of iterations ahead to prefetch 286 CodeMetrics Metrics; 287 bool HasCall = false; 288 for (const auto BB : L->blocks()) { 289 // If the loop already has prefetches, then assume that the user knows 290 // what they are doing and don't add any more. 291 for (auto &I : *BB) { 292 if (isa<CallInst>(&I) || isa<InvokeInst>(&I)) { 293 if (const Function *F = cast<CallBase>(I).getCalledFunction()) { 294 if (F->getIntrinsicID() == Intrinsic::prefetch) 295 return MadeChange; 296 if (TTI->isLoweredToCall(F)) 297 HasCall = true; 298 } else { // indirect call. 
299 HasCall = true; 300 } 301 } 302 } 303 Metrics.analyzeBasicBlock(BB, *TTI, EphValues); 304 } 305 unsigned LoopSize = Metrics.NumInsts; 306 if (!LoopSize) 307 LoopSize = 1; 308 309 unsigned ItersAhead = getPrefetchDistance() / LoopSize; 310 if (!ItersAhead) 311 ItersAhead = 1; 312 313 if (ItersAhead > getMaxPrefetchIterationsAhead()) 314 return MadeChange; 315 316 unsigned ConstantMaxTripCount = SE->getSmallConstantMaxTripCount(L); 317 if (ConstantMaxTripCount && ConstantMaxTripCount < ItersAhead + 1) 318 return MadeChange; 319 320 unsigned NumMemAccesses = 0; 321 unsigned NumStridedMemAccesses = 0; 322 SmallVector<Prefetch, 16> Prefetches; 323 for (const auto BB : L->blocks()) 324 for (auto &I : *BB) { 325 Value *PtrValue; 326 Instruction *MemI; 327 328 if (LoadInst *LMemI = dyn_cast<LoadInst>(&I)) { 329 MemI = LMemI; 330 PtrValue = LMemI->getPointerOperand(); 331 } else if (StoreInst *SMemI = dyn_cast<StoreInst>(&I)) { 332 if (!doPrefetchWrites()) continue; 333 MemI = SMemI; 334 PtrValue = SMemI->getPointerOperand(); 335 } else continue; 336 337 unsigned PtrAddrSpace = PtrValue->getType()->getPointerAddressSpace(); 338 if (PtrAddrSpace) 339 continue; 340 NumMemAccesses++; 341 if (L->isLoopInvariant(PtrValue)) 342 continue; 343 344 const SCEV *LSCEV = SE->getSCEV(PtrValue); 345 const SCEVAddRecExpr *LSCEVAddRec = dyn_cast<SCEVAddRecExpr>(LSCEV); 346 if (!LSCEVAddRec) 347 continue; 348 NumStridedMemAccesses++; 349 350 // We don't want to double prefetch individual cache lines. If this 351 // access is known to be within one cache line of some other one that 352 // has already been prefetched, then don't prefetch this one as well. 
353 bool DupPref = false; 354 for (auto &Pref : Prefetches) { 355 const SCEV *PtrDiff = SE->getMinusSCEV(LSCEVAddRec, Pref.LSCEVAddRec); 356 if (const SCEVConstant *ConstPtrDiff = 357 dyn_cast<SCEVConstant>(PtrDiff)) { 358 int64_t PD = std::abs(ConstPtrDiff->getValue()->getSExtValue()); 359 if (PD < (int64_t) TTI->getCacheLineSize()) { 360 Pref.addInstruction(MemI, DT, PD); 361 DupPref = true; 362 break; 363 } 364 } 365 } 366 if (!DupPref) 367 Prefetches.push_back(Prefetch(LSCEVAddRec, MemI)); 368 } 369 370 unsigned TargetMinStride = 371 getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses, 372 Prefetches.size(), HasCall); 373 374 LLVM_DEBUG(dbgs() << "Prefetching " << ItersAhead 375 << " iterations ahead (loop size: " << LoopSize << ") in " 376 << L->getHeader()->getParent()->getName() << ": " << *L); 377 LLVM_DEBUG(dbgs() << "Loop has: " 378 << NumMemAccesses << " memory accesses, " 379 << NumStridedMemAccesses << " strided memory accesses, " 380 << Prefetches.size() << " potential prefetch(es), " 381 << "a minimum stride of " << TargetMinStride << ", " 382 << (HasCall ? "calls" : "no calls") << ".\n"); 383 384 for (auto &P : Prefetches) { 385 // Check if the stride of the accesses is large enough to warrant a 386 // prefetch. 
387 if (!isStrideLargeEnough(P.LSCEVAddRec, TargetMinStride)) 388 continue; 389 390 const SCEV *NextLSCEV = SE->getAddExpr(P.LSCEVAddRec, SE->getMulExpr( 391 SE->getConstant(P.LSCEVAddRec->getType(), ItersAhead), 392 P.LSCEVAddRec->getStepRecurrence(*SE))); 393 if (!isSafeToExpand(NextLSCEV, *SE)) 394 continue; 395 396 BasicBlock *BB = P.InsertPt->getParent(); 397 Type *I8Ptr = Type::getInt8PtrTy(BB->getContext(), 0/*PtrAddrSpace*/); 398 SCEVExpander SCEVE(*SE, BB->getModule()->getDataLayout(), "prefaddr"); 399 Value *PrefPtrValue = SCEVE.expandCodeFor(NextLSCEV, I8Ptr, P.InsertPt); 400 401 IRBuilder<> Builder(P.InsertPt); 402 Module *M = BB->getParent()->getParent(); 403 Type *I32 = Type::getInt32Ty(BB->getContext()); 404 Function *PrefetchFunc = Intrinsic::getDeclaration( 405 M, Intrinsic::prefetch, PrefPtrValue->getType()); 406 Builder.CreateCall( 407 PrefetchFunc, 408 {PrefPtrValue, 409 ConstantInt::get(I32, P.Writes), 410 ConstantInt::get(I32, 3), ConstantInt::get(I32, 1)}); 411 ++NumPrefetches; 412 LLVM_DEBUG(dbgs() << " Access: " 413 << *P.MemI->getOperand(isa<LoadInst>(P.MemI) ? 0 : 1) 414 << ", SCEV: " << *P.LSCEVAddRec << "\n"); 415 ORE->emit([&]() { 416 return OptimizationRemark(DEBUG_TYPE, "Prefetched", P.MemI) 417 << "prefetched memory access"; 418 }); 419 420 MadeChange = true; 421 } 422 423 return MadeChange; 424 } 425