//===-------- LoopDataPrefetch.cpp - Loop Data Prefetching Pass -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a Loop Data Prefetching Pass.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/LoopDataPrefetch.h"
#include "llvm/InitializePasses.h"

#define DEBUG_TYPE "loop-data-prefetch"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
using namespace llvm;

// Command-line overrides for the prefetching heuristics. When a flag is not
// given explicitly on the command line, the corresponding
// TargetTransformInfo hook supplies the value instead (see the getter
// helpers in LoopDataPrefetch below).
41 static cl::opt<bool> 42 PrefetchWrites("loop-prefetch-writes", cl::Hidden, cl::init(false), 43 cl::desc("Prefetch write addresses")); 44 45 static cl::opt<unsigned> 46 PrefetchDistance("prefetch-distance", 47 cl::desc("Number of instructions to prefetch ahead"), 48 cl::Hidden); 49 50 static cl::opt<unsigned> 51 MinPrefetchStride("min-prefetch-stride", 52 cl::desc("Min stride to add prefetches"), cl::Hidden); 53 54 static cl::opt<unsigned> MaxPrefetchIterationsAhead( 55 "max-prefetch-iters-ahead", 56 cl::desc("Max number of iterations to prefetch ahead"), cl::Hidden); 57 58 STATISTIC(NumPrefetches, "Number of prefetches inserted"); 59 60 namespace { 61 62 /// Loop prefetch implementation class. 63 class LoopDataPrefetch { 64 public: 65 LoopDataPrefetch(AssumptionCache *AC, DominatorTree *DT, LoopInfo *LI, 66 ScalarEvolution *SE, const TargetTransformInfo *TTI, 67 OptimizationRemarkEmitter *ORE) 68 : AC(AC), DT(DT), LI(LI), SE(SE), TTI(TTI), ORE(ORE) {} 69 70 bool run(); 71 72 private: 73 bool runOnLoop(Loop *L); 74 75 /// Check if the stride of the accesses is large enough to 76 /// warrant a prefetch. 
77 bool isStrideLargeEnough(const SCEVAddRecExpr *AR, unsigned TargetMinStride); 78 79 unsigned getMinPrefetchStride(unsigned NumMemAccesses, 80 unsigned NumStridedMemAccesses, 81 unsigned NumPrefetches, 82 bool HasCall) { 83 if (MinPrefetchStride.getNumOccurrences() > 0) 84 return MinPrefetchStride; 85 return TTI->getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses, 86 NumPrefetches, HasCall); 87 } 88 89 unsigned getPrefetchDistance() { 90 if (PrefetchDistance.getNumOccurrences() > 0) 91 return PrefetchDistance; 92 return TTI->getPrefetchDistance(); 93 } 94 95 unsigned getMaxPrefetchIterationsAhead() { 96 if (MaxPrefetchIterationsAhead.getNumOccurrences() > 0) 97 return MaxPrefetchIterationsAhead; 98 return TTI->getMaxPrefetchIterationsAhead(); 99 } 100 101 bool doPrefetchWrites() { 102 if (PrefetchWrites.getNumOccurrences() > 0) 103 return PrefetchWrites; 104 return TTI->enableWritePrefetching(); 105 } 106 107 AssumptionCache *AC; 108 DominatorTree *DT; 109 LoopInfo *LI; 110 ScalarEvolution *SE; 111 const TargetTransformInfo *TTI; 112 OptimizationRemarkEmitter *ORE; 113 }; 114 115 /// Legacy class for inserting loop data prefetches. 
116 class LoopDataPrefetchLegacyPass : public FunctionPass { 117 public: 118 static char ID; // Pass ID, replacement for typeid 119 LoopDataPrefetchLegacyPass() : FunctionPass(ID) { 120 initializeLoopDataPrefetchLegacyPassPass(*PassRegistry::getPassRegistry()); 121 } 122 123 void getAnalysisUsage(AnalysisUsage &AU) const override { 124 AU.addRequired<AssumptionCacheTracker>(); 125 AU.addRequired<DominatorTreeWrapperPass>(); 126 AU.addPreserved<DominatorTreeWrapperPass>(); 127 AU.addRequired<LoopInfoWrapperPass>(); 128 AU.addPreserved<LoopInfoWrapperPass>(); 129 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 130 AU.addRequired<ScalarEvolutionWrapperPass>(); 131 AU.addPreserved<ScalarEvolutionWrapperPass>(); 132 AU.addRequired<TargetTransformInfoWrapperPass>(); 133 } 134 135 bool runOnFunction(Function &F) override; 136 }; 137 } 138 139 char LoopDataPrefetchLegacyPass::ID = 0; 140 INITIALIZE_PASS_BEGIN(LoopDataPrefetchLegacyPass, "loop-data-prefetch", 141 "Loop Data Prefetch", false, false) 142 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 143 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 144 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 145 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 146 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 147 INITIALIZE_PASS_END(LoopDataPrefetchLegacyPass, "loop-data-prefetch", 148 "Loop Data Prefetch", false, false) 149 150 FunctionPass *llvm::createLoopDataPrefetchPass() { 151 return new LoopDataPrefetchLegacyPass(); 152 } 153 154 bool LoopDataPrefetch::isStrideLargeEnough(const SCEVAddRecExpr *AR, 155 unsigned TargetMinStride) { 156 // No need to check if any stride goes. 157 if (TargetMinStride <= 1) 158 return true; 159 160 const auto *ConstStride = dyn_cast<SCEVConstant>(AR->getStepRecurrence(*SE)); 161 // If MinStride is set, don't prefetch unless we can ensure that stride is 162 // larger. 
163 if (!ConstStride) 164 return false; 165 166 unsigned AbsStride = std::abs(ConstStride->getAPInt().getSExtValue()); 167 return TargetMinStride <= AbsStride; 168 } 169 170 PreservedAnalyses LoopDataPrefetchPass::run(Function &F, 171 FunctionAnalysisManager &AM) { 172 DominatorTree *DT = &AM.getResult<DominatorTreeAnalysis>(F); 173 LoopInfo *LI = &AM.getResult<LoopAnalysis>(F); 174 ScalarEvolution *SE = &AM.getResult<ScalarEvolutionAnalysis>(F); 175 AssumptionCache *AC = &AM.getResult<AssumptionAnalysis>(F); 176 OptimizationRemarkEmitter *ORE = 177 &AM.getResult<OptimizationRemarkEmitterAnalysis>(F); 178 const TargetTransformInfo *TTI = &AM.getResult<TargetIRAnalysis>(F); 179 180 LoopDataPrefetch LDP(AC, DT, LI, SE, TTI, ORE); 181 bool Changed = LDP.run(); 182 183 if (Changed) { 184 PreservedAnalyses PA; 185 PA.preserve<DominatorTreeAnalysis>(); 186 PA.preserve<LoopAnalysis>(); 187 return PA; 188 } 189 190 return PreservedAnalyses::all(); 191 } 192 193 bool LoopDataPrefetchLegacyPass::runOnFunction(Function &F) { 194 if (skipFunction(F)) 195 return false; 196 197 DominatorTree *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 198 LoopInfo *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 199 ScalarEvolution *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 200 AssumptionCache *AC = 201 &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 202 OptimizationRemarkEmitter *ORE = 203 &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); 204 const TargetTransformInfo *TTI = 205 &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 206 207 LoopDataPrefetch LDP(AC, DT, LI, SE, TTI, ORE); 208 return LDP.run(); 209 } 210 211 bool LoopDataPrefetch::run() { 212 // If PrefetchDistance is not set, don't run the pass. This gives an 213 // opportunity for targets to run this pass for selected subtargets only 214 // (whose TTI sets PrefetchDistance). 
215 if (getPrefetchDistance() == 0) 216 return false; 217 assert(TTI->getCacheLineSize() && "Cache line size is not set for target"); 218 219 bool MadeChange = false; 220 221 for (Loop *I : *LI) 222 for (auto L = df_begin(I), LE = df_end(I); L != LE; ++L) 223 MadeChange |= runOnLoop(*L); 224 225 return MadeChange; 226 } 227 228 /// A record for a potential prefetch made during the initial scan of the 229 /// loop. This is used to let a single prefetch target multiple memory accesses. 230 struct Prefetch { 231 /// The address formula for this prefetch as returned by ScalarEvolution. 232 const SCEVAddRecExpr *LSCEVAddRec; 233 /// The point of insertion for the prefetch instruction. 234 Instruction *InsertPt; 235 /// True if targeting a write memory access. 236 bool Writes; 237 /// The (first seen) prefetched instruction. 238 Instruction *MemI; 239 240 /// Constructor to create a new Prefetch for \param I. 241 Prefetch(const SCEVAddRecExpr *L, Instruction *I) 242 : LSCEVAddRec(L), InsertPt(nullptr), Writes(false), MemI(nullptr) { 243 addInstruction(I); 244 }; 245 246 /// Add the instruction \param I to this prefetch. If it's not the first 247 /// one, 'InsertPt' and 'Writes' will be updated as required. 248 /// \param PtrDiff the known constant address difference to the first added 249 /// instruction. 
  void addInstruction(Instruction *I, DominatorTree *DT = nullptr,
                      int64_t PtrDiff = 0) {
    if (!InsertPt) {
      // First instruction: it defines the insertion point, the (first seen)
      // prefetched instruction, and whether this is a write prefetch.
      MemI = I;
      InsertPt = I;
      Writes = isa<StoreInst>(I);
    } else {
      // Subsequent instruction: if it is in a different block, hoist the
      // insertion point to the nearest common dominator so one prefetch can
      // cover both accesses. DT must be non-null on this path.
      BasicBlock *PrefBB = InsertPt->getParent();
      BasicBlock *InsBB = I->getParent();
      if (PrefBB != InsBB) {
        BasicBlock *DomBB = DT->findNearestCommonDominator(PrefBB, InsBB);
        if (DomBB != PrefBB)
          InsertPt = DomBB->getTerminator();
      }

      // A store at the exact address of the first access (PtrDiff == 0)
      // upgrades this record to a write prefetch.
      if (isa<StoreInst>(I) && PtrDiff == 0)
        Writes = true;
    }
  }
};

bool LoopDataPrefetch::runOnLoop(Loop *L) {
  bool MadeChange = false;

  // Only prefetch in the inner-most loop
  if (!L->empty())
    return MadeChange;

  // Ephemeral values (only used by assumes) should not count toward loop size.
  SmallPtrSet<const Value *, 32> EphValues;
  CodeMetrics::collectEphemeralValues(L, AC, EphValues);

  // Calculate the number of iterations ahead to prefetch
  CodeMetrics Metrics;
  bool HasCall = false;
  for (const auto BB : L->blocks()) {
    // If the loop already has prefetches, then assume that the user knows
    // what they are doing and don't add any more.
    for (auto &I : *BB) {
      if (isa<CallInst>(&I) || isa<InvokeInst>(&I)) {
        ImmutableCallSite CS(&I);
        if (const Function *F = CS.getCalledFunction()) {
          if (F->getIntrinsicID() == Intrinsic::prefetch)
            return MadeChange;
          if (TTI->isLoweredToCall(F))
            HasCall = true;
        } else { // indirect call.
          HasCall = true;
        }
      }
    }
    Metrics.analyzeBasicBlock(BB, *TTI, EphValues);
  }
  unsigned LoopSize = Metrics.NumInsts;
  if (!LoopSize)
    LoopSize = 1;

  // Prefetch far enough ahead (in iterations) to cover the target's
  // prefetch distance measured in instructions.
  unsigned ItersAhead = getPrefetchDistance() / LoopSize;
  if (!ItersAhead)
    ItersAhead = 1;

  if (ItersAhead > getMaxPrefetchIterationsAhead())
    return MadeChange;

  // Don't prefetch past the known end of a short-trip-count loop.
  unsigned ConstantMaxTripCount = SE->getSmallConstantMaxTripCount(L);
  if (ConstantMaxTripCount && ConstantMaxTripCount < ItersAhead + 1)
    return MadeChange;

  // Scan the loop for loads (and, optionally, stores) whose addresses are
  // add-recurrences, collecting one Prefetch record per cache line.
  unsigned NumMemAccesses = 0;
  unsigned NumStridedMemAccesses = 0;
  SmallVector<Prefetch, 16> Prefetches;
  for (const auto BB : L->blocks())
    for (auto &I : *BB) {
      Value *PtrValue;
      Instruction *MemI;

      if (LoadInst *LMemI = dyn_cast<LoadInst>(&I)) {
        MemI = LMemI;
        PtrValue = LMemI->getPointerOperand();
      } else if (StoreInst *SMemI = dyn_cast<StoreInst>(&I)) {
        if (!doPrefetchWrites()) continue;
        MemI = SMemI;
        PtrValue = SMemI->getPointerOperand();
      } else continue;

      // Only the default address space (0) is handled.
      unsigned PtrAddrSpace = PtrValue->getType()->getPointerAddressSpace();
      if (PtrAddrSpace)
        continue;
      NumMemAccesses++;
      if (L->isLoopInvariant(PtrValue))
        continue;

      // Only accesses whose address is an add-recurrence (i.e. strided in
      // this loop) are candidates.
      const SCEV *LSCEV = SE->getSCEV(PtrValue);
      const SCEVAddRecExpr *LSCEVAddRec = dyn_cast<SCEVAddRecExpr>(LSCEV);
      if (!LSCEVAddRec)
        continue;
      NumStridedMemAccesses++;

      // We don't want to double prefetch individual cache lines. If this
      // access is known to be within one cache line of some other one that
      // has already been prefetched, then don't prefetch this one as well.
      bool DupPref = false;
      for (auto &Pref : Prefetches) {
        const SCEV *PtrDiff = SE->getMinusSCEV(LSCEVAddRec, Pref.LSCEVAddRec);
        if (const SCEVConstant *ConstPtrDiff =
            dyn_cast<SCEVConstant>(PtrDiff)) {
          int64_t PD = std::abs(ConstPtrDiff->getValue()->getSExtValue());
          if (PD < (int64_t) TTI->getCacheLineSize()) {
            // Fold this access into the existing record instead of making a
            // new one; this may move the record's insertion point.
            Pref.addInstruction(MemI, DT, PD);
            DupPref = true;
            break;
          }
        }
      }
      if (!DupPref)
        Prefetches.push_back(Prefetch(LSCEVAddRec, MemI));
    }

  unsigned TargetMinStride =
    getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses,
                         Prefetches.size(), HasCall);

  LLVM_DEBUG(dbgs() << "Prefetching " << ItersAhead
             << " iterations ahead (loop size: " << LoopSize << ") in "
             << L->getHeader()->getParent()->getName() << ": " << *L);
  LLVM_DEBUG(dbgs() << "Loop has: "
             << NumMemAccesses << " memory accesses, "
             << NumStridedMemAccesses << " strided memory accesses, "
             << Prefetches.size() << " potential prefetch(es), "
             << "a minimum stride of " << TargetMinStride << ", "
             << (HasCall ? "calls" : "no calls") << ".\n");

  // Materialize one prefetch intrinsic per surviving record.
  for (auto &P : Prefetches) {
    // Check if the stride of the accesses is large enough to warrant a
    // prefetch.
    if (!isStrideLargeEnough(P.LSCEVAddRec, TargetMinStride))
      continue;

    // Address ItersAhead iterations in the future: AddRec + ItersAhead*Step.
    const SCEV *NextLSCEV = SE->getAddExpr(P.LSCEVAddRec, SE->getMulExpr(
      SE->getConstant(P.LSCEVAddRec->getType(), ItersAhead),
      P.LSCEVAddRec->getStepRecurrence(*SE)));
    if (!isSafeToExpand(NextLSCEV, *SE))
      continue;

    BasicBlock *BB = P.InsertPt->getParent();
    // Address space 0 only; non-zero address spaces were filtered above.
    Type *I8Ptr = Type::getInt8PtrTy(BB->getContext(), 0/*PtrAddrSpace*/);
    SCEVExpander SCEVE(*SE, BB->getModule()->getDataLayout(), "prefaddr");
    Value *PrefPtrValue = SCEVE.expandCodeFor(NextLSCEV, I8Ptr, P.InsertPt);

    IRBuilder<> Builder(P.InsertPt);
    Module *M = BB->getParent()->getParent();
    Type *I32 = Type::getInt32Ty(BB->getContext());
    Function *PrefetchFunc = Intrinsic::getDeclaration(
        M, Intrinsic::prefetch, PrefPtrValue->getType());
    // llvm.prefetch(ptr, rw, locality, cache type): rw comes from the
    // record; locality 3 / cache type 1 request maximal-temporal-locality
    // data prefetch (per LangRef).
    Builder.CreateCall(
        PrefetchFunc,
        {PrefPtrValue,
         ConstantInt::get(I32, P.Writes),
         ConstantInt::get(I32, 3), ConstantInt::get(I32, 1)});
    ++NumPrefetches;
    LLVM_DEBUG(dbgs() << "  Access: "
               << *P.MemI->getOperand(isa<LoadInst>(P.MemI) ? 0 : 1)
               << ", SCEV: " << *P.LSCEVAddRec << "\n");
    ORE->emit([&]() {
      return OptimizationRemark(DEBUG_TYPE, "Prefetched", P.MemI)
             << "prefetched memory access";
      });

    MadeChange = true;
  }

  return MadeChange;
}