//===- LoopCacheAnalysis.cpp - Loop Cache Analysis -------------------------==//
//
//                     The LLVM Compiler Infrastructure
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines the implementation for the loop cache analysis.
/// The implementation is largely based on the following paper:
///
///       Compiler Optimizations for Improving Data Locality
///       By: Steve Carr, Katherine S. McKinley, Chau-Wen Tseng
///       http://www.cs.utexas.edu/users/mckinley/papers/asplos-1994.pdf
///
/// The general approach taken to estimate the number of cache lines used by
/// the memory references in an inner loop is:
///    1. Partition memory references that exhibit temporal or spacial reuse
///       into reference groups.
///    2. For each loop L in the loop nest LN:
///       a. Compute the cost of each reference group
///       b. Compute the loop cost by summing up the reference group costs
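///
/// For example, given a nest such as:
///
///      for (i = 0; i < n; i++)
///        for (j = 0; j < m; j++)
///          A[i][j] += B[j][i];
///
/// when the j-loop is considered as the innermost loop, the load and store of
/// A[i][j] fall into one reference group (they access the same location),
/// while B[j][i] forms a group of its own. The A group walks consecutive
/// memory and is expected to cost roughly (TripCount * Stride) / CLS cache
/// lines, whereas the B group strides across rows and is expected to cost
/// about TripCount cache lines.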
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/LoopCacheAnalysis.h"
#include "llvm/ADT/BreadthFirstIterator.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

#define DEBUG_TYPE "loop-cache-cost"

static cl::opt<unsigned> DefaultTripCount(
    "default-trip-count", cl::init(100), cl::Hidden,
    cl::desc("Use this to specify the default trip count of a loop"));

// In this analysis two array references are considered to exhibit temporal
// reuse if they access either the same memory location, or a memory location
// with distance smaller than a configurable threshold.
static cl::opt<unsigned> TemporalReuseThreshold(
    "temporal-reuse-threshold", cl::init(2), cl::Hidden,
    cl::desc("Use this to specify the max. distance between array elements "
             "accessed in a loop so that the elements are classified to have "
             "temporal reuse"));

/// Retrieve the innermost loop in the given loop nest \p Loops. Return nullptr
/// if any loop in the supplied loop vector has more than one sibling. The loop
/// vector is expected to contain loops collected in breadth-first order.
static Loop *getInnerMostLoop(const LoopVectorTy &Loops) {
  assert(!Loops.empty() && "Expecting a non-empty loop vector");

  Loop *LastLoop = Loops.back();
  Loop *ParentLoop = LastLoop->getParentLoop();

  if (ParentLoop == nullptr) {
    assert(Loops.size() == 1 && "Expecting a single loop");
    return LastLoop;
  }

  return (std::is_sorted(Loops.begin(), Loops.end(),
                         [](const Loop *L1, const Loop *L2) {
                           return L1->getLoopDepth() < L2->getLoopDepth();
                         }))
             ? LastLoop
             : nullptr;
}

static bool isOneDimensionalArray(const SCEV &AccessFn, const SCEV &ElemSize,
                                  const Loop &L, ScalarEvolution &SE) {
  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(&AccessFn);
  if (!AR || !AR->isAffine())
    return false;

  assert(AR->getLoop() && "AR should have a loop");

  // Check that start and increment are not add recurrences.
  const SCEV *Start = AR->getStart();
  const SCEV *Step = AR->getStepRecurrence(SE);
  if (isa<SCEVAddRecExpr>(Start) || isa<SCEVAddRecExpr>(Step))
    return false;

  // Check that start and increment are both invariant in the loop.
  if (!SE.isLoopInvariant(Start, &L) || !SE.isLoopInvariant(Step, &L))
    return false;

  return AR->getStepRecurrence(SE) == &ElemSize;
}

/// Compute the trip count for the given loop \p L. Return the SCEV expression
/// for the trip count or nullptr if it cannot be computed.
static const SCEV *computeTripCount(const Loop &L, ScalarEvolution &SE) {
  const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(&L);
  if (isa<SCEVCouldNotCompute>(BackedgeTakenCount) ||
      !isa<SCEVConstant>(BackedgeTakenCount))
    return nullptr;

  return SE.getAddExpr(BackedgeTakenCount,
                       SE.getOne(BackedgeTakenCount->getType()));
}

//===----------------------------------------------------------------------===//
// IndexedReference implementation
//
raw_ostream &llvm::operator<<(raw_ostream &OS, const IndexedReference &R) {
  if (!R.IsValid) {
    OS << R.StoreOrLoadInst;
    OS << ", IsValid=false.";
    return OS;
  }

  OS << *R.BasePointer;
  for (const SCEV *Subscript : R.Subscripts)
    OS << "[" << *Subscript << "]";

  OS << ", Sizes: ";
  for (const SCEV *Size : R.Sizes)
    OS << "[" << *Size << "]";

  return OS;
}

IndexedReference::IndexedReference(Instruction &StoreOrLoadInst,
                                   const LoopInfo &LI, ScalarEvolution &SE)
    : StoreOrLoadInst(StoreOrLoadInst), SE(SE) {
  assert((isa<StoreInst>(StoreOrLoadInst) || isa<LoadInst>(StoreOrLoadInst)) &&
         "Expecting a load or store instruction");

  IsValid = delinearize(LI);
  if (IsValid)
    LLVM_DEBUG(dbgs().indent(2) << "Successfully delinearized: " << *this
                                << "\n");
}

Optional<bool> IndexedReference::hasSpacialReuse(const IndexedReference &Other,
                                                 unsigned CLS,
                                                 AliasAnalysis &AA) const {
  assert(IsValid && "Expecting a valid reference");

  if (BasePointer != Other.getBasePointer() && !isAliased(Other, AA)) {
    LLVM_DEBUG(dbgs().indent(2)
               << "No spacial reuse: different base pointers\n");
    return false;
  }

  unsigned NumSubscripts = getNumSubscripts();
  if (NumSubscripts != Other.getNumSubscripts()) {
    LLVM_DEBUG(dbgs().indent(2)
               << "No spacial reuse: different number of subscripts\n");
    return false;
  }

  // All subscripts must be equal, except the last one (the rightmost).
  for (auto SubNum : seq<unsigned>(0, NumSubscripts - 1)) {
    if (getSubscript(SubNum) != Other.getSubscript(SubNum)) {
      LLVM_DEBUG(dbgs().indent(2) << "No spacial reuse, different subscripts: "
                                  << "\n\t" << *getSubscript(SubNum) << "\n\t"
                                  << *Other.getSubscript(SubNum) << "\n");
      return false;
    }
  }

  // The difference between the last subscripts must be less than the cache
  // line size.
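  // For example, A[i][j] and A[i][j + 1] share every subscript except the
  // last one, and their last subscripts differ by a single element, so the
  // two accesses are likely to fall on the same cache line.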
  const SCEV *LastSubscript = getLastSubscript();
  const SCEV *OtherLastSubscript = Other.getLastSubscript();
  const SCEVConstant *Diff = dyn_cast<SCEVConstant>(
      SE.getMinusSCEV(LastSubscript, OtherLastSubscript));

  if (Diff == nullptr) {
    LLVM_DEBUG(dbgs().indent(2)
               << "No spacial reuse, difference between subscripts:\n\t"
               << *LastSubscript << "\n\t" << *OtherLastSubscript
               << "\nis not constant.\n");
    return None;
  }

  bool InSameCacheLine = (Diff->getValue()->getSExtValue() < CLS);

  LLVM_DEBUG({
    if (InSameCacheLine)
      dbgs().indent(2) << "Found spacial reuse.\n";
    else
      dbgs().indent(2) << "No spacial reuse.\n";
  });

  return InSameCacheLine;
}

Optional<bool> IndexedReference::hasTemporalReuse(const IndexedReference &Other,
                                                  unsigned MaxDistance,
                                                  const Loop &L,
                                                  DependenceInfo &DI,
                                                  AliasAnalysis &AA) const {
  assert(IsValid && "Expecting a valid reference");

  if (BasePointer != Other.getBasePointer() && !isAliased(Other, AA)) {
    LLVM_DEBUG(dbgs().indent(2)
               << "No temporal reuse: different base pointer\n");
    return false;
  }

  std::unique_ptr<Dependence> D =
      DI.depends(&StoreOrLoadInst, &Other.StoreOrLoadInst, true);

  if (D == nullptr) {
    LLVM_DEBUG(dbgs().indent(2) << "No temporal reuse: no dependence\n");
    return false;
  }

  if (D->isLoopIndependent()) {
    LLVM_DEBUG(dbgs().indent(2) << "Found temporal reuse\n");
    return true;
  }

  // Check the dependence distance at every loop level. There is temporal
  // reuse if the distance at the given loop's depth is small (|d| <=
  // MaxDistance) and it is zero at every other loop level.
  int LoopDepth = L.getLoopDepth();
  int Levels = D->getLevels();
  for (int Level = 1; Level <= Levels; ++Level) {
    const SCEV *Distance = D->getDistance(Level);
    const SCEVConstant *SCEVConst = dyn_cast_or_null<SCEVConstant>(Distance);

    if (SCEVConst == nullptr) {
      LLVM_DEBUG(dbgs().indent(2) << "No temporal reuse: distance unknown\n");
      return None;
    }

    const ConstantInt &CI = *SCEVConst->getValue();
    if (Level != LoopDepth && !CI.isZero()) {
      LLVM_DEBUG(dbgs().indent(2)
                 << "No temporal reuse: distance is not zero at depth=" << Level
                 << "\n");
      return false;
    } else if (Level == LoopDepth && CI.getSExtValue() > MaxDistance) {
      LLVM_DEBUG(
          dbgs().indent(2)
          << "No temporal reuse: distance is greater than MaxDistance at depth="
          << Level << "\n");
      return false;
    }
  }

  LLVM_DEBUG(dbgs().indent(2) << "Found temporal reuse\n");
  return true;
}

CacheCostTy IndexedReference::computeRefCost(const Loop &L,
                                             unsigned CLS) const {
  assert(IsValid && "Expecting a valid reference");
  LLVM_DEBUG({
    dbgs().indent(2) << "Computing cache cost for:\n";
    dbgs().indent(4) << *this << "\n";
  });

  // If the indexed reference is loop invariant the cost is one.
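  // For instance, with respect to an innermost j-loop, a reference such as
  // A[i] does not depend on the loop induction variable, so it is expected to
  // touch a single cache line for the entire execution of that loop.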
  if (isLoopInvariant(L)) {
    LLVM_DEBUG(dbgs().indent(4) << "Reference is loop invariant: RefCost=1\n");
    return 1;
  }

  const SCEV *TripCount = computeTripCount(L, SE);
  if (!TripCount) {
    LLVM_DEBUG(dbgs() << "Trip count of loop " << L.getName()
                      << " could not be computed, using DefaultTripCount\n");
    const SCEV *ElemSize = Sizes.back();
    TripCount = SE.getConstant(ElemSize->getType(), DefaultTripCount);
  }
  LLVM_DEBUG(dbgs() << "TripCount=" << *TripCount << "\n");

  // If the indexed reference is 'consecutive' the cost is
  // (TripCount*Stride)/CLS, otherwise the cost is TripCount.
  const SCEV *RefCost = TripCount;

  if (isConsecutive(L, CLS)) {
    const SCEV *Coeff = getLastCoefficient();
    const SCEV *ElemSize = Sizes.back();
    const SCEV *Stride = SE.getMulExpr(Coeff, ElemSize);
    const SCEV *CacheLineSize = SE.getConstant(Stride->getType(), CLS);
    const SCEV *Numerator = SE.getMulExpr(Stride, TripCount);
    RefCost = SE.getUDivExpr(Numerator, CacheLineSize);
    LLVM_DEBUG(dbgs().indent(4)
               << "Access is consecutive: RefCost=(TripCount*Stride)/CLS="
               << *RefCost << "\n");
  } else
    LLVM_DEBUG(dbgs().indent(4)
               << "Access is not consecutive: RefCost=TripCount=" << *RefCost
               << "\n");

  // Attempt to fold RefCost into a constant.
  if (auto ConstantCost = dyn_cast<SCEVConstant>(RefCost))
    return ConstantCost->getValue()->getSExtValue();

  LLVM_DEBUG(dbgs().indent(4)
             << "RefCost is not a constant! Setting to RefCost=InvalidCost "
                "(invalid value).\n");

  return CacheCost::InvalidCost;
}

bool IndexedReference::delinearize(const LoopInfo &LI) {
  assert(Subscripts.empty() && "Subscripts should be empty");
  assert(Sizes.empty() && "Sizes should be empty");
  assert(!IsValid && "Should be called once from the constructor");
  LLVM_DEBUG(dbgs() << "Delinearizing: " << StoreOrLoadInst << "\n");

  const SCEV *ElemSize = SE.getElementSize(&StoreOrLoadInst);
  const BasicBlock *BB = StoreOrLoadInst.getParent();

  for (Loop *L = LI.getLoopFor(BB); L != nullptr; L = L->getParentLoop()) {
    const SCEV *AccessFn =
        SE.getSCEVAtScope(getPointerOperand(&StoreOrLoadInst), L);

    BasePointer = dyn_cast<SCEVUnknown>(SE.getPointerBase(AccessFn));
    if (BasePointer == nullptr) {
      LLVM_DEBUG(
          dbgs().indent(2)
          << "ERROR: failed to delinearize, can't identify base pointer\n");
      return false;
    }

    AccessFn = SE.getMinusSCEV(AccessFn, BasePointer);

    LLVM_DEBUG(dbgs().indent(2) << "In Loop '" << L->getName()
                                << "', AccessFn: " << *AccessFn << "\n");

    SE.delinearize(AccessFn, Subscripts, Sizes,
                   SE.getElementSize(&StoreOrLoadInst));

    if (Subscripts.empty() || Sizes.empty() ||
        Subscripts.size() != Sizes.size()) {
      // Attempt to determine whether we have a single dimensional array
      // access before giving up.
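      // Delinearization commonly yields no subscripts for a genuinely
      // one-dimensional access such as A[i]; in that case the single
      // subscript can be recovered by dividing the access function by the
      // element size.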
      if (!isOneDimensionalArray(*AccessFn, *ElemSize, *L, SE)) {
        LLVM_DEBUG(dbgs().indent(2)
                   << "ERROR: failed to delinearize reference\n");
        Subscripts.clear();
        Sizes.clear();
        break;
      }

      const SCEV *Div = SE.getUDivExactExpr(AccessFn, ElemSize);
      Subscripts.push_back(Div);
      Sizes.push_back(ElemSize);
    }

    return all_of(Subscripts, [&](const SCEV *Subscript) {
      return isSimpleAddRecurrence(*Subscript, *L);
    });
  }

  return false;
}

bool IndexedReference::isLoopInvariant(const Loop &L) const {
  Value *Addr = getPointerOperand(&StoreOrLoadInst);
  assert(Addr != nullptr && "Expecting either a load or a store instruction");
  assert(SE.isSCEVable(Addr->getType()) && "Addr should be SCEVable");

  if (SE.isLoopInvariant(SE.getSCEV(Addr), &L))
    return true;

  // The indexed reference is loop invariant if none of the coefficients use
  // the loop induction variable.
  bool allCoeffForLoopAreZero = all_of(Subscripts, [&](const SCEV *Subscript) {
    return isCoeffForLoopZeroOrInvariant(*Subscript, L);
  });

  return allCoeffForLoopAreZero;
}

bool IndexedReference::isConsecutive(const Loop &L, unsigned CLS) const {
  // The indexed reference is 'consecutive' if the only coefficient that uses
  // the loop induction variable is the last one...
  const SCEV *LastSubscript = Subscripts.back();
  for (const SCEV *Subscript : Subscripts) {
    if (Subscript == LastSubscript)
      continue;
    if (!isCoeffForLoopZeroOrInvariant(*Subscript, L))
      return false;
  }

  // ...and the access stride is less than the cache line size.
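  // For example, a row-major access A[i][j] in an innermost j-loop advances
  // by one element per iteration, so its stride is the element size (e.g. 8
  // bytes for a double), typically well below a 64-byte cache line.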
  const SCEV *Coeff = getLastCoefficient();
  const SCEV *ElemSize = Sizes.back();
  const SCEV *Stride = SE.getMulExpr(Coeff, ElemSize);
  const SCEV *CacheLineSize = SE.getConstant(Stride->getType(), CLS);

  return SE.isKnownPredicate(ICmpInst::ICMP_ULT, Stride, CacheLineSize);
}

const SCEV *IndexedReference::getLastCoefficient() const {
  const SCEV *LastSubscript = getLastSubscript();
  assert(isa<SCEVAddRecExpr>(LastSubscript) &&
         "Expecting a SCEV add recurrence expression");
  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LastSubscript);
  return AR->getStepRecurrence(SE);
}

bool IndexedReference::isCoeffForLoopZeroOrInvariant(const SCEV &Subscript,
                                                     const Loop &L) const {
  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(&Subscript);
  return (AR != nullptr) ? AR->getLoop() != &L
                         : SE.isLoopInvariant(&Subscript, &L);
}

bool IndexedReference::isSimpleAddRecurrence(const SCEV &Subscript,
                                             const Loop &L) const {
  if (!isa<SCEVAddRecExpr>(Subscript))
    return false;

  const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(&Subscript);
  assert(AR->getLoop() && "AR should have a loop");

  if (!AR->isAffine())
    return false;

  const SCEV *Start = AR->getStart();
  const SCEV *Step = AR->getStepRecurrence(SE);

  if (!SE.isLoopInvariant(Start, &L) || !SE.isLoopInvariant(Step, &L))
    return false;

  return true;
}

bool IndexedReference::isAliased(const IndexedReference &Other,
                                 AliasAnalysis &AA) const {
  const auto &Loc1 = MemoryLocation::get(&StoreOrLoadInst);
  const auto &Loc2 = MemoryLocation::get(&Other.StoreOrLoadInst);
  return AA.isMustAlias(Loc1, Loc2);
}

//===----------------------------------------------------------------------===//
// CacheCost implementation
//
raw_ostream &llvm::operator<<(raw_ostream &OS, const CacheCost &CC) {
  for (const auto &LC : CC.LoopCosts) {
    const Loop *L = LC.first;
    OS << "Loop '" << L->getName() << "' has cost = " << LC.second << "\n";
  }
  return OS;
}

CacheCost::CacheCost(const LoopVectorTy &Loops, const LoopInfo &LI,
                     ScalarEvolution &SE, TargetTransformInfo &TTI,
                     AliasAnalysis &AA, DependenceInfo &DI,
                     Optional<unsigned> TRT)
    : Loops(Loops), TripCounts(), LoopCosts(),
      TRT(TRT == None ? Optional<unsigned>(TemporalReuseThreshold) : TRT),
      LI(LI), SE(SE), TTI(TTI), AA(AA), DI(DI) {
  assert(!Loops.empty() && "Expecting a non-empty loop vector.");

  for (const Loop *L : Loops) {
    unsigned TripCount = SE.getSmallConstantTripCount(L);
    TripCount = (TripCount == 0) ? DefaultTripCount : TripCount;
    TripCounts.push_back({L, TripCount});
  }

  calculateCacheFootprint();
}

std::unique_ptr<CacheCost>
CacheCost::getCacheCost(Loop &Root, LoopStandardAnalysisResults &AR,
                        DependenceInfo &DI, Optional<unsigned> TRT) {
  if (Root.getParentLoop()) {
    LLVM_DEBUG(dbgs() << "Expecting the outermost loop in a loop nest\n");
    return nullptr;
  }

  LoopVectorTy Loops;
  for (Loop *L : breadth_first(&Root))
    Loops.push_back(L);

  if (!getInnerMostLoop(Loops)) {
    LLVM_DEBUG(dbgs() << "Cannot compute cache cost of loop nest with more "
                         "than one innermost loop\n");
    return nullptr;
  }

  return std::make_unique<CacheCost>(Loops, AR.LI, AR.SE, AR.TTI, AR.AA, DI,
                                     TRT);
}

void CacheCost::calculateCacheFootprint() {
  LLVM_DEBUG(dbgs() << "POPULATING REFERENCE GROUPS\n");
  ReferenceGroupsTy RefGroups;
  if (!populateReferenceGroups(RefGroups))
    return;

  LLVM_DEBUG(dbgs() << "COMPUTING LOOP CACHE COSTS\n");
  for (const Loop *L : Loops) {
    assert((std::find_if(LoopCosts.begin(), LoopCosts.end(),
                         [L](const LoopCacheCostTy &LCC) {
                           return LCC.first == L;
                         }) == LoopCosts.end()) &&
           "Should not add duplicate element");
    CacheCostTy LoopCost = computeLoopCacheCost(*L, RefGroups);
    LoopCosts.push_back(std::make_pair(L, LoopCost));
  }

  sortLoopCosts();
  RefGroups.clear();
}

bool CacheCost::populateReferenceGroups(ReferenceGroupsTy &RefGroups) const {
  assert(RefGroups.empty() && "Reference groups should be empty");

  unsigned CLS = TTI.getCacheLineSize();
  Loop *InnerMostLoop = getInnerMostLoop(Loops);
  assert(InnerMostLoop != nullptr && "Expecting a valid innermost loop");

  for (BasicBlock *BB : InnerMostLoop->getBlocks()) {
    for (Instruction &I : *BB) {
      if (!isa<StoreInst>(I) && !isa<LoadInst>(I))
        continue;

      std::unique_ptr<IndexedReference> R(new IndexedReference(I, LI, SE));
      if (!R->isValid())
        continue;

      bool Added = false;
      for (ReferenceGroupTy &RefGroup : RefGroups) {
        const IndexedReference &Representative = *RefGroup.front().get();
        LLVM_DEBUG({
          dbgs() << "References:\n";
          dbgs().indent(2) << *R << "\n";
          dbgs().indent(2) << Representative << "\n";
        });

        Optional<bool> HasTemporalReuse =
            R->hasTemporalReuse(Representative, *TRT, *InnerMostLoop, DI, AA);
        Optional<bool> HasSpacialReuse =
            R->hasSpacialReuse(Representative, CLS, AA);

        if ((HasTemporalReuse.hasValue() && *HasTemporalReuse) ||
            (HasSpacialReuse.hasValue() && *HasSpacialReuse)) {
          RefGroup.push_back(std::move(R));
          Added = true;
          break;
        }
      }

      if (!Added) {
        ReferenceGroupTy RG;
        RG.push_back(std::move(R));
        RefGroups.push_back(std::move(RG));
      }
    }
  }

  if (RefGroups.empty())
    return false;

  LLVM_DEBUG({
    dbgs() << "\nIDENTIFIED REFERENCE GROUPS:\n";
    int n = 1;
    for (const ReferenceGroupTy &RG : RefGroups) {
      dbgs().indent(2) << "RefGroup " << n << ":\n";
      for (const auto &IR : RG)
        dbgs().indent(4) << *IR << "\n";
      n++;
    }
    dbgs() << "\n";
  });

  return true;
}
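
// When loop L is treated as the innermost loop of the nest, its cache cost is
// estimated as the sum of the reference group costs, each scaled by the
// product of the trip counts of all the other loops in the nest. For example,
// in a 2-deep nest with trip counts TC_i and TC_j, the cost of considering
// the j-loop as innermost is roughly:
//   sum over each reference group RG of: RefGroupCost(RG, j-loop) * TC_i.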
CacheCostTy
CacheCost::computeLoopCacheCost(const Loop &L,
                                const ReferenceGroupsTy &RefGroups) const {
  if (!L.isLoopSimplifyForm())
    return InvalidCost;

  LLVM_DEBUG(dbgs() << "Considering loop '" << L.getName()
                    << "' as innermost loop.\n");

  // Compute the product of the trip counts of each other loop in the nest.
  CacheCostTy TripCountsProduct = 1;
  for (const auto &TC : TripCounts) {
    if (TC.first == &L)
      continue;
    TripCountsProduct *= TC.second;
  }

  CacheCostTy LoopCost = 0;
  for (const ReferenceGroupTy &RG : RefGroups) {
    CacheCostTy RefGroupCost = computeRefGroupCacheCost(RG, L);
    LoopCost += RefGroupCost * TripCountsProduct;
  }

  LLVM_DEBUG(dbgs().indent(2) << "Loop '" << L.getName()
                              << "' has cost=" << LoopCost << "\n");

  return LoopCost;
}

CacheCostTy CacheCost::computeRefGroupCacheCost(const ReferenceGroupTy &RG,
                                                const Loop &L) const {
  assert(!RG.empty() && "Reference group should have at least one member.");

  const IndexedReference *Representative = RG.front().get();
  return Representative->computeRefCost(L, TTI.getCacheLineSize());
}

//===----------------------------------------------------------------------===//
// LoopCachePrinterPass implementation
//
PreservedAnalyses LoopCachePrinterPass::run(Loop &L, LoopAnalysisManager &AM,
                                            LoopStandardAnalysisResults &AR,
                                            LPMUpdater &U) {
  Function *F = L.getHeader()->getParent();
  DependenceInfo DI(F, &AR.AA, &AR.SE, &AR.LI);

  if (auto CC = CacheCost::getCacheCost(L, AR, DI))
    OS << *CC;

  return PreservedAnalyses::all();
}