Lines Matching +full:atomic +full:- +full:threshold +full:- +full:us
1 //===- LoopAccessAnalysis.cpp - Loop Access Analysis Implementation --------===//
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
12 //===----------------------------------------------------------------------===//
71 #define DEBUG_TYPE "loop-accesses"
74 VectorizationFactor("force-vector-width", cl::Hidden,
80 VectorizationInterleave("force-vector-interleave", cl::Hidden,
88 "runtime-memory-check-threshold", cl::Hidden,
96 "memory-check-merge-threshold", cl::Hidden,
104 /// We collect dependences up to this threshold.
106 MaxDependences("max-dependences", cl::Hidden,
108 "loop-access analysis (default = 100)"),
123 "enable-mem-access-versioning", cl::init(true), cl::Hidden,
126 /// Enable store-to-load forwarding conflict detection. This option can
129 "store-to-load-forwarding-conflict-detection", cl::Hidden,
130 cl::desc("Enable conflict detection in loop-access analysis"),
134 "max-forked-scev-depth", cl::Hidden,
139 "laa-speculate-unit-stride", cl::Hidden,
140 cl::desc("Speculate that non-constant strides are unit in LAA"),
144 "hoist-runtime-checks", cl::Hidden,
163 // For a non-symbolic stride, just return the original expression.
166 const SCEV *StrideSCEV = SI->second;
174 const auto *CT = SE->getOne(StrideSCEV->getType());
175 PSE.addPredicate(*SE->getEqualPredicate(StrideSCEV, CT));
187 .PointerValue->getType()
188 ->getPointerAddressSpace()),
194 /// Let's assume A is the first access and B is a memory access on N-th loop
198 /// N is a calculated back-edge taken count:
199 /// N = (TripCount > 0) ? RoundDown(TripCount - 1, VF) : 0
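As a worked illustration of the interval these lines describe (editor's sketch; the concrete numbers are hypothetical, not taken from the file): for a 4-byte access starting at A with Step = 4, TripCount = 100 and VF = 4,

//   N       = RoundDown(100 - 1, 4) = 96
//   B       = A + Step * N          = A + 384
//   ScStart = A,  ScEnd = B + EltSize = A + 388
// i.e. ScEnd is bumped by the element size (see the getStoreSizeOfExpr call
// further down) so the checked range [ScStart, ScEnd) is end-exclusive.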
215 {SE->getCouldNotCompute(), SE->getCouldNotCompute()}});
217 return Iter->second;
222 if (SE->isLoopInvariant(PtrExpr, Lp)) {
227 ScStart = AR->getStart();
228 ScEnd = AR->evaluateAtIteration(Ex, *SE);
229 const SCEV *Step = AR->getStepRecurrence(*SE);
234 if (CStep->getValue()->isNegative())
240 ScStart = SE->getUMinExpr(ScStart, ScEnd);
241 ScEnd = SE->getUMaxExpr(AR->getStart(), ScEnd);
244 return {SE->getCouldNotCompute(), SE->getCouldNotCompute()};
246 assert(SE->isLoopInvariant(ScStart, Lp) && "ScStart needs to be invariant");
247 assert(SE->isLoopInvariant(ScEnd, Lp) && "ScEnd needs to be invariant");
250 auto &DL = Lp->getHeader()->getDataLayout();
251 Type *IdxTy = DL.getIndexType(PtrExpr->getType());
252 const SCEV *EltSizeSCEV = SE->getStoreSizeOfExpr(IdxTy, AccessTy);
253 ScEnd = SE->getAddExpr(ScEnd, EltSizeSCEV);
255 Iter->second = {ScStart, ScEnd};
256 return Iter->second;
288 if (!DC.getOrderForAccess(Src->PointerValue, !Src->IsWritePtr).empty() ||
289 !DC.getOrderForAccess(Sink->PointerValue, !Sink->IsWritePtr).empty())
293 DC.getOrderForAccess(Src->PointerValue, Src->IsWritePtr);
295 DC.getOrderForAccess(Sink->PointerValue, Sink->IsWritePtr);
305 auto *SrcAR = dyn_cast<SCEVAddRecExpr>(Src->Expr);
306 auto *SinkAR = dyn_cast<SCEVAddRecExpr>(Sink->Expr);
307 if (!SrcAR || !SinkAR || SrcAR->getLoop() != DC.getInnermostLoop() ||
308 SinkAR->getLoop() != DC.getInnermostLoop())
312 DC.getInstructionsForAccess(Src->PointerValue, Src->IsWritePtr);
314 DC.getInstructionsForAccess(Sink->PointerValue, Sink->IsWritePtr);
321 SinkAR->getLoop()->getHeader()->getDataLayout();
328 auto *Step = dyn_cast<SCEVConstant>(SinkAR->getStepRecurrence(*SE));
329 if (!Step || Step != SrcAR->getStepRecurrence(*SE) ||
330 Step->getAPInt().abs() != AllocSize)
334 IntegerType::get(Src->PointerValue->getContext(),
338 if (Step->getValue()->isNegative())
341 const SCEV *SinkStartInt = SE->getPtrToIntExpr(SinkAR->getStart(), IntTy);
342 const SCEV *SrcStartInt = SE->getPtrToIntExpr(SrcAR->getStart(), IntTy);
347 const Loop *InnerLoop = SrcAR->getLoop();
352 if (HoistRuntimeChecks && InnerLoop->getParentLoop() &&
356 const Loop *StartARLoop = SrcStartAR->getLoop();
357 if (StartARLoop == SinkStartAR->getLoop() &&
358 StartARLoop == InnerLoop->getParentLoop() &&
362 SrcStartAR->getStepRecurrence(*SE) !=
363 SinkStartAR->getStepRecurrence(*SE)) {
374 Src->NeedsFreeze || Sink->NeedsFreeze);
415 const SCEV *Diff = SE->getMinusSCEV(J, I);
420 return C->getValue()->isNegative() ? J : I;
427 RtCheck.Pointers[Index].PointerValue->getType()->getPointerAddressSpace(),
458 this->NeedsFreeze |= NeedsFreeze;
466 // - We know that pointers in the same equivalence class share
469 // - We wouldn't be able to merge two pointers for which we need
477 // - if the difference between this pointer and the min/max bounds
484 // with a non-constant difference, we shouldn't perform any pointer
496 // us to perform an accurate check in this case.
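A small concrete instance of the grouping argument above (editor's example; the array and offsets are hypothetical):

// Pointers &A[0], &A[1], &A[2] (4-byte elements) fall into one equivalence
// class, with constant offsets 0, 4 and 8 from the group's minimum start.
// They can therefore share one group whose bounds are
// [min(Starts), max(Ends)) = [&A[0], &A[2] + 4), and a single range check
// against another group covers all three. If a pointer's offset from the
// group bounds is not a compile-time constant, it is kept in a group of its
// own, as the comment above notes.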
519 It->second.push_back(Index);
548 auto PointerI = PositionMap.find(MI->getPointer());
551 for (unsigned Pointer : PointerI->second) {
561 // reasonable. If we do end up hitting this threshold, the algorithm
575 // We couldn't add this pointer to any existing set or the threshold
591 return (PtrToPartition[PtrIdx1] != -1 &&
619 const auto &First = Check1->Members, &Second = Check2->Members;
635 OS.indent(Depth) << "Run-time memory checks:\n";
687 /// Check if we can emit a run-time no-alias check for \p Access.
689 /// Returns true if we can emit a run-time no-alias check for \p Access.
691 /// adds a run-time check for it to \p RtCheck. If \p Assume is true,
692 /// we will attempt to use additional run-time checks in order to get
702 /// non-intersection.
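For reference, the pairwise predicate such a run-time check reduces to is a disjointness test on the two [Start, End) ranges (editor's illustration of the general shape, not a quote from this file):

//   NoConflict(P1, P2) = (P2.Start >= P1.End) || (P1.Start >= P2.End)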
751 // iteration-local.
752 if (any_of(ScopeList->operands(), [&](Metadata *Scope) {
786 /// Sets of potentially dependent accesses - members of one set share an
800 /// The SCEV predicate containing all the SCEV-related assumptions.
814 /// by adding run-time checks (overflow checks) if necessary.
817 // The bounds for a loop-invariant pointer are trivial.
818 if (PSE.getSE()->isLoopInvariant(PtrScev, L))
829 return AR->isAffine();
837 if (PSE.getSE()->isLoopInvariant(PtrScev, L))
858 // SCEV does not look through non-header PHIs inside the loop. Such phis
861 if (PN && InnermostLoop.contains(PN->getParent()) &&
862 PN->getParent() != InnermostLoop.getHeader()) {
863 for (const Use &Inc : PN->incoming_values())
895 const SCEV *Scev = SE->getSCEV(Ptr);
896 if (isa<SCEVAddRecExpr>(Scev) || L->isLoopInvariant(Ptr) ||
902 Depth--;
911 return SE->getAddExpr(L, R);
913 return SE->getMinusSCEV(L, R);
920 unsigned Opcode = I->getOpcode();
924 Type *SourceTy = GEP->getSourceElementType();
927 if (I->getNumOperands() != 2 || SourceTy->isVectorTy()) {
933 findForkedSCEVs(SE, L, I->getOperand(0), BaseScevs, Depth);
934 findForkedSCEVs(SE, L, I->getOperand(1), OffsetScevs, Depth);
953 Type *IntPtrTy = SE->getEffectiveSCEVType(
954 SE->getSCEV(GEP->getPointerOperand())->getType());
959 const SCEV *Size = SE->getSizeOfExpr(IntPtrTy, SourceTy);
962 const SCEV *Scaled1 = SE->getMulExpr(
963 Size, SE->getTruncateOrSignExtend(get<0>(OffsetScevs[0]), IntPtrTy));
964 const SCEV *Scaled2 = SE->getMulExpr(
965 Size, SE->getTruncateOrSignExtend(get<0>(OffsetScevs[1]), IntPtrTy));
966 ScevList.emplace_back(SE->getAddExpr(get<0>(BaseScevs[0]), Scaled1),
968 ScevList.emplace_back(SE->getAddExpr(get<0>(BaseScevs[1]), Scaled2),
977 findForkedSCEVs(SE, L, I->getOperand(1), ChildScevs, Depth);
978 findForkedSCEVs(SE, L, I->getOperand(2), ChildScevs, Depth);
991 if (I->getNumOperands() == 2) {
992 findForkedSCEVs(SE, L, I->getOperand(0), ChildScevs, Depth);
993 findForkedSCEVs(SE, L, I->getOperand(1), ChildScevs, Depth);
1006 findForkedSCEVs(SE, L, I->getOperand(0), LScevs, Depth);
1007 findForkedSCEVs(SE, L, I->getOperand(1), RScevs, Depth);
1046 assert(SE->isSCEVable(Ptr->getType()) && "Value is not SCEVable!");
1054 SE->isLoopInvariant(get<0>(Scevs[0]), L)) &&
1056 SE->isLoopInvariant(get<0>(Scevs[1]), L))) {
1205 // dependence sets (in which case RunningDepId > 2) or if we need to re-try
1210 // We need to perform run-time alias checks, but some pointers had bounds
1252 unsigned ASi = PtrI->getType()->getPointerAddressSpace();
1253 unsigned ASj = PtrJ->getType()->getPointerAddressSpace();
1269 // If we can do run-time checks, but there are no checks, no runtime checks
1281 // We process the set twice: first we process read-write pointers, last we
1282 // process read-only pointers. This allows us to skip dependence tests for
1283 // read-only pointers.
1292 : (ReadOnlyPtr.count(A.getPointer()) ? "read-only"
1298 // compatibility and potential for underlying-object overlap. As a result, we
1302 // Note that both the alias-set tracker and the alias sets themselves used
1317 // and then to process read-only pointers.
1342 "Alias-set pointer not in the access set?");
1347 // Memorize read-only pointers for later processing and skip them in
1350 // consecutive as "read-only" pointers (so that we check
1359 // If this is a write - check other reads and writes for conflicts. If
1361 // there is no other write to the ptr - this is an optimization to
1386 TheLoop->getHeader()->getParent(),
1387 UnderlyingObj->getType()->getPointerAddressSpace()))
1393 DepCands.unionSets(Access, Prev->second);
1404 /// Return true if an AddRec pointer \p Ptr is unsigned non-wrapping,
1410 if (AR->getNoWrapFlags(SCEV::NoWrapMask))
1416 // Scalar evolution does not propagate the non-wrapping flags to values that
1417 // are derived from a non-wrapping induction variable because non-wrapping
1418 // could be flow-sensitive.
1421 // non-wrapping for the *specific* value of Ptr.
1425 if (!GEP || !GEP->isInBounds())
1428 // Make sure there is only one non-const index and analyze that.
1430 for (Value *Index : GEP->indices())
1440 // The index in GEP is signed. It is non-wrapping if it's derived from a NSW
1443 if (OBO->hasNoSignedWrap() &&
1446 isa<ConstantInt>(OBO->getOperand(1))) {
1447 auto *OpScev = PSE.getSCEV(OBO->getOperand(0));
1450 return OpAR->getLoop() == L && OpAR->getNoWrapFlags(SCEV::FlagNSW);
1463 if (PSE.getSE()->isLoopInvariant(PtrScev, Lp))
1466 Type *Ty = Ptr->getType();
1467 assert(Ty->isPointerTy() && "Unexpected non-ptr");
1469 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Scalable object: " << *AccessTy
1479 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer " << *Ptr
1485 if (Lp != AR->getLoop()) {
1486 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop "
1492 const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());
1497 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr
1502 auto &DL = Lp->getHeader()->getDataLayout();
1505 const APInt &APStepVal = C->getAPInt();
1507 // Huge step value - give up.
1532 GEP && GEP->isInBounds() && (Stride == 1 || Stride == -1))
1538 unsigned AddrSpace = Ty->getPointerAddressSpace();
1539 if (!NullPointerIsDefined(Lp->getHeader()->getParent(), AddrSpace) &&
1540 (Stride == 1 || Stride == -1))
1552 dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "
1562 assert(PtrA && PtrB && "Expected non-nullptr pointers.");
1572 unsigned ASA = PtrA->getType()->getPointerAddressSpace();
1573 unsigned ASB = PtrB->getType()->getPointerAddressSpace();
1581 Value *PtrA1 = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);
1582 Value *PtrB1 = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB);
1588 ASA = cast<PointerType>(PtrA1->getType())->getAddressSpace();
1589 ASB = cast<PointerType>(PtrB1->getType())->getAddressSpace();
1598 OffsetB -= OffsetA;
1608 Val = Diff->getAPInt().getSExtValue();
1613 // Ensure that the calculated distance matches the type-based one after all
1624 VL, [](const Value *V) { return V->getType()->isPointerTy(); }) &&
1651 // Fill SortedIndices array only if it is non-consecutive.
1675 visitPointers(SI->getPointerOperand(), *InnermostLoop,
1684 visitPointers(LI->getPointerOperand(), *InnermostLoop,
1752 // factor store-load forwarding does not take place.
1754 // prevent store-load forwarding making vectorized code run a lot slower.
1755 // a[i] = a[i-3] ^ a[i-8];
1756 // The stores to a[i:i+1] don't align with the stores to a[i-3:i-2] and
1757 // hence on your typical architecture store-load forwarding does not take
1759 // Store-load forwarding distance.
1761 // After this many iterations store-to-load forwarding conflicts should not
1782 << " that could cause a store-load forwarding conflict\n");
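A minimal sketch of the kind of test the debug message above refers to (editor's illustration; the helper name and the iteration threshold are assumptions, not this file's implementation):

#include <cstdint>

// Editor's sketch: a dependence at `DistanceBytes` can defeat store-to-load
// forwarding for a vector width of `VFBytes` when the vectorized store and
// the later vectorized load are misaligned (the distance is not a multiple
// of the vector width) and only a few iterations separate them.
static bool mayBlockForwardingSketch(std::uint64_t DistanceBytes,
                                     std::uint64_t VFBytes,
                                     std::uint64_t FewIters) {
  return (DistanceBytes % VFBytes) != 0 &&
         (DistanceBytes / VFBytes) < FewIters;
}

With DistanceBytes = 12, VFBytes = 8 and FewIters = 8 this flags a conflict (12 % 8 != 0 and 12 / 8 = 1 < 8), matching the intuition from the a[i-3] / a[i-8] example quoted above.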
1798 /// Given a dependence-distance \p Dist between two
1839 uint64_t ProductTypeSizeBits = DL.getTypeSizeInBits(Product->getType());
1843 // backedgeTakenCount is non-negative, so we zero extend Product.
1847 CastedDist = SE.getNoopOrSignExtend(&Dist, Product->getType());
1849 // Is Dist - (MaxBTC * Step) > 0 ?
1855 // Second try: Is -Dist - (MaxBTC * Step) > 0 ?
1856 // (If so, then we have proven (**) because |Dist| >= -1*Dist)
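A worked instance of the two queries above (editor's arithmetic; the numbers are hypothetical):

//   With Step = 4 bytes and MaxBTC = 7, an access moves by at most
//   MaxBTC * Step = 28 bytes over the whole loop.
//   Dist = 32:   32 - 28 = 4 > 0, so the first query already proves the
//                distance can never be covered inside the loop.
//   Dist = -32:  the first query fails, but the second one gives
//                -Dist - 28 = 32 - 28 = 4 > 0, and |Dist| >= -Dist makes
//                that sufficient.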
1870 assert(TypeByteSize > 0 && "The type size in byte must be non-zero");
1871 assert(Distance > 0 && "The distance must be non-zero");
1903 const auto &DL = InnermostLoop->getHeader()->getDataLayout();
1916 if (APtr->getType()->getPointerAddressSpace() !=
1917 BPtr->getType()->getPointerAddressSpace())
1946 // sink are loop invariant to avoid compile-time increases. This is not
1969 // If either Src or Sink are not strided (i.e. not a non-wrapping AddRec) and
1970 // not loop-invariant (stride will be 0 in that case), we cannot analyze the
1973 LLVM_DEBUG(dbgs() << "Pointer access with non-constant stride\n");
2025 // non-constant distance dependencies.
2032 auto &DL = InnermostLoop->getHeader()->getDataLayout();
2049 const APInt &Val = C->getAPInt();
2095 couldPreventStoreLoadForward(C->getAPInt().abs().getZExtValue(),
2098 dbgs() << "LAA: Forward but may prevent st->ld forwarding\n");
2116 // FoundNonConstantDistanceDependence to force re-trying with runtime
2118 // original behavior w.r.t. re-trying with runtime checks.
2127 LLVM_DEBUG(dbgs() << "LAA: ReadWrite-Write positive dependency with "
2135 // Bail out early if passed-in parameters make vectorization not feasible.
2160 // 4 * 2 * (MinNumIter - 1). MinDistance needed for the last iteration: 4.
2161 // So the minimum distance needed is: 4 * 2 * (MinNumIter - 1) + 4.
2174 TypeByteSize * *CommonStride * (MinNumIter - 1) + TypeByteSize;
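Plugging numbers into this formula (editor's arithmetic, continuing the 4-byte, stride-2 example the comment refers to):

//   MinNumIter = 2:  4 * 2 * (2 - 1) + 4 = 12 bytes needed.
//   MinNumIter = 4:  4 * 2 * (4 - 1) + 4 = 28 bytes needed.
//   A dependence whose distance is smaller than the value required for the
//   chosen MinNumIter makes that vectorization width unsafe.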
2177 // For non-constant distances, we checked the lower bound of the
2179 // for vectorization). Classify it as Unknown, so we re-try with runtime
2237 // For non-constant distances, we checked the lower bound of the dependence
2239 // vectorization). Classify it as Unknown, so we re-try with runtime checks.
2250 MinDepDistBytes = -1;
2269 bool AIIsWrite = AI->getInt();
2271 // other stores in the same equivalence class - to the same address.
2326 auto &IndexVector = Accesses.find(Access)->second;
2331 [&](unsigned Idx) { return this->InstMap[Idx]; });
2349 OS.indent(Depth + 2) << *Instrs[Source] << " -> \n";
2356 << TheLoop->getHeader()->getParent()->getName() << "' from "
2357 << TheLoop->getLocStr() << "\n");
2360 if (!TheLoop->isInnermost()) {
2367 if (TheLoop->getNumBackEdges() != 1) {
2378 const SCEV *ExitCount = PSE->getSymbolicMaxBackedgeTakenCount();
2387 << TheLoop->getHeader()->getName() << "\n");
2408 PtrRtChecking->Pointers.clear();
2409 PtrRtChecking->Need = false;
2411 const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();
2415 !TheLoop->getHeader()->getParent()->hasOptSize();
2426 if (Call->isConvergent())
2430 // With both a non-vectorizable memory instruction and a convergent
2441 for (Metadata *Op : Decl->getScopeList()->operands())
2457 if (Call && !Call->isNoBuiltin() && Call->getCalledFunction() &&
2468 if (!Ld->isSimple() && !IsAnnotatedParallel) {
2470 << "read with atomic ordering or volatile read";
2471 LLVM_DEBUG(dbgs() << "LAA: Found a non-simple load.\n");
2477 DepChecker->addAccess(Ld);
2492 if (!St->isSimple() && !IsAnnotatedParallel) {
2494 << "write with atomic ordering or volatile write";
2495 LLVM_DEBUG(dbgs() << "LAA: Found a non-simple store.\n");
2501 DepChecker->addAccess(St);
2517 LLVM_DEBUG(dbgs() << "LAA: Found a read-only loop!\n");
2537 Value *Ptr = ST->getPointerOperand();
2546 // If we did *not* see this pointer before, insert it to the read-write
2556 if (blockNeedsPredication(ST->getParent(), TheLoop, DT))
2575 Value *Ptr = LD->getPointerOperand();
2578 // the read-write list. This allows us to vectorize expressions
2579 // such as A[i] += x; Because the address of A[i] is a read-write
2587 !getPtrStride(*PSE, LD->getType(), Ptr, TheLoop, SymbolicStrides).value_or(0)) {
2604 if (blockNeedsPredication(LD->getParent(), TheLoop, DT))
2614 // If we write (or read-write) to a single destination and there are no
2617 LLVM_DEBUG(dbgs() << "LAA: Found a write-only loop!\n");
2629 Accesses.canCheckPtrAtRT(*PtrRtChecking, PSE->getSE(), TheLoop,
2646 DepsAreSafe = DepChecker->areDepsSafe(DependentAccesses,
2649 if (!DepsAreSafe && DepChecker->shouldRetryWithRuntimeCheck()) {
2655 PtrRtChecking->reset();
2656 PtrRtChecking->Need = true;
2658 auto *SE = PSE->getSE();
2686 << (PtrRtChecking->Need ? "" : " don't")
2704 if (Found == Deps->end())
2717 HasForcedDistribution = mdconst::extract<ConstantInt>(*Op)->getZExtValue();
2740 "store-to-load forwarding.";
2744 "store-to-load forwarding.";
2755 DebugLoc SourceLoc = I->getDebugLoc();
2757 SourceLoc = DD->getDebugLoc();
2766 assert(TheLoop->contains(BB) && "Unknown block used");
2769 BasicBlock *Latch = TheLoop->getLoopLatch();
2770 return !DT->dominates(BB, Latch);
2777 Value *CodeRegion = TheLoop->getHeader();
2778 DebugLoc DL = TheLoop->getStartLoc();
2781 CodeRegion = I->getParent();
2784 if (I->getDebugLoc())
2785 DL = I->getDebugLoc();
2794 auto *SE = PSE->getSE();
2796 // trivially loop-invariant FP values to be considered invariant.
2797 if (!SE->isSCEVable(V->getType()))
2799 const SCEV *S = SE->getSCEV(V);
2800 return SE->isLoopInvariant(S, TheLoop);
2807 const DataLayout &DL = Gep->getDataLayout();
2808 unsigned LastOperand = Gep->getNumOperands() - 1;
2809 TypeSize GEPAllocSize = DL.getTypeAllocSize(Gep->getResultElementType());
2812 while (LastOperand > 1 && match(Gep->getOperand(LastOperand), m_Zero())) {
2815 std::advance(GEPTI, LastOperand - 2);
2824 --LastOperand;
2831 /// getGEPInductionOperand. However, if there is some other non-loop-invariant
2842 for (unsigned I = 0, E = GEP->getNumOperands(); I != E; ++I)
2844 !SE->isLoopInvariant(SE->getSCEV(GEP->getOperand(I)), Lp))
2846 return GEP->getOperand(InductionOperand);
2852 auto *PtrTy = dyn_cast<PointerType>(Ptr->getType());
2853 if (!PtrTy || PtrTy->isAggregateType())
2865 const SCEV *V = SE->getSCEV(Ptr);
2870 V = C->getOperand();
2878 if (Lp != S->getLoop())
2881 V = S->getStepRecurrence(*SE);
2889 if (M->getOperand(0)->getSCEVType() != scConstant)
2892 const APInt &APStepVal = cast<SCEVConstant>(M->getOperand(0))->getAPInt();
2894 // Huge step value - give up.
2901 V = M->getOperand(1);
2907 if (!SE->isLoopInvariant(V, Lp))
2915 if (isa<SCEVUnknown>(C->getOperand()))
2927 // could broaden the scope of values returned here - to anything
2929 // computation of an interesting IV - but we chose not to as we
2932 const SCEV *StrideExpr = getStrideFromPointer(Ptr, PSE->getSE(), TheLoop);
2941 LLVM_DEBUG(dbgs() << " Chose not to due to -laa-speculate-unit-stride\n");
2946 // Stride >= Trip-Count. Such a predicate will effectively optimize a single
2947 // or zero iteration loop, as Trip-Count <= Stride == 1.
2958 const SCEV *MaxBTC = PSE->getSymbolicMaxBackedgeTakenCount();
2962 // The backedgeTakenCount is non-negative, so we zero extend MaxBTC.
2963 const DataLayout &DL = TheLoop->getHeader()->getDataLayout();
2964 uint64_t StrideTypeSizeBits = DL.getTypeSizeInBits(StrideExpr->getType());
2965 uint64_t BETypeSizeBits = DL.getTypeSizeInBits(MaxBTC->getType());
2968 ScalarEvolution *SE = PSE->getSE();
2970 CastedStride = SE->getNoopOrSignExtend(StrideExpr, MaxBTC->getType());
2972 CastedBECount = SE->getZeroExtendExpr(MaxBTC, StrideExpr->getType());
2973 const SCEV *StrideMinusBETaken = SE->getMinusSCEV(CastedStride, CastedBECount);
2976 // Stride - MaxBTC > 0
2977 if (SE->isKnownPositive(StrideMinusBETaken)) {
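A numeric illustration of this guard (editor's example with hypothetical values):

//   If SCEV already knows Stride > MaxBTC, say Stride >= 8 while MaxBTC = 7,
//   then Stride - MaxBTC is known positive. Speculating "Stride == 1" would
//   then force TripCount <= 1, i.e. the versioned loop could only ever run
//   zero or one iteration, so the predicate is not worth adding and is
//   skipped here.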
2990 StrideBase = C->getOperand();
3003 TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector);
3011 TTI->getRegisterBitWidth(TargetTransformInfo::RGK_ScalableVector);
3029 if (PtrRtChecking->Need)
3030 OS << " with run-time checks";
3038 OS.indent(Depth) << "Report: " << Report->getMsg() << "\n";
3040 if (auto *Dependences = DepChecker->getDependences()) {
3043 Dep.print(OS, Depth + 2, DepChecker->getMemoryInstructions());
3049 // List the pairs of accesses that need run-time checks to prove independence.
3050 PtrRtChecking->print(OS, Depth);
3062 PSE->getPredicate().print(OS, Depth);
3066 OS.indent(Depth) << "Expressions re-written:\n";
3067 PSE->print(OS, Depth);
3074 It->second =
3077 return *It->second;
3086 if (LAI->getRuntimePointerChecking()->getChecks().empty() &&
3087 LAI->getPSE().getPredicate().isAlwaysTrue())