xref: /llvm-project/llvm/include/llvm/Transforms/Utils/LoopUtils.h (revision b3cba9be41bfa89bc0ec212706c6028a901e127a)
1 //===- llvm/Transforms/Utils/LoopUtils.h - Loop utilities -------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines some loop transformation utilities.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #ifndef LLVM_TRANSFORMS_UTILS_LOOPUTILS_H
14 #define LLVM_TRANSFORMS_UTILS_LOOPUTILS_H
15 
16 #include "llvm/Analysis/IVDescriptors.h"
17 #include "llvm/Analysis/LoopAccessAnalysis.h"
18 #include "llvm/Analysis/TargetTransformInfo.h"
19 #include "llvm/IR/VectorBuilder.h"
20 #include "llvm/Transforms/Utils/ValueMapper.h"
21 
22 namespace llvm {
23 
24 template <typename T> class DomTreeNodeBase;
25 using DomTreeNode = DomTreeNodeBase<BasicBlock>;
26 class AssumptionCache;
27 class StringRef;
28 class AnalysisUsage;
29 class TargetTransformInfo;
30 class AAResults;
31 class BasicBlock;
32 class ICFLoopSafetyInfo;
33 class IRBuilderBase;
34 class Loop;
35 class LoopInfo;
36 class MemoryAccess;
37 class MemorySSA;
38 class MemorySSAUpdater;
39 class OptimizationRemarkEmitter;
40 class PredIteratorCache;
41 class ScalarEvolution;
42 class SCEV;
43 class SCEVExpander;
44 class TargetLibraryInfo;
45 class LPPassManager;
46 class Instruction;
47 struct RuntimeCheckingPtrGroup;
/// A single runtime memory check: a pair of pointer groups that must be
/// checked against each other for overlap (see addRuntimeChecks).
48 typedef std::pair<const RuntimeCheckingPtrGroup *,
49                   const RuntimeCheckingPtrGroup *>
50     RuntimePointerCheck;
51 
52 template <typename T, unsigned N> class SmallSetVector;
53 template <typename T, unsigned N> class SmallPriorityWorklist;
54 
/// Ensure that loop \p L has a preheader, inserting a new block for it if
/// necessary, and return the preheader. The dominator tree \p DT, \p LI and
/// (if non-null) \p MSSAU are updated, and LCSSA form is preserved when
/// \p PreserveLCSSA is set. NOTE(review): presumably returns nullptr when a
/// preheader cannot be inserted — confirm the exact failure conditions in the
/// implementation.
55 BasicBlock *InsertPreheaderForLoop(Loop *L, DominatorTree *DT, LoopInfo *LI,
56                                    MemorySSAUpdater *MSSAU, bool PreserveLCSSA);
57 
58 /// Ensure that all exit blocks of the loop are dedicated exits.
59 ///
60 /// For any loop exit block with non-loop predecessors, we split the loop
61 /// predecessors to use a dedicated loop exit block. We update the dominator
62 /// tree and loop info if provided, and will preserve LCSSA if requested.
63 bool formDedicatedExitBlocks(Loop *L, DominatorTree *DT, LoopInfo *LI,
64                              MemorySSAUpdater *MSSAU, bool PreserveLCSSA);
65 
66 /// Ensures LCSSA form for every instruction from the Worklist in the scope of
67 /// innermost containing loop.
68 ///
69 /// For each given instruction which has uses outside of the loop, an LCSSA PHI
70 /// node is inserted and the uses outside the loop are rewritten to use this
71 /// node.
72 ///
73 /// LoopInfo and DominatorTree are required and, since the routine makes no
74 /// changes to CFG, preserved.
75 ///
76 /// Returns true if any modifications are made.
77 ///
78 /// This function may introduce unused PHI nodes. If \p PHIsToRemove is not
79 /// nullptr, those are added to it (before removing, the caller has to check if
80 /// they still do not have any uses). Otherwise the PHIs are directly removed.
81 ///
82 /// If \p InsertedPHIs is not nullptr, inserted phis will be added to this
83 /// vector.
84 bool formLCSSAForInstructions(
85     SmallVectorImpl<Instruction *> &Worklist, const DominatorTree &DT,
86     const LoopInfo &LI, ScalarEvolution *SE,
87     SmallVectorImpl<PHINode *> *PHIsToRemove = nullptr,
88     SmallVectorImpl<PHINode *> *InsertedPHIs = nullptr);
89 
90 /// Put loop into LCSSA form.
91 ///
92 /// Looks at all instructions in the loop which have uses outside of the
93 /// current loop. For each, an LCSSA PHI node is inserted and the uses outside
94 /// the loop are rewritten to use this node. Sub-loops must be in LCSSA form
95 /// already.
96 ///
97 /// LoopInfo and DominatorTree are required and preserved.
98 ///
99 /// If ScalarEvolution is passed in, it will be preserved.
100 ///
101 /// Returns true if any modifications are made to the loop.
102 bool formLCSSA(Loop &L, const DominatorTree &DT, const LoopInfo *LI,
103                ScalarEvolution *SE);
104 
105 /// Put a loop nest into LCSSA form.
106 ///
107 /// This recursively forms LCSSA for a loop nest.
108 ///
109 /// LoopInfo and DominatorTree are required and preserved.
110 ///
111 /// If ScalarEvolution is passed in, it will be preserved.
112 ///
113 /// Returns true if any modifications are made to the loop.
114 bool formLCSSARecursively(Loop &L, const DominatorTree &DT, const LoopInfo *LI,
115                           ScalarEvolution *SE);
116 
117 /// Flags controlling how much is checked when sinking or hoisting
118 /// instructions.  The number of memory accesses in the loop (and whether there
119 /// are too many) is determined in the constructors when using MemorySSA.
120 class SinkAndHoistLICMFlags {
121 public:
122   // Explicitly set limits.
123   SinkAndHoistLICMFlags(unsigned LicmMssaOptCap,
124                         unsigned LicmMssaNoAccForPromotionCap, bool IsSink,
125                         Loop &L, MemorySSA &MSSA);
126   // Use default limits.
127   SinkAndHoistLICMFlags(bool IsSink, Loop &L, MemorySSA &MSSA);
128 
129   void setIsSink(bool B) { IsSink = B; }
130   bool getIsSink() { return IsSink; }
131   bool tooManyMemoryAccesses() { return NoOfMemAccTooLarge; }
132   bool tooManyClobberingCalls() { return LicmMssaOptCounter >= LicmMssaOptCap; }
133   void incrementClobberingCalls() { ++LicmMssaOptCounter; }
134 
135 protected:
136   bool NoOfMemAccTooLarge = false;
137   unsigned LicmMssaOptCounter = 0;
138   unsigned LicmMssaOptCap;
139   unsigned LicmMssaNoAccForPromotionCap;
140   bool IsSink;
141 };
142 
143 /// Walk the specified region of the CFG (defined by all blocks
144 /// dominated by the specified block, and that are in the current loop) in
145 /// reverse depth first order w.r.t the DominatorTree. This allows us to visit
146 /// uses before definitions, allowing us to sink a loop body in one pass without
147 /// iteration. Takes DomTreeNode, AAResults, LoopInfo, DominatorTree,
148 /// TargetLibraryInfo, Loop, AliasSet information for all
149 /// instructions of the loop and loop safety information as
150 /// arguments. Diagnostics are emitted via \p ORE. It returns changed status.
151 /// \p CurLoop is a loop to do sinking on. \p OutermostLoop is used only when
152 /// this function is called by \p sinkRegionForLoopNest.
153 bool sinkRegion(DomTreeNode *, AAResults *, LoopInfo *, DominatorTree *,
154                 TargetLibraryInfo *, TargetTransformInfo *, Loop *CurLoop,
155                 MemorySSAUpdater &, ICFLoopSafetyInfo *,
156                 SinkAndHoistLICMFlags &, OptimizationRemarkEmitter *,
157                 Loop *OutermostLoop = nullptr);
158 
159 /// Call sinkRegion on loops contained within the specified loop
160 /// in order from innermost to outermost.
161 bool sinkRegionForLoopNest(DomTreeNode *, AAResults *, LoopInfo *,
162                            DominatorTree *, TargetLibraryInfo *,
163                            TargetTransformInfo *, Loop *, MemorySSAUpdater &,
164                            ICFLoopSafetyInfo *, SinkAndHoistLICMFlags &,
165                            OptimizationRemarkEmitter *);
166 
167 /// Walk the specified region of the CFG (defined by all blocks
168 /// dominated by the specified block, and that are in the current loop) in depth
169 /// first order w.r.t the DominatorTree.  This allows us to visit definitions
170 /// before uses, allowing us to hoist a loop body in one pass without iteration.
171 /// Takes DomTreeNode, AAResults, LoopInfo, DominatorTree,
172 /// TargetLibraryInfo, Loop, AliasSet information for all
173 /// instructions of the loop and loop safety information as arguments.
174 /// Diagnostics are emitted via \p ORE. It returns changed status.
175 /// \p AllowSpeculation is whether values should be hoisted even if they are not
176 /// guaranteed to execute in the loop, but are safe to speculatively execute.
177 bool hoistRegion(DomTreeNode *, AAResults *, LoopInfo *, DominatorTree *,
178                  AssumptionCache *, TargetLibraryInfo *, Loop *,
179                  MemorySSAUpdater &, ScalarEvolution *, ICFLoopSafetyInfo *,
180                  SinkAndHoistLICMFlags &, OptimizationRemarkEmitter *, bool,
181                  bool AllowSpeculation);
182 
183 /// Return true if the induction variable \p IV in a Loop whose latch is
184 /// \p LatchBlock would become dead if the exit test \p Cond were removed.
185 /// Conservatively returns false if analysis is insufficient.
186 bool isAlmostDeadIV(PHINode *IV, BasicBlock *LatchBlock, Value *Cond);
187 
188 /// This function deletes dead loops. The caller of this function needs to
189 /// guarantee that the loop is in fact dead.
190 /// The function requires a bunch of prerequisites to be present:
191 ///   - The loop needs to be in LCSSA form
192 ///   - The loop needs to have a Preheader
193 ///   - A unique dedicated exit block must exist
194 ///
195 /// This also updates the relevant analysis information in \p DT, \p SE, \p LI
196 /// and \p MSSA if pointers to those are provided.
197 /// It also updates the loop PM if an updater struct is provided.
198 
199 void deleteDeadLoop(Loop *L, DominatorTree *DT, ScalarEvolution *SE,
200                     LoopInfo *LI, MemorySSA *MSSA = nullptr);
201 
202 /// Remove the backedge of the specified loop.  Handles loop nests and general
203 /// loop structures subject to the precondition that the loop has no parent
204 /// loop and has a single latch block.  Preserves all listed analyses.
205 void breakLoopBackedge(Loop *L, DominatorTree &DT, ScalarEvolution &SE,
206                        LoopInfo &LI, MemorySSA *MSSA);
207 
208 /// Try to promote memory values to scalars by sinking stores out of
209 /// the loop and moving loads to before the loop.  We do this by looping over
210 /// the stores in the loop, looking for stores to Must pointers which are
211 /// loop invariant. It takes a set of must-alias values, Loop exit blocks
212 /// vector, loop exit blocks insertion point vector, PredIteratorCache,
213 /// LoopInfo, DominatorTree, Loop, AliasSet information for all instructions
214 /// of the loop and loop safety information as arguments.
215 /// Diagnostics are emitted via \p ORE. It returns changed status.
216 /// \p AllowSpeculation is whether values should be hoisted even if they are not
217 /// guaranteed to execute in the loop, but are safe to speculatively execute.
218 bool promoteLoopAccessesToScalars(
219     const SmallSetVector<Value *, 8> &, SmallVectorImpl<BasicBlock *> &,
220     SmallVectorImpl<BasicBlock::iterator> &, SmallVectorImpl<MemoryAccess *> &,
221     PredIteratorCache &, LoopInfo *, DominatorTree *, AssumptionCache *AC,
222     const TargetLibraryInfo *, TargetTransformInfo *, Loop *,
223     MemorySSAUpdater &, ICFLoopSafetyInfo *, OptimizationRemarkEmitter *,
224     bool AllowSpeculation, bool HasReadsOutsideSet);
225 
226 /// Does a BFS from a given node to all of its children inside a given loop.
227 /// The returned vector of basic blocks includes the starting point.
228 SmallVector<BasicBlock *, 16>
229 collectChildrenInLoop(DominatorTree *DT, DomTreeNode *N, const Loop *CurLoop);
230 
231 /// Returns the instructions that use values defined in the loop.
232 SmallVector<Instruction *, 8> findDefsUsedOutsideOfLoop(Loop *L);
233 
234 /// Find a combination of metadata ("llvm.loop.vectorize.width" and
235 /// "llvm.loop.vectorize.scalable.enable") for a loop and use it to construct a
236 /// ElementCount. If the metadata "llvm.loop.vectorize.width" cannot be found
237 /// then std::nullopt is returned.
238 std::optional<ElementCount>
239 getOptionalElementCountLoopAttribute(const Loop *TheLoop);
240 
241 /// Create a new loop identifier for a loop created from a loop transformation.
242 ///
243 /// @param OrigLoopID The loop ID of the loop before the transformation.
244 /// @param FollowupAttrs List of attribute names that contain attributes to be
245 ///                      added to the new loop ID.
246 /// @param InheritOptionsAttrsPrefix Selects which attributes should be inherited
247 ///                                  from the original loop. The following values
248 ///                                  are considered:
249 ///        nullptr   : Inherit all attributes from @p OrigLoopID.
250 ///        ""        : Do not inherit any attribute from @p OrigLoopID; only use
251 ///                    those specified by a followup attribute.
252 ///        "<prefix>": Inherit all attributes except those which start with
253 ///                    <prefix>; commonly used to remove metadata for the
254 ///                    applied transformation.
255 /// @param AlwaysNew If true, do not try to reuse OrigLoopID and never return
256 ///                  std::nullopt.
257 ///
258 /// @return The loop ID for the after-transformation loop. The following values
259 ///         can be returned:
260 ///         std::nullopt : No followup attribute was found; it is up to the
261 ///                        transformation to choose attributes that make sense.
262 ///         @p OrigLoopID: The original identifier can be reused.
263 ///         nullptr      : The new loop has no attributes.
264 ///         MDNode*      : A new unique loop identifier.
265 std::optional<MDNode *>
266 makeFollowupLoopID(MDNode *OrigLoopID, ArrayRef<StringRef> FollowupAttrs,
267                    const char *InheritOptionsAttrsPrefix = "",
268                    bool AlwaysNew = false);
269 
270 /// Look for the loop attribute that disables all transformation heuristic.
271 bool hasDisableAllTransformsHint(const Loop *L);
272 
273 /// Look for the loop attribute that disables the LICM transformation heuristics.
274 bool hasDisableLICMTransformsHint(const Loop *L);
275 
276 /// The mode sets how eager a transformation should be applied.
277 enum TransformationMode {
278   /// The pass can use heuristics to determine whether a transformation should
279   /// be applied.
280   TM_Unspecified,
281 
282   /// The transformation should be applied without considering a cost model.
283   TM_Enable,
284 
285   /// The transformation should not be applied.
286   TM_Disable,
287 
288   /// Force is a modifier bit combined with TM_Enable or TM_Disable (see
288   /// TM_ForcedByUser and TM_SuppressedByUser below); it must not be used alone.
289   TM_Force = 0x04,
290 
291   /// The transformation was directed by the user, e.g. by a #pragma in
292   /// the source code. If the transformation could not be applied, a
293   /// warning should be emitted.
294   TM_ForcedByUser = TM_Enable | TM_Force,
295 
296   /// The transformation must not be applied. For instance, `#pragma clang loop
297   /// unroll(disable)` explicitly forbids any unrolling to take place. Unlike
298   /// general loop metadata, it must not be dropped. Most passes should not
299   /// behave differently under TM_Disable and TM_SuppressedByUser.
300   TM_SuppressedByUser = TM_Disable | TM_Force
301 };
302 
303 /// @{
304 /// Get the mode for LLVM's supported loop transformations.
305 TransformationMode hasUnrollTransformation(const Loop *L);
306 TransformationMode hasUnrollAndJamTransformation(const Loop *L);
307 TransformationMode hasVectorizeTransformation(const Loop *L);
308 TransformationMode hasDistributeTransformation(const Loop *L);
309 TransformationMode hasLICMVersioningTransformation(const Loop *L);
310 /// @}
311 
312 /// Set input string into loop metadata by keeping other values intact.
313 /// If the string is already in loop metadata update value if it is
314 /// different.
315 void addStringMetadataToLoop(Loop *TheLoop, const char *MDString,
316                              unsigned V = 0);
317 
318 /// Returns a loop's estimated trip count based on branch weight metadata.
319 /// In addition if \p EstimatedLoopInvocationWeight is not null it is
320 /// initialized with weight of loop's latch leading to the exit.
321 /// Returns 0 when the count is estimated to be 0, or std::nullopt when a
322 /// meaningful estimate can not be made.
323 std::optional<unsigned>
324 getLoopEstimatedTripCount(Loop *L,
325                           unsigned *EstimatedLoopInvocationWeight = nullptr);
326 
327 /// Set a loop's branch weight metadata to reflect that loop has \p
328 /// EstimatedTripCount iterations and \p EstimatedLoopInvocationWeight exits
329 /// through latch. Returns true if metadata is successfully updated, false
330 /// otherwise. Note that loop must have a latch block which controls loop exit
331 /// in order to succeed.
332 bool setLoopEstimatedTripCount(Loop *L, unsigned EstimatedTripCount,
333                                unsigned EstimatedLoopInvocationWeight);
334 
335 /// Check inner loop (L) backedge count is known to be invariant on all
336 /// iterations of its outer loop. If the loop has no parent, this is trivially
337 /// true.
338 bool hasIterationCountInvariantInParent(Loop *L, ScalarEvolution &SE);
339 
340 /// Helper to consistently add the set of standard passes to a loop pass's \c
341 /// AnalysisUsage.
342 ///
343 /// All loop passes should call this as part of implementing their \c
344 /// getAnalysisUsage.
345 void getLoopAnalysisUsage(AnalysisUsage &AU);
346 
347 /// Returns true if is legal to hoist or sink this instruction disregarding the
348 /// possible introduction of faults.  Reasoning about potential faulting
349 /// instructions is the responsibility of the caller since it is challenging to
350 /// do efficiently from within this routine.
351 /// \p TargetExecutesOncePerLoop is true only when it is guaranteed that the
352 /// target executes at most once per execution of the loop body.  This is used
353 /// to assess the legality of duplicating atomic loads.  Generally, this is
354 /// true when moving out of loop and not true when moving into loops.
355 /// If \p ORE is set use it to emit optimization remarks.
356 bool canSinkOrHoistInst(Instruction &I, AAResults *AA, DominatorTree *DT,
357                         Loop *CurLoop, MemorySSAUpdater &MSSAU,
358                         bool TargetExecutesOncePerLoop,
359                         SinkAndHoistLICMFlags &LICMFlags,
360                         OptimizationRemarkEmitter *ORE = nullptr);
361 
362 /// Returns the llvm.vector.reduce intrinsic that corresponds to the recurrence
363 /// kind.
364 constexpr Intrinsic::ID getReductionIntrinsicID(RecurKind RK);
365 
366 /// Returns the arithmetic instruction opcode used when expanding a reduction.
367 unsigned getArithmeticReductionInstruction(Intrinsic::ID RdxID);
368 
369 /// Returns the min/max intrinsic used when expanding a min/max reduction.
370 Intrinsic::ID getMinMaxReductionIntrinsicOp(Intrinsic::ID RdxID);
371 
372 /// Returns the min/max intrinsic used when expanding a min/max reduction.
373 Intrinsic::ID getMinMaxReductionIntrinsicOp(RecurKind RK);
374 
375 /// Returns the recurrence kind used when expanding a min/max reduction.
376 RecurKind getMinMaxReductionRecurKind(Intrinsic::ID RdxID);
377 
378 /// Returns the comparison predicate used when expanding a min/max reduction.
379 CmpInst::Predicate getMinMaxReductionPredicate(RecurKind RK);
380 
381 /// Given information about an @llvm.vector.reduce.* intrinsic, return
382 /// the identity value for the reduction.
383 Value *getReductionIdentity(Intrinsic::ID RdxID, Type *Ty, FastMathFlags FMF);
384 
385 /// Given information about a recurrence kind, return the identity
386 /// for the @llvm.vector.reduce.* used to generate it.
387 Value *getRecurrenceIdentity(RecurKind K, Type *Tp, FastMathFlags FMF);
388 
389 /// Returns a Min/Max operation corresponding to MinMaxRecurrenceKind.
390 /// The Builder's fast-math-flags must be set to propagate the expected values.
391 Value *createMinMaxOp(IRBuilderBase &Builder, RecurKind RK, Value *Left,
392                       Value *Right);
393 
394 /// Generates an ordered vector reduction using extracts to reduce the value.
395 Value *getOrderedReduction(IRBuilderBase &Builder, Value *Acc, Value *Src,
396                            unsigned Op, RecurKind MinMaxKind = RecurKind::None);
397 
398 /// Generates a vector reduction using shufflevectors to reduce the value.
399 /// Fast-math-flags are propagated using the IRBuilder's setting.
400 Value *getShuffleReduction(IRBuilderBase &Builder, Value *Src, unsigned Op,
401                            TargetTransformInfo::ReductionShuffle RS,
402                            RecurKind MinMaxKind = RecurKind::None);
403 
404 /// Create a reduction of the given vector. The reduction operation
405 /// is described by the \p Opcode parameter. min/max reductions require
406 /// additional information supplied in \p RdxKind.
407 /// Fast-math-flags are propagated using the IRBuilder's setting.
408 Value *createSimpleReduction(IRBuilderBase &B, Value *Src,
409                              RecurKind RdxKind);
410 /// Overloaded function to generate vector-predication intrinsics for
411 /// reduction.
412 Value *createSimpleReduction(VectorBuilder &VB, Value *Src,
413                              const RecurrenceDescriptor &Desc);
414 
415 /// Create a reduction of the given vector \p Src for a reduction of the
416 /// kind RecurKind::IAnyOf or RecurKind::FAnyOf. The reduction operation is
417 /// described by \p Desc.
418 Value *createAnyOfReduction(IRBuilderBase &B, Value *Src,
419                             const RecurrenceDescriptor &Desc,
420                             PHINode *OrigPhi);
421 
422 /// Create a reduction of the given vector \p Src for a reduction of the
423 /// kind RecurKind::IFindLastIV or RecurKind::FFindLastIV. The reduction
424 /// operation is described by \p Desc.
425 Value *createFindLastIVReduction(IRBuilderBase &B, Value *Src,
426                                  const RecurrenceDescriptor &Desc);
427 
428 /// Create a generic reduction using a recurrence descriptor \p Desc
429 /// Fast-math-flags are propagated using the RecurrenceDescriptor.
430 Value *createReduction(IRBuilderBase &B, const RecurrenceDescriptor &Desc,
431                        Value *Src, PHINode *OrigPhi = nullptr);
432 
433 /// Create an ordered reduction intrinsic using the given recurrence
434 /// descriptor \p Desc.
435 Value *createOrderedReduction(IRBuilderBase &B,
436                               const RecurrenceDescriptor &Desc, Value *Src,
437                               Value *Start);
438 /// Overloaded function to generate vector-predication intrinsics for ordered
439 /// reduction.
440 Value *createOrderedReduction(VectorBuilder &VB,
441                               const RecurrenceDescriptor &Desc, Value *Src,
442                               Value *Start);
443 
444 /// Get the intersection (logical and) of all of the potential IR flags
445 /// of each scalar operation (VL) that will be converted into a vector (I).
446 /// If OpValue is non-null, we only consider operations similar to OpValue
447 /// when intersecting.
448 /// Flag set: NSW, NUW (if IncludeWrapFlags is true), exact, and all of
449 /// fast-math.
450 void propagateIRFlags(Value *I, ArrayRef<Value *> VL, Value *OpValue = nullptr,
451                       bool IncludeWrapFlags = true);
452 
453 /// Returns true if we can prove that \p S is defined and always negative in
454 /// loop \p L.
455 bool isKnownNegativeInLoop(const SCEV *S, const Loop *L, ScalarEvolution &SE);
456 
457 /// Returns true if we can prove that \p S is defined and always non-negative in
458 /// loop \p L.
459 bool isKnownNonNegativeInLoop(const SCEV *S, const Loop *L,
460                               ScalarEvolution &SE);
461 /// Returns true if we can prove that \p S is defined and always positive in
462 /// loop \p L.
463 bool isKnownPositiveInLoop(const SCEV *S, const Loop *L, ScalarEvolution &SE);
464 
465 /// Returns true if we can prove that \p S is defined and always non-positive in
466 /// loop \p L.
467 bool isKnownNonPositiveInLoop(const SCEV *S, const Loop *L,
468                               ScalarEvolution &SE);
469 
470 /// Returns true if \p S is defined and never is equal to signed/unsigned max.
471 bool cannotBeMaxInLoop(const SCEV *S, const Loop *L, ScalarEvolution &SE,
472                        bool Signed);
473 
474 /// Returns true if \p S is defined and never is equal to signed/unsigned min.
475 bool cannotBeMinInLoop(const SCEV *S, const Loop *L, ScalarEvolution &SE,
476                        bool Signed);
477 
/// Policy for rewriteLoopExitValues: controls when a recurrence's exit value
/// may be substituted for uses outside the loop.
478 enum ReplaceExitVal {
479   NeverRepl,          // never replace exit values
480   OnlyCheapRepl,      // replace only when expansion is considered cheap
481   NoHardUse,          // presumably: replace unless there is a "hard" use — confirm
482   UnusedIndVarInLoop, // presumably: replace when the IV is unused in the loop — confirm
483   AlwaysRepl          // always replace
484 };
485 
486 /// If the final value of any expressions that are recurrent in the loop can
487 /// be computed, substitute the exit values from the loop into any instructions
488 /// outside of the loop that use the final values of the current expressions.
489 /// Return the number of loop exit values that have been replaced, and the
490 /// corresponding phi node will be added to DeadInsts.
491 int rewriteLoopExitValues(Loop *L, LoopInfo *LI, TargetLibraryInfo *TLI,
492                           ScalarEvolution *SE, const TargetTransformInfo *TTI,
493                           SCEVExpander &Rewriter, DominatorTree *DT,
494                           ReplaceExitVal ReplaceExitValue,
495                           SmallVector<WeakTrackingVH, 16> &DeadInsts);
496 
497 /// Set weights for \p UnrolledLoop and \p RemainderLoop based on weights for
498 /// \p OrigLoop and the following distribution of \p OrigLoop iteration among \p
499 /// UnrolledLoop and \p RemainderLoop. \p UnrolledLoop receives weights that
500 /// reflect TC/UF iterations, and \p RemainderLoop receives weights that reflect
501 /// the remaining TC%UF iterations.
502 ///
503 /// Note that \p OrigLoop may be equal to either \p UnrolledLoop or \p
504 /// RemainderLoop in which case weights for \p OrigLoop are updated accordingly.
505 /// Note also behavior is undefined if \p UnrolledLoop and \p RemainderLoop are
506 /// equal. \p UF must be greater than zero.
507 /// If \p OrigLoop has no profile info associated nothing happens.
508 ///
509 /// This utility may be useful for such optimizations as unroller and
510 /// vectorizer as it's typical transformation for them.
511 void setProfileInfoAfterUnrolling(Loop *OrigLoop, Loop *UnrolledLoop,
512                                   Loop *RemainderLoop, uint64_t UF);
513 
514 /// Utility that implements appending of loops onto a worklist given a range.
515 /// We want to process loops in postorder, but the worklist is a LIFO data
516 /// structure, so we append to it in *reverse* postorder.
517 /// For trees, a preorder traversal is a viable reverse postorder, so we
518 /// actually append using a preorder walk algorithm.
519 template <typename RangeT>
520 void appendLoopsToWorklist(RangeT &&, SmallPriorityWorklist<Loop *, 4> &);
521 /// Utility that implements appending of loops onto a worklist given a range.
522 /// It has the same behavior as appendLoopsToWorklist, but assumes the range of
523 /// loops has already been reversed, so it processes loops in the given order.
524 template <typename RangeT>
525 void appendReversedLoopsToWorklist(RangeT &&,
526                                    SmallPriorityWorklist<Loop *, 4> &);
527 
528 /// Utility that implements appending of loops onto a worklist given LoopInfo.
529 /// Calls the templated utility taking a Range of loops, handing it the Loops
530 /// in LoopInfo, iterated in reverse. This is because the loops are stored in
531 /// RPO w.r.t. the control flow graph in LoopInfo. For the purpose of unrolling,
532 /// loop deletion, and LICM, we largely want to work forward across the CFG so
533 /// that we visit defs before uses and can propagate simplifications from one
534 /// loop nest into the next. Calls appendReversedLoopsToWorklist with the
535 /// already reversed loops in LI.
536 /// FIXME: Consider changing the order in LoopInfo.
537 void appendLoopsToWorklist(LoopInfo &, SmallPriorityWorklist<Loop *, 4> &);
538 
539 /// Recursively clone the specified loop and all of its children,
540 /// mapping the blocks with the specified map.
541 Loop *cloneLoop(Loop *L, Loop *PL, ValueToValueMapTy &VM,
542                 LoopInfo *LI, LPPassManager *LPM);
543 
544 /// Add code that checks at runtime if the accessed arrays in \p PointerChecks
545 /// overlap. Returns the final comparator value or NULL if no check is needed.
546 Value *
547 addRuntimeChecks(Instruction *Loc, Loop *TheLoop,
548                  const SmallVectorImpl<RuntimePointerCheck> &PointerChecks,
549                  SCEVExpander &Expander, bool HoistRuntimeChecks = false);
550 
/// Add code at \p Loc that checks at runtime, via pointer-difference
/// comparisons described by \p Checks, whether the accesses may overlap.
/// NOTE(review): presumably \p GetVF yields the vectorization factor for a
/// given builder and index, and \p IC is the interleave count used to scale
/// the access extents — confirm against the implementation. Returns the
/// final comparator value, or nullptr if no check is needed (by analogy with
/// addRuntimeChecks above — confirm).
551 Value *addDiffRuntimeChecks(
552     Instruction *Loc, ArrayRef<PointerDiffInfo> Checks, SCEVExpander &Expander,
553     function_ref<Value *(IRBuilderBase &, unsigned)> GetVF, unsigned IC);
554 
555 /// Struct to hold information about a partially invariant condition.
556 struct IVConditionInfo {
557   /// Instructions that need to be duplicated (into the preheader, see
558   /// hasPartialIVCondition) and checked for the unswitching condition.
559   SmallVector<Instruction *> InstToDuplicate;
560 
561   /// Constant to indicate for which value the condition is invariant; used to
561   /// update the condition in the loops created for the true or false
561   /// successors.
562   Constant *KnownValue = nullptr;
563 
564   /// True if the partially invariant path is no-op (=does not have any
565   /// side-effects and no loop value is used outside the loop).
566   bool PathIsNoop = true;
567 
568   /// If the partially invariant path reaches a single exit block, ExitForPath
569   /// is set to that block. Otherwise it is nullptr.
570   BasicBlock *ExitForPath = nullptr;
571 };
572 
573 /// Check if the loop header has a conditional branch that is not
574 /// loop-invariant, because it involves load instructions. If all paths from
575 /// either the true or false successor to the header or loop exits do not
576 /// modify the memory feeding the condition, perform 'partial unswitching'. That
577 /// is, duplicate the instructions feeding the condition in the pre-header. Then
578 /// unswitch on the duplicated condition. The condition is now known in the
579 /// unswitched version for the 'invariant' path through the original loop.
580 ///
581 /// If the branch condition of the header is partially invariant, return a pair
582 /// containing the instructions to duplicate and a boolean Constant to update
583 /// the condition in the loops created for the true or false successors.
584 std::optional<IVConditionInfo> hasPartialIVCondition(const Loop &L,
585                                                      unsigned MSSAThreshold,
586                                                      const MemorySSA &MSSA,
587                                                      AAResults &AA);
588 
589 } // end namespace llvm
590 
591 #endif // LLVM_TRANSFORMS_UTILS_LOOPUTILS_H
592