//===-- LoopUtils.cpp - Loop Utility functions -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines common loop utility functions.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PriorityWorklist.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/MustExecute.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "loop-utils"

static const char *LLVMLoopDisableNonforced = "llvm.loop.disable_nonforced";
static const char *LLVMLoopDisableLICM = "llvm.licm.disable";
static const char *LLVMLoopMustProgress = "llvm.loop.mustprogress";

bool llvm::formDedicatedExitBlocks(Loop *L, DominatorTree *DT, LoopInfo *LI,
                                   MemorySSAUpdater *MSSAU,
                                   bool PreserveLCSSA) {
  bool Changed = false;

  // We re-use a vector for the in-loop predecessors.
  SmallVector<BasicBlock *, 4> InLoopPredecessors;

  auto RewriteExit = [&](BasicBlock *BB) {
    assert(InLoopPredecessors.empty() &&
           "Must start with an empty predecessors list!");
    auto Cleanup = make_scope_exit([&] { InLoopPredecessors.clear(); });

    // See if there are any non-loop predecessors of this exit block and
    // keep track of the in-loop predecessors.
    bool IsDedicatedExit = true;
    for (auto *PredBB : predecessors(BB))
      if (L->contains(PredBB)) {
        if (isa<IndirectBrInst>(PredBB->getTerminator()))
          // We cannot rewrite exiting edges from an indirectbr.
          return false;
        if (isa<CallBrInst>(PredBB->getTerminator()))
          // We cannot rewrite exiting edges from a callbr.
          return false;

        InLoopPredecessors.push_back(PredBB);
      } else {
        IsDedicatedExit = false;
      }

    assert(!InLoopPredecessors.empty() && "Must have *some* loop predecessor!");

    // Nothing to do if this is already a dedicated exit.
    if (IsDedicatedExit)
      return false;

    auto *NewExitBB = SplitBlockPredecessors(
        BB, InLoopPredecessors, ".loopexit", DT, LI, MSSAU, PreserveLCSSA);

    if (!NewExitBB)
      LLVM_DEBUG(
          dbgs() << "WARNING: Can't create a dedicated exit block for loop: "
                 << *L << "\n");
    else
      LLVM_DEBUG(dbgs() << "LoopSimplify: Creating dedicated exit block "
                        << NewExitBB->getName() << "\n");
    return true;
  };

  // Walk the exit blocks directly rather than building up a data structure for
  // them, but only visit each one once.
  SmallPtrSet<BasicBlock *, 4> Visited;
  for (auto *BB : L->blocks())
    for (auto *SuccBB : successors(BB)) {
      // We're looking for exit blocks so skip in-loop successors.
      if (L->contains(SuccBB))
        continue;

      // Visit each exit block exactly once.
      if (!Visited.insert(SuccBB).second)
        continue;

      Changed |= RewriteExit(SuccBB);
    }

  return Changed;
}

/// Returns the instructions that use values defined in the loop.
SmallVector<Instruction *, 8> llvm::findDefsUsedOutsideOfLoop(Loop *L) {
  SmallVector<Instruction *, 8> UsedOutside;

  for (auto *Block : L->getBlocks())
    // FIXME: I believe that this could use copy_if if the Inst reference could
    // be adapted into a pointer.
    for (auto &Inst : *Block) {
      auto Users = Inst.users();
      if (any_of(Users, [&](User *U) {
            auto *Use = cast<Instruction>(U);
            return !L->contains(Use->getParent());
          }))
        UsedOutside.push_back(&Inst);
    }

  return UsedOutside;
}

void llvm::getLoopAnalysisUsage(AnalysisUsage &AU) {
  // By definition, all loop passes need the LoopInfo analysis and the
  // Dominator tree it depends on. Because they all participate in the loop
  // pass manager, they must also preserve these.
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addPreserved<DominatorTreeWrapperPass>();
  AU.addRequired<LoopInfoWrapperPass>();
  AU.addPreserved<LoopInfoWrapperPass>();

  // We must also preserve LoopSimplify and LCSSA. We locally access their IDs
  // here because users shouldn't directly get them from this header.
  extern char &LoopSimplifyID;
  extern char &LCSSAID;
  AU.addRequiredID(LoopSimplifyID);
  AU.addPreservedID(LoopSimplifyID);
  AU.addRequiredID(LCSSAID);
  AU.addPreservedID(LCSSAID);
  // This is used in the LPPassManager to perform LCSSA verification on passes
  // which preserve LCSSA form.
  AU.addRequired<LCSSAVerificationPass>();
  AU.addPreserved<LCSSAVerificationPass>();

  // Loop passes are designed to run inside of a loop pass manager which means
  // that any function analyses they require must be required by the first loop
  // pass in the manager (so that it is computed before the loop pass manager
  // runs) and preserved by all loop passes in the manager. To make this
  // reasonably robust, the set needed for most loop passes is maintained here.
  // If your loop pass requires an analysis not listed here, you will need to
  // carefully audit the loop pass manager nesting structure that results.
  AU.addRequired<AAResultsWrapperPass>();
  AU.addPreserved<AAResultsWrapperPass>();
  AU.addPreserved<BasicAAWrapperPass>();
  AU.addPreserved<GlobalsAAWrapperPass>();
  AU.addPreserved<SCEVAAWrapperPass>();
  AU.addRequired<ScalarEvolutionWrapperPass>();
  AU.addPreserved<ScalarEvolutionWrapperPass>();
  // FIXME: When all loop passes preserve MemorySSA, it can be required and
  // preserved here instead of the individual handling in each pass.
}

/// Manually defined generic "LoopPass" dependency initialization. This is used
/// to initialize the exact set of passes from above in \c
/// getLoopAnalysisUsage. It can be used within a loop pass's initialization
/// with:
///
///   INITIALIZE_PASS_DEPENDENCY(LoopPass)
///
/// As-if "LoopPass" were a pass.
void llvm::initializeLoopPassPass(PassRegistry &Registry) {
  INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
  INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
  INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
  INITIALIZE_PASS_DEPENDENCY(LCSSAWrapperPass)
  INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
  INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
  INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
  INITIALIZE_PASS_DEPENDENCY(SCEVAAWrapperPass)
  INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
  INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
}

/// Create an MDNode for the input string.
static MDNode *createStringMetadata(Loop *TheLoop, StringRef Name, unsigned V) {
  LLVMContext &Context = TheLoop->getHeader()->getContext();
  Metadata *MDs[] = {
      MDString::get(Context, Name),
      ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(Context), V))};
  return MDNode::get(Context, MDs);
}

/// Set the given string into the loop metadata, keeping the other operands
/// intact. If the string is already present, update its value if it is
/// different.
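///
/// For example (illustrative IR, not taken from a test), after
/// addStringMetadataToLoop(L, "llvm.loop.unroll.count", 4) the loop's
/// metadata looks roughly like:
///
///   br i1 %cond, label %header, label %exit, !llvm.loop !0
///   !0 = distinct !{!0, !1}
///   !1 = !{!"llvm.loop.unroll.count", i32 4}
///
/// where operand 0 of !0 is a self-reference that makes the node distinct.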
void llvm::addStringMetadataToLoop(Loop *TheLoop, const char *StringMD,
                                   unsigned V) {
  SmallVector<Metadata *, 4> MDs(1);
  // If the loop already has metadata, retain it.
  MDNode *LoopID = TheLoop->getLoopID();
  if (LoopID) {
    for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
      MDNode *Node = cast<MDNode>(LoopID->getOperand(i));
      // If it is of the form key = value, try to parse it.
      if (Node->getNumOperands() == 2) {
        MDString *S = dyn_cast<MDString>(Node->getOperand(0));
        if (S && S->getString().equals(StringMD)) {
          ConstantInt *IntMD =
              mdconst::extract_or_null<ConstantInt>(Node->getOperand(1));
          if (IntMD && IntMD->getSExtValue() == V)
            // It is already in place. Do nothing.
            return;
          // We need to update the value, so just skip it here and it will
          // be added after copying the other existing nodes.
          continue;
        }
      }
      MDs.push_back(Node);
    }
  }
  // Add the new metadata.
  MDs.push_back(createStringMetadata(TheLoop, StringMD, V));
  // Replace the current metadata node with the new one.
  LLVMContext &Context = TheLoop->getHeader()->getContext();
  MDNode *NewLoopID = MDNode::get(Context, MDs);
  // Set operand 0 to refer to the loop id itself.
  NewLoopID->replaceOperandWith(0, NewLoopID);
  TheLoop->setLoopID(NewLoopID);
}

/// Find string metadata for a loop.
///
/// If it has a value (e.g. {"llvm.distribute", 1}), return the value as an
/// operand, or null otherwise. If the string metadata is not found, return
/// Optional's not-a-value.
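///
/// Illustrative metadata forms this distinguishes:
///
///   !{!"llvm.loop.unroll.enable"}           ; no value -> returns nullptr
///   !{!"llvm.loop.vectorize.width", i32 4}  ; value    -> returns &operand(1)
///   (option absent from !llvm.loop)         ;          -> returns None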
Optional<const MDOperand *> llvm::findStringMetadataForLoop(const Loop *TheLoop,
                                                            StringRef Name) {
  MDNode *MD = findOptionMDForLoop(TheLoop, Name);
  if (!MD)
    return None;
  switch (MD->getNumOperands()) {
  case 1:
    return nullptr;
  case 2:
    return &MD->getOperand(1);
  default:
    llvm_unreachable("loop metadata option should have one or two operands");
  }
}

static Optional<bool> getOptionalBoolLoopAttribute(const Loop *TheLoop,
                                                   StringRef Name) {
  MDNode *MD = findOptionMDForLoop(TheLoop, Name);
  if (!MD)
    return None;
  switch (MD->getNumOperands()) {
  case 1:
    // When the value is absent it is interpreted as 'attribute set'.
    return true;
  case 2:
    if (ConstantInt *IntMD =
            mdconst::extract_or_null<ConstantInt>(MD->getOperand(1).get()))
      return IntMD->getZExtValue();
    return true;
  }
  llvm_unreachable("unexpected number of options");
}

bool llvm::getBooleanLoopAttribute(const Loop *TheLoop, StringRef Name) {
  return getOptionalBoolLoopAttribute(TheLoop, Name).getValueOr(false);
}

Optional<ElementCount>
llvm::getOptionalElementCountLoopAttribute(const Loop *TheLoop) {
  Optional<int> Width =
      getOptionalIntLoopAttribute(TheLoop, "llvm.loop.vectorize.width");

  if (Width.hasValue()) {
    Optional<int> IsScalable = getOptionalIntLoopAttribute(
        TheLoop, "llvm.loop.vectorize.scalable.enable");
    return ElementCount::get(*Width, IsScalable.getValueOr(false));
  }

  return None;
}

llvm::Optional<int> llvm::getOptionalIntLoopAttribute(const Loop *TheLoop,
                                                      StringRef Name) {
  const MDOperand *AttrMD =
      findStringMetadataForLoop(TheLoop, Name).getValueOr(nullptr);
  if (!AttrMD)
    return None;

  ConstantInt *IntMD = mdconst::extract_or_null<ConstantInt>(AttrMD->get());
  if (!IntMD)
    return None;

  return IntMD->getSExtValue();
}

Optional<MDNode *> llvm::makeFollowupLoopID(
    MDNode *OrigLoopID, ArrayRef<StringRef> FollowupOptions,
    const char *InheritOptionsExceptPrefix, bool AlwaysNew) {
  if (!OrigLoopID) {
    if (AlwaysNew)
      return nullptr;
    return None;
  }

  assert(OrigLoopID->getOperand(0) == OrigLoopID);

  bool InheritAllAttrs = !InheritOptionsExceptPrefix;
  bool InheritSomeAttrs =
      InheritOptionsExceptPrefix && InheritOptionsExceptPrefix[0] != '\0';
  SmallVector<Metadata *, 8> MDs;
  MDs.push_back(nullptr);

  bool Changed = false;
  if (InheritAllAttrs || InheritSomeAttrs) {
    for (const MDOperand &Existing : drop_begin(OrigLoopID->operands())) {
      MDNode *Op = cast<MDNode>(Existing.get());

      auto InheritThisAttribute = [InheritSomeAttrs,
                                   InheritOptionsExceptPrefix](MDNode *Op) {
        if (!InheritSomeAttrs)
          return false;

        // Skip malformed attribute metadata nodes.
        if (Op->getNumOperands() == 0)
          return true;
        Metadata *NameMD = Op->getOperand(0).get();
        if (!isa<MDString>(NameMD))
          return true;
        StringRef AttrName = cast<MDString>(NameMD)->getString();

        // Do not inherit excluded attributes.
        return !AttrName.startswith(InheritOptionsExceptPrefix);
      };

      if (InheritThisAttribute(Op))
        MDs.push_back(Op);
      else
        Changed = true;
    }
  } else {
    // Modified if we dropped at least one attribute.
    Changed = OrigLoopID->getNumOperands() > 1;
  }

  bool HasAnyFollowup = false;
  for (StringRef OptionName : FollowupOptions) {
    MDNode *FollowupNode = findOptionMDForLoopID(OrigLoopID, OptionName);
    if (!FollowupNode)
      continue;

    HasAnyFollowup = true;
    for (const MDOperand &Option : drop_begin(FollowupNode->operands())) {
      MDs.push_back(Option.get());
      Changed = true;
    }
  }

  // Attributes of the followup loop were not specified explicitly, so signal
  // to the transformation pass to add suitable attributes.
  if (!AlwaysNew && !HasAnyFollowup)
    return None;

  // If no attributes were added or removed, the previous loop ID can be
  // reused.
  if (!AlwaysNew && !Changed)
    return OrigLoopID;

  // No attributes is equivalent to having no !llvm.loop metadata at all.
  if (MDs.size() == 1)
    return nullptr;

  // Build the new loop ID.
  MDTuple *FollowupLoopID = MDNode::get(OrigLoopID->getContext(), MDs);
  FollowupLoopID->replaceOperandWith(0, FollowupLoopID);
  return FollowupLoopID;
}
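
// Illustrative example (hand-written, not from a test): given an original
// loop ID
//
//   !0 = distinct !{!0, !1, !2}
//   !1 = !{!"llvm.loop.unroll.count", i32 4}
//   !2 = !{!"llvm.loop.unroll.followup_unrolled", !3}
//   !3 = !{!"llvm.loop.isvectorized", i32 1}
//
// a call like makeFollowupLoopID(!0, {"llvm.loop.unroll.followup_unrolled"},
// "llvm.loop.unroll.") drops the "llvm.loop.unroll." attributes (!1, !2),
// appends the followup payload !3, and returns a fresh self-referential loop
// ID containing only !3.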

bool llvm::hasDisableAllTransformsHint(const Loop *L) {
  return getBooleanLoopAttribute(L, LLVMLoopDisableNonforced);
}

bool llvm::hasDisableLICMTransformsHint(const Loop *L) {
  return getBooleanLoopAttribute(L, LLVMLoopDisableLICM);
}

bool llvm::hasMustProgress(const Loop *L) {
  return getBooleanLoopAttribute(L, LLVMLoopMustProgress);
}

TransformationMode llvm::hasUnrollTransformation(const Loop *L) {
  if (getBooleanLoopAttribute(L, "llvm.loop.unroll.disable"))
    return TM_SuppressedByUser;

  Optional<int> Count =
      getOptionalIntLoopAttribute(L, "llvm.loop.unroll.count");
  if (Count.hasValue())
    return Count.getValue() == 1 ? TM_SuppressedByUser : TM_ForcedByUser;

  if (getBooleanLoopAttribute(L, "llvm.loop.unroll.enable"))
    return TM_ForcedByUser;

  if (getBooleanLoopAttribute(L, "llvm.loop.unroll.full"))
    return TM_ForcedByUser;

  if (hasDisableAllTransformsHint(L))
    return TM_Disable;

  return TM_Unspecified;
}

TransformationMode llvm::hasUnrollAndJamTransformation(const Loop *L) {
  if (getBooleanLoopAttribute(L, "llvm.loop.unroll_and_jam.disable"))
    return TM_SuppressedByUser;

  Optional<int> Count =
      getOptionalIntLoopAttribute(L, "llvm.loop.unroll_and_jam.count");
  if (Count.hasValue())
    return Count.getValue() == 1 ? TM_SuppressedByUser : TM_ForcedByUser;

  if (getBooleanLoopAttribute(L, "llvm.loop.unroll_and_jam.enable"))
    return TM_ForcedByUser;

  if (hasDisableAllTransformsHint(L))
    return TM_Disable;

  return TM_Unspecified;
}

TransformationMode llvm::hasVectorizeTransformation(const Loop *L) {
  Optional<bool> Enable =
      getOptionalBoolLoopAttribute(L, "llvm.loop.vectorize.enable");

  if (Enable == false)
    return TM_SuppressedByUser;

  Optional<ElementCount> VectorizeWidth =
      getOptionalElementCountLoopAttribute(L);
  Optional<int> InterleaveCount =
      getOptionalIntLoopAttribute(L, "llvm.loop.interleave.count");

  // 'Forcing' vector width and interleave count to one effectively disables
  // this transformation.
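  // For instance (illustrative), a loop carrying
  //   !{!"llvm.loop.vectorize.enable", i32 1}
  //   !{!"llvm.loop.vectorize.width", i32 1}
  //   !{!"llvm.loop.interleave.count", i32 1}
  // is treated as suppressed by the user even though vectorization is
  // nominally enabled.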
  if (Enable == true && VectorizeWidth && VectorizeWidth->isScalar() &&
      InterleaveCount == 1)
    return TM_SuppressedByUser;

  if (getBooleanLoopAttribute(L, "llvm.loop.isvectorized"))
    return TM_Disable;

  if (Enable == true)
    return TM_ForcedByUser;

  if ((VectorizeWidth && VectorizeWidth->isScalar()) && InterleaveCount == 1)
    return TM_Disable;

  if ((VectorizeWidth && VectorizeWidth->isVector()) || InterleaveCount > 1)
    return TM_Enable;

  if (hasDisableAllTransformsHint(L))
    return TM_Disable;

  return TM_Unspecified;
}

TransformationMode llvm::hasDistributeTransformation(const Loop *L) {
  if (getBooleanLoopAttribute(L, "llvm.loop.distribute.enable"))
    return TM_ForcedByUser;

  if (hasDisableAllTransformsHint(L))
    return TM_Disable;

  return TM_Unspecified;
}

TransformationMode llvm::hasLICMVersioningTransformation(const Loop *L) {
  if (getBooleanLoopAttribute(L, "llvm.loop.licm_versioning.disable"))
    return TM_SuppressedByUser;

  if (hasDisableAllTransformsHint(L))
    return TM_Disable;

  return TM_Unspecified;
}

/// Does a BFS from a given node to all of its children inside a given loop.
/// The returned vector of nodes includes the starting point.
SmallVector<DomTreeNode *, 16>
llvm::collectChildrenInLoop(DomTreeNode *N, const Loop *CurLoop) {
  SmallVector<DomTreeNode *, 16> Worklist;
  auto AddRegionToWorklist = [&](DomTreeNode *DTN) {
    // Only include subregions in the top level loop.
    BasicBlock *BB = DTN->getBlock();
    if (CurLoop->contains(BB))
      Worklist.push_back(DTN);
  };

  AddRegionToWorklist(N);

  for (size_t I = 0; I < Worklist.size(); I++) {
    for (DomTreeNode *Child : Worklist[I]->children())
      AddRegionToWorklist(Child);
  }

  return Worklist;
}

void llvm::deleteDeadLoop(Loop *L, DominatorTree *DT, ScalarEvolution *SE,
                          LoopInfo *LI, MemorySSA *MSSA) {
  assert((!DT || L->isLCSSAForm(*DT)) && "Expected LCSSA!");
  auto *Preheader = L->getLoopPreheader();
  assert(Preheader && "Preheader should exist!");

  std::unique_ptr<MemorySSAUpdater> MSSAU;
  if (MSSA)
    MSSAU = std::make_unique<MemorySSAUpdater>(MSSA);

  // Now that we know the removal is safe, remove the loop by changing the
  // branch from the preheader to go to the single exit block.
  //
  // Because we're deleting a large chunk of code at once, the sequence in
  // which we remove things is very important to avoid invalidation issues.

  // Tell ScalarEvolution that the loop is deleted. Do this before
  // deleting the loop so that ScalarEvolution can look at the loop
  // to determine what it needs to clean up.
  if (SE)
    SE->forgetLoop(L);

  auto *OldBr = dyn_cast<BranchInst>(Preheader->getTerminator());
  assert(OldBr && "Preheader must end with a branch");
  assert(OldBr->isUnconditional() && "Preheader must have a single successor");
  // Connect the preheader to the exit block. Keep the old edge to the header
  // around to perform the dominator tree update in two separate steps
  // -- #1 insertion of the edge preheader -> exit and #2 deletion of the edge
  // preheader -> header.
  //
  //
  // 0. Preheader          1. Preheader          2. Preheader
  //        |                    |   |                   |
  //        V                    |   V                   |
  //  Header <--\                | Header <--\           | Header <--\
  //   |    |   |                |  |    |   |           |  |    |   |
  //   |    V   |                |  |    V   |           |  |    V   |
  //   |   Body --/              |  |   Body --/         |  |   Body --/
  //   V                         V   V                       V
  //  Exit                      Exit                        Exit
  //
  // By doing this in two separate steps we can perform the dominator tree
  // update without using the batch update API.
  //
  // Even when the loop is never executed, we cannot remove the edge from the
  // source block to the exit block. Consider the case where the unexecuted loop
  // branches back to an outer loop. If we deleted the loop and removed the edge
  // coming to this inner loop, this will break the outer loop structure (by
  // deleting the backedge of the outer loop). If the outer loop is indeed a
  // non-loop, it will be deleted in a future iteration of loop deletion pass.
  IRBuilder<> Builder(OldBr);

  auto *ExitBlock = L->getUniqueExitBlock();
  DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
  if (ExitBlock) {
    assert(ExitBlock && "Should have a unique exit block!");
    assert(L->hasDedicatedExits() && "Loop should have dedicated exits!");

    Builder.CreateCondBr(Builder.getFalse(), L->getHeader(), ExitBlock);
    // Remove the old branch. The conditional branch becomes the new terminator.
    OldBr->eraseFromParent();

    // Rewrite phis in the exit block to get their inputs from the Preheader
    // instead of the exiting block.
    for (PHINode &P : ExitBlock->phis()) {
      // Set the zero'th element of Phi to be from the preheader and remove all
      // other incoming values. Given the loop has dedicated exits, all other
      // incoming values must be from the exiting blocks.
      int PredIndex = 0;
      P.setIncomingBlock(PredIndex, Preheader);
      // Removes all incoming values from all other exiting blocks (including
      // duplicate values from an exiting block).
      // Nuke all entries except the zero'th entry, which is the preheader
      // entry.
      // NOTE! We need to remove Incoming Values in the reverse order as done
      // below, to keep the indices valid for deletion (removeIncomingValues
      // updates getNumIncomingValues and shifts all values down into the
      // operand being deleted).
      for (unsigned i = 0, e = P.getNumIncomingValues() - 1; i != e; ++i)
        P.removeIncomingValue(e - i, false);

      assert((P.getNumIncomingValues() == 1 &&
              P.getIncomingBlock(PredIndex) == Preheader) &&
             "Should have exactly one value and that's from the preheader!");
    }

    if (DT) {
      DTU.applyUpdates({{DominatorTree::Insert, Preheader, ExitBlock}});
      if (MSSA) {
        MSSAU->applyUpdates({{DominatorTree::Insert, Preheader, ExitBlock}},
                            *DT);
        if (VerifyMemorySSA)
          MSSA->verifyMemorySSA();
      }
    }

    // Disconnect the loop body by branching directly to its exit.
    Builder.SetInsertPoint(Preheader->getTerminator());
    Builder.CreateBr(ExitBlock);
    // Remove the old branch.
    Preheader->getTerminator()->eraseFromParent();
  } else {
    assert(L->hasNoExitBlocks() &&
           "Loop should have either zero or one exit blocks.");

    Builder.SetInsertPoint(OldBr);
    Builder.CreateUnreachable();
    Preheader->getTerminator()->eraseFromParent();
  }

  if (DT) {
    DTU.applyUpdates({{DominatorTree::Delete, Preheader, L->getHeader()}});
    if (MSSA) {
      MSSAU->applyUpdates({{DominatorTree::Delete, Preheader, L->getHeader()}},
                          *DT);
      SmallSetVector<BasicBlock *, 8> DeadBlockSet(L->block_begin(),
                                                   L->block_end());
      MSSAU->removeBlocks(DeadBlockSet);
      if (VerifyMemorySSA)
        MSSA->verifyMemorySSA();
    }
  }

  // Use a set to de-duplicate and a vector to guarantee deterministic
  // ordering.
  llvm::SmallDenseSet<std::pair<DIVariable *, DIExpression *>, 4> DeadDebugSet;
  llvm::SmallVector<DbgVariableIntrinsic *, 4> DeadDebugInst;

  if (ExitBlock) {
    // Given LCSSA form is satisfied, we should not have users of instructions
    // within the dead loop outside of the loop. However, LCSSA doesn't take
    // unreachable uses into account. We handle them here.
    // We could do this after dropping all references (in which case all users
    // inside the loop would already be eliminated, leaving us less work to
    // do), but according to the API doc of User::dropAllReferences the only
    // valid operation after dropping references is deletion. So let's
    // substitute all usages of instructions from the loop with undef values
    // of the corresponding type first.
    for (auto *Block : L->blocks())
      for (Instruction &I : *Block) {
        auto *Undef = UndefValue::get(I.getType());
        for (Value::use_iterator UI = I.use_begin(), E = I.use_end();
             UI != E;) {
          Use &U = *UI;
          ++UI;
          if (auto *Usr = dyn_cast<Instruction>(U.getUser()))
            if (L->contains(Usr->getParent()))
              continue;
          // If we have a DT, we can check that uses outside the loop occur
          // only in unreachable blocks.
          if (DT)
            assert(!DT->isReachableFromEntry(U) &&
                   "Unexpected user in reachable block");
          U.set(Undef);
        }
        auto *DVI = dyn_cast<DbgVariableIntrinsic>(&I);
        if (!DVI)
          continue;
        auto Key =
            DeadDebugSet.find({DVI->getVariable(), DVI->getExpression()});
        if (Key != DeadDebugSet.end())
          continue;
        DeadDebugSet.insert({DVI->getVariable(), DVI->getExpression()});
        DeadDebugInst.push_back(DVI);
      }

    // After the loop has been deleted, all the values defined and modified
    // inside the loop are going to be unavailable.
    // Since debug values in the loop have been deleted, inserting an undef
    // dbg.value truncates the range of any dbg.value before the loop where the
    // loop used to be. This is particularly important for constant values.
    DIBuilder DIB(*ExitBlock->getModule());
    Instruction *InsertDbgValueBefore = ExitBlock->getFirstNonPHI();
    assert(InsertDbgValueBefore &&
           "There should be a non-PHI instruction in exit block, else these "
           "instructions will have no parent.");
    for (auto *DVI : DeadDebugInst)
      DIB.insertDbgValueIntrinsic(UndefValue::get(Builder.getInt32Ty()),
                                  DVI->getVariable(), DVI->getExpression(),
                                  DVI->getDebugLoc(), InsertDbgValueBefore);
  }

  // Remove the blocks from the reference counting scheme, so that we can
  // delete them freely later.
  for (auto *Block : L->blocks())
    Block->dropAllReferences();

  if (MSSA && VerifyMemorySSA)
    MSSA->verifyMemorySSA();

  if (LI) {
    // Erase the instructions and the blocks without having to worry
    // about ordering because we already dropped the references.
    // NOTE: This iteration is safe because erasing the block does not remove
    // its entry from the loop's block list. We do that in the next section.
    for (Loop::block_iterator LpI = L->block_begin(), LpE = L->block_end();
         LpI != LpE; ++LpI)
      (*LpI)->eraseFromParent();

    // Finally, remove the blocks from loopinfo. This has to happen late
    // because otherwise our loop iterators won't work.

    SmallPtrSet<BasicBlock *, 8> blocks;
    blocks.insert(L->block_begin(), L->block_end());
    for (BasicBlock *BB : blocks)
      LI->removeBlock(BB);

    // The last step is to update LoopInfo now that we've eliminated this loop.
    // Note: LoopInfo::erase removes the given loop and relinks its subloops
    // with its parent, while removeLoop/removeChildLoop remove the given loop
    // but do not relink its subloops, which is what we want.
    if (Loop *ParentLoop = L->getParentLoop()) {
      Loop::iterator I = find(*ParentLoop, L);
      assert(I != ParentLoop->end() && "Couldn't find loop");
      ParentLoop->removeChildLoop(I);
    } else {
      Loop::iterator I = find(*LI, L);
      assert(I != LI->end() && "Couldn't find loop");
      LI->removeLoop(I);
    }
    LI->destroy(L);
  }
}

static Loop *getOutermostLoop(Loop *L) {
  while (Loop *Parent = L->getParentLoop())
    L = Parent;
  return L;
}

void llvm::breakLoopBackedge(Loop *L, DominatorTree &DT, ScalarEvolution &SE,
                             LoopInfo &LI, MemorySSA *MSSA) {
  auto *Latch = L->getLoopLatch();
  assert(Latch && "multiple latches not yet supported");
  auto *Header = L->getHeader();
  Loop *OutermostLoop = getOutermostLoop(L);

  SE.forgetLoop(L);

  // Note: By splitting the backedge, and then explicitly making it unreachable
  // we gracefully handle corner cases such as non-bottom tested loops and the
  // like. We also have the benefit of being able to reuse existing well tested
  // code. It might be worth special casing the common bottom tested case at
  // some point to avoid code churn.

  std::unique_ptr<MemorySSAUpdater> MSSAU;
  if (MSSA)
    MSSAU = std::make_unique<MemorySSAUpdater>(MSSA);

  auto *BackedgeBB = SplitEdge(Latch, Header, &DT, &LI, MSSAU.get());

  DomTreeUpdater DTU(&DT, DomTreeUpdater::UpdateStrategy::Eager);
  (void)changeToUnreachable(BackedgeBB->getTerminator(), /*UseTrap*/ false,
                            /*PreserveLCSSA*/ true, &DTU, MSSAU.get());

  // Erase (and destroy) this loop instance. Handles relinking sub-loops
  // and blocks within the loop as needed.
  LI.erase(L);

  // If the loop we broke had a parent, then changeToUnreachable might have
  // caused a block to be removed from the parent loop (see loop_nest_lcssa
  // test case in zero-btc.ll for an example), thus changing the parent's
  // exit blocks. If that happened, we need to rebuild LCSSA on the outermost
  // loop which might have had a block removed.
  if (OutermostLoop != L)
    formLCSSARecursively(*OutermostLoop, DT, &LI, &SE);
}

/// Checks if \p L exits through its latch block, where any other exits must
/// be "deoptimizing" exits. Returns the branch instruction terminating the
/// loop latch if the check succeeds, nullptr otherwise.
static BranchInst *getExpectedExitLoopLatchBranch(Loop *L) {
  BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return nullptr;

  BranchInst *LatchBR = dyn_cast<BranchInst>(Latch->getTerminator());
  if (!LatchBR || LatchBR->getNumSuccessors() != 2 || !L->isLoopExiting(Latch))
    return nullptr;

  assert((LatchBR->getSuccessor(0) == L->getHeader() ||
          LatchBR->getSuccessor(1) == L->getHeader()) &&
         "At least one edge out of the latch must go to the header");

  SmallVector<BasicBlock *, 4> ExitBlocks;
  L->getUniqueNonLatchExitBlocks(ExitBlocks);
  if (any_of(ExitBlocks, [](const BasicBlock *EB) {
        return !EB->getTerminatingDeoptimizeCall();
      }))
    return nullptr;

  return LatchBR;
}

Optional<unsigned>
llvm::getLoopEstimatedTripCount(Loop *L,
                                unsigned *EstimatedLoopInvocationWeight) {
  // Support loops with an exiting latch where all other exits only
  // deoptimize.
  BranchInst *LatchBranch = getExpectedExitLoopLatchBranch(L);
  if (!LatchBranch)
    return None;

  // To estimate the number of times the loop body was executed, we want to
  // know the number of times the backedge was taken, vs. the number of times
  // we exited the loop.
  uint64_t BackedgeTakenWeight, LatchExitWeight;
  if (!LatchBranch->extractProfMetadata(BackedgeTakenWeight, LatchExitWeight))
    return None;

  if (LatchBranch->getSuccessor(0) != L->getHeader())
    std::swap(BackedgeTakenWeight, LatchExitWeight);

  if (!LatchExitWeight)
    return None;

  if (EstimatedLoopInvocationWeight)
    *EstimatedLoopInvocationWeight = LatchExitWeight;

  // The estimated backedge-taken count is the ratio of the backedge-taken
  // weight to the weight of the edge exiting the loop, rounded to nearest.
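  // For example (illustrative numbers), latch branch weights of
  // !{!"branch_weights", i32 127, i32 1} mean the backedge was taken 127
  // times for every loop exit, so the estimate below is
  // divideNearest(127, 1) + 1 = 128 iterations per loop entry.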
  uint64_t BackedgeTakenCount =
      llvm::divideNearest(BackedgeTakenWeight, LatchExitWeight);
  // The estimated trip count is one plus the estimated backedge-taken count.
  return BackedgeTakenCount + 1;
}

bool llvm::setLoopEstimatedTripCount(Loop *L, unsigned EstimatedTripCount,
                                     unsigned EstimatedloopInvocationWeight) {
  // Support loops with an exiting latch where all other exits only
  // deoptimize.
  BranchInst *LatchBranch = getExpectedExitLoopLatchBranch(L);
  if (!LatchBranch)
    return false;

  // Calculate taken and exit weights.
  unsigned LatchExitWeight = 0;
  unsigned BackedgeTakenWeight = 0;

  if (EstimatedTripCount > 0) {
    LatchExitWeight = EstimatedloopInvocationWeight;
    BackedgeTakenWeight = (EstimatedTripCount - 1) * LatchExitWeight;
  }
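  // E.g. (illustrative) EstimatedTripCount = 128 with an invocation weight of
  // 1 produces weights {127, 1}, the inverse of the computation in
  // getLoopEstimatedTripCount above.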

  // Swap the weights if the backedge is taken when the condition is "false".
  if (LatchBranch->getSuccessor(0) != L->getHeader())
    std::swap(BackedgeTakenWeight, LatchExitWeight);

  MDBuilder MDB(LatchBranch->getContext());

  // Set/update the profile metadata.
  LatchBranch->setMetadata(
      LLVMContext::MD_prof,
      MDB.createBranchWeights(BackedgeTakenWeight, LatchExitWeight));

  return true;
}

bool llvm::hasIterationCountInvariantInParent(Loop *InnerLoop,
                                              ScalarEvolution &SE) {
  Loop *OuterL = InnerLoop->getParentLoop();
  if (!OuterL)
    return true;

  // Get the backedge taken count for the inner loop.
  BasicBlock *InnerLoopLatch = InnerLoop->getLoopLatch();
  const SCEV *InnerLoopBECountSC = SE.getExitCount(InnerLoop, InnerLoopLatch);
  if (isa<SCEVCouldNotCompute>(InnerLoopBECountSC) ||
      !InnerLoopBECountSC->getType()->isIntegerTy())
    return false;

  // Get whether the count is invariant to the outer loop.
  ScalarEvolution::LoopDisposition LD =
      SE.getLoopDisposition(InnerLoopBECountSC, OuterL);
  if (LD != ScalarEvolution::LoopInvariant)
    return false;

  return true;
}

Value *llvm::createMinMaxOp(IRBuilderBase &Builder, RecurKind RK, Value *Left,
                            Value *Right) {
  CmpInst::Predicate Pred;
  switch (RK) {
  default:
    llvm_unreachable("Unknown min/max recurrence kind");
  case RecurKind::UMin:
    Pred = CmpInst::ICMP_ULT;
    break;
  case RecurKind::UMax:
    Pred = CmpInst::ICMP_UGT;
    break;
  case RecurKind::SMin:
    Pred = CmpInst::ICMP_SLT;
    break;
  case RecurKind::SMax:
    Pred = CmpInst::ICMP_SGT;
    break;
  case RecurKind::FMin:
    Pred = CmpInst::FCMP_OLT;
    break;
  case RecurKind::FMax:
    Pred = CmpInst::FCMP_OGT;
    break;
  }

  Value *Cmp = Builder.CreateCmp(Pred, Left, Right, "rdx.minmax.cmp");
  Value *Select = Builder.CreateSelect(Cmp, Left, Right, "rdx.minmax.select");
  return Select;
}

// Helper to generate an ordered reduction.
Value *llvm::getOrderedReduction(IRBuilderBase &Builder, Value *Acc, Value *Src,
                                 unsigned Op, RecurKind RdxKind,
                                 ArrayRef<Value *> RedOps) {
  unsigned VF = cast<FixedVectorType>(Src->getType())->getNumElements();

  // Extract and apply reduction ops in ascending order:
  // e.g. ((((Acc + Scl[0]) + Scl[1]) + Scl[2]) + ...) + Scl[VF-1]
  Value *Result = Acc;
  for (unsigned ExtractIdx = 0; ExtractIdx != VF; ++ExtractIdx) {
    Value *Ext =
        Builder.CreateExtractElement(Src, Builder.getInt32(ExtractIdx));

    if (Op != Instruction::ICmp && Op != Instruction::FCmp) {
      Result = Builder.CreateBinOp((Instruction::BinaryOps)Op, Result, Ext,
                                   "bin.rdx");
    } else {
      assert(RecurrenceDescriptor::isMinMaxRecurrenceKind(RdxKind) &&
             "Invalid min/max");
      Result = createMinMaxOp(Builder, RdxKind, Result, Ext);
    }

    if (!RedOps.empty())
      propagateIRFlags(Result, RedOps);
  }

  return Result;
}

// Helper to generate a log2 shuffle reduction.
Value *llvm::getShuffleReduction(IRBuilderBase &Builder, Value *Src,
                                 unsigned Op, RecurKind RdxKind,
                                 ArrayRef<Value *> RedOps) {
  unsigned VF = cast<FixedVectorType>(Src->getType())->getNumElements();
  // VF is a power of 2 so we can emit the reduction using log2(VF) shuffles
  // and vector ops, reducing the set of values being computed by half each
  // round.
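  // Illustrative walk-through, assuming VF = 4 and Op = Add with input
  // <a0, a1, a2, a3>:
  //   round 1: mask <2, 3, undef, undef> -> add yields <a0+a2, a1+a3, ...>
  //   round 2: mask <1, undef, undef, undef> -> add yields <a0+a2+a1+a3, ...>
  // after which element 0 holds the full reduction.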
  assert(isPowerOf2_32(VF) &&
         "Reduction emission only supported for pow2 vectors!");
  Value *TmpVec = Src;
  SmallVector<int, 32> ShuffleMask(VF);
  for (unsigned i = VF; i != 1; i >>= 1) {
    // Move the upper half of the vector to the lower half.
    for (unsigned j = 0; j != i / 2; ++j)
      ShuffleMask[j] = i / 2 + j;

    // Fill the rest of the mask with undef.
    std::fill(&ShuffleMask[i / 2], ShuffleMask.end(), -1);

    Value *Shuf = Builder.CreateShuffleVector(TmpVec, ShuffleMask, "rdx.shuf");

    if (Op != Instruction::ICmp && Op != Instruction::FCmp) {
      // The builder propagates its fast-math-flags setting.
      TmpVec = Builder.CreateBinOp((Instruction::BinaryOps)Op, TmpVec, Shuf,
                                   "bin.rdx");
    } else {
      assert(RecurrenceDescriptor::isMinMaxRecurrenceKind(RdxKind) &&
             "Invalid min/max");
      TmpVec = createMinMaxOp(Builder, RdxKind, TmpVec, Shuf);
    }
    if (!RedOps.empty())
      propagateIRFlags(TmpVec, RedOps);

    // We may compute the reassociated scalar ops in a way that does not
    // preserve nsw/nuw etc. Conservatively, drop those flags.
    if (auto *ReductionInst = dyn_cast<Instruction>(TmpVec))
      ReductionInst->dropPoisonGeneratingFlags();
  }
  // The result is in the first element of the vector.
  return Builder.CreateExtractElement(TmpVec, Builder.getInt32(0));
}

Value *llvm::createSimpleTargetReduction(IRBuilderBase &Builder,
                                         const TargetTransformInfo *TTI,
                                         Value *Src, RecurKind RdxKind,
                                         ArrayRef<Value *> RedOps) {
  TargetTransformInfo::ReductionFlags RdxFlags;
  RdxFlags.IsMaxOp = RdxKind == RecurKind::SMax || RdxKind == RecurKind::UMax ||
                     RdxKind == RecurKind::FMax;
  RdxFlags.IsSigned = RdxKind == RecurKind::SMax || RdxKind == RecurKind::SMin;

  auto *SrcVecEltTy = cast<VectorType>(Src->getType())->getElementType();
  switch (RdxKind) {
  case RecurKind::Add:
    return Builder.CreateAddReduce(Src);
  case RecurKind::Mul:
    return Builder.CreateMulReduce(Src);
  case RecurKind::And:
    return Builder.CreateAndReduce(Src);
  case RecurKind::Or:
    return Builder.CreateOrReduce(Src);
  case RecurKind::Xor:
    return Builder.CreateXorReduce(Src);
  case RecurKind::FAdd:
    return Builder.CreateFAddReduce(ConstantFP::getNegativeZero(SrcVecEltTy),
                                    Src);
  case RecurKind::FMul:
    return Builder.CreateFMulReduce(ConstantFP::get(SrcVecEltTy, 1.0), Src);
  case RecurKind::SMax:
    return Builder.CreateIntMaxReduce(Src, true);
  case RecurKind::SMin:
    return Builder.CreateIntMinReduce(Src, true);
  case RecurKind::UMax:
    return Builder.CreateIntMaxReduce(Src, false);
  case RecurKind::UMin:
    return Builder.CreateIntMinReduce(Src, false);
  case RecurKind::FMax:
    return Builder.CreateFPMaxReduce(Src);
  case RecurKind::FMin:
    return Builder.CreateFPMinReduce(Src);
  default:
    llvm_unreachable("Unhandled opcode");
  }
}

Value *llvm::createTargetReduction(IRBuilderBase &B,
                                   const TargetTransformInfo *TTI,
                                   RecurrenceDescriptor &Desc, Value *Src) {
  // TODO: Support in-order reductions based on the recurrence descriptor.
  // All ops in the reduction inherit fast-math-flags from the recurrence
  // descriptor.
  IRBuilderBase::FastMathFlagGuard FMFGuard(B);
  B.setFastMathFlags(Desc.getFastMathFlags());
  return createSimpleTargetReduction(B, TTI, Src, Desc.getRecurrenceKind());
}

Value *llvm::createOrderedReduction(IRBuilderBase &B,
                                    RecurrenceDescriptor &Desc, Value *Src,
                                    Value *Start) {
  assert(Desc.getRecurrenceKind() == RecurKind::FAdd &&
         "Unexpected reduction kind");
  assert(Src->getType()->isVectorTy() && "Expected a vector type");
  assert(!Start->getType()->isVectorTy() && "Expected a scalar type");

  return B.CreateFAddReduce(Start, Src);
}

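// Propagate IR flags by intersection: the combined instruction may only keep
// a flag (e.g. nsw, nuw, fast-math flags) if every scalar instruction it
// replaces has it. For instance (illustrative), intersecting an "add nsw"
// with a plain "add" drops nsw on the result.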
void llvm::propagateIRFlags(Value *I, ArrayRef<Value *> VL, Value *OpValue) {
  auto *VecOp = dyn_cast<Instruction>(I);
  if (!VecOp)
    return;
  auto *Intersection = (OpValue == nullptr) ? dyn_cast<Instruction>(VL[0])
                                            : dyn_cast<Instruction>(OpValue);
  if (!Intersection)
    return;
  const unsigned Opcode = Intersection->getOpcode();
  VecOp->copyIRFlags(Intersection);
  for (auto *V : VL) {
    auto *Instr = dyn_cast<Instruction>(V);
    if (!Instr)
      continue;
    if (OpValue == nullptr || Opcode == Instr->getOpcode())
      VecOp->andIRFlags(V);
  }
}

bool llvm::isKnownNegativeInLoop(const SCEV *S, const Loop *L,
                                 ScalarEvolution &SE) {
  const SCEV *Zero = SE.getZero(S->getType());
  return SE.isAvailableAtLoopEntry(S, L) &&
         SE.isLoopEntryGuardedByCond(L, ICmpInst::ICMP_SLT, S, Zero);
}

bool llvm::isKnownNonNegativeInLoop(const SCEV *S, const Loop *L,
                                    ScalarEvolution &SE) {
  const SCEV *Zero = SE.getZero(S->getType());
  return SE.isAvailableAtLoopEntry(S, L) &&
         SE.isLoopEntryGuardedByCond(L, ICmpInst::ICMP_SGE, S, Zero);
}

bool llvm::cannotBeMinInLoop(const SCEV *S, const Loop *L, ScalarEvolution &SE,
                             bool Signed) {
  unsigned BitWidth = cast<IntegerType>(S->getType())->getBitWidth();
  APInt Min = Signed ? APInt::getSignedMinValue(BitWidth)
                     : APInt::getMinValue(BitWidth);
  auto Predicate = Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
  return SE.isAvailableAtLoopEntry(S, L) &&
         SE.isLoopEntryGuardedByCond(L, Predicate, S, SE.getConstant(Min));
}

bool llvm::cannotBeMaxInLoop(const SCEV *S, const Loop *L, ScalarEvolution &SE,
                             bool Signed) {
  unsigned BitWidth = cast<IntegerType>(S->getType())->getBitWidth();
  APInt Max = Signed ? APInt::getSignedMaxValue(BitWidth)
                     : APInt::getMaxValue(BitWidth);
  auto Predicate = Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
  return SE.isAvailableAtLoopEntry(S, L) &&
         SE.isLoopEntryGuardedByCond(L, Predicate, S, SE.getConstant(Max));
}

//===----------------------------------------------------------------------===//
// rewriteLoopExitValues - Optimize IV users outside the loop.
// As a side effect, reduces the amount of IV processing within the loop.
//===----------------------------------------------------------------------===//
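
// Illustrative sketch (hand-written IR, not from a test) of the rewrite
// performed below. Given
//
//   loop:
//     %iv = phi i64 [ 0, %preheader ], [ %iv.next, %loop ]
//     %iv.next = add nuw nsw i64 %iv, 1
//     %cmp = icmp ult i64 %iv.next, %n
//     br i1 %cmp, label %loop, label %exit
//   exit:
//     %lcssa = phi i64 [ %iv.next, %loop ]
//
// SCEV can (roughly) evaluate %iv.next at loop exit to %n, so uses of %lcssa
// can be rewritten to use %n, computed outside the loop.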

// Return true if the SCEV expansion generated by the rewriter can replace the
// original value. SCEV guarantees that it produces the same value, but the way
// it is produced may be illegal IR. Ideally, this function will only be
// called for verification.
static bool isValidRewrite(ScalarEvolution *SE, Value *FromVal, Value *ToVal) {
  // If an SCEV expression subsumed multiple pointers, its expansion could
  // reassociate the GEP changing the base pointer. This is illegal because the
  // final address produced by a GEP chain must be inbounds relative to its
  // underlying object. Otherwise basic alias analysis, among other things,
  // could fail in a dangerous way. Ultimately, SCEV will be improved to avoid
  // producing an expression involving multiple pointers. Until then, we must
  // bail out here.
  //
  // Retrieve the pointer operand of the GEP. Don't use getUnderlyingObject
  // because it understands lcssa phis while SCEV does not.
  Value *FromPtr = FromVal;
  Value *ToPtr = ToVal;
  if (auto *GEP = dyn_cast<GEPOperator>(FromVal))
    FromPtr = GEP->getPointerOperand();

  if (auto *GEP = dyn_cast<GEPOperator>(ToVal))
    ToPtr = GEP->getPointerOperand();

  if (FromPtr != FromVal || ToPtr != ToVal) {
    // Quickly check the common case.
    if (FromPtr == ToPtr)
      return true;

    // SCEV may have rewritten an expression that produces the GEP's pointer
    // operand. That's ok as long as the pointer operand has the same base
    // pointer. Unlike getUnderlyingObject(), getPointerBase() will find the
    // base of a recurrence. This handles the case in which SCEV expansion
    // converts a pointer type recurrence into a nonrecurrent pointer base
    // indexed by an integer recurrence.

    // If the GEP base pointer is a vector of pointers, abort.
    if (!FromPtr->getType()->isPointerTy() || !ToPtr->getType()->isPointerTy())
      return false;

    const SCEV *FromBase = SE->getPointerBase(SE->getSCEV(FromPtr));
    const SCEV *ToBase = SE->getPointerBase(SE->getSCEV(ToPtr));
    if (FromBase == ToBase)
      return true;

    LLVM_DEBUG(dbgs() << "rewriteLoopExitValues: GEP rewrite bail out "
                      << *FromBase << " != " << *ToBase << "\n");

    return false;
  }
  return true;
}

static bool hasHardUserWithinLoop(const Loop *L, const Instruction *I) {
  SmallPtrSet<const Instruction *, 8> Visited;
  SmallVector<const Instruction *, 8> WorkList;
  Visited.insert(I);
  WorkList.push_back(I);
  while (!WorkList.empty()) {
    const Instruction *Curr = WorkList.pop_back_val();
    // This use is outside the loop, nothing to do.
    if (!L->contains(Curr))
      continue;
    // We assume a use with side effects is a "hard" use which will not be
    // eliminated easily.
    if (Curr->mayHaveSideEffects())
      return true;
    // Otherwise, add all its users to the worklist.
    for (auto U : Curr->users()) {
      auto *UI = cast<Instruction>(U);
      if (Visited.insert(UI).second)
        WorkList.push_back(UI);
    }
  }
  return false;
}

// Collect information about PHI nodes which can be transformed in
// rewriteLoopExitValues.
struct RewritePhi {
  PHINode *PN;                 // For which PHI node is this replacement?
  unsigned Ith;                // For which incoming value?
  const SCEV *ExpansionSCEV;   // The SCEV of the incoming value we are
                               // rewriting.
  Instruction *ExpansionPoint; // Where we'd like to expand that SCEV.
  bool HighCost;               // Is this expansion high-cost?

  Value *Expansion = nullptr;
  bool ValidRewrite = false;

  RewritePhi(PHINode *P, unsigned I, const SCEV *Val, Instruction *ExpansionPt,
             bool H)
      : PN(P), Ith(I), ExpansionSCEV(Val), ExpansionPoint(ExpansionPt),
        HighCost(H) {}
};

// Check whether it is possible to delete the loop after rewriting the exit
// value. If it is possible, ignore ReplaceExitValue and do the rewriting
// aggressively.
static bool canLoopBeDeleted(Loop *L,
                             SmallVector<RewritePhi, 8> &RewritePhiSet) {
  BasicBlock *Preheader = L->getLoopPreheader();
  // If there is no preheader, the loop will not be deleted.
  if (!Preheader)
    return false;

  // In the LoopDeletion pass, a loop can be deleted even when
  // ExitingBlocks.size() > 1. We do not handle the multiple-ExitingBlocks
  // case here for simplicity.
  // TODO: If we see a testcase where multiple ExitingBlocks can be deleted
  // after exit value rewriting, we can enhance the logic here.
  SmallVector<BasicBlock *, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);
  SmallVector<BasicBlock *, 8> ExitBlocks;
  L->getUniqueExitBlocks(ExitBlocks);
  if (ExitBlocks.size() != 1 || ExitingBlocks.size() != 1)
    return false;

  BasicBlock *ExitBlock = ExitBlocks[0];
  BasicBlock::iterator BI = ExitBlock->begin();
  while (PHINode *P = dyn_cast<PHINode>(BI)) {
    Value *Incoming = P->getIncomingValueForBlock(ExitingBlocks[0]);

    // If the Incoming value of P is found in RewritePhiSet, we know it
    // could be rewritten to use a loop invariant value in the transformation
    // phase later. Skip it in the loop invariant check below.
    bool found = false;
    for (const RewritePhi &Phi : RewritePhiSet) {
      if (!Phi.ValidRewrite)
        continue;
      unsigned i = Phi.Ith;
      if (Phi.PN == P && (Phi.PN)->getIncomingValue(i) == Incoming) {
        found = true;
        break;
      }
    }

    Instruction *I;
    if (!found && (I = dyn_cast<Instruction>(Incoming)))
      if (!L->hasLoopInvariantOperands(I))
        return false;

    ++BI;
  }

  for (auto *BB : L->blocks())
    if (llvm::any_of(*BB, [](Instruction &I) {
          return I.mayHaveSideEffects();
        }))
      return false;

  return true;
}

rewriteLoopExitValues(Loop * L,LoopInfo * LI,TargetLibraryInfo * TLI,ScalarEvolution * SE,const TargetTransformInfo * TTI,SCEVExpander & Rewriter,DominatorTree * DT,ReplaceExitVal ReplaceExitValue,SmallVector<WeakTrackingVH,16> & DeadInsts)1293 int llvm::rewriteLoopExitValues(Loop *L, LoopInfo *LI, TargetLibraryInfo *TLI,
1294 ScalarEvolution *SE,
1295 const TargetTransformInfo *TTI,
1296 SCEVExpander &Rewriter, DominatorTree *DT,
1297 ReplaceExitVal ReplaceExitValue,
1298 SmallVector<WeakTrackingVH, 16> &DeadInsts) {
1299 // Check a pre-condition.
1300 assert(L->isRecursivelyLCSSAForm(*DT, *LI) &&
1301 "Indvars did not preserve LCSSA!");
1302
1303 SmallVector<BasicBlock*, 8> ExitBlocks;
1304 L->getUniqueExitBlocks(ExitBlocks);
1305
1306 SmallVector<RewritePhi, 8> RewritePhiSet;
1307 // Find all values that are computed inside the loop, but used outside of it.
1308 // Because of LCSSA, these values will only occur in LCSSA PHI Nodes. Scan
1309 // the exit blocks of the loop to find them.
1310 for (BasicBlock *ExitBB : ExitBlocks) {
1311 // If there are no PHI nodes in this exit block, then no values defined
1312 // inside the loop are used on this path, skip it.
1313 PHINode *PN = dyn_cast<PHINode>(ExitBB->begin());
1314 if (!PN) continue;
1315
1316 unsigned NumPreds = PN->getNumIncomingValues();
1317
1318 // Iterate over all of the PHI nodes.
1319 BasicBlock::iterator BBI = ExitBB->begin();
1320 while ((PN = dyn_cast<PHINode>(BBI++))) {
1321 if (PN->use_empty())
1322 continue; // dead use, don't replace it
1323
1324 if (!SE->isSCEVable(PN->getType()))
1325 continue;
1326
1327 // It's necessary to tell ScalarEvolution about this explicitly so that
1328 // it can walk the def-use list and forget all SCEVs, as it may not be
1329 // watching the PHI itself. Once the new exit value is in place, there
1330 // may not be a def-use connection between the loop and every instruction
1331 // which got a SCEVAddRecExpr for that loop.
1332 SE->forgetValue(PN);
1333
1334 // Iterate over all of the values in all the PHI nodes.
1335 for (unsigned i = 0; i != NumPreds; ++i) {
1336 // If the value being merged in is not integer or is not defined
1337 // in the loop, skip it.
1338 Value *InVal = PN->getIncomingValue(i);
1339 if (!isa<Instruction>(InVal))
1340 continue;
1341
1342 // If this pred is for a subloop, not L itself, skip it.
1343 if (LI->getLoopFor(PN->getIncomingBlock(i)) != L)
1344 continue; // The Block is in a subloop, skip it.
1345
1346 // Check that InVal is defined in the loop.
1347 Instruction *Inst = cast<Instruction>(InVal);
1348 if (!L->contains(Inst))
1349 continue;
1350
1351 // Okay, this instruction has a user outside of the current loop
1352 // and varies predictably *inside* the loop. Evaluate the value it
1353 // contains when the loop exits, if possible. We prefer to start with
1354 // expressions which are true for all exits (so as to maximize
1355 // expression reuse by the SCEVExpander), but resort to per-exit
1356 // evaluation if that fails.
1357 const SCEV *ExitValue = SE->getSCEVAtScope(Inst, L->getParentLoop());
1358 if (isa<SCEVCouldNotCompute>(ExitValue) ||
1359 !SE->isLoopInvariant(ExitValue, L) ||
1360 !isSafeToExpand(ExitValue, *SE)) {
1361 // TODO: This should probably be sunk into SCEV in some way; maybe a
1362 // getSCEVForExit(SCEV*, L, ExitingBB)? It can be generalized for
1363 // most SCEV expressions and other recurrence types (e.g. shift
1364 // recurrences). Is there existing code we can reuse?
1365 const SCEV *ExitCount = SE->getExitCount(L, PN->getIncomingBlock(i));
1366 if (isa<SCEVCouldNotCompute>(ExitCount))
1367 continue;
1368 if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Inst)))
1369 if (AddRec->getLoop() == L)
1370 ExitValue = AddRec->evaluateAtIteration(ExitCount, *SE);
1371 if (isa<SCEVCouldNotCompute>(ExitValue) ||
1372 !SE->isLoopInvariant(ExitValue, L) ||
1373 !isSafeToExpand(ExitValue, *SE))
1374 continue;
1375 }
1376
1377 // Computing the value outside of the loop brings no benefit if it is
1378 // definitely used inside the loop in a way which can not be optimized
1379 // away. Avoid doing so unless we know we have a value which computes
1380 // the ExitValue already. TODO: This should be merged into SCEV
1381 // expander to leverage its knowledge of existing expressions.
1382 if (ReplaceExitValue != AlwaysRepl && !isa<SCEVConstant>(ExitValue) &&
1383 !isa<SCEVUnknown>(ExitValue) && hasHardUserWithinLoop(L, Inst))
1384 continue;
1385
1386 // Check if expansions of this SCEV would count as being high cost.
1387 bool HighCost = Rewriter.isHighCostExpansion(
1388 ExitValue, L, SCEVCheapExpansionBudget, TTI, Inst);
1389
1390 // Note that we must not perform expansions until after
1391 // we query *all* the costs, because if we perform temporary expansion
1392 // inbetween, one that we might not intend to keep, said expansion
1393 // *may* affect cost calculation of the the next SCEV's we'll query,
1394 // and next SCEV may errneously get smaller cost.
1395
1396 // Collect all the candidate PHINodes to be rewritten.
1397 RewritePhiSet.emplace_back(PN, i, ExitValue, Inst, HighCost);
1398 }
1399 }
  }

  // Now that we've done preliminary filtering and billed all the SCEVs, we
  // can perform the last sanity check - the expansion must be valid.
  for (RewritePhi &Phi : RewritePhiSet) {
    Phi.Expansion = Rewriter.expandCodeFor(Phi.ExpansionSCEV, Phi.PN->getType(),
                                           Phi.ExpansionPoint);

    LLVM_DEBUG(dbgs() << "rewriteLoopExitValues: AfterLoopVal = "
                      << *(Phi.Expansion) << '\n'
                      << "  LoopVal = " << *(Phi.ExpansionPoint) << "\n");

    // FIXME: isValidRewrite() is a hack. It should be an assert, eventually.
    Phi.ValidRewrite = isValidRewrite(SE, Phi.ExpansionPoint, Phi.Expansion);
    if (!Phi.ValidRewrite) {
      DeadInsts.push_back(Phi.Expansion);
      continue;
    }

#ifndef NDEBUG
    // If we reuse an instruction from a loop which is neither L nor one of
    // its containing loops, we end up breaking LCSSA form for this loop by
    // creating a new use of its instruction.
    if (auto *ExitInsn = dyn_cast<Instruction>(Phi.Expansion))
      if (auto *EVL = LI->getLoopFor(ExitInsn->getParent()))
        if (EVL != L)
          assert(EVL->contains(L) && "LCSSA breach detected!");
#endif
  }

  // TODO: After isValidRewrite() is an assertion, evaluate whether it is
  // beneficial to change how we calculate high-cost: if we have a SCEV 'A'
  // which we know we will expand, should we calculate the cost of other
  // SCEVs after expanding SCEV 'A', thus potentially giving a cost bonus to
  // those other SCEVs?

  bool LoopCanBeDel = canLoopBeDeleted(L, RewritePhiSet);
  int NumReplaced = 0;

  // Transformation.
  for (const RewritePhi &Phi : RewritePhiSet) {
    if (!Phi.ValidRewrite)
      continue;

    PHINode *PN = Phi.PN;
    Value *ExitVal = Phi.Expansion;

    // Only do the rewrite when the ExitValue can be expanded cheaply.
    // If LoopCanBeDel is true, rewrite exit value aggressively.
    if (ReplaceExitValue == OnlyCheapRepl && !LoopCanBeDel && Phi.HighCost) {
      DeadInsts.push_back(ExitVal);
      continue;
    }

    NumReplaced++;
    Instruction *Inst = cast<Instruction>(PN->getIncomingValue(Phi.Ith));
    PN->setIncomingValue(Phi.Ith, ExitVal);

    // If this instruction is dead now, queue it for deletion rather than
    // deleting it immediately, to avoid invalidating iterators.
    if (isInstructionTriviallyDead(Inst, TLI))
      DeadInsts.push_back(Inst);

    // Replace PN with ExitVal if that is legal and does not break LCSSA.
    if (PN->getNumIncomingValues() == 1 &&
        LI->replacementPreservesLCSSAForm(PN, ExitVal)) {
      PN->replaceAllUsesWith(ExitVal);
      PN->eraseFromParent();
    }
  }

  // The insertion point instruction may have been deleted; clear it out
  // so that the rewriter doesn't trip over it later.
  Rewriter.clearInsertPoint();
  return NumReplaced;
}

/// Set weights for \p UnrolledLoop and \p RemainderLoop based on weights for
/// \p OrigLoop.
void llvm::setProfileInfoAfterUnrolling(Loop *OrigLoop, Loop *UnrolledLoop,
                                        Loop *RemainderLoop, uint64_t UF) {
  assert(UF > 0 && "Zero unroll factor is not supported");
  assert(UnrolledLoop != RemainderLoop &&
         "Unrolled and Remainder loops are expected to be distinct");

  // Get the number of iterations in the original scalar loop.
  unsigned OrigLoopInvocationWeight = 0;
  Optional<unsigned> OrigAverageTripCount =
      getLoopEstimatedTripCount(OrigLoop, &OrigLoopInvocationWeight);
  if (!OrigAverageTripCount)
    return;

  // Calculate the number of iterations in the unrolled loop.
  unsigned UnrolledAverageTripCount = *OrigAverageTripCount / UF;
  // Calculate the number of iterations for the remainder loop.
  unsigned RemainderAverageTripCount = *OrigAverageTripCount % UF;
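  // For example (an illustrative calculation, not from the original source):
  // with an estimated trip count of 10 and UF = 4, the unrolled loop is
  // estimated at 10 / 4 = 2 iterations per invocation and the remainder loop
  // at 10 % 4 = 2 iterations.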

  setLoopEstimatedTripCount(UnrolledLoop, UnrolledAverageTripCount,
                            OrigLoopInvocationWeight);
  setLoopEstimatedTripCount(RemainderLoop, RemainderAverageTripCount,
                            OrigLoopInvocationWeight);
}

/// Utility that implements appending of loops onto a worklist.
/// Loops are added in preorder (analogous to reverse postorder for trees),
/// and the worklist is processed LIFO.
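/// For example (illustrative): for a nest A containing B and D, where B
/// contains C, the preorder is A, B, C, D; popping the LIFO worklist then
/// yields D, C, B, A, i.e. inner loops are processed before their parents.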
template <typename RangeT>
void llvm::appendReversedLoopsToWorklist(
    RangeT &&Loops, SmallPriorityWorklist<Loop *, 4> &Worklist) {
  // We use an internal worklist to build up the preorder traversal without
  // recursion.
  SmallVector<Loop *, 4> PreOrderLoops, PreOrderWorklist;

  // We walk the initial sequence of loops in reverse because we generally want
  // to visit defs before uses and the worklist is LIFO.
  for (Loop *RootL : Loops) {
    assert(PreOrderLoops.empty() && "Must start with an empty preorder walk.");
    assert(PreOrderWorklist.empty() &&
           "Must start with an empty preorder walk worklist.");
    PreOrderWorklist.push_back(RootL);
    do {
      Loop *L = PreOrderWorklist.pop_back_val();
      PreOrderWorklist.append(L->begin(), L->end());
      PreOrderLoops.push_back(L);
    } while (!PreOrderWorklist.empty());

    Worklist.insert(std::move(PreOrderLoops));
    PreOrderLoops.clear();
  }
}

template <typename RangeT>
void llvm::appendLoopsToWorklist(RangeT &&Loops,
                                 SmallPriorityWorklist<Loop *, 4> &Worklist) {
  appendReversedLoopsToWorklist(reverse(Loops), Worklist);
}

template void llvm::appendLoopsToWorklist<ArrayRef<Loop *> &>(
    ArrayRef<Loop *> &Loops, SmallPriorityWorklist<Loop *, 4> &Worklist);

template void
llvm::appendLoopsToWorklist<Loop &>(Loop &L,
                                    SmallPriorityWorklist<Loop *, 4> &Worklist);

void llvm::appendLoopsToWorklist(LoopInfo &LI,
                                 SmallPriorityWorklist<Loop *, 4> &Worklist) {
  appendReversedLoopsToWorklist(LI, Worklist);
}

Loop *llvm::cloneLoop(Loop *L, Loop *PL, ValueToValueMapTy &VM,
                      LoopInfo *LI, LPPassManager *LPM) {
  Loop &New = *LI->AllocateLoop();
  if (PL)
    PL->addChildLoop(&New);
  else
    LI->addTopLevelLoop(&New);

  if (LPM)
    LPM->addLoop(New);

  // Add all of the blocks in L to the new loop.
  for (Loop::block_iterator I = L->block_begin(), E = L->block_end();
       I != E; ++I)
    if (LI->getLoopFor(*I) == L)
      New.addBasicBlockToLoop(cast<BasicBlock>(VM[*I]), *LI);

  // Add all of the subloops to the new loop.
  for (Loop *I : *L)
    cloneLoop(I, &New, VM, LI, LPM);

  return &New;
}

/// IR Values for the lower and upper bounds of a pointer evolution. We
/// need to use value-handles because SCEV expansion can invalidate previously
/// expanded values. Thus expansion of a pointer can invalidate the bounds for
/// a previous one.
struct PointerBounds {
  TrackingVH<Value> Start;
  TrackingVH<Value> End;
};

/// Expand code for the lower and upper bound of the pointer group \p CG
/// in \p TheLoop. \return the values for the bounds.
static PointerBounds expandBounds(const RuntimeCheckingPtrGroup *CG,
                                  Loop *TheLoop, Instruction *Loc,
                                  SCEVExpander &Exp) {
  ScalarEvolution *SE = Exp.getSE();
  // TODO: Add helper to retrieve pointers to CG.
  Value *Ptr = CG->RtCheck.Pointers[CG->Members[0]].PointerValue;
  const SCEV *Sc = SE->getSCEV(Ptr);

  unsigned AS = Ptr->getType()->getPointerAddressSpace();
  LLVMContext &Ctx = Loc->getContext();

  // Use this type for pointer arithmetic.
  Type *PtrArithTy = Type::getInt8PtrTy(Ctx, AS);
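  // Note: i8* keeps the bound computations at byte granularity, independent
  // of the element type of the accesses being checked.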

  if (SE->isLoopInvariant(Sc, TheLoop)) {
    LLVM_DEBUG(dbgs() << "LAA: Adding RT check for a loop invariant ptr:"
                      << *Ptr << "\n");
    // Ptr could be in the loop body. If so, expand a new one at the correct
    // location.
    Instruction *Inst = dyn_cast<Instruction>(Ptr);
    Value *NewPtr = (Inst && TheLoop->contains(Inst))
                        ? Exp.expandCodeFor(Sc, PtrArithTy, Loc)
                        : Ptr;
    // We must return a half-open range, which means incrementing Sc.
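    // E.g. (illustrative): for a loop-invariant pointer %p, the returned
    // bounds describe the single-byte range [%p, %p + 1).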
    const SCEV *ScPlusOne = SE->getAddExpr(Sc, SE->getOne(PtrArithTy));
    Value *NewPtrPlusOne = Exp.expandCodeFor(ScPlusOne, PtrArithTy, Loc);
    return {NewPtr, NewPtrPlusOne};
  } else {
    Value *Start = nullptr, *End = nullptr;
    LLVM_DEBUG(dbgs() << "LAA: Adding RT check for range:\n");
    Start = Exp.expandCodeFor(CG->Low, PtrArithTy, Loc);
    End = Exp.expandCodeFor(CG->High, PtrArithTy, Loc);
    LLVM_DEBUG(dbgs() << "Start: " << *CG->Low << " End: " << *CG->High
                      << "\n");
    return {Start, End};
  }
}

/// Turns a collection of checks into a collection of expanded upper and
/// lower bounds for both pointers in the check.
static SmallVector<std::pair<PointerBounds, PointerBounds>, 4>
expandBounds(const SmallVectorImpl<RuntimePointerCheck> &PointerChecks, Loop *L,
             Instruction *Loc, SCEVExpander &Exp) {
  SmallVector<std::pair<PointerBounds, PointerBounds>, 4> ChecksWithBounds;

  // Here we're relying on the SCEV Expander's cache to only emit code for the
  // same bounds once.
  transform(PointerChecks, std::back_inserter(ChecksWithBounds),
            [&](const RuntimePointerCheck &Check) {
              PointerBounds First = expandBounds(Check.first, L, Loc, Exp),
                            Second = expandBounds(Check.second, L, Loc, Exp);
              return std::make_pair(First, Second);
            });

  return ChecksWithBounds;
}

std::pair<Instruction *, Instruction *> llvm::addRuntimeChecks(
    Instruction *Loc, Loop *TheLoop,
    const SmallVectorImpl<RuntimePointerCheck> &PointerChecks,
    SCEVExpander &Exp) {
  // TODO: Move noalias annotation code from LoopVersioning here and share
  // with LV if possible.
  // TODO: Pass RtPtrChecking instead of PointerChecks and SE separately, if
  // possible.
  auto ExpandedChecks = expandBounds(PointerChecks, TheLoop, Loc, Exp);

  LLVMContext &Ctx = Loc->getContext();
  Instruction *FirstInst = nullptr;
  IRBuilder<> ChkBuilder(Loc);
  // Our instructions might fold to a constant.
  Value *MemoryRuntimeCheck = nullptr;

  // FIXME: this helper is currently a duplicate of the one in
  // LoopVectorize.cpp.
  auto GetFirstInst = [](Instruction *FirstInst, Value *V,
                         Instruction *Loc) -> Instruction * {
    if (FirstInst)
      return FirstInst;
    if (Instruction *I = dyn_cast<Instruction>(V))
      return I->getParent() == Loc->getParent() ? I : nullptr;
    return nullptr;
  };

  for (const auto &Check : ExpandedChecks) {
    const PointerBounds &A = Check.first, &B = Check.second;
    // Check if two pointers (A and B) conflict where conflict is computed as:
    // start(A) <= end(B) && start(B) <= end(A)
    unsigned AS0 = A.Start->getType()->getPointerAddressSpace();
    unsigned AS1 = B.Start->getType()->getPointerAddressSpace();

    assert((AS0 == B.End->getType()->getPointerAddressSpace()) &&
           (AS1 == A.End->getType()->getPointerAddressSpace()) &&
           "Trying to bounds check pointers with different address spaces");

    Type *PtrArithTy0 = Type::getInt8PtrTy(Ctx, AS0);
    Type *PtrArithTy1 = Type::getInt8PtrTy(Ctx, AS1);

    Value *Start0 = ChkBuilder.CreateBitCast(A.Start, PtrArithTy0, "bc");
    Value *Start1 = ChkBuilder.CreateBitCast(B.Start, PtrArithTy1, "bc");
    Value *End0 = ChkBuilder.CreateBitCast(A.End, PtrArithTy1, "bc");
    Value *End1 = ChkBuilder.CreateBitCast(B.End, PtrArithTy0, "bc");

    // [A|B].Start points to the first accessed byte under base [A|B].
    // [A|B].End points to the last accessed byte, plus one.
    // There is no conflict when the intervals are disjoint:
    // NoConflict = (B.Start >= A.End) || (A.Start >= B.End)
    //
    // bound0 = (A.Start < B.End)
    // bound1 = (B.Start < A.End)
    // IsConflict = bound0 & bound1
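    // As a rough sketch of the emitted IR (illustrative; names follow the
    // IRBuilder hints below, and the actual output may be folded):
    //   %bound0 = icmp ult i8* %A.start, %B.end
    //   %bound1 = icmp ult i8* %B.start, %A.end
    //   %found.conflict = and i1 %bound0, %bound1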
    Value *Cmp0 = ChkBuilder.CreateICmpULT(Start0, End1, "bound0");
    FirstInst = GetFirstInst(FirstInst, Cmp0, Loc);
    Value *Cmp1 = ChkBuilder.CreateICmpULT(Start1, End0, "bound1");
    FirstInst = GetFirstInst(FirstInst, Cmp1, Loc);
    Value *IsConflict = ChkBuilder.CreateAnd(Cmp0, Cmp1, "found.conflict");
    FirstInst = GetFirstInst(FirstInst, IsConflict, Loc);
    if (MemoryRuntimeCheck) {
      IsConflict =
          ChkBuilder.CreateOr(MemoryRuntimeCheck, IsConflict, "conflict.rdx");
      FirstInst = GetFirstInst(FirstInst, IsConflict, Loc);
    }
    MemoryRuntimeCheck = IsConflict;
  }

  if (!MemoryRuntimeCheck)
    return std::make_pair(nullptr, nullptr);

  // We have to do this trickery because the IRBuilder might fold the check
  // to a constant expression in which case there is no Instruction anchored
  // in the block.
  Instruction *Check =
      BinaryOperator::CreateAnd(MemoryRuntimeCheck, ConstantInt::getTrue(Ctx));
  ChkBuilder.Insert(Check, "memcheck.conflict");
  FirstInst = GetFirstInst(FirstInst, Check, Loc);
  return std::make_pair(FirstInst, Check);
}

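// For illustration (hypothetical IR, not from an actual test case): a
// candidate loop for the check below might have a header such as
//   header:
//     %val = load i32, i32* %ptr
//     %cond = icmp eq i32 %val, 0
//     br i1 %cond, label %noop.path, label %body
// If no MemoryDef along the path from one successor back to the header can
// clobber the location of %ptr, %cond evaluates identically on every
// iteration that takes that path, so the loop may be partially unswitched
// on it.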
Optional<IVConditionInfo> llvm::hasPartialIVCondition(Loop &L,
                                                      unsigned MSSAThreshold,
                                                      MemorySSA &MSSA,
                                                      AAResults &AA) {
  auto *TI = dyn_cast<BranchInst>(L.getHeader()->getTerminator());
  if (!TI || !TI->isConditional())
    return {};

  auto *CondI = dyn_cast<CmpInst>(TI->getCondition());
  // The case with the condition outside the loop should already be handled
  // earlier.
  if (!CondI || !L.contains(CondI))
    return {};

  SmallVector<Instruction *> InstToDuplicate;
  InstToDuplicate.push_back(CondI);

  SmallVector<Value *, 4> WorkList;
  WorkList.append(CondI->op_begin(), CondI->op_end());

  SmallVector<MemoryAccess *, 4> AccessesToCheck;
  SmallVector<MemoryLocation, 4> AccessedLocs;
  while (!WorkList.empty()) {
    Instruction *I = dyn_cast<Instruction>(WorkList.pop_back_val());
    if (!I || !L.contains(I))
      continue;

    // TODO: support additional instructions.
    if (!isa<LoadInst>(I) && !isa<GetElementPtrInst>(I))
      return {};

    // Do not duplicate volatile and atomic loads.
    if (auto *LI = dyn_cast<LoadInst>(I))
      if (LI->isVolatile() || LI->isAtomic())
        return {};

    InstToDuplicate.push_back(I);
    if (MemoryAccess *MA = MSSA.getMemoryAccess(I)) {
      if (auto *MemUse = dyn_cast_or_null<MemoryUse>(MA)) {
        // Queue the defining access for alias checking.
        AccessesToCheck.push_back(MemUse->getDefiningAccess());
        AccessedLocs.push_back(MemoryLocation::get(I));
      } else {
        // MemoryDefs may clobber the location or may be atomic memory
        // operations. Bail out.
        return {};
      }
    }
    WorkList.append(I->op_begin(), I->op_end());
  }

  if (InstToDuplicate.empty())
    return {};

  SmallVector<BasicBlock *, 4> ExitingBlocks;
  L.getExitingBlocks(ExitingBlocks);
  auto HasNoClobbersOnPath =
      [&L, &AA, &AccessedLocs, &ExitingBlocks, &InstToDuplicate,
       MSSAThreshold](BasicBlock *Succ, BasicBlock *Header,
                      SmallVector<MemoryAccess *, 4> AccessesToCheck)
      -> Optional<IVConditionInfo> {
    IVConditionInfo Info;
    // First, collect all blocks in the loop that are on a path from Succ
    // to the header.
    SmallVector<BasicBlock *, 4> WorkList;
    WorkList.push_back(Succ);
    WorkList.push_back(Header);
    SmallPtrSet<BasicBlock *, 4> Seen;
    Seen.insert(Header);
    Info.PathIsNoop &=
        all_of(*Header, [](Instruction &I) { return !I.mayHaveSideEffects(); });

    while (!WorkList.empty()) {
      BasicBlock *Current = WorkList.pop_back_val();
      if (!L.contains(Current))
        continue;
      const auto &SeenIns = Seen.insert(Current);
      if (!SeenIns.second)
        continue;

      Info.PathIsNoop &= all_of(
          *Current, [](Instruction &I) { return !I.mayHaveSideEffects(); });
      WorkList.append(succ_begin(Current), succ_end(Current));
    }

    // Require at least 2 blocks on a path through the loop. This skips
    // paths that directly exit the loop.
    if (Seen.size() < 2)
      return {};

    // Next, check if there are any MemoryDefs that are on the path through
    // the loop (in the Seen set) and may-alias any of the locations in
    // AccessedLocs. If that is the case, they may modify the condition and
    // partial unswitching is not possible.
    SmallPtrSet<MemoryAccess *, 4> SeenAccesses;
    while (!AccessesToCheck.empty()) {
      MemoryAccess *Current = AccessesToCheck.pop_back_val();
      auto SeenI = SeenAccesses.insert(Current);
      if (!SeenI.second || !Seen.contains(Current->getBlock()))
        continue;

      // Bail out if we exceed the threshold.
      if (SeenAccesses.size() >= MSSAThreshold)
        return {};

      // MemoryUses are read-only accesses.
      if (isa<MemoryUse>(Current))
        continue;

      // For a MemoryDef, check if it aliases any of the locations feeding
      // the original condition.
      if (auto *CurrentDef = dyn_cast<MemoryDef>(Current)) {
        if (any_of(AccessedLocs, [&AA, CurrentDef](MemoryLocation &Loc) {
              return isModSet(
                  AA.getModRefInfo(CurrentDef->getMemoryInst(), Loc));
            }))
          return {};
      }

      for (Use &U : Current->uses())
        AccessesToCheck.push_back(cast<MemoryAccess>(U.getUser()));
    }

    // We could also allow loops with known trip counts without mustprogress,
    // but ScalarEvolution may not be available.
    Info.PathIsNoop &=
        L.getHeader()->getParent()->mustProgress() || hasMustProgress(&L);

    // If the path is considered a no-op so far, check if it reaches a
    // single exit block without any phis. This ensures no values from the
    // loop are used outside of the loop.
    if (Info.PathIsNoop) {
      for (auto *Exiting : ExitingBlocks) {
        if (!Seen.contains(Exiting))
          continue;
        for (auto *Succ : successors(Exiting)) {
          if (L.contains(Succ))
            continue;

          Info.PathIsNoop &= llvm::empty(Succ->phis()) &&
                             (!Info.ExitForPath || Info.ExitForPath == Succ);
          if (!Info.PathIsNoop)
            break;
          assert((!Info.ExitForPath || Info.ExitForPath == Succ) &&
                 "cannot have multiple exit blocks");
          Info.ExitForPath = Succ;
        }
      }
    }
    if (!Info.ExitForPath)
      Info.PathIsNoop = false;

    Info.InstToDuplicate = InstToDuplicate;
    return Info;
  };

  // If we branch to the same successor, partial unswitching will not be
  // beneficial.
  if (TI->getSuccessor(0) == TI->getSuccessor(1))
    return {};

  if (auto Info = HasNoClobbersOnPath(TI->getSuccessor(0), L.getHeader(),
                                      AccessesToCheck)) {
    Info->KnownValue = ConstantInt::getTrue(TI->getContext());
    return Info;
  }
  if (auto Info = HasNoClobbersOnPath(TI->getSuccessor(1), L.getHeader(),
                                      AccessesToCheck)) {
    Info->KnownValue = ConstantInt::getFalse(TI->getContext());
    return Info;
  }

  return {};
}