1 //===- PlaceSafepoints.cpp - Place GC Safepoints --------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // Place garbage collection safepoints at appropriate locations in the IR. This
10 // does not make relocation semantics or variable liveness explicit. That's
11 // done by RewriteStatepointsForGC.
12 //
13 // Terminology:
14 // - A call is said to be "parseable" if there is a stack map generated for the
15 // return PC of the call. A runtime can determine where values listed in the
16 // deopt arguments and (after RewriteStatepointsForGC) gc arguments are located
17 // on the stack when the code is suspended inside such a call. Every parse
18 // point is represented by a call wrapped in a gc.statepoint intrinsic.
19 // - A "poll" is an explicit check in the generated code to determine if the
20 // runtime needs the generated code to cooperate by calling a helper routine
21 // and thus suspending its execution at a known state. The call to the helper
22 // routine will be parseable. The (gc & runtime specific) logic of a poll is
23 // assumed to be provided in a function of the name "gc.safepoint_poll".
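//
// For illustration only, a gc.safepoint_poll implementation might look
// roughly like the following IR sketch; the flag symbol and the runtime
// helper @__do_safepoint are hypothetical, and the exact shape is GC and
// runtime specific:
//
//   define void @gc.safepoint_poll() {
//   entry:
//     %flag = load i8, ptr @__safepoint_requested
//     %do.poll = icmp ne i8 %flag, 0
//     br i1 %do.poll, label %slow, label %done
//   slow:
//     call void @__do_safepoint()   ; recorded as a parse point after inlining
//     br label %done
//   done:
//     ret void
//   }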
24 //
25 // We aim to insert polls such that running code can quickly be brought to a
26 // well defined state for inspection by the collector. In the current
27 // implementation, this is done via the insertion of poll sites at method entry
28 // and the backedge of most loops. We try to avoid inserting more polls than
29 // are necessary to ensure a finite period between poll sites. This is not
30 // because the poll itself is expensive in the generated code; it's not. Polls
31 // do tend to impact the optimizer itself in negative ways; we'd like to avoid
32 // perturbing the optimization of the method as much as we can.
33 //
34 // We also need to make most call sites parseable. The callee might execute a
35 // poll (or otherwise be inspected by the GC). If so, the entire stack
36 // (including the suspended frame of the current method) must be parseable.
37 //
38 // This pass will insert:
39 // - Call parse points ("call safepoints") for any call which may need to
40 // reach a safepoint during the execution of the callee function.
41 // - Backedge safepoint polls and entry safepoint polls to ensure that
42 // executing code reaches a safepoint poll in a finite amount of time.
43 //
44 // We do not currently support return statepoints, but adding them would not
45 // be hard. They are not required for correctness - entry safepoints are an
46 // alternative - but some GCs may prefer them. Patches welcome.
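//
// As a rough before/after sketch (illustrative IR, names hypothetical): a
// loop latch such as
//
//   latch:
//     ...
//     br i1 %cond, label %header, label %exit
//
// gains a call to @gc.safepoint_poll (on the backedge or in the latch,
// depending on -spp-split-backedge), which this pass then inlines; the
// runtime call inside the inlined poll body is what RewriteStatepointsForGC
// later wraps in a gc.statepoint.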
47 //
48 //===----------------------------------------------------------------------===//
49
50 #include "llvm/InitializePasses.h"
51 #include "llvm/Pass.h"
52
53 #include "llvm/ADT/SetVector.h"
54 #include "llvm/ADT/Statistic.h"
55 #include "llvm/Analysis/CFG.h"
56 #include "llvm/Analysis/LoopInfo.h"
57 #include "llvm/Analysis/ScalarEvolution.h"
58 #include "llvm/Analysis/TargetLibraryInfo.h"
59 #include "llvm/IR/Dominators.h"
60 #include "llvm/IR/IntrinsicInst.h"
61 #include "llvm/IR/LegacyPassManager.h"
62 #include "llvm/IR/Statepoint.h"
63 #include "llvm/Support/CommandLine.h"
64 #include "llvm/Support/Debug.h"
65 #include "llvm/Transforms/Scalar.h"
66 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
67 #include "llvm/Transforms/Utils/Cloning.h"
68 #include "llvm/Transforms/Utils/Local.h"
69
70 #define DEBUG_TYPE "safepoint-placement"
71
72 STATISTIC(NumEntrySafepoints, "Number of entry safepoints inserted");
73 STATISTIC(NumBackedgeSafepoints, "Number of backedge safepoints inserted");
74
75 STATISTIC(CallInLoop,
76 "Number of loops without safepoints due to calls in loop");
77 STATISTIC(FiniteExecution,
78 "Number of loops without safepoints finite execution");
79
80 using namespace llvm;
81
82 // Ignore opportunities to avoid placing safepoints on backedges, useful for
83 // validation
84 static cl::opt<bool> AllBackedges("spp-all-backedges", cl::Hidden,
85 cl::init(false));
86
87 /// How narrow does the trip count of a loop have to be to be considered
88 /// "counted"? Counted loops do not get safepoints at backedges.
89 static cl::opt<int> CountedLoopTripWidth("spp-counted-loop-trip-width",
90 cl::Hidden, cl::init(32));
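// For example, with the default width of 32, a source loop roughly like
//   for (uint32_t i = 0; i != n; ++i) { /* no calls */ }   // illustrative
// has a maximum backedge-taken count that fits in 32 bits, so it is treated
// as counted and receives no backedge poll.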
91
92 // If true, split the backedge of a loop when placing the safepoint, otherwise
93 // split the latch block itself. Both are useful to support for
94 // experimentation, but in practice, it looks like splitting the backedge
95 // optimizes better.
96 static cl::opt<bool> SplitBackedge("spp-split-backedge", cl::Hidden,
97 cl::init(false));
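// Roughly (hypothetical block names):
//   split backedge: latch -> poll.split -> header  (the poll goes in poll.split)
//   split latch:    the poll is inserted in the latch, before its terminator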
98
99 namespace {
100
101 /// An analysis pass whose purpose is to identify each of the backedges in
102 /// the function which require a safepoint poll to be inserted.
103 struct PlaceBackedgeSafepointsImpl : public FunctionPass {
104 static char ID;
105
106 /// The output of the pass - gives a list of each backedge (described by
107 /// pointing at the branch) which needs a poll inserted.
108 std::vector<Instruction *> PollLocations;
109
110 /// True unless we're running spp-no-call, in which case we need to disable
111 /// the call-dependent placement opts.
112 bool CallSafepointsEnabled;
113
114 ScalarEvolution *SE = nullptr;
115 DominatorTree *DT = nullptr;
116 LoopInfo *LI = nullptr;
117 TargetLibraryInfo *TLI = nullptr;
118
119 PlaceBackedgeSafepointsImpl(bool CallSafepoints = false)
120 : FunctionPass(ID), CallSafepointsEnabled(CallSafepoints) {
121 initializePlaceBackedgeSafepointsImplPass(*PassRegistry::getPassRegistry());
122 }
123
124 bool runOnLoop(Loop *);
125 void runOnLoopAndSubLoops(Loop *L) {
126 // Visit all the subloops
127 for (Loop *I : *L)
128 runOnLoopAndSubLoops(I);
129 runOnLoop(L);
130 }
131
132 bool runOnFunction(Function &F) override {
133 SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
134 DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
135 LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
136 TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
137 for (Loop *I : *LI) {
138 runOnLoopAndSubLoops(I);
139 }
140 return false;
141 }
142
143 void getAnalysisUsage(AnalysisUsage &AU) const override {
144 AU.addRequired<DominatorTreeWrapperPass>();
145 AU.addRequired<ScalarEvolutionWrapperPass>();
146 AU.addRequired<LoopInfoWrapperPass>();
147 AU.addRequired<TargetLibraryInfoWrapperPass>();
148 // We no longer modify the IR at all in this pass. Thus all
149 // analyses are preserved.
150 AU.setPreservesAll();
151 }
152 };
153 }
154
155 static cl::opt<bool> NoEntry("spp-no-entry", cl::Hidden, cl::init(false));
156 static cl::opt<bool> NoCall("spp-no-call", cl::Hidden, cl::init(false));
157 static cl::opt<bool> NoBackedge("spp-no-backedge", cl::Hidden, cl::init(false));
158
159 namespace {
160 struct PlaceSafepoints : public FunctionPass {
161 static char ID; // Pass identification, replacement for typeid
162
163 PlaceSafepoints() : FunctionPass(ID) {
164 initializePlaceSafepointsPass(*PassRegistry::getPassRegistry());
165 }
166 bool runOnFunction(Function &F) override;
167
168 void getAnalysisUsage(AnalysisUsage &AU) const override {
169 // We modify the graph wholesale (inlining, block insertion, etc). We
170 // preserve nothing at the moment. We could potentially preserve dom tree
171 // if that was worth doing
172 AU.addRequired<TargetLibraryInfoWrapperPass>();
173 }
174 };
175 }
176
177 // Insert a safepoint poll immediately before the given instruction. Does
178 // not handle the parsability of state at the runtime call, that's the
179 // caller's job.
180 static void
181 InsertSafepointPoll(Instruction *InsertBefore,
182 std::vector<CallBase *> &ParsePointsNeeded /*rval*/,
183 const TargetLibraryInfo &TLI);
184
185 static bool needsStatepoint(CallBase *Call, const TargetLibraryInfo &TLI) {
186 if (callsGCLeafFunction(Call, TLI))
187 return false;
188 if (auto *CI = dyn_cast<CallInst>(Call)) {
189 if (CI->isInlineAsm())
190 return false;
191 }
192
193 return !(isa<GCStatepointInst>(Call) || isa<GCRelocateInst>(Call) ||
194 isa<GCResultInst>(Call));
195 }
196
197 /// Returns true if this loop is known to contain a call safepoint which
198 /// must unconditionally execute on any iteration of the loop which returns
199 // to the loop header via an edge from Pred. Returns a conservatively correct
200 /// answer; i.e. false is always valid.
201 static bool containsUnconditionalCallSafepoint(Loop *L, BasicBlock *Header,
202 BasicBlock *Pred,
203 DominatorTree &DT,
204 const TargetLibraryInfo &TLI) {
205 // In general, we're looking for any cut of the graph which ensures
206 // there's a call safepoint along every edge between Header and Pred.
207 // For the moment, we look only for the 'cuts' that consist of a single call
208 // instruction in a block which is dominated by the Header and dominates the
209 // loop latch (Pred) block. Somewhat surprisingly, walking the entire chain
210 // of such dominating blocks gets substantially more occurrences than just
211 // checking the Pred and Header blocks themselves. This may be due to the
212 // density of loop exit conditions caused by range and null checks.
213 // TODO: structure this as an analysis pass, cache the result for subloops,
214 // avoid dom tree recalculations
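// Illustrative (hypothetical) shape: Header -> A -> Pred, where only block A
// contains an unconditional call. Checking just Header and Pred would miss
// it; walking Pred's idom chain (Pred, A, Header) finds the call in A.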
215 assert(DT.dominates(Header, Pred) && "loop latch not dominated by header?");
216
217 BasicBlock *Current = Pred;
218 while (true) {
219 for (Instruction &I : *Current) {
220 if (auto *Call = dyn_cast<CallBase>(&I))
221 // Note: Technically, needing a safepoint isn't quite the right
222 // condition here. We should instead be checking if the target method
223 // has an unconditional poll. In practice, this is only a theoretical concern
224 // since we don't have any methods with conditional-only safepoint
225 // polls.
227 if (needsStatepoint(Call, TLI))
228 return true;
229 }
230
231 if (Current == Header)
232 break;
233 Current = DT.getNode(Current)->getIDom()->getBlock();
234 }
235
236 return false;
237 }
238
239 /// Returns true if this loop is known to terminate in a finite number of
240 /// iterations. Note that this function may return false for a loop which
241 // does in fact terminate in a finite constant number of iterations due to
242 /// conservatism in the analysis.
243 static bool mustBeFiniteCountedLoop(Loop *L, ScalarEvolution *SE,
244 BasicBlock *Pred) {
245 // A conservative bound on the loop as a whole.
246 const SCEV *MaxTrips = SE->getConstantMaxBackedgeTakenCount(L);
247 if (!isa<SCEVCouldNotCompute>(MaxTrips) &&
248 SE->getUnsignedRange(MaxTrips).getUnsignedMax().isIntN(
249 CountedLoopTripWidth))
250 return true;
251
252 // If this is a conditional branch to the header with the alternate path
253 // being outside the loop, we can ask questions about the execution frequency
254 // of the exit block.
255 if (L->isLoopExiting(Pred)) {
256 // This returns an exact expression only. TODO: We really only need an
257 // upper bound here, but SE doesn't expose that.
258 const SCEV *MaxExec = SE->getExitCount(L, Pred);
259 if (!isa<SCEVCouldNotCompute>(MaxExec) &&
260 SE->getUnsignedRange(MaxExec).getUnsignedMax().isIntN(
261 CountedLoopTripWidth))
262 return true;
263 }
264
265 return /* not finite */ false;
266 }
267
268 static void scanOneBB(Instruction *Start, Instruction *End,
269 std::vector<CallInst *> &Calls,
270 DenseSet<BasicBlock *> &Seen,
271 std::vector<BasicBlock *> &Worklist) {
272 for (BasicBlock::iterator BBI(Start), BBE0 = Start->getParent()->end(),
273 BBE1 = BasicBlock::iterator(End);
274 BBI != BBE0 && BBI != BBE1; BBI++) {
275 if (CallInst *CI = dyn_cast<CallInst>(&*BBI))
276 Calls.push_back(CI);
277
278 // FIXME: This code does not handle invokes
279 assert(!isa<InvokeInst>(&*BBI) &&
280 "support for invokes in poll code needed");
281
282 // Only add the successor blocks if we reach the terminator instruction
283 // without encountering end first
284 if (BBI->isTerminator()) {
285 BasicBlock *BB = BBI->getParent();
286 for (BasicBlock *Succ : successors(BB)) {
287 if (Seen.insert(Succ).second) {
288 Worklist.push_back(Succ);
289 }
290 }
291 }
292 }
293 }
294
295 static void scanInlinedCode(Instruction *Start, Instruction *End,
296 std::vector<CallInst *> &Calls,
297 DenseSet<BasicBlock *> &Seen) {
298 Calls.clear();
299 std::vector<BasicBlock *> Worklist;
300 Seen.insert(Start->getParent());
301 scanOneBB(Start, End, Calls, Seen, Worklist);
302 while (!Worklist.empty()) {
303 BasicBlock *BB = Worklist.back();
304 Worklist.pop_back();
305 scanOneBB(&*BB->begin(), End, Calls, Seen, Worklist);
306 }
307 }
308
309 bool PlaceBackedgeSafepointsImpl::runOnLoop(Loop *L) {
310 // Loop through all loop latches (branches controlling backedges). We need
311 // to place a safepoint on every backedge (potentially).
312 // Note: In common usage, there will be only one edge due to LoopSimplify
313 // having run sometime earlier in the pipeline, but this code must be correct
314 // w.r.t. loops with multiple backedges.
315 BasicBlock *Header = L->getHeader();
316 SmallVector<BasicBlock*, 16> LoopLatches;
317 L->getLoopLatches(LoopLatches);
318 for (BasicBlock *Pred : LoopLatches) {
319 assert(L->contains(Pred));
320
321 // Make a policy decision about whether this loop needs a safepoint or
322 // not. Note that this is about unburdening the optimizer in loops, not
323 // avoiding the runtime cost of the actual safepoint.
324 if (!AllBackedges) {
325 if (mustBeFiniteCountedLoop(L, SE, Pred)) {
326 LLVM_DEBUG(dbgs() << "skipping safepoint placement in finite loop\n");
327 FiniteExecution++;
328 continue;
329 }
330 if (CallSafepointsEnabled &&
331 containsUnconditionalCallSafepoint(L, Header, Pred, *DT, *TLI)) {
332 // Note: This is only semantically legal since we won't do any further
333 // IPO or inlining before the actual call insertion. If we did, we
334 // might later lose this call safepoint.
335 LLVM_DEBUG(
336 dbgs()
337 << "skipping safepoint placement due to unconditional call\n");
338 CallInLoop++;
339 continue;
340 }
341 }
342
343 // TODO: We can create an inner loop which runs a finite number of
344 // iterations with an outer loop which contains a safepoint. This would
345 // not help runtime performance that much, but it might help our ability to
346 // optimize the inner loop.
347
348 // Safepoint insertion would involve creating a new basic block (as the
349 // target of the current backedge) which does the safepoint (of all live
350 // variables) and branches to the true header
351 Instruction *Term = Pred->getTerminator();
352
353 LLVM_DEBUG(dbgs() << "[LSP] terminator instruction: " << *Term);
354
355 PollLocations.push_back(Term);
356 }
357
358 return false;
359 }
360
361 /// Returns true if an entry safepoint is not required before this callsite in
362 /// the caller function.
363 static bool doesNotRequireEntrySafepointBefore(CallBase *Call) {
364 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call)) {
365 switch (II->getIntrinsicID()) {
366 case Intrinsic::experimental_gc_statepoint:
367 case Intrinsic::experimental_patchpoint_void:
368 case Intrinsic::experimental_patchpoint_i64:
369 // These can wrap an actual call which may grow the stack by an unbounded
370 // amount or run forever.
371 return false;
372 default:
373 // Most LLVM intrinsics are things which do not expand to actual calls, or
374 // at least if they do, are leaf functions that cause only finite stack
375 // growth. In particular, the optimizer likes to form things like memsets
376 // out of stores in the original IR. Another important example is
377 // llvm.localescape which must occur in the entry block. Inserting a
378 // safepoint before it is not legal since it could push the localescape
379 // out of the entry block.
380 return true;
381 }
382 }
383 return false;
384 }
385
386 static Instruction *findLocationForEntrySafepoint(Function &F,
387 DominatorTree &DT) {
388
389 // Conceptually, this poll needs to be on method entry, but in
390 // practice, we place it as late in the entry block as possible. We
391 // can place it as late as we want as long as it dominates all calls
392 // that can grow the stack. This, combined with backedge polls,
393 // gives us all the progress guarantees we need.
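  // Illustrative (hypothetical) entry block:
  //   entry:
  //     %buf = alloca [16 x i8]        ; skipped - not a call
  //     %v = load i32, ptr %p          ; skipped - not a call
  //     call void @bar()               ; first 'real' call - cursor stops here
  // and the entry poll would then be inserted before the call to @bar.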
394
395 // HasNextInstruction and NextInstruction are used to iterate
396 // through a "straight line" execution sequence.
397
398 auto HasNextInstruction = [](Instruction *I) {
399 if (!I->isTerminator())
400 return true;
401
402 BasicBlock *nextBB = I->getParent()->getUniqueSuccessor();
403 return nextBB && (nextBB->getUniquePredecessor() != nullptr);
404 };
405
406 auto NextInstruction = [&](Instruction *I) {
407 assert(HasNextInstruction(I) &&
408 "first check if there is a next instruction!");
409
410 if (I->isTerminator())
411 return &I->getParent()->getUniqueSuccessor()->front();
412 return &*++I->getIterator();
413 };
414
415 Instruction *Cursor = nullptr;
416 for (Cursor = &F.getEntryBlock().front(); HasNextInstruction(Cursor);
417 Cursor = NextInstruction(Cursor)) {
418
419 // We need to ensure a safepoint poll occurs before any 'real' call. The
420 // easiest way to ensure finite execution between safepoints in the face of
421 // recursive and mutually recursive functions is to enforce that each take
422 // a safepoint. Additionally, we need to ensure a poll before any call
423 // which can grow the stack by an unbounded amount. This isn't required
424 // for GC semantics per se, but is a common requirement for languages
425 // which detect stack overflow via guard pages and then throw exceptions.
426 if (auto *Call = dyn_cast<CallBase>(Cursor)) {
427 if (doesNotRequireEntrySafepointBefore(Call))
428 continue;
429 break;
430 }
431 }
432
433 assert((HasNextInstruction(Cursor) || Cursor->isTerminator()) &&
434 "either we stopped because of a call, or because of terminator");
435
436 return Cursor;
437 }
438
439 const char GCSafepointPollName[] = "gc.safepoint_poll";
440
441 static bool isGCSafepointPoll(Function &F) {
442 return F.getName().equals(GCSafepointPollName);
443 }
444
445 /// Returns true if this function should be rewritten to include safepoint
446 /// polls and parseable call sites. The main point of this function is to be
447 /// an extension point for custom logic.
448 static bool shouldRewriteFunction(Function &F) {
449 // TODO: This should check the GCStrategy
450 if (F.hasGC()) {
451 const auto &FunctionGCName = F.getGC();
452 const StringRef StatepointExampleName("statepoint-example");
453 const StringRef CoreCLRName("coreclr");
454 return (StatepointExampleName == FunctionGCName) ||
455 (CoreCLRName == FunctionGCName);
456 } else
457 return false;
458 }
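// For reference, such a function carries its GC strategy on the IR
// definition, e.g. (illustrative):
//
//   define void @foo() gc "statepoint-example" { ... }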
459
460 // TODO: These should become properties of the GCStrategy, possibly with
461 // command line overrides.
462 static bool enableEntrySafepoints(Function &F) { return !NoEntry; }
463 static bool enableBackedgeSafepoints(Function &F) { return !NoBackedge; }
464 static bool enableCallSafepoints(Function &F) { return !NoCall; }
465
466 bool PlaceSafepoints::runOnFunction(Function &F) {
467 if (F.isDeclaration() || F.empty()) {
468 // This is a declaration, nothing to do. Must exit early to avoid crash in
469 // dom tree calculation
470 return false;
471 }
472
473 if (isGCSafepointPoll(F)) {
474 // Given we're inlining this inside of safepoint poll insertion, this
475 // doesn't make any sense. Note that we do make any contained calls
476 // parseable after we inline a poll.
477 return false;
478 }
479
480 if (!shouldRewriteFunction(F))
481 return false;
482
483 const TargetLibraryInfo &TLI =
484 getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
485
486 bool Modified = false;
487
488 // In various bits below, we rely on the fact that uses are reachable from
489 // defs. When there are basic blocks unreachable from the entry, dominance
490 // and reachability queries return nonsensical results. Thus, we preprocess
491 // the function to ensure these properties hold.
492 Modified |= removeUnreachableBlocks(F);
493
494 // STEP 1 - Insert the safepoint polling locations. We do not need to
495 // actually insert parse points yet. That will be done for all polls and
496 // calls in a single pass.
497
498 DominatorTree DT;
499 DT.recalculate(F);
500
501 SmallVector<Instruction *, 16> PollsNeeded;
502 std::vector<CallBase *> ParsePointNeeded;
503
504 if (enableBackedgeSafepoints(F)) {
505 // Construct a pass manager to run the LoopPass backedge logic. We
506 // need the pass manager to handle scheduling all the loop passes
507 // appropriately. Doing this by hand is painful and just not worth messing
508 // with for the moment.
509 legacy::FunctionPassManager FPM(F.getParent());
510 bool CanAssumeCallSafepoints = enableCallSafepoints(F);
511 auto *PBS = new PlaceBackedgeSafepointsImpl(CanAssumeCallSafepoints);
512 FPM.add(PBS);
513 FPM.run(F);
514
515 // We preserve dominance information when inserting the poll, otherwise
516 // we'd have to recalculate this on every insert
517 DT.recalculate(F);
518
519 auto &PollLocations = PBS->PollLocations;
520
521 auto OrderByBBName = [](Instruction *a, Instruction *b) {
522 return a->getParent()->getName() < b->getParent()->getName();
523 };
524 // We need the order of list to be stable so that naming ends up stable
525 // when we split edges. This makes test cases much easier to write.
526 llvm::sort(PollLocations, OrderByBBName);
527
528 // We can sometimes end up with duplicate poll locations. This happens if
529 // a single loop is visited more than once. The fact this happens seems
530 // wrong, but it does happen for the split-backedge.ll test case.
531 PollLocations.erase(std::unique(PollLocations.begin(),
532 PollLocations.end()),
533 PollLocations.end());
534
535 // Insert a poll at each point the analysis pass identified
536 // The poll location must be the terminator of a loop latch block.
537 for (Instruction *Term : PollLocations) {
538 // We are inserting a poll, the function is modified
539 Modified = true;
540
541 if (SplitBackedge) {
542 // Split the backedge of the loop and insert the poll within that new
543 // basic block. This creates a loop with two latches per original
544 // latch (which is non-ideal), but this appears to be easier to
545 // optimize in practice than inserting the poll immediately before the
546 // latch test.
547
548 // Since this is a latch, at least one of the successors must dominate
549 // it. It's possible that we have a) duplicate edges to the same header
550 // and b) edges to distinct loop headers. We need to insert polls on
551 // each.
552 SetVector<BasicBlock *> Headers;
553 for (unsigned i = 0; i < Term->getNumSuccessors(); i++) {
554 BasicBlock *Succ = Term->getSuccessor(i);
555 if (DT.dominates(Succ, Term->getParent())) {
556 Headers.insert(Succ);
557 }
558 }
559 assert(!Headers.empty() && "poll location is not a loop latch?");
560
561 // The split loop structure here is so that we only need to recalculate
562 // the dominator tree once. Alternatively, we could just keep it up to
563 // date and use a more natural merged loop.
564 SetVector<BasicBlock *> SplitBackedges;
565 for (BasicBlock *Header : Headers) {
566 BasicBlock *NewBB = SplitEdge(Term->getParent(), Header, &DT);
567 PollsNeeded.push_back(NewBB->getTerminator());
568 NumBackedgeSafepoints++;
569 }
570 } else {
571 // Split the latch block itself, right before the terminator.
572 PollsNeeded.push_back(Term);
573 NumBackedgeSafepoints++;
574 }
575 }
576 }
577
578 if (enableEntrySafepoints(F)) {
579 if (Instruction *Location = findLocationForEntrySafepoint(F, DT)) {
580 PollsNeeded.push_back(Location);
581 Modified = true;
582 NumEntrySafepoints++;
583 }
584 // TODO: else we should assert that there was, in fact, a policy choice to
585 // not insert an entry safepoint poll.
586 }
587
588 // Now that we've identified all the needed safepoint poll locations, insert
589 // safepoint polls themselves.
590 for (Instruction *PollLocation : PollsNeeded) {
591 std::vector<CallBase *> RuntimeCalls;
592 InsertSafepointPoll(PollLocation, RuntimeCalls, TLI);
593 llvm::append_range(ParsePointNeeded, RuntimeCalls);
594 }
595
596 return Modified;
597 }
598
599 char PlaceBackedgeSafepointsImpl::ID = 0;
600 char PlaceSafepoints::ID = 0;
601
602 FunctionPass *llvm::createPlaceSafepointsPass() {
603 return new PlaceSafepoints();
604 }
605
606 INITIALIZE_PASS_BEGIN(PlaceBackedgeSafepointsImpl,
607 "place-backedge-safepoints-impl",
608 "Place Backedge Safepoints", false, false)
609 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
610 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
611 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
612 INITIALIZE_PASS_END(PlaceBackedgeSafepointsImpl,
613 "place-backedge-safepoints-impl",
614 "Place Backedge Safepoints", false, false)
615
616 INITIALIZE_PASS_BEGIN(PlaceSafepoints, "place-safepoints", "Place Safepoints",
617 false, false)
618 INITIALIZE_PASS_END(PlaceSafepoints, "place-safepoints", "Place Safepoints",
619 false, false)
620
621 static void
622 InsertSafepointPoll(Instruction *InsertBefore,
623 std::vector<CallBase *> &ParsePointsNeeded /*rval*/,
624 const TargetLibraryInfo &TLI) {
625 BasicBlock *OrigBB = InsertBefore->getParent();
626 Module *M = InsertBefore->getModule();
627 assert(M && "must be part of a module");
628
629 // Inline the safepoint poll implementation - this will get all the branch,
630 // control flow, etc. Most importantly, it will introduce the actual slow
631 // path call - where we need to insert a safepoint (parsepoint).
632
633 auto *F = M->getFunction(GCSafepointPollName);
634 assert(F && "gc.safepoint_poll function is missing");
635 assert(F->getValueType() ==
636 FunctionType::get(Type::getVoidTy(M->getContext()), false) &&
637 "gc.safepoint_poll declared with wrong type");
638 assert(!F->empty() && "gc.safepoint_poll must be a non-empty function");
639 CallInst *PollCall = CallInst::Create(F, "", InsertBefore);
640
641 // Record some information about the call site we're replacing
642 BasicBlock::iterator Before(PollCall), After(PollCall);
643 bool IsBegin = false;
644 if (Before == OrigBB->begin())
645 IsBegin = true;
646 else
647 Before--;
648
649 After++;
650 assert(After != OrigBB->end() && "must have successor");
651
652 // Do the actual inlining
653 InlineFunctionInfo IFI;
654 bool InlineStatus = InlineFunction(*PollCall, IFI).isSuccess();
655 assert(InlineStatus && "inline must succeed");
656 (void)InlineStatus; // suppress warning in release-asserts
657
658 // Check post-conditions
659 assert(IFI.StaticAllocas.empty() && "can't have allocs");
660
661 std::vector<CallInst *> Calls; // new calls
662 DenseSet<BasicBlock *> BBs; // new BBs + insertee
663
664 // Include only the newly inserted instructions. Note: begin may not be valid
665 // if we inserted to the beginning of the basic block
666 BasicBlock::iterator Start = IsBegin ? OrigBB->begin() : std::next(Before);
667
668 // If your poll function includes an unreachable at the end, that's not
669 // valid. Bugpoint likes to create this, so check for it.
670 assert(isPotentiallyReachable(&*Start, &*After) &&
671 "malformed poll function");
672
673 scanInlinedCode(&*Start, &*After, Calls, BBs);
674 assert(!Calls.empty() && "slow path not found for safepoint poll");
675
676 // Record the fact we need a parsable state at the runtime call contained in
677 // the poll function. This is required so that the runtime knows how to
678 // parse the last frame when we actually take the safepoint (i.e. execute
679 // the slow path)
680 assert(ParsePointsNeeded.empty());
681 for (auto *CI : Calls) {
682 // No safepoint needed or wanted
683 if (!needsStatepoint(CI, TLI))
684 continue;
685
686 // These are likely runtime calls. Should we assert that via calling
687 // convention or something?
688 ParsePointsNeeded.push_back(CI);
689 }
690 assert(ParsePointsNeeded.size() <= Calls.size());
691 }
692