1 //===- RewriteStatepointsForGC.cpp - Make GC relocations explicit ---------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // Rewrite call/invoke instructions so as to make potential relocations
10 // performed by the garbage collector explicit in the IR.
11 //
12 //===----------------------------------------------------------------------===//
13
14 #include "llvm/Transforms/Scalar/RewriteStatepointsForGC.h"
15
16 #include "llvm/ADT/ArrayRef.h"
17 #include "llvm/ADT/DenseMap.h"
18 #include "llvm/ADT/DenseSet.h"
19 #include "llvm/ADT/MapVector.h"
20 #include "llvm/ADT/None.h"
21 #include "llvm/ADT/Optional.h"
22 #include "llvm/ADT/STLExtras.h"
23 #include "llvm/ADT/SetVector.h"
24 #include "llvm/ADT/SmallSet.h"
25 #include "llvm/ADT/SmallVector.h"
26 #include "llvm/ADT/StringRef.h"
27 #include "llvm/ADT/iterator_range.h"
28 #include "llvm/Analysis/DomTreeUpdater.h"
29 #include "llvm/Analysis/TargetLibraryInfo.h"
30 #include "llvm/Analysis/TargetTransformInfo.h"
31 #include "llvm/IR/Argument.h"
32 #include "llvm/IR/Attributes.h"
33 #include "llvm/IR/BasicBlock.h"
34 #include "llvm/IR/CallingConv.h"
35 #include "llvm/IR/Constant.h"
36 #include "llvm/IR/Constants.h"
37 #include "llvm/IR/DataLayout.h"
38 #include "llvm/IR/DerivedTypes.h"
39 #include "llvm/IR/Dominators.h"
40 #include "llvm/IR/Function.h"
41 #include "llvm/IR/IRBuilder.h"
42 #include "llvm/IR/InstIterator.h"
43 #include "llvm/IR/InstrTypes.h"
44 #include "llvm/IR/Instruction.h"
45 #include "llvm/IR/Instructions.h"
46 #include "llvm/IR/IntrinsicInst.h"
47 #include "llvm/IR/Intrinsics.h"
48 #include "llvm/IR/LLVMContext.h"
49 #include "llvm/IR/MDBuilder.h"
50 #include "llvm/IR/Metadata.h"
51 #include "llvm/IR/Module.h"
52 #include "llvm/IR/Statepoint.h"
53 #include "llvm/IR/Type.h"
54 #include "llvm/IR/User.h"
55 #include "llvm/IR/Value.h"
56 #include "llvm/IR/ValueHandle.h"
57 #include "llvm/InitializePasses.h"
58 #include "llvm/Pass.h"
59 #include "llvm/Support/Casting.h"
60 #include "llvm/Support/CommandLine.h"
61 #include "llvm/Support/Compiler.h"
62 #include "llvm/Support/Debug.h"
63 #include "llvm/Support/ErrorHandling.h"
64 #include "llvm/Support/raw_ostream.h"
65 #include "llvm/Transforms/Scalar.h"
66 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
67 #include "llvm/Transforms/Utils/Local.h"
68 #include "llvm/Transforms/Utils/PromoteMemToReg.h"
69 #include <algorithm>
70 #include <cassert>
71 #include <cstddef>
72 #include <cstdint>
73 #include <iterator>
74 #include <set>
75 #include <string>
76 #include <utility>
77 #include <vector>
78
79 #define DEBUG_TYPE "rewrite-statepoints-for-gc"
80
81 using namespace llvm;
82
83 // Print the liveset found at the insert location
84 static cl::opt<bool> PrintLiveSet("spp-print-liveset", cl::Hidden,
85 cl::init(false));
86 static cl::opt<bool> PrintLiveSetSize("spp-print-liveset-size", cl::Hidden,
87 cl::init(false));
88
89 // Print out the base pointers for debugging
90 static cl::opt<bool> PrintBasePointers("spp-print-base-pointers", cl::Hidden,
91 cl::init(false));
92
93 // Cost threshold measuring when it is profitable to rematerialize a value instead
94 // of relocating it
95 static cl::opt<unsigned>
96 RematerializationThreshold("spp-rematerialization-threshold", cl::Hidden,
97 cl::init(6));
98
99 #ifdef EXPENSIVE_CHECKS
100 static bool ClobberNonLive = true;
101 #else
102 static bool ClobberNonLive = false;
103 #endif
104
105 static cl::opt<bool, true> ClobberNonLiveOverride("rs4gc-clobber-non-live",
106 cl::location(ClobberNonLive),
107 cl::Hidden);
108
109 static cl::opt<bool>
110 AllowStatepointWithNoDeoptInfo("rs4gc-allow-statepoint-with-no-deopt-info",
111 cl::Hidden, cl::init(true));
112
113 /// The IR fed into RewriteStatepointsForGC may have had attributes and
114 /// metadata implying dereferenceability that are no longer valid/correct after
115 /// RewriteStatepointsForGC has run. This is because semantically, after
116 /// RewriteStatepointsForGC runs, all calls to gc.statepoint "free" the entire
117 /// heap. stripNonValidData (conservatively) restores
118 /// correctness by erasing all attributes in the module that externally imply
119 /// dereferenceability. Similar reasoning also applies to the noalias
120 /// attributes and metadata. gc.statepoint can touch the entire heap including
121 /// noalias objects.
122 /// Apart from attributes and metadata, we also remove instructions that imply
123 /// constant physical memory: llvm.invariant.start.
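/// For example (illustrative, not an exhaustive list), a dereferenceable(N)
/// argument attribute or !dereferenceable load metadata may no longer hold
/// once a statepoint can move or free the underlying object, so such markers
/// are conservatively dropped.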
124 static void stripNonValidData(Module &M);
125
126 static bool shouldRewriteStatepointsIn(Function &F);
127
128 PreservedAnalyses RewriteStatepointsForGC::run(Module &M,
129 ModuleAnalysisManager &AM) {
130 bool Changed = false;
131 auto &FAM = AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
132 for (Function &F : M) {
133 // Nothing to do for declarations.
134 if (F.isDeclaration() || F.empty())
135 continue;
136
137 // Policy choice says not to rewrite - the most common reason is that we're
138 // compiling code without a GCStrategy.
139 if (!shouldRewriteStatepointsIn(F))
140 continue;
141
142 auto &DT = FAM.getResult<DominatorTreeAnalysis>(F);
143 auto &TTI = FAM.getResult<TargetIRAnalysis>(F);
144 auto &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
145 Changed |= runOnFunction(F, DT, TTI, TLI);
146 }
147 if (!Changed)
148 return PreservedAnalyses::all();
149
150 // stripNonValidData asserts that shouldRewriteStatepointsIn
151 // returns true for at least one function in the module. Since at least
152 // one function changed, we know that the precondition is satisfied.
153 stripNonValidData(M);
154
155 PreservedAnalyses PA;
156 PA.preserve<TargetIRAnalysis>();
157 PA.preserve<TargetLibraryAnalysis>();
158 return PA;
159 }
160
161 namespace {
162
163 class RewriteStatepointsForGCLegacyPass : public ModulePass {
164 RewriteStatepointsForGC Impl;
165
166 public:
167 static char ID; // Pass identification, replacement for typeid
168
169 RewriteStatepointsForGCLegacyPass() : ModulePass(ID), Impl() {
170 initializeRewriteStatepointsForGCLegacyPassPass(
171 *PassRegistry::getPassRegistry());
172 }
173
174 bool runOnModule(Module &M) override {
175 bool Changed = false;
176 for (Function &F : M) {
177 // Nothing to do for declarations.
178 if (F.isDeclaration() || F.empty())
179 continue;
180
181 // Policy choice says not to rewrite - the most common reason is that
182 // we're compiling code without a GCStrategy.
183 if (!shouldRewriteStatepointsIn(F))
184 continue;
185
186 TargetTransformInfo &TTI =
187 getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
188 const TargetLibraryInfo &TLI =
189 getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
190 auto &DT = getAnalysis<DominatorTreeWrapperPass>(F).getDomTree();
191
192 Changed |= Impl.runOnFunction(F, DT, TTI, TLI);
193 }
194
195 if (!Changed)
196 return false;
197
198 // stripNonValidData asserts that shouldRewriteStatepointsIn
199 // returns true for at least one function in the module. Since at least
200 // one function changed, we know that the precondition is satisfied.
201 stripNonValidData(M);
202 return true;
203 }
204
205 void getAnalysisUsage(AnalysisUsage &AU) const override {
206 // We add and rewrite a bunch of instructions, but don't really do much
207 // else. We could in theory preserve a lot more analyses here.
208 AU.addRequired<DominatorTreeWrapperPass>();
209 AU.addRequired<TargetTransformInfoWrapperPass>();
210 AU.addRequired<TargetLibraryInfoWrapperPass>();
211 }
212 };
213
214 } // end anonymous namespace
215
216 char RewriteStatepointsForGCLegacyPass::ID = 0;
217
218 ModulePass *llvm::createRewriteStatepointsForGCLegacyPass() {
219 return new RewriteStatepointsForGCLegacyPass();
220 }
221
222 INITIALIZE_PASS_BEGIN(RewriteStatepointsForGCLegacyPass,
223 "rewrite-statepoints-for-gc",
224 "Make relocations explicit at statepoints", false, false)
225 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
226 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
227 INITIALIZE_PASS_END(RewriteStatepointsForGCLegacyPass,
228 "rewrite-statepoints-for-gc",
229 "Make relocations explicit at statepoints", false, false)
230
231 namespace {
232
233 struct GCPtrLivenessData {
234 /// Values defined in this block.
235 MapVector<BasicBlock *, SetVector<Value *>> KillSet;
236
237 /// Values used in this block (and thus live); does not include values
238 /// killed within this block.
239 MapVector<BasicBlock *, SetVector<Value *>> LiveSet;
240
241 /// Values live into this basic block (i.e. used by any
242 /// instruction in this basic block or ones reachable from here)
243 MapVector<BasicBlock *, SetVector<Value *>> LiveIn;
244
245 /// Values live out of this basic block (i.e. live into
246 /// any successor block)
247 MapVector<BasicBlock *, SetVector<Value *>> LiveOut;
248 };
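// As a rough sketch, these sets are related by the usual backward liveness
// dataflow equations:
//   LiveOut[BB] = union of LiveIn[Succ] over all successors Succ of BB
//   LiveIn[BB]  = LiveSet[BB] union (LiveOut[BB] - KillSet[BB])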
249
250 // The type of the internal cache used inside the findBasePointers family
251 // of functions. From the caller's perspective, this is an opaque type and
252 // should not be inspected.
253 //
254 // In the actual implementation this caches two relations:
255 // - The base relation itself (i.e. this pointer is based on that one)
256 // - The base defining value relation (i.e. before base_phi insertion)
257 // Generally, after the execution of a full findBasePointer call, only the
258 // base relation will remain. Internally, we add a mixture of the two
259 // types, then update all the second type to the first type
260 using DefiningValueMapTy = MapVector<Value *, Value *>;
261 using StatepointLiveSetTy = SetVector<Value *>;
262 using RematerializedValueMapTy =
263 MapVector<AssertingVH<Instruction>, AssertingVH<Value>>;
264
265 struct PartiallyConstructedSafepointRecord {
266 /// The set of values known to be live across this safepoint
267 StatepointLiveSetTy LiveSet;
268
269 /// Mapping from live pointers to a base-defining-value
270 MapVector<Value *, Value *> PointerToBase;
271
272 /// The *new* gc.statepoint instruction itself. This produces the token
273 /// that normal path gc.relocates and the gc.result are tied to.
274 GCStatepointInst *StatepointToken;
275
276 /// Instruction to which exceptional gc relocates are attached
277 /// Makes it easier to iterate through them during relocationViaAlloca.
278 Instruction *UnwindToken;
279
280 /// Record live values that we rematerialized instead of relocating.
281 /// They are not included in the 'LiveSet' field.
282 /// Maps each rematerialized copy to its original value.
283 RematerializedValueMapTy RematerializedValues;
284 };
285
286 } // end anonymous namespace
287
288 static ArrayRef<Use> GetDeoptBundleOperands(const CallBase *Call) {
289 Optional<OperandBundleUse> DeoptBundle =
290 Call->getOperandBundle(LLVMContext::OB_deopt);
291
292 if (!DeoptBundle.hasValue()) {
293 assert(AllowStatepointWithNoDeoptInfo &&
294 "Found non-leaf call without deopt info!");
295 return None;
296 }
297
298 return DeoptBundle.getValue().Inputs;
299 }
300
301 /// Compute the live-in set for every basic block in the function
302 static void computeLiveInValues(DominatorTree &DT, Function &F,
303 GCPtrLivenessData &Data);
304
305 /// Given results from the dataflow liveness computation, find the set of live
306 /// Values at a particular instruction.
307 static void findLiveSetAtInst(Instruction *inst, GCPtrLivenessData &Data,
308 StatepointLiveSetTy &out);
309
310 // TODO: Once we can get to the GCStrategy, this becomes
311 // Optional<bool> isGCManagedPointer(const Type *Ty) const override {
312
313 static bool isGCPointerType(Type *T) {
314 if (auto *PT = dyn_cast<PointerType>(T))
315 // For the sake of this example GC, we arbitrarily pick addrspace(1) as our
316 // GC managed heap. We know that a pointer into this heap needs to be
317 // updated and that no other pointer does.
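// For example, under this convention an 'i8 addrspace(1)*' value is a GC
// pointer which must be relocatable, while a plain 'i8*' (address space 0)
// is left alone.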
318 return PT->getAddressSpace() == 1;
319 return false;
320 }
321
322 // Return true if this type is one which a) is a gc pointer or contains a GC
323 // pointer and b) is of a type this code expects to encounter as a live value.
324 // (The insertion code will assert that a type which matches (a) and not (b)
325 // is not encountered.)
326 static bool isHandledGCPointerType(Type *T) {
327 // We fully support gc pointers
328 if (isGCPointerType(T))
329 return true;
330 // We partially support vectors of gc pointers. The code will assert if it
331 // can't handle something.
332 if (auto VT = dyn_cast<VectorType>(T))
333 if (isGCPointerType(VT->getElementType()))
334 return true;
335 return false;
336 }
337
338 #ifndef NDEBUG
339 /// Returns true if this type contains a gc pointer whether we know how to
340 /// handle that type or not.
341 static bool containsGCPtrType(Type *Ty) {
342 if (isGCPointerType(Ty))
343 return true;
344 if (VectorType *VT = dyn_cast<VectorType>(Ty))
345 return isGCPointerType(VT->getScalarType());
346 if (ArrayType *AT = dyn_cast<ArrayType>(Ty))
347 return containsGCPtrType(AT->getElementType());
348 if (StructType *ST = dyn_cast<StructType>(Ty))
349 return llvm::any_of(ST->elements(), containsGCPtrType);
350 return false;
351 }
352
353 // Returns true if this is a type which a) is a gc pointer or contains a GC
354 // pointer and b) is of a type which the code doesn't expect (i.e. first class
355 // aggregates). Used to trip assertions.
356 static bool isUnhandledGCPointerType(Type *Ty) {
357 return containsGCPtrType(Ty) && !isHandledGCPointerType(Ty);
358 }
359 #endif
360
361 // Return the name of the value suffixed with the provided suffix, or if the
362 // value didn't have a name, the specified default name.
363 static std::string suffixed_name_or(Value *V, StringRef Suffix,
364 StringRef DefaultName) {
365 return V->hasName() ? (V->getName() + Suffix).str() : DefaultName.str();
366 }
367
368 // Conservatively identifies any definitions which might be live at the
369 // given instruction. The analysis is performed immediately before the
370 // given instruction. Values defined by that instruction are not considered
371 // live. Values used by that instruction are considered live.
372 static void analyzeParsePointLiveness(
373 DominatorTree &DT, GCPtrLivenessData &OriginalLivenessData, CallBase *Call,
374 PartiallyConstructedSafepointRecord &Result) {
375 StatepointLiveSetTy LiveSet;
376 findLiveSetAtInst(Call, OriginalLivenessData, LiveSet);
377
378 if (PrintLiveSet) {
379 dbgs() << "Live Variables:\n";
380 for (Value *V : LiveSet)
381 dbgs() << " " << V->getName() << " " << *V << "\n";
382 }
383 if (PrintLiveSetSize) {
384 dbgs() << "Safepoint For: " << Call->getCalledOperand()->getName() << "\n";
385 dbgs() << "Number live values: " << LiveSet.size() << "\n";
386 }
387 Result.LiveSet = LiveSet;
388 }
389
390 // Returns true if V is a knownBaseResult.
391 static bool isKnownBaseResult(Value *V);
392
393 // Returns true if V is a BaseResult that already exists in the IR, i.e. it is
394 // not created by the findBasePointers algorithm.
395 static bool isOriginalBaseResult(Value *V);
396
397 namespace {
398
399 /// A single base defining value - An immediate base defining value for an
400 /// instruction 'Def' is an input to 'Def' whose base is also a base of 'Def'.
401 /// For instructions which have multiple pointer [vector] inputs or that
402 /// transition between vector and scalar types, there is no immediate base
403 /// defining value. The 'base defining value' for 'Def' is the transitive
404 /// closure of this relation stopping at the first instruction which has no
405 /// immediate base defining value. The b.d.v. might itself be a base pointer,
406 /// but it can also be an arbitrary derived pointer.
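/// As an illustrative (hypothetical) example:
///   %gep = getelementptr i8, i8 addrspace(1)* %obj, i64 16
///   %sel = select i1 %c, i8 addrspace(1)* %gep, i8 addrspace(1)* %other
/// %obj is the immediate base defining value of %gep, so the b.d.v. of %gep
/// is %obj. %sel merges two pointer inputs and has no immediate base defining
/// value, so %sel is its own b.d.v. even though it may not be a base pointer.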
407 struct BaseDefiningValueResult {
408 /// Contains the value which is the base defining value.
409 Value * const BDV;
410
411 /// True if the base defining value is also known to be an actual base
412 /// pointer.
413 const bool IsKnownBase;
414
415 BaseDefiningValueResult(Value *BDV, bool IsKnownBase)
416 : BDV(BDV), IsKnownBase(IsKnownBase) {
417 #ifndef NDEBUG
418 // Check consistency between new and old means of checking whether a BDV is
419 // a base.
420 bool MustBeBase = isKnownBaseResult(BDV);
421 assert(!MustBeBase || MustBeBase == IsKnownBase);
422 #endif
423 }
424 };
425
426 } // end anonymous namespace
427
428 static BaseDefiningValueResult findBaseDefiningValue(Value *I);
429
430 /// Return a base defining value for the 'Index' element of the given vector
431 /// instruction 'I'. If Index is null, returns a BDV for the entire vector
432 /// 'I'. As an optimization, this method will try to determine when the
433 /// element is known to already be a base pointer. If this can be established,
434 /// the IsKnownBase field of the result will be true. Note that either a
435 /// vector or a pointer typed value can be returned. For the former, the
436 /// vector returned is a BDV (and possibly a base) of the entire vector 'I'.
437 /// If the latter, the returned pointer is a BDV (or possibly a base) for the
438 /// particular element in 'I'.
439 static BaseDefiningValueResult
440 findBaseDefiningValueOfVector(Value *I) {
441 // Each case parallels findBaseDefiningValue below, see that code for
442 // detailed motivation.
443
444 if (isa<Argument>(I))
445 // An incoming argument to the function is a base pointer
446 return BaseDefiningValueResult(I, true);
447
448 if (isa<Constant>(I))
449 // Base of constant vector consists only of constant null pointers.
450 // For reasoning see similar case inside 'findBaseDefiningValue' function.
451 return BaseDefiningValueResult(ConstantAggregateZero::get(I->getType()),
452 true);
453
454 if (isa<LoadInst>(I))
455 return BaseDefiningValueResult(I, true);
456
457 if (isa<InsertElementInst>(I))
458 // We don't know whether this vector contains entirely base pointers or
459 // not. To be conservatively correct, we treat it as a BDV and will
460 // duplicate code as needed to construct a parallel vector of bases.
461 return BaseDefiningValueResult(I, false);
462
463 if (isa<ShuffleVectorInst>(I))
464 // We don't know whether this vector contains entirely base pointers or
465 // not. To be conservatively correct, we treat it as a BDV and will
466 // duplicate code as needed to construct a parallel vector of bases.
467 // TODO: There are a number of local optimizations which could be applied here
468 // for particular shufflevector patterns.
469 return BaseDefiningValueResult(I, false);
470
471 // The behavior of getelementptr instructions is the same for vector and
472 // non-vector data types.
473 if (auto *GEP = dyn_cast<GetElementPtrInst>(I))
474 return findBaseDefiningValue(GEP->getPointerOperand());
475
476 // If the pointer comes through a bitcast of a vector of pointers to
477 // a vector of another type of pointer, then look through the bitcast
478 if (auto *BC = dyn_cast<BitCastInst>(I))
479 return findBaseDefiningValue(BC->getOperand(0));
480
481 // We assume that functions in the source language only return base
482 // pointers. This should probably be generalized via attributes to support
483 // both source language and internal functions.
484 if (isa<CallInst>(I) || isa<InvokeInst>(I))
485 return BaseDefiningValueResult(I, true);
486
487 // A PHI or Select is a base defining value. The outer findBasePointer
488 // algorithm is responsible for constructing a base value for this BDV.
489 assert((isa<SelectInst>(I) || isa<PHINode>(I)) &&
490 "unknown vector instruction - no base found for vector element");
491 return BaseDefiningValueResult(I, false);
492 }
493
494 /// Helper function for findBasePointer - Will return a value which either a)
495 /// defines the base pointer for the input, b) blocks the simple search
496 /// (i.e. a PHI or Select of two derived pointers), or c) involves a change
497 /// from pointer to vector type or back.
498 static BaseDefiningValueResult findBaseDefiningValue(Value *I) {
499 assert(I->getType()->isPtrOrPtrVectorTy() &&
500 "Illegal to ask for the base pointer of a non-pointer type");
501
502 if (I->getType()->isVectorTy())
503 return findBaseDefiningValueOfVector(I);
504
505 if (isa<Argument>(I))
506 // An incoming argument to the function is a base pointer
507 // We should never have reached here if this argument isn't a gc value
508 return BaseDefiningValueResult(I, true);
509
510 if (isa<Constant>(I)) {
511 // We assume that objects with a constant base (e.g. a global) can't move
512 // and don't need to be reported to the collector because they are always
513 // live. Besides global references, all kinds of constants (e.g. undef,
514 // constant expressions, null pointers) can be introduced by the inliner or
515 // the optimizer, especially on dynamically dead paths.
516 // Here we treat all of them as having a single null base. By doing this we
517 // try to avoid problems reporting various conflicts in the form of
518 // "phi (const1, const2)" or "phi (const, regular gc ptr)".
519 // See constant.ll file for relevant test cases.
520
521 return BaseDefiningValueResult(
522 ConstantPointerNull::get(cast<PointerType>(I->getType())), true);
523 }
524
525 if (CastInst *CI = dyn_cast<CastInst>(I)) {
526 Value *Def = CI->stripPointerCasts();
527 // If stripping pointer casts changes the address space there is an
528 // addrspacecast in between.
529 assert(cast<PointerType>(Def->getType())->getAddressSpace() ==
530 cast<PointerType>(CI->getType())->getAddressSpace() &&
531 "unsupported addrspacecast");
532 // If we find a cast instruction here, it means we've found a cast which is
533 // not simply a pointer cast (i.e. an inttoptr). We don't know how to
534 // handle int->ptr conversion.
535 assert(!isa<CastInst>(Def) && "shouldn't find another cast here");
536 return findBaseDefiningValue(Def);
537 }
538
539 if (isa<LoadInst>(I))
540 // The value loaded is a gc base itself
541 return BaseDefiningValueResult(I, true);
542
543 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I))
544 // The base of this GEP is the base
545 return findBaseDefiningValue(GEP->getPointerOperand());
546
547 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
548 switch (II->getIntrinsicID()) {
549 default:
550 // fall through to general call handling
551 break;
552 case Intrinsic::experimental_gc_statepoint:
553 llvm_unreachable("statepoints don't produce pointers");
554 case Intrinsic::experimental_gc_relocate:
555 // Rerunning safepoint insertion after safepoints are already
556 // inserted is not supported. It could probably be made to work,
557 // but why are you doing this? There's no good reason.
558 llvm_unreachable("repeat safepoint insertion is not supported");
559 case Intrinsic::gcroot:
560 // Currently, this mechanism hasn't been extended to work with gcroot.
561 // There's no reason it couldn't be, but I haven't thought about the
562 // implications much.
563 llvm_unreachable(
564 "interaction with the gcroot mechanism is not supported");
565 }
566 }
567 // We assume that functions in the source language only return base
568 // pointers. This should probably be generalized via attributes to support
569 // both source language and internal functions.
570 if (isa<CallInst>(I) || isa<InvokeInst>(I))
571 return BaseDefiningValueResult(I, true);
572
573 // TODO: I have absolutely no idea how to implement this part yet. It's not
574 // necessarily hard, I just haven't really looked at it yet.
575 assert(!isa<LandingPadInst>(I) && "Landing Pad is unimplemented");
576
577 if (isa<AtomicCmpXchgInst>(I))
578 // A CAS is effectively an atomic store and load combined under a
579 // predicate. From the perspective of base pointers, we just treat it
580 // like a load.
581 return BaseDefiningValueResult(I, true);
582
583 assert(!isa<AtomicRMWInst>(I) && "Xchg handled above, all others are "
584 "binary ops which don't apply to pointers");
585
586 // The aggregate ops. Aggregates can either be in the heap or on the
587 // stack, but in either case, this is simply a field load. As a result,
588 // this is a defining definition of the base just like a load is.
589 if (isa<ExtractValueInst>(I))
590 return BaseDefiningValueResult(I, true);
591
592 // We should never see an insert vector since that would require we be
593 // tracing back a struct value not a pointer value.
594 assert(!isa<InsertValueInst>(I) &&
595 "Base pointer for a struct is meaningless");
596
597 // An extractelement produces a base result exactly when its input does.
598 // We may need to insert a parallel instruction to extract the appropriate
599 // element out of the base vector corresponding to the input. Given this,
600 // it's analogous to the phi and select case even though it's not a merge.
601 if (isa<ExtractElementInst>(I))
602 // Note: There are a lot of obvious peephole cases here. These are deliberately
603 // handled after the main base pointer inference algorithm to make writing
604 // test cases to exercise that code easier.
605 return BaseDefiningValueResult(I, false);
606
607 // The last two cases here don't return a base pointer. Instead, they
608 // return a value which dynamically selects from among several base
609 // derived pointers (each potentially with its own base). It's the job of
610 // the caller to resolve these.
611 assert((isa<SelectInst>(I) || isa<PHINode>(I)) &&
612 "missing instruction case in findBaseDefiningValing");
613 return BaseDefiningValueResult(I, false);
614 }
615
616 /// Returns the base defining value for this value.
617 static Value *findBaseDefiningValueCached(Value *I, DefiningValueMapTy &Cache) {
618 Value *&Cached = Cache[I];
619 if (!Cached) {
620 Cached = findBaseDefiningValue(I).BDV;
621 LLVM_DEBUG(dbgs() << "fBDV-cached: " << I->getName() << " -> "
622 << Cached->getName() << "\n");
623 }
624 assert(Cache[I] != nullptr);
625 return Cached;
626 }
627
628 /// Return a base pointer for this value if known. Otherwise, return its
629 /// base defining value.
630 static Value *findBaseOrBDV(Value *I, DefiningValueMapTy &Cache) {
631 Value *Def = findBaseDefiningValueCached(I, Cache);
632 auto Found = Cache.find(Def);
633 if (Found != Cache.end()) {
634 // Either a base-of relation, or a self reference. Caller must check.
635 return Found->second;
636 }
637 // Only a BDV available
638 return Def;
639 }
640
641 /// This value is a base pointer that is not generated by RS4GC, i.e. it already
642 /// exists in the code.
643 static bool isOriginalBaseResult(Value *V) {
644 // no recursion possible
645 return !isa<PHINode>(V) && !isa<SelectInst>(V) &&
646 !isa<ExtractElementInst>(V) && !isa<InsertElementInst>(V) &&
647 !isa<ShuffleVectorInst>(V);
648 }
649
650 /// Given the result of a call to findBaseDefiningValue, or findBaseOrBDV,
651 /// is it known to be a base pointer? Or do we need to continue searching.
652 static bool isKnownBaseResult(Value *V) {
653 if (isOriginalBaseResult(V))
654 return true;
655 if (isa<Instruction>(V) &&
656 cast<Instruction>(V)->getMetadata("is_base_value")) {
657 // This is a previously inserted base phi or select. We know
658 // that this is a base value.
659 return true;
660 }
661
662 // We need to keep searching
663 return false;
664 }
665
666 // Returns true if First and Second values are both scalar or both vector.
667 static bool areBothVectorOrScalar(Value *First, Value *Second) {
668 return isa<VectorType>(First->getType()) ==
669 isa<VectorType>(Second->getType());
670 }
671
672 namespace {
673
674 /// Models the state of a single base defining value in the findBasePointer
675 /// algorithm for determining where a new instruction is needed to propagate
676 /// the base of this BDV.
677 class BDVState {
678 public:
679 enum StatusTy {
680 // Starting state of lattice
681 Unknown,
682 // Some specific base value -- does *not* mean that instruction
683 // propagates the base of the object
684 // ex: gep %arg, 16 -> %arg is the base value
685 Base,
686 // Need to insert a node to represent a merge.
687 Conflict
688 };
689
690 BDVState() {
691 llvm_unreachable("missing state in map");
692 }
693
694 explicit BDVState(Value *OriginalValue)
695 : OriginalValue(OriginalValue) {}
696 explicit BDVState(Value *OriginalValue, StatusTy Status, Value *BaseValue = nullptr)
697 : OriginalValue(OriginalValue), Status(Status), BaseValue(BaseValue) {
698 assert(Status != Base || BaseValue);
699 }
700
701 StatusTy getStatus() const { return Status; }
702 Value *getOriginalValue() const { return OriginalValue; }
703 Value *getBaseValue() const { return BaseValue; }
704
705 bool isBase() const { return getStatus() == Base; }
706 bool isUnknown() const { return getStatus() == Unknown; }
707 bool isConflict() const { return getStatus() == Conflict; }
708
709 // Values of type BDVState form a lattice, and this function implements the
710 // meet operation.
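// A few illustrative cases of the meet (sketch):
//   Unknown  meet X        == X
//   Base(b)  meet Base(b)  == Base(b)
//   Base(b1) meet Base(b2) == Conflict   (when b1 != b2)
//   Conflict meet X        == Conflict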
712 void meet(const BDVState &Other) {
713 auto markConflict = [&]() {
714 Status = BDVState::Conflict;
715 BaseValue = nullptr;
716 };
717 // Conflict is a final state.
718 if (isConflict())
719 return;
720 // if we are not known - just take other state.
721 if (isUnknown()) {
722 Status = Other.getStatus();
723 BaseValue = Other.getBaseValue();
724 return;
725 }
726 // We are base.
727 assert(isBase() && "Unknown state");
728 // If other is unknown - just keep our state.
729 if (Other.isUnknown())
730 return;
731 // If other is conflict - it is a final state.
732 if (Other.isConflict())
733 return markConflict();
734 // Other is base as well.
735 assert(Other.isBase() && "Unknown state");
736 // If bases are different - Conflict.
737 if (getBaseValue() != Other.getBaseValue())
738 return markConflict();
739 // We are identical, do nothing.
740 }
741
742 bool operator==(const BDVState &Other) const {
743 return OriginalValue == Other.OriginalValue && BaseValue == Other.BaseValue &&
744 Status == Other.Status;
745 }
746
747 bool operator!=(const BDVState &other) const { return !(*this == other); }
748
749 LLVM_DUMP_METHOD
750 void dump() const {
751 print(dbgs());
752 dbgs() << '\n';
753 }
754
755 void print(raw_ostream &OS) const {
756 switch (getStatus()) {
757 case Unknown:
758 OS << "U";
759 break;
760 case Base:
761 OS << "B";
762 break;
763 case Conflict:
764 OS << "C";
765 break;
766 }
767 OS << " (base " << getBaseValue() << " - "
768 << (getBaseValue() ? getBaseValue()->getName() : "nullptr") << ")"
769 << " for " << OriginalValue->getName() << ":";
770 }
771
772 private:
773 AssertingVH<Value> OriginalValue; // instruction this state corresponds to
774 StatusTy Status = Unknown;
775 AssertingVH<Value> BaseValue = nullptr; // Non-null only if Status == Base.
776 };
777
778 } // end anonymous namespace
779
780 #ifndef NDEBUG
781 static raw_ostream &operator<<(raw_ostream &OS, const BDVState &State) {
782 State.print(OS);
783 return OS;
784 }
785 #endif
786
787 /// For a given value or instruction, figure out what base ptr its derived from.
788 /// For gc objects, this is simply itself. On success, returns a value which is
789 /// the base pointer. (This is reliable and can be used for relocation.) On
790 /// failure, returns nullptr.
791 static Value *findBasePointer(Value *I, DefiningValueMapTy &Cache) {
792 Value *Def = findBaseOrBDV(I, Cache);
793
794 if (isKnownBaseResult(Def) && areBothVectorOrScalar(Def, I))
795 return Def;
796
797 // Here's the rough algorithm:
798 // - For every SSA value, construct a mapping to either an actual base
799 // pointer or a PHI which obscures the base pointer.
800 // - Construct a mapping from PHI to unknown TOP state. Use an
801 // optimistic algorithm to propagate base pointer information. Lattice
802 // looks like:
803 // UNKNOWN
804 // b1 b2 b3 b4
805 // CONFLICT
806 // When algorithm terminates, all PHIs will either have a single concrete
807 // base or be in a conflict state.
808 // - For every conflict, insert a dummy PHI node without arguments. Add
809 // these to the base[Instruction] = BasePtr mapping. For every
810 // non-conflict, add the actual base.
811 // - For every conflict, add arguments for the base[a] of each input
812 // arguments.
813 //
814 // Note: A simpler form of this would be to add the conflict form of all
815 // PHIs without running the optimistic algorithm. This would be
816 // analogous to pessimistic data flow and would likely lead to an
817 // overall worse solution.
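// As an illustrative (hypothetical) example: for
//   %p = phi i8 addrspace(1)* [ %gep1, %a ], [ %gep2, %b ]
// where %gep1 and %gep2 derive from distinct bases %base1 and %base2, %p ends
// the fixed point in the Conflict state, so a parallel phi is inserted
//   %p.base = phi i8 addrspace(1)* [ %base1, %a ], [ %base2, %b ]
// (marked with !is_base_value metadata) and becomes the base of %p.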
818
819 #ifndef NDEBUG
820 auto isExpectedBDVType = [](Value *BDV) {
821 return isa<PHINode>(BDV) || isa<SelectInst>(BDV) ||
822 isa<ExtractElementInst>(BDV) || isa<InsertElementInst>(BDV) ||
823 isa<ShuffleVectorInst>(BDV);
824 };
825 #endif
826
827 // Once populated, will contain a mapping from each potentially non-base BDV
828 // to a lattice value (described above) which corresponds to that BDV.
829 // We use the order of insertion (DFS over the def/use graph) to provide a
830 // stable deterministic ordering for visiting DenseMaps (which are unordered)
831 // below. This is important for deterministic compilation.
832 MapVector<Value *, BDVState> States;
833
834 #ifndef NDEBUG
835 auto VerifyStates = [&]() {
836 for (auto &Entry : States) {
837 assert(Entry.first == Entry.second.getOriginalValue());
838 }
839 };
840 #endif
841
842 auto visitBDVOperands = [](Value *BDV, std::function<void (Value*)> F) {
843 if (PHINode *PN = dyn_cast<PHINode>(BDV)) {
844 for (Value *InVal : PN->incoming_values())
845 F(InVal);
846 } else if (SelectInst *SI = dyn_cast<SelectInst>(BDV)) {
847 F(SI->getTrueValue());
848 F(SI->getFalseValue());
849 } else if (auto *EE = dyn_cast<ExtractElementInst>(BDV)) {
850 F(EE->getVectorOperand());
851 } else if (auto *IE = dyn_cast<InsertElementInst>(BDV)) {
852 F(IE->getOperand(0));
853 F(IE->getOperand(1));
854 } else if (auto *SV = dyn_cast<ShuffleVectorInst>(BDV)) {
855 // For a canonical broadcast, ignore the undef argument
856 // (without this, we insert a parallel base shuffle for every broadcast)
857 F(SV->getOperand(0));
858 if (!SV->isZeroEltSplat())
859 F(SV->getOperand(1));
860 } else {
861 llvm_unreachable("unexpected BDV type");
862 }
863 };
864
865
866 // Recursively fill in all base defining values reachable from the initial
867 // one for which we don't already know a definite base value for
868 /* scope */ {
869 SmallVector<Value*, 16> Worklist;
870 Worklist.push_back(Def);
871 States.insert({Def, BDVState(Def)});
872 while (!Worklist.empty()) {
873 Value *Current = Worklist.pop_back_val();
874 assert(!isOriginalBaseResult(Current) && "why did it get added?");
875
876 auto visitIncomingValue = [&](Value *InVal) {
877 Value *Base = findBaseOrBDV(InVal, Cache);
878 if (isKnownBaseResult(Base) && areBothVectorOrScalar(Base, InVal))
879 // Known bases won't need new instructions introduced and can be
880 // ignored safely. However, this can only be done when InVal and Base
881 // are both scalar or both vector. Otherwise, we need to find a
882 // correct BDV for InVal, by creating an entry in the lattice
883 // (States).
884 return;
885 assert(isExpectedBDVType(Base) && "the only non-base values "
886 "we see should be base defining values");
887 if (States.insert(std::make_pair(Base, BDVState(Base))).second)
888 Worklist.push_back(Base);
889 };
890
891 visitBDVOperands(Current, visitIncomingValue);
892 }
893 }
894
895 #ifndef NDEBUG
896 VerifyStates();
897 LLVM_DEBUG(dbgs() << "States after initialization:\n");
898 for (auto Pair : States) {
899 LLVM_DEBUG(dbgs() << " " << Pair.second << " for " << *Pair.first << "\n");
900 }
901 #endif
902
903 // Iterate forward through the value graph pruning any node from the state
904 // list where all of the inputs are base pointers. The purpose of this is to
905 // reuse existing values when the derived pointer we were asked to materialize
906 // a base pointer for happens to be a base pointer itself. (Or a sub-graph
907 // feeding it does.)
908 SmallVector<Value *> ToRemove;
909 do {
910 ToRemove.clear();
911 for (auto Pair : States) {
912 Value *BDV = Pair.first;
913 auto canPruneInput = [&](Value *V) {
914 Value *BDV = findBaseOrBDV(V, Cache);
915 if (V->stripPointerCasts() != BDV)
916 return false;
917 // The assumption is that anything not in the state list
918 // propagates a base pointer.
919 return States.count(BDV) == 0;
920 };
921
922 bool CanPrune = true;
923 visitBDVOperands(BDV, [&](Value *Op) {
924 CanPrune = CanPrune && canPruneInput(Op);
925 });
926 if (CanPrune)
927 ToRemove.push_back(BDV);
928 }
929 for (Value *V : ToRemove) {
930 States.erase(V);
931 // Cache the fact V is its own base for later usage.
932 Cache[V] = V;
933 }
934 } while (!ToRemove.empty());
935
936 // Did we manage to prove that Def itself must be a base pointer?
937 if (!States.count(Def))
938 return Def;
939
940 // Return a phi state for a base defining value. We'll generate a new
941 // base state for known bases and expect to find a cached state otherwise.
942 auto GetStateForBDV = [&](Value *BaseValue, Value *Input) {
943 auto I = States.find(BaseValue);
944 if (I != States.end())
945 return I->second;
946 assert(areBothVectorOrScalar(BaseValue, Input));
947 return BDVState(BaseValue, BDVState::Base, BaseValue);
948 };
949
950 bool Progress = true;
951 while (Progress) {
952 #ifndef NDEBUG
953 const size_t OldSize = States.size();
954 #endif
955 Progress = false;
956 // We're only changing values in this loop, thus safe to keep iterators.
957 // Since this is computing a fixed point, the order of visit does not
958 // effect the result. TODO: We could use a worklist here and make this run
959 // much faster.
960 for (auto Pair : States) {
961 Value *BDV = Pair.first;
962 // Only values that do not have known bases or those that have differing
963 // type (scalar versus vector) from a possible known base should be in the
964 // lattice.
965 assert((!isKnownBaseResult(BDV) ||
966 !areBothVectorOrScalar(BDV, Pair.second.getBaseValue())) &&
967 "why did it get added?");
968
969 BDVState NewState(BDV);
970 visitBDVOperands(BDV, [&](Value *Op) {
971 Value *BDV = findBaseOrBDV(Op, Cache);
972 auto OpState = GetStateForBDV(BDV, Op);
973 NewState.meet(OpState);
974 });
975
976 BDVState OldState = States[BDV];
977 if (OldState != NewState) {
978 Progress = true;
979 States[BDV] = NewState;
980 }
981 }
982
983 assert(OldSize == States.size() &&
984 "fixed point shouldn't be adding any new nodes to state");
985 }
986
987 #ifndef NDEBUG
988 VerifyStates();
989 LLVM_DEBUG(dbgs() << "States after meet iteration:\n");
990 for (auto Pair : States) {
991 LLVM_DEBUG(dbgs() << " " << Pair.second << " for " << *Pair.first << "\n");
992 }
993 #endif
994
995 // Handle all instructions that have a vector BDV, but the instruction itself
996 // is of scalar type.
997 for (auto Pair : States) {
998 Instruction *I = cast<Instruction>(Pair.first);
999 BDVState State = Pair.second;
1000 auto *BaseValue = State.getBaseValue();
1001 // Only values that do not have known bases or those that have differing
1002 // type (scalar versus vector) from a possible known base should be in the
1003 // lattice.
1004 assert((!isKnownBaseResult(I) || !areBothVectorOrScalar(I, BaseValue)) &&
1005 "why did it get added?");
1006 assert(!State.isUnknown() && "Optimistic algorithm didn't complete!");
1007
1008 if (!State.isBase() || !isa<VectorType>(BaseValue->getType()))
1009 continue;
1010 // extractelement instructions are a bit special in that we may need to
1011 // insert an extract even when we know an exact base for the instruction.
1012 // The problem is that we need to convert from a vector base to a scalar
1013 // base for the particular index we're interested in.
1014 if (isa<ExtractElementInst>(I)) {
1015 auto *EE = cast<ExtractElementInst>(I);
1016 // TODO: In many cases, the new instruction is just EE itself. We should
1017 // exploit this, but can't do it here since it would break the invariant
1018 // about the BDV not being known to be a base.
1019 auto *BaseInst = ExtractElementInst::Create(
1020 State.getBaseValue(), EE->getIndexOperand(), "base_ee", EE);
1021 BaseInst->setMetadata("is_base_value", MDNode::get(I->getContext(), {}));
1022 States[I] = BDVState(I, BDVState::Base, BaseInst);
1023 } else if (!isa<VectorType>(I->getType())) {
1024 // We need to handle cases that have a vector base but the instruction is
1025 // a scalar type (these could be phis or selects or any instruction that
1026 // are of scalar type, but the base can be a vector type). We
1027 // conservatively set this as conflict. Setting the base value for these
1028 // conflicts is handled in the next loop which traverses States.
1029 States[I] = BDVState(I, BDVState::Conflict);
1030 }
1031 }
1032
1033 #ifndef NDEBUG
1034 VerifyStates();
1035 #endif
1036
1037 // Insert Phis for all conflicts
1038 // TODO: adjust naming patterns to avoid this order of iteration dependency
1039 for (auto Pair : States) {
1040 Instruction *I = cast<Instruction>(Pair.first);
1041 BDVState State = Pair.second;
1042 // Only values that do not have known bases or those that have differing
1043 // type (scalar versus vector) from a possible known base should be in the
1044 // lattice.
1045 assert((!isKnownBaseResult(I) || !areBothVectorOrScalar(I, State.getBaseValue())) &&
1046 "why did it get added?");
1047 assert(!State.isUnknown() && "Optimistic algorithm didn't complete!");
1048
1049 // Since we're joining a vector and scalar base, they can never be the
1050 // same. As a result, we should always see insert element having reached
1051 // the conflict state.
1052 assert(!isa<InsertElementInst>(I) || State.isConflict());
1053
1054 if (!State.isConflict())
1055 continue;
1056
1057 auto getMangledName = [](Instruction *I) -> std::string {
1058 if (isa<PHINode>(I)) {
1059 return suffixed_name_or(I, ".base", "base_phi");
1060 } else if (isa<SelectInst>(I)) {
1061 return suffixed_name_or(I, ".base", "base_select");
1062 } else if (isa<ExtractElementInst>(I)) {
1063 return suffixed_name_or(I, ".base", "base_ee");
1064 } else if (isa<InsertElementInst>(I)) {
1065 return suffixed_name_or(I, ".base", "base_ie");
1066 } else {
1067 return suffixed_name_or(I, ".base", "base_sv");
1068 }
1069 };
1070
1071 Instruction *BaseInst = I->clone();
1072 BaseInst->insertBefore(I);
1073 BaseInst->setName(getMangledName(I));
1074 // Add metadata marking this as a base value
1075 BaseInst->setMetadata("is_base_value", MDNode::get(I->getContext(), {}));
1076 States[I] = BDVState(I, BDVState::Conflict, BaseInst);
1077 }
1078
1079 #ifndef NDEBUG
1080 VerifyStates();
1081 #endif
1082
1083 // Returns an instruction which produces the base pointer for a given
1084 // instruction. The instruction is assumed to be an input to one of the BDVs
1085 // seen in the inference algorithm above. As such, we must either already
1086 // know its base defining value is a base, or have inserted a new
1087 // instruction to propagate the base of its BDV and have entered that newly
1088 // introduced instruction into the state table. In either case, we are
1089 // assured to be able to determine an instruction which produces its base
1090 // pointer.
1091 auto getBaseForInput = [&](Value *Input, Instruction *InsertPt) {
1092 Value *BDV = findBaseOrBDV(Input, Cache);
1093 Value *Base = nullptr;
1094 if (!States.count(BDV)) {
1095 assert(areBothVectorOrScalar(BDV, Input));
1096 Base = BDV;
1097 } else {
1098 // Either conflict or base.
1099 assert(States.count(BDV));
1100 Base = States[BDV].getBaseValue();
1101 }
1102 assert(Base && "Can't be null");
1103 // The cast is needed since base traversal may strip away bitcasts
1104 if (Base->getType() != Input->getType() && InsertPt)
1105 Base = new BitCastInst(Base, Input->getType(), "cast", InsertPt);
1106 return Base;
1107 };
1108
1109 // Fixup all the inputs of the new PHIs. Visit order needs to be
1110 // deterministic and predictable because we're naming newly created
1111 // instructions.
1112 for (auto Pair : States) {
1113 Instruction *BDV = cast<Instruction>(Pair.first);
1114 BDVState State = Pair.second;
1115
1116 // Only values that do not have known bases or those that have differing
1117 // type (scalar versus vector) from a possible known base should be in the
1118 // lattice.
1119 assert((!isKnownBaseResult(BDV) ||
1120 !areBothVectorOrScalar(BDV, State.getBaseValue())) &&
1121 "why did it get added?");
1122 assert(!State.isUnknown() && "Optimistic algorithm didn't complete!");
1123 if (!State.isConflict())
1124 continue;
1125
1126 if (PHINode *BasePHI = dyn_cast<PHINode>(State.getBaseValue())) {
1127 PHINode *PN = cast<PHINode>(BDV);
1128 const unsigned NumPHIValues = PN->getNumIncomingValues();
1129
1130 // The IR verifier requires phi nodes with multiple entries from the
1131 // same basic block to have the same incoming value for each of those
1132 // entries. Since we're inserting bitcasts in the loop, make sure we
1133 // do so at least once per incoming block.
1134 DenseMap<BasicBlock *, Value*> BlockToValue;
1135 for (unsigned i = 0; i < NumPHIValues; i++) {
1136 Value *InVal = PN->getIncomingValue(i);
1137 BasicBlock *InBB = PN->getIncomingBlock(i);
1138 if (!BlockToValue.count(InBB))
1139 BlockToValue[InBB] = getBaseForInput(InVal, InBB->getTerminator());
1140 else {
1141 #ifndef NDEBUG
1142 Value *OldBase = BlockToValue[InBB];
1143 Value *Base = getBaseForInput(InVal, nullptr);
1144 // In essence this assert states: the only way two values
1145 // incoming from the same basic block may be different is by
1146 // being different bitcasts of the same value. A cleanup
1147 // that remains TODO is changing findBaseOrBDV to return an
1148 // llvm::Value of the correct type (and still remain pure).
1149 // This will remove the need to add bitcasts.
1150 assert(Base->stripPointerCasts() == OldBase->stripPointerCasts() &&
1151 "Sanity -- findBaseOrBDV should be pure!");
1152 #endif
1153 }
1154 Value *Base = BlockToValue[InBB];
1155 BasePHI->setIncomingValue(i, Base);
1156 }
1157 } else if (SelectInst *BaseSI =
1158 dyn_cast<SelectInst>(State.getBaseValue())) {
1159 SelectInst *SI = cast<SelectInst>(BDV);
1160
1161 // Find the instruction which produces the base for each input.
1162 // We may need to insert a bitcast.
1163 BaseSI->setTrueValue(getBaseForInput(SI->getTrueValue(), BaseSI));
1164 BaseSI->setFalseValue(getBaseForInput(SI->getFalseValue(), BaseSI));
1165 } else if (auto *BaseEE =
1166 dyn_cast<ExtractElementInst>(State.getBaseValue())) {
1167 Value *InVal = cast<ExtractElementInst>(BDV)->getVectorOperand();
1168 // Find the instruction which produces the base for each input. We may
1169 // need to insert a bitcast.
1170 BaseEE->setOperand(0, getBaseForInput(InVal, BaseEE));
1171 } else if (auto *BaseIE = dyn_cast<InsertElementInst>(State.getBaseValue())){
1172 auto *BdvIE = cast<InsertElementInst>(BDV);
1173 auto UpdateOperand = [&](int OperandIdx) {
1174 Value *InVal = BdvIE->getOperand(OperandIdx);
1175 Value *Base = getBaseForInput(InVal, BaseIE);
1176 BaseIE->setOperand(OperandIdx, Base);
1177 };
1178 UpdateOperand(0); // vector operand
1179 UpdateOperand(1); // scalar operand
1180 } else {
1181 auto *BaseSV = cast<ShuffleVectorInst>(State.getBaseValue());
1182 auto *BdvSV = cast<ShuffleVectorInst>(BDV);
1183 auto UpdateOperand = [&](int OperandIdx) {
1184 Value *InVal = BdvSV->getOperand(OperandIdx);
1185 Value *Base = getBaseForInput(InVal, BaseSV);
1186 BaseSV->setOperand(OperandIdx, Base);
1187 };
1188 UpdateOperand(0); // vector operand
1189 if (!BdvSV->isZeroEltSplat())
1190 UpdateOperand(1); // vector operand
1191 else {
1192 // Never read, so just use undef
1193 Value *InVal = BdvSV->getOperand(1);
1194 BaseSV->setOperand(1, UndefValue::get(InVal->getType()));
1195 }
1196 }
1197 }
1198
1199 #ifndef NDEBUG
1200 VerifyStates();
1201 #endif
1202
1203 // Cache all of our results so we can cheaply reuse them
1204 // NOTE: This is actually two caches: one of the base defining value
1205 // relation and one of the base pointer relation! FIXME
1206 for (auto Pair : States) {
1207 auto *BDV = Pair.first;
1208 Value *Base = Pair.second.getBaseValue();
1209 assert(BDV && Base);
1210 // Only values that do not have known bases or those that have differing
1211 // type (scalar versus vector) from a possible known base should be in the
1212 // lattice.
1213 assert((!isKnownBaseResult(BDV) || !areBothVectorOrScalar(BDV, Base)) &&
1214 "why did it get added?");
1215
1216 LLVM_DEBUG(
1217 dbgs() << "Updating base value cache"
1218 << " for: " << BDV->getName() << " from: "
1219 << (Cache.count(BDV) ? Cache[BDV]->getName().str() : "none")
1220 << " to: " << Base->getName() << "\n");
1221
1222 Cache[BDV] = Base;
1223 }
1224 assert(Cache.count(Def));
1225 return Cache[Def];
1226 }
1227
1228 // For a set of live pointers (base and/or derived), identify the base
1229 // pointer of the object which they are derived from. This routine will
1230 // mutate the IR graph as needed to make the 'base' pointer live at the
1231 // definition site of 'derived'. This ensures that any use of 'derived' can
1232 // also use 'base'. This may involve the insertion of a number of
1233 // additional PHI nodes.
1234 //
1235 // preconditions: live is a set of pointer type Values
1236 //
1237 // side effects: may insert PHI nodes into the existing CFG, will preserve
1238 // CFG, will not remove or mutate any existing nodes
1239 //
1240 // post condition: PointerToBase contains one (derived, base) pair for every
1241 // pointer in live. Note that derived can be equal to base if the original
1242 // pointer was a base pointer.
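// As an illustrative (hypothetical) example: if 'live' contains a base
// pointer %obj and a derived pointer %obj.gep computed from it, the map ends
// up with the pairs (%obj.gep -> %obj) and (%obj -> %obj).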
1243 static void
1244 findBasePointers(const StatepointLiveSetTy &live,
1245 MapVector<Value *, Value *> &PointerToBase,
1246 DominatorTree *DT, DefiningValueMapTy &DVCache) {
1247 for (Value *ptr : live) {
1248 Value *base = findBasePointer(ptr, DVCache);
1249 assert(base && "failed to find base pointer");
1250 PointerToBase[ptr] = base;
1251 assert((!isa<Instruction>(base) || !isa<Instruction>(ptr) ||
1252 DT->dominates(cast<Instruction>(base)->getParent(),
1253 cast<Instruction>(ptr)->getParent())) &&
1254 "The base we found better dominate the derived pointer");
1255 }
1256 }
1257
1258 /// Find the required base pointers (and adjust the live set) for the given
1259 /// parse point.
1260 static void findBasePointers(DominatorTree &DT, DefiningValueMapTy &DVCache,
1261 CallBase *Call,
1262 PartiallyConstructedSafepointRecord &result) {
1263 MapVector<Value *, Value *> PointerToBase;
1264 StatepointLiveSetTy PotentiallyDerivedPointers = result.LiveSet;
1265 // We assume that all pointers passed to deopt are base pointers; as an
1266 // optimization, we can use this to avoid separately materializing the base
1267 // pointer graph. This is only relevant since we're very conservative about
1268 // generating new conflict nodes during base pointer insertion. If we were
1269 // smarter there, this would be irrelevant.
1270 if (auto Opt = Call->getOperandBundle(LLVMContext::OB_deopt))
1271 for (Value *V : Opt->Inputs) {
1272 if (!PotentiallyDerivedPointers.count(V))
1273 continue;
1274 PotentiallyDerivedPointers.remove(V);
1275 PointerToBase[V] = V;
1276 }
1277 findBasePointers(PotentiallyDerivedPointers, PointerToBase, &DT, DVCache);
1278
1279 if (PrintBasePointers) {
1280 errs() << "Base Pairs (w/o Relocation):\n";
1281 for (auto &Pair : PointerToBase) {
1282 errs() << " derived ";
1283 Pair.first->printAsOperand(errs(), false);
1284 errs() << " base ";
1285 Pair.second->printAsOperand(errs(), false);
1286 errs() << "\n";
1287 }
1288 }
1289
1290 result.PointerToBase = PointerToBase;
1291 }
1292
1293 /// Given an updated version of the dataflow liveness results, update the
1294 /// liveset and base pointer maps for the call site CS.
1295 static void recomputeLiveInValues(GCPtrLivenessData &RevisedLivenessData,
1296 CallBase *Call,
1297 PartiallyConstructedSafepointRecord &result);
1298
1299 static void recomputeLiveInValues(
1300 Function &F, DominatorTree &DT, ArrayRef<CallBase *> toUpdate,
1301 MutableArrayRef<struct PartiallyConstructedSafepointRecord> records) {
1302 // TODO-PERF: reuse the original liveness, then simply run the dataflow
1303 // again. The old values are still live and will help it stabilize quickly.
1304 GCPtrLivenessData RevisedLivenessData;
1305 computeLiveInValues(DT, F, RevisedLivenessData);
1306 for (size_t i = 0; i < records.size(); i++) {
1307 struct PartiallyConstructedSafepointRecord &info = records[i];
1308 recomputeLiveInValues(RevisedLivenessData, toUpdate[i], info);
1309 }
1310 }
1311
1312 // When inserting gc.relocate and gc.result calls, we need to ensure there are
1313 // no uses of the original value / return value between the gc.statepoint and
1314 // the gc.relocate / gc.result call. One case which can arise is a phi node
1315 // starting one of the successor blocks. We also need to be able to insert the
1316 // gc.relocates only on the path which goes through the statepoint. We might
1317 // need to split an edge to make this possible.
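// As an illustrative (hypothetical) example: for
//   invoke void @f() to label %normal unwind label %lpad
// where %normal has another predecessor and begins with a phi, the %normal
// edge is split so the gc.relocates can be placed in a block reached only
// from this invoke, and the resulting single-entry phi is folded away.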
1318 static BasicBlock *
1319 normalizeForInvokeSafepoint(BasicBlock *BB, BasicBlock *InvokeParent,
1320 DominatorTree &DT) {
1321 BasicBlock *Ret = BB;
1322 if (!BB->getUniquePredecessor())
1323 Ret = SplitBlockPredecessors(BB, InvokeParent, "", &DT);
1324
1325 // Now that 'Ret' has a unique predecessor we can safely remove all phi nodes
1326 // from it.
1327 FoldSingleEntryPHINodes(Ret);
1328 assert(!isa<PHINode>(Ret->begin()) &&
1329 "All PHI nodes should have been removed!");
1330
1331 // At this point, we can safely insert a gc.relocate or gc.result as the first
1332 // instruction in Ret if needed.
1333 return Ret;
1334 }
1335
1336 // List of all function attributes which must be stripped when lowering from
1337 // abstract machine model to physical machine model. Essentially, these are
1338 // all the effects a safepoint might have which we ignored in the abstract
1339 // machine model for purposes of optimization. We have to strip these on
1340 // both function declarations and call sites.
1341 static constexpr Attribute::AttrKind FnAttrsToStrip[] =
1342 {Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly,
1343 Attribute::ArgMemOnly, Attribute::InaccessibleMemOnly,
1344 Attribute::InaccessibleMemOrArgMemOnly,
1345 Attribute::NoSync, Attribute::NoFree};
1346
1347 // List of all parameter and return attributes which must be stripped when
1348 // lowering from the abstract machine model. Note that we list attributes
1349 // here which aren't valid as return attributes; that is okay. There are
1350 // also some additional attributes with arguments which are handled
1351 // explicitly and are not in this list.
1352 static constexpr Attribute::AttrKind ParamAttrsToStrip[] =
1353 {Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly,
1354 Attribute::NoAlias, Attribute::NoFree};
1355
1356
1357 // Create new attribute set containing only attributes which can be transferred
1358 // from original call to the safepoint.
1359 static AttributeList legalizeCallAttributes(LLVMContext &Ctx,
1360 AttributeList AL) {
1361 if (AL.isEmpty())
1362 return AL;
1363
1364 // Remove the readonly, readnone, and statepoint function attributes.
1365 AttrBuilder FnAttrs = AL.getFnAttributes();
1366 for (auto Attr : FnAttrsToStrip)
1367 FnAttrs.removeAttribute(Attr);
1368
1369 for (Attribute A : AL.getFnAttributes()) {
1370 if (isStatepointDirectiveAttr(A))
1371 FnAttrs.remove(A);
1372 }
1373
1374 // Just skip parameter and return attributes for now
1375 return AttributeList::get(Ctx, AttributeList::FunctionIndex,
1376 AttributeSet::get(Ctx, FnAttrs));
1377 }
1378
1379 /// Helper function to place all gc relocates necessary for the given
1380 /// statepoint.
1381 /// Inputs:
1382 /// liveVariables - list of variables to be relocated.
1383 /// basePtrs - base pointers.
1384 /// statepointToken - statepoint instruction to which relocates should be
1385 /// bound.
1386 /// Builder - LLVM IR builder to be used to construct new calls.
1387 static void CreateGCRelocates(ArrayRef<Value *> LiveVariables,
1388 ArrayRef<Value *> BasePtrs,
1389 Instruction *StatepointToken,
1390 IRBuilder<> &Builder) {
1391 if (LiveVariables.empty())
1392 return;
1393
1394 auto FindIndex = [](ArrayRef<Value *> LiveVec, Value *Val) {
1395 auto ValIt = llvm::find(LiveVec, Val);
1396 assert(ValIt != LiveVec.end() && "Val not found in LiveVec!");
1397 size_t Index = std::distance(LiveVec.begin(), ValIt);
1398 assert(Index < LiveVec.size() && "Bug in std::find?");
1399 return Index;
1400 };
1401 Module *M = StatepointToken->getModule();
1402
1403 // All gc_relocate are generated as i8 addrspace(1)* (or a vector type whose
1404 // element type is i8 addrspace(1)*). We originally generated unique
1405 // declarations for each pointer type, but this proved problematic because
1406 // the intrinsic mangling code is incomplete and fragile. Since we're moving
1407 // towards a single unified pointer type anyways, we can just cast everything
1408 // to an i8* of the right address space. A bitcast is added later to convert
1409 // gc_relocate to the actual value's type.
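//
// For example, the declaration requested below is roughly (illustrative, not
// copied from generated output):
//   declare i8 addrspace(1)*
//       @llvm.experimental.gc.relocate.p1i8(token, i32, i32)
// with a FixedVectorType of the same element type used for vector inputs.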
1410 auto getGCRelocateDecl = [&] (Type *Ty) {
1411 assert(isHandledGCPointerType(Ty));
1412 auto AS = Ty->getScalarType()->getPointerAddressSpace();
1413 Type *NewTy = Type::getInt8PtrTy(M->getContext(), AS);
1414 if (auto *VT = dyn_cast<VectorType>(Ty))
1415 NewTy = FixedVectorType::get(NewTy,
1416 cast<FixedVectorType>(VT)->getNumElements());
1417 return Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_relocate,
1418 {NewTy});
1419 };
1420
1421 // Lazily populated map from input types to the canonicalized form mentioned
1422 // in the comment above. This should probably be cached somewhere more
1423 // broadly.
1424 DenseMap<Type *, Function *> TypeToDeclMap;
1425
1426 for (unsigned i = 0; i < LiveVariables.size(); i++) {
1427 // Generate the gc.relocate call and save the result
1428 Value *BaseIdx = Builder.getInt32(FindIndex(LiveVariables, BasePtrs[i]));
1429 Value *LiveIdx = Builder.getInt32(i);
1430
1431 Type *Ty = LiveVariables[i]->getType();
1432 if (!TypeToDeclMap.count(Ty))
1433 TypeToDeclMap[Ty] = getGCRelocateDecl(Ty);
1434 Function *GCRelocateDecl = TypeToDeclMap[Ty];
1435
1436 // only specify a debug name if we can give a useful one
1437 CallInst *Reloc = Builder.CreateCall(
1438 GCRelocateDecl, {StatepointToken, BaseIdx, LiveIdx},
1439 suffixed_name_or(LiveVariables[i], ".relocated", ""));
1440 // Trick CodeGen into thinking there are lots of free registers at this
1441 // fake call.
1442 Reloc->setCallingConv(CallingConv::Cold);
1443 }
1444 }
1445
1446 namespace {
1447
1448 /// This struct is used to defer RAUWs and `eraseFromParent`s. Using this
1449 /// avoids having to worry about keeping around dangling pointers to Values.
1450 class DeferredReplacement {
1451 AssertingVH<Instruction> Old;
1452 AssertingVH<Instruction> New;
1453 bool IsDeoptimize = false;
1454
1455 DeferredReplacement() = default;
1456
1457 public:
1458 static DeferredReplacement createRAUW(Instruction *Old, Instruction *New) {
1459 assert(Old != New && Old && New &&
1460 "Cannot RAUW equal values or to / from null!");
1461
1462 DeferredReplacement D;
1463 D.Old = Old;
1464 D.New = New;
1465 return D;
1466 }
1467
1468 static DeferredReplacement createDelete(Instruction *ToErase) {
1469 DeferredReplacement D;
1470 D.Old = ToErase;
1471 return D;
1472 }
1473
1474 static DeferredReplacement createDeoptimizeReplacement(Instruction *Old) {
1475 #ifndef NDEBUG
1476 auto *F = cast<CallInst>(Old)->getCalledFunction();
1477 assert(F && F->getIntrinsicID() == Intrinsic::experimental_deoptimize &&
1478 "Only way to construct a deoptimize deferred replacement");
1479 #endif
1480 DeferredReplacement D;
1481 D.Old = Old;
1482 D.IsDeoptimize = true;
1483 return D;
1484 }
1485
1486 /// Does the task represented by this instance.
1487 void doReplacement() {
1488 Instruction *OldI = Old;
1489 Instruction *NewI = New;
1490
1491 assert(OldI != NewI && "Disallowed at construction?!");
1492 assert((!IsDeoptimize || !New) &&
1493 "Deoptimize intrinsics are not replaced!");
1494
1495 Old = nullptr;
1496 New = nullptr;
1497
1498 if (NewI)
1499 OldI->replaceAllUsesWith(NewI);
1500
1501 if (IsDeoptimize) {
1502 // Note: we've inserted instructions, so the call to llvm.deoptimize may
1503 // not necessarily be followed by the matching return.
1504 auto *RI = cast<ReturnInst>(OldI->getParent()->getTerminator());
1505 new UnreachableInst(RI->getContext(), RI);
1506 RI->eraseFromParent();
1507 }
1508
1509 OldI->eraseFromParent();
1510 }
1511 };
1512
1513 } // end anonymous namespace
1514
1515 static StringRef getDeoptLowering(CallBase *Call) {
1516 const char *DeoptLowering = "deopt-lowering";
1517 if (Call->hasFnAttr(DeoptLowering)) {
1518 // FIXME: Calls have a *really* confusing interface around attributes
1519 // with values.
1520 const AttributeList &CSAS = Call->getAttributes();
1521 if (CSAS.hasAttribute(AttributeList::FunctionIndex, DeoptLowering))
1522 return CSAS.getAttribute(AttributeList::FunctionIndex, DeoptLowering)
1523 .getValueAsString();
1524 Function *F = Call->getCalledFunction();
1525 assert(F && F->hasFnAttribute(DeoptLowering));
1526 return F->getFnAttribute(DeoptLowering).getValueAsString();
1527 }
1528 return "live-through";
1529 }
1530
1531 static void
1532 makeStatepointExplicitImpl(CallBase *Call, /* to replace */
1533 const SmallVectorImpl<Value *> &BasePtrs,
1534 const SmallVectorImpl<Value *> &LiveVariables,
1535 PartiallyConstructedSafepointRecord &Result,
1536 std::vector<DeferredReplacement> &Replacements) {
1537 assert(BasePtrs.size() == LiveVariables.size());
1538
1539 // Then go ahead and use the builder to actually do the inserts. We insert
1540 // immediately before the previous instruction under the assumption that all
1541 // arguments will be available here. We can't insert afterwards since we may
1542 // be replacing a terminator.
1543 IRBuilder<> Builder(Call);
1544
1545 ArrayRef<Value *> GCArgs(LiveVariables);
1546 uint64_t StatepointID = StatepointDirectives::DefaultStatepointID;
1547 uint32_t NumPatchBytes = 0;
1548 uint32_t Flags = uint32_t(StatepointFlags::None);
1549
1550 SmallVector<Value *, 8> CallArgs(Call->args());
1551 Optional<ArrayRef<Use>> DeoptArgs;
1552 if (auto Bundle = Call->getOperandBundle(LLVMContext::OB_deopt))
1553 DeoptArgs = Bundle->Inputs;
1554 Optional<ArrayRef<Use>> TransitionArgs;
1555 if (auto Bundle = Call->getOperandBundle(LLVMContext::OB_gc_transition)) {
1556 TransitionArgs = Bundle->Inputs;
1557 // TODO: This flag no longer serves a purpose and can be removed later
1558 Flags |= uint32_t(StatepointFlags::GCTransition);
1559 }
1560
1561 // Instead of lowering calls to @llvm.experimental.deoptimize as normal calls
1562 // with a return value, we lower them as never-returning calls to
1563 // __llvm_deoptimize that are followed by unreachable to get better codegen.
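// Roughly (illustrative IR, names made up):
//   %r = call i32 @llvm.experimental.deoptimize.i32() [ "deopt"(i32 0) ]
//   ret i32 %r
// becomes a statepoint wrapping @__llvm_deoptimize followed by unreachable,
// with the original ret removed by the deferred replacement machinery below.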
1564 bool IsDeoptimize = false;
1565
1566 StatepointDirectives SD =
1567 parseStatepointDirectivesFromAttrs(Call->getAttributes());
1568 if (SD.NumPatchBytes)
1569 NumPatchBytes = *SD.NumPatchBytes;
1570 if (SD.StatepointID)
1571 StatepointID = *SD.StatepointID;
1572
1573 // Pass through the requested lowering if any. The default is live-through.
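// For example (illustrative), a call site carrying the string attribute
//   "deopt-lowering"="live-in"
// selects the DeoptLiveIn statepoint flag; any other value must be
// "live-through".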
1574 StringRef DeoptLowering = getDeoptLowering(Call);
1575 if (DeoptLowering.equals("live-in"))
1576 Flags |= uint32_t(StatepointFlags::DeoptLiveIn);
1577 else {
1578 assert(DeoptLowering.equals("live-through") && "Unsupported value!");
1579 }
1580
1581 Value *CallTarget = Call->getCalledOperand();
1582 if (Function *F = dyn_cast<Function>(CallTarget)) {
1583 auto IID = F->getIntrinsicID();
1584 if (IID == Intrinsic::experimental_deoptimize) {
1585 // Calls to llvm.experimental.deoptimize are lowered to calls to the
1586 // __llvm_deoptimize symbol. We want to resolve this now, since the
1587 // verifier does not allow taking the address of an intrinsic function.
1588
1589 SmallVector<Type *, 8> DomainTy;
1590 for (Value *Arg : CallArgs)
1591 DomainTy.push_back(Arg->getType());
1592 auto *FTy = FunctionType::get(Type::getVoidTy(F->getContext()), DomainTy,
1593 /* isVarArg = */ false);
1594
1595 // Note: CallTarget can be a bitcast instruction of a symbol if there are
1596 // calls to @llvm.experimental.deoptimize with different argument types in
1597 // the same module. This is fine -- we assume the frontend knew what it
1598 // was doing when generating this kind of IR.
1599 CallTarget = F->getParent()
1600 ->getOrInsertFunction("__llvm_deoptimize", FTy)
1601 .getCallee();
1602
1603 IsDeoptimize = true;
1604 } else if (IID == Intrinsic::memcpy_element_unordered_atomic ||
1605 IID == Intrinsic::memmove_element_unordered_atomic) {
1606 // Unordered atomic memcpy and memmove intrinsics which are not explicitly
1607 // marked as "gc-leaf-function" should be lowered in a GC parseable way.
1608 // Specifically, these calls should be lowered to the
1609 // __llvm_{memcpy|memmove}_element_unordered_atomic_safepoint symbols.
1610 // Similarly to __llvm_deoptimize we want to resolve this now, since the
1611 // verifier does not allow taking the address of an intrinsic function.
1612 //
1613 // Moreover we need to shuffle the arguments for the call in order to
1614 // accommodate GC. The underlying source and destination objects might be
1615 // relocated during the copy operation should a GC occur. To relocate the
1616 // derived source and destination pointers the implementation of the
1617 // intrinsic should know the corresponding base pointers.
1618 //
1619 // To make the base pointers available pass them explicitly as arguments:
1620 // memcpy(dest_derived, source_derived, ...) =>
1621 // memcpy(dest_base, dest_offset, source_base, source_offset, ...)
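//
// For example (illustrative), a 4-byte-element atomic memcpy ends up calling
//   __llvm_memcpy_element_unordered_atomic_safepoint_4(
//       dest_base, dest_offset, source_base, source_offset, length)
// as selected by GetFunctionName below.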
1622 auto &Context = Call->getContext();
1623 auto &DL = Call->getModule()->getDataLayout();
1624 auto GetBaseAndOffset = [&](Value *Derived) {
1625 assert(Result.PointerToBase.count(Derived));
1626 unsigned AddressSpace = Derived->getType()->getPointerAddressSpace();
1627 unsigned IntPtrSize = DL.getPointerSizeInBits(AddressSpace);
1628 Value *Base = Result.PointerToBase.find(Derived)->second;
1629 Value *Base_int = Builder.CreatePtrToInt(
1630 Base, Type::getIntNTy(Context, IntPtrSize));
1631 Value *Derived_int = Builder.CreatePtrToInt(
1632 Derived, Type::getIntNTy(Context, IntPtrSize));
1633 return std::make_pair(Base, Builder.CreateSub(Derived_int, Base_int));
1634 };
1635
1636 auto *Dest = CallArgs[0];
1637 Value *DestBase, *DestOffset;
1638 std::tie(DestBase, DestOffset) = GetBaseAndOffset(Dest);
1639
1640 auto *Source = CallArgs[1];
1641 Value *SourceBase, *SourceOffset;
1642 std::tie(SourceBase, SourceOffset) = GetBaseAndOffset(Source);
1643
1644 auto *LengthInBytes = CallArgs[2];
1645 auto *ElementSizeCI = cast<ConstantInt>(CallArgs[3]);
1646
1647 CallArgs.clear();
1648 CallArgs.push_back(DestBase);
1649 CallArgs.push_back(DestOffset);
1650 CallArgs.push_back(SourceBase);
1651 CallArgs.push_back(SourceOffset);
1652 CallArgs.push_back(LengthInBytes);
1653
1654 SmallVector<Type *, 8> DomainTy;
1655 for (Value *Arg : CallArgs)
1656 DomainTy.push_back(Arg->getType());
1657 auto *FTy = FunctionType::get(Type::getVoidTy(F->getContext()), DomainTy,
1658 /* isVarArg = */ false);
1659
1660 auto GetFunctionName = [](Intrinsic::ID IID, ConstantInt *ElementSizeCI) {
1661 uint64_t ElementSize = ElementSizeCI->getZExtValue();
1662 if (IID == Intrinsic::memcpy_element_unordered_atomic) {
1663 switch (ElementSize) {
1664 case 1:
1665 return "__llvm_memcpy_element_unordered_atomic_safepoint_1";
1666 case 2:
1667 return "__llvm_memcpy_element_unordered_atomic_safepoint_2";
1668 case 4:
1669 return "__llvm_memcpy_element_unordered_atomic_safepoint_4";
1670 case 8:
1671 return "__llvm_memcpy_element_unordered_atomic_safepoint_8";
1672 case 16:
1673 return "__llvm_memcpy_element_unordered_atomic_safepoint_16";
1674 default:
1675 llvm_unreachable("unexpected element size!");
1676 }
1677 }
1678 assert(IID == Intrinsic::memmove_element_unordered_atomic);
1679 switch (ElementSize) {
1680 case 1:
1681 return "__llvm_memmove_element_unordered_atomic_safepoint_1";
1682 case 2:
1683 return "__llvm_memmove_element_unordered_atomic_safepoint_2";
1684 case 4:
1685 return "__llvm_memmove_element_unordered_atomic_safepoint_4";
1686 case 8:
1687 return "__llvm_memmove_element_unordered_atomic_safepoint_8";
1688 case 16:
1689 return "__llvm_memmove_element_unordered_atomic_safepoint_16";
1690 default:
1691 llvm_unreachable("unexpected element size!");
1692 }
1693 };
1694
1695 CallTarget =
1696 F->getParent()
1697 ->getOrInsertFunction(GetFunctionName(IID, ElementSizeCI), FTy)
1698 .getCallee();
1699 }
1700 }
1701
1702 // Create the statepoint given all the arguments
1703 GCStatepointInst *Token = nullptr;
1704 if (auto *CI = dyn_cast<CallInst>(Call)) {
1705 CallInst *SPCall = Builder.CreateGCStatepointCall(
1706 StatepointID, NumPatchBytes, CallTarget, Flags, CallArgs,
1707 TransitionArgs, DeoptArgs, GCArgs, "safepoint_token");
1708
1709 SPCall->setTailCallKind(CI->getTailCallKind());
1710 SPCall->setCallingConv(CI->getCallingConv());
1711
1712 // Currently we will fail on parameter attributes and on certain
1713 // function attributes. In case we can handle this set of attributes, set
1714 // up function attrs directly on the statepoint and return attrs later for
1715 // the gc_result intrinsic.
1716 SPCall->setAttributes(
1717 legalizeCallAttributes(CI->getContext(), CI->getAttributes()));
1718
1719 Token = cast<GCStatepointInst>(SPCall);
1720
1721 // Put the following gc_result and gc_relocate calls immediately after
1722 // the old call (which we're about to delete)
1723 assert(CI->getNextNode() && "Not a terminator, must have next!");
1724 Builder.SetInsertPoint(CI->getNextNode());
1725 Builder.SetCurrentDebugLocation(CI->getNextNode()->getDebugLoc());
1726 } else {
1727 auto *II = cast<InvokeInst>(Call);
1728
1729 // Insert the new invoke into the old block. We'll remove the old one in a
1730 // moment at which point this will become the new terminator for the
1731 // original block.
1732 InvokeInst *SPInvoke = Builder.CreateGCStatepointInvoke(
1733 StatepointID, NumPatchBytes, CallTarget, II->getNormalDest(),
1734 II->getUnwindDest(), Flags, CallArgs, TransitionArgs, DeoptArgs, GCArgs,
1735 "statepoint_token");
1736
1737 SPInvoke->setCallingConv(II->getCallingConv());
1738
1739 // Currently we will fail on parameter attributes and on certain
1740 // function attributes. In case we can handle this set of attributes, set
1741 // up function attrs directly on the statepoint and return attrs later for
1742 // the gc_result intrinsic.
1743 SPInvoke->setAttributes(
1744 legalizeCallAttributes(II->getContext(), II->getAttributes()));
1745
1746 Token = cast<GCStatepointInst>(SPInvoke);
1747
1748 // Generate gc relocates in exceptional path
1749 BasicBlock *UnwindBlock = II->getUnwindDest();
1750 assert(!isa<PHINode>(UnwindBlock->begin()) &&
1751 UnwindBlock->getUniquePredecessor() &&
1752 "can't safely insert in this block!");
1753
1754 Builder.SetInsertPoint(&*UnwindBlock->getFirstInsertionPt());
1755 Builder.SetCurrentDebugLocation(II->getDebugLoc());
1756
1757 // Attach exceptional gc relocates to the landingpad.
1758 Instruction *ExceptionalToken = UnwindBlock->getLandingPadInst();
1759 Result.UnwindToken = ExceptionalToken;
1760
1761 CreateGCRelocates(LiveVariables, BasePtrs, ExceptionalToken, Builder);
1762
1763 // Generate gc relocates and returns for normal block
1764 BasicBlock *NormalDest = II->getNormalDest();
1765 assert(!isa<PHINode>(NormalDest->begin()) &&
1766 NormalDest->getUniquePredecessor() &&
1767 "can't safely insert in this block!");
1768
1769 Builder.SetInsertPoint(&*NormalDest->getFirstInsertionPt());
1770
1771 // gc relocates will be generated later as if it were a regular call
1772 // statepoint
1773 }
1774 assert(Token && "Should be set in one of the above branches!");
1775
1776 if (IsDeoptimize) {
1777 // If we're wrapping an @llvm.experimental.deoptimize in a statepoint, we
1778 // transform the tail-call like structure to a call to a void function
1779 // followed by unreachable to get better codegen.
1780 Replacements.push_back(
1781 DeferredReplacement::createDeoptimizeReplacement(Call));
1782 } else {
1783 Token->setName("statepoint_token");
1784 if (!Call->getType()->isVoidTy() && !Call->use_empty()) {
1785 StringRef Name = Call->hasName() ? Call->getName() : "";
1786 CallInst *GCResult = Builder.CreateGCResult(Token, Call->getType(), Name);
1787 GCResult->setAttributes(
1788 AttributeList::get(GCResult->getContext(), AttributeList::ReturnIndex,
1789 Call->getAttributes().getRetAttributes()));
1790
1791 // We cannot RAUW or delete CS.getInstruction() because it could be in the
1792 // live set of some other safepoint, in which case that safepoint's
1793 // PartiallyConstructedSafepointRecord will hold a raw pointer to this
1794 // llvm::Instruction. Instead, we defer the replacement and deletion to
1795 // after the live sets have been made explicit in the IR, and we no longer
1796 // have raw pointers to worry about.
1797 Replacements.emplace_back(
1798 DeferredReplacement::createRAUW(Call, GCResult));
1799 } else {
1800 Replacements.emplace_back(DeferredReplacement::createDelete(Call));
1801 }
1802 }
1803
1804 Result.StatepointToken = Token;
1805
1806 // Second, create a gc.relocate for every live variable
1807 CreateGCRelocates(LiveVariables, BasePtrs, Token, Builder);
1808 }
1809
1810 // Replace an existing gc.statepoint with a new one and a set of gc.relocates
1811 // which make the relocations happening at this safepoint explicit.
1812 //
1813 // WARNING: Does not do any fixup to adjust users of the original live
1814 // values. That's the caller's responsibility.
1815 static void
1816 makeStatepointExplicit(DominatorTree &DT, CallBase *Call,
1817 PartiallyConstructedSafepointRecord &Result,
1818 std::vector<DeferredReplacement> &Replacements) {
1819 const auto &LiveSet = Result.LiveSet;
1820 const auto &PointerToBase = Result.PointerToBase;
1821
1822 // Convert to vector for efficient cross referencing.
1823 SmallVector<Value *, 64> BaseVec, LiveVec;
1824 LiveVec.reserve(LiveSet.size());
1825 BaseVec.reserve(LiveSet.size());
1826 for (Value *L : LiveSet) {
1827 LiveVec.push_back(L);
1828 assert(PointerToBase.count(L));
1829 Value *Base = PointerToBase.find(L)->second;
1830 BaseVec.push_back(Base);
1831 }
1832 assert(LiveVec.size() == BaseVec.size());
1833
1834 // Do the actual rewriting and delete the old statepoint
1835 makeStatepointExplicitImpl(Call, BaseVec, LiveVec, Result, Replacements);
1836 }
1837
1838 // Helper function for the relocationViaAlloca.
1839 //
1840 // It receives an iterator over the statepoint gc relocates and emits a store
1841 // to the assigned location (via allocaMap) for each one of them. It adds the
1842 // visited values into the visitedLiveValues set, which we will later use
1843 // for sanity checking.
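//
// Roughly, for each relocate (illustrative IR, names made up):
//   %p.relocated = call coldcc i8 addrspace(1)*
//       @llvm.experimental.gc.relocate.p1i8(token %tok, i32 7, i32 7)
//   %p.relocated.casted = bitcast i8 addrspace(1)* %p.relocated to <orig type>
//   store <orig type> %p.relocated.casted, <orig type>* %alloca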
1844 static void
1845 insertRelocationStores(iterator_range<Value::user_iterator> GCRelocs,
1846 DenseMap<Value *, AllocaInst *> &AllocaMap,
1847 DenseSet<Value *> &VisitedLiveValues) {
1848 for (User *U : GCRelocs) {
1849 GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(U);
1850 if (!Relocate)
1851 continue;
1852
1853 Value *OriginalValue = Relocate->getDerivedPtr();
1854 assert(AllocaMap.count(OriginalValue));
1855 Value *Alloca = AllocaMap[OriginalValue];
1856
1857 // Emit store into the related alloca
1858 // All gc_relocates are i8 addrspace(1)* typed, and it must be bitcasted to
1859 // the correct type according to alloca.
1860 assert(Relocate->getNextNode() &&
1861 "Should always have one since it's not a terminator");
1862 IRBuilder<> Builder(Relocate->getNextNode());
1863 Value *CastedRelocatedValue =
1864 Builder.CreateBitCast(Relocate,
1865 cast<AllocaInst>(Alloca)->getAllocatedType(),
1866 suffixed_name_or(Relocate, ".casted", ""));
1867
1868 new StoreInst(CastedRelocatedValue, Alloca,
1869 cast<Instruction>(CastedRelocatedValue)->getNextNode());
1870
1871 #ifndef NDEBUG
1872 VisitedLiveValues.insert(OriginalValue);
1873 #endif
1874 }
1875 }
1876
1877 // Helper function for the "relocationViaAlloca". Similar to the
1878 // "insertRelocationStores" but works for rematerialized values.
1879 static void insertRematerializationStores(
1880 const RematerializedValueMapTy &RematerializedValues,
1881 DenseMap<Value *, AllocaInst *> &AllocaMap,
1882 DenseSet<Value *> &VisitedLiveValues) {
1883 for (auto RematerializedValuePair: RematerializedValues) {
1884 Instruction *RematerializedValue = RematerializedValuePair.first;
1885 Value *OriginalValue = RematerializedValuePair.second;
1886
1887 assert(AllocaMap.count(OriginalValue) &&
1888 "Can not find alloca for rematerialized value");
1889 Value *Alloca = AllocaMap[OriginalValue];
1890
1891 new StoreInst(RematerializedValue, Alloca,
1892 RematerializedValue->getNextNode());
1893
1894 #ifndef NDEBUG
1895 VisitedLiveValues.insert(OriginalValue);
1896 #endif
1897 }
1898 }
1899
1900 /// Do all the relocation update via allocas and mem2reg
1901 static void relocationViaAlloca(
1902 Function &F, DominatorTree &DT, ArrayRef<Value *> Live,
1903 ArrayRef<PartiallyConstructedSafepointRecord> Records) {
1904 #ifndef NDEBUG
1905 // record initial number of (static) allocas; we'll check we have the same
1906 // number when we get done.
1907 int InitialAllocaNum = 0;
1908 for (Instruction &I : F.getEntryBlock())
1909 if (isa<AllocaInst>(I))
1910 InitialAllocaNum++;
1911 #endif
1912
1913 // TODO-PERF: change data structures, reserve
1914 DenseMap<Value *, AllocaInst *> AllocaMap;
1915 SmallVector<AllocaInst *, 200> PromotableAllocas;
1916 // Used later to check that we have enough allocas to store all values
1917 std::size_t NumRematerializedValues = 0;
1918 PromotableAllocas.reserve(Live.size());
1919
1920 // Emit alloca for "LiveValue" and record it in "allocaMap" and
1921 // "PromotableAllocas"
1922 const DataLayout &DL = F.getParent()->getDataLayout();
1923 auto emitAllocaFor = [&](Value *LiveValue) {
1924 AllocaInst *Alloca = new AllocaInst(LiveValue->getType(),
1925 DL.getAllocaAddrSpace(), "",
1926 F.getEntryBlock().getFirstNonPHI());
1927 AllocaMap[LiveValue] = Alloca;
1928 PromotableAllocas.push_back(Alloca);
1929 };
1930
1931 // Emit alloca for each live gc pointer
1932 for (Value *V : Live)
1933 emitAllocaFor(V);
1934
1935 // Emit allocas for rematerialized values
1936 for (const auto &Info : Records)
1937 for (auto RematerializedValuePair : Info.RematerializedValues) {
1938 Value *OriginalValue = RematerializedValuePair.second;
1939 if (AllocaMap.count(OriginalValue) != 0)
1940 continue;
1941
1942 emitAllocaFor(OriginalValue);
1943 ++NumRematerializedValues;
1944 }
1945
1946 // The next two loops are part of the same conceptual operation. We need to
1947 // insert a store to the alloca after the original def and at each
1948 // redefinition. We need to insert a load before each use. These are split
1949 // into distinct loops for performance reasons.
1950
1951 // Update gc pointer after each statepoint: either store a relocated value or
1952 // null (if no relocated value was found for this gc pointer and it is not a
1953 // gc_result). This must happen before we update the statepoint with a load
1954 // of the alloca, otherwise we lose the link between the statepoint and the old def.
1955 for (const auto &Info : Records) {
1956 Value *Statepoint = Info.StatepointToken;
1957
1958 // This will be used for consistency check
1959 DenseSet<Value *> VisitedLiveValues;
1960
1961 // Insert stores for normal statepoint gc relocates
1962 insertRelocationStores(Statepoint->users(), AllocaMap, VisitedLiveValues);
1963
1964 // If the statepoint was an invoke, we will also insert stores for the
1965 // exceptional path gc relocates.
1966 if (isa<InvokeInst>(Statepoint)) {
1967 insertRelocationStores(Info.UnwindToken->users(), AllocaMap,
1968 VisitedLiveValues);
1969 }
1970
1971 // Do similar thing with rematerialized values
1972 insertRematerializationStores(Info.RematerializedValues, AllocaMap,
1973 VisitedLiveValues);
1974
1975 if (ClobberNonLive) {
1976 // As a debugging aid, pretend that an unrelocated pointer becomes null at
1977 // the gc.statepoint. This will turn some subtle GC problems into
1978 // slightly easier to debug SEGVs. Note that on large IR files with
1979 // lots of gc.statepoints this is extremely costly both memory and time
1980 // wise.
1981 SmallVector<AllocaInst *, 64> ToClobber;
1982 for (auto Pair : AllocaMap) {
1983 Value *Def = Pair.first;
1984 AllocaInst *Alloca = Pair.second;
1985
1986 // This value was relocated
1987 if (VisitedLiveValues.count(Def)) {
1988 continue;
1989 }
1990 ToClobber.push_back(Alloca);
1991 }
1992
1993 auto InsertClobbersAt = [&](Instruction *IP) {
1994 for (auto *AI : ToClobber) {
1995 auto PT = cast<PointerType>(AI->getAllocatedType());
1996 Constant *CPN = ConstantPointerNull::get(PT);
1997 new StoreInst(CPN, AI, IP);
1998 }
1999 };
2000
2001 // Insert the clobbering stores. These may get intermixed with the
2002 // gc.results and gc.relocates, but that's fine.
2003 if (auto II = dyn_cast<InvokeInst>(Statepoint)) {
2004 InsertClobbersAt(&*II->getNormalDest()->getFirstInsertionPt());
2005 InsertClobbersAt(&*II->getUnwindDest()->getFirstInsertionPt());
2006 } else {
2007 InsertClobbersAt(cast<Instruction>(Statepoint)->getNextNode());
2008 }
2009 }
2010 }
2011
2012 // Update the uses with loads from the allocas and add a store for the
2012 // original (pre-relocation) def.
2013 for (auto Pair : AllocaMap) {
2014 Value *Def = Pair.first;
2015 AllocaInst *Alloca = Pair.second;
2016
2017 // We pre-record the uses of allocas so that we don't have to worry about
2018 // later updates that change the user information.
2019
2020 SmallVector<Instruction *, 20> Uses;
2021 // PERF: trade a linear scan for repeated reallocation
2022 Uses.reserve(Def->getNumUses());
2023 for (User *U : Def->users()) {
2024 if (!isa<ConstantExpr>(U)) {
2025 // If the def has a ConstantExpr use, then the def is either a
2026 // ConstantExpr use itself or null. In either case
2027 // (recursively in the first, directly in the second), the oop
2028 // it is ultimately dependent on is null and this particular
2029 // use does not need to be fixed up.
2030 Uses.push_back(cast<Instruction>(U));
2031 }
2032 }
2033
2034 llvm::sort(Uses);
2035 auto Last = std::unique(Uses.begin(), Uses.end());
2036 Uses.erase(Last, Uses.end());
2037
2038 for (Instruction *Use : Uses) {
2039 if (isa<PHINode>(Use)) {
2040 PHINode *Phi = cast<PHINode>(Use);
2041 for (unsigned i = 0; i < Phi->getNumIncomingValues(); i++) {
2042 if (Def == Phi->getIncomingValue(i)) {
2043 LoadInst *Load =
2044 new LoadInst(Alloca->getAllocatedType(), Alloca, "",
2045 Phi->getIncomingBlock(i)->getTerminator());
2046 Phi->setIncomingValue(i, Load);
2047 }
2048 }
2049 } else {
2050 LoadInst *Load =
2051 new LoadInst(Alloca->getAllocatedType(), Alloca, "", Use);
2052 Use->replaceUsesOfWith(Def, Load);
2053 }
2054 }
2055
2056 // Emit store for the initial gc value. Store must be inserted after load,
2057 // otherwise store will be in alloca's use list and an extra load will be
2058 // inserted before it.
2059 StoreInst *Store = new StoreInst(Def, Alloca, /*volatile*/ false,
2060 DL.getABITypeAlign(Def->getType()));
2061 if (Instruction *Inst = dyn_cast<Instruction>(Def)) {
2062 if (InvokeInst *Invoke = dyn_cast<InvokeInst>(Inst)) {
2063 // InvokeInst is a terminator so the store needs to be inserted into its
2064 // normal destination block.
2065 BasicBlock *NormalDest = Invoke->getNormalDest();
2066 Store->insertBefore(NormalDest->getFirstNonPHI());
2067 } else {
2068 assert(!Inst->isTerminator() &&
2069 "The only terminator that can produce a value is "
2070 "InvokeInst which is handled above.");
2071 Store->insertAfter(Inst);
2072 }
2073 } else {
2074 assert(isa<Argument>(Def));
2075 Store->insertAfter(cast<Instruction>(Alloca));
2076 }
2077 }
2078
2079 assert(PromotableAllocas.size() == Live.size() + NumRematerializedValues &&
2080 "we must have the same allocas with lives");
2081 if (!PromotableAllocas.empty()) {
2082 // Apply mem2reg to promote alloca to SSA
2083 PromoteMemToReg(PromotableAllocas, DT);
2084 }
2085
2086 #ifndef NDEBUG
2087 for (auto &I : F.getEntryBlock())
2088 if (isa<AllocaInst>(I))
2089 InitialAllocaNum--;
2090 assert(InitialAllocaNum == 0 && "We must not introduce any extra allocas");
2091 #endif
2092 }
2093
2094 /// Implement a unique function which doesn't require we sort the input
2095 /// vector. Doing so has the effect of changing the output of a couple of
2096 /// tests in ways which make them less useful in testing fused safepoints.
2097 template <typename T> static void unique_unsorted(SmallVectorImpl<T> &Vec) {
2098 SmallSet<T, 8> Seen;
2099 erase_if(Vec, [&](const T &V) { return !Seen.insert(V).second; });
2100 }
2101
2102 /// Insert holders so that each Value is obviously live through the entire
2103 /// lifetime of the call.
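///
/// The holder is just a dummy vararg call (illustrative, names made up):
///   call void (...) @__tmp_use(i8 addrspace(1)* %p, i8 addrspace(1)* %q)
/// placed right after a call safepoint, or at the start of both destinations
/// of an invoke safepoint; all holders are erased once liveness is recomputed.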
2104 static void insertUseHolderAfter(CallBase *Call, const ArrayRef<Value *> Values,
2105 SmallVectorImpl<CallInst *> &Holders) {
2106 if (Values.empty())
2107 // No values to hold live, might as well not insert the empty holder
2108 return;
2109
2110 Module *M = Call->getModule();
2111 // Use a dummy vararg function to actually hold the values live
2112 FunctionCallee Func = M->getOrInsertFunction(
2113 "__tmp_use", FunctionType::get(Type::getVoidTy(M->getContext()), true));
2114 if (isa<CallInst>(Call)) {
2115 // For call safepoints insert dummy calls right after safepoint
2116 Holders.push_back(
2117 CallInst::Create(Func, Values, "", &*++Call->getIterator()));
2118 return;
2119 }
2120 // For invoke safepoints insert dummy calls both in normal and
2121 // exceptional destination blocks
2122 auto *II = cast<InvokeInst>(Call);
2123 Holders.push_back(CallInst::Create(
2124 Func, Values, "", &*II->getNormalDest()->getFirstInsertionPt()));
2125 Holders.push_back(CallInst::Create(
2126 Func, Values, "", &*II->getUnwindDest()->getFirstInsertionPt()));
2127 }
2128
2129 static void findLiveReferences(
2130 Function &F, DominatorTree &DT, ArrayRef<CallBase *> toUpdate,
2131 MutableArrayRef<struct PartiallyConstructedSafepointRecord> records) {
2132 GCPtrLivenessData OriginalLivenessData;
2133 computeLiveInValues(DT, F, OriginalLivenessData);
2134 for (size_t i = 0; i < records.size(); i++) {
2135 struct PartiallyConstructedSafepointRecord &info = records[i];
2136 analyzeParsePointLiveness(DT, OriginalLivenessData, toUpdate[i], info);
2137 }
2138 }
2139
2140 // Helper function for the "rematerializeLiveValues". It walks the use chain
2141 // starting from the "CurrentValue" until it reaches the root of the chain, i.e.
2142 // the base or a value it cannot process. Only "simple" values are processed
2143 // (currently GEPs and casts). The returned root is examined by the
2144 // callers of findRematerializableChainToBasePointer. Fills "ChainToBase" array
2145 // with all visited values.
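//
// A minimal sketch (illustrative, names made up):
//   %b   = bitcast i8 addrspace(1)* %base to i32 addrspace(1)*   ; noop cast
//   %gep = getelementptr i32, i32 addrspace(1)* %b, i64 4
// Starting from %gep, ChainToBase receives [%gep, %b] and %base is returned
// as the root of the chain.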
2146 static Value* findRematerializableChainToBasePointer(
2147 SmallVectorImpl<Instruction*> &ChainToBase,
2148 Value *CurrentValue) {
2149 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(CurrentValue)) {
2150 ChainToBase.push_back(GEP);
2151 return findRematerializableChainToBasePointer(ChainToBase,
2152 GEP->getPointerOperand());
2153 }
2154
2155 if (CastInst *CI = dyn_cast<CastInst>(CurrentValue)) {
2156 if (!CI->isNoopCast(CI->getModule()->getDataLayout()))
2157 return CI;
2158
2159 ChainToBase.push_back(CI);
2160 return findRematerializableChainToBasePointer(ChainToBase,
2161 CI->getOperand(0));
2162 }
2163
2164 // We have reached the root of the chain, which is either equal to the base or
2165 // is the first unsupported value along the use chain.
2166 return CurrentValue;
2167 }
2168
2169 // Helper function for the "rematerializeLiveValues". Compute cost of the use
2170 // chain we are going to rematerialize.
2171 static InstructionCost
2172 chainToBasePointerCost(SmallVectorImpl<Instruction *> &Chain,
2173 TargetTransformInfo &TTI) {
2174 InstructionCost Cost = 0;
2175
2176 for (Instruction *Instr : Chain) {
2177 if (CastInst *CI = dyn_cast<CastInst>(Instr)) {
2178 assert(CI->isNoopCast(CI->getModule()->getDataLayout()) &&
2179 "non noop cast is found during rematerialization");
2180
2181 Type *SrcTy = CI->getOperand(0)->getType();
2182 Cost += TTI.getCastInstrCost(CI->getOpcode(), CI->getType(), SrcTy,
2183 TTI::getCastContextHint(CI),
2184 TargetTransformInfo::TCK_SizeAndLatency, CI);
2185
2186 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Instr)) {
2187 // Cost of the address calculation
2188 Type *ValTy = GEP->getSourceElementType();
2189 Cost += TTI.getAddressComputationCost(ValTy);
2190
2191 // And cost of the GEP itself
2192 // TODO: Use TTI->getGEPCost here (it exists, but appears to be not
2193 // allowed for the external usage)
2194 if (!GEP->hasAllConstantIndices())
2195 Cost += 2;
2196
2197 } else {
2198 llvm_unreachable("unsupported instruction type during rematerialization");
2199 }
2200 }
2201
2202 return Cost;
2203 }
2204
2205 static bool AreEquivalentPhiNodes(PHINode &OrigRootPhi, PHINode &AlternateRootPhi) {
2206 unsigned PhiNum = OrigRootPhi.getNumIncomingValues();
2207 if (PhiNum != AlternateRootPhi.getNumIncomingValues() ||
2208 OrigRootPhi.getParent() != AlternateRootPhi.getParent())
2209 return false;
2210 // Map of incoming values and their corresponding basic blocks of
2211 // OrigRootPhi.
2212 SmallDenseMap<Value *, BasicBlock *, 8> CurrentIncomingValues;
2213 for (unsigned i = 0; i < PhiNum; i++)
2214 CurrentIncomingValues[OrigRootPhi.getIncomingValue(i)] =
2215 OrigRootPhi.getIncomingBlock(i);
2216
2217 // Both current and base PHIs should have same incoming values and
2218 // the same basic blocks corresponding to the incoming values.
2219 for (unsigned i = 0; i < PhiNum; i++) {
2220 auto CIVI =
2221 CurrentIncomingValues.find(AlternateRootPhi.getIncomingValue(i));
2222 if (CIVI == CurrentIncomingValues.end())
2223 return false;
2224 BasicBlock *CurrentIncomingBB = CIVI->second;
2225 if (CurrentIncomingBB != AlternateRootPhi.getIncomingBlock(i))
2226 return false;
2227 }
2228 return true;
2229 }
2230
2231 // From the statepoint live set pick values that are cheaper to recompute than
2232 // to relocate. Remove these values from the live set, rematerialize them after
2233 // the statepoint and record them in the "Info" structure. Note that similar to
2234 // relocated values we don't do any user adjustments here.
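//
// For instance (illustrative, names made up), if the live set contains
//   %derived = getelementptr i8, i8 addrspace(1)* %base, i64 16
// it is usually cheaper to clone the gep after the statepoint (using the
// relocated %base) than to have the GC relocate %derived as well.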
2235 static void rematerializeLiveValues(CallBase *Call,
2236 PartiallyConstructedSafepointRecord &Info,
2237 TargetTransformInfo &TTI) {
2238 const unsigned int ChainLengthThreshold = 10;
2239
2240 // Record values we are going to delete from this statepoint live set.
2241 // We cannot do this in the following loop due to iterator invalidation.
2242 SmallVector<Value *, 32> LiveValuesToBeDeleted;
2243
2244 for (Value *LiveValue: Info.LiveSet) {
2245 // For each live pointer find its defining chain
2246 SmallVector<Instruction *, 3> ChainToBase;
2247 assert(Info.PointerToBase.count(LiveValue));
2248 Value *RootOfChain =
2249 findRematerializableChainToBasePointer(ChainToBase,
2250 LiveValue);
2251
2252 // Nothing to do, or chain is too long
2253 if ( ChainToBase.size() == 0 ||
2254 ChainToBase.size() > ChainLengthThreshold)
2255 continue;
2256
2257 // Handle the scenario where the RootOfChain is not equal to the
2258 // Base Value, but they are essentially the same phi values.
2259 if (RootOfChain != Info.PointerToBase[LiveValue]) {
2260 PHINode *OrigRootPhi = dyn_cast<PHINode>(RootOfChain);
2261 PHINode *AlternateRootPhi = dyn_cast<PHINode>(Info.PointerToBase[LiveValue]);
2262 if (!OrigRootPhi || !AlternateRootPhi)
2263 continue;
2264 // PHI nodes that have the same incoming values and belong to the same
2265 // basic block are essentially the same SSA value. When the original phi
2266 // has incoming values with different base pointers, the original phi is
2267 // marked as conflict, and an additional `AlternateRootPhi` with the same
2268 // incoming values gets generated by the findBasePointer function. We need
2269 // to verify that the newly generated AlternateRootPhi (.base version of phi)
2270 // and RootOfChain (the original phi node itself) are the same, so that we
2271 // can rematerialize the gep and casts. This is a workaround for the
2272 // deficiency in the findBasePointer algorithm.
2273 if (!AreEquivalentPhiNodes(*OrigRootPhi, *AlternateRootPhi))
2274 continue;
2275 // Now that the phi nodes are proved to be the same, assert that
2276 // findBasePointer's newly generated AlternateRootPhi is present in the
2277 // liveset of the call.
2278 assert(Info.LiveSet.count(AlternateRootPhi));
2279 }
2280 // Compute cost of this chain
2281 InstructionCost Cost = chainToBasePointerCost(ChainToBase, TTI);
2282 // TODO: We can also account for cases when we will be able to remove some
2283 // of the rematerialized values by later optimization passes. I.e if
2284 // we rematerialized several intersecting chains. Or if original values
2285 // don't have any uses besides this statepoint.
2286
2287 // For invokes we need to rematerialize each chain twice - for normal and
2288 // for unwind basic blocks. Model this by multiplying cost by two.
2289 if (isa<InvokeInst>(Call)) {
2290 Cost *= 2;
2291 }
2292 // If it's too expensive - skip it
2293 if (Cost >= RematerializationThreshold)
2294 continue;
2295
2296 // Remove value from the live set
2297 LiveValuesToBeDeleted.push_back(LiveValue);
2298
2299 // Clone instructions and record them inside "Info" structure
2300
2301 // Walk backwards to visit top-most instructions first
2302 std::reverse(ChainToBase.begin(), ChainToBase.end());
2303
2304 // Utility function which clones all instructions from "ChainToBase"
2305 // and inserts them before "InsertBefore". Returns rematerialized value
2306 // which should be used after statepoint.
2307 auto rematerializeChain = [&ChainToBase](
2308 Instruction *InsertBefore, Value *RootOfChain, Value *AlternateLiveBase) {
2309 Instruction *LastClonedValue = nullptr;
2310 Instruction *LastValue = nullptr;
2311 for (Instruction *Instr: ChainToBase) {
2312 // Only GEP's and casts are supported as we need to be careful to not
2313 // introduce any new uses of pointers not in the liveset.
2314 // Note that it's fine to introduce new uses of pointers which were
2315 // otherwise not used after this statepoint.
2316 assert(isa<GetElementPtrInst>(Instr) || isa<CastInst>(Instr));
2317
2318 Instruction *ClonedValue = Instr->clone();
2319 ClonedValue->insertBefore(InsertBefore);
2320 ClonedValue->setName(Instr->getName() + ".remat");
2321
2322 // If it is not the first instruction in the chain then it uses the
2323 // previously cloned value. We should update it to use the cloned value.
2324 if (LastClonedValue) {
2325 assert(LastValue);
2326 ClonedValue->replaceUsesOfWith(LastValue, LastClonedValue);
2327 #ifndef NDEBUG
2328 for (auto OpValue : ClonedValue->operand_values()) {
2329 // Assert that cloned instruction does not use any instructions from
2330 // this chain other than LastClonedValue
2331 assert(!is_contained(ChainToBase, OpValue) &&
2332 "incorrect use in rematerialization chain");
2333 // Assert that the cloned instruction does not use the RootOfChain
2334 // or the AlternateLiveBase.
2335 assert(OpValue != RootOfChain && OpValue != AlternateLiveBase);
2336 }
2337 #endif
2338 } else {
2339 // For the first instruction, replace the use of unrelocated base i.e.
2340 // RootOfChain/OrigRootPhi, with the corresponding PHI present in the
2341 // live set. They have been proved to be the same PHI nodes. Note
2342 // that the *only* use of the RootOfChain in the ChainToBase list is
2343 // the first Value in the list.
2344 if (RootOfChain != AlternateLiveBase)
2345 ClonedValue->replaceUsesOfWith(RootOfChain, AlternateLiveBase);
2346 }
2347
2348 LastClonedValue = ClonedValue;
2349 LastValue = Instr;
2350 }
2351 assert(LastClonedValue);
2352 return LastClonedValue;
2353 };
2354
2355 // Different cases for calls and invokes. For invokes we need to clone
2356 // instructions both on normal and unwind path.
2357 if (isa<CallInst>(Call)) {
2358 Instruction *InsertBefore = Call->getNextNode();
2359 assert(InsertBefore);
2360 Instruction *RematerializedValue = rematerializeChain(
2361 InsertBefore, RootOfChain, Info.PointerToBase[LiveValue]);
2362 Info.RematerializedValues[RematerializedValue] = LiveValue;
2363 } else {
2364 auto *Invoke = cast<InvokeInst>(Call);
2365
2366 Instruction *NormalInsertBefore =
2367 &*Invoke->getNormalDest()->getFirstInsertionPt();
2368 Instruction *UnwindInsertBefore =
2369 &*Invoke->getUnwindDest()->getFirstInsertionPt();
2370
2371 Instruction *NormalRematerializedValue = rematerializeChain(
2372 NormalInsertBefore, RootOfChain, Info.PointerToBase[LiveValue]);
2373 Instruction *UnwindRematerializedValue = rematerializeChain(
2374 UnwindInsertBefore, RootOfChain, Info.PointerToBase[LiveValue]);
2375
2376 Info.RematerializedValues[NormalRematerializedValue] = LiveValue;
2377 Info.RematerializedValues[UnwindRematerializedValue] = LiveValue;
2378 }
2379 }
2380
2381 // Remove rematerialized values from the live set
2382 for (auto LiveValue: LiveValuesToBeDeleted) {
2383 Info.LiveSet.remove(LiveValue);
2384 }
2385 }
2386
2387 static bool insertParsePoints(Function &F, DominatorTree &DT,
2388 TargetTransformInfo &TTI,
2389 SmallVectorImpl<CallBase *> &ToUpdate) {
2390 #ifndef NDEBUG
2391 // sanity check the input
2392 std::set<CallBase *> Uniqued;
2393 Uniqued.insert(ToUpdate.begin(), ToUpdate.end());
2394 assert(Uniqued.size() == ToUpdate.size() && "no duplicates please!");
2395
2396 for (CallBase *Call : ToUpdate)
2397 assert(Call->getFunction() == &F);
2398 #endif
2399
2400 // When inserting gc.relocates for invokes, we need to be able to insert at
2401 // the top of the successor blocks. See the comment on
2402 // normalizeForInvokeSafepoint on exactly what is needed. Note that this step
2403 // may restructure the CFG.
2404 for (CallBase *Call : ToUpdate) {
2405 auto *II = dyn_cast<InvokeInst>(Call);
2406 if (!II)
2407 continue;
2408 normalizeForInvokeSafepoint(II->getNormalDest(), II->getParent(), DT);
2409 normalizeForInvokeSafepoint(II->getUnwindDest(), II->getParent(), DT);
2410 }
2411
2412 // A list of dummy calls added to the IR to keep various values obviously
2413 // live in the IR. We'll remove all of these when done.
2414 SmallVector<CallInst *, 64> Holders;
2415
2416 // Insert a dummy call with all of the deopt operands we'll need for the
2417 // actual safepoint insertion as arguments. This ensures reference operands
2418 // in the deopt argument list are considered live through the safepoint (and
2419 // thus makes sure they get relocated.)
2420 for (CallBase *Call : ToUpdate) {
2421 SmallVector<Value *, 64> DeoptValues;
2422
2423 for (Value *Arg : GetDeoptBundleOperands(Call)) {
2424 assert(!isUnhandledGCPointerType(Arg->getType()) &&
2425 "support for FCA unimplemented");
2426 if (isHandledGCPointerType(Arg->getType()))
2427 DeoptValues.push_back(Arg);
2428 }
2429
2430 insertUseHolderAfter(Call, DeoptValues, Holders);
2431 }
2432
2433 SmallVector<PartiallyConstructedSafepointRecord, 64> Records(ToUpdate.size());
2434
2435 // A) Identify all gc pointers which are statically live at the given call
2436 // site.
2437 findLiveReferences(F, DT, ToUpdate, Records);
2438
2439 // B) Find the base pointers for each live pointer
2440 /* scope for caching */ {
2441 // Cache the 'defining value' relation used in the computation and
2442 // insertion of base phis and selects. This ensures that we don't insert
2443 // large numbers of duplicate base_phis.
2444 DefiningValueMapTy DVCache;
2445
2446 for (size_t i = 0; i < Records.size(); i++) {
2447 PartiallyConstructedSafepointRecord &info = Records[i];
2448 findBasePointers(DT, DVCache, ToUpdate[i], info);
2449 }
2450 } // end of cache scope
2451
2452 // The base phi insertion logic (for any safepoint) may have inserted new
2453 // instructions which are now live at some safepoint. The simplest such
2454 // example is:
2455 // loop:
2456 // phi a <-- will be a new base_phi here
2457 // safepoint 1 <-- that needs to be live here
2458 // gep a + 1
2459 // safepoint 2
2460 // br loop
2461 // We insert some dummy calls after each safepoint to definitely hold live
2462 // the base pointers which were identified for that safepoint. We'll then
2463 // ask liveness for _every_ base inserted to see what is now live. Then we
2464 // remove the dummy calls.
2465 Holders.reserve(Holders.size() + Records.size());
2466 for (size_t i = 0; i < Records.size(); i++) {
2467 PartiallyConstructedSafepointRecord &Info = Records[i];
2468
2469 SmallVector<Value *, 128> Bases;
2470 for (auto Pair : Info.PointerToBase)
2471 Bases.push_back(Pair.second);
2472
2473 insertUseHolderAfter(ToUpdate[i], Bases, Holders);
2474 }
2475
2476 // By selecting base pointers, we've effectively inserted new uses. Thus, we
2477 // need to rerun liveness. We may *also* have inserted new defs, but that's
2478 // not the key issue.
2479 recomputeLiveInValues(F, DT, ToUpdate, Records);
2480
2481 if (PrintBasePointers) {
2482 for (auto &Info : Records) {
2483 errs() << "Base Pairs: (w/Relocation)\n";
2484 for (auto Pair : Info.PointerToBase) {
2485 errs() << " derived ";
2486 Pair.first->printAsOperand(errs(), false);
2487 errs() << " base ";
2488 Pair.second->printAsOperand(errs(), false);
2489 errs() << "\n";
2490 }
2491 }
2492 }
2493
2494 // It is possible that non-constant live variables have a constant base. For
2495 // example, a GEP with a variable offset from a global. In this case we can
2496 // remove it from the liveset. We already don't add constants to the liveset
2497 // because we assume they won't move at runtime and the GC doesn't need to be
2498 // informed about them. The same reasoning applies if the base is constant.
2499 // Note that the relocation placement code relies on this filtering for
2500 // correctness as it expects the base to be in the liveset, which isn't true
2501 // if the base is constant.
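// (Illustrative example, names made up:
//   %p = getelementptr i8, i8 addrspace(1)* @some_global, i64 %i
// is non-constant but has the constant base @some_global, so %p is dropped
// from the live set here.)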
2502 for (auto &Info : Records)
2503 for (auto &BasePair : Info.PointerToBase)
2504 if (isa<Constant>(BasePair.second))
2505 Info.LiveSet.remove(BasePair.first);
2506
2507 for (CallInst *CI : Holders)
2508 CI->eraseFromParent();
2509
2510 Holders.clear();
2511
2512 // In order to reduce live set of statepoint we might choose to rematerialize
2513 // some values instead of relocating them. This is purely an optimization and
2514 // does not influence correctness.
2515 for (size_t i = 0; i < Records.size(); i++)
2516 rematerializeLiveValues(ToUpdate[i], Records[i], TTI);
2517
2518 // We need this to safely RAUW and delete call or invoke return values that
2519 // may themselves be live over a statepoint. For details, please see usage in
2520 // makeStatepointExplicitImpl.
2521 std::vector<DeferredReplacement> Replacements;
2522
2523 // Now run through and replace the existing statepoints with new ones with
2524 // the live variables listed. We do not yet update uses of the values being
2525 // relocated. We have references to live variables that need to
2526 // survive to the last iteration of this loop. (By construction, the
2527 // previous statepoint cannot be a live variable, thus we can and do remove
2528 // the old statepoint calls as we go.)
2529 for (size_t i = 0; i < Records.size(); i++)
2530 makeStatepointExplicit(DT, ToUpdate[i], Records[i], Replacements);
2531
2532 ToUpdate.clear(); // prevent accidental use of invalid calls.
2533
2534 for (auto &PR : Replacements)
2535 PR.doReplacement();
2536
2537 Replacements.clear();
2538
2539 for (auto &Info : Records) {
2540 // These live sets may contain stale Value pointers, since we replaced calls
2541 // with operand bundles with calls wrapped in gc.statepoint, and some of
2542 // those calls may have been def'ing live gc pointers. Clear these out to
2543 // avoid accidentally using them.
2544 //
2545 // TODO: We should create a separate data structure that does not contain
2546 // these live sets, and migrate to using that data structure from this point
2547 // onward.
2548 Info.LiveSet.clear();
2549 Info.PointerToBase.clear();
2550 }
2551
2552 // Do all the fixups of the original live variables to their relocated selves
2553 SmallVector<Value *, 128> Live;
2554 for (size_t i = 0; i < Records.size(); i++) {
2555 PartiallyConstructedSafepointRecord &Info = Records[i];
2556
2557 // We can't simply save the live set from the original insertion. One of
2558 // the live values might be the result of a call which needs a safepoint.
2559 // That Value* no longer exists and we need to use the new gc_result.
2560 // Thankfully, the live set is embedded in the statepoint (and updated), so
2561 // we just grab that.
2562 llvm::append_range(Live, Info.StatepointToken->gc_args());
2563 #ifndef NDEBUG
2564 // Do some basic sanity checks on our liveness results before performing
2565 // relocation. Relocation can and will turn mistakes in liveness results
2566 // into nonsensical code which is much harder to debug.
2567 // TODO: It would be nice to test consistency as well
2568 assert(DT.isReachableFromEntry(Info.StatepointToken->getParent()) &&
2569 "statepoint must be reachable or liveness is meaningless");
2570 for (Value *V : Info.StatepointToken->gc_args()) {
2571 if (!isa<Instruction>(V))
2572 // Non-instruction values trivially dominate all possible uses
2573 continue;
2574 auto *LiveInst = cast<Instruction>(V);
2575 assert(DT.isReachableFromEntry(LiveInst->getParent()) &&
2576 "unreachable values should never be live");
2577 assert(DT.dominates(LiveInst, Info.StatepointToken) &&
2578 "basic SSA liveness expectation violated by liveness analysis");
2579 }
2580 #endif
2581 }
2582 unique_unsorted(Live);
2583
2584 #ifndef NDEBUG
2585 // sanity check
2586 for (auto *Ptr : Live)
2587 assert(isHandledGCPointerType(Ptr->getType()) &&
2588 "must be a gc pointer type");
2589 #endif
2590
2591 relocationViaAlloca(F, DT, Live, Records);
2592 return !Records.empty();
2593 }
2594
2595 // Handles both return values and arguments for Functions and calls.
2596 template <typename AttrHolder>
2597 static void RemoveNonValidAttrAtIndex(LLVMContext &Ctx, AttrHolder &AH,
2598 unsigned Index) {
2599 AttrBuilder R;
2600 if (AH.getDereferenceableBytes(Index))
2601 R.addAttribute(Attribute::get(Ctx, Attribute::Dereferenceable,
2602 AH.getDereferenceableBytes(Index)));
2603 if (AH.getDereferenceableOrNullBytes(Index))
2604 R.addAttribute(Attribute::get(Ctx, Attribute::DereferenceableOrNull,
2605 AH.getDereferenceableOrNullBytes(Index)));
2606 for (auto Attr : ParamAttrsToStrip)
2607 if (AH.getAttributes().hasAttribute(Index, Attr))
2608 R.addAttribute(Attr);
2609
2610 if (!R.empty())
2611 AH.setAttributes(AH.getAttributes().removeAttributes(Ctx, Index, R));
2612 }
2613
2614 static void stripNonValidAttributesFromPrototype(Function &F) {
2615 LLVMContext &Ctx = F.getContext();
2616
2617 // Intrinsics are very delicate. Lowering sometimes depends on the presence
2618 // of certain attributes for correctness, but we may have also inferred
2619 // additional ones in the abstract machine model which need to be stripped. This
2620 // assumes that the attributes defined in Intrinsic.td are conservatively
2621 // correct for both physical and abstract model.
2622 if (Intrinsic::ID id = F.getIntrinsicID()) {
2623 F.setAttributes(Intrinsic::getAttributes(Ctx, id));
2624 return;
2625 }
2626
2627 for (Argument &A : F.args())
2628 if (isa<PointerType>(A.getType()))
2629 RemoveNonValidAttrAtIndex(Ctx, F,
2630 A.getArgNo() + AttributeList::FirstArgIndex);
2631
2632 if (isa<PointerType>(F.getReturnType()))
2633 RemoveNonValidAttrAtIndex(Ctx, F, AttributeList::ReturnIndex);
2634
2635 for (auto Attr : FnAttrsToStrip)
2636 F.removeFnAttr(Attr);
2637 }
2638
2639 /// Certain metadata on instructions are invalid after running RS4GC.
2640 /// Optimizations that run after RS4GC can incorrectly use this metadata to
2641 /// optimize functions. We drop such metadata on the instruction.
2642 static void stripInvalidMetadataFromInstruction(Instruction &I) {
2643 if (!isa<LoadInst>(I) && !isa<StoreInst>(I))
2644 return;
2645 // These are the metadata kinds that are still valid on loads and stores after
2646 // RS4GC.
2647 // The metadata implying dereferenceability and noalias are (conservatively)
2648 // dropped. This is because semantically, after RewriteStatepointsForGC runs,
2649 // all calls to gc.statepoint "free" the entire heap. Also, gc.statepoint can
2650 // touch the entire heap including noalias objects. Note: The reasoning is
2651 // same as stripping the dereferenceability and noalias attributes that are
2652 // analogous to the metadata counterparts.
2653 // We also drop the invariant.load metadata on the load because that metadata
2654 // implies the address operand to the load points to memory that is never
2655 // changed once it became dereferenceable. This is no longer true after RS4GC.
2656 // Similar reasoning applies to invariant.group metadata, which applies to
2657 // loads within a group.
2658 unsigned ValidMetadataAfterRS4GC[] = {LLVMContext::MD_tbaa,
2659 LLVMContext::MD_range,
2660 LLVMContext::MD_alias_scope,
2661 LLVMContext::MD_nontemporal,
2662 LLVMContext::MD_nonnull,
2663 LLVMContext::MD_align,
2664 LLVMContext::MD_type};
2665
2666 // Drops all metadata on the instruction other than ValidMetadataAfterRS4GC.
2667 I.dropUnknownNonDebugMetadata(ValidMetadataAfterRS4GC);
2668 }
2669
2670 static void stripNonValidDataFromBody(Function &F) {
2671 if (F.empty())
2672 return;
2673
2674 LLVMContext &Ctx = F.getContext();
2675 MDBuilder Builder(Ctx);
2676
2677 // Set of invariantstart instructions that we need to remove.
2678 // Use this to avoid invalidating the instruction iterator.
2679 SmallVector<IntrinsicInst*, 12> InvariantStartInstructions;
2680
2681 for (Instruction &I : instructions(F)) {
2682 // invariant.start on memory location implies that the referenced memory
2683 // location is constant and unchanging. This is no longer true after
2684 // RewriteStatepointsForGC runs because there can be calls to gc.statepoint
2685 // which frees the entire heap and the presence of invariant.start allows
2686 // the optimizer to sink the load of a memory location past a statepoint,
2687 // which is incorrect.
2688 if (auto *II = dyn_cast<IntrinsicInst>(&I))
2689 if (II->getIntrinsicID() == Intrinsic::invariant_start) {
2690 InvariantStartInstructions.push_back(II);
2691 continue;
2692 }
2693
2694 if (MDNode *Tag = I.getMetadata(LLVMContext::MD_tbaa)) {
2695 MDNode *MutableTBAA = Builder.createMutableTBAAAccessTag(Tag);
2696 I.setMetadata(LLVMContext::MD_tbaa, MutableTBAA);
2697 }
2698
2699 stripInvalidMetadataFromInstruction(I);
2700
2701 if (auto *Call = dyn_cast<CallBase>(&I)) {
2702 for (int i = 0, e = Call->arg_size(); i != e; i++)
2703 if (isa<PointerType>(Call->getArgOperand(i)->getType()))
2704 RemoveNonValidAttrAtIndex(Ctx, *Call,
2705 i + AttributeList::FirstArgIndex);
2706 if (isa<PointerType>(Call->getType()))
2707 RemoveNonValidAttrAtIndex(Ctx, *Call, AttributeList::ReturnIndex);
2708 }
2709 }
2710
2711 // Delete the invariant.start instructions and RAUW undef.
2712 for (auto *II : InvariantStartInstructions) {
2713 II->replaceAllUsesWith(UndefValue::get(II->getType()));
2714 II->eraseFromParent();
2715 }
2716 }
2717
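// As a rough illustration (hypothetical IR, not from any test): a call like
//
//   %inv = call {}* @llvm.invariant.start.p0i8(i64 8, i8* %p)
//
// is queued above, then replaced with undef and erased, so nothing downstream
// can conclude that the bytes at %p stay unchanged across a statepoint.
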
/// Returns true if this function should be rewritten by this pass. The main
/// point of this function is as an extension point for custom logic.
static bool shouldRewriteStatepointsIn(Function &F) {
  // TODO: This should check the GCStrategy
  if (F.hasGC()) {
    const auto &FunctionGCName = F.getGC();
    const StringRef StatepointExampleName("statepoint-example");
    const StringRef CoreCLRName("coreclr");
    return (StatepointExampleName == FunctionGCName) ||
           (CoreCLRName == FunctionGCName);
  } else
    return false;
}

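// For example (illustrative only), a function declared as
//
//   define void @f() gc "statepoint-example" { ... }
//
// is rewritten by this pass, while a function with no "gc" attribute, or with
// an unrelated strategy name, is left alone.
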
static void stripNonValidData(Module &M) {
#ifndef NDEBUG
  assert(llvm::any_of(M, shouldRewriteStatepointsIn) && "precondition!");
#endif

  for (Function &F : M)
    stripNonValidAttributesFromPrototype(F);

  for (Function &F : M)
    stripNonValidDataFromBody(F);
}

bool RewriteStatepointsForGC::runOnFunction(Function &F, DominatorTree &DT,
                                            TargetTransformInfo &TTI,
                                            const TargetLibraryInfo &TLI) {
  assert(!F.isDeclaration() && !F.empty() &&
         "need function body to rewrite statepoints in");
  assert(shouldRewriteStatepointsIn(F) && "mismatch in rewrite decision");

  auto NeedsRewrite = [&TLI](Instruction &I) {
    if (const auto *Call = dyn_cast<CallBase>(&I)) {
      if (isa<GCStatepointInst>(Call))
        return false;
      if (callsGCLeafFunction(Call, TLI))
        return false;

      // Normally it's up to the frontend to make sure that non-leaf calls also
      // have proper deopt state if it is required. We make an exception for
      // element atomic memcpy/memmove intrinsics here. Unlike other intrinsics
      // these are non-leaf by default. They might be generated by the optimizer
      // which doesn't know how to produce a proper deopt state. So if we see a
      // non-leaf memcpy/memmove without deopt state just treat it as a leaf
      // copy and don't produce a statepoint.
      if (!AllowStatepointWithNoDeoptInfo &&
          !Call->getOperandBundle(LLVMContext::OB_deopt)) {
        assert((isa<AtomicMemCpyInst>(Call) || isa<AtomicMemMoveInst>(Call)) &&
               "Don't expect any other calls here!");
        return false;
      }
      return true;
    }
    return false;
  };
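
  // Roughly (illustrative IR, not from any test): a plain call such as
  //
  //   call void @foo() [ "deopt"(i32 0) ]
  //
  // needs a statepoint, an existing @llvm.experimental.gc.statepoint call or
  // a call known to be a GC leaf does not, and a non-leaf element-atomic
  // memcpy/memmove that carries no "deopt" bundle is treated as a leaf copy.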

  // Delete any unreachable statepoints so that we don't have unrewritten
  // statepoints surviving this pass. This makes testing easier and the
  // resulting IR less confusing to human readers.
  DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
  bool MadeChange = removeUnreachableBlocks(F, &DTU);
  // Flush the Dominator Tree.
  DTU.getDomTree();

  // Gather all the statepoints which need to be rewritten. Be careful to only
  // consider those in reachable code since we need to ask dominance queries
  // when rewriting. We'll delete the unreachable ones in a moment.
  SmallVector<CallBase *, 64> ParsePointNeeded;
  for (Instruction &I : instructions(F)) {
    // TODO: only the ones with the flag set!
    if (NeedsRewrite(I)) {
      // NOTE removeUnreachableBlocks() is stronger than
      // DominatorTree::isReachableFromEntry(). In other words
      // removeUnreachableBlocks can remove some blocks for which
      // isReachableFromEntry() returns true.
      assert(DT.isReachableFromEntry(I.getParent()) &&
             "no unreachable blocks expected");
      ParsePointNeeded.push_back(cast<CallBase>(&I));
    }
  }

  // Return early if no work to do.
  if (ParsePointNeeded.empty())
    return MadeChange;

  // As a prepass, go ahead and aggressively destroy single entry phi nodes.
  // These are created by LCSSA. They have the effect of increasing the size
  // of liveness sets for no good reason. It may be harder to do this post
  // insertion since relocations and base phis can confuse things.
  for (BasicBlock &BB : F)
    if (BB.getUniquePredecessor())
      MadeChange |= FoldSingleEntryPHINodes(&BB);

  // Before we start introducing relocations, we want to tweak the IR a bit to
  // avoid unfortunate code generation effects. The main example is that we
  // want to try to make sure the comparison feeding a branch is after any
  // safepoints. Otherwise, we end up with a comparison of pre-relocation
  // values feeding a branch after relocation. This is semantically correct,
  // but results in extra register pressure since both the pre-relocation and
  // post-relocation copies must be available in registers. For code without
  // relocations this is handled elsewhere, but teaching the scheduler to
  // reverse the transform we're about to do would be slightly complex.
  // Note: This may extend the live range of the inputs to the icmp and thus
  // increase the liveset of any statepoint we move over. This is profitable
  // as long as all statepoints are in rare blocks. If we had in-register
  // lowering for live values this would be a much safer transform.
  auto getConditionInst = [](Instruction *TI) -> Instruction * {
    if (auto *BI = dyn_cast<BranchInst>(TI))
      if (BI->isConditional())
        return dyn_cast<Instruction>(BI->getCondition());
    // TODO: Extend this to handle switches
    return nullptr;
  };
  for (BasicBlock &BB : F) {
    Instruction *TI = BB.getTerminator();
    if (auto *Cond = getConditionInst(TI))
      // TODO: Handle more than just ICmps here. We should be able to move
      // most instructions without side effects or memory access.
      if (isa<ICmpInst>(Cond) && Cond->hasOneUse()) {
        MadeChange = true;
        Cond->moveBefore(TI);
      }
  }
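
  // Sketch of the effect (hypothetical IR): in a block such as
  //
  //   %cmp = icmp eq i8 addrspace(1)* %p, null
  //   call void @foo() [ "deopt"() ]        ; will become a statepoint
  //   br i1 %cmp, label %a, label %b
  //
  // the single-use icmp is moved down to just before the branch, so that
  // after rewriting it compares the relocated value instead of forcing both
  // the pre- and post-relocation copies of %p to stay live.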

  // Nasty workaround - The base computation code in the main algorithm doesn't
  // consider the fact that a GEP can be used to convert a scalar to a vector.
  // The right fix for this is to integrate GEPs into the base rewriting
  // algorithm properly; this is just a short term workaround to prevent
  // crashes by canonicalizing such GEPs into fully vector GEPs.
  for (Instruction &I : instructions(F)) {
    if (!isa<GetElementPtrInst>(I))
      continue;

    unsigned VF = 0;
    for (unsigned i = 0; i < I.getNumOperands(); i++)
      if (auto *OpndVTy = dyn_cast<VectorType>(I.getOperand(i)->getType())) {
        assert(VF == 0 ||
               VF == cast<FixedVectorType>(OpndVTy)->getNumElements());
        VF = cast<FixedVectorType>(OpndVTy)->getNumElements();
      }

    // It's the vector to scalar traversal through the pointer operand which
    // confuses base pointer rewriting, so limit ourselves to that case.
    if (!I.getOperand(0)->getType()->isVectorTy() && VF != 0) {
      IRBuilder<> B(&I);
      auto *Splat = B.CreateVectorSplat(VF, I.getOperand(0));
      I.setOperand(0, Splat);
      MadeChange = true;
    }
  }
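
  // For instance (illustrative IR): a mixed GEP like
  //
  //   %g = getelementptr i32, i32* %base, <2 x i64> %offsets
  //
  // has its scalar pointer operand splatted first, roughly
  //
  //   %base.splat = ... broadcast %base to <2 x i32*> ...
  //   %g = getelementptr i32, <2 x i32*> %base.splat, <2 x i64> %offsets
  //
  // which keeps the base pointer analysis in purely vector territory.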

  MadeChange |= insertParsePoints(F, DT, TTI, ParsePointNeeded);
  return MadeChange;
}

// liveness computation via standard dataflow
// -------------------------------------------------------------------

// TODO: Consider using bitvectors for liveness; the set of potentially
// interesting values should be small and easy to pre-compute.

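// As a sketch of the dataflow below (using this file's own set names):
//
//   LiveOut(BB) = union of LiveIn(Succ) over all successors Succ of BB,
//                 plus the values BB feeds into successor PHIs
//   LiveIn(BB)  = (LiveSet(BB) union LiveOut(BB)) \ KillSet(BB)
//
// where LiveSet(BB) holds the GC pointers used (but not defined) in BB and
// KillSet(BB) holds the GC pointers defined in BB; iteration proceeds
// backwards over the CFG until a fixed point is reached.
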
/// Compute the live-in set for the range [Begin, End) starting from
/// the live-out set of the basic block
static void computeLiveInValues(BasicBlock::reverse_iterator Begin,
                                BasicBlock::reverse_iterator End,
                                SetVector<Value *> &LiveTmp) {
  for (auto &I : make_range(Begin, End)) {
    // KILL/Def - Remove this definition from LiveIn
    LiveTmp.remove(&I);

    // Don't consider *uses* in PHI nodes; we handle their contribution to
    // predecessor blocks when we seed the LiveOut sets
    if (isa<PHINode>(I))
      continue;

    // USE - Add to the LiveIn set for this instruction
    for (Value *V : I.operands()) {
      assert(!isUnhandledGCPointerType(V->getType()) &&
             "support for FCA unimplemented");
      if (isHandledGCPointerType(V->getType()) && !isa<Constant>(V)) {
        // The choice to exclude all things constant here is slightly subtle.
        // There are two independent reasons:
        // - We assume that things which are constant (from LLVM's definition)
        //   do not move at runtime. For example, the address of a global
        //   variable is fixed, even though its contents may not be.
        // - Second, we can't disallow arbitrary inttoptr constants even
        //   if the language frontend does. Optimization passes are free to
        //   locally exploit facts without respect to global reachability. This
        //   can create sections of code which are dynamically unreachable and
        //   contain just about anything. (see constants.ll in tests)
        LiveTmp.insert(V);
      }
    }
  }
}
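
// For example (illustrative IR): given
//
//   %cmp = icmp eq i8 addrspace(1)* %obj, @global_obj
//
// the SSA value %obj is added to the live set, while the constant
// @global_obj is not, since constants (e.g. the address of a global) are
// assumed not to move at runtime.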

static void computeLiveOutSeed(BasicBlock *BB, SetVector<Value *> &LiveTmp) {
  for (BasicBlock *Succ : successors(BB)) {
    for (auto &I : *Succ) {
      PHINode *PN = dyn_cast<PHINode>(&I);
      if (!PN)
        break;

      Value *V = PN->getIncomingValueForBlock(BB);
      assert(!isUnhandledGCPointerType(V->getType()) &&
             "support for FCA unimplemented");
      if (isHandledGCPointerType(V->getType()) && !isa<Constant>(V))
        LiveTmp.insert(V);
    }
  }
}

static SetVector<Value *> computeKillSet(BasicBlock *BB) {
  SetVector<Value *> KillSet;
  for (Instruction &I : *BB)
    if (isHandledGCPointerType(I.getType()))
      KillSet.insert(&I);
  return KillSet;
}

#ifndef NDEBUG
/// Check that the items in 'Live' dominate 'TI'. This is used as a basic
/// sanity check for the liveness computation.
static void checkBasicSSA(DominatorTree &DT, SetVector<Value *> &Live,
                          Instruction *TI, bool TermOkay = false) {
  for (Value *V : Live) {
    if (auto *I = dyn_cast<Instruction>(V)) {
      // The terminator can be a member of the LiveOut set. LLVM's definition
      // of instruction dominance states that V does not dominate itself. As
      // such, we need to special case this to allow it.
      if (TermOkay && TI == I)
        continue;
      assert(DT.dominates(I, TI) &&
             "basic SSA liveness expectation violated by liveness analysis");
    }
  }
}

/// Check that all the liveness sets used during the computation of liveness
/// obey basic SSA properties. This is useful for finding cases where we miss
/// a def.
static void checkBasicSSA(DominatorTree &DT, GCPtrLivenessData &Data,
                          BasicBlock &BB) {
  checkBasicSSA(DT, Data.LiveSet[&BB], BB.getTerminator());
  checkBasicSSA(DT, Data.LiveOut[&BB], BB.getTerminator(), true);
  checkBasicSSA(DT, Data.LiveIn[&BB], BB.getTerminator());
}
#endif

static void computeLiveInValues(DominatorTree &DT, Function &F,
                                GCPtrLivenessData &Data) {
  SmallSetVector<BasicBlock *, 32> Worklist;

  // Seed the liveness for each individual block
  for (BasicBlock &BB : F) {
    Data.KillSet[&BB] = computeKillSet(&BB);
    Data.LiveSet[&BB].clear();
    computeLiveInValues(BB.rbegin(), BB.rend(), Data.LiveSet[&BB]);

#ifndef NDEBUG
    for (Value *Kill : Data.KillSet[&BB])
      assert(!Data.LiveSet[&BB].count(Kill) && "live set contains kill");
#endif

    Data.LiveOut[&BB] = SetVector<Value *>();
    computeLiveOutSeed(&BB, Data.LiveOut[&BB]);
    Data.LiveIn[&BB] = Data.LiveSet[&BB];
    Data.LiveIn[&BB].set_union(Data.LiveOut[&BB]);
    Data.LiveIn[&BB].set_subtract(Data.KillSet[&BB]);
    if (!Data.LiveIn[&BB].empty())
      Worklist.insert(pred_begin(&BB), pred_end(&BB));
  }

  // Propagate that liveness until stable
  while (!Worklist.empty()) {
    BasicBlock *BB = Worklist.pop_back_val();

    // Compute our new liveout set, then exit early if it hasn't changed
    // despite the contribution of our successors.
    SetVector<Value *> LiveOut = Data.LiveOut[BB];
    const auto OldLiveOutSize = LiveOut.size();
    for (BasicBlock *Succ : successors(BB)) {
      assert(Data.LiveIn.count(Succ));
      LiveOut.set_union(Data.LiveIn[Succ]);
    }
    // assert: OldLiveOut is a subset of LiveOut
    if (OldLiveOutSize == LiveOut.size()) {
      // If the sets are the same size, then we didn't actually add anything
      // when unioning our successors' LiveIn. Thus, the LiveIn of this block
      // hasn't changed.
      continue;
    }
    Data.LiveOut[BB] = LiveOut;

    // Apply the effects of this basic block
    SetVector<Value *> LiveTmp = LiveOut;
    LiveTmp.set_union(Data.LiveSet[BB]);
    LiveTmp.set_subtract(Data.KillSet[BB]);

    assert(Data.LiveIn.count(BB));
    const SetVector<Value *> &OldLiveIn = Data.LiveIn[BB];
    // assert: OldLiveIn is a subset of LiveTmp
    if (OldLiveIn.size() != LiveTmp.size()) {
      Data.LiveIn[BB] = LiveTmp;
      Worklist.insert(pred_begin(BB), pred_end(BB));
    }
  } // while (!Worklist.empty())

#ifndef NDEBUG
  // Sanity check our output against SSA properties. This helps catch any
  // missing kills during the above iteration.
  for (BasicBlock &BB : F)
    checkBasicSSA(DT, Data, BB);
#endif
}

static void findLiveSetAtInst(Instruction *Inst, GCPtrLivenessData &Data,
                              StatepointLiveSetTy &Out) {
  BasicBlock *BB = Inst->getParent();

  // Note: The copy is intentional and required
  assert(Data.LiveOut.count(BB));
  SetVector<Value *> LiveOut = Data.LiveOut[BB];

  // We want to handle the statepoint itself oddly. Its call result is not
  // live (normal), nor are its arguments (unless they're used again later).
  // This adjustment is specifically what we need to relocate.
  computeLiveInValues(BB->rbegin(), ++Inst->getIterator().getReverse(),
                      LiveOut);
  LiveOut.remove(Inst);
  Out.insert(LiveOut.begin(), LiveOut.end());
}
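
// Concretely (illustrative IR): for
//
//   %p = ...                              ; a GC pointer
//   %r = call i32 @foo() [ "deopt"() ]    ; Inst, the future statepoint
//   %v = load i8, i8 addrspace(1)* %p
//
// the live set reported at Inst contains %p, which is used after the call
// and must be relocated, but not Inst's own result %r or call arguments with
// no later use.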

static void recomputeLiveInValues(GCPtrLivenessData &RevisedLivenessData,
                                  CallBase *Call,
                                  PartiallyConstructedSafepointRecord &Info) {
  StatepointLiveSetTy Updated;
  findLiveSetAtInst(Call, RevisedLivenessData, Updated);

  // We may have base pointers which are now live that weren't before. We need
  // to update the PointerToBase structure to reflect this.
  for (auto V : Updated)
    Info.PointerToBase.insert({V, V});

#ifndef NDEBUG
  for (auto V : Updated)
    assert(Info.PointerToBase.count(V) &&
           "Must be able to find base for live value!");
#endif

  // Remove any stale base mappings - this can happen since our liveness is
  // more precise than the one inherent in the base pointer analysis.
  DenseSet<Value *> ToErase;
  for (auto KVPair : Info.PointerToBase)
    if (!Updated.count(KVPair.first))
      ToErase.insert(KVPair.first);

  for (auto *V : ToErase)
    Info.PointerToBase.erase(V);

#ifndef NDEBUG
  for (auto KVPair : Info.PointerToBase)
    assert(Updated.count(KVPair.first) && "record for non-live value");
#endif

  Info.LiveSet = Updated;
}