1 //===- RewriteStatepointsForGC.cpp - Make GC relocations explicit ---------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // Rewrite call/invoke instructions so as to make potential relocations 10 // performed by the garbage collector explicit in the IR. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "llvm/Transforms/Scalar/RewriteStatepointsForGC.h" 15 16 #include "llvm/ADT/ArrayRef.h" 17 #include "llvm/ADT/DenseMap.h" 18 #include "llvm/ADT/DenseSet.h" 19 #include "llvm/ADT/MapVector.h" 20 #include "llvm/ADT/None.h" 21 #include "llvm/ADT/Optional.h" 22 #include "llvm/ADT/STLExtras.h" 23 #include "llvm/ADT/SetVector.h" 24 #include "llvm/ADT/SmallSet.h" 25 #include "llvm/ADT/SmallVector.h" 26 #include "llvm/ADT/StringRef.h" 27 #include "llvm/ADT/iterator_range.h" 28 #include "llvm/Analysis/DomTreeUpdater.h" 29 #include "llvm/Analysis/TargetLibraryInfo.h" 30 #include "llvm/Analysis/TargetTransformInfo.h" 31 #include "llvm/IR/Argument.h" 32 #include "llvm/IR/Attributes.h" 33 #include "llvm/IR/BasicBlock.h" 34 #include "llvm/IR/CallingConv.h" 35 #include "llvm/IR/Constant.h" 36 #include "llvm/IR/Constants.h" 37 #include "llvm/IR/DataLayout.h" 38 #include "llvm/IR/DerivedTypes.h" 39 #include "llvm/IR/Dominators.h" 40 #include "llvm/IR/Function.h" 41 #include "llvm/IR/IRBuilder.h" 42 #include "llvm/IR/InstIterator.h" 43 #include "llvm/IR/InstrTypes.h" 44 #include "llvm/IR/Instruction.h" 45 #include "llvm/IR/Instructions.h" 46 #include "llvm/IR/IntrinsicInst.h" 47 #include "llvm/IR/Intrinsics.h" 48 #include "llvm/IR/LLVMContext.h" 49 #include "llvm/IR/MDBuilder.h" 50 #include "llvm/IR/Metadata.h" 51 #include "llvm/IR/Module.h" 52 #include "llvm/IR/Statepoint.h" 53 #include "llvm/IR/Type.h" 54 #include "llvm/IR/User.h" 55 #include "llvm/IR/Value.h" 56 #include "llvm/IR/ValueHandle.h" 57 #include "llvm/InitializePasses.h" 58 #include "llvm/Pass.h" 59 #include "llvm/Support/Casting.h" 60 #include "llvm/Support/CommandLine.h" 61 #include "llvm/Support/Compiler.h" 62 #include "llvm/Support/Debug.h" 63 #include "llvm/Support/ErrorHandling.h" 64 #include "llvm/Support/raw_ostream.h" 65 #include "llvm/Transforms/Scalar.h" 66 #include "llvm/Transforms/Utils/BasicBlockUtils.h" 67 #include "llvm/Transforms/Utils/Local.h" 68 #include "llvm/Transforms/Utils/PromoteMemToReg.h" 69 #include <algorithm> 70 #include <cassert> 71 #include <cstddef> 72 #include <cstdint> 73 #include <iterator> 74 #include <set> 75 #include <string> 76 #include <utility> 77 #include <vector> 78 79 #define DEBUG_TYPE "rewrite-statepoints-for-gc" 80 81 using namespace llvm; 82 83 // Print the liveset found at the insert location 84 static cl::opt<bool> PrintLiveSet("spp-print-liveset", cl::Hidden, 85 cl::init(false)); 86 static cl::opt<bool> PrintLiveSetSize("spp-print-liveset-size", cl::Hidden, 87 cl::init(false)); 88 89 // Print out the base pointers for debugging 90 static cl::opt<bool> PrintBasePointers("spp-print-base-pointers", cl::Hidden, 91 cl::init(false)); 92 93 // Cost threshold measuring when it is profitable to rematerialize value instead 94 // of relocating it 95 static cl::opt<unsigned> 96 RematerializationThreshold("spp-rematerialization-threshold", cl::Hidden, 97 cl::init(6)); 98 99 #ifdef 
EXPENSIVE_CHECKS 100 static bool ClobberNonLive = true; 101 #else 102 static bool ClobberNonLive = false; 103 #endif 104 105 static cl::opt<bool, true> ClobberNonLiveOverride("rs4gc-clobber-non-live", 106 cl::location(ClobberNonLive), 107 cl::Hidden); 108 109 static cl::opt<bool> 110 AllowStatepointWithNoDeoptInfo("rs4gc-allow-statepoint-with-no-deopt-info", 111 cl::Hidden, cl::init(true)); 112 113 /// The IR fed into RewriteStatepointsForGC may have had attributes and 114 /// metadata implying dereferenceability that are no longer valid/correct after 115 /// RewriteStatepointsForGC has run. This is because semantically, after 116 /// RewriteStatepointsForGC runs, all calls to gc.statepoint "free" the entire 117 /// heap. stripNonValidData (conservatively) restores 118 /// correctness by erasing all attributes in the module that externally imply 119 /// dereferenceability. Similar reasoning also applies to the noalias 120 /// attributes and metadata. gc.statepoint can touch the entire heap including 121 /// noalias objects. 122 /// Apart from attributes and metadata, we also remove instructions that imply 123 /// constant physical memory: llvm.invariant.start. 124 static void stripNonValidData(Module &M); 125 126 static bool shouldRewriteStatepointsIn(Function &F); 127 128 PreservedAnalyses RewriteStatepointsForGC::run(Module &M, 129 ModuleAnalysisManager &AM) { 130 bool Changed = false; 131 auto &FAM = AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager(); 132 for (Function &F : M) { 133 // Nothing to do for declarations. 134 if (F.isDeclaration() || F.empty()) 135 continue; 136 137 // Policy choice says not to rewrite - the most common reason is that we're 138 // compiling code without a GCStrategy. 139 if (!shouldRewriteStatepointsIn(F)) 140 continue; 141 142 auto &DT = FAM.getResult<DominatorTreeAnalysis>(F); 143 auto &TTI = FAM.getResult<TargetIRAnalysis>(F); 144 auto &TLI = FAM.getResult<TargetLibraryAnalysis>(F); 145 Changed |= runOnFunction(F, DT, TTI, TLI); 146 } 147 if (!Changed) 148 return PreservedAnalyses::all(); 149 150 // stripNonValidData asserts that shouldRewriteStatepointsIn 151 // returns true for at least one function in the module. Since at least 152 // one function changed, we know that the precondition is satisfied. 153 stripNonValidData(M); 154 155 PreservedAnalyses PA; 156 PA.preserve<TargetIRAnalysis>(); 157 PA.preserve<TargetLibraryAnalysis>(); 158 return PA; 159 } 160 161 namespace { 162 163 class RewriteStatepointsForGCLegacyPass : public ModulePass { 164 RewriteStatepointsForGC Impl; 165 166 public: 167 static char ID; // Pass identification, replacement for typeid 168 169 RewriteStatepointsForGCLegacyPass() : ModulePass(ID), Impl() { 170 initializeRewriteStatepointsForGCLegacyPassPass( 171 *PassRegistry::getPassRegistry()); 172 } 173 174 bool runOnModule(Module &M) override { 175 bool Changed = false; 176 for (Function &F : M) { 177 // Nothing to do for declarations. 178 if (F.isDeclaration() || F.empty()) 179 continue; 180 181 // Policy choice says not to rewrite - the most common reason is that 182 // we're compiling code without a GCStrategy. 
183 if (!shouldRewriteStatepointsIn(F)) 184 continue; 185 186 TargetTransformInfo &TTI = 187 getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 188 const TargetLibraryInfo &TLI = 189 getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F); 190 auto &DT = getAnalysis<DominatorTreeWrapperPass>(F).getDomTree(); 191 192 Changed |= Impl.runOnFunction(F, DT, TTI, TLI); 193 } 194 195 if (!Changed) 196 return false; 197 198 // stripNonValidData asserts that shouldRewriteStatepointsIn 199 // returns true for at least one function in the module. Since at least 200 // one function changed, we know that the precondition is satisfied. 201 stripNonValidData(M); 202 return true; 203 } 204 205 void getAnalysisUsage(AnalysisUsage &AU) const override { 206 // We add and rewrite a bunch of instructions, but don't really do much 207 // else. We could in theory preserve a lot more analyses here. 208 AU.addRequired<DominatorTreeWrapperPass>(); 209 AU.addRequired<TargetTransformInfoWrapperPass>(); 210 AU.addRequired<TargetLibraryInfoWrapperPass>(); 211 } 212 }; 213 214 } // end anonymous namespace 215 216 char RewriteStatepointsForGCLegacyPass::ID = 0; 217 218 ModulePass *llvm::createRewriteStatepointsForGCLegacyPass() { 219 return new RewriteStatepointsForGCLegacyPass(); 220 } 221 222 INITIALIZE_PASS_BEGIN(RewriteStatepointsForGCLegacyPass, 223 "rewrite-statepoints-for-gc", 224 "Make relocations explicit at statepoints", false, false) 225 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 226 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 227 INITIALIZE_PASS_END(RewriteStatepointsForGCLegacyPass, 228 "rewrite-statepoints-for-gc", 229 "Make relocations explicit at statepoints", false, false) 230 231 namespace { 232 233 struct GCPtrLivenessData { 234 /// Values defined in this block. 235 MapVector<BasicBlock *, SetVector<Value *>> KillSet; 236 237 /// Values used in this block (and thus live); does not included values 238 /// killed within this block. 239 MapVector<BasicBlock *, SetVector<Value *>> LiveSet; 240 241 /// Values live into this basic block (i.e. used by any 242 /// instruction in this basic block or ones reachable from here) 243 MapVector<BasicBlock *, SetVector<Value *>> LiveIn; 244 245 /// Values live out of this basic block (i.e. live into 246 /// any successor block) 247 MapVector<BasicBlock *, SetVector<Value *>> LiveOut; 248 }; 249 250 // The type of the internal cache used inside the findBasePointers family 251 // of functions. From the callers perspective, this is an opaque type and 252 // should not be inspected. 253 // 254 // In the actual implementation this caches two relations: 255 // - The base relation itself (i.e. this pointer is based on that one) 256 // - The base defining value relation (i.e. before base_phi insertion) 257 // Generally, after the execution of a full findBasePointer call, only the 258 // base relation will remain. Internally, we add a mixture of the two 259 // types, then update all the second type to the first type 260 using DefiningValueMapTy = MapVector<Value *, Value *>; 261 using StatepointLiveSetTy = SetVector<Value *>; 262 using RematerializedValueMapTy = 263 MapVector<AssertingVH<Instruction>, AssertingVH<Value>>; 264 265 struct PartiallyConstructedSafepointRecord { 266 /// The set of values known to be live across this safepoint 267 StatepointLiveSetTy LiveSet; 268 269 /// Mapping from live pointers to a base-defining-value 270 MapVector<Value *, Value *> PointerToBase; 271 272 /// The *new* gc.statepoint instruction itself. 
This produces the token 273 /// that normal path gc.relocates and the gc.result are tied to. 274 GCStatepointInst *StatepointToken; 275 276 /// Instruction to which exceptional gc relocates are attached 277 /// Makes it easier to iterate through them during relocationViaAlloca. 278 Instruction *UnwindToken; 279 280 /// Record live values we are rematerialized instead of relocating. 281 /// They are not included into 'LiveSet' field. 282 /// Maps rematerialized copy to it's original value. 283 RematerializedValueMapTy RematerializedValues; 284 }; 285 286 } // end anonymous namespace 287 288 static ArrayRef<Use> GetDeoptBundleOperands(const CallBase *Call) { 289 Optional<OperandBundleUse> DeoptBundle = 290 Call->getOperandBundle(LLVMContext::OB_deopt); 291 292 if (!DeoptBundle.hasValue()) { 293 assert(AllowStatepointWithNoDeoptInfo && 294 "Found non-leaf call without deopt info!"); 295 return None; 296 } 297 298 return DeoptBundle.getValue().Inputs; 299 } 300 301 /// Compute the live-in set for every basic block in the function 302 static void computeLiveInValues(DominatorTree &DT, Function &F, 303 GCPtrLivenessData &Data); 304 305 /// Given results from the dataflow liveness computation, find the set of live 306 /// Values at a particular instruction. 307 static void findLiveSetAtInst(Instruction *inst, GCPtrLivenessData &Data, 308 StatepointLiveSetTy &out); 309 310 // TODO: Once we can get to the GCStrategy, this becomes 311 // Optional<bool> isGCManagedPointer(const Type *Ty) const override { 312 313 static bool isGCPointerType(Type *T) { 314 if (auto *PT = dyn_cast<PointerType>(T)) 315 // For the sake of this example GC, we arbitrarily pick addrspace(1) as our 316 // GC managed heap. We know that a pointer into this heap needs to be 317 // updated and that no other pointer does. 318 return PT->getAddressSpace() == 1; 319 return false; 320 } 321 322 // Return true if this type is one which a) is a gc pointer or contains a GC 323 // pointer and b) is of a type this code expects to encounter as a live value. 324 // (The insertion code will assert that a type which matches (a) and not (b) 325 // is not encountered.) 326 static bool isHandledGCPointerType(Type *T) { 327 // We fully support gc pointers 328 if (isGCPointerType(T)) 329 return true; 330 // We partially support vectors of gc pointers. The code will assert if it 331 // can't handle something. 332 if (auto VT = dyn_cast<VectorType>(T)) 333 if (isGCPointerType(VT->getElementType())) 334 return true; 335 return false; 336 } 337 338 #ifndef NDEBUG 339 /// Returns true if this type contains a gc pointer whether we know how to 340 /// handle that type or not. 341 static bool containsGCPtrType(Type *Ty) { 342 if (isGCPointerType(Ty)) 343 return true; 344 if (VectorType *VT = dyn_cast<VectorType>(Ty)) 345 return isGCPointerType(VT->getScalarType()); 346 if (ArrayType *AT = dyn_cast<ArrayType>(Ty)) 347 return containsGCPtrType(AT->getElementType()); 348 if (StructType *ST = dyn_cast<StructType>(Ty)) 349 return llvm::any_of(ST->elements(), containsGCPtrType); 350 return false; 351 } 352 353 // Returns true if this is a type which a) is a gc pointer or contains a GC 354 // pointer and b) is of a type which the code doesn't expect (i.e. first class 355 // aggregates). Used to trip assertions. 
356 static bool isUnhandledGCPointerType(Type *Ty) { 357 return containsGCPtrType(Ty) && !isHandledGCPointerType(Ty); 358 } 359 #endif 360 361 // Return the name of the value suffixed with the provided value, or if the 362 // value didn't have a name, the default value specified. 363 static std::string suffixed_name_or(Value *V, StringRef Suffix, 364 StringRef DefaultName) { 365 return V->hasName() ? (V->getName() + Suffix).str() : DefaultName.str(); 366 } 367 368 // Conservatively identifies any definitions which might be live at the 369 // given instruction. The analysis is performed immediately before the 370 // given instruction. Values defined by that instruction are not considered 371 // live. Values used by that instruction are considered live. 372 static void analyzeParsePointLiveness( 373 DominatorTree &DT, GCPtrLivenessData &OriginalLivenessData, CallBase *Call, 374 PartiallyConstructedSafepointRecord &Result) { 375 StatepointLiveSetTy LiveSet; 376 findLiveSetAtInst(Call, OriginalLivenessData, LiveSet); 377 378 if (PrintLiveSet) { 379 dbgs() << "Live Variables:\n"; 380 for (Value *V : LiveSet) 381 dbgs() << " " << V->getName() << " " << *V << "\n"; 382 } 383 if (PrintLiveSetSize) { 384 dbgs() << "Safepoint For: " << Call->getCalledOperand()->getName() << "\n"; 385 dbgs() << "Number live values: " << LiveSet.size() << "\n"; 386 } 387 Result.LiveSet = LiveSet; 388 } 389 390 // Returns true is V is a knownBaseResult. 391 static bool isKnownBaseResult(Value *V); 392 393 // Returns true if V is a BaseResult that already exists in the IR, i.e. it is 394 // not created by the findBasePointers algorithm. 395 static bool isOriginalBaseResult(Value *V); 396 397 namespace { 398 399 /// A single base defining value - An immediate base defining value for an 400 /// instruction 'Def' is an input to 'Def' whose base is also a base of 'Def'. 401 /// For instructions which have multiple pointer [vector] inputs or that 402 /// transition between vector and scalar types, there is no immediate base 403 /// defining value. The 'base defining value' for 'Def' is the transitive 404 /// closure of this relation stopping at the first instruction which has no 405 /// immediate base defining value. The b.d.v. might itself be a base pointer, 406 /// but it can also be an arbitrary derived pointer. 407 struct BaseDefiningValueResult { 408 /// Contains the value which is the base defining value. 409 Value * const BDV; 410 411 /// True if the base defining value is also known to be an actual base 412 /// pointer. 413 const bool IsKnownBase; 414 415 BaseDefiningValueResult(Value *BDV, bool IsKnownBase) 416 : BDV(BDV), IsKnownBase(IsKnownBase) { 417 #ifndef NDEBUG 418 // Check consistency between new and old means of checking whether a BDV is 419 // a base. 420 bool MustBeBase = isKnownBaseResult(BDV); 421 assert(!MustBeBase || MustBeBase == IsKnownBase); 422 #endif 423 } 424 }; 425 426 } // end anonymous namespace 427 428 static BaseDefiningValueResult findBaseDefiningValue(Value *I); 429 430 /// Return a base defining value for the 'Index' element of the given vector 431 /// instruction 'I'. If Index is null, returns a BDV for the entire vector 432 /// 'I'. As an optimization, this method will try to determine when the 433 /// element is known to already be a base pointer. If this can be established, 434 /// the second value in the returned pair will be true. Note that either a 435 /// vector or a pointer typed value can be returned. 
For the former, the 436 /// vector returned is a BDV (and possibly a base) of the entire vector 'I'. 437 /// If the later, the return pointer is a BDV (or possibly a base) for the 438 /// particular element in 'I'. 439 static BaseDefiningValueResult 440 findBaseDefiningValueOfVector(Value *I) { 441 // Each case parallels findBaseDefiningValue below, see that code for 442 // detailed motivation. 443 444 if (isa<Argument>(I)) 445 // An incoming argument to the function is a base pointer 446 return BaseDefiningValueResult(I, true); 447 448 if (isa<Constant>(I)) 449 // Base of constant vector consists only of constant null pointers. 450 // For reasoning see similar case inside 'findBaseDefiningValue' function. 451 return BaseDefiningValueResult(ConstantAggregateZero::get(I->getType()), 452 true); 453 454 if (isa<LoadInst>(I)) 455 return BaseDefiningValueResult(I, true); 456 457 if (isa<InsertElementInst>(I)) 458 // We don't know whether this vector contains entirely base pointers or 459 // not. To be conservatively correct, we treat it as a BDV and will 460 // duplicate code as needed to construct a parallel vector of bases. 461 return BaseDefiningValueResult(I, false); 462 463 if (isa<ShuffleVectorInst>(I)) 464 // We don't know whether this vector contains entirely base pointers or 465 // not. To be conservatively correct, we treat it as a BDV and will 466 // duplicate code as needed to construct a parallel vector of bases. 467 // TODO: There a number of local optimizations which could be applied here 468 // for particular sufflevector patterns. 469 return BaseDefiningValueResult(I, false); 470 471 // The behavior of getelementptr instructions is the same for vector and 472 // non-vector data types. 473 if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) 474 return findBaseDefiningValue(GEP->getPointerOperand()); 475 476 // If the pointer comes through a bitcast of a vector of pointers to 477 // a vector of another type of pointer, then look through the bitcast 478 if (auto *BC = dyn_cast<BitCastInst>(I)) 479 return findBaseDefiningValue(BC->getOperand(0)); 480 481 // We assume that functions in the source language only return base 482 // pointers. This should probably be generalized via attributes to support 483 // both source language and internal functions. 484 if (isa<CallInst>(I) || isa<InvokeInst>(I)) 485 return BaseDefiningValueResult(I, true); 486 487 // A PHI or Select is a base defining value. The outer findBasePointer 488 // algorithm is responsible for constructing a base value for this BDV. 489 assert((isa<SelectInst>(I) || isa<PHINode>(I)) && 490 "unknown vector instruction - no base found for vector element"); 491 return BaseDefiningValueResult(I, false); 492 } 493 494 /// Helper function for findBasePointer - Will return a value which either a) 495 /// defines the base pointer for the input, b) blocks the simple search 496 /// (i.e. a PHI or Select of two derived pointers), or c) involves a change 497 /// from pointer to vector type or back. 
498 static BaseDefiningValueResult findBaseDefiningValue(Value *I) { 499 assert(I->getType()->isPtrOrPtrVectorTy() && 500 "Illegal to ask for the base pointer of a non-pointer type"); 501 502 if (I->getType()->isVectorTy()) 503 return findBaseDefiningValueOfVector(I); 504 505 if (isa<Argument>(I)) 506 // An incoming argument to the function is a base pointer 507 // We should have never reached here if this argument isn't an gc value 508 return BaseDefiningValueResult(I, true); 509 510 if (isa<Constant>(I)) { 511 // We assume that objects with a constant base (e.g. a global) can't move 512 // and don't need to be reported to the collector because they are always 513 // live. Besides global references, all kinds of constants (e.g. undef, 514 // constant expressions, null pointers) can be introduced by the inliner or 515 // the optimizer, especially on dynamically dead paths. 516 // Here we treat all of them as having single null base. By doing this we 517 // trying to avoid problems reporting various conflicts in a form of 518 // "phi (const1, const2)" or "phi (const, regular gc ptr)". 519 // See constant.ll file for relevant test cases. 520 521 return BaseDefiningValueResult( 522 ConstantPointerNull::get(cast<PointerType>(I->getType())), true); 523 } 524 525 if (CastInst *CI = dyn_cast<CastInst>(I)) { 526 Value *Def = CI->stripPointerCasts(); 527 // If stripping pointer casts changes the address space there is an 528 // addrspacecast in between. 529 assert(cast<PointerType>(Def->getType())->getAddressSpace() == 530 cast<PointerType>(CI->getType())->getAddressSpace() && 531 "unsupported addrspacecast"); 532 // If we find a cast instruction here, it means we've found a cast which is 533 // not simply a pointer cast (i.e. an inttoptr). We don't know how to 534 // handle int->ptr conversion. 535 assert(!isa<CastInst>(Def) && "shouldn't find another cast here"); 536 return findBaseDefiningValue(Def); 537 } 538 539 if (isa<LoadInst>(I)) 540 // The value loaded is an gc base itself 541 return BaseDefiningValueResult(I, true); 542 543 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) 544 // The base of this GEP is the base 545 return findBaseDefiningValue(GEP->getPointerOperand()); 546 547 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { 548 switch (II->getIntrinsicID()) { 549 default: 550 // fall through to general call handling 551 break; 552 case Intrinsic::experimental_gc_statepoint: 553 llvm_unreachable("statepoints don't produce pointers"); 554 case Intrinsic::experimental_gc_relocate: 555 // Rerunning safepoint insertion after safepoints are already 556 // inserted is not supported. It could probably be made to work, 557 // but why are you doing this? There's no good reason. 558 llvm_unreachable("repeat safepoint insertion is not supported"); 559 case Intrinsic::gcroot: 560 // Currently, this mechanism hasn't been extended to work with gcroot. 561 // There's no reason it couldn't be, but I haven't thought about the 562 // implications much. 563 llvm_unreachable( 564 "interaction with the gcroot mechanism is not supported"); 565 } 566 } 567 // We assume that functions in the source language only return base 568 // pointers. This should probably be generalized via attributes to support 569 // both source language and internal functions. 570 if (isa<CallInst>(I) || isa<InvokeInst>(I)) 571 return BaseDefiningValueResult(I, true); 572 573 // TODO: I have absolutely no idea how to implement this part yet. It's not 574 // necessarily hard, I just haven't really looked at it yet. 
575 assert(!isa<LandingPadInst>(I) && "Landing Pad is unimplemented"); 576 577 if (isa<AtomicCmpXchgInst>(I)) 578 // A CAS is effectively a atomic store and load combined under a 579 // predicate. From the perspective of base pointers, we just treat it 580 // like a load. 581 return BaseDefiningValueResult(I, true); 582 583 assert(!isa<AtomicRMWInst>(I) && "Xchg handled above, all others are " 584 "binary ops which don't apply to pointers"); 585 586 // The aggregate ops. Aggregates can either be in the heap or on the 587 // stack, but in either case, this is simply a field load. As a result, 588 // this is a defining definition of the base just like a load is. 589 if (isa<ExtractValueInst>(I)) 590 return BaseDefiningValueResult(I, true); 591 592 // We should never see an insert vector since that would require we be 593 // tracing back a struct value not a pointer value. 594 assert(!isa<InsertValueInst>(I) && 595 "Base pointer for a struct is meaningless"); 596 597 // An extractelement produces a base result exactly when it's input does. 598 // We may need to insert a parallel instruction to extract the appropriate 599 // element out of the base vector corresponding to the input. Given this, 600 // it's analogous to the phi and select case even though it's not a merge. 601 if (isa<ExtractElementInst>(I)) 602 // Note: There a lot of obvious peephole cases here. This are deliberately 603 // handled after the main base pointer inference algorithm to make writing 604 // test cases to exercise that code easier. 605 return BaseDefiningValueResult(I, false); 606 607 // The last two cases here don't return a base pointer. Instead, they 608 // return a value which dynamically selects from among several base 609 // derived pointers (each with it's own base potentially). It's the job of 610 // the caller to resolve these. 611 assert((isa<SelectInst>(I) || isa<PHINode>(I)) && 612 "missing instruction case in findBaseDefiningValing"); 613 return BaseDefiningValueResult(I, false); 614 } 615 616 /// Returns the base defining value for this value. 617 static Value *findBaseDefiningValueCached(Value *I, DefiningValueMapTy &Cache) { 618 Value *&Cached = Cache[I]; 619 if (!Cached) { 620 Cached = findBaseDefiningValue(I).BDV; 621 LLVM_DEBUG(dbgs() << "fBDV-cached: " << I->getName() << " -> " 622 << Cached->getName() << "\n"); 623 } 624 assert(Cache[I] != nullptr); 625 return Cached; 626 } 627 628 /// Return a base pointer for this value if known. Otherwise, return it's 629 /// base defining value. 630 static Value *findBaseOrBDV(Value *I, DefiningValueMapTy &Cache) { 631 Value *Def = findBaseDefiningValueCached(I, Cache); 632 auto Found = Cache.find(Def); 633 if (Found != Cache.end()) { 634 // Either a base-of relation, or a self reference. Caller must check. 635 return Found->second; 636 } 637 // Only a BDV available 638 return Def; 639 } 640 641 /// This value is a base pointer that is not generated by RS4GC, i.e. it already 642 /// exists in the code. 643 static bool isOriginalBaseResult(Value *V) { 644 // no recursion possible 645 return !isa<PHINode>(V) && !isa<SelectInst>(V) && 646 !isa<ExtractElementInst>(V) && !isa<InsertElementInst>(V) && 647 !isa<ShuffleVectorInst>(V); 648 } 649 650 /// Given the result of a call to findBaseDefiningValue, or findBaseOrBDV, 651 /// is it known to be a base pointer? Or do we need to continue searching. 
652 static bool isKnownBaseResult(Value *V) { 653 if (isOriginalBaseResult(V)) 654 return true; 655 if (isa<Instruction>(V) && 656 cast<Instruction>(V)->getMetadata("is_base_value")) { 657 // This is a previously inserted base phi or select. We know 658 // that this is a base value. 659 return true; 660 } 661 662 // We need to keep searching 663 return false; 664 } 665 666 // Returns true if First and Second values are both scalar or both vector. 667 static bool areBothVectorOrScalar(Value *First, Value *Second) { 668 return isa<VectorType>(First->getType()) == 669 isa<VectorType>(Second->getType()); 670 } 671 672 namespace { 673 674 /// Models the state of a single base defining value in the findBasePointer 675 /// algorithm for determining where a new instruction is needed to propagate 676 /// the base of this BDV. 677 class BDVState { 678 public: 679 enum StatusTy { 680 // Starting state of lattice 681 Unknown, 682 // Some specific base value -- does *not* mean that instruction 683 // propagates the base of the object 684 // ex: gep %arg, 16 -> %arg is the base value 685 Base, 686 // Need to insert a node to represent a merge. 687 Conflict 688 }; 689 690 BDVState() { 691 llvm_unreachable("missing state in map"); 692 } 693 694 explicit BDVState(Value *OriginalValue) 695 : OriginalValue(OriginalValue) {} 696 explicit BDVState(Value *OriginalValue, StatusTy Status, Value *BaseValue = nullptr) 697 : OriginalValue(OriginalValue), Status(Status), BaseValue(BaseValue) { 698 assert(Status != Base || BaseValue); 699 } 700 701 StatusTy getStatus() const { return Status; } 702 Value *getOriginalValue() const { return OriginalValue; } 703 Value *getBaseValue() const { return BaseValue; } 704 705 bool isBase() const { return getStatus() == Base; } 706 bool isUnknown() const { return getStatus() == Unknown; } 707 bool isConflict() const { return getStatus() == Conflict; } 708 709 bool operator==(const BDVState &Other) const { 710 return OriginalValue == OriginalValue && BaseValue == Other.BaseValue && 711 Status == Other.Status; 712 } 713 714 bool operator!=(const BDVState &other) const { return !(*this == other); } 715 716 LLVM_DUMP_METHOD 717 void dump() const { 718 print(dbgs()); 719 dbgs() << '\n'; 720 } 721 722 void print(raw_ostream &OS) const { 723 switch (getStatus()) { 724 case Unknown: 725 OS << "U"; 726 break; 727 case Base: 728 OS << "B"; 729 break; 730 case Conflict: 731 OS << "C"; 732 break; 733 } 734 OS << " (base " << getBaseValue() << " - " 735 << (getBaseValue() ? getBaseValue()->getName() : "nullptr") << ")" 736 << " for " << OriginalValue->getName() << ":"; 737 } 738 739 private: 740 AssertingVH<Value> OriginalValue; // instruction this state corresponds to 741 StatusTy Status = Unknown; 742 AssertingVH<Value> BaseValue = nullptr; // Non-null only if Status == Base. 
743 }; 744 745 } // end anonymous namespace 746 747 #ifndef NDEBUG 748 static raw_ostream &operator<<(raw_ostream &OS, const BDVState &State) { 749 State.print(OS); 750 return OS; 751 } 752 #endif 753 754 static BDVState::StatusTy meet(const BDVState::StatusTy &LHS, 755 const BDVState::StatusTy &RHS) { 756 switch (LHS) { 757 case BDVState::Unknown: 758 return RHS; 759 case BDVState::Base: 760 switch (RHS) { 761 case BDVState::Unknown: 762 case BDVState::Base: 763 return BDVState::Base; 764 case BDVState::Conflict: 765 return BDVState::Conflict; 766 }; 767 llvm_unreachable("covered switch"); 768 case BDVState::Conflict: 769 return BDVState::Conflict; 770 } 771 llvm_unreachable("covered switch"); 772 } 773 774 // Values of type BDVState form a lattice, and this function implements the meet 775 // operation. 776 static BDVState meetBDVState(const BDVState &LHS, const BDVState &RHS) { 777 auto NewStatus = meet(LHS.getStatus(), RHS.getStatus()); 778 assert(NewStatus == meet(RHS.getStatus(), LHS.getStatus())); 779 780 Value *BaseValue = LHS.getStatus() == BDVState::Base ? 781 LHS.getBaseValue() : RHS.getBaseValue(); 782 if (LHS.getStatus() == BDVState::Base && RHS.getStatus() == BDVState::Base && 783 LHS.getBaseValue() != RHS.getBaseValue()) { 784 NewStatus = BDVState::Conflict; 785 BaseValue = nullptr; 786 } 787 return BDVState(LHS.getOriginalValue(), NewStatus, BaseValue); 788 } 789 790 /// For a given value or instruction, figure out what base ptr its derived from. 791 /// For gc objects, this is simply itself. On success, returns a value which is 792 /// the base pointer. (This is reliable and can be used for relocation.) On 793 /// failure, returns nullptr. 794 static Value *findBasePointer(Value *I, DefiningValueMapTy &Cache) { 795 Value *Def = findBaseOrBDV(I, Cache); 796 797 if (isKnownBaseResult(Def) && areBothVectorOrScalar(Def, I)) 798 return Def; 799 800 // Here's the rough algorithm: 801 // - For every SSA value, construct a mapping to either an actual base 802 // pointer or a PHI which obscures the base pointer. 803 // - Construct a mapping from PHI to unknown TOP state. Use an 804 // optimistic algorithm to propagate base pointer information. Lattice 805 // looks like: 806 // UNKNOWN 807 // b1 b2 b3 b4 808 // CONFLICT 809 // When algorithm terminates, all PHIs will either have a single concrete 810 // base or be in a conflict state. 811 // - For every conflict, insert a dummy PHI node without arguments. Add 812 // these to the base[Instruction] = BasePtr mapping. For every 813 // non-conflict, add the actual base. 814 // - For every conflict, add arguments for the base[a] of each input 815 // arguments. 816 // 817 // Note: A simpler form of this would be to add the conflict form of all 818 // PHIs without running the optimistic algorithm. This would be 819 // analogous to pessimistic data flow and would likely lead to an 820 // overall worse solution. 821 822 #ifndef NDEBUG 823 auto isExpectedBDVType = [](Value *BDV) { 824 return isa<PHINode>(BDV) || isa<SelectInst>(BDV) || 825 isa<ExtractElementInst>(BDV) || isa<InsertElementInst>(BDV) || 826 isa<ShuffleVectorInst>(BDV); 827 }; 828 #endif 829 830 // Once populated, will contain a mapping from each potentially non-base BDV 831 // to a lattice value (described above) which corresponds to that BDV. 832 // We use the order of insertion (DFS over the def/use graph) to provide a 833 // stable deterministic ordering for visiting DenseMaps (which are unordered) 834 // below. This is important for deterministic compilation. 
835 MapVector<Value *, BDVState> States; 836 837 #ifndef NDEBUG 838 auto VerifyStates = [&]() { 839 for (auto &Entry : States) { 840 assert(Entry.first == Entry.second.getOriginalValue()); 841 } 842 }; 843 #endif 844 845 auto visitBDVOperands = [](Value *BDV, std::function<void (Value*)> F) { 846 if (PHINode *PN = dyn_cast<PHINode>(BDV)) { 847 for (Value *InVal : PN->incoming_values()) 848 F(InVal); 849 } else if (SelectInst *SI = dyn_cast<SelectInst>(BDV)) { 850 F(SI->getTrueValue()); 851 F(SI->getFalseValue()); 852 } else if (auto *EE = dyn_cast<ExtractElementInst>(BDV)) { 853 F(EE->getVectorOperand()); 854 } else if (auto *IE = dyn_cast<InsertElementInst>(BDV)) { 855 F(IE->getOperand(0)); 856 F(IE->getOperand(1)); 857 } else if (auto *SV = dyn_cast<ShuffleVectorInst>(BDV)) { 858 F(SV->getOperand(0)); 859 F(SV->getOperand(1)); 860 } else { 861 llvm_unreachable("unexpected BDV type"); 862 } 863 }; 864 865 866 // Recursively fill in all base defining values reachable from the initial 867 // one for which we don't already know a definite base value for 868 /* scope */ { 869 SmallVector<Value*, 16> Worklist; 870 Worklist.push_back(Def); 871 States.insert({Def, BDVState(Def)}); 872 while (!Worklist.empty()) { 873 Value *Current = Worklist.pop_back_val(); 874 assert(!isOriginalBaseResult(Current) && "why did it get added?"); 875 876 auto visitIncomingValue = [&](Value *InVal) { 877 Value *Base = findBaseOrBDV(InVal, Cache); 878 if (isKnownBaseResult(Base) && areBothVectorOrScalar(Base, InVal)) 879 // Known bases won't need new instructions introduced and can be 880 // ignored safely. However, this can only be done when InVal and Base 881 // are both scalar or both vector. Otherwise, we need to find a 882 // correct BDV for InVal, by creating an entry in the lattice 883 // (States). 884 return; 885 assert(isExpectedBDVType(Base) && "the only non-base values " 886 "we see should be base defining values"); 887 if (States.insert(std::make_pair(Base, BDVState(Base))).second) 888 Worklist.push_back(Base); 889 }; 890 891 visitBDVOperands(Current, visitIncomingValue); 892 } 893 } 894 895 #ifndef NDEBUG 896 VerifyStates(); 897 LLVM_DEBUG(dbgs() << "States after initialization:\n"); 898 for (auto Pair : States) { 899 LLVM_DEBUG(dbgs() << " " << Pair.second << " for " << *Pair.first << "\n"); 900 } 901 #endif 902 903 // Return a phi state for a base defining value. We'll generate a new 904 // base state for known bases and expect to find a cached state otherwise. 905 auto GetStateForBDV = [&](Value *BaseValue, Value *Input) { 906 if (isKnownBaseResult(BaseValue) && areBothVectorOrScalar(BaseValue, Input)) 907 return BDVState(BaseValue, BDVState::Base, BaseValue); 908 auto I = States.find(BaseValue); 909 assert(I != States.end() && "lookup failed!"); 910 return I->second; 911 }; 912 913 bool Progress = true; 914 while (Progress) { 915 #ifndef NDEBUG 916 const size_t OldSize = States.size(); 917 #endif 918 Progress = false; 919 // We're only changing values in this loop, thus safe to keep iterators. 920 // Since this is computing a fixed point, the order of visit does not 921 // effect the result. TODO: We could use a worklist here and make this run 922 // much faster. 923 for (auto Pair : States) { 924 Value *BDV = Pair.first; 925 // Only values that do not have known bases or those that have differing 926 // type (scalar versus vector) from a possible known base should be in the 927 // lattice. 
928 assert((!isKnownBaseResult(BDV) || 929 !areBothVectorOrScalar(BDV, Pair.second.getBaseValue())) && 930 "why did it get added?"); 931 932 BDVState NewState(BDV); 933 visitBDVOperands(BDV, [&] (Value *Op) { 934 Value *BDV = findBaseOrBDV(Op, Cache); 935 auto OpState = GetStateForBDV(BDV, Op); 936 NewState = meetBDVState(NewState, OpState); 937 }); 938 939 BDVState OldState = States[BDV]; 940 if (OldState != NewState) { 941 Progress = true; 942 States[BDV] = NewState; 943 } 944 } 945 946 assert(OldSize == States.size() && 947 "fixed point shouldn't be adding any new nodes to state"); 948 } 949 950 #ifndef NDEBUG 951 VerifyStates(); 952 LLVM_DEBUG(dbgs() << "States after meet iteration:\n"); 953 for (auto Pair : States) { 954 LLVM_DEBUG(dbgs() << " " << Pair.second << " for " << *Pair.first << "\n"); 955 } 956 #endif 957 958 // Handle all instructions that have a vector BDV, but the instruction itself 959 // is of scalar type. 960 for (auto Pair : States) { 961 Instruction *I = cast<Instruction>(Pair.first); 962 BDVState State = Pair.second; 963 auto *BaseValue = State.getBaseValue(); 964 // Only values that do not have known bases or those that have differing 965 // type (scalar versus vector) from a possible known base should be in the 966 // lattice. 967 assert((!isKnownBaseResult(I) || !areBothVectorOrScalar(I, BaseValue)) && 968 "why did it get added?"); 969 assert(!State.isUnknown() && "Optimistic algorithm didn't complete!"); 970 971 if (!State.isBase() || !isa<VectorType>(BaseValue->getType())) 972 continue; 973 // extractelement instructions are a bit special in that we may need to 974 // insert an extract even when we know an exact base for the instruction. 975 // The problem is that we need to convert from a vector base to a scalar 976 // base for the particular indice we're interested in. 977 if (isa<ExtractElementInst>(I)) { 978 auto *EE = cast<ExtractElementInst>(I); 979 // TODO: In many cases, the new instruction is just EE itself. We should 980 // exploit this, but can't do it here since it would break the invariant 981 // about the BDV not being known to be a base. 982 auto *BaseInst = ExtractElementInst::Create( 983 State.getBaseValue(), EE->getIndexOperand(), "base_ee", EE); 984 BaseInst->setMetadata("is_base_value", MDNode::get(I->getContext(), {})); 985 States[I] = BDVState(I, BDVState::Base, BaseInst); 986 } else if (!isa<VectorType>(I->getType())) { 987 // We need to handle cases that have a vector base but the instruction is 988 // a scalar type (these could be phis or selects or any instruction that 989 // are of scalar type, but the base can be a vector type). We 990 // conservatively set this as conflict. Setting the base value for these 991 // conflicts is handled in the next loop which traverses States. 992 States[I] = BDVState(I, BDVState::Conflict); 993 } 994 } 995 996 #ifndef NDEBUG 997 VerifyStates(); 998 #endif 999 1000 // Insert Phis for all conflicts 1001 // TODO: adjust naming patterns to avoid this order of iteration dependency 1002 for (auto Pair : States) { 1003 Instruction *I = cast<Instruction>(Pair.first); 1004 BDVState State = Pair.second; 1005 // Only values that do not have known bases or those that have differing 1006 // type (scalar versus vector) from a possible known base should be in the 1007 // lattice. 
1008 assert((!isKnownBaseResult(I) || !areBothVectorOrScalar(I, State.getBaseValue())) && 1009 "why did it get added?"); 1010 assert(!State.isUnknown() && "Optimistic algorithm didn't complete!"); 1011 1012 // Since we're joining a vector and scalar base, they can never be the 1013 // same. As a result, we should always see insert element having reached 1014 // the conflict state. 1015 assert(!isa<InsertElementInst>(I) || State.isConflict()); 1016 1017 if (!State.isConflict()) 1018 continue; 1019 1020 /// Create and insert a new instruction which will represent the base of 1021 /// the given instruction 'I'. 1022 auto MakeBaseInstPlaceholder = [](Instruction *I) -> Instruction* { 1023 if (isa<PHINode>(I)) { 1024 BasicBlock *BB = I->getParent(); 1025 int NumPreds = pred_size(BB); 1026 assert(NumPreds > 0 && "how did we reach here"); 1027 std::string Name = suffixed_name_or(I, ".base", "base_phi"); 1028 return PHINode::Create(I->getType(), NumPreds, Name, I); 1029 } else if (SelectInst *SI = dyn_cast<SelectInst>(I)) { 1030 // The undef will be replaced later 1031 UndefValue *Undef = UndefValue::get(SI->getType()); 1032 std::string Name = suffixed_name_or(I, ".base", "base_select"); 1033 return SelectInst::Create(SI->getCondition(), Undef, Undef, Name, SI); 1034 } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) { 1035 UndefValue *Undef = UndefValue::get(EE->getVectorOperand()->getType()); 1036 std::string Name = suffixed_name_or(I, ".base", "base_ee"); 1037 return ExtractElementInst::Create(Undef, EE->getIndexOperand(), Name, 1038 EE); 1039 } else if (auto *IE = dyn_cast<InsertElementInst>(I)) { 1040 UndefValue *VecUndef = UndefValue::get(IE->getOperand(0)->getType()); 1041 UndefValue *ScalarUndef = UndefValue::get(IE->getOperand(1)->getType()); 1042 std::string Name = suffixed_name_or(I, ".base", "base_ie"); 1043 return InsertElementInst::Create(VecUndef, ScalarUndef, 1044 IE->getOperand(2), Name, IE); 1045 } else { 1046 auto *SV = cast<ShuffleVectorInst>(I); 1047 UndefValue *VecUndef = UndefValue::get(SV->getOperand(0)->getType()); 1048 std::string Name = suffixed_name_or(I, ".base", "base_sv"); 1049 return new ShuffleVectorInst(VecUndef, VecUndef, SV->getShuffleMask(), 1050 Name, SV); 1051 } 1052 }; 1053 Instruction *BaseInst = MakeBaseInstPlaceholder(I); 1054 // Add metadata marking this as a base value 1055 BaseInst->setMetadata("is_base_value", MDNode::get(I->getContext(), {})); 1056 States[I] = BDVState(I, BDVState::Conflict, BaseInst); 1057 } 1058 1059 #ifndef NDEBUG 1060 VerifyStates(); 1061 #endif 1062 1063 // Returns a instruction which produces the base pointer for a given 1064 // instruction. The instruction is assumed to be an input to one of the BDVs 1065 // seen in the inference algorithm above. As such, we must either already 1066 // know it's base defining value is a base, or have inserted a new 1067 // instruction to propagate the base of it's BDV and have entered that newly 1068 // introduced instruction into the state table. In either case, we are 1069 // assured to be able to determine an instruction which produces it's base 1070 // pointer. 1071 auto getBaseForInput = [&](Value *Input, Instruction *InsertPt) { 1072 Value *BDV = findBaseOrBDV(Input, Cache); 1073 Value *Base = nullptr; 1074 if (isKnownBaseResult(BDV) && areBothVectorOrScalar(BDV, Input)) { 1075 Base = BDV; 1076 } else { 1077 // Either conflict or base. 
1078 assert(States.count(BDV)); 1079 Base = States[BDV].getBaseValue(); 1080 } 1081 assert(Base && "Can't be null"); 1082 // The cast is needed since base traversal may strip away bitcasts 1083 if (Base->getType() != Input->getType() && InsertPt) 1084 Base = new BitCastInst(Base, Input->getType(), "cast", InsertPt); 1085 return Base; 1086 }; 1087 1088 // Fixup all the inputs of the new PHIs. Visit order needs to be 1089 // deterministic and predictable because we're naming newly created 1090 // instructions. 1091 for (auto Pair : States) { 1092 Instruction *BDV = cast<Instruction>(Pair.first); 1093 BDVState State = Pair.second; 1094 1095 // Only values that do not have known bases or those that have differing 1096 // type (scalar versus vector) from a possible known base should be in the 1097 // lattice. 1098 assert((!isKnownBaseResult(BDV) || 1099 !areBothVectorOrScalar(BDV, State.getBaseValue())) && 1100 "why did it get added?"); 1101 assert(!State.isUnknown() && "Optimistic algorithm didn't complete!"); 1102 if (!State.isConflict()) 1103 continue; 1104 1105 if (PHINode *BasePHI = dyn_cast<PHINode>(State.getBaseValue())) { 1106 PHINode *PN = cast<PHINode>(BDV); 1107 unsigned NumPHIValues = PN->getNumIncomingValues(); 1108 for (unsigned i = 0; i < NumPHIValues; i++) { 1109 Value *InVal = PN->getIncomingValue(i); 1110 BasicBlock *InBB = PN->getIncomingBlock(i); 1111 1112 // If we've already seen InBB, add the same incoming value 1113 // we added for it earlier. The IR verifier requires phi 1114 // nodes with multiple entries from the same basic block 1115 // to have the same incoming value for each of those 1116 // entries. If we don't do this check here and basephi 1117 // has a different type than base, we'll end up adding two 1118 // bitcasts (and hence two distinct values) as incoming 1119 // values for the same basic block. 1120 1121 int BlockIndex = BasePHI->getBasicBlockIndex(InBB); 1122 if (BlockIndex != -1) { 1123 Value *OldBase = BasePHI->getIncomingValue(BlockIndex); 1124 BasePHI->addIncoming(OldBase, InBB); 1125 1126 #ifndef NDEBUG 1127 Value *Base = getBaseForInput(InVal, nullptr); 1128 // In essence this assert states: the only way two values 1129 // incoming from the same basic block may be different is by 1130 // being different bitcasts of the same value. A cleanup 1131 // that remains TODO is changing findBaseOrBDV to return an 1132 // llvm::Value of the correct type (and still remain pure). 1133 // This will remove the need to add bitcasts. 1134 assert(Base->stripPointerCasts() == OldBase->stripPointerCasts() && 1135 "Sanity -- findBaseOrBDV should be pure!"); 1136 #endif 1137 continue; 1138 } 1139 1140 // Find the instruction which produces the base for each input. We may 1141 // need to insert a bitcast in the incoming block. 1142 // TODO: Need to split critical edges if insertion is needed 1143 Value *Base = getBaseForInput(InVal, InBB->getTerminator()); 1144 BasePHI->addIncoming(Base, InBB); 1145 } 1146 assert(BasePHI->getNumIncomingValues() == NumPHIValues); 1147 } else if (SelectInst *BaseSI = 1148 dyn_cast<SelectInst>(State.getBaseValue())) { 1149 SelectInst *SI = cast<SelectInst>(BDV); 1150 1151 // Find the instruction which produces the base for each input. 1152 // We may need to insert a bitcast. 
1153 BaseSI->setTrueValue(getBaseForInput(SI->getTrueValue(), BaseSI)); 1154 BaseSI->setFalseValue(getBaseForInput(SI->getFalseValue(), BaseSI)); 1155 } else if (auto *BaseEE = 1156 dyn_cast<ExtractElementInst>(State.getBaseValue())) { 1157 Value *InVal = cast<ExtractElementInst>(BDV)->getVectorOperand(); 1158 // Find the instruction which produces the base for each input. We may 1159 // need to insert a bitcast. 1160 BaseEE->setOperand(0, getBaseForInput(InVal, BaseEE)); 1161 } else if (auto *BaseIE = dyn_cast<InsertElementInst>(State.getBaseValue())){ 1162 auto *BdvIE = cast<InsertElementInst>(BDV); 1163 auto UpdateOperand = [&](int OperandIdx) { 1164 Value *InVal = BdvIE->getOperand(OperandIdx); 1165 Value *Base = getBaseForInput(InVal, BaseIE); 1166 BaseIE->setOperand(OperandIdx, Base); 1167 }; 1168 UpdateOperand(0); // vector operand 1169 UpdateOperand(1); // scalar operand 1170 } else { 1171 auto *BaseSV = cast<ShuffleVectorInst>(State.getBaseValue()); 1172 auto *BdvSV = cast<ShuffleVectorInst>(BDV); 1173 auto UpdateOperand = [&](int OperandIdx) { 1174 Value *InVal = BdvSV->getOperand(OperandIdx); 1175 Value *Base = getBaseForInput(InVal, BaseSV); 1176 BaseSV->setOperand(OperandIdx, Base); 1177 }; 1178 UpdateOperand(0); // vector operand 1179 UpdateOperand(1); // vector operand 1180 } 1181 } 1182 1183 #ifndef NDEBUG 1184 VerifyStates(); 1185 #endif 1186 1187 // Cache all of our results so we can cheaply reuse them 1188 // NOTE: This is actually two caches: one of the base defining value 1189 // relation and one of the base pointer relation! FIXME 1190 for (auto Pair : States) { 1191 auto *BDV = Pair.first; 1192 Value *Base = Pair.second.getBaseValue(); 1193 assert(BDV && Base); 1194 // Only values that do not have known bases or those that have differing 1195 // type (scalar versus vector) from a possible known base should be in the 1196 // lattice. 1197 assert((!isKnownBaseResult(BDV) || !areBothVectorOrScalar(BDV, Base)) && 1198 "why did it get added?"); 1199 1200 LLVM_DEBUG( 1201 dbgs() << "Updating base value cache" 1202 << " for: " << BDV->getName() << " from: " 1203 << (Cache.count(BDV) ? Cache[BDV]->getName().str() : "none") 1204 << " to: " << Base->getName() << "\n"); 1205 1206 if (Cache.count(BDV)) { 1207 assert(isKnownBaseResult(Base) && 1208 "must be something we 'know' is a base pointer"); 1209 // Once we transition from the BDV relation being store in the Cache to 1210 // the base relation being stored, it must be stable 1211 assert((!isKnownBaseResult(Cache[BDV]) || Cache[BDV] == Base) && 1212 "base relation should be stable"); 1213 } 1214 Cache[BDV] = Base; 1215 } 1216 assert(Cache.count(Def)); 1217 return Cache[Def]; 1218 } 1219 1220 // For a set of live pointers (base and/or derived), identify the base 1221 // pointer of the object which they are derived from. This routine will 1222 // mutate the IR graph as needed to make the 'base' pointer live at the 1223 // definition site of 'derived'. This ensures that any use of 'derived' can 1224 // also use 'base'. This may involve the insertion of a number of 1225 // additional PHI nodes. 1226 // 1227 // preconditions: live is a set of pointer type Values 1228 // 1229 // side effects: may insert PHI nodes into the existing CFG, will preserve 1230 // CFG, will not remove or mutate any existing nodes 1231 // 1232 // post condition: PointerToBase contains one (derived, base) pair for every 1233 // pointer in live. Note that derived can be equal to base if the original 1234 // pointer was a base pointer. 
1235 static void 1236 findBasePointers(const StatepointLiveSetTy &live, 1237 MapVector<Value *, Value *> &PointerToBase, 1238 DominatorTree *DT, DefiningValueMapTy &DVCache) { 1239 for (Value *ptr : live) { 1240 Value *base = findBasePointer(ptr, DVCache); 1241 assert(base && "failed to find base pointer"); 1242 PointerToBase[ptr] = base; 1243 assert((!isa<Instruction>(base) || !isa<Instruction>(ptr) || 1244 DT->dominates(cast<Instruction>(base)->getParent(), 1245 cast<Instruction>(ptr)->getParent())) && 1246 "The base we found better dominate the derived pointer"); 1247 } 1248 } 1249 1250 /// Find the required based pointers (and adjust the live set) for the given 1251 /// parse point. 1252 static void findBasePointers(DominatorTree &DT, DefiningValueMapTy &DVCache, 1253 CallBase *Call, 1254 PartiallyConstructedSafepointRecord &result) { 1255 MapVector<Value *, Value *> PointerToBase; 1256 StatepointLiveSetTy PotentiallyDerivedPointers = result.LiveSet; 1257 // We assume that all pointers passed to deopt are base pointers; as an 1258 // optimization, we can use this to avoid seperately materializing the base 1259 // pointer graph. This is only relevant since we're very conservative about 1260 // generating new conflict nodes during base pointer insertion. If we were 1261 // smarter there, this would be irrelevant. 1262 if (auto Opt = Call->getOperandBundle(LLVMContext::OB_deopt)) 1263 for (Value *V : Opt->Inputs) { 1264 if (!PotentiallyDerivedPointers.count(V)) 1265 continue; 1266 PotentiallyDerivedPointers.remove(V); 1267 PointerToBase[V] = V; 1268 } 1269 findBasePointers(PotentiallyDerivedPointers, PointerToBase, &DT, DVCache); 1270 1271 if (PrintBasePointers) { 1272 errs() << "Base Pairs (w/o Relocation):\n"; 1273 for (auto &Pair : PointerToBase) { 1274 errs() << " derived "; 1275 Pair.first->printAsOperand(errs(), false); 1276 errs() << " base "; 1277 Pair.second->printAsOperand(errs(), false); 1278 errs() << "\n";; 1279 } 1280 } 1281 1282 result.PointerToBase = PointerToBase; 1283 } 1284 1285 /// Given an updated version of the dataflow liveness results, update the 1286 /// liveset and base pointer maps for the call site CS. 1287 static void recomputeLiveInValues(GCPtrLivenessData &RevisedLivenessData, 1288 CallBase *Call, 1289 PartiallyConstructedSafepointRecord &result); 1290 1291 static void recomputeLiveInValues( 1292 Function &F, DominatorTree &DT, ArrayRef<CallBase *> toUpdate, 1293 MutableArrayRef<struct PartiallyConstructedSafepointRecord> records) { 1294 // TODO-PERF: reuse the original liveness, then simply run the dataflow 1295 // again. The old values are still live and will help it stabilize quickly. 1296 GCPtrLivenessData RevisedLivenessData; 1297 computeLiveInValues(DT, F, RevisedLivenessData); 1298 for (size_t i = 0; i < records.size(); i++) { 1299 struct PartiallyConstructedSafepointRecord &info = records[i]; 1300 recomputeLiveInValues(RevisedLivenessData, toUpdate[i], info); 1301 } 1302 } 1303 1304 // When inserting gc.relocate and gc.result calls, we need to ensure there are 1305 // no uses of the original value / return value between the gc.statepoint and 1306 // the gc.relocate / gc.result call. One case which can arise is a phi node 1307 // starting one of the successor blocks. We also need to be able to insert the 1308 // gc.relocates only on the path which goes through the statepoint. We might 1309 // need to split an edge to make this possible. 
1310 static BasicBlock * 1311 normalizeForInvokeSafepoint(BasicBlock *BB, BasicBlock *InvokeParent, 1312 DominatorTree &DT) { 1313 BasicBlock *Ret = BB; 1314 if (!BB->getUniquePredecessor()) 1315 Ret = SplitBlockPredecessors(BB, InvokeParent, "", &DT); 1316 1317 // Now that 'Ret' has unique predecessor we can safely remove all phi nodes 1318 // from it 1319 FoldSingleEntryPHINodes(Ret); 1320 assert(!isa<PHINode>(Ret->begin()) && 1321 "All PHI nodes should have been removed!"); 1322 1323 // At this point, we can safely insert a gc.relocate or gc.result as the first 1324 // instruction in Ret if needed. 1325 return Ret; 1326 } 1327 1328 // Create new attribute set containing only attributes which can be transferred 1329 // from original call to the safepoint. 1330 static AttributeList legalizeCallAttributes(LLVMContext &Ctx, 1331 AttributeList AL) { 1332 if (AL.isEmpty()) 1333 return AL; 1334 1335 // Remove the readonly, readnone, and statepoint function attributes. 1336 AttrBuilder FnAttrs = AL.getFnAttributes(); 1337 FnAttrs.removeAttribute(Attribute::ReadNone); 1338 FnAttrs.removeAttribute(Attribute::ReadOnly); 1339 for (Attribute A : AL.getFnAttributes()) { 1340 if (isStatepointDirectiveAttr(A)) 1341 FnAttrs.remove(A); 1342 } 1343 1344 // Just skip parameter and return attributes for now 1345 return AttributeList::get(Ctx, AttributeList::FunctionIndex, 1346 AttributeSet::get(Ctx, FnAttrs)); 1347 } 1348 1349 /// Helper function to place all gc relocates necessary for the given 1350 /// statepoint. 1351 /// Inputs: 1352 /// liveVariables - list of variables to be relocated. 1353 /// basePtrs - base pointers. 1354 /// statepointToken - statepoint instruction to which relocates should be 1355 /// bound. 1356 /// Builder - Llvm IR builder to be used to construct new calls. 1357 static void CreateGCRelocates(ArrayRef<Value *> LiveVariables, 1358 ArrayRef<Value *> BasePtrs, 1359 Instruction *StatepointToken, 1360 IRBuilder<> &Builder) { 1361 if (LiveVariables.empty()) 1362 return; 1363 1364 auto FindIndex = [](ArrayRef<Value *> LiveVec, Value *Val) { 1365 auto ValIt = llvm::find(LiveVec, Val); 1366 assert(ValIt != LiveVec.end() && "Val not found in LiveVec!"); 1367 size_t Index = std::distance(LiveVec.begin(), ValIt); 1368 assert(Index < LiveVec.size() && "Bug in std::find?"); 1369 return Index; 1370 }; 1371 Module *M = StatepointToken->getModule(); 1372 1373 // All gc_relocate are generated as i8 addrspace(1)* (or a vector type whose 1374 // element type is i8 addrspace(1)*). We originally generated unique 1375 // declarations for each pointer type, but this proved problematic because 1376 // the intrinsic mangling code is incomplete and fragile. Since we're moving 1377 // towards a single unified pointer type anyways, we can just cast everything 1378 // to an i8* of the right address space. A bitcast is added later to convert 1379 // gc_relocate to the actual value's type. 1380 auto getGCRelocateDecl = [&] (Type *Ty) { 1381 assert(isHandledGCPointerType(Ty)); 1382 auto AS = Ty->getScalarType()->getPointerAddressSpace(); 1383 Type *NewTy = Type::getInt8PtrTy(M->getContext(), AS); 1384 if (auto *VT = dyn_cast<VectorType>(Ty)) 1385 NewTy = FixedVectorType::get(NewTy, 1386 cast<FixedVectorType>(VT)->getNumElements()); 1387 return Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_relocate, 1388 {NewTy}); 1389 }; 1390 1391 // Lazily populated map from input types to the canonicalized form mentioned 1392 // in the comment above. This should probably be cached somewhere more 1393 // broadly. 
1394 DenseMap<Type *, Function *> TypeToDeclMap; 1395 1396 for (unsigned i = 0; i < LiveVariables.size(); i++) { 1397 // Generate the gc.relocate call and save the result 1398 Value *BaseIdx = Builder.getInt32(FindIndex(LiveVariables, BasePtrs[i])); 1399 Value *LiveIdx = Builder.getInt32(i); 1400 1401 Type *Ty = LiveVariables[i]->getType(); 1402 if (!TypeToDeclMap.count(Ty)) 1403 TypeToDeclMap[Ty] = getGCRelocateDecl(Ty); 1404 Function *GCRelocateDecl = TypeToDeclMap[Ty]; 1405 1406 // only specify a debug name if we can give a useful one 1407 CallInst *Reloc = Builder.CreateCall( 1408 GCRelocateDecl, {StatepointToken, BaseIdx, LiveIdx}, 1409 suffixed_name_or(LiveVariables[i], ".relocated", "")); 1410 // Trick CodeGen into thinking there are lots of free registers at this 1411 // fake call. 1412 Reloc->setCallingConv(CallingConv::Cold); 1413 } 1414 } 1415 1416 namespace { 1417 1418 /// This struct is used to defer RAUWs and `eraseFromParent` s. Using this 1419 /// avoids having to worry about keeping around dangling pointers to Values. 1420 class DeferredReplacement { 1421 AssertingVH<Instruction> Old; 1422 AssertingVH<Instruction> New; 1423 bool IsDeoptimize = false; 1424 1425 DeferredReplacement() = default; 1426 1427 public: 1428 static DeferredReplacement createRAUW(Instruction *Old, Instruction *New) { 1429 assert(Old != New && Old && New && 1430 "Cannot RAUW equal values or to / from null!"); 1431 1432 DeferredReplacement D; 1433 D.Old = Old; 1434 D.New = New; 1435 return D; 1436 } 1437 1438 static DeferredReplacement createDelete(Instruction *ToErase) { 1439 DeferredReplacement D; 1440 D.Old = ToErase; 1441 return D; 1442 } 1443 1444 static DeferredReplacement createDeoptimizeReplacement(Instruction *Old) { 1445 #ifndef NDEBUG 1446 auto *F = cast<CallInst>(Old)->getCalledFunction(); 1447 assert(F && F->getIntrinsicID() == Intrinsic::experimental_deoptimize && 1448 "Only way to construct a deoptimize deferred replacement"); 1449 #endif 1450 DeferredReplacement D; 1451 D.Old = Old; 1452 D.IsDeoptimize = true; 1453 return D; 1454 } 1455 1456 /// Does the task represented by this instance. 1457 void doReplacement() { 1458 Instruction *OldI = Old; 1459 Instruction *NewI = New; 1460 1461 assert(OldI != NewI && "Disallowed at construction?!"); 1462 assert((!IsDeoptimize || !New) && 1463 "Deoptimize intrinsics are not replaced!"); 1464 1465 Old = nullptr; 1466 New = nullptr; 1467 1468 if (NewI) 1469 OldI->replaceAllUsesWith(NewI); 1470 1471 if (IsDeoptimize) { 1472 // Note: we've inserted instructions, so the call to llvm.deoptimize may 1473 // not necessarily be followed by the matching return. 1474 auto *RI = cast<ReturnInst>(OldI->getParent()->getTerminator()); 1475 new UnreachableInst(RI->getContext(), RI); 1476 RI->eraseFromParent(); 1477 } 1478 1479 OldI->eraseFromParent(); 1480 } 1481 }; 1482 1483 } // end anonymous namespace 1484 1485 static StringRef getDeoptLowering(CallBase *Call) { 1486 const char *DeoptLowering = "deopt-lowering"; 1487 if (Call->hasFnAttr(DeoptLowering)) { 1488 // FIXME: Calls have a *really* confusing interface around attributes 1489 // with values. 
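    // For illustration only (hypothetical IR), the directive may be spelled
    // either on the call site or on the callee:
    //   call void @foo() #0 [ "deopt"(i32 0) ]
    //   attributes #0 = { "deopt-lowering"="live-in" }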
1490 const AttributeList &CSAS = Call->getAttributes(); 1491 if (CSAS.hasAttribute(AttributeList::FunctionIndex, DeoptLowering)) 1492 return CSAS.getAttribute(AttributeList::FunctionIndex, DeoptLowering) 1493 .getValueAsString(); 1494 Function *F = Call->getCalledFunction(); 1495 assert(F && F->hasFnAttribute(DeoptLowering)); 1496 return F->getFnAttribute(DeoptLowering).getValueAsString(); 1497 } 1498 return "live-through"; 1499 } 1500 1501 static void 1502 makeStatepointExplicitImpl(CallBase *Call, /* to replace */ 1503 const SmallVectorImpl<Value *> &BasePtrs, 1504 const SmallVectorImpl<Value *> &LiveVariables, 1505 PartiallyConstructedSafepointRecord &Result, 1506 std::vector<DeferredReplacement> &Replacements) { 1507 assert(BasePtrs.size() == LiveVariables.size()); 1508 1509 // Then go ahead and use the builder do actually do the inserts. We insert 1510 // immediately before the previous instruction under the assumption that all 1511 // arguments will be available here. We can't insert afterwards since we may 1512 // be replacing a terminator. 1513 IRBuilder<> Builder(Call); 1514 1515 ArrayRef<Value *> GCArgs(LiveVariables); 1516 uint64_t StatepointID = StatepointDirectives::DefaultStatepointID; 1517 uint32_t NumPatchBytes = 0; 1518 uint32_t Flags = uint32_t(StatepointFlags::None); 1519 1520 SmallVector<Value *, 8> CallArgs(Call->args()); 1521 Optional<ArrayRef<Use>> DeoptArgs; 1522 if (auto Bundle = Call->getOperandBundle(LLVMContext::OB_deopt)) 1523 DeoptArgs = Bundle->Inputs; 1524 Optional<ArrayRef<Use>> TransitionArgs; 1525 if (auto Bundle = Call->getOperandBundle(LLVMContext::OB_gc_transition)) { 1526 TransitionArgs = Bundle->Inputs; 1527 // TODO: This flag no longer serves a purpose and can be removed later 1528 Flags |= uint32_t(StatepointFlags::GCTransition); 1529 } 1530 1531 // Instead of lowering calls to @llvm.experimental.deoptimize as normal calls 1532 // with a return value, we lower then as never returning calls to 1533 // __llvm_deoptimize that are followed by unreachable to get better codegen. 1534 bool IsDeoptimize = false; 1535 1536 StatepointDirectives SD = 1537 parseStatepointDirectivesFromAttrs(Call->getAttributes()); 1538 if (SD.NumPatchBytes) 1539 NumPatchBytes = *SD.NumPatchBytes; 1540 if (SD.StatepointID) 1541 StatepointID = *SD.StatepointID; 1542 1543 // Pass through the requested lowering if any. The default is live-through. 1544 StringRef DeoptLowering = getDeoptLowering(Call); 1545 if (DeoptLowering.equals("live-in")) 1546 Flags |= uint32_t(StatepointFlags::DeoptLiveIn); 1547 else { 1548 assert(DeoptLowering.equals("live-through") && "Unsupported value!"); 1549 } 1550 1551 Value *CallTarget = Call->getCalledOperand(); 1552 if (Function *F = dyn_cast<Function>(CallTarget)) { 1553 auto IID = F->getIntrinsicID(); 1554 if (IID == Intrinsic::experimental_deoptimize) { 1555 // Calls to llvm.experimental.deoptimize are lowered to calls to the 1556 // __llvm_deoptimize symbol. We want to resolve this now, since the 1557 // verifier does not allow taking the address of an intrinsic function. 1558 1559 SmallVector<Type *, 8> DomainTy; 1560 for (Value *Arg : CallArgs) 1561 DomainTy.push_back(Arg->getType()); 1562 auto *FTy = FunctionType::get(Type::getVoidTy(F->getContext()), DomainTy, 1563 /* isVarArg = */ false); 1564 1565 // Note: CallTarget can be a bitcast instruction of a symbol if there are 1566 // calls to @llvm.experimental.deoptimize with different argument types in 1567 // the same module. 
This is fine -- we assume the frontend knew what it 1568 // was doing when generating this kind of IR. 1569 CallTarget = F->getParent() 1570 ->getOrInsertFunction("__llvm_deoptimize", FTy) 1571 .getCallee(); 1572 1573 IsDeoptimize = true; 1574 } else if (IID == Intrinsic::memcpy_element_unordered_atomic || 1575 IID == Intrinsic::memmove_element_unordered_atomic) { 1576 // Unordered atomic memcpy and memmove intrinsics which are not explicitly 1577 // marked as "gc-leaf-function" should be lowered in a GC parseable way. 1578 // Specifically, these calls should be lowered to the 1579 // __llvm_{memcpy|memmove}_element_unordered_atomic_safepoint symbols. 1580 // Similarly to __llvm_deoptimize we want to resolve this now, since the 1581 // verifier does not allow taking the address of an intrinsic function. 1582 // 1583 // Moreover we need to shuffle the arguments for the call in order to 1584 // accommodate GC. The underlying source and destination objects might be 1585 // relocated during copy operation should the GC occur. To relocate the 1586 // derived source and destination pointers the implementation of the 1587 // intrinsic should know the corresponding base pointers. 1588 // 1589 // To make the base pointers available pass them explicitly as arguments: 1590 // memcpy(dest_derived, source_derived, ...) => 1591 // memcpy(dest_base, dest_offset, source_base, source_offset, ...) 1592 auto &Context = Call->getContext(); 1593 auto &DL = Call->getModule()->getDataLayout(); 1594 auto GetBaseAndOffset = [&](Value *Derived) { 1595 assert(Result.PointerToBase.count(Derived)); 1596 unsigned AddressSpace = Derived->getType()->getPointerAddressSpace(); 1597 unsigned IntPtrSize = DL.getPointerSizeInBits(AddressSpace); 1598 Value *Base = Result.PointerToBase.find(Derived)->second; 1599 Value *Base_int = Builder.CreatePtrToInt( 1600 Base, Type::getIntNTy(Context, IntPtrSize)); 1601 Value *Derived_int = Builder.CreatePtrToInt( 1602 Derived, Type::getIntNTy(Context, IntPtrSize)); 1603 return std::make_pair(Base, Builder.CreateSub(Derived_int, Base_int)); 1604 }; 1605 1606 auto *Dest = CallArgs[0]; 1607 Value *DestBase, *DestOffset; 1608 std::tie(DestBase, DestOffset) = GetBaseAndOffset(Dest); 1609 1610 auto *Source = CallArgs[1]; 1611 Value *SourceBase, *SourceOffset; 1612 std::tie(SourceBase, SourceOffset) = GetBaseAndOffset(Source); 1613 1614 auto *LengthInBytes = CallArgs[2]; 1615 auto *ElementSizeCI = cast<ConstantInt>(CallArgs[3]); 1616 1617 CallArgs.clear(); 1618 CallArgs.push_back(DestBase); 1619 CallArgs.push_back(DestOffset); 1620 CallArgs.push_back(SourceBase); 1621 CallArgs.push_back(SourceOffset); 1622 CallArgs.push_back(LengthInBytes); 1623 1624 SmallVector<Type *, 8> DomainTy; 1625 for (Value *Arg : CallArgs) 1626 DomainTy.push_back(Arg->getType()); 1627 auto *FTy = FunctionType::get(Type::getVoidTy(F->getContext()), DomainTy, 1628 /* isVarArg = */ false); 1629 1630 auto GetFunctionName = [](Intrinsic::ID IID, ConstantInt *ElementSizeCI) { 1631 uint64_t ElementSize = ElementSizeCI->getZExtValue(); 1632 if (IID == Intrinsic::memcpy_element_unordered_atomic) { 1633 switch (ElementSize) { 1634 case 1: 1635 return "__llvm_memcpy_element_unordered_atomic_safepoint_1"; 1636 case 2: 1637 return "__llvm_memcpy_element_unordered_atomic_safepoint_2"; 1638 case 4: 1639 return "__llvm_memcpy_element_unordered_atomic_safepoint_4"; 1640 case 8: 1641 return "__llvm_memcpy_element_unordered_atomic_safepoint_8"; 1642 case 16: 1643 return "__llvm_memcpy_element_unordered_atomic_safepoint_16"; 1644 default: 
1645 llvm_unreachable("unexpected element size!"); 1646 } 1647 } 1648 assert(IID == Intrinsic::memmove_element_unordered_atomic); 1649 switch (ElementSize) { 1650 case 1: 1651 return "__llvm_memmove_element_unordered_atomic_safepoint_1"; 1652 case 2: 1653 return "__llvm_memmove_element_unordered_atomic_safepoint_2"; 1654 case 4: 1655 return "__llvm_memmove_element_unordered_atomic_safepoint_4"; 1656 case 8: 1657 return "__llvm_memmove_element_unordered_atomic_safepoint_8"; 1658 case 16: 1659 return "__llvm_memmove_element_unordered_atomic_safepoint_16"; 1660 default: 1661 llvm_unreachable("unexpected element size!"); 1662 } 1663 }; 1664 1665 CallTarget = 1666 F->getParent() 1667 ->getOrInsertFunction(GetFunctionName(IID, ElementSizeCI), FTy) 1668 .getCallee(); 1669 } 1670 } 1671 1672 // Create the statepoint given all the arguments 1673 GCStatepointInst *Token = nullptr; 1674 if (auto *CI = dyn_cast<CallInst>(Call)) { 1675 CallInst *SPCall = Builder.CreateGCStatepointCall( 1676 StatepointID, NumPatchBytes, CallTarget, Flags, CallArgs, 1677 TransitionArgs, DeoptArgs, GCArgs, "safepoint_token"); 1678 1679 SPCall->setTailCallKind(CI->getTailCallKind()); 1680 SPCall->setCallingConv(CI->getCallingConv()); 1681 1682 // Currently we will fail on parameter attributes and on certain 1683 // function attributes. In case if we can handle this set of attributes - 1684 // set up function attrs directly on statepoint and return attrs later for 1685 // gc_result intrinsic. 1686 SPCall->setAttributes( 1687 legalizeCallAttributes(CI->getContext(), CI->getAttributes())); 1688 1689 Token = cast<GCStatepointInst>(SPCall); 1690 1691 // Put the following gc_result and gc_relocate calls immediately after the 1692 // the old call (which we're about to delete) 1693 assert(CI->getNextNode() && "Not a terminator, must have next!"); 1694 Builder.SetInsertPoint(CI->getNextNode()); 1695 Builder.SetCurrentDebugLocation(CI->getNextNode()->getDebugLoc()); 1696 } else { 1697 auto *II = cast<InvokeInst>(Call); 1698 1699 // Insert the new invoke into the old block. We'll remove the old one in a 1700 // moment at which point this will become the new terminator for the 1701 // original block. 1702 InvokeInst *SPInvoke = Builder.CreateGCStatepointInvoke( 1703 StatepointID, NumPatchBytes, CallTarget, II->getNormalDest(), 1704 II->getUnwindDest(), Flags, CallArgs, TransitionArgs, DeoptArgs, GCArgs, 1705 "statepoint_token"); 1706 1707 SPInvoke->setCallingConv(II->getCallingConv()); 1708 1709 // Currently we will fail on parameter attributes and on certain 1710 // function attributes. In case if we can handle this set of attributes - 1711 // set up function attrs directly on statepoint and return attrs later for 1712 // gc_result intrinsic. 1713 SPInvoke->setAttributes( 1714 legalizeCallAttributes(II->getContext(), II->getAttributes())); 1715 1716 Token = cast<GCStatepointInst>(SPInvoke); 1717 1718 // Generate gc relocates in exceptional path 1719 BasicBlock *UnwindBlock = II->getUnwindDest(); 1720 assert(!isa<PHINode>(UnwindBlock->begin()) && 1721 UnwindBlock->getUniquePredecessor() && 1722 "can't safely insert in this block!"); 1723 1724 Builder.SetInsertPoint(&*UnwindBlock->getFirstInsertionPt()); 1725 Builder.SetCurrentDebugLocation(II->getDebugLoc()); 1726 1727 // Attach exceptional gc relocates to the landingpad. 
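    // Illustrative sketch (simplified IR): with token-typed landingpads the
    // unwind destination ends up looking roughly like
    //   %lpad = landingpad token cleanup
    //   %p.relocated = call coldcc i8 addrspace(1)*
    //       @llvm.experimental.gc.relocate.p1i8(token %lpad, i32 0, i32 0)
    // i.e. the landingpad itself serves as the statepoint token on the
    // exceptional path.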
1728 Instruction *ExceptionalToken = UnwindBlock->getLandingPadInst(); 1729 Result.UnwindToken = ExceptionalToken; 1730 1731 CreateGCRelocates(LiveVariables, BasePtrs, ExceptionalToken, Builder); 1732 1733 // Generate gc relocates and returns for normal block 1734 BasicBlock *NormalDest = II->getNormalDest(); 1735 assert(!isa<PHINode>(NormalDest->begin()) && 1736 NormalDest->getUniquePredecessor() && 1737 "can't safely insert in this block!"); 1738 1739 Builder.SetInsertPoint(&*NormalDest->getFirstInsertionPt()); 1740 1741 // gc relocates will be generated later as if it were regular call 1742 // statepoint 1743 } 1744 assert(Token && "Should be set in one of the above branches!"); 1745 1746 if (IsDeoptimize) { 1747 // If we're wrapping an @llvm.experimental.deoptimize in a statepoint, we 1748 // transform the tail-call like structure to a call to a void function 1749 // followed by unreachable to get better codegen. 1750 Replacements.push_back( 1751 DeferredReplacement::createDeoptimizeReplacement(Call)); 1752 } else { 1753 Token->setName("statepoint_token"); 1754 if (!Call->getType()->isVoidTy() && !Call->use_empty()) { 1755 StringRef Name = Call->hasName() ? Call->getName() : ""; 1756 CallInst *GCResult = Builder.CreateGCResult(Token, Call->getType(), Name); 1757 GCResult->setAttributes( 1758 AttributeList::get(GCResult->getContext(), AttributeList::ReturnIndex, 1759 Call->getAttributes().getRetAttributes())); 1760 1761 // We cannot RAUW or delete CS.getInstruction() because it could be in the 1762 // live set of some other safepoint, in which case that safepoint's 1763 // PartiallyConstructedSafepointRecord will hold a raw pointer to this 1764 // llvm::Instruction. Instead, we defer the replacement and deletion to 1765 // after the live sets have been made explicit in the IR, and we no longer 1766 // have raw pointers to worry about. 1767 Replacements.emplace_back( 1768 DeferredReplacement::createRAUW(Call, GCResult)); 1769 } else { 1770 Replacements.emplace_back(DeferredReplacement::createDelete(Call)); 1771 } 1772 } 1773 1774 Result.StatepointToken = Token; 1775 1776 // Second, create a gc.relocate for every live variable 1777 CreateGCRelocates(LiveVariables, BasePtrs, Token, Builder); 1778 } 1779 1780 // Replace an existing gc.statepoint with a new one and a set of gc.relocates 1781 // which make the relocations happening at this safepoint explicit. 1782 // 1783 // WARNING: Does not do any fixup to adjust users of the original live 1784 // values. That's the callers responsibility. 1785 static void 1786 makeStatepointExplicit(DominatorTree &DT, CallBase *Call, 1787 PartiallyConstructedSafepointRecord &Result, 1788 std::vector<DeferredReplacement> &Replacements) { 1789 const auto &LiveSet = Result.LiveSet; 1790 const auto &PointerToBase = Result.PointerToBase; 1791 1792 // Convert to vector for efficient cross referencing. 1793 SmallVector<Value *, 64> BaseVec, LiveVec; 1794 LiveVec.reserve(LiveSet.size()); 1795 BaseVec.reserve(LiveSet.size()); 1796 for (Value *L : LiveSet) { 1797 LiveVec.push_back(L); 1798 assert(PointerToBase.count(L)); 1799 Value *Base = PointerToBase.find(L)->second; 1800 BaseVec.push_back(Base); 1801 } 1802 assert(LiveVec.size() == BaseVec.size()); 1803 1804 // Do the actual rewriting and delete the old statepoint 1805 makeStatepointExplicitImpl(Call, BaseVec, LiveVec, Result, Replacements); 1806 } 1807 1808 // Helper function for the relocationViaAlloca. 
//
// It receives an iterator over the statepoint gc relocates and emits a store
// to the assigned location (via allocaMap) for each one of them. It adds the
// visited values into the visitedLiveValues set, which we will later use for
// sanity checking.
static void
insertRelocationStores(iterator_range<Value::user_iterator> GCRelocs,
                       DenseMap<Value *, AllocaInst *> &AllocaMap,
                       DenseSet<Value *> &VisitedLiveValues) {
  for (User *U : GCRelocs) {
    GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(U);
    if (!Relocate)
      continue;

    Value *OriginalValue = Relocate->getDerivedPtr();
    assert(AllocaMap.count(OriginalValue));
    Value *Alloca = AllocaMap[OriginalValue];

    // Emit a store into the related alloca.
    // All gc_relocates are i8 addrspace(1)* typed, so the value must be
    // bitcast to the correct type according to the alloca.
    assert(Relocate->getNextNode() &&
           "Should always have one since it's not a terminator");
    IRBuilder<> Builder(Relocate->getNextNode());
    Value *CastedRelocatedValue =
      Builder.CreateBitCast(Relocate,
                            cast<AllocaInst>(Alloca)->getAllocatedType(),
                            suffixed_name_or(Relocate, ".casted", ""));

    new StoreInst(CastedRelocatedValue, Alloca,
                  cast<Instruction>(CastedRelocatedValue)->getNextNode());

#ifndef NDEBUG
    VisitedLiveValues.insert(OriginalValue);
#endif
  }
}

// Helper function for the "relocationViaAlloca". Similar to the
// "insertRelocationStores" but works for rematerialized values.
static void insertRematerializationStores(
    const RematerializedValueMapTy &RematerializedValues,
    DenseMap<Value *, AllocaInst *> &AllocaMap,
    DenseSet<Value *> &VisitedLiveValues) {
  for (auto RematerializedValuePair : RematerializedValues) {
    Instruction *RematerializedValue = RematerializedValuePair.first;
    Value *OriginalValue = RematerializedValuePair.second;

    assert(AllocaMap.count(OriginalValue) &&
           "Cannot find alloca for rematerialized value");
    Value *Alloca = AllocaMap[OriginalValue];

    new StoreInst(RematerializedValue, Alloca,
                  RematerializedValue->getNextNode());

#ifndef NDEBUG
    VisitedLiveValues.insert(OriginalValue);
#endif
  }
}

/// Do all the relocation update via allocas and mem2reg
static void relocationViaAlloca(
    Function &F, DominatorTree &DT, ArrayRef<Value *> Live,
    ArrayRef<PartiallyConstructedSafepointRecord> Records) {
#ifndef NDEBUG
  // Record the initial number of (static) allocas; we'll check we have the
  // same number when we get done.
1877 int InitialAllocaNum = 0; 1878 for (Instruction &I : F.getEntryBlock()) 1879 if (isa<AllocaInst>(I)) 1880 InitialAllocaNum++; 1881 #endif 1882 1883 // TODO-PERF: change data structures, reserve 1884 DenseMap<Value *, AllocaInst *> AllocaMap; 1885 SmallVector<AllocaInst *, 200> PromotableAllocas; 1886 // Used later to chack that we have enough allocas to store all values 1887 std::size_t NumRematerializedValues = 0; 1888 PromotableAllocas.reserve(Live.size()); 1889 1890 // Emit alloca for "LiveValue" and record it in "allocaMap" and 1891 // "PromotableAllocas" 1892 const DataLayout &DL = F.getParent()->getDataLayout(); 1893 auto emitAllocaFor = [&](Value *LiveValue) { 1894 AllocaInst *Alloca = new AllocaInst(LiveValue->getType(), 1895 DL.getAllocaAddrSpace(), "", 1896 F.getEntryBlock().getFirstNonPHI()); 1897 AllocaMap[LiveValue] = Alloca; 1898 PromotableAllocas.push_back(Alloca); 1899 }; 1900 1901 // Emit alloca for each live gc pointer 1902 for (Value *V : Live) 1903 emitAllocaFor(V); 1904 1905 // Emit allocas for rematerialized values 1906 for (const auto &Info : Records) 1907 for (auto RematerializedValuePair : Info.RematerializedValues) { 1908 Value *OriginalValue = RematerializedValuePair.second; 1909 if (AllocaMap.count(OriginalValue) != 0) 1910 continue; 1911 1912 emitAllocaFor(OriginalValue); 1913 ++NumRematerializedValues; 1914 } 1915 1916 // The next two loops are part of the same conceptual operation. We need to 1917 // insert a store to the alloca after the original def and at each 1918 // redefinition. We need to insert a load before each use. These are split 1919 // into distinct loops for performance reasons. 1920 1921 // Update gc pointer after each statepoint: either store a relocated value or 1922 // null (if no relocated value was found for this gc pointer and it is not a 1923 // gc_result). This must happen before we update the statepoint with load of 1924 // alloca otherwise we lose the link between statepoint and old def. 1925 for (const auto &Info : Records) { 1926 Value *Statepoint = Info.StatepointToken; 1927 1928 // This will be used for consistency check 1929 DenseSet<Value *> VisitedLiveValues; 1930 1931 // Insert stores for normal statepoint gc relocates 1932 insertRelocationStores(Statepoint->users(), AllocaMap, VisitedLiveValues); 1933 1934 // In case if it was invoke statepoint 1935 // we will insert stores for exceptional path gc relocates. 1936 if (isa<InvokeInst>(Statepoint)) { 1937 insertRelocationStores(Info.UnwindToken->users(), AllocaMap, 1938 VisitedLiveValues); 1939 } 1940 1941 // Do similar thing with rematerialized values 1942 insertRematerializationStores(Info.RematerializedValues, AllocaMap, 1943 VisitedLiveValues); 1944 1945 if (ClobberNonLive) { 1946 // As a debugging aid, pretend that an unrelocated pointer becomes null at 1947 // the gc.statepoint. This will turn some subtle GC problems into 1948 // slightly easier to debug SEGVs. Note that on large IR files with 1949 // lots of gc.statepoints this is extremely costly both memory and time 1950 // wise. 
1951 SmallVector<AllocaInst *, 64> ToClobber; 1952 for (auto Pair : AllocaMap) { 1953 Value *Def = Pair.first; 1954 AllocaInst *Alloca = Pair.second; 1955 1956 // This value was relocated 1957 if (VisitedLiveValues.count(Def)) { 1958 continue; 1959 } 1960 ToClobber.push_back(Alloca); 1961 } 1962 1963 auto InsertClobbersAt = [&](Instruction *IP) { 1964 for (auto *AI : ToClobber) { 1965 auto PT = cast<PointerType>(AI->getAllocatedType()); 1966 Constant *CPN = ConstantPointerNull::get(PT); 1967 new StoreInst(CPN, AI, IP); 1968 } 1969 }; 1970 1971 // Insert the clobbering stores. These may get intermixed with the 1972 // gc.results and gc.relocates, but that's fine. 1973 if (auto II = dyn_cast<InvokeInst>(Statepoint)) { 1974 InsertClobbersAt(&*II->getNormalDest()->getFirstInsertionPt()); 1975 InsertClobbersAt(&*II->getUnwindDest()->getFirstInsertionPt()); 1976 } else { 1977 InsertClobbersAt(cast<Instruction>(Statepoint)->getNextNode()); 1978 } 1979 } 1980 } 1981 1982 // Update use with load allocas and add store for gc_relocated. 1983 for (auto Pair : AllocaMap) { 1984 Value *Def = Pair.first; 1985 AllocaInst *Alloca = Pair.second; 1986 1987 // We pre-record the uses of allocas so that we dont have to worry about 1988 // later update that changes the user information.. 1989 1990 SmallVector<Instruction *, 20> Uses; 1991 // PERF: trade a linear scan for repeated reallocation 1992 Uses.reserve(Def->getNumUses()); 1993 for (User *U : Def->users()) { 1994 if (!isa<ConstantExpr>(U)) { 1995 // If the def has a ConstantExpr use, then the def is either a 1996 // ConstantExpr use itself or null. In either case 1997 // (recursively in the first, directly in the second), the oop 1998 // it is ultimately dependent on is null and this particular 1999 // use does not need to be fixed up. 2000 Uses.push_back(cast<Instruction>(U)); 2001 } 2002 } 2003 2004 llvm::sort(Uses); 2005 auto Last = std::unique(Uses.begin(), Uses.end()); 2006 Uses.erase(Last, Uses.end()); 2007 2008 for (Instruction *Use : Uses) { 2009 if (isa<PHINode>(Use)) { 2010 PHINode *Phi = cast<PHINode>(Use); 2011 for (unsigned i = 0; i < Phi->getNumIncomingValues(); i++) { 2012 if (Def == Phi->getIncomingValue(i)) { 2013 LoadInst *Load = 2014 new LoadInst(Alloca->getAllocatedType(), Alloca, "", 2015 Phi->getIncomingBlock(i)->getTerminator()); 2016 Phi->setIncomingValue(i, Load); 2017 } 2018 } 2019 } else { 2020 LoadInst *Load = 2021 new LoadInst(Alloca->getAllocatedType(), Alloca, "", Use); 2022 Use->replaceUsesOfWith(Def, Load); 2023 } 2024 } 2025 2026 // Emit store for the initial gc value. Store must be inserted after load, 2027 // otherwise store will be in alloca's use list and an extra load will be 2028 // inserted before it. 2029 StoreInst *Store = new StoreInst(Def, Alloca, /*volatile*/ false, 2030 DL.getABITypeAlign(Def->getType())); 2031 if (Instruction *Inst = dyn_cast<Instruction>(Def)) { 2032 if (InvokeInst *Invoke = dyn_cast<InvokeInst>(Inst)) { 2033 // InvokeInst is a terminator so the store need to be inserted into its 2034 // normal destination block. 
2035 BasicBlock *NormalDest = Invoke->getNormalDest(); 2036 Store->insertBefore(NormalDest->getFirstNonPHI()); 2037 } else { 2038 assert(!Inst->isTerminator() && 2039 "The only terminator that can produce a value is " 2040 "InvokeInst which is handled above."); 2041 Store->insertAfter(Inst); 2042 } 2043 } else { 2044 assert(isa<Argument>(Def)); 2045 Store->insertAfter(cast<Instruction>(Alloca)); 2046 } 2047 } 2048 2049 assert(PromotableAllocas.size() == Live.size() + NumRematerializedValues && 2050 "we must have the same allocas with lives"); 2051 if (!PromotableAllocas.empty()) { 2052 // Apply mem2reg to promote alloca to SSA 2053 PromoteMemToReg(PromotableAllocas, DT); 2054 } 2055 2056 #ifndef NDEBUG 2057 for (auto &I : F.getEntryBlock()) 2058 if (isa<AllocaInst>(I)) 2059 InitialAllocaNum--; 2060 assert(InitialAllocaNum == 0 && "We must not introduce any extra allocas"); 2061 #endif 2062 } 2063 2064 /// Implement a unique function which doesn't require we sort the input 2065 /// vector. Doing so has the effect of changing the output of a couple of 2066 /// tests in ways which make them less useful in testing fused safepoints. 2067 template <typename T> static void unique_unsorted(SmallVectorImpl<T> &Vec) { 2068 SmallSet<T, 8> Seen; 2069 erase_if(Vec, [&](const T &V) { return !Seen.insert(V).second; }); 2070 } 2071 2072 /// Insert holders so that each Value is obviously live through the entire 2073 /// lifetime of the call. 2074 static void insertUseHolderAfter(CallBase *Call, const ArrayRef<Value *> Values, 2075 SmallVectorImpl<CallInst *> &Holders) { 2076 if (Values.empty()) 2077 // No values to hold live, might as well not insert the empty holder 2078 return; 2079 2080 Module *M = Call->getModule(); 2081 // Use a dummy vararg function to actually hold the values live 2082 FunctionCallee Func = M->getOrInsertFunction( 2083 "__tmp_use", FunctionType::get(Type::getVoidTy(M->getContext()), true)); 2084 if (isa<CallInst>(Call)) { 2085 // For call safepoints insert dummy calls right after safepoint 2086 Holders.push_back( 2087 CallInst::Create(Func, Values, "", &*++Call->getIterator())); 2088 return; 2089 } 2090 // For invoke safepooints insert dummy calls both in normal and 2091 // exceptional destination blocks 2092 auto *II = cast<InvokeInst>(Call); 2093 Holders.push_back(CallInst::Create( 2094 Func, Values, "", &*II->getNormalDest()->getFirstInsertionPt())); 2095 Holders.push_back(CallInst::Create( 2096 Func, Values, "", &*II->getUnwindDest()->getFirstInsertionPt())); 2097 } 2098 2099 static void findLiveReferences( 2100 Function &F, DominatorTree &DT, ArrayRef<CallBase *> toUpdate, 2101 MutableArrayRef<struct PartiallyConstructedSafepointRecord> records) { 2102 GCPtrLivenessData OriginalLivenessData; 2103 computeLiveInValues(DT, F, OriginalLivenessData); 2104 for (size_t i = 0; i < records.size(); i++) { 2105 struct PartiallyConstructedSafepointRecord &info = records[i]; 2106 analyzeParsePointLiveness(DT, OriginalLivenessData, toUpdate[i], info); 2107 } 2108 } 2109 2110 // Helper function for the "rematerializeLiveValues". It walks use chain 2111 // starting from the "CurrentValue" until it reaches the root of the chain, i.e. 2112 // the base or a value it cannot process. Only "simple" values are processed 2113 // (currently it is GEP's and casts). The returned root is examined by the 2114 // callers of findRematerializableChainToBasePointer. Fills "ChainToBase" array 2115 // with all visited values. 
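//
// For example (illustrative values), given a derived pointer computed as
//   %p1 = getelementptr i8, i8 addrspace(1)* %base, i64 16
//   %p2 = getelementptr i8, i8 addrspace(1)* %p1, i64 8
// walking from %p2 fills ChainToBase with [%p2, %p1] (derived value first)
// and returns %base as the root of the chain.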
2116 static Value* findRematerializableChainToBasePointer( 2117 SmallVectorImpl<Instruction*> &ChainToBase, 2118 Value *CurrentValue) { 2119 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(CurrentValue)) { 2120 ChainToBase.push_back(GEP); 2121 return findRematerializableChainToBasePointer(ChainToBase, 2122 GEP->getPointerOperand()); 2123 } 2124 2125 if (CastInst *CI = dyn_cast<CastInst>(CurrentValue)) { 2126 if (!CI->isNoopCast(CI->getModule()->getDataLayout())) 2127 return CI; 2128 2129 ChainToBase.push_back(CI); 2130 return findRematerializableChainToBasePointer(ChainToBase, 2131 CI->getOperand(0)); 2132 } 2133 2134 // We have reached the root of the chain, which is either equal to the base or 2135 // is the first unsupported value along the use chain. 2136 return CurrentValue; 2137 } 2138 2139 // Helper function for the "rematerializeLiveValues". Compute cost of the use 2140 // chain we are going to rematerialize. 2141 static InstructionCost 2142 chainToBasePointerCost(SmallVectorImpl<Instruction *> &Chain, 2143 TargetTransformInfo &TTI) { 2144 InstructionCost Cost = 0; 2145 2146 for (Instruction *Instr : Chain) { 2147 if (CastInst *CI = dyn_cast<CastInst>(Instr)) { 2148 assert(CI->isNoopCast(CI->getModule()->getDataLayout()) && 2149 "non noop cast is found during rematerialization"); 2150 2151 Type *SrcTy = CI->getOperand(0)->getType(); 2152 Cost += TTI.getCastInstrCost(CI->getOpcode(), CI->getType(), SrcTy, 2153 TTI::getCastContextHint(CI), 2154 TargetTransformInfo::TCK_SizeAndLatency, CI); 2155 2156 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Instr)) { 2157 // Cost of the address calculation 2158 Type *ValTy = GEP->getSourceElementType(); 2159 Cost += TTI.getAddressComputationCost(ValTy); 2160 2161 // And cost of the GEP itself 2162 // TODO: Use TTI->getGEPCost here (it exists, but appears to be not 2163 // allowed for the external usage) 2164 if (!GEP->hasAllConstantIndices()) 2165 Cost += 2; 2166 2167 } else { 2168 llvm_unreachable("unsupported instruction type during rematerialization"); 2169 } 2170 } 2171 2172 return Cost; 2173 } 2174 2175 static bool AreEquivalentPhiNodes(PHINode &OrigRootPhi, PHINode &AlternateRootPhi) { 2176 unsigned PhiNum = OrigRootPhi.getNumIncomingValues(); 2177 if (PhiNum != AlternateRootPhi.getNumIncomingValues() || 2178 OrigRootPhi.getParent() != AlternateRootPhi.getParent()) 2179 return false; 2180 // Map of incoming values and their corresponding basic blocks of 2181 // OrigRootPhi. 2182 SmallDenseMap<Value *, BasicBlock *, 8> CurrentIncomingValues; 2183 for (unsigned i = 0; i < PhiNum; i++) 2184 CurrentIncomingValues[OrigRootPhi.getIncomingValue(i)] = 2185 OrigRootPhi.getIncomingBlock(i); 2186 2187 // Both current and base PHIs should have same incoming values and 2188 // the same basic blocks corresponding to the incoming values. 2189 for (unsigned i = 0; i < PhiNum; i++) { 2190 auto CIVI = 2191 CurrentIncomingValues.find(AlternateRootPhi.getIncomingValue(i)); 2192 if (CIVI == CurrentIncomingValues.end()) 2193 return false; 2194 BasicBlock *CurrentIncomingBB = CIVI->second; 2195 if (CurrentIncomingBB != AlternateRootPhi.getIncomingBlock(i)) 2196 return false; 2197 } 2198 return true; 2199 } 2200 2201 // From the statepoint live set pick values that are cheaper to recompute then 2202 // to relocate. Remove this values from the live set, rematerialize them after 2203 // statepoint and record them in "Info" structure. Note that similar to 2204 // relocated values we don't do any user adjustments here. 
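//
// Conceptually (illustrative, simplified IR), instead of relocating a derived
// pointer such as
//   %p = getelementptr i32, i32 addrspace(1)* %base, i64 4
//   <statepoint>
// only %base is kept in the live set and the cheap computation is re-emitted
// after the statepoint, roughly as
//   %p.remat = getelementptr i32, i32 addrspace(1)* %base, i64 4
// with uses rewired to the rematerialized (and eventually relocated) values
// later, together with the other relocations.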
static void rematerializeLiveValues(CallBase *Call,
                                    PartiallyConstructedSafepointRecord &Info,
                                    TargetTransformInfo &TTI) {
  const unsigned int ChainLengthThreshold = 10;

  // Record values we are going to delete from this statepoint live set.
  // We cannot do this in the following loop due to iterator invalidation.
  SmallVector<Value *, 32> LiveValuesToBeDeleted;

  for (Value *LiveValue : Info.LiveSet) {
    // For each live pointer find its defining chain.
    SmallVector<Instruction *, 3> ChainToBase;
    assert(Info.PointerToBase.count(LiveValue));
    Value *RootOfChain =
        findRematerializableChainToBasePointer(ChainToBase, LiveValue);

    // Nothing to do, or chain is too long.
    if (ChainToBase.size() == 0 ||
        ChainToBase.size() > ChainLengthThreshold)
      continue;

    // Handle the scenario where the RootOfChain is not equal to the
    // Base Value, but they are essentially the same phi values.
    if (RootOfChain != Info.PointerToBase[LiveValue]) {
      PHINode *OrigRootPhi = dyn_cast<PHINode>(RootOfChain);
      PHINode *AlternateRootPhi =
          dyn_cast<PHINode>(Info.PointerToBase[LiveValue]);
      if (!OrigRootPhi || !AlternateRootPhi)
        continue;
      // PHI nodes that have the same incoming values and belong to the same
      // basic block are essentially the same SSA value. When the original phi
      // has incoming values with different base pointers, the original phi is
      // marked as conflict, and an additional `AlternateRootPhi` with the same
      // incoming values gets generated by the findBasePointer function. We
      // need to identify that the newly generated AlternateRootPhi (the .base
      // version of the phi) and RootOfChain (the original phi node itself) are
      // the same, so that we can rematerialize the gep and casts. This is a
      // workaround for the deficiency in the findBasePointer algorithm.
      if (!AreEquivalentPhiNodes(*OrigRootPhi, *AlternateRootPhi))
        continue;
      // Now that the phi nodes are proved to be the same, assert that
      // findBasePointer's newly generated AlternateRootPhi is present in the
      // liveset of the call.
      assert(Info.LiveSet.count(AlternateRootPhi));
    }
    // Compute the cost of this chain.
    InstructionCost Cost = chainToBasePointerCost(ChainToBase, TTI);
    // TODO: We can also account for cases when we will be able to remove some
    // of the rematerialized values by later optimization passes, e.g. if we
    // rematerialized several intersecting chains, or if the original values
    // don't have any uses besides this statepoint.

    // For invokes we need to rematerialize each chain twice - for the normal
    // and for the unwind basic blocks. Model this by multiplying the cost by
    // two.
    if (isa<InvokeInst>(Call)) {
      Cost *= 2;
    }
    // If it's too expensive - skip it.
    if (Cost >= RematerializationThreshold)
      continue;

    // Remove the value from the live set.
    LiveValuesToBeDeleted.push_back(LiveValue);

    // Clone instructions and record them inside the "Info" structure.

    // Walk backwards to visit the top-most instructions first.
    std::reverse(ChainToBase.begin(), ChainToBase.end());

    // Utility function which clones all instructions from "ChainToBase"
    // and inserts them before "InsertBefore". Returns the rematerialized value
    // which should be used after the statepoint.
    auto rematerializeChain = [&ChainToBase](
        Instruction *InsertBefore, Value *RootOfChain,
        Value *AlternateLiveBase) {
      Instruction *LastClonedValue = nullptr;
      Instruction *LastValue = nullptr;
      for (Instruction *Instr : ChainToBase) {
        // Only GEPs and casts are supported as we need to be careful to not
        // introduce any new uses of pointers not in the liveset.
        // Note that it's fine to introduce new uses of pointers which were
        // otherwise not used after this statepoint.
        assert(isa<GetElementPtrInst>(Instr) || isa<CastInst>(Instr));

        Instruction *ClonedValue = Instr->clone();
        ClonedValue->insertBefore(InsertBefore);
        ClonedValue->setName(Instr->getName() + ".remat");

        // If it is not the first instruction in the chain, then it uses the
        // previously cloned value, so update it to use the clone instead.
        if (LastClonedValue) {
          assert(LastValue);
          ClonedValue->replaceUsesOfWith(LastValue, LastClonedValue);
#ifndef NDEBUG
          for (auto OpValue : ClonedValue->operand_values()) {
            // Assert that the cloned instruction does not use any instructions
            // from this chain other than LastClonedValue.
            assert(!is_contained(ChainToBase, OpValue) &&
                   "incorrect use in rematerialization chain");
            // Assert that the cloned instruction does not use the RootOfChain
            // or the AlternateLiveBase.
            assert(OpValue != RootOfChain && OpValue != AlternateLiveBase);
          }
#endif
        } else {
          // For the first instruction, replace the use of the unrelocated
          // base, i.e. RootOfChain/OrigRootPhi, with the corresponding PHI
          // present in the live set. They have been proved to be the same PHI
          // nodes. Note that the *only* use of the RootOfChain in the
          // ChainToBase list is the first Value in the list.
          if (RootOfChain != AlternateLiveBase)
            ClonedValue->replaceUsesOfWith(RootOfChain, AlternateLiveBase);
        }

        LastClonedValue = ClonedValue;
        LastValue = Instr;
      }
      assert(LastClonedValue);
      return LastClonedValue;
    };

    // Different cases for calls and invokes. For invokes we need to clone
    // the instructions both on the normal and the unwind path.
2327 if (isa<CallInst>(Call)) { 2328 Instruction *InsertBefore = Call->getNextNode(); 2329 assert(InsertBefore); 2330 Instruction *RematerializedValue = rematerializeChain( 2331 InsertBefore, RootOfChain, Info.PointerToBase[LiveValue]); 2332 Info.RematerializedValues[RematerializedValue] = LiveValue; 2333 } else { 2334 auto *Invoke = cast<InvokeInst>(Call); 2335 2336 Instruction *NormalInsertBefore = 2337 &*Invoke->getNormalDest()->getFirstInsertionPt(); 2338 Instruction *UnwindInsertBefore = 2339 &*Invoke->getUnwindDest()->getFirstInsertionPt(); 2340 2341 Instruction *NormalRematerializedValue = rematerializeChain( 2342 NormalInsertBefore, RootOfChain, Info.PointerToBase[LiveValue]); 2343 Instruction *UnwindRematerializedValue = rematerializeChain( 2344 UnwindInsertBefore, RootOfChain, Info.PointerToBase[LiveValue]); 2345 2346 Info.RematerializedValues[NormalRematerializedValue] = LiveValue; 2347 Info.RematerializedValues[UnwindRematerializedValue] = LiveValue; 2348 } 2349 } 2350 2351 // Remove rematerializaed values from the live set 2352 for (auto LiveValue: LiveValuesToBeDeleted) { 2353 Info.LiveSet.remove(LiveValue); 2354 } 2355 } 2356 2357 static bool insertParsePoints(Function &F, DominatorTree &DT, 2358 TargetTransformInfo &TTI, 2359 SmallVectorImpl<CallBase *> &ToUpdate) { 2360 #ifndef NDEBUG 2361 // sanity check the input 2362 std::set<CallBase *> Uniqued; 2363 Uniqued.insert(ToUpdate.begin(), ToUpdate.end()); 2364 assert(Uniqued.size() == ToUpdate.size() && "no duplicates please!"); 2365 2366 for (CallBase *Call : ToUpdate) 2367 assert(Call->getFunction() == &F); 2368 #endif 2369 2370 // When inserting gc.relocates for invokes, we need to be able to insert at 2371 // the top of the successor blocks. See the comment on 2372 // normalForInvokeSafepoint on exactly what is needed. Note that this step 2373 // may restructure the CFG. 2374 for (CallBase *Call : ToUpdate) { 2375 auto *II = dyn_cast<InvokeInst>(Call); 2376 if (!II) 2377 continue; 2378 normalizeForInvokeSafepoint(II->getNormalDest(), II->getParent(), DT); 2379 normalizeForInvokeSafepoint(II->getUnwindDest(), II->getParent(), DT); 2380 } 2381 2382 // A list of dummy calls added to the IR to keep various values obviously 2383 // live in the IR. We'll remove all of these when done. 2384 SmallVector<CallInst *, 64> Holders; 2385 2386 // Insert a dummy call with all of the deopt operands we'll need for the 2387 // actual safepoint insertion as arguments. This ensures reference operands 2388 // in the deopt argument list are considered live through the safepoint (and 2389 // thus makes sure they get relocated.) 2390 for (CallBase *Call : ToUpdate) { 2391 SmallVector<Value *, 64> DeoptValues; 2392 2393 for (Value *Arg : GetDeoptBundleOperands(Call)) { 2394 assert(!isUnhandledGCPointerType(Arg->getType()) && 2395 "support for FCA unimplemented"); 2396 if (isHandledGCPointerType(Arg->getType())) 2397 DeoptValues.push_back(Arg); 2398 } 2399 2400 insertUseHolderAfter(Call, DeoptValues, Holders); 2401 } 2402 2403 SmallVector<PartiallyConstructedSafepointRecord, 64> Records(ToUpdate.size()); 2404 2405 // A) Identify all gc pointers which are statically live at the given call 2406 // site. 2407 findLiveReferences(F, DT, ToUpdate, Records); 2408 2409 // B) Find the base pointers for each live pointer 2410 /* scope for caching */ { 2411 // Cache the 'defining value' relation used in the computation and 2412 // insertion of base phis and selects. This ensures that we don't insert 2413 // large numbers of duplicate base_phis. 
2414 DefiningValueMapTy DVCache; 2415 2416 for (size_t i = 0; i < Records.size(); i++) { 2417 PartiallyConstructedSafepointRecord &info = Records[i]; 2418 findBasePointers(DT, DVCache, ToUpdate[i], info); 2419 } 2420 } // end of cache scope 2421 2422 // The base phi insertion logic (for any safepoint) may have inserted new 2423 // instructions which are now live at some safepoint. The simplest such 2424 // example is: 2425 // loop: 2426 // phi a <-- will be a new base_phi here 2427 // safepoint 1 <-- that needs to be live here 2428 // gep a + 1 2429 // safepoint 2 2430 // br loop 2431 // We insert some dummy calls after each safepoint to definitely hold live 2432 // the base pointers which were identified for that safepoint. We'll then 2433 // ask liveness for _every_ base inserted to see what is now live. Then we 2434 // remove the dummy calls. 2435 Holders.reserve(Holders.size() + Records.size()); 2436 for (size_t i = 0; i < Records.size(); i++) { 2437 PartiallyConstructedSafepointRecord &Info = Records[i]; 2438 2439 SmallVector<Value *, 128> Bases; 2440 for (auto Pair : Info.PointerToBase) 2441 Bases.push_back(Pair.second); 2442 2443 insertUseHolderAfter(ToUpdate[i], Bases, Holders); 2444 } 2445 2446 // By selecting base pointers, we've effectively inserted new uses. Thus, we 2447 // need to rerun liveness. We may *also* have inserted new defs, but that's 2448 // not the key issue. 2449 recomputeLiveInValues(F, DT, ToUpdate, Records); 2450 2451 if (PrintBasePointers) { 2452 for (auto &Info : Records) { 2453 errs() << "Base Pairs: (w/Relocation)\n"; 2454 for (auto Pair : Info.PointerToBase) { 2455 errs() << " derived "; 2456 Pair.first->printAsOperand(errs(), false); 2457 errs() << " base "; 2458 Pair.second->printAsOperand(errs(), false); 2459 errs() << "\n"; 2460 } 2461 } 2462 } 2463 2464 // It is possible that non-constant live variables have a constant base. For 2465 // example, a GEP with a variable offset from a global. In this case we can 2466 // remove it from the liveset. We already don't add constants to the liveset 2467 // because we assume they won't move at runtime and the GC doesn't need to be 2468 // informed about them. The same reasoning applies if the base is constant. 2469 // Note that the relocation placement code relies on this filtering for 2470 // correctness as it expects the base to be in the liveset, which isn't true 2471 // if the base is constant. 2472 for (auto &Info : Records) 2473 for (auto &BasePair : Info.PointerToBase) 2474 if (isa<Constant>(BasePair.second)) 2475 Info.LiveSet.remove(BasePair.first); 2476 2477 for (CallInst *CI : Holders) 2478 CI->eraseFromParent(); 2479 2480 Holders.clear(); 2481 2482 // In order to reduce live set of statepoint we might choose to rematerialize 2483 // some values instead of relocating them. This is purely an optimization and 2484 // does not influence correctness. 2485 for (size_t i = 0; i < Records.size(); i++) 2486 rematerializeLiveValues(ToUpdate[i], Records[i], TTI); 2487 2488 // We need this to safely RAUW and delete call or invoke return values that 2489 // may themselves be live over a statepoint. For details, please see usage in 2490 // makeStatepointExplicitImpl. 2491 std::vector<DeferredReplacement> Replacements; 2492 2493 // Now run through and replace the existing statepoints with new ones with 2494 // the live variables listed. We do not yet update uses of the values being 2495 // relocated. We have references to live variables that need to 2496 // survive to the last iteration of this loop. 
(By construction, the
  // previous statepoint cannot be a live variable, thus we can and do remove
  // the old statepoint calls as we go.)
  for (size_t i = 0; i < Records.size(); i++)
    makeStatepointExplicit(DT, ToUpdate[i], Records[i], Replacements);

  ToUpdate.clear(); // prevent accidental use of invalid calls.

  for (auto &PR : Replacements)
    PR.doReplacement();

  Replacements.clear();

  for (auto &Info : Records) {
    // These live sets may contain stale Value pointers, since we replaced
    // calls with operand bundles with calls wrapped in gc.statepoint, and
    // some of those calls may have been def'ing live gc pointers. Clear these
    // out to avoid accidentally using them.
    //
    // TODO: We should create a separate data structure that does not contain
    // these live sets, and migrate to using that data structure from this
    // point onward.
    Info.LiveSet.clear();
    Info.PointerToBase.clear();
  }

  // Do all the fixups of the original live variables to their relocated selves
  SmallVector<Value *, 128> Live;
  for (size_t i = 0; i < Records.size(); i++) {
    PartiallyConstructedSafepointRecord &Info = Records[i];

    // We can't simply save the live set from the original insertion. One of
    // the live values might be the result of a call which needs a safepoint.
    // That Value* no longer exists and we need to use the new gc_result.
    // Thankfully, the live set is embedded in the statepoint (and updated), so
    // we just grab that.
    llvm::append_range(Live, Info.StatepointToken->gc_args());
#ifndef NDEBUG
    // Do some basic sanity checks on our liveness results before performing
    // relocation. Relocation can and will turn mistakes in liveness results
    // into non-sensical code which is much harder to debug.
    // TODO: It would be nice to test consistency as well.
    assert(DT.isReachableFromEntry(Info.StatepointToken->getParent()) &&
           "statepoint must be reachable or liveness is meaningless");
    for (Value *V : Info.StatepointToken->gc_args()) {
      if (!isa<Instruction>(V))
        // Non-instruction values trivially dominate all possible uses.
        continue;
      auto *LiveInst = cast<Instruction>(V);
      assert(DT.isReachableFromEntry(LiveInst->getParent()) &&
             "unreachable values should never be live");
      assert(DT.dominates(LiveInst, Info.StatepointToken) &&
             "basic SSA liveness expectation violated by liveness analysis");
    }
#endif
  }
  unique_unsorted(Live);

#ifndef NDEBUG
  // Sanity check.
  for (auto *Ptr : Live)
    assert(isHandledGCPointerType(Ptr->getType()) &&
           "must be a gc pointer type");
#endif

  relocationViaAlloca(F, DT, Live, Records);
  return !Records.empty();
}

// Handles both return values and arguments for Functions and calls.
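//
// For example (illustrative), an argument attribute such as
//   define void @f(i8 addrspace(1)* dereferenceable(16) %p)
// is reduced to
//   define void @f(i8 addrspace(1)* %p)
// because, once this pass has run, a call to gc.statepoint may conceptually
// touch or free the entire heap, so dereferenceability cannot be assumed to
// survive across the safepoints we insert.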
2566 template <typename AttrHolder> 2567 static void RemoveNonValidAttrAtIndex(LLVMContext &Ctx, AttrHolder &AH, 2568 unsigned Index) { 2569 AttrBuilder R; 2570 if (AH.getDereferenceableBytes(Index)) 2571 R.addAttribute(Attribute::get(Ctx, Attribute::Dereferenceable, 2572 AH.getDereferenceableBytes(Index))); 2573 if (AH.getDereferenceableOrNullBytes(Index)) 2574 R.addAttribute(Attribute::get(Ctx, Attribute::DereferenceableOrNull, 2575 AH.getDereferenceableOrNullBytes(Index))); 2576 if (AH.getAttributes().hasAttribute(Index, Attribute::NoAlias)) 2577 R.addAttribute(Attribute::NoAlias); 2578 2579 if (!R.empty()) 2580 AH.setAttributes(AH.getAttributes().removeAttributes(Ctx, Index, R)); 2581 } 2582 2583 static void stripNonValidAttributesFromPrototype(Function &F) { 2584 LLVMContext &Ctx = F.getContext(); 2585 2586 for (Argument &A : F.args()) 2587 if (isa<PointerType>(A.getType())) 2588 RemoveNonValidAttrAtIndex(Ctx, F, 2589 A.getArgNo() + AttributeList::FirstArgIndex); 2590 2591 if (isa<PointerType>(F.getReturnType())) 2592 RemoveNonValidAttrAtIndex(Ctx, F, AttributeList::ReturnIndex); 2593 } 2594 2595 /// Certain metadata on instructions are invalid after running RS4GC. 2596 /// Optimizations that run after RS4GC can incorrectly use this metadata to 2597 /// optimize functions. We drop such metadata on the instruction. 2598 static void stripInvalidMetadataFromInstruction(Instruction &I) { 2599 if (!isa<LoadInst>(I) && !isa<StoreInst>(I)) 2600 return; 2601 // These are the attributes that are still valid on loads and stores after 2602 // RS4GC. 2603 // The metadata implying dereferenceability and noalias are (conservatively) 2604 // dropped. This is because semantically, after RewriteStatepointsForGC runs, 2605 // all calls to gc.statepoint "free" the entire heap. Also, gc.statepoint can 2606 // touch the entire heap including noalias objects. Note: The reasoning is 2607 // same as stripping the dereferenceability and noalias attributes that are 2608 // analogous to the metadata counterparts. 2609 // We also drop the invariant.load metadata on the load because that metadata 2610 // implies the address operand to the load points to memory that is never 2611 // changed once it became dereferenceable. This is no longer true after RS4GC. 2612 // Similar reasoning applies to invariant.group metadata, which applies to 2613 // loads within a group. 2614 unsigned ValidMetadataAfterRS4GC[] = {LLVMContext::MD_tbaa, 2615 LLVMContext::MD_range, 2616 LLVMContext::MD_alias_scope, 2617 LLVMContext::MD_nontemporal, 2618 LLVMContext::MD_nonnull, 2619 LLVMContext::MD_align, 2620 LLVMContext::MD_type}; 2621 2622 // Drops all metadata on the instruction other than ValidMetadataAfterRS4GC. 2623 I.dropUnknownNonDebugMetadata(ValidMetadataAfterRS4GC); 2624 } 2625 2626 static void stripNonValidDataFromBody(Function &F) { 2627 if (F.empty()) 2628 return; 2629 2630 LLVMContext &Ctx = F.getContext(); 2631 MDBuilder Builder(Ctx); 2632 2633 // Set of invariantstart instructions that we need to remove. 2634 // Use this to avoid invalidating the instruction iterator. 2635 SmallVector<IntrinsicInst*, 12> InvariantStartInstructions; 2636 2637 for (Instruction &I : instructions(F)) { 2638 // invariant.start on memory location implies that the referenced memory 2639 // location is constant and unchanging. 
This is no longer true after 2640 // RewriteStatepointsForGC runs because there can be calls to gc.statepoint 2641 // which frees the entire heap and the presence of invariant.start allows 2642 // the optimizer to sink the load of a memory location past a statepoint, 2643 // which is incorrect. 2644 if (auto *II = dyn_cast<IntrinsicInst>(&I)) 2645 if (II->getIntrinsicID() == Intrinsic::invariant_start) { 2646 InvariantStartInstructions.push_back(II); 2647 continue; 2648 } 2649 2650 if (MDNode *Tag = I.getMetadata(LLVMContext::MD_tbaa)) { 2651 MDNode *MutableTBAA = Builder.createMutableTBAAAccessTag(Tag); 2652 I.setMetadata(LLVMContext::MD_tbaa, MutableTBAA); 2653 } 2654 2655 stripInvalidMetadataFromInstruction(I); 2656 2657 if (auto *Call = dyn_cast<CallBase>(&I)) { 2658 for (int i = 0, e = Call->arg_size(); i != e; i++) 2659 if (isa<PointerType>(Call->getArgOperand(i)->getType())) 2660 RemoveNonValidAttrAtIndex(Ctx, *Call, 2661 i + AttributeList::FirstArgIndex); 2662 if (isa<PointerType>(Call->getType())) 2663 RemoveNonValidAttrAtIndex(Ctx, *Call, AttributeList::ReturnIndex); 2664 } 2665 } 2666 2667 // Delete the invariant.start instructions and RAUW undef. 2668 for (auto *II : InvariantStartInstructions) { 2669 II->replaceAllUsesWith(UndefValue::get(II->getType())); 2670 II->eraseFromParent(); 2671 } 2672 } 2673 2674 /// Returns true if this function should be rewritten by this pass. The main 2675 /// point of this function is as an extension point for custom logic. 2676 static bool shouldRewriteStatepointsIn(Function &F) { 2677 // TODO: This should check the GCStrategy 2678 if (F.hasGC()) { 2679 const auto &FunctionGCName = F.getGC(); 2680 const StringRef StatepointExampleName("statepoint-example"); 2681 const StringRef CoreCLRName("coreclr"); 2682 return (StatepointExampleName == FunctionGCName) || 2683 (CoreCLRName == FunctionGCName); 2684 } else 2685 return false; 2686 } 2687 2688 static void stripNonValidData(Module &M) { 2689 #ifndef NDEBUG 2690 assert(llvm::any_of(M, shouldRewriteStatepointsIn) && "precondition!"); 2691 #endif 2692 2693 for (Function &F : M) 2694 stripNonValidAttributesFromPrototype(F); 2695 2696 for (Function &F : M) 2697 stripNonValidDataFromBody(F); 2698 } 2699 2700 bool RewriteStatepointsForGC::runOnFunction(Function &F, DominatorTree &DT, 2701 TargetTransformInfo &TTI, 2702 const TargetLibraryInfo &TLI) { 2703 assert(!F.isDeclaration() && !F.empty() && 2704 "need function body to rewrite statepoints in"); 2705 assert(shouldRewriteStatepointsIn(F) && "mismatch in rewrite decision"); 2706 2707 auto NeedsRewrite = [&TLI](Instruction &I) { 2708 if (const auto *Call = dyn_cast<CallBase>(&I)) { 2709 if (isa<GCStatepointInst>(Call)) 2710 return false; 2711 if (callsGCLeafFunction(Call, TLI)) 2712 return false; 2713 2714 // Normally it's up to the frontend to make sure that non-leaf calls also 2715 // have proper deopt state if it is required. We make an exception for 2716 // element atomic memcpy/memmove intrinsics here. Unlike other intrinsics 2717 // these are non-leaf by default. They might be generated by the optimizer 2718 // which doesn't know how to produce a proper deopt state. So if we see a 2719 // non-leaf memcpy/memmove without deopt state just treat it as a leaf 2720 // copy and don't produce a statepoint. 
2721 if (!AllowStatepointWithNoDeoptInfo && 2722 !Call->getOperandBundle(LLVMContext::OB_deopt)) { 2723 assert((isa<AtomicMemCpyInst>(Call) || isa<AtomicMemMoveInst>(Call)) && 2724 "Don't expect any other calls here!"); 2725 return false; 2726 } 2727 return true; 2728 } 2729 return false; 2730 }; 2731 2732 // Delete any unreachable statepoints so that we don't have unrewritten 2733 // statepoints surviving this pass. This makes testing easier and the 2734 // resulting IR less confusing to human readers. 2735 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy); 2736 bool MadeChange = removeUnreachableBlocks(F, &DTU); 2737 // Flush the Dominator Tree. 2738 DTU.getDomTree(); 2739 2740 // Gather all the statepoints which need rewritten. Be careful to only 2741 // consider those in reachable code since we need to ask dominance queries 2742 // when rewriting. We'll delete the unreachable ones in a moment. 2743 SmallVector<CallBase *, 64> ParsePointNeeded; 2744 for (Instruction &I : instructions(F)) { 2745 // TODO: only the ones with the flag set! 2746 if (NeedsRewrite(I)) { 2747 // NOTE removeUnreachableBlocks() is stronger than 2748 // DominatorTree::isReachableFromEntry(). In other words 2749 // removeUnreachableBlocks can remove some blocks for which 2750 // isReachableFromEntry() returns true. 2751 assert(DT.isReachableFromEntry(I.getParent()) && 2752 "no unreachable blocks expected"); 2753 ParsePointNeeded.push_back(cast<CallBase>(&I)); 2754 } 2755 } 2756 2757 // Return early if no work to do. 2758 if (ParsePointNeeded.empty()) 2759 return MadeChange; 2760 2761 // As a prepass, go ahead and aggressively destroy single entry phi nodes. 2762 // These are created by LCSSA. They have the effect of increasing the size 2763 // of liveness sets for no good reason. It may be harder to do this post 2764 // insertion since relocations and base phis can confuse things. 2765 for (BasicBlock &BB : F) 2766 if (BB.getUniquePredecessor()) 2767 MadeChange |= FoldSingleEntryPHINodes(&BB); 2768 2769 // Before we start introducing relocations, we want to tweak the IR a bit to 2770 // avoid unfortunate code generation effects. The main example is that we 2771 // want to try to make sure the comparison feeding a branch is after any 2772 // safepoints. Otherwise, we end up with a comparison of pre-relocation 2773 // values feeding a branch after relocation. This is semantically correct, 2774 // but results in extra register pressure since both the pre-relocation and 2775 // post-relocation copies must be available in registers. For code without 2776 // relocations this is handled elsewhere, but teaching the scheduler to 2777 // reverse the transform we're about to do would be slightly complex. 2778 // Note: This may extend the live range of the inputs to the icmp and thus 2779 // increase the liveset of any statepoint we move over. This is profitable 2780 // as long as all statepoints are in rare blocks. If we had in-register 2781 // lowering for live values this would be a much safer transform. 2782 auto getConditionInst = [](Instruction *TI) -> Instruction * { 2783 if (auto *BI = dyn_cast<BranchInst>(TI)) 2784 if (BI->isConditional()) 2785 return dyn_cast<Instruction>(BI->getCondition()); 2786 // TODO: Extend this to handle switches 2787 return nullptr; 2788 }; 2789 for (BasicBlock &BB : F) { 2790 Instruction *TI = BB.getTerminator(); 2791 if (auto *Cond = getConditionInst(TI)) 2792 // TODO: Handle more than just ICmps here. 
  auto getConditionInst = [](Instruction *TI) -> Instruction * {
    if (auto *BI = dyn_cast<BranchInst>(TI))
      if (BI->isConditional())
        return dyn_cast<Instruction>(BI->getCondition());
    // TODO: Extend this to handle switches
    return nullptr;
  };
  for (BasicBlock &BB : F) {
    Instruction *TI = BB.getTerminator();
    if (auto *Cond = getConditionInst(TI))
      // TODO: Handle more than just ICmps here. We should be able to move
      // most instructions without side effects or memory access.
      if (isa<ICmpInst>(Cond) && Cond->hasOneUse()) {
        MadeChange = true;
        Cond->moveBefore(TI);
      }
  }

  // Nasty workaround - The base computation code in the main algorithm doesn't
  // consider the fact that a GEP can be used to convert a scalar to a vector.
  // The right fix for this is to integrate GEPs into the base rewriting
  // algorithm properly; this is just a short-term workaround to prevent
  // crashes by canonicalizing such GEPs into fully vector GEPs.
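  // As an illustrative sketch (hypothetical IR, not from an actual test), a
  // GEP with a scalar pointer operand but vector indices such as
  //   %gep = getelementptr i32, i32 addrspace(1)* %base, <2 x i64> %idx
  // is rewritten to operate on a splat of the base pointer, roughly
  //   %base.splat = ...%base splatted to <2 x i32 addrspace(1)*>...
  //   %gep = getelementptr i32, <2 x i32 addrspace(1)*> %base.splat, <2 x i64> %idx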
  for (Instruction &I : instructions(F)) {
    if (!isa<GetElementPtrInst>(I))
      continue;

    unsigned VF = 0;
    for (unsigned i = 0; i < I.getNumOperands(); i++)
      if (auto *OpndVTy = dyn_cast<VectorType>(I.getOperand(i)->getType())) {
        assert(VF == 0 ||
               VF == cast<FixedVectorType>(OpndVTy)->getNumElements());
        VF = cast<FixedVectorType>(OpndVTy)->getNumElements();
      }

    // It's the vector to scalar traversal through the pointer operand which
    // confuses base pointer rewriting, so limit ourselves to that case.
    if (!I.getOperand(0)->getType()->isVectorTy() && VF != 0) {
      IRBuilder<> B(&I);
      auto *Splat = B.CreateVectorSplat(VF, I.getOperand(0));
      I.setOperand(0, Splat);
      MadeChange = true;
    }
  }

  MadeChange |= insertParsePoints(F, DT, TTI, ParsePointNeeded);
  return MadeChange;
}

// liveness computation via standard dataflow
// -------------------------------------------------------------------

// TODO: Consider using bitvectors for liveness, the set of potentially
// interesting values should be small and easy to pre-compute.

/// Compute the live-in set for the location Begin, starting from
/// the live-out set of the basic block.
static void computeLiveInValues(BasicBlock::reverse_iterator Begin,
                                BasicBlock::reverse_iterator End,
                                SetVector<Value *> &LiveTmp) {
  for (auto &I : make_range(Begin, End)) {
    // KILL/Def - Remove this definition from LiveIn
    LiveTmp.remove(&I);

    // Don't consider *uses* in PHI nodes; we handle their contribution to
    // predecessor blocks when we seed the LiveOut sets.
    if (isa<PHINode>(I))
      continue;

    // USE - Add to the LiveIn set for this instruction
    for (Value *V : I.operands()) {
      assert(!isUnhandledGCPointerType(V->getType()) &&
             "support for FCA unimplemented");
      if (isHandledGCPointerType(V->getType()) && !isa<Constant>(V)) {
        // The choice to exclude all things constant here is slightly subtle.
        // There are two independent reasons:
        // - We assume that things which are constant (from LLVM's definition)
        //   do not move at runtime. For example, the address of a global
        //   variable is fixed, even though its contents may not be.
        // - Second, we can't disallow arbitrary inttoptr constants even
        //   if the language frontend does. Optimization passes are free to
        //   locally exploit facts without respect to global reachability. This
        //   can create sections of code which are dynamically unreachable and
        //   contain just about anything. (see constants.ll in tests)
        LiveTmp.insert(V);
      }
    }
  }
}

static void computeLiveOutSeed(BasicBlock *BB, SetVector<Value *> &LiveTmp) {
  for (BasicBlock *Succ : successors(BB)) {
    for (auto &I : *Succ) {
      PHINode *PN = dyn_cast<PHINode>(&I);
      if (!PN)
        break;

      Value *V = PN->getIncomingValueForBlock(BB);
      assert(!isUnhandledGCPointerType(V->getType()) &&
             "support for FCA unimplemented");
      if (isHandledGCPointerType(V->getType()) && !isa<Constant>(V))
        LiveTmp.insert(V);
    }
  }
}

static SetVector<Value *> computeKillSet(BasicBlock *BB) {
  SetVector<Value *> KillSet;
  for (Instruction &I : *BB)
    if (isHandledGCPointerType(I.getType()))
      KillSet.insert(&I);
  return KillSet;
}

#ifndef NDEBUG
/// Check that the items in 'Live' dominate 'TI'. This is used as a basic
/// sanity check for the liveness computation.
static void checkBasicSSA(DominatorTree &DT, SetVector<Value *> &Live,
                          Instruction *TI, bool TermOkay = false) {
  for (Value *V : Live) {
    if (auto *I = dyn_cast<Instruction>(V)) {
      // The terminator can be a member of the LiveOut set. LLVM's definition
      // of instruction dominance states that V does not dominate itself. As
      // such, we need to special case this to allow it.
      if (TermOkay && TI == I)
        continue;
      assert(DT.dominates(I, TI) &&
             "basic SSA liveness expectation violated by liveness analysis");
    }
  }
}

/// Check that all the liveness sets used during the computation of liveness
/// obey basic SSA properties. This is useful for finding cases where we miss
/// a def.
static void checkBasicSSA(DominatorTree &DT, GCPtrLivenessData &Data,
                          BasicBlock &BB) {
  checkBasicSSA(DT, Data.LiveSet[&BB], BB.getTerminator());
  checkBasicSSA(DT, Data.LiveOut[&BB], BB.getTerminator(), true);
  checkBasicSSA(DT, Data.LiveIn[&BB], BB.getTerminator());
}
#endif

static void computeLiveInValues(DominatorTree &DT, Function &F,
                                GCPtrLivenessData &Data) {
  SmallSetVector<BasicBlock *, 32> Worklist;

  // Seed the liveness for each individual block
  for (BasicBlock &BB : F) {
    Data.KillSet[&BB] = computeKillSet(&BB);
    Data.LiveSet[&BB].clear();
    computeLiveInValues(BB.rbegin(), BB.rend(), Data.LiveSet[&BB]);

#ifndef NDEBUG
    for (Value *Kill : Data.KillSet[&BB])
      assert(!Data.LiveSet[&BB].count(Kill) && "live set contains kill");
#endif

    Data.LiveOut[&BB] = SetVector<Value *>();
    computeLiveOutSeed(&BB, Data.LiveOut[&BB]);
    Data.LiveIn[&BB] = Data.LiveSet[&BB];
    Data.LiveIn[&BB].set_union(Data.LiveOut[&BB]);
    Data.LiveIn[&BB].set_subtract(Data.KillSet[&BB]);
    if (!Data.LiveIn[&BB].empty())
      Worklist.insert(pred_begin(&BB), pred_end(&BB));
  }

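  // The worklist loop below then iterates the standard backward dataflow to a
  // fixed point. As a sketch, with LiveSet playing the role of the gen set:
  //   LiveOut(BB) = union over successors S of LiveIn(S)   (seeded above with
  //                 the PHI uses on BB's outgoing edges)
  //   LiveIn(BB)  = (LiveOut(BB) U LiveSet(BB)) \ KillSet(BB)
  // and a block's predecessors are revisited whenever its LiveIn set grows.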
  // Propagate that liveness until stable
  while (!Worklist.empty()) {
    BasicBlock *BB = Worklist.pop_back_val();

    // Compute our new liveout set, then exit early if it hasn't changed
    // despite the contribution of our successors.
    SetVector<Value *> LiveOut = Data.LiveOut[BB];
    const auto OldLiveOutSize = LiveOut.size();
    for (BasicBlock *Succ : successors(BB)) {
      assert(Data.LiveIn.count(Succ));
      LiveOut.set_union(Data.LiveIn[Succ]);
    }
    // assert: OldLiveOut is a subset of LiveOut
    if (OldLiveOutSize == LiveOut.size()) {
      // If the sets are the same size, then we didn't actually add anything
      // when unioning our successors' LiveIn. Thus, the LiveIn of this block
      // hasn't changed.
      continue;
    }
    Data.LiveOut[BB] = LiveOut;

    // Apply the effects of this basic block
    SetVector<Value *> LiveTmp = LiveOut;
    LiveTmp.set_union(Data.LiveSet[BB]);
    LiveTmp.set_subtract(Data.KillSet[BB]);

    assert(Data.LiveIn.count(BB));
    const SetVector<Value *> &OldLiveIn = Data.LiveIn[BB];
    // assert: OldLiveIn is a subset of LiveTmp
    if (OldLiveIn.size() != LiveTmp.size()) {
      Data.LiveIn[BB] = LiveTmp;
      Worklist.insert(pred_begin(BB), pred_end(BB));
    }
  } // while (!Worklist.empty())

#ifndef NDEBUG
  // Sanity check our output against SSA properties. This helps catch any
  // missing kills during the above iteration.
  for (BasicBlock &BB : F)
    checkBasicSSA(DT, Data, BB);
#endif
}

static void findLiveSetAtInst(Instruction *Inst, GCPtrLivenessData &Data,
                              StatepointLiveSetTy &Out) {
  BasicBlock *BB = Inst->getParent();

  // Note: The copy is intentional and required.
  assert(Data.LiveOut.count(BB));
  SetVector<Value *> LiveOut = Data.LiveOut[BB];

  // We want to handle the statepoint itself oddly. Its
  // call result is not live (normal), nor are its arguments
  // (unless they're used again later). This adjustment is
  // specifically what we need to relocate.
  computeLiveInValues(BB->rbegin(), ++Inst->getIterator().getReverse(),
                      LiveOut);
  LiveOut.remove(Inst);
  Out.insert(LiveOut.begin(), LiveOut.end());
}

static void recomputeLiveInValues(GCPtrLivenessData &RevisedLivenessData,
                                  CallBase *Call,
                                  PartiallyConstructedSafepointRecord &Info) {
  StatepointLiveSetTy Updated;
  findLiveSetAtInst(Call, RevisedLivenessData, Updated);

  // We may have base pointers which are now live that weren't before. We need
  // to update the PointerToBase structure to reflect this.
  for (auto V : Updated)
    if (Info.PointerToBase.insert({V, V}).second) {
      assert(isKnownBaseResult(V) &&
             "Can't find base for unexpected live value!");
      continue;
    }

#ifndef NDEBUG
  for (auto V : Updated)
    assert(Info.PointerToBase.count(V) &&
           "Must be able to find base for live value!");
#endif

  // Remove any stale base mappings - this can happen since our liveness is
  // more precise than the one inherent in the base pointer analysis.
  DenseSet<Value *> ToErase;
  for (auto KVPair : Info.PointerToBase)
    if (!Updated.count(KVPair.first))
      ToErase.insert(KVPair.first);

  for (auto *V : ToErase)
    Info.PointerToBase.erase(V);

#ifndef NDEBUG
  for (auto KVPair : Info.PointerToBase)
    assert(Updated.count(KVPair.first) && "record for non-live value");
#endif

  Info.LiveSet = Updated;
}