//===- RewriteStatepointsForGC.cpp - Make GC relocations explicit --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Rewrite call/invoke instructions so as to make potential relocations
// performed by the garbage collector explicit in the IR.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/RewriteStatepointsForGC.h"

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <set>
#include <string>
#include <utility>
#include <vector>

#define DEBUG_TYPE "rewrite-statepoints-for-gc"

using namespace llvm;

// Print the liveset found at the insert location
static cl::opt<bool> PrintLiveSet("spp-print-liveset", cl::Hidden,
                                  cl::init(false));
static cl::opt<bool> PrintLiveSetSize("spp-print-liveset-size", cl::Hidden,
                                      cl::init(false));

// Print out the base pointers for debugging
static cl::opt<bool> PrintBasePointers("spp-print-base-pointers", cl::Hidden,
                                       cl::init(false));

// Cost threshold measuring when it is profitable to rematerialize value instead
// of relocating it
static cl::opt<unsigned>
    RematerializationThreshold("spp-rematerialization-threshold", cl::Hidden,
                               cl::init(6));

#ifdef EXPENSIVE_CHECKS
static bool ClobberNonLive = true;
#else
static bool ClobberNonLive = false;
#endif

static cl::opt<bool, true> ClobberNonLiveOverride("rs4gc-clobber-non-live",
                                                  cl::location(ClobberNonLive),
                                                  cl::Hidden);

static cl::opt<bool>
    AllowStatepointWithNoDeoptInfo("rs4gc-allow-statepoint-with-no-deopt-info",
                                   cl::Hidden, cl::init(true));

/// The IR fed into RewriteStatepointsForGC may have had attributes and
/// metadata implying dereferenceability that are no longer valid/correct after
/// RewriteStatepointsForGC has run. This is because semantically, after
/// RewriteStatepointsForGC runs, all calls to gc.statepoint "free" the entire
/// heap. stripNonValidData (conservatively) restores
/// correctness by erasing all attributes in the module that externally imply
/// dereferenceability. Similar reasoning also applies to the noalias
/// attributes and metadata. gc.statepoint can touch the entire heap including
/// noalias objects.
/// Apart from attributes and metadata, we also remove instructions that imply
/// constant physical memory: llvm.invariant.start.
static void stripNonValidData(Module &M);

static bool shouldRewriteStatepointsIn(Function &F);

PreservedAnalyses RewriteStatepointsForGC::run(Module &M,
                                               ModuleAnalysisManager &AM) {
  bool Changed = false;
  auto &FAM = AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
  for (Function &F : M) {
    // Nothing to do for declarations.
    if (F.isDeclaration() || F.empty())
      continue;

    // Policy choice says not to rewrite - the most common reason is that we're
    // compiling code without a GCStrategy.
    if (!shouldRewriteStatepointsIn(F))
      continue;

    auto &DT = FAM.getResult<DominatorTreeAnalysis>(F);
    auto &TTI = FAM.getResult<TargetIRAnalysis>(F);
    auto &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
    Changed |= runOnFunction(F, DT, TTI, TLI);
  }
  if (!Changed)
    return PreservedAnalyses::all();

  // stripNonValidData asserts that shouldRewriteStatepointsIn
  // returns true for at least one function in the module.  Since at least
  // one function changed, we know that the precondition is satisfied.
  stripNonValidData(M);

  PreservedAnalyses PA;
  PA.preserve<TargetIRAnalysis>();
  PA.preserve<TargetLibraryAnalysis>();
  return PA;
}

namespace {

class RewriteStatepointsForGCLegacyPass : public ModulePass {
  RewriteStatepointsForGC Impl;

public:
  static char ID; // Pass identification, replacement for typeid

  RewriteStatepointsForGCLegacyPass() : ModulePass(ID), Impl() {
    initializeRewriteStatepointsForGCLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  bool runOnModule(Module &M) override {
    bool Changed = false;
    for (Function &F : M) {
      // Nothing to do for declarations.
      if (F.isDeclaration() || F.empty())
        continue;

      // Policy choice says not to rewrite - the most common reason is that
      // we're compiling code without a GCStrategy.
      if (!shouldRewriteStatepointsIn(F))
        continue;

      TargetTransformInfo &TTI =
          getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
      const TargetLibraryInfo &TLI =
          getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
      auto &DT = getAnalysis<DominatorTreeWrapperPass>(F).getDomTree();

      Changed |= Impl.runOnFunction(F, DT, TTI, TLI);
    }

    if (!Changed)
      return false;

    // stripNonValidData asserts that shouldRewriteStatepointsIn
    // returns true for at least one function in the module.  Since at least
    // one function changed, we know that the precondition is satisfied.
    stripNonValidData(M);
    return true;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // We add and rewrite a bunch of instructions, but don't really do much
    // else.  We could in theory preserve a lot more analyses here.
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
  }
};

} // end anonymous namespace

char RewriteStatepointsForGCLegacyPass::ID = 0;

ModulePass *llvm::createRewriteStatepointsForGCLegacyPass() {
  return new RewriteStatepointsForGCLegacyPass();
}

INITIALIZE_PASS_BEGIN(RewriteStatepointsForGCLegacyPass,
                      "rewrite-statepoints-for-gc",
                      "Make relocations explicit at statepoints", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(RewriteStatepointsForGCLegacyPass,
                    "rewrite-statepoints-for-gc",
                    "Make relocations explicit at statepoints", false, false)

namespace {

struct GCPtrLivenessData {
  /// Values defined in this block.
  MapVector<BasicBlock *, SetVector<Value *>> KillSet;

  /// Values used in this block (and thus live); does not include values
  /// killed within this block.
  MapVector<BasicBlock *, SetVector<Value *>> LiveSet;

  /// Values live into this basic block (i.e. used by any
  /// instruction in this basic block or ones reachable from here)
  MapVector<BasicBlock *, SetVector<Value *>> LiveIn;

  /// Values live out of this basic block (i.e. live into
  /// any successor block)
  MapVector<BasicBlock *, SetVector<Value *>> LiveOut;
};

// The type of the internal cache used inside the findBasePointers family
// of functions.  From the caller's perspective, this is an opaque type and
// should not be inspected.
//
// In the actual implementation this caches two relations:
// - The base relation itself (i.e. this pointer is based on that one)
// - The base defining value relation (i.e. before base_phi insertion)
// Generally, after the execution of a full findBasePointer call, only the
// base relation will remain.  Internally, we add a mixture of the two
// types, then update all entries of the second type to the first type.
using DefiningValueMapTy = MapVector<Value *, Value *>;
using StatepointLiveSetTy = SetVector<Value *>;
using RematerializedValueMapTy =
    MapVector<AssertingVH<Instruction>, AssertingVH<Value>>;

struct PartiallyConstructedSafepointRecord {
  /// The set of values known to be live across this safepoint
  StatepointLiveSetTy LiveSet;

  /// Mapping from live pointers to a base-defining-value
  MapVector<Value *, Value *> PointerToBase;

  /// The *new* gc.statepoint instruction itself.  This produces the token
  /// that normal path gc.relocates and the gc.result are tied to.
  GCStatepointInst *StatepointToken;

  /// Instruction to which exceptional gc relocates are attached.
  /// Makes it easier to iterate through them during relocationViaAlloca.
  Instruction *UnwindToken;

  /// Record live values that we rematerialized instead of relocating.
  /// They are not included in the 'LiveSet' field.
  /// Maps a rematerialized copy to its original value.
  RematerializedValueMapTy RematerializedValues;
};

} // end anonymous namespace

static ArrayRef<Use> GetDeoptBundleOperands(const CallBase *Call) {
  Optional<OperandBundleUse> DeoptBundle =
      Call->getOperandBundle(LLVMContext::OB_deopt);

  if (!DeoptBundle.hasValue()) {
    assert(AllowStatepointWithNoDeoptInfo &&
           "Found non-leaf call without deopt info!");
    return None;
  }

  return DeoptBundle.getValue().Inputs;
}

/// Compute the live-in set for every basic block in the function
static void computeLiveInValues(DominatorTree &DT, Function &F,
                                GCPtrLivenessData &Data);

/// Given results from the dataflow liveness computation, find the set of live
/// values at a particular instruction.
static void findLiveSetAtInst(Instruction *inst, GCPtrLivenessData &Data,
                              StatepointLiveSetTy &out);

// TODO: Once we can get to the GCStrategy, this becomes
// Optional<bool> isGCManagedPointer(const Type *Ty) const override {

static bool isGCPointerType(Type *T) {
  if (auto *PT = dyn_cast<PointerType>(T))
    // For the sake of this example GC, we arbitrarily pick addrspace(1) as our
    // GC managed heap.  We know that a pointer into this heap needs to be
    // updated and that no other pointer does.
    return PT->getAddressSpace() == 1;
  return false;
}

// Return true if this type is one which a) is a gc pointer or contains a GC
// pointer and b) is of a type this code expects to encounter as a live value.
// (The insertion code will assert that a type which matches (a) but not (b)
// is not encountered.)
static bool isHandledGCPointerType(Type *T) {
  // We fully support gc pointers
  if (isGCPointerType(T))
    return true;
  // We partially support vectors of gc pointers. The code will assert if it
  // can't handle something.
  if (auto VT = dyn_cast<VectorType>(T))
    if (isGCPointerType(VT->getElementType()))
      return true;
  return false;
}

#ifndef NDEBUG
/// Returns true if this type contains a gc pointer whether we know how to
/// handle that type or not.
static bool containsGCPtrType(Type *Ty) {
  if (isGCPointerType(Ty))
    return true;
  if (VectorType *VT = dyn_cast<VectorType>(Ty))
    return isGCPointerType(VT->getScalarType());
  if (ArrayType *AT = dyn_cast<ArrayType>(Ty))
    return containsGCPtrType(AT->getElementType());
  if (StructType *ST = dyn_cast<StructType>(Ty))
    return llvm::any_of(ST->elements(), containsGCPtrType);
  return false;
}

// Returns true if this is a type which a) is a gc pointer or contains a GC
// pointer and b) is of a type which the code doesn't expect (i.e. first class
// aggregates).  Used to trip assertions.
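// For illustration (hypothetical types, not taken from a test): with the
// example GC above, i8 addrspace(1)* and <4 x i8 addrspace(1)*> are handled,
// while a first class aggregate such as {i8 addrspace(1)*, i64} contains a gc
// pointer but is not a type this code expects to see as a live value.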
static bool isUnhandledGCPointerType(Type *Ty) {
  return containsGCPtrType(Ty) && !isHandledGCPointerType(Ty);
}
#endif

// Return the name of the value suffixed with the provided suffix, or, if the
// value didn't have a name, the specified default name.
static std::string suffixed_name_or(Value *V, StringRef Suffix,
                                    StringRef DefaultName) {
  return V->hasName() ? (V->getName() + Suffix).str() : DefaultName.str();
}

// Conservatively identifies any definitions which might be live at the
// given instruction.  The analysis is performed immediately before the
// given instruction.  Values defined by that instruction are not considered
// live.  Values used by that instruction are considered live.
static void analyzeParsePointLiveness(
    DominatorTree &DT, GCPtrLivenessData &OriginalLivenessData, CallBase *Call,
    PartiallyConstructedSafepointRecord &Result) {
  StatepointLiveSetTy LiveSet;
  findLiveSetAtInst(Call, OriginalLivenessData, LiveSet);

  if (PrintLiveSet) {
    dbgs() << "Live Variables:\n";
    for (Value *V : LiveSet)
      dbgs() << " " << V->getName() << " " << *V << "\n";
  }
  if (PrintLiveSetSize) {
    dbgs() << "Safepoint For: " << Call->getCalledOperand()->getName() << "\n";
    dbgs() << "Number live values: " << LiveSet.size() << "\n";
  }
  Result.LiveSet = LiveSet;
}

// Returns true if V is a knownBaseResult.
static bool isKnownBaseResult(Value *V);

// Returns true if V is a BaseResult that already exists in the IR, i.e. it is
// not created by the findBasePointers algorithm.
static bool isOriginalBaseResult(Value *V);

namespace {

/// A single base defining value - An immediate base defining value for an
/// instruction 'Def' is an input to 'Def' whose base is also a base of 'Def'.
/// For instructions which have multiple pointer [vector] inputs or that
/// transition between vector and scalar types, there is no immediate base
/// defining value.  The 'base defining value' for 'Def' is the transitive
/// closure of this relation stopping at the first instruction which has no
/// immediate base defining value.  The b.d.v. might itself be a base pointer,
/// but it can also be an arbitrary derived pointer.
struct BaseDefiningValueResult {
  /// Contains the value which is the base defining value.
  Value * const BDV;

  /// True if the base defining value is also known to be an actual base
  /// pointer.
  const bool IsKnownBase;

  BaseDefiningValueResult(Value *BDV, bool IsKnownBase)
      : BDV(BDV), IsKnownBase(IsKnownBase) {
#ifndef NDEBUG
    // Check consistency between new and old means of checking whether a BDV is
    // a base.
    bool MustBeBase = isKnownBaseResult(BDV);
    assert(!MustBeBase || MustBeBase == IsKnownBase);
#endif
  }
};

} // end anonymous namespace

static BaseDefiningValueResult findBaseDefiningValue(Value *I);

/// Return a base defining value for the 'Index' element of the given vector
/// instruction 'I'.  If Index is null, returns a BDV for the entire vector
/// 'I'.  As an optimization, this method will try to determine when the
/// element is known to already be a base pointer.  If this can be established,
/// the second value in the returned pair will be true.  Note that either a
/// vector or a pointer typed value can be returned.  For the former, the
/// vector returned is a BDV (and possibly a base) of the entire vector 'I'.
/// If the latter, the returned pointer is a BDV (or possibly a base) for the
/// particular element in 'I'.
static BaseDefiningValueResult
findBaseDefiningValueOfVector(Value *I) {
  // Each case parallels findBaseDefiningValue below, see that code for
  // detailed motivation.

  if (isa<Argument>(I))
    // An incoming argument to the function is a base pointer
    return BaseDefiningValueResult(I, true);

  if (isa<Constant>(I))
    // Base of constant vector consists only of constant null pointers.
    // For reasoning see similar case inside 'findBaseDefiningValue' function.
    return BaseDefiningValueResult(ConstantAggregateZero::get(I->getType()),
                                   true);

  if (isa<LoadInst>(I))
    return BaseDefiningValueResult(I, true);

  if (isa<InsertElementInst>(I))
    // We don't know whether this vector contains entirely base pointers or
    // not.  To be conservatively correct, we treat it as a BDV and will
    // duplicate code as needed to construct a parallel vector of bases.
    return BaseDefiningValueResult(I, false);

  if (isa<ShuffleVectorInst>(I))
    // We don't know whether this vector contains entirely base pointers or
    // not.  To be conservatively correct, we treat it as a BDV and will
    // duplicate code as needed to construct a parallel vector of bases.
    // TODO: There are a number of local optimizations which could be applied
    // here for particular shufflevector patterns.
    return BaseDefiningValueResult(I, false);

  // The behavior of getelementptr instructions is the same for vector and
  // non-vector data types.
  if (auto *GEP = dyn_cast<GetElementPtrInst>(I))
    return findBaseDefiningValue(GEP->getPointerOperand());

  // If the pointer comes through a bitcast of a vector of pointers to
  // a vector of another type of pointer, then look through the bitcast
  if (auto *BC = dyn_cast<BitCastInst>(I))
    return findBaseDefiningValue(BC->getOperand(0));

  // We assume that functions in the source language only return base
  // pointers.  This should probably be generalized via attributes to support
  // both source language and internal functions.
  if (isa<CallInst>(I) || isa<InvokeInst>(I))
    return BaseDefiningValueResult(I, true);

  // A PHI or Select is a base defining value.  The outer findBasePointer
  // algorithm is responsible for constructing a base value for this BDV.
  assert((isa<SelectInst>(I) || isa<PHINode>(I)) &&
         "unknown vector instruction - no base found for vector element");
  return BaseDefiningValueResult(I, false);
}

/// Helper function for findBasePointer - Will return a value which either a)
/// defines the base pointer for the input, b) blocks the simple search
/// (i.e. a PHI or Select of two derived pointers), or c) involves a change
/// from pointer to vector type or back.
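///
/// For illustration (hypothetical IR, not from a test): given
///   %merge = phi i8 addrspace(1)* [ %a, %left ], [ %b, %right ]
///   %derived = getelementptr i8, i8 addrspace(1)* %merge, i64 16
/// the base defining value of %derived is %merge, a PHI of derived pointers
/// (case b) that the outer findBasePointer algorithm must resolve.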
static BaseDefiningValueResult findBaseDefiningValue(Value *I) {
  assert(I->getType()->isPtrOrPtrVectorTy() &&
         "Illegal to ask for the base pointer of a non-pointer type");

  if (I->getType()->isVectorTy())
    return findBaseDefiningValueOfVector(I);

  if (isa<Argument>(I))
    // An incoming argument to the function is a base pointer
    // We should have never reached here if this argument isn't a gc value
    return BaseDefiningValueResult(I, true);

  if (isa<Constant>(I)) {
    // We assume that objects with a constant base (e.g. a global) can't move
    // and don't need to be reported to the collector because they are always
    // live.  Besides global references, all kinds of constants (e.g. undef,
    // constant expressions, null pointers) can be introduced by the inliner or
    // the optimizer, especially on dynamically dead paths.
    // Here we treat all of them as having a single null base.  By doing this
    // we try to avoid problems reporting various conflicts in a form of
    // "phi (const1, const2)" or "phi (const, regular gc ptr)".
    // See constant.ll file for relevant test cases.

    return BaseDefiningValueResult(
        ConstantPointerNull::get(cast<PointerType>(I->getType())), true);
  }

  if (CastInst *CI = dyn_cast<CastInst>(I)) {
    Value *Def = CI->stripPointerCasts();
    // If stripping pointer casts changes the address space there is an
    // addrspacecast in between.
    assert(cast<PointerType>(Def->getType())->getAddressSpace() ==
               cast<PointerType>(CI->getType())->getAddressSpace() &&
           "unsupported addrspacecast");
    // If we find a cast instruction here, it means we've found a cast which is
    // not simply a pointer cast (i.e. an inttoptr).  We don't know how to
    // handle int->ptr conversion.
    assert(!isa<CastInst>(Def) && "shouldn't find another cast here");
    return findBaseDefiningValue(Def);
  }

  if (isa<LoadInst>(I))
    // The value loaded is a gc base itself
    return BaseDefiningValueResult(I, true);

  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I))
    // The base of this GEP is the base
    return findBaseDefiningValue(GEP->getPointerOperand());

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default:
      // fall through to general call handling
      break;
    case Intrinsic::experimental_gc_statepoint:
      llvm_unreachable("statepoints don't produce pointers");
    case Intrinsic::experimental_gc_relocate:
      // Rerunning safepoint insertion after safepoints are already
      // inserted is not supported.  It could probably be made to work,
      // but why are you doing this?  There's no good reason.
      llvm_unreachable("repeat safepoint insertion is not supported");
    case Intrinsic::gcroot:
      // Currently, this mechanism hasn't been extended to work with gcroot.
      // There's no reason it couldn't be, but I haven't thought about the
      // implications much.
      llvm_unreachable(
          "interaction with the gcroot mechanism is not supported");
    }
  }
  // We assume that functions in the source language only return base
  // pointers.  This should probably be generalized via attributes to support
  // both source language and internal functions.
  if (isa<CallInst>(I) || isa<InvokeInst>(I))
    return BaseDefiningValueResult(I, true);

  // TODO: I have absolutely no idea how to implement this part yet.  It's not
  // necessarily hard, I just haven't really looked at it yet.
  assert(!isa<LandingPadInst>(I) && "Landing Pad is unimplemented");

  if (isa<AtomicCmpXchgInst>(I))
    // A CAS is effectively an atomic store and load combined under a
    // predicate.  From the perspective of base pointers, we just treat it
    // like a load.
    return BaseDefiningValueResult(I, true);

  assert(!isa<AtomicRMWInst>(I) && "Xchg handled above, all others are "
                                   "binary ops which don't apply to pointers");

  // The aggregate ops.  Aggregates can either be in the heap or on the
  // stack, but in either case, this is simply a field load.  As a result,
  // this is a defining definition of the base just like a load is.
  if (isa<ExtractValueInst>(I))
    return BaseDefiningValueResult(I, true);

  // We should never see an insert vector since that would require we be
  // tracing back a struct value not a pointer value.
  assert(!isa<InsertValueInst>(I) &&
         "Base pointer for a struct is meaningless");

  // An extractelement produces a base result exactly when its input does.
  // We may need to insert a parallel instruction to extract the appropriate
  // element out of the base vector corresponding to the input. Given this,
  // it's analogous to the phi and select case even though it's not a merge.
  if (isa<ExtractElementInst>(I))
    // Note: There are a lot of obvious peephole cases here.  These are
    // deliberately handled after the main base pointer inference algorithm to
    // make writing test cases to exercise that code easier.
    return BaseDefiningValueResult(I, false);

  // The last two cases here don't return a base pointer.  Instead, they
  // return a value which dynamically selects from among several base
  // derived pointers (each with its own base potentially).  It's the job of
  // the caller to resolve these.
  assert((isa<SelectInst>(I) || isa<PHINode>(I)) &&
         "missing instruction case in findBaseDefiningValue");
  return BaseDefiningValueResult(I, false);
}

/// Returns the base defining value for this value.
static Value *findBaseDefiningValueCached(Value *I, DefiningValueMapTy &Cache) {
  Value *&Cached = Cache[I];
  if (!Cached) {
    Cached = findBaseDefiningValue(I).BDV;
    LLVM_DEBUG(dbgs() << "fBDV-cached: " << I->getName() << " -> "
                      << Cached->getName() << "\n");
  }
  assert(Cache[I] != nullptr);
  return Cached;
}

/// Return a base pointer for this value if known.  Otherwise, return its
/// base defining value.
static Value *findBaseOrBDV(Value *I, DefiningValueMapTy &Cache) {
  Value *Def = findBaseDefiningValueCached(I, Cache);
  auto Found = Cache.find(Def);
  if (Found != Cache.end()) {
    // Either a base-of relation, or a self reference.  Caller must check.
    return Found->second;
  }
  // Only a BDV available
  return Def;
}

/// This value is a base pointer that is not generated by RS4GC, i.e. it
/// already exists in the code.
static bool isOriginalBaseResult(Value *V) {
  // no recursion possible
  return !isa<PHINode>(V) && !isa<SelectInst>(V) &&
         !isa<ExtractElementInst>(V) && !isa<InsertElementInst>(V) &&
         !isa<ShuffleVectorInst>(V);
}

/// Given the result of a call to findBaseDefiningValue, or findBaseOrBDV,
/// is it known to be a base pointer?  Or do we need to continue searching.
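/// Informally: values that are not one of the merge instruction forms listed
/// in isOriginalBaseResult (e.g. arguments, loads, calls) are original base
/// results, while phis, selects, and the vector merge instructions only become
/// known bases once this pass tags them with "is_base_value" metadata.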
static bool isKnownBaseResult(Value *V) {
  if (isOriginalBaseResult(V))
    return true;
  if (isa<Instruction>(V) &&
      cast<Instruction>(V)->getMetadata("is_base_value")) {
    // This is a previously inserted base phi or select.  We know
    // that this is a base value.
    return true;
  }

  // We need to keep searching
  return false;
}

// Returns true if First and Second values are both scalar or both vector.
static bool areBothVectorOrScalar(Value *First, Value *Second) {
  return isa<VectorType>(First->getType()) ==
         isa<VectorType>(Second->getType());
}

namespace {

/// Models the state of a single base defining value in the findBasePointer
/// algorithm for determining where a new instruction is needed to propagate
/// the base of this BDV.
class BDVState {
public:
  enum Status {
    // Starting state of lattice
    Unknown,
    // Some specific base value
    Base,
    // Need to insert a node to represent a merge.
    Conflict
  };

  BDVState() {}
  explicit BDVState(Status Status, Value *BaseValue = nullptr)
      : Status(Status), BaseValue(BaseValue) {
    assert(Status != Base || BaseValue);
  }

  Status getStatus() const { return Status; }
  Value *getBaseValue() const { return BaseValue; }

  bool isBase() const { return getStatus() == Base; }
  bool isUnknown() const { return getStatus() == Unknown; }
  bool isConflict() const { return getStatus() == Conflict; }

  bool operator==(const BDVState &Other) const {
    return BaseValue == Other.BaseValue && Status == Other.Status;
  }

  bool operator!=(const BDVState &other) const { return !(*this == other); }

  LLVM_DUMP_METHOD
  void dump() const {
    print(dbgs());
    dbgs() << '\n';
  }

  void print(raw_ostream &OS) const {
    switch (getStatus()) {
    case Unknown:
      OS << "U";
      break;
    case Base:
      OS << "B";
      break;
    case Conflict:
      OS << "C";
      break;
    }
    OS << " (" << getBaseValue() << " - "
       << (getBaseValue() ? getBaseValue()->getName() : "nullptr") << "): ";
  }

private:
  Status Status = Unknown;
  AssertingVH<Value> BaseValue = nullptr; // Non-null only if Status == Base.
};

} // end anonymous namespace

#ifndef NDEBUG
static raw_ostream &operator<<(raw_ostream &OS, const BDVState &State) {
  State.print(OS);
  return OS;
}
#endif

static BDVState meetBDVStateImpl(const BDVState &LHS, const BDVState &RHS) {
  switch (LHS.getStatus()) {
  case BDVState::Unknown:
    return RHS;

  case BDVState::Base:
    assert(LHS.getBaseValue() && "can't be null");
    if (RHS.isUnknown())
      return LHS;

    if (RHS.isBase()) {
      if (LHS.getBaseValue() == RHS.getBaseValue()) {
        assert(LHS == RHS && "equality broken!");
        return LHS;
      }
      return BDVState(BDVState::Conflict);
    }
    assert(RHS.isConflict() && "only three states!");
    return BDVState(BDVState::Conflict);

  case BDVState::Conflict:
    return LHS;
  }
  llvm_unreachable("only three states!");
}

// Values of type BDVState form a lattice, and this function implements the
// meet operation.
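// For example: meet(Unknown, X) == X for any X, meet(Base(%a), Base(%a)) ==
// Base(%a), meet(Base(%a), Base(%b)) == Conflict when %a != %b, and
// meet(Conflict, X) == Conflict.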
static BDVState meetBDVState(const BDVState &LHS, const BDVState &RHS) {
  BDVState Result = meetBDVStateImpl(LHS, RHS);
  assert(Result == meetBDVStateImpl(RHS, LHS) &&
         "Math is wrong: meet does not commute!");
  return Result;
}

/// For a given value or instruction, figure out what base ptr it's derived
/// from.  For gc objects, this is simply itself.  On success, returns a value
/// which is the base pointer.  (This is reliable and can be used for
/// relocation.)  On failure, returns nullptr.
static Value *findBasePointer(Value *I, DefiningValueMapTy &Cache) {
  Value *Def = findBaseOrBDV(I, Cache);

  if (isKnownBaseResult(Def) && areBothVectorOrScalar(Def, I))
    return Def;

  // Here's the rough algorithm:
  // - For every SSA value, construct a mapping to either an actual base
  //   pointer or a PHI which obscures the base pointer.
  // - Construct a mapping from PHI to unknown TOP state.  Use an
  //   optimistic algorithm to propagate base pointer information.  Lattice
  //   looks like:
  //   UNKNOWN
  //   b1 b2 b3 b4
  //   CONFLICT
  //   When algorithm terminates, all PHIs will either have a single concrete
  //   base or be in a conflict state.
  // - For every conflict, insert a dummy PHI node without arguments.  Add
  //   these to the base[Instruction] = BasePtr mapping.  For every
  //   non-conflict, add the actual base.
  // - For every conflict, add arguments for the base[a] of each input
  //   arguments.
  //
  // Note: A simpler form of this would be to add the conflict form of all
  // PHIs without running the optimistic algorithm.  This would be
  // analogous to pessimistic data flow and would likely lead to an
  // overall worse solution.

#ifndef NDEBUG
  auto isExpectedBDVType = [](Value *BDV) {
    return isa<PHINode>(BDV) || isa<SelectInst>(BDV) ||
           isa<ExtractElementInst>(BDV) || isa<InsertElementInst>(BDV) ||
           isa<ShuffleVectorInst>(BDV);
  };
#endif

  // Once populated, will contain a mapping from each potentially non-base BDV
  // to a lattice value (described above) which corresponds to that BDV.
  // We use the order of insertion (DFS over the def/use graph) to provide a
  // stable deterministic ordering for visiting DenseMaps (which are unordered)
  // below.  This is important for deterministic compilation.
  MapVector<Value *, BDVState> States;

  // Recursively fill in all base defining values reachable from the initial
  // one for which we don't already know a definite base value for
  /* scope */ {
    SmallVector<Value *, 16> Worklist;
    Worklist.push_back(Def);
    States.insert({Def, BDVState()});
    while (!Worklist.empty()) {
      Value *Current = Worklist.pop_back_val();
      assert(!isOriginalBaseResult(Current) && "why did it get added?");

      auto visitIncomingValue = [&](Value *InVal) {
        Value *Base = findBaseOrBDV(InVal, Cache);
        if (isKnownBaseResult(Base) && areBothVectorOrScalar(Base, InVal))
          // Known bases won't need new instructions introduced and can be
          // ignored safely.  However, this can only be done when InVal and
          // Base are both scalar or both vector.  Otherwise, we need to find
          // a correct BDV for InVal, by creating an entry in the lattice
          // (States).
          return;
        assert(isExpectedBDVType(Base) && "the only non-base values "
               "we see should be base defining values");
        if (States.insert(std::make_pair(Base, BDVState())).second)
          Worklist.push_back(Base);
      };
      if (PHINode *PN = dyn_cast<PHINode>(Current)) {
        for (Value *InVal : PN->incoming_values())
          visitIncomingValue(InVal);
      } else if (SelectInst *SI = dyn_cast<SelectInst>(Current)) {
        visitIncomingValue(SI->getTrueValue());
        visitIncomingValue(SI->getFalseValue());
      } else if (auto *EE = dyn_cast<ExtractElementInst>(Current)) {
        visitIncomingValue(EE->getVectorOperand());
      } else if (auto *IE = dyn_cast<InsertElementInst>(Current)) {
        visitIncomingValue(IE->getOperand(0)); // vector operand
        visitIncomingValue(IE->getOperand(1)); // scalar operand
      } else if (auto *SV = dyn_cast<ShuffleVectorInst>(Current)) {
        visitIncomingValue(SV->getOperand(0));
        visitIncomingValue(SV->getOperand(1));
      } else {
        llvm_unreachable("Unimplemented instruction case");
      }
    }
  }

#ifndef NDEBUG
  LLVM_DEBUG(dbgs() << "States after initialization:\n");
  for (auto Pair : States) {
    LLVM_DEBUG(dbgs() << " " << Pair.second << " for " << *Pair.first << "\n");
  }
#endif

  // Return a phi state for a base defining value.  We'll generate a new
  // base state for known bases and expect to find a cached state otherwise.
  auto GetStateForBDV = [&](Value *BaseValue, Value *Input) {
    if (isKnownBaseResult(BaseValue) && areBothVectorOrScalar(BaseValue, Input))
      return BDVState(BDVState::Base, BaseValue);
    auto I = States.find(BaseValue);
    assert(I != States.end() && "lookup failed!");
    return I->second;
  };

  bool Progress = true;
  while (Progress) {
#ifndef NDEBUG
    const size_t OldSize = States.size();
#endif
    Progress = false;
    // We're only changing values in this loop, thus safe to keep iterators.
    // Since this is computing a fixed point, the order of visit does not
    // affect the result.  TODO: We could use a worklist here and make this run
    // much faster.
    for (auto Pair : States) {
      Value *BDV = Pair.first;
      // Only values that do not have known bases or those that have differing
      // type (scalar versus vector) from a possible known base should be in
      // the lattice.
      assert((!isKnownBaseResult(BDV) ||
              !areBothVectorOrScalar(BDV, Pair.second.getBaseValue())) &&
             "why did it get added?");

      // Given an input value for the current instruction, return a BDVState
      // instance which represents the BDV of that value.
      auto getStateForInput = [&](Value *V) mutable {
        Value *BDV = findBaseOrBDV(V, Cache);
        return GetStateForBDV(BDV, V);
      };

      BDVState NewState;
      if (SelectInst *SI = dyn_cast<SelectInst>(BDV)) {
        NewState = meetBDVState(NewState, getStateForInput(SI->getTrueValue()));
        NewState =
            meetBDVState(NewState, getStateForInput(SI->getFalseValue()));
      } else if (PHINode *PN = dyn_cast<PHINode>(BDV)) {
        for (Value *Val : PN->incoming_values())
          NewState = meetBDVState(NewState, getStateForInput(Val));
      } else if (auto *EE = dyn_cast<ExtractElementInst>(BDV)) {
        // The 'meet' for an extractelement is slightly trivial, but it's still
        // useful in that it drives us to conflict if our input is.
        NewState =
            meetBDVState(NewState, getStateForInput(EE->getVectorOperand()));
      } else if (auto *IE = dyn_cast<InsertElementInst>(BDV)) {
        // Given there's an inherent type mismatch between the operands, this
        // will *always* produce Conflict.
        NewState = meetBDVState(NewState, getStateForInput(IE->getOperand(0)));
        NewState = meetBDVState(NewState, getStateForInput(IE->getOperand(1)));
      } else {
        // The only instance this does not return a Conflict is when both the
        // vector operands are the same vector.
        auto *SV = cast<ShuffleVectorInst>(BDV);
        NewState = meetBDVState(NewState, getStateForInput(SV->getOperand(0)));
        NewState = meetBDVState(NewState, getStateForInput(SV->getOperand(1)));
      }

      BDVState OldState = States[BDV];
      if (OldState != NewState) {
        Progress = true;
        States[BDV] = NewState;
      }
    }

    assert(OldSize == States.size() &&
           "fixed point shouldn't be adding any new nodes to state");
  }

#ifndef NDEBUG
  LLVM_DEBUG(dbgs() << "States after meet iteration:\n");
  for (auto Pair : States) {
    LLVM_DEBUG(dbgs() << " " << Pair.second << " for " << *Pair.first << "\n");
  }
#endif

  // Handle all instructions that have a vector BDV, but the instruction itself
  // is of scalar type.
  for (auto Pair : States) {
    Instruction *I = cast<Instruction>(Pair.first);
    BDVState State = Pair.second;
    auto *BaseValue = State.getBaseValue();
    // Only values that do not have known bases or those that have differing
    // type (scalar versus vector) from a possible known base should be in the
    // lattice.
    assert((!isKnownBaseResult(I) || !areBothVectorOrScalar(I, BaseValue)) &&
           "why did it get added?");
    assert(!State.isUnknown() && "Optimistic algorithm didn't complete!");

    if (!State.isBase() || !isa<VectorType>(BaseValue->getType()))
      continue;
    // extractelement instructions are a bit special in that we may need to
    // insert an extract even when we know an exact base for the instruction.
    // The problem is that we need to convert from a vector base to a scalar
    // base for the particular index we're interested in.
    if (isa<ExtractElementInst>(I)) {
      auto *EE = cast<ExtractElementInst>(I);
      // TODO: In many cases, the new instruction is just EE itself.  We should
      // exploit this, but can't do it here since it would break the invariant
      // about the BDV not being known to be a base.
      auto *BaseInst = ExtractElementInst::Create(
          State.getBaseValue(), EE->getIndexOperand(), "base_ee", EE);
      BaseInst->setMetadata("is_base_value", MDNode::get(I->getContext(), {}));
      States[I] = BDVState(BDVState::Base, BaseInst);
    } else if (!isa<VectorType>(I->getType())) {
      // We need to handle cases that have a vector base but the instruction is
      // of scalar type (these could be phis or selects or any instruction that
      // is of scalar type, but the base can be a vector type).  We
      // conservatively set this as a conflict.  Setting the base value for
      // these conflicts is handled in the next loop which traverses States.
      States[I] = BDVState(BDVState::Conflict);
    }
  }

  // Insert Phis for all conflicts
  // TODO: adjust naming patterns to avoid this order of iteration dependency
  for (auto Pair : States) {
    Instruction *I = cast<Instruction>(Pair.first);
    BDVState State = Pair.second;
    // Only values that do not have known bases or those that have differing
    // type (scalar versus vector) from a possible known base should be in the
    // lattice.
    assert((!isKnownBaseResult(I) ||
            !areBothVectorOrScalar(I, State.getBaseValue())) &&
           "why did it get added?");
    assert(!State.isUnknown() && "Optimistic algorithm didn't complete!");

    // Since we're joining a vector and scalar base, they can never be the
    // same.  As a result, we should always see insert element having reached
    // the conflict state.
    assert(!isa<InsertElementInst>(I) || State.isConflict());

    if (!State.isConflict())
      continue;

    /// Create and insert a new instruction which will represent the base of
    /// the given instruction 'I'.
    auto MakeBaseInstPlaceholder = [](Instruction *I) -> Instruction * {
      if (isa<PHINode>(I)) {
        BasicBlock *BB = I->getParent();
        int NumPreds = pred_size(BB);
        assert(NumPreds > 0 && "how did we reach here");
        std::string Name = suffixed_name_or(I, ".base", "base_phi");
        return PHINode::Create(I->getType(), NumPreds, Name, I);
      } else if (SelectInst *SI = dyn_cast<SelectInst>(I)) {
        // The undef will be replaced later
        UndefValue *Undef = UndefValue::get(SI->getType());
        std::string Name = suffixed_name_or(I, ".base", "base_select");
        return SelectInst::Create(SI->getCondition(), Undef, Undef, Name, SI);
      } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
        UndefValue *Undef = UndefValue::get(EE->getVectorOperand()->getType());
        std::string Name = suffixed_name_or(I, ".base", "base_ee");
        return ExtractElementInst::Create(Undef, EE->getIndexOperand(), Name,
                                          EE);
      } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
        UndefValue *VecUndef = UndefValue::get(IE->getOperand(0)->getType());
        UndefValue *ScalarUndef = UndefValue::get(IE->getOperand(1)->getType());
        std::string Name = suffixed_name_or(I, ".base", "base_ie");
        return InsertElementInst::Create(VecUndef, ScalarUndef,
                                         IE->getOperand(2), Name, IE);
      } else {
        auto *SV = cast<ShuffleVectorInst>(I);
        UndefValue *VecUndef = UndefValue::get(SV->getOperand(0)->getType());
        std::string Name = suffixed_name_or(I, ".base", "base_sv");
        return new ShuffleVectorInst(VecUndef, VecUndef, SV->getShuffleMask(),
                                     Name, SV);
      }
    };
    Instruction *BaseInst = MakeBaseInstPlaceholder(I);
    // Add metadata marking this as a base value
    BaseInst->setMetadata("is_base_value", MDNode::get(I->getContext(), {}));
    States[I] = BDVState(BDVState::Conflict, BaseInst);
  }

  // Returns an instruction which produces the base pointer for a given
  // instruction.  The instruction is assumed to be an input to one of the BDVs
  // seen in the inference algorithm above.  As such, we must either already
  // know its base defining value is a base, or have inserted a new
  // instruction to propagate the base of its BDV and have entered that newly
  // introduced instruction into the state table.  In either case, we are
  // assured to be able to determine an instruction which produces its base
  // pointer.
  auto getBaseForInput = [&](Value *Input, Instruction *InsertPt) {
    Value *BDV = findBaseOrBDV(Input, Cache);
    Value *Base = nullptr;
    if (isKnownBaseResult(BDV) && areBothVectorOrScalar(BDV, Input)) {
      Base = BDV;
    } else {
      // Either conflict or base.
      assert(States.count(BDV));
      Base = States[BDV].getBaseValue();
    }
    assert(Base && "Can't be null");
    // The cast is needed since base traversal may strip away bitcasts
    if (Base->getType() != Input->getType() && InsertPt)
      Base = new BitCastInst(Base, Input->getType(), "cast", InsertPt);
    return Base;
  };

  // Fixup all the inputs of the new PHIs.  Visit order needs to be
  // deterministic and predictable because we're naming newly created
  // instructions.
  for (auto Pair : States) {
    Instruction *BDV = cast<Instruction>(Pair.first);
    BDVState State = Pair.second;

    // Only values that do not have known bases or those that have differing
    // type (scalar versus vector) from a possible known base should be in the
    // lattice.
    assert((!isKnownBaseResult(BDV) ||
            !areBothVectorOrScalar(BDV, State.getBaseValue())) &&
           "why did it get added?");
    assert(!State.isUnknown() && "Optimistic algorithm didn't complete!");
    if (!State.isConflict())
      continue;

    if (PHINode *BasePHI = dyn_cast<PHINode>(State.getBaseValue())) {
      PHINode *PN = cast<PHINode>(BDV);
      unsigned NumPHIValues = PN->getNumIncomingValues();
      for (unsigned i = 0; i < NumPHIValues; i++) {
        Value *InVal = PN->getIncomingValue(i);
        BasicBlock *InBB = PN->getIncomingBlock(i);

        // If we've already seen InBB, add the same incoming value
        // we added for it earlier.  The IR verifier requires phi
        // nodes with multiple entries from the same basic block
        // to have the same incoming value for each of those
        // entries.  If we don't do this check here and basephi
        // has a different type than base, we'll end up adding two
        // bitcasts (and hence two distinct values) as incoming
        // values for the same basic block.

        int BlockIndex = BasePHI->getBasicBlockIndex(InBB);
        if (BlockIndex != -1) {
          Value *OldBase = BasePHI->getIncomingValue(BlockIndex);
          BasePHI->addIncoming(OldBase, InBB);

#ifndef NDEBUG
          Value *Base = getBaseForInput(InVal, nullptr);
          // In essence this assert states: the only way two values
          // incoming from the same basic block may be different is by
          // being different bitcasts of the same value.  A cleanup
          // that remains TODO is changing findBaseOrBDV to return an
          // llvm::Value of the correct type (and still remain pure).
          // This will remove the need to add bitcasts.
          assert(Base->stripPointerCasts() == OldBase->stripPointerCasts() &&
                 "Sanity -- findBaseOrBDV should be pure!");
#endif
          continue;
        }

        // Find the instruction which produces the base for each input.  We may
        // need to insert a bitcast in the incoming block.
        // TODO: Need to split critical edges if insertion is needed
        Value *Base = getBaseForInput(InVal, InBB->getTerminator());
        BasePHI->addIncoming(Base, InBB);
      }
      assert(BasePHI->getNumIncomingValues() == NumPHIValues);
    } else if (SelectInst *BaseSI =
                   dyn_cast<SelectInst>(State.getBaseValue())) {
      SelectInst *SI = cast<SelectInst>(BDV);

      // Find the instruction which produces the base for each input.
      // We may need to insert a bitcast.
      BaseSI->setTrueValue(getBaseForInput(SI->getTrueValue(), BaseSI));
      BaseSI->setFalseValue(getBaseForInput(SI->getFalseValue(), BaseSI));
    } else if (auto *BaseEE =
                   dyn_cast<ExtractElementInst>(State.getBaseValue())) {
      Value *InVal = cast<ExtractElementInst>(BDV)->getVectorOperand();
      // Find the instruction which produces the base for each input.  We may
      // need to insert a bitcast.
      BaseEE->setOperand(0, getBaseForInput(InVal, BaseEE));
    } else if (auto *BaseIE =
                   dyn_cast<InsertElementInst>(State.getBaseValue())) {
      auto *BdvIE = cast<InsertElementInst>(BDV);
      auto UpdateOperand = [&](int OperandIdx) {
        Value *InVal = BdvIE->getOperand(OperandIdx);
        Value *Base = getBaseForInput(InVal, BaseIE);
        BaseIE->setOperand(OperandIdx, Base);
      };
      UpdateOperand(0); // vector operand
      UpdateOperand(1); // scalar operand
    } else {
      auto *BaseSV = cast<ShuffleVectorInst>(State.getBaseValue());
      auto *BdvSV = cast<ShuffleVectorInst>(BDV);
      auto UpdateOperand = [&](int OperandIdx) {
        Value *InVal = BdvSV->getOperand(OperandIdx);
        Value *Base = getBaseForInput(InVal, BaseSV);
        BaseSV->setOperand(OperandIdx, Base);
      };
      UpdateOperand(0); // vector operand
      UpdateOperand(1); // vector operand
    }
  }

  // Cache all of our results so we can cheaply reuse them
  // NOTE: This is actually two caches: one of the base defining value
  // relation and one of the base pointer relation!  FIXME
  for (auto Pair : States) {
    auto *BDV = Pair.first;
    Value *Base = Pair.second.getBaseValue();
    assert(BDV && Base);
    // Only values that do not have known bases or those that have differing
    // type (scalar versus vector) from a possible known base should be in the
    // lattice.
    assert((!isKnownBaseResult(BDV) || !areBothVectorOrScalar(BDV, Base)) &&
           "why did it get added?");

    LLVM_DEBUG(
        dbgs() << "Updating base value cache"
               << " for: " << BDV->getName() << " from: "
               << (Cache.count(BDV) ? Cache[BDV]->getName().str() : "none")
               << " to: " << Base->getName() << "\n");

    if (Cache.count(BDV)) {
      assert(isKnownBaseResult(Base) &&
             "must be something we 'know' is a base pointer");
      // Once we transition from the BDV relation being stored in the Cache to
      // the base relation being stored, it must be stable
      assert((!isKnownBaseResult(Cache[BDV]) || Cache[BDV] == Base) &&
             "base relation should be stable");
    }
    Cache[BDV] = Base;
  }
  assert(Cache.count(Def));
  return Cache[Def];
}

// For a set of live pointers (base and/or derived), identify the base
// pointer of the object which they are derived from.  This routine will
// mutate the IR graph as needed to make the 'base' pointer live at the
// definition site of 'derived'.  This ensures that any use of 'derived' can
// also use 'base'.  This may involve the insertion of a number of
// additional PHI nodes.
//
// preconditions: live is a set of pointer type Values
//
// side effects: may insert PHI nodes into the existing CFG, will preserve
// CFG, will not remove or mutate any existing nodes
//
// post condition: PointerToBase contains one (derived, base) pair for every
// pointer in live.  Note that derived can be equal to base if the original
// pointer was a base pointer.
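//
// For example (hypothetical IR): if the live set contains %obj and
//   %derived = getelementptr i8, i8 addrspace(1)* %obj, i64 8
// the resulting map holds the pairs (%derived, %obj) and (%obj, %obj).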
static void
findBasePointers(const StatepointLiveSetTy &live,
                 MapVector<Value *, Value *> &PointerToBase,
                 DominatorTree *DT, DefiningValueMapTy &DVCache) {
  for (Value *ptr : live) {
    Value *base = findBasePointer(ptr, DVCache);
    assert(base && "failed to find base pointer");
    PointerToBase[ptr] = base;
    assert((!isa<Instruction>(base) || !isa<Instruction>(ptr) ||
            DT->dominates(cast<Instruction>(base)->getParent(),
                          cast<Instruction>(ptr)->getParent())) &&
           "The base we found better dominate the derived pointer");
  }
}

/// Find the required base pointers (and adjust the live set) for the given
/// parse point.
static void findBasePointers(DominatorTree &DT, DefiningValueMapTy &DVCache,
                             CallBase *Call,
                             PartiallyConstructedSafepointRecord &result) {
  MapVector<Value *, Value *> PointerToBase;
  StatepointLiveSetTy PotentiallyDerivedPointers = result.LiveSet;
  // We assume that all pointers passed to deopt are base pointers; as an
  // optimization, we can use this to avoid separately materializing the base
  // pointer graph.  This is only relevant since we're very conservative about
  // generating new conflict nodes during base pointer insertion.  If we were
  // smarter there, this would be irrelevant.
  if (auto Opt = Call->getOperandBundle(LLVMContext::OB_deopt))
    for (Value *V : Opt->Inputs) {
      if (!PotentiallyDerivedPointers.count(V))
        continue;
      PotentiallyDerivedPointers.remove(V);
      PointerToBase[V] = V;
    }
  findBasePointers(PotentiallyDerivedPointers, PointerToBase, &DT, DVCache);

  if (PrintBasePointers) {
    errs() << "Base Pairs (w/o Relocation):\n";
    for (auto &Pair : PointerToBase) {
      errs() << " derived ";
      Pair.first->printAsOperand(errs(), false);
      errs() << " base ";
      Pair.second->printAsOperand(errs(), false);
      errs() << "\n";
    }
  }

  result.PointerToBase = PointerToBase;
}

/// Given an updated version of the dataflow liveness results, update the
/// liveset and base pointer maps for the call site CS.
static void recomputeLiveInValues(GCPtrLivenessData &RevisedLivenessData,
                                  CallBase *Call,
                                  PartiallyConstructedSafepointRecord &result);

static void recomputeLiveInValues(
    Function &F, DominatorTree &DT, ArrayRef<CallBase *> toUpdate,
    MutableArrayRef<struct PartiallyConstructedSafepointRecord> records) {
  // TODO-PERF: reuse the original liveness, then simply run the dataflow
  // again.  The old values are still live and will help it stabilize quickly.
  GCPtrLivenessData RevisedLivenessData;
  computeLiveInValues(DT, F, RevisedLivenessData);
  for (size_t i = 0; i < records.size(); i++) {
    struct PartiallyConstructedSafepointRecord &info = records[i];
    recomputeLiveInValues(RevisedLivenessData, toUpdate[i], info);
  }
}

// When inserting gc.relocate and gc.result calls, we need to ensure there are
// no uses of the original value / return value between the gc.statepoint and
// the gc.relocate / gc.result call.  One case which can arise is a phi node
// starting one of the successor blocks.  We also need to be able to insert the
// gc.relocates only on the path which goes through the statepoint.  We might
// need to split an edge to make this possible.
static BasicBlock *
normalizeForInvokeSafepoint(BasicBlock *BB, BasicBlock *InvokeParent,
                            DominatorTree &DT) {
  BasicBlock *Ret = BB;
  if (!BB->getUniquePredecessor())
    Ret = SplitBlockPredecessors(BB, InvokeParent, "", &DT);

  // Now that 'Ret' has a unique predecessor we can safely remove all phi nodes
  // from it
  FoldSingleEntryPHINodes(Ret);
  assert(!isa<PHINode>(Ret->begin()) &&
         "All PHI nodes should have been removed!");

  // At this point, we can safely insert a gc.relocate or gc.result as the
  // first instruction in Ret if needed.
  return Ret;
}

// Create a new attribute set containing only attributes which can be
// transferred from the original call to the safepoint.
static AttributeList legalizeCallAttributes(LLVMContext &Ctx,
                                            AttributeList AL) {
  if (AL.isEmpty())
    return AL;

  // Remove the readonly, readnone, and statepoint function attributes.
  AttrBuilder FnAttrs = AL.getFnAttributes();
  FnAttrs.removeAttribute(Attribute::ReadNone);
  FnAttrs.removeAttribute(Attribute::ReadOnly);
  for (Attribute A : AL.getFnAttributes()) {
    if (isStatepointDirectiveAttr(A))
      FnAttrs.remove(A);
  }

  // Just skip parameter and return attributes for now
  return AttributeList::get(Ctx, AttributeList::FunctionIndex,
                            AttributeSet::get(Ctx, FnAttrs));
}

/// Helper function to place all gc relocates necessary for the given
/// statepoint.
/// Inputs:
///   liveVariables - list of variables to be relocated.
///   basePtrs - base pointers.
///   statepointToken - statepoint instruction to which relocates should be
///     bound.
///   Builder - LLVM IR builder to be used to construct new calls.
static void CreateGCRelocates(ArrayRef<Value *> LiveVariables,
                              ArrayRef<Value *> BasePtrs,
                              Instruction *StatepointToken,
                              IRBuilder<> &Builder) {
  if (LiveVariables.empty())
    return;

  auto FindIndex = [](ArrayRef<Value *> LiveVec, Value *Val) {
    auto ValIt = llvm::find(LiveVec, Val);
    assert(ValIt != LiveVec.end() && "Val not found in LiveVec!");
    size_t Index = std::distance(LiveVec.begin(), ValIt);
    assert(Index < LiveVec.size() && "Bug in std::find?");
    return Index;
  };
  Module *M = StatepointToken->getModule();

  // All gc_relocate are generated as i8 addrspace(1)* (or a vector type whose
  // element type is i8 addrspace(1)*).  We originally generated unique
  // declarations for each pointer type, but this proved problematic because
  // the intrinsic mangling code is incomplete and fragile.  Since we're moving
  // towards a single unified pointer type anyways, we can just cast everything
  // to an i8* of the right address space.  A bitcast is added later to convert
  // gc_relocate to the actual value's type.
  auto getGCRelocateDecl = [&](Type *Ty) {
    assert(isHandledGCPointerType(Ty));
    auto AS = Ty->getScalarType()->getPointerAddressSpace();
    Type *NewTy = Type::getInt8PtrTy(M->getContext(), AS);
    if (auto *VT = dyn_cast<VectorType>(Ty))
      NewTy = FixedVectorType::get(NewTy,
                                   cast<FixedVectorType>(VT)->getNumElements());
    return Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_relocate,
                                     {NewTy});
  };

  // Lazily populated map from input types to the canonicalized form mentioned
  // in the comment above.  This should probably be cached somewhere more
  // broadly.
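  // For example, i32 addrspace(1)* and i64 addrspace(1)* both canonicalize to
  // a gc.relocate declaration on i8 addrspace(1)*; vector types keep their
  // element count but use an i8 addrspace(1)* element type.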
1381 DenseMap<Type *, Function *> TypeToDeclMap; 1382 1383 for (unsigned i = 0; i < LiveVariables.size(); i++) { 1384 // Generate the gc.relocate call and save the result 1385 Value *BaseIdx = Builder.getInt32(FindIndex(LiveVariables, BasePtrs[i])); 1386 Value *LiveIdx = Builder.getInt32(i); 1387 1388 Type *Ty = LiveVariables[i]->getType(); 1389 if (!TypeToDeclMap.count(Ty)) 1390 TypeToDeclMap[Ty] = getGCRelocateDecl(Ty); 1391 Function *GCRelocateDecl = TypeToDeclMap[Ty]; 1392 1393 // only specify a debug name if we can give a useful one 1394 CallInst *Reloc = Builder.CreateCall( 1395 GCRelocateDecl, {StatepointToken, BaseIdx, LiveIdx}, 1396 suffixed_name_or(LiveVariables[i], ".relocated", "")); 1397 // Trick CodeGen into thinking there are lots of free registers at this 1398 // fake call. 1399 Reloc->setCallingConv(CallingConv::Cold); 1400 } 1401 } 1402 1403 namespace { 1404 1405 /// This struct is used to defer RAUWs and `eraseFromParent` s. Using this 1406 /// avoids having to worry about keeping around dangling pointers to Values. 1407 class DeferredReplacement { 1408 AssertingVH<Instruction> Old; 1409 AssertingVH<Instruction> New; 1410 bool IsDeoptimize = false; 1411 1412 DeferredReplacement() = default; 1413 1414 public: 1415 static DeferredReplacement createRAUW(Instruction *Old, Instruction *New) { 1416 assert(Old != New && Old && New && 1417 "Cannot RAUW equal values or to / from null!"); 1418 1419 DeferredReplacement D; 1420 D.Old = Old; 1421 D.New = New; 1422 return D; 1423 } 1424 1425 static DeferredReplacement createDelete(Instruction *ToErase) { 1426 DeferredReplacement D; 1427 D.Old = ToErase; 1428 return D; 1429 } 1430 1431 static DeferredReplacement createDeoptimizeReplacement(Instruction *Old) { 1432 #ifndef NDEBUG 1433 auto *F = cast<CallInst>(Old)->getCalledFunction(); 1434 assert(F && F->getIntrinsicID() == Intrinsic::experimental_deoptimize && 1435 "Only way to construct a deoptimize deferred replacement"); 1436 #endif 1437 DeferredReplacement D; 1438 D.Old = Old; 1439 D.IsDeoptimize = true; 1440 return D; 1441 } 1442 1443 /// Does the task represented by this instance. 1444 void doReplacement() { 1445 Instruction *OldI = Old; 1446 Instruction *NewI = New; 1447 1448 assert(OldI != NewI && "Disallowed at construction?!"); 1449 assert((!IsDeoptimize || !New) && 1450 "Deoptimize intrinsics are not replaced!"); 1451 1452 Old = nullptr; 1453 New = nullptr; 1454 1455 if (NewI) 1456 OldI->replaceAllUsesWith(NewI); 1457 1458 if (IsDeoptimize) { 1459 // Note: we've inserted instructions, so the call to llvm.deoptimize may 1460 // not necessarily be followed by the matching return. 1461 auto *RI = cast<ReturnInst>(OldI->getParent()->getTerminator()); 1462 new UnreachableInst(RI->getContext(), RI); 1463 RI->eraseFromParent(); 1464 } 1465 1466 OldI->eraseFromParent(); 1467 } 1468 }; 1469 1470 } // end anonymous namespace 1471 1472 static StringRef getDeoptLowering(CallBase *Call) { 1473 const char *DeoptLowering = "deopt-lowering"; 1474 if (Call->hasFnAttr(DeoptLowering)) { 1475 // FIXME: Calls have a *really* confusing interface around attributes 1476 // with values. 
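// For illustration (a hedged sketch): a call site carrying the function
// attribute "deopt-lowering"="live-in" requests the DeoptLiveIn lowering; if
// neither the call nor the callee carries the attribute, the default below is
// "live-through".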
1477 const AttributeList &CSAS = Call->getAttributes(); 1478 if (CSAS.hasAttribute(AttributeList::FunctionIndex, DeoptLowering)) 1479 return CSAS.getAttribute(AttributeList::FunctionIndex, DeoptLowering) 1480 .getValueAsString(); 1481 Function *F = Call->getCalledFunction(); 1482 assert(F && F->hasFnAttribute(DeoptLowering)); 1483 return F->getFnAttribute(DeoptLowering).getValueAsString(); 1484 } 1485 return "live-through"; 1486 } 1487 1488 static void 1489 makeStatepointExplicitImpl(CallBase *Call, /* to replace */ 1490 const SmallVectorImpl<Value *> &BasePtrs, 1491 const SmallVectorImpl<Value *> &LiveVariables, 1492 PartiallyConstructedSafepointRecord &Result, 1493 std::vector<DeferredReplacement> &Replacements) { 1494 assert(BasePtrs.size() == LiveVariables.size()); 1495 1496 // Then go ahead and use the builder do actually do the inserts. We insert 1497 // immediately before the previous instruction under the assumption that all 1498 // arguments will be available here. We can't insert afterwards since we may 1499 // be replacing a terminator. 1500 IRBuilder<> Builder(Call); 1501 1502 ArrayRef<Value *> GCArgs(LiveVariables); 1503 uint64_t StatepointID = StatepointDirectives::DefaultStatepointID; 1504 uint32_t NumPatchBytes = 0; 1505 uint32_t Flags = uint32_t(StatepointFlags::None); 1506 1507 SmallVector<Value *, 8> CallArgs(Call->args()); 1508 Optional<ArrayRef<Use>> DeoptArgs; 1509 if (auto Bundle = Call->getOperandBundle(LLVMContext::OB_deopt)) 1510 DeoptArgs = Bundle->Inputs; 1511 Optional<ArrayRef<Use>> TransitionArgs; 1512 if (auto Bundle = Call->getOperandBundle(LLVMContext::OB_gc_transition)) { 1513 TransitionArgs = Bundle->Inputs; 1514 // TODO: This flag no longer serves a purpose and can be removed later 1515 Flags |= uint32_t(StatepointFlags::GCTransition); 1516 } 1517 1518 // Instead of lowering calls to @llvm.experimental.deoptimize as normal calls 1519 // with a return value, we lower then as never returning calls to 1520 // __llvm_deoptimize that are followed by unreachable to get better codegen. 1521 bool IsDeoptimize = false; 1522 1523 StatepointDirectives SD = 1524 parseStatepointDirectivesFromAttrs(Call->getAttributes()); 1525 if (SD.NumPatchBytes) 1526 NumPatchBytes = *SD.NumPatchBytes; 1527 if (SD.StatepointID) 1528 StatepointID = *SD.StatepointID; 1529 1530 // Pass through the requested lowering if any. The default is live-through. 1531 StringRef DeoptLowering = getDeoptLowering(Call); 1532 if (DeoptLowering.equals("live-in")) 1533 Flags |= uint32_t(StatepointFlags::DeoptLiveIn); 1534 else { 1535 assert(DeoptLowering.equals("live-through") && "Unsupported value!"); 1536 } 1537 1538 Value *CallTarget = Call->getCalledOperand(); 1539 if (Function *F = dyn_cast<Function>(CallTarget)) { 1540 auto IID = F->getIntrinsicID(); 1541 if (IID == Intrinsic::experimental_deoptimize) { 1542 // Calls to llvm.experimental.deoptimize are lowered to calls to the 1543 // __llvm_deoptimize symbol. We want to resolve this now, since the 1544 // verifier does not allow taking the address of an intrinsic function. 1545 1546 SmallVector<Type *, 8> DomainTy; 1547 for (Value *Arg : CallArgs) 1548 DomainTy.push_back(Arg->getType()); 1549 auto *FTy = FunctionType::get(Type::getVoidTy(F->getContext()), DomainTy, 1550 /* isVarArg = */ false); 1551 1552 // Note: CallTarget can be a bitcast instruction of a symbol if there are 1553 // calls to @llvm.experimental.deoptimize with different argument types in 1554 // the same module. 
This is fine -- we assume the frontend knew what it 1555 // was doing when generating this kind of IR. 1556 CallTarget = F->getParent() 1557 ->getOrInsertFunction("__llvm_deoptimize", FTy) 1558 .getCallee(); 1559 1560 IsDeoptimize = true; 1561 } else if (IID == Intrinsic::memcpy_element_unordered_atomic || 1562 IID == Intrinsic::memmove_element_unordered_atomic) { 1563 // Unordered atomic memcpy and memmove intrinsics which are not explicitly 1564 // marked as "gc-leaf-function" should be lowered in a GC parseable way. 1565 // Specifically, these calls should be lowered to the 1566 // __llvm_{memcpy|memmove}_element_unordered_atomic_safepoint symbols. 1567 // Similarly to __llvm_deoptimize we want to resolve this now, since the 1568 // verifier does not allow taking the address of an intrinsic function. 1569 // 1570 // Moreover we need to shuffle the arguments for the call in order to 1571 // accommodate GC. The underlying source and destination objects might be 1572 // relocated during copy operation should the GC occur. To relocate the 1573 // derived source and destination pointers the implementation of the 1574 // intrinsic should know the corresponding base pointers. 1575 // 1576 // To make the base pointers available pass them explicitly as arguments: 1577 // memcpy(dest_derived, source_derived, ...) => 1578 // memcpy(dest_base, dest_offset, source_base, source_offset, ...) 1579 auto &Context = Call->getContext(); 1580 auto &DL = Call->getModule()->getDataLayout(); 1581 auto GetBaseAndOffset = [&](Value *Derived) { 1582 assert(Result.PointerToBase.count(Derived)); 1583 unsigned AddressSpace = Derived->getType()->getPointerAddressSpace(); 1584 unsigned IntPtrSize = DL.getPointerSizeInBits(AddressSpace); 1585 Value *Base = Result.PointerToBase.find(Derived)->second; 1586 Value *Base_int = Builder.CreatePtrToInt( 1587 Base, Type::getIntNTy(Context, IntPtrSize)); 1588 Value *Derived_int = Builder.CreatePtrToInt( 1589 Derived, Type::getIntNTy(Context, IntPtrSize)); 1590 return std::make_pair(Base, Builder.CreateSub(Derived_int, Base_int)); 1591 }; 1592 1593 auto *Dest = CallArgs[0]; 1594 Value *DestBase, *DestOffset; 1595 std::tie(DestBase, DestOffset) = GetBaseAndOffset(Dest); 1596 1597 auto *Source = CallArgs[1]; 1598 Value *SourceBase, *SourceOffset; 1599 std::tie(SourceBase, SourceOffset) = GetBaseAndOffset(Source); 1600 1601 auto *LengthInBytes = CallArgs[2]; 1602 auto *ElementSizeCI = cast<ConstantInt>(CallArgs[3]); 1603 1604 CallArgs.clear(); 1605 CallArgs.push_back(DestBase); 1606 CallArgs.push_back(DestOffset); 1607 CallArgs.push_back(SourceBase); 1608 CallArgs.push_back(SourceOffset); 1609 CallArgs.push_back(LengthInBytes); 1610 1611 SmallVector<Type *, 8> DomainTy; 1612 for (Value *Arg : CallArgs) 1613 DomainTy.push_back(Arg->getType()); 1614 auto *FTy = FunctionType::get(Type::getVoidTy(F->getContext()), DomainTy, 1615 /* isVarArg = */ false); 1616 1617 auto GetFunctionName = [](Intrinsic::ID IID, ConstantInt *ElementSizeCI) { 1618 uint64_t ElementSize = ElementSizeCI->getZExtValue(); 1619 if (IID == Intrinsic::memcpy_element_unordered_atomic) { 1620 switch (ElementSize) { 1621 case 1: 1622 return "__llvm_memcpy_element_unordered_atomic_safepoint_1"; 1623 case 2: 1624 return "__llvm_memcpy_element_unordered_atomic_safepoint_2"; 1625 case 4: 1626 return "__llvm_memcpy_element_unordered_atomic_safepoint_4"; 1627 case 8: 1628 return "__llvm_memcpy_element_unordered_atomic_safepoint_8"; 1629 case 16: 1630 return "__llvm_memcpy_element_unordered_atomic_safepoint_16"; 1631 default: 
1632 llvm_unreachable("unexpected element size!"); 1633 } 1634 } 1635 assert(IID == Intrinsic::memmove_element_unordered_atomic); 1636 switch (ElementSize) { 1637 case 1: 1638 return "__llvm_memmove_element_unordered_atomic_safepoint_1"; 1639 case 2: 1640 return "__llvm_memmove_element_unordered_atomic_safepoint_2"; 1641 case 4: 1642 return "__llvm_memmove_element_unordered_atomic_safepoint_4"; 1643 case 8: 1644 return "__llvm_memmove_element_unordered_atomic_safepoint_8"; 1645 case 16: 1646 return "__llvm_memmove_element_unordered_atomic_safepoint_16"; 1647 default: 1648 llvm_unreachable("unexpected element size!"); 1649 } 1650 }; 1651 1652 CallTarget = 1653 F->getParent() 1654 ->getOrInsertFunction(GetFunctionName(IID, ElementSizeCI), FTy) 1655 .getCallee(); 1656 } 1657 } 1658 1659 // Create the statepoint given all the arguments 1660 GCStatepointInst *Token = nullptr; 1661 if (auto *CI = dyn_cast<CallInst>(Call)) { 1662 CallInst *SPCall = Builder.CreateGCStatepointCall( 1663 StatepointID, NumPatchBytes, CallTarget, Flags, CallArgs, 1664 TransitionArgs, DeoptArgs, GCArgs, "safepoint_token"); 1665 1666 SPCall->setTailCallKind(CI->getTailCallKind()); 1667 SPCall->setCallingConv(CI->getCallingConv()); 1668 1669 // Currently we will fail on parameter attributes and on certain 1670 // function attributes. In case if we can handle this set of attributes - 1671 // set up function attrs directly on statepoint and return attrs later for 1672 // gc_result intrinsic. 1673 SPCall->setAttributes( 1674 legalizeCallAttributes(CI->getContext(), CI->getAttributes())); 1675 1676 Token = cast<GCStatepointInst>(SPCall); 1677 1678 // Put the following gc_result and gc_relocate calls immediately after the 1679 // the old call (which we're about to delete) 1680 assert(CI->getNextNode() && "Not a terminator, must have next!"); 1681 Builder.SetInsertPoint(CI->getNextNode()); 1682 Builder.SetCurrentDebugLocation(CI->getNextNode()->getDebugLoc()); 1683 } else { 1684 auto *II = cast<InvokeInst>(Call); 1685 1686 // Insert the new invoke into the old block. We'll remove the old one in a 1687 // moment at which point this will become the new terminator for the 1688 // original block. 1689 InvokeInst *SPInvoke = Builder.CreateGCStatepointInvoke( 1690 StatepointID, NumPatchBytes, CallTarget, II->getNormalDest(), 1691 II->getUnwindDest(), Flags, CallArgs, TransitionArgs, DeoptArgs, GCArgs, 1692 "statepoint_token"); 1693 1694 SPInvoke->setCallingConv(II->getCallingConv()); 1695 1696 // Currently we will fail on parameter attributes and on certain 1697 // function attributes. In case if we can handle this set of attributes - 1698 // set up function attrs directly on statepoint and return attrs later for 1699 // gc_result intrinsic. 1700 SPInvoke->setAttributes( 1701 legalizeCallAttributes(II->getContext(), II->getAttributes())); 1702 1703 Token = cast<GCStatepointInst>(SPInvoke); 1704 1705 // Generate gc relocates in exceptional path 1706 BasicBlock *UnwindBlock = II->getUnwindDest(); 1707 assert(!isa<PHINode>(UnwindBlock->begin()) && 1708 UnwindBlock->getUniquePredecessor() && 1709 "can't safely insert in this block!"); 1710 1711 Builder.SetInsertPoint(&*UnwindBlock->getFirstInsertionPt()); 1712 Builder.SetCurrentDebugLocation(II->getDebugLoc()); 1713 1714 // Attach exceptional gc relocates to the landingpad. 
1715 Instruction *ExceptionalToken = UnwindBlock->getLandingPadInst(); 1716 Result.UnwindToken = ExceptionalToken; 1717 1718 CreateGCRelocates(LiveVariables, BasePtrs, ExceptionalToken, Builder); 1719 1720 // Generate gc relocates and returns for normal block 1721 BasicBlock *NormalDest = II->getNormalDest(); 1722 assert(!isa<PHINode>(NormalDest->begin()) && 1723 NormalDest->getUniquePredecessor() && 1724 "can't safely insert in this block!"); 1725 1726 Builder.SetInsertPoint(&*NormalDest->getFirstInsertionPt()); 1727 1728 // gc relocates will be generated later as if it were regular call 1729 // statepoint 1730 } 1731 assert(Token && "Should be set in one of the above branches!"); 1732 1733 if (IsDeoptimize) { 1734 // If we're wrapping an @llvm.experimental.deoptimize in a statepoint, we 1735 // transform the tail-call like structure to a call to a void function 1736 // followed by unreachable to get better codegen. 1737 Replacements.push_back( 1738 DeferredReplacement::createDeoptimizeReplacement(Call)); 1739 } else { 1740 Token->setName("statepoint_token"); 1741 if (!Call->getType()->isVoidTy() && !Call->use_empty()) { 1742 StringRef Name = Call->hasName() ? Call->getName() : ""; 1743 CallInst *GCResult = Builder.CreateGCResult(Token, Call->getType(), Name); 1744 GCResult->setAttributes( 1745 AttributeList::get(GCResult->getContext(), AttributeList::ReturnIndex, 1746 Call->getAttributes().getRetAttributes())); 1747 1748 // We cannot RAUW or delete CS.getInstruction() because it could be in the 1749 // live set of some other safepoint, in which case that safepoint's 1750 // PartiallyConstructedSafepointRecord will hold a raw pointer to this 1751 // llvm::Instruction. Instead, we defer the replacement and deletion to 1752 // after the live sets have been made explicit in the IR, and we no longer 1753 // have raw pointers to worry about. 1754 Replacements.emplace_back( 1755 DeferredReplacement::createRAUW(Call, GCResult)); 1756 } else { 1757 Replacements.emplace_back(DeferredReplacement::createDelete(Call)); 1758 } 1759 } 1760 1761 Result.StatepointToken = Token; 1762 1763 // Second, create a gc.relocate for every live variable 1764 CreateGCRelocates(LiveVariables, BasePtrs, Token, Builder); 1765 } 1766 1767 // Replace an existing gc.statepoint with a new one and a set of gc.relocates 1768 // which make the relocations happening at this safepoint explicit. 1769 // 1770 // WARNING: Does not do any fixup to adjust users of the original live 1771 // values. That's the callers responsibility. 1772 static void 1773 makeStatepointExplicit(DominatorTree &DT, CallBase *Call, 1774 PartiallyConstructedSafepointRecord &Result, 1775 std::vector<DeferredReplacement> &Replacements) { 1776 const auto &LiveSet = Result.LiveSet; 1777 const auto &PointerToBase = Result.PointerToBase; 1778 1779 // Convert to vector for efficient cross referencing. 1780 SmallVector<Value *, 64> BaseVec, LiveVec; 1781 LiveVec.reserve(LiveSet.size()); 1782 BaseVec.reserve(LiveSet.size()); 1783 for (Value *L : LiveSet) { 1784 LiveVec.push_back(L); 1785 assert(PointerToBase.count(L)); 1786 Value *Base = PointerToBase.find(L)->second; 1787 BaseVec.push_back(Base); 1788 } 1789 assert(LiveVec.size() == BaseVec.size()); 1790 1791 // Do the actual rewriting and delete the old statepoint 1792 makeStatepointExplicitImpl(Call, BaseVec, LiveVec, Result, Replacements); 1793 } 1794 1795 // Helper function for the relocationViaAlloca. 
1796 // 1797 // It receives iterator to the statepoint gc relocates and emits a store to the 1798 // assigned location (via allocaMap) for the each one of them. It adds the 1799 // visited values into the visitedLiveValues set, which we will later use them 1800 // for sanity checking. 1801 static void 1802 insertRelocationStores(iterator_range<Value::user_iterator> GCRelocs, 1803 DenseMap<Value *, AllocaInst *> &AllocaMap, 1804 DenseSet<Value *> &VisitedLiveValues) { 1805 for (User *U : GCRelocs) { 1806 GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(U); 1807 if (!Relocate) 1808 continue; 1809 1810 Value *OriginalValue = Relocate->getDerivedPtr(); 1811 assert(AllocaMap.count(OriginalValue)); 1812 Value *Alloca = AllocaMap[OriginalValue]; 1813 1814 // Emit store into the related alloca 1815 // All gc_relocates are i8 addrspace(1)* typed, and it must be bitcasted to 1816 // the correct type according to alloca. 1817 assert(Relocate->getNextNode() && 1818 "Should always have one since it's not a terminator"); 1819 IRBuilder<> Builder(Relocate->getNextNode()); 1820 Value *CastedRelocatedValue = 1821 Builder.CreateBitCast(Relocate, 1822 cast<AllocaInst>(Alloca)->getAllocatedType(), 1823 suffixed_name_or(Relocate, ".casted", "")); 1824 1825 new StoreInst(CastedRelocatedValue, Alloca, 1826 cast<Instruction>(CastedRelocatedValue)->getNextNode()); 1827 1828 #ifndef NDEBUG 1829 VisitedLiveValues.insert(OriginalValue); 1830 #endif 1831 } 1832 } 1833 1834 // Helper function for the "relocationViaAlloca". Similar to the 1835 // "insertRelocationStores" but works for rematerialized values. 1836 static void insertRematerializationStores( 1837 const RematerializedValueMapTy &RematerializedValues, 1838 DenseMap<Value *, AllocaInst *> &AllocaMap, 1839 DenseSet<Value *> &VisitedLiveValues) { 1840 for (auto RematerializedValuePair: RematerializedValues) { 1841 Instruction *RematerializedValue = RematerializedValuePair.first; 1842 Value *OriginalValue = RematerializedValuePair.second; 1843 1844 assert(AllocaMap.count(OriginalValue) && 1845 "Can not find alloca for rematerialized value"); 1846 Value *Alloca = AllocaMap[OriginalValue]; 1847 1848 new StoreInst(RematerializedValue, Alloca, 1849 RematerializedValue->getNextNode()); 1850 1851 #ifndef NDEBUG 1852 VisitedLiveValues.insert(OriginalValue); 1853 #endif 1854 } 1855 } 1856 1857 /// Do all the relocation update via allocas and mem2reg 1858 static void relocationViaAlloca( 1859 Function &F, DominatorTree &DT, ArrayRef<Value *> Live, 1860 ArrayRef<PartiallyConstructedSafepointRecord> Records) { 1861 #ifndef NDEBUG 1862 // record initial number of (static) allocas; we'll check we have the same 1863 // number when we get done. 
1864 int InitialAllocaNum = 0; 1865 for (Instruction &I : F.getEntryBlock()) 1866 if (isa<AllocaInst>(I)) 1867 InitialAllocaNum++; 1868 #endif 1869 1870 // TODO-PERF: change data structures, reserve 1871 DenseMap<Value *, AllocaInst *> AllocaMap; 1872 SmallVector<AllocaInst *, 200> PromotableAllocas; 1873 // Used later to chack that we have enough allocas to store all values 1874 std::size_t NumRematerializedValues = 0; 1875 PromotableAllocas.reserve(Live.size()); 1876 1877 // Emit alloca for "LiveValue" and record it in "allocaMap" and 1878 // "PromotableAllocas" 1879 const DataLayout &DL = F.getParent()->getDataLayout(); 1880 auto emitAllocaFor = [&](Value *LiveValue) { 1881 AllocaInst *Alloca = new AllocaInst(LiveValue->getType(), 1882 DL.getAllocaAddrSpace(), "", 1883 F.getEntryBlock().getFirstNonPHI()); 1884 AllocaMap[LiveValue] = Alloca; 1885 PromotableAllocas.push_back(Alloca); 1886 }; 1887 1888 // Emit alloca for each live gc pointer 1889 for (Value *V : Live) 1890 emitAllocaFor(V); 1891 1892 // Emit allocas for rematerialized values 1893 for (const auto &Info : Records) 1894 for (auto RematerializedValuePair : Info.RematerializedValues) { 1895 Value *OriginalValue = RematerializedValuePair.second; 1896 if (AllocaMap.count(OriginalValue) != 0) 1897 continue; 1898 1899 emitAllocaFor(OriginalValue); 1900 ++NumRematerializedValues; 1901 } 1902 1903 // The next two loops are part of the same conceptual operation. We need to 1904 // insert a store to the alloca after the original def and at each 1905 // redefinition. We need to insert a load before each use. These are split 1906 // into distinct loops for performance reasons. 1907 1908 // Update gc pointer after each statepoint: either store a relocated value or 1909 // null (if no relocated value was found for this gc pointer and it is not a 1910 // gc_result). This must happen before we update the statepoint with load of 1911 // alloca otherwise we lose the link between statepoint and old def. 1912 for (const auto &Info : Records) { 1913 Value *Statepoint = Info.StatepointToken; 1914 1915 // This will be used for consistency check 1916 DenseSet<Value *> VisitedLiveValues; 1917 1918 // Insert stores for normal statepoint gc relocates 1919 insertRelocationStores(Statepoint->users(), AllocaMap, VisitedLiveValues); 1920 1921 // In case if it was invoke statepoint 1922 // we will insert stores for exceptional path gc relocates. 1923 if (isa<InvokeInst>(Statepoint)) { 1924 insertRelocationStores(Info.UnwindToken->users(), AllocaMap, 1925 VisitedLiveValues); 1926 } 1927 1928 // Do similar thing with rematerialized values 1929 insertRematerializationStores(Info.RematerializedValues, AllocaMap, 1930 VisitedLiveValues); 1931 1932 if (ClobberNonLive) { 1933 // As a debugging aid, pretend that an unrelocated pointer becomes null at 1934 // the gc.statepoint. This will turn some subtle GC problems into 1935 // slightly easier to debug SEGVs. Note that on large IR files with 1936 // lots of gc.statepoints this is extremely costly both memory and time 1937 // wise. 
1938 SmallVector<AllocaInst *, 64> ToClobber; 1939 for (auto Pair : AllocaMap) { 1940 Value *Def = Pair.first; 1941 AllocaInst *Alloca = Pair.second; 1942 1943 // This value was relocated 1944 if (VisitedLiveValues.count(Def)) { 1945 continue; 1946 } 1947 ToClobber.push_back(Alloca); 1948 } 1949 1950 auto InsertClobbersAt = [&](Instruction *IP) { 1951 for (auto *AI : ToClobber) { 1952 auto PT = cast<PointerType>(AI->getAllocatedType()); 1953 Constant *CPN = ConstantPointerNull::get(PT); 1954 new StoreInst(CPN, AI, IP); 1955 } 1956 }; 1957 1958 // Insert the clobbering stores. These may get intermixed with the 1959 // gc.results and gc.relocates, but that's fine. 1960 if (auto II = dyn_cast<InvokeInst>(Statepoint)) { 1961 InsertClobbersAt(&*II->getNormalDest()->getFirstInsertionPt()); 1962 InsertClobbersAt(&*II->getUnwindDest()->getFirstInsertionPt()); 1963 } else { 1964 InsertClobbersAt(cast<Instruction>(Statepoint)->getNextNode()); 1965 } 1966 } 1967 } 1968 1969 // Update use with load allocas and add store for gc_relocated. 1970 for (auto Pair : AllocaMap) { 1971 Value *Def = Pair.first; 1972 AllocaInst *Alloca = Pair.second; 1973 1974 // We pre-record the uses of allocas so that we dont have to worry about 1975 // later update that changes the user information.. 1976 1977 SmallVector<Instruction *, 20> Uses; 1978 // PERF: trade a linear scan for repeated reallocation 1979 Uses.reserve(Def->getNumUses()); 1980 for (User *U : Def->users()) { 1981 if (!isa<ConstantExpr>(U)) { 1982 // If the def has a ConstantExpr use, then the def is either a 1983 // ConstantExpr use itself or null. In either case 1984 // (recursively in the first, directly in the second), the oop 1985 // it is ultimately dependent on is null and this particular 1986 // use does not need to be fixed up. 1987 Uses.push_back(cast<Instruction>(U)); 1988 } 1989 } 1990 1991 llvm::sort(Uses); 1992 auto Last = std::unique(Uses.begin(), Uses.end()); 1993 Uses.erase(Last, Uses.end()); 1994 1995 for (Instruction *Use : Uses) { 1996 if (isa<PHINode>(Use)) { 1997 PHINode *Phi = cast<PHINode>(Use); 1998 for (unsigned i = 0; i < Phi->getNumIncomingValues(); i++) { 1999 if (Def == Phi->getIncomingValue(i)) { 2000 LoadInst *Load = 2001 new LoadInst(Alloca->getAllocatedType(), Alloca, "", 2002 Phi->getIncomingBlock(i)->getTerminator()); 2003 Phi->setIncomingValue(i, Load); 2004 } 2005 } 2006 } else { 2007 LoadInst *Load = 2008 new LoadInst(Alloca->getAllocatedType(), Alloca, "", Use); 2009 Use->replaceUsesOfWith(Def, Load); 2010 } 2011 } 2012 2013 // Emit store for the initial gc value. Store must be inserted after load, 2014 // otherwise store will be in alloca's use list and an extra load will be 2015 // inserted before it. 2016 StoreInst *Store = new StoreInst(Def, Alloca, /*volatile*/ false, 2017 DL.getABITypeAlign(Def->getType())); 2018 if (Instruction *Inst = dyn_cast<Instruction>(Def)) { 2019 if (InvokeInst *Invoke = dyn_cast<InvokeInst>(Inst)) { 2020 // InvokeInst is a terminator so the store need to be inserted into its 2021 // normal destination block. 
2022         BasicBlock *NormalDest = Invoke->getNormalDest();
2023         Store->insertBefore(NormalDest->getFirstNonPHI());
2024       } else {
2025         assert(!Inst->isTerminator() &&
2026                "The only terminator that can produce a value is "
2027                "InvokeInst which is handled above.");
2028         Store->insertAfter(Inst);
2029       }
2030     } else {
2031       assert(isa<Argument>(Def));
2032       Store->insertAfter(cast<Instruction>(Alloca));
2033     }
2034   }
2035
2036   assert(PromotableAllocas.size() == Live.size() + NumRematerializedValues &&
2037          "must have one alloca for each live and rematerialized value");
2038   if (!PromotableAllocas.empty()) {
2039     // Apply mem2reg to promote alloca to SSA
2040     PromoteMemToReg(PromotableAllocas, DT);
2041   }
2042
2043 #ifndef NDEBUG
2044   for (auto &I : F.getEntryBlock())
2045     if (isa<AllocaInst>(I))
2046       InitialAllocaNum--;
2047   assert(InitialAllocaNum == 0 && "We must not introduce any extra allocas");
2048 #endif
2049 }
2050
2051 /// Implement a unique function which doesn't require we sort the input
2052 /// vector. Doing so has the effect of changing the output of a couple of
2053 /// tests in ways which make them less useful in testing fused safepoints.
2054 template <typename T> static void unique_unsorted(SmallVectorImpl<T> &Vec) {
2055   SmallSet<T, 8> Seen;
2056   erase_if(Vec, [&](const T &V) { return !Seen.insert(V).second; });
2057 }
2058
2059 /// Insert holders so that each Value is obviously live through the entire
2060 /// lifetime of the call.
2061 static void insertUseHolderAfter(CallBase *Call, const ArrayRef<Value *> Values,
2062                                  SmallVectorImpl<CallInst *> &Holders) {
2063   if (Values.empty())
2064     // No values to hold live, might as well not insert the empty holder
2065     return;
2066
2067   Module *M = Call->getModule();
2068   // Use a dummy vararg function to actually hold the values live
2069   FunctionCallee Func = M->getOrInsertFunction(
2070       "__tmp_use", FunctionType::get(Type::getVoidTy(M->getContext()), true));
2071   if (isa<CallInst>(Call)) {
2072     // For call safepoints insert dummy calls right after safepoint
2073     Holders.push_back(
2074         CallInst::Create(Func, Values, "", &*++Call->getIterator()));
2075     return;
2076   }
2077   // For invoke safepoints insert dummy calls both in normal and
2078   // exceptional destination blocks
2079   auto *II = cast<InvokeInst>(Call);
2080   Holders.push_back(CallInst::Create(
2081       Func, Values, "", &*II->getNormalDest()->getFirstInsertionPt()));
2082   Holders.push_back(CallInst::Create(
2083       Func, Values, "", &*II->getUnwindDest()->getFirstInsertionPt()));
2084 }
2085
2086 static void findLiveReferences(
2087     Function &F, DominatorTree &DT, ArrayRef<CallBase *> toUpdate,
2088     MutableArrayRef<struct PartiallyConstructedSafepointRecord> records) {
2089   GCPtrLivenessData OriginalLivenessData;
2090   computeLiveInValues(DT, F, OriginalLivenessData);
2091   for (size_t i = 0; i < records.size(); i++) {
2092     struct PartiallyConstructedSafepointRecord &info = records[i];
2093     analyzeParsePointLiveness(DT, OriginalLivenessData, toUpdate[i], info);
2094   }
2095 }
2096
2097 // Helper function for the "rematerializeLiveValues". It walks the use chain
2098 // starting from the "CurrentValue" until it reaches the root of the chain, i.e.
2099 // the base or a value it cannot process. Only "simple" values are processed
2100 // (currently GEPs and casts). The returned root is examined by the
2101 // callers of findRematerializableChainToBasePointer. Fills "ChainToBase" array
2102 // with all visited values.
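//
// Illustrative example (hypothetical IR): for a derived pointer computed as
//
//   %cast    = bitcast %jobject addrspace(1)* %base to i8 addrspace(1)*
//   %derived = getelementptr i8, i8 addrspace(1)* %cast, i64 16
//
// the walk starting at %derived fills "ChainToBase" with [%derived, %cast]
// and returns %base as the root.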
2103 static Value* findRematerializableChainToBasePointer( 2104 SmallVectorImpl<Instruction*> &ChainToBase, 2105 Value *CurrentValue) { 2106 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(CurrentValue)) { 2107 ChainToBase.push_back(GEP); 2108 return findRematerializableChainToBasePointer(ChainToBase, 2109 GEP->getPointerOperand()); 2110 } 2111 2112 if (CastInst *CI = dyn_cast<CastInst>(CurrentValue)) { 2113 if (!CI->isNoopCast(CI->getModule()->getDataLayout())) 2114 return CI; 2115 2116 ChainToBase.push_back(CI); 2117 return findRematerializableChainToBasePointer(ChainToBase, 2118 CI->getOperand(0)); 2119 } 2120 2121 // We have reached the root of the chain, which is either equal to the base or 2122 // is the first unsupported value along the use chain. 2123 return CurrentValue; 2124 } 2125 2126 // Helper function for the "rematerializeLiveValues". Compute cost of the use 2127 // chain we are going to rematerialize. 2128 static InstructionCost 2129 chainToBasePointerCost(SmallVectorImpl<Instruction *> &Chain, 2130 TargetTransformInfo &TTI) { 2131 InstructionCost Cost = 0; 2132 2133 for (Instruction *Instr : Chain) { 2134 if (CastInst *CI = dyn_cast<CastInst>(Instr)) { 2135 assert(CI->isNoopCast(CI->getModule()->getDataLayout()) && 2136 "non noop cast is found during rematerialization"); 2137 2138 Type *SrcTy = CI->getOperand(0)->getType(); 2139 Cost += TTI.getCastInstrCost(CI->getOpcode(), CI->getType(), SrcTy, 2140 TTI::getCastContextHint(CI), 2141 TargetTransformInfo::TCK_SizeAndLatency, CI); 2142 2143 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Instr)) { 2144 // Cost of the address calculation 2145 Type *ValTy = GEP->getSourceElementType(); 2146 Cost += TTI.getAddressComputationCost(ValTy); 2147 2148 // And cost of the GEP itself 2149 // TODO: Use TTI->getGEPCost here (it exists, but appears to be not 2150 // allowed for the external usage) 2151 if (!GEP->hasAllConstantIndices()) 2152 Cost += 2; 2153 2154 } else { 2155 llvm_unreachable("unsupported instruction type during rematerialization"); 2156 } 2157 } 2158 2159 return Cost; 2160 } 2161 2162 static bool AreEquivalentPhiNodes(PHINode &OrigRootPhi, PHINode &AlternateRootPhi) { 2163 unsigned PhiNum = OrigRootPhi.getNumIncomingValues(); 2164 if (PhiNum != AlternateRootPhi.getNumIncomingValues() || 2165 OrigRootPhi.getParent() != AlternateRootPhi.getParent()) 2166 return false; 2167 // Map of incoming values and their corresponding basic blocks of 2168 // OrigRootPhi. 2169 SmallDenseMap<Value *, BasicBlock *, 8> CurrentIncomingValues; 2170 for (unsigned i = 0; i < PhiNum; i++) 2171 CurrentIncomingValues[OrigRootPhi.getIncomingValue(i)] = 2172 OrigRootPhi.getIncomingBlock(i); 2173 2174 // Both current and base PHIs should have same incoming values and 2175 // the same basic blocks corresponding to the incoming values. 2176 for (unsigned i = 0; i < PhiNum; i++) { 2177 auto CIVI = 2178 CurrentIncomingValues.find(AlternateRootPhi.getIncomingValue(i)); 2179 if (CIVI == CurrentIncomingValues.end()) 2180 return false; 2181 BasicBlock *CurrentIncomingBB = CIVI->second; 2182 if (CurrentIncomingBB != AlternateRootPhi.getIncomingBlock(i)) 2183 return false; 2184 } 2185 return true; 2186 } 2187 2188 // From the statepoint live set pick values that are cheaper to recompute then 2189 // to relocate. Remove this values from the live set, rematerialize them after 2190 // statepoint and record them in "Info" structure. Note that similar to 2191 // relocated values we don't do any user adjustments here. 
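//
// As a hedged illustration (hypothetical IR): if the live set contains both
// %base and
//
//   %derived = getelementptr i8, i8 addrspace(1)* %base, i64 16
//
// and the GEP is cheap enough, %derived is dropped from the live set and a
// clone named %derived.remat is emitted after the statepoint (once per
// destination for invokes); the later relocation-via-alloca rewrite makes the
// clone use the relocated base.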
2192 static void rematerializeLiveValues(CallBase *Call,
2193                                     PartiallyConstructedSafepointRecord &Info,
2194                                     TargetTransformInfo &TTI) {
2195   const unsigned int ChainLengthThreshold = 10;
2196
2197   // Record values we are going to delete from this statepoint live set.
2198   // We cannot do this in the following loop due to iterator invalidation.
2199   SmallVector<Value *, 32> LiveValuesToBeDeleted;
2200
2201   for (Value *LiveValue : Info.LiveSet) {
2202     // For each live pointer find its defining chain
2203     SmallVector<Instruction *, 3> ChainToBase;
2204     assert(Info.PointerToBase.count(LiveValue));
2205     Value *RootOfChain =
2206         findRematerializableChainToBasePointer(ChainToBase,
2207                                                LiveValue);
2208
2209     // Nothing to do, or chain is too long
2210     if (ChainToBase.size() == 0 ||
2211         ChainToBase.size() > ChainLengthThreshold)
2212       continue;
2213
2214     // Handle the scenario where the RootOfChain is not equal to the
2215     // Base Value, but they are essentially the same phi values.
2216     if (RootOfChain != Info.PointerToBase[LiveValue]) {
2217       PHINode *OrigRootPhi = dyn_cast<PHINode>(RootOfChain);
2218       PHINode *AlternateRootPhi = dyn_cast<PHINode>(Info.PointerToBase[LiveValue]);
2219       if (!OrigRootPhi || !AlternateRootPhi)
2220         continue;
2221       // PHI nodes that have the same incoming values and belong to the same
2222       // basic block are essentially the same SSA value. When the original phi
2223       // has incoming values with different base pointers, the original phi is
2224       // marked as conflict, and an additional `AlternateRootPhi` with the same
2225       // incoming values gets generated by the findBasePointer function. We need
2226       // to identify that the newly generated AlternateRootPhi (.base version of
2227       // phi) and RootOfChain (the original phi node itself) are the same, so
2228       // that we can rematerialize the gep and casts. This is a workaround for
2229       // the deficiency in the findBasePointer algorithm.
2230       if (!AreEquivalentPhiNodes(*OrigRootPhi, *AlternateRootPhi))
2231         continue;
2232       // Now that the phi nodes are proved to be the same, assert that
2233       // findBasePointer's newly generated AlternateRootPhi is present in the
2234       // liveset of the call.
2235       assert(Info.LiveSet.count(AlternateRootPhi));
2236     }
2237     // Compute cost of this chain
2238     InstructionCost Cost = chainToBasePointerCost(ChainToBase, TTI);
2239     // TODO: We can also account for cases when we will be able to remove some
2240     // of the rematerialized values by later optimization passes. I.e. if
2241     // we rematerialized several intersecting chains. Or if original values
2242     // don't have any uses besides this statepoint.
2243
2244     // For invokes we need to rematerialize each chain twice - for normal and
2245     // for unwind basic blocks. Model this by multiplying cost by two.
2246     if (isa<InvokeInst>(Call)) {
2247       Cost *= 2;
2248     }
2249     // If it's too expensive - skip it
2250     if (Cost >= RematerializationThreshold)
2251       continue;
2252
2253     // Remove value from the live set
2254     LiveValuesToBeDeleted.push_back(LiveValue);
2255
2256     // Clone instructions and record them inside the "Info" structure
2257
2258     // Walk backwards to visit top-most instructions first
2259     std::reverse(ChainToBase.begin(), ChainToBase.end());
2260
2261     // Utility function which clones all instructions from "ChainToBase"
2262     // and inserts them before "InsertBefore". Returns the rematerialized value
2263     // which should be used after the statepoint.
2264 auto rematerializeChain = [&ChainToBase]( 2265 Instruction *InsertBefore, Value *RootOfChain, Value *AlternateLiveBase) { 2266 Instruction *LastClonedValue = nullptr; 2267 Instruction *LastValue = nullptr; 2268 for (Instruction *Instr: ChainToBase) { 2269 // Only GEP's and casts are supported as we need to be careful to not 2270 // introduce any new uses of pointers not in the liveset. 2271 // Note that it's fine to introduce new uses of pointers which were 2272 // otherwise not used after this statepoint. 2273 assert(isa<GetElementPtrInst>(Instr) || isa<CastInst>(Instr)); 2274 2275 Instruction *ClonedValue = Instr->clone(); 2276 ClonedValue->insertBefore(InsertBefore); 2277 ClonedValue->setName(Instr->getName() + ".remat"); 2278 2279 // If it is not first instruction in the chain then it uses previously 2280 // cloned value. We should update it to use cloned value. 2281 if (LastClonedValue) { 2282 assert(LastValue); 2283 ClonedValue->replaceUsesOfWith(LastValue, LastClonedValue); 2284 #ifndef NDEBUG 2285 for (auto OpValue : ClonedValue->operand_values()) { 2286 // Assert that cloned instruction does not use any instructions from 2287 // this chain other than LastClonedValue 2288 assert(!is_contained(ChainToBase, OpValue) && 2289 "incorrect use in rematerialization chain"); 2290 // Assert that the cloned instruction does not use the RootOfChain 2291 // or the AlternateLiveBase. 2292 assert(OpValue != RootOfChain && OpValue != AlternateLiveBase); 2293 } 2294 #endif 2295 } else { 2296 // For the first instruction, replace the use of unrelocated base i.e. 2297 // RootOfChain/OrigRootPhi, with the corresponding PHI present in the 2298 // live set. They have been proved to be the same PHI nodes. Note 2299 // that the *only* use of the RootOfChain in the ChainToBase list is 2300 // the first Value in the list. 2301 if (RootOfChain != AlternateLiveBase) 2302 ClonedValue->replaceUsesOfWith(RootOfChain, AlternateLiveBase); 2303 } 2304 2305 LastClonedValue = ClonedValue; 2306 LastValue = Instr; 2307 } 2308 assert(LastClonedValue); 2309 return LastClonedValue; 2310 }; 2311 2312 // Different cases for calls and invokes. For invokes we need to clone 2313 // instructions both on normal and unwind path. 
2314     if (isa<CallInst>(Call)) {
2315       Instruction *InsertBefore = Call->getNextNode();
2316       assert(InsertBefore);
2317       Instruction *RematerializedValue = rematerializeChain(
2318           InsertBefore, RootOfChain, Info.PointerToBase[LiveValue]);
2319       Info.RematerializedValues[RematerializedValue] = LiveValue;
2320     } else {
2321       auto *Invoke = cast<InvokeInst>(Call);
2322
2323       Instruction *NormalInsertBefore =
2324           &*Invoke->getNormalDest()->getFirstInsertionPt();
2325       Instruction *UnwindInsertBefore =
2326           &*Invoke->getUnwindDest()->getFirstInsertionPt();
2327
2328       Instruction *NormalRematerializedValue = rematerializeChain(
2329           NormalInsertBefore, RootOfChain, Info.PointerToBase[LiveValue]);
2330       Instruction *UnwindRematerializedValue = rematerializeChain(
2331           UnwindInsertBefore, RootOfChain, Info.PointerToBase[LiveValue]);
2332
2333       Info.RematerializedValues[NormalRematerializedValue] = LiveValue;
2334       Info.RematerializedValues[UnwindRematerializedValue] = LiveValue;
2335     }
2336   }
2337
2338   // Remove rematerialized values from the live set
2339   for (auto LiveValue : LiveValuesToBeDeleted) {
2340     Info.LiveSet.remove(LiveValue);
2341   }
2342 }
2343
2344 static bool insertParsePoints(Function &F, DominatorTree &DT,
2345                               TargetTransformInfo &TTI,
2346                               SmallVectorImpl<CallBase *> &ToUpdate) {
2347 #ifndef NDEBUG
2348   // sanity check the input
2349   std::set<CallBase *> Uniqued;
2350   Uniqued.insert(ToUpdate.begin(), ToUpdate.end());
2351   assert(Uniqued.size() == ToUpdate.size() && "no duplicates please!");
2352
2353   for (CallBase *Call : ToUpdate)
2354     assert(Call->getFunction() == &F);
2355 #endif
2356
2357   // When inserting gc.relocates for invokes, we need to be able to insert at
2358   // the top of the successor blocks. See the comment on
2359   // normalizeForInvokeSafepoint for exactly what is needed. Note that this
2360   // step may restructure the CFG.
2361   for (CallBase *Call : ToUpdate) {
2362     auto *II = dyn_cast<InvokeInst>(Call);
2363     if (!II)
2364       continue;
2365     normalizeForInvokeSafepoint(II->getNormalDest(), II->getParent(), DT);
2366     normalizeForInvokeSafepoint(II->getUnwindDest(), II->getParent(), DT);
2367   }
2368
2369   // A list of dummy calls added to the IR to keep various values obviously
2370   // live in the IR. We'll remove all of these when done.
2371   SmallVector<CallInst *, 64> Holders;
2372
2373   // Insert a dummy call with all of the deopt operands we'll need for the
2374   // actual safepoint insertion as arguments. This ensures reference operands
2375   // in the deopt argument list are considered live through the safepoint (and
2376   // thus makes sure they get relocated.)
2377   for (CallBase *Call : ToUpdate) {
2378     SmallVector<Value *, 64> DeoptValues;
2379
2380     for (Value *Arg : GetDeoptBundleOperands(Call)) {
2381       assert(!isUnhandledGCPointerType(Arg->getType()) &&
2382              "support for FCA unimplemented");
2383       if (isHandledGCPointerType(Arg->getType()))
2384         DeoptValues.push_back(Arg);
2385     }
2386
2387     insertUseHolderAfter(Call, DeoptValues, Holders);
2388   }
2389
2390   SmallVector<PartiallyConstructedSafepointRecord, 64> Records(ToUpdate.size());
2391
2392   // A) Identify all gc pointers which are statically live at the given call
2393   // site.
2394   findLiveReferences(F, DT, ToUpdate, Records);
2395
2396   // B) Find the base pointers for each live pointer
2397   /* scope for caching */ {
2398     // Cache the 'defining value' relation used in the computation and
2399     // insertion of base phis and selects. This ensures that we don't insert
2400     // large numbers of duplicate base_phis.
2401 DefiningValueMapTy DVCache; 2402 2403 for (size_t i = 0; i < Records.size(); i++) { 2404 PartiallyConstructedSafepointRecord &info = Records[i]; 2405 findBasePointers(DT, DVCache, ToUpdate[i], info); 2406 } 2407 } // end of cache scope 2408 2409 // The base phi insertion logic (for any safepoint) may have inserted new 2410 // instructions which are now live at some safepoint. The simplest such 2411 // example is: 2412 // loop: 2413 // phi a <-- will be a new base_phi here 2414 // safepoint 1 <-- that needs to be live here 2415 // gep a + 1 2416 // safepoint 2 2417 // br loop 2418 // We insert some dummy calls after each safepoint to definitely hold live 2419 // the base pointers which were identified for that safepoint. We'll then 2420 // ask liveness for _every_ base inserted to see what is now live. Then we 2421 // remove the dummy calls. 2422 Holders.reserve(Holders.size() + Records.size()); 2423 for (size_t i = 0; i < Records.size(); i++) { 2424 PartiallyConstructedSafepointRecord &Info = Records[i]; 2425 2426 SmallVector<Value *, 128> Bases; 2427 for (auto Pair : Info.PointerToBase) 2428 Bases.push_back(Pair.second); 2429 2430 insertUseHolderAfter(ToUpdate[i], Bases, Holders); 2431 } 2432 2433 // By selecting base pointers, we've effectively inserted new uses. Thus, we 2434 // need to rerun liveness. We may *also* have inserted new defs, but that's 2435 // not the key issue. 2436 recomputeLiveInValues(F, DT, ToUpdate, Records); 2437 2438 if (PrintBasePointers) { 2439 for (auto &Info : Records) { 2440 errs() << "Base Pairs: (w/Relocation)\n"; 2441 for (auto Pair : Info.PointerToBase) { 2442 errs() << " derived "; 2443 Pair.first->printAsOperand(errs(), false); 2444 errs() << " base "; 2445 Pair.second->printAsOperand(errs(), false); 2446 errs() << "\n"; 2447 } 2448 } 2449 } 2450 2451 // It is possible that non-constant live variables have a constant base. For 2452 // example, a GEP with a variable offset from a global. In this case we can 2453 // remove it from the liveset. We already don't add constants to the liveset 2454 // because we assume they won't move at runtime and the GC doesn't need to be 2455 // informed about them. The same reasoning applies if the base is constant. 2456 // Note that the relocation placement code relies on this filtering for 2457 // correctness as it expects the base to be in the liveset, which isn't true 2458 // if the base is constant. 2459 for (auto &Info : Records) 2460 for (auto &BasePair : Info.PointerToBase) 2461 if (isa<Constant>(BasePair.second)) 2462 Info.LiveSet.remove(BasePair.first); 2463 2464 for (CallInst *CI : Holders) 2465 CI->eraseFromParent(); 2466 2467 Holders.clear(); 2468 2469 // In order to reduce live set of statepoint we might choose to rematerialize 2470 // some values instead of relocating them. This is purely an optimization and 2471 // does not influence correctness. 2472 for (size_t i = 0; i < Records.size(); i++) 2473 rematerializeLiveValues(ToUpdate[i], Records[i], TTI); 2474 2475 // We need this to safely RAUW and delete call or invoke return values that 2476 // may themselves be live over a statepoint. For details, please see usage in 2477 // makeStatepointExplicitImpl. 2478 std::vector<DeferredReplacement> Replacements; 2479 2480 // Now run through and replace the existing statepoints with new ones with 2481 // the live variables listed. We do not yet update uses of the values being 2482 // relocated. We have references to live variables that need to 2483 // survive to the last iteration of this loop. 
(By construction, the
2484 // previous statepoint cannot be a live variable, thus we can and do remove
2485 // the old statepoint calls as we go.)
2486   for (size_t i = 0; i < Records.size(); i++)
2487     makeStatepointExplicit(DT, ToUpdate[i], Records[i], Replacements);
2488
2489   ToUpdate.clear(); // prevent accidental use of invalid calls.
2490
2491   for (auto &PR : Replacements)
2492     PR.doReplacement();
2493
2494   Replacements.clear();
2495
2496   for (auto &Info : Records) {
2497     // These live sets may contain stale Value pointers, since we replaced
2498     // calls with operand bundles with calls wrapped in gc.statepoint, and some
2499     // of those calls may have been def'ing live gc pointers. Clear these out
2500     // to avoid accidentally using them.
2501     //
2502     // TODO: We should create a separate data structure that does not contain
2503     // these live sets, and migrate to using that data structure from this point
2504     // onward.
2505     Info.LiveSet.clear();
2506     Info.PointerToBase.clear();
2507   }
2508
2509   // Do all the fixups of the original live variables to their relocated selves
2510   SmallVector<Value *, 128> Live;
2511   for (size_t i = 0; i < Records.size(); i++) {
2512     PartiallyConstructedSafepointRecord &Info = Records[i];
2513
2514     // We can't simply save the live set from the original insertion. One of
2515     // the live values might be the result of a call which needs a safepoint.
2516     // That Value* no longer exists and we need to use the new gc_result.
2517     // Thankfully, the live set is embedded in the statepoint (and updated), so
2518     // we just grab that.
2519     llvm::append_range(Live, Info.StatepointToken->gc_args());
2520 #ifndef NDEBUG
2521     // Do some basic sanity checks on our liveness results before performing
2522     // relocation. Relocation can and will turn mistakes in liveness results
2523     // into non-sensical code which is much harder to debug.
2524     // TODO: It would be nice to test consistency as well
2525     assert(DT.isReachableFromEntry(Info.StatepointToken->getParent()) &&
2526            "statepoint must be reachable or liveness is meaningless");
2527     for (Value *V : Info.StatepointToken->gc_args()) {
2528       if (!isa<Instruction>(V))
2529         // Non-instruction values trivially dominate all possible uses
2530         continue;
2531       auto *LiveInst = cast<Instruction>(V);
2532       assert(DT.isReachableFromEntry(LiveInst->getParent()) &&
2533              "unreachable values should never be live");
2534       assert(DT.dominates(LiveInst, Info.StatepointToken) &&
2535              "basic SSA liveness expectation violated by liveness analysis");
2536     }
2537 #endif
2538   }
2539   unique_unsorted(Live);
2540
2541 #ifndef NDEBUG
2542   // sanity check
2543   for (auto *Ptr : Live)
2544     assert(isHandledGCPointerType(Ptr->getType()) &&
2545            "must be a gc pointer type");
2546 #endif
2547
2548   relocationViaAlloca(F, DT, Live, Records);
2549   return !Records.empty();
2550 }
2551
2552 // Handles both return values and arguments for Functions and calls.
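// For example (illustrative only): an argument declared as
//
//   define void @f(i8 addrspace(1)* dereferenceable(16) noalias %p)
//
// has dereferenceable(16) and noalias stripped here; the attributes this
// helper knows how to remove are dereferenceable, dereferenceable_or_null and
// noalias.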
2553 template <typename AttrHolder> 2554 static void RemoveNonValidAttrAtIndex(LLVMContext &Ctx, AttrHolder &AH, 2555 unsigned Index) { 2556 AttrBuilder R; 2557 if (AH.getDereferenceableBytes(Index)) 2558 R.addAttribute(Attribute::get(Ctx, Attribute::Dereferenceable, 2559 AH.getDereferenceableBytes(Index))); 2560 if (AH.getDereferenceableOrNullBytes(Index)) 2561 R.addAttribute(Attribute::get(Ctx, Attribute::DereferenceableOrNull, 2562 AH.getDereferenceableOrNullBytes(Index))); 2563 if (AH.getAttributes().hasAttribute(Index, Attribute::NoAlias)) 2564 R.addAttribute(Attribute::NoAlias); 2565 2566 if (!R.empty()) 2567 AH.setAttributes(AH.getAttributes().removeAttributes(Ctx, Index, R)); 2568 } 2569 2570 static void stripNonValidAttributesFromPrototype(Function &F) { 2571 LLVMContext &Ctx = F.getContext(); 2572 2573 for (Argument &A : F.args()) 2574 if (isa<PointerType>(A.getType())) 2575 RemoveNonValidAttrAtIndex(Ctx, F, 2576 A.getArgNo() + AttributeList::FirstArgIndex); 2577 2578 if (isa<PointerType>(F.getReturnType())) 2579 RemoveNonValidAttrAtIndex(Ctx, F, AttributeList::ReturnIndex); 2580 } 2581 2582 /// Certain metadata on instructions are invalid after running RS4GC. 2583 /// Optimizations that run after RS4GC can incorrectly use this metadata to 2584 /// optimize functions. We drop such metadata on the instruction. 2585 static void stripInvalidMetadataFromInstruction(Instruction &I) { 2586 if (!isa<LoadInst>(I) && !isa<StoreInst>(I)) 2587 return; 2588 // These are the attributes that are still valid on loads and stores after 2589 // RS4GC. 2590 // The metadata implying dereferenceability and noalias are (conservatively) 2591 // dropped. This is because semantically, after RewriteStatepointsForGC runs, 2592 // all calls to gc.statepoint "free" the entire heap. Also, gc.statepoint can 2593 // touch the entire heap including noalias objects. Note: The reasoning is 2594 // same as stripping the dereferenceability and noalias attributes that are 2595 // analogous to the metadata counterparts. 2596 // We also drop the invariant.load metadata on the load because that metadata 2597 // implies the address operand to the load points to memory that is never 2598 // changed once it became dereferenceable. This is no longer true after RS4GC. 2599 // Similar reasoning applies to invariant.group metadata, which applies to 2600 // loads within a group. 2601 unsigned ValidMetadataAfterRS4GC[] = {LLVMContext::MD_tbaa, 2602 LLVMContext::MD_range, 2603 LLVMContext::MD_alias_scope, 2604 LLVMContext::MD_nontemporal, 2605 LLVMContext::MD_nonnull, 2606 LLVMContext::MD_align, 2607 LLVMContext::MD_type}; 2608 2609 // Drops all metadata on the instruction other than ValidMetadataAfterRS4GC. 2610 I.dropUnknownNonDebugMetadata(ValidMetadataAfterRS4GC); 2611 } 2612 2613 static void stripNonValidDataFromBody(Function &F) { 2614 if (F.empty()) 2615 return; 2616 2617 LLVMContext &Ctx = F.getContext(); 2618 MDBuilder Builder(Ctx); 2619 2620 // Set of invariantstart instructions that we need to remove. 2621 // Use this to avoid invalidating the instruction iterator. 2622 SmallVector<IntrinsicInst*, 12> InvariantStartInstructions; 2623 2624 for (Instruction &I : instructions(F)) { 2625 // invariant.start on memory location implies that the referenced memory 2626 // location is constant and unchanging. 
This is no longer true after 2627 // RewriteStatepointsForGC runs because there can be calls to gc.statepoint 2628 // which frees the entire heap and the presence of invariant.start allows 2629 // the optimizer to sink the load of a memory location past a statepoint, 2630 // which is incorrect. 2631 if (auto *II = dyn_cast<IntrinsicInst>(&I)) 2632 if (II->getIntrinsicID() == Intrinsic::invariant_start) { 2633 InvariantStartInstructions.push_back(II); 2634 continue; 2635 } 2636 2637 if (MDNode *Tag = I.getMetadata(LLVMContext::MD_tbaa)) { 2638 MDNode *MutableTBAA = Builder.createMutableTBAAAccessTag(Tag); 2639 I.setMetadata(LLVMContext::MD_tbaa, MutableTBAA); 2640 } 2641 2642 stripInvalidMetadataFromInstruction(I); 2643 2644 if (auto *Call = dyn_cast<CallBase>(&I)) { 2645 for (int i = 0, e = Call->arg_size(); i != e; i++) 2646 if (isa<PointerType>(Call->getArgOperand(i)->getType())) 2647 RemoveNonValidAttrAtIndex(Ctx, *Call, 2648 i + AttributeList::FirstArgIndex); 2649 if (isa<PointerType>(Call->getType())) 2650 RemoveNonValidAttrAtIndex(Ctx, *Call, AttributeList::ReturnIndex); 2651 } 2652 } 2653 2654 // Delete the invariant.start instructions and RAUW undef. 2655 for (auto *II : InvariantStartInstructions) { 2656 II->replaceAllUsesWith(UndefValue::get(II->getType())); 2657 II->eraseFromParent(); 2658 } 2659 } 2660 2661 /// Returns true if this function should be rewritten by this pass. The main 2662 /// point of this function is as an extension point for custom logic. 2663 static bool shouldRewriteStatepointsIn(Function &F) { 2664 // TODO: This should check the GCStrategy 2665 if (F.hasGC()) { 2666 const auto &FunctionGCName = F.getGC(); 2667 const StringRef StatepointExampleName("statepoint-example"); 2668 const StringRef CoreCLRName("coreclr"); 2669 return (StatepointExampleName == FunctionGCName) || 2670 (CoreCLRName == FunctionGCName); 2671 } else 2672 return false; 2673 } 2674 2675 static void stripNonValidData(Module &M) { 2676 #ifndef NDEBUG 2677 assert(llvm::any_of(M, shouldRewriteStatepointsIn) && "precondition!"); 2678 #endif 2679 2680 for (Function &F : M) 2681 stripNonValidAttributesFromPrototype(F); 2682 2683 for (Function &F : M) 2684 stripNonValidDataFromBody(F); 2685 } 2686 2687 bool RewriteStatepointsForGC::runOnFunction(Function &F, DominatorTree &DT, 2688 TargetTransformInfo &TTI, 2689 const TargetLibraryInfo &TLI) { 2690 assert(!F.isDeclaration() && !F.empty() && 2691 "need function body to rewrite statepoints in"); 2692 assert(shouldRewriteStatepointsIn(F) && "mismatch in rewrite decision"); 2693 2694 auto NeedsRewrite = [&TLI](Instruction &I) { 2695 if (const auto *Call = dyn_cast<CallBase>(&I)) { 2696 if (isa<GCStatepointInst>(Call)) 2697 return false; 2698 if (callsGCLeafFunction(Call, TLI)) 2699 return false; 2700 2701 // Normally it's up to the frontend to make sure that non-leaf calls also 2702 // have proper deopt state if it is required. We make an exception for 2703 // element atomic memcpy/memmove intrinsics here. Unlike other intrinsics 2704 // these are non-leaf by default. They might be generated by the optimizer 2705 // which doesn't know how to produce a proper deopt state. So if we see a 2706 // non-leaf memcpy/memmove without deopt state just treat it as a leaf 2707 // copy and don't produce a statepoint. 
2708 if (!AllowStatepointWithNoDeoptInfo && 2709 !Call->getOperandBundle(LLVMContext::OB_deopt)) { 2710 assert((isa<AtomicMemCpyInst>(Call) || isa<AtomicMemMoveInst>(Call)) && 2711 "Don't expect any other calls here!"); 2712 return false; 2713 } 2714 return true; 2715 } 2716 return false; 2717 }; 2718 2719 // Delete any unreachable statepoints so that we don't have unrewritten 2720 // statepoints surviving this pass. This makes testing easier and the 2721 // resulting IR less confusing to human readers. 2722 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy); 2723 bool MadeChange = removeUnreachableBlocks(F, &DTU); 2724 // Flush the Dominator Tree. 2725 DTU.getDomTree(); 2726 2727 // Gather all the statepoints which need rewritten. Be careful to only 2728 // consider those in reachable code since we need to ask dominance queries 2729 // when rewriting. We'll delete the unreachable ones in a moment. 2730 SmallVector<CallBase *, 64> ParsePointNeeded; 2731 for (Instruction &I : instructions(F)) { 2732 // TODO: only the ones with the flag set! 2733 if (NeedsRewrite(I)) { 2734 // NOTE removeUnreachableBlocks() is stronger than 2735 // DominatorTree::isReachableFromEntry(). In other words 2736 // removeUnreachableBlocks can remove some blocks for which 2737 // isReachableFromEntry() returns true. 2738 assert(DT.isReachableFromEntry(I.getParent()) && 2739 "no unreachable blocks expected"); 2740 ParsePointNeeded.push_back(cast<CallBase>(&I)); 2741 } 2742 } 2743 2744 // Return early if no work to do. 2745 if (ParsePointNeeded.empty()) 2746 return MadeChange; 2747 2748 // As a prepass, go ahead and aggressively destroy single entry phi nodes. 2749 // These are created by LCSSA. They have the effect of increasing the size 2750 // of liveness sets for no good reason. It may be harder to do this post 2751 // insertion since relocations and base phis can confuse things. 2752 for (BasicBlock &BB : F) 2753 if (BB.getUniquePredecessor()) 2754 MadeChange |= FoldSingleEntryPHINodes(&BB); 2755 2756 // Before we start introducing relocations, we want to tweak the IR a bit to 2757 // avoid unfortunate code generation effects. The main example is that we 2758 // want to try to make sure the comparison feeding a branch is after any 2759 // safepoints. Otherwise, we end up with a comparison of pre-relocation 2760 // values feeding a branch after relocation. This is semantically correct, 2761 // but results in extra register pressure since both the pre-relocation and 2762 // post-relocation copies must be available in registers. For code without 2763 // relocations this is handled elsewhere, but teaching the scheduler to 2764 // reverse the transform we're about to do would be slightly complex. 2765 // Note: This may extend the live range of the inputs to the icmp and thus 2766 // increase the liveset of any statepoint we move over. This is profitable 2767 // as long as all statepoints are in rare blocks. If we had in-register 2768 // lowering for live values this would be a much safer transform. 2769 auto getConditionInst = [](Instruction *TI) -> Instruction * { 2770 if (auto *BI = dyn_cast<BranchInst>(TI)) 2771 if (BI->isConditional()) 2772 return dyn_cast<Instruction>(BI->getCondition()); 2773 // TODO: Extend this to handle switches 2774 return nullptr; 2775 }; 2776 for (BasicBlock &BB : F) { 2777 Instruction *TI = BB.getTerminator(); 2778 if (auto *Cond = getConditionInst(TI)) 2779 // TODO: Handle more than just ICmps here. 
      if (isa<ICmpInst>(Cond) && Cond->hasOneUse()) {
        MadeChange = true;
        Cond->moveBefore(TI);
      }
  }

  // Nasty workaround - The base computation code in the main algorithm doesn't
  // consider the fact that a GEP can be used to convert a scalar to a vector.
  // The right fix for this is to integrate GEPs into the base rewriting
  // algorithm properly; this is just a short-term workaround to prevent
  // crashes by canonicalizing such GEPs into fully vector GEPs.
  for (Instruction &I : instructions(F)) {
    if (!isa<GetElementPtrInst>(I))
      continue;

    unsigned VF = 0;
    for (unsigned i = 0; i < I.getNumOperands(); i++)
      if (auto *OpndVTy = dyn_cast<VectorType>(I.getOperand(i)->getType())) {
        assert(VF == 0 ||
               VF == cast<FixedVectorType>(OpndVTy)->getNumElements());
        VF = cast<FixedVectorType>(OpndVTy)->getNumElements();
      }

    // It's the vector-to-scalar traversal through the pointer operand which
    // confuses base pointer rewriting, so limit ourselves to that case.
    if (!I.getOperand(0)->getType()->isVectorTy() && VF != 0) {
      IRBuilder<> B(&I);
      auto *Splat = B.CreateVectorSplat(VF, I.getOperand(0));
      I.setOperand(0, Splat);
      MadeChange = true;
    }
  }

  MadeChange |= insertParsePoints(F, DT, TTI, ParsePointNeeded);
  return MadeChange;
}

// liveness computation via standard dataflow
// -------------------------------------------------------------------

// TODO: Consider using bitvectors for liveness; the set of potentially
// interesting values should be small and easy to pre-compute.

/// Compute the live-in set for the location Begin, starting from
/// the live-out set of the basic block.
static void computeLiveInValues(BasicBlock::reverse_iterator Begin,
                                BasicBlock::reverse_iterator End,
                                SetVector<Value *> &LiveTmp) {
  for (auto &I : make_range(Begin, End)) {
    // KILL/Def - Remove this definition from LiveIn
    LiveTmp.remove(&I);

    // Don't consider *uses* in PHI nodes; we handle their contribution to
    // predecessor blocks when we seed the LiveOut sets.
    if (isa<PHINode>(I))
      continue;

    // USE - Add to the LiveIn set for this instruction
    for (Value *V : I.operands()) {
      assert(!isUnhandledGCPointerType(V->getType()) &&
             "support for FCA unimplemented");
      if (isHandledGCPointerType(V->getType()) && !isa<Constant>(V)) {
        // The choice to exclude all things constant here is slightly subtle.
        // There are two independent reasons:
        // - First, we assume that things which are constant (from LLVM's
        //   definition) do not move at runtime. For example, the address of a
        //   global variable is fixed, even though its contents may not be.
        // - Second, we can't disallow arbitrary inttoptr constants even
        //   if the language frontend does. Optimization passes are free to
        //   locally exploit facts without respect to global reachability. This
        //   can create sections of code which are dynamically unreachable and
        //   contain just about anything (see constants.ll in tests).
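        // (An illustrative example of the second point: a constant such as
        //  inttoptr (i64 16 to i8 addrspace(1)*) may appear in such dynamically
        //  unreachable code; since it doesn't name an object the collector
        //  could move, we simply don't track it as live.)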
        LiveTmp.insert(V);
      }
    }
  }
}

static void computeLiveOutSeed(BasicBlock *BB, SetVector<Value *> &LiveTmp) {
  for (BasicBlock *Succ : successors(BB)) {
    for (auto &I : *Succ) {
      PHINode *PN = dyn_cast<PHINode>(&I);
      if (!PN)
        break;

      Value *V = PN->getIncomingValueForBlock(BB);
      assert(!isUnhandledGCPointerType(V->getType()) &&
             "support for FCA unimplemented");
      if (isHandledGCPointerType(V->getType()) && !isa<Constant>(V))
        LiveTmp.insert(V);
    }
  }
}

static SetVector<Value *> computeKillSet(BasicBlock *BB) {
  SetVector<Value *> KillSet;
  for (Instruction &I : *BB)
    if (isHandledGCPointerType(I.getType()))
      KillSet.insert(&I);
  return KillSet;
}

#ifndef NDEBUG
/// Check that the items in 'Live' dominate 'TI'. This is used as a basic
/// sanity check for the liveness computation.
static void checkBasicSSA(DominatorTree &DT, SetVector<Value *> &Live,
                          Instruction *TI, bool TermOkay = false) {
  for (Value *V : Live) {
    if (auto *I = dyn_cast<Instruction>(V)) {
      // The terminator can be a member of the LiveOut set. LLVM's definition
      // of instruction dominance states that V does not dominate itself. As
      // such, we need to special case this to allow it.
      if (TermOkay && TI == I)
        continue;
      assert(DT.dominates(I, TI) &&
             "basic SSA liveness expectation violated by liveness analysis");
    }
  }
}

/// Check that all the liveness sets used during the computation of liveness
/// obey basic SSA properties. This is useful for finding cases where we miss
/// a def.
static void checkBasicSSA(DominatorTree &DT, GCPtrLivenessData &Data,
                          BasicBlock &BB) {
  checkBasicSSA(DT, Data.LiveSet[&BB], BB.getTerminator());
  checkBasicSSA(DT, Data.LiveOut[&BB], BB.getTerminator(), true);
  checkBasicSSA(DT, Data.LiveIn[&BB], BB.getTerminator());
}
#endif

static void computeLiveInValues(DominatorTree &DT, Function &F,
                                GCPtrLivenessData &Data) {
  SmallSetVector<BasicBlock *, 32> Worklist;

  // Seed the liveness for each individual block
  for (BasicBlock &BB : F) {
    Data.KillSet[&BB] = computeKillSet(&BB);
    Data.LiveSet[&BB].clear();
    computeLiveInValues(BB.rbegin(), BB.rend(), Data.LiveSet[&BB]);

#ifndef NDEBUG
    for (Value *Kill : Data.KillSet[&BB])
      assert(!Data.LiveSet[&BB].count(Kill) && "live set contains kill");
#endif

    Data.LiveOut[&BB] = SetVector<Value *>();
    computeLiveOutSeed(&BB, Data.LiveOut[&BB]);
    Data.LiveIn[&BB] = Data.LiveSet[&BB];
    Data.LiveIn[&BB].set_union(Data.LiveOut[&BB]);
    Data.LiveIn[&BB].set_subtract(Data.KillSet[&BB]);
    if (!Data.LiveIn[&BB].empty())
      Worklist.insert(pred_begin(&BB), pred_end(&BB));
  }

  // Propagate that liveness until stable
  while (!Worklist.empty()) {
    BasicBlock *BB = Worklist.pop_back_val();

    // Compute our new liveout set, then exit early if it hasn't changed despite
    // the contribution of our successor.
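    // As a reminder, the (standard, backward) dataflow equations being iterated
    // to a fixed point here are roughly:
    //   LiveOut(BB) = union of LiveIn(S) over all successors S of BB
    //   LiveIn(BB)  = (LiveOut(BB) union LiveSet(BB)) minus KillSet(BB)
    // where LiveSet(BB) holds the per-block use contributions seeded above and
    // KillSet(BB) the gc pointers defined in BB.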
    SetVector<Value *> LiveOut = Data.LiveOut[BB];
    const auto OldLiveOutSize = LiveOut.size();
    for (BasicBlock *Succ : successors(BB)) {
      assert(Data.LiveIn.count(Succ));
      LiveOut.set_union(Data.LiveIn[Succ]);
    }
    // assert: OldLiveOut is a subset of LiveOut
    if (OldLiveOutSize == LiveOut.size()) {
      // If the sets are the same size, then we didn't actually add anything
      // when unioning our successors' LiveIn. Thus, the LiveIn of this block
      // hasn't changed.
      continue;
    }
    Data.LiveOut[BB] = LiveOut;

    // Apply the effects of this basic block
    SetVector<Value *> LiveTmp = LiveOut;
    LiveTmp.set_union(Data.LiveSet[BB]);
    LiveTmp.set_subtract(Data.KillSet[BB]);

    assert(Data.LiveIn.count(BB));
    const SetVector<Value *> &OldLiveIn = Data.LiveIn[BB];
    // assert: OldLiveIn is a subset of LiveTmp
    if (OldLiveIn.size() != LiveTmp.size()) {
      Data.LiveIn[BB] = LiveTmp;
      Worklist.insert(pred_begin(BB), pred_end(BB));
    }
  } // while (!Worklist.empty())

#ifndef NDEBUG
  // Sanity check our output against SSA properties. This helps catch any
  // missing kills during the above iteration.
  for (BasicBlock &BB : F)
    checkBasicSSA(DT, Data, BB);
#endif
}

static void findLiveSetAtInst(Instruction *Inst, GCPtrLivenessData &Data,
                              StatepointLiveSetTy &Out) {
  BasicBlock *BB = Inst->getParent();

  // Note: The copy is intentional and required.
  assert(Data.LiveOut.count(BB));
  SetVector<Value *> LiveOut = Data.LiveOut[BB];

  // We want to handle the statepoint itself specially. Its call result is not
  // live (as usual), nor are its arguments (unless they're used again later).
  // This adjusted set is specifically what we need to relocate.
  computeLiveInValues(BB->rbegin(), ++Inst->getIterator().getReverse(),
                      LiveOut);
  LiveOut.remove(Inst);
  Out.insert(LiveOut.begin(), LiveOut.end());
}

static void recomputeLiveInValues(GCPtrLivenessData &RevisedLivenessData,
                                  CallBase *Call,
                                  PartiallyConstructedSafepointRecord &Info) {
  StatepointLiveSetTy Updated;
  findLiveSetAtInst(Call, RevisedLivenessData, Updated);

  // We may have base pointers which are now live that weren't before. We need
  // to update the PointerToBase structure to reflect this.
  for (auto V : Updated)
    if (Info.PointerToBase.insert({V, V}).second) {
      assert(isKnownBaseResult(V) &&
             "Can't find base for unexpected live value!");
      continue;
    }

#ifndef NDEBUG
  for (auto V : Updated)
    assert(Info.PointerToBase.count(V) &&
           "Must be able to find base for live value!");
#endif

  // Remove any stale base mappings - this can happen since our liveness is
  // more precise than the one inherent in the base pointer analysis.
  DenseSet<Value *> ToErase;
  for (auto KVPair : Info.PointerToBase)
    if (!Updated.count(KVPair.first))
      ToErase.insert(KVPair.first);

  for (auto *V : ToErase)
    Info.PointerToBase.erase(V);

#ifndef NDEBUG
  for (auto KVPair : Info.PointerToBase)
    assert(Updated.count(KVPair.first) && "record for non-live value");
#endif

  Info.LiveSet = Updated;
}
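// Note on the stale-mapping removal in recomputeLiveInValues above: a "stale"
// entry is, for example, a gc pointer that the (conservative) base pointer
// analysis recorded in PointerToBase but that the more precise dataflow
// liveness shows is not actually live across this statepoint; dropping it
// keeps PointerToBase consistent with the final Info.LiveSet.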