//===- RewriteStatepointsForGC.cpp - Make GC relocations explicit ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Rewrite call/invoke instructions so as to make potential relocations
// performed by the garbage collector explicit in the IR.
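//
// As a simplified, illustrative sketch (not a verbatim example), a call
//
//   %obj = ...                         ; a pointer into the GC-managed heap
//   call void @foo()
//   ... use %obj ...
//
// becomes, roughly,
//
//   %tok = call token (...) @llvm.experimental.gc.statepoint(...)  ; wraps @foo
//   %obj.relocated = call coldcc i8 addrspace(1)*
//       @llvm.experimental.gc.relocate(token %tok, i32 0, i32 0)
//   ... use %obj.relocated ...
//
// making any relocation performed by the collector at the call explicit.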
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/RewriteStatepointsForGC.h"

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <set>
#include <string>
#include <utility>
#include <vector>

#define DEBUG_TYPE "rewrite-statepoints-for-gc"

using namespace llvm;

// Print the liveset found at the insert location
static cl::opt<bool> PrintLiveSet("spp-print-liveset", cl::Hidden,
                                  cl::init(false));
static cl::opt<bool> PrintLiveSetSize("spp-print-liveset-size", cl::Hidden,
                                      cl::init(false));

// Print out the base pointers for debugging
static cl::opt<bool> PrintBasePointers("spp-print-base-pointers", cl::Hidden,
                                       cl::init(false));

// Cost threshold measuring when it is profitable to rematerialize a value
// instead of relocating it
static cl::opt<unsigned>
    RematerializationThreshold("spp-rematerialization-threshold", cl::Hidden,
                               cl::init(6));

#ifdef EXPENSIVE_CHECKS
static bool ClobberNonLive = true;
#else
static bool ClobberNonLive = false;
#endif

static cl::opt<bool, true> ClobberNonLiveOverride("rs4gc-clobber-non-live",
                                                  cl::location(ClobberNonLive),
                                                  cl::Hidden);

static cl::opt<bool>
    AllowStatepointWithNoDeoptInfo("rs4gc-allow-statepoint-with-no-deopt-info",
                                   cl::Hidden, cl::init(true));

/// The IR fed into RewriteStatepointsForGC may have had attributes and
/// metadata implying dereferenceability that are no longer valid/correct after
/// RewriteStatepointsForGC has run. This is because semantically, after
/// RewriteStatepointsForGC runs, all calls to gc.statepoint "free" the entire
/// heap. stripNonValidData (conservatively) restores
/// correctness by erasing all attributes in the module that externally imply
/// dereferenceability. Similar reasoning also applies to the noalias
/// attributes and metadata. gc.statepoint can touch the entire heap including
/// noalias objects.
/// Apart from attributes and metadata, we also remove instructions that imply
/// constant physical memory: llvm.invariant.start.
static void stripNonValidData(Module &M);

static bool shouldRewriteStatepointsIn(Function &F);

PreservedAnalyses RewriteStatepointsForGC::run(Module &M,
                                               ModuleAnalysisManager &AM) {
  bool Changed = false;
  auto &FAM = AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
  for (Function &F : M) {
    // Nothing to do for declarations.
    if (F.isDeclaration() || F.empty())
      continue;

    // Policy choice says not to rewrite - the most common reason is that we're
    // compiling code without a GCStrategy.
    if (!shouldRewriteStatepointsIn(F))
      continue;

    auto &DT = FAM.getResult<DominatorTreeAnalysis>(F);
    auto &TTI = FAM.getResult<TargetIRAnalysis>(F);
    auto &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
    Changed |= runOnFunction(F, DT, TTI, TLI);
  }
  if (!Changed)
    return PreservedAnalyses::all();

  // stripNonValidData asserts that shouldRewriteStatepointsIn
  // returns true for at least one function in the module.  Since at least
  // one function changed, we know that the precondition is satisfied.
  stripNonValidData(M);

  PreservedAnalyses PA;
  PA.preserve<TargetIRAnalysis>();
  PA.preserve<TargetLibraryAnalysis>();
  return PA;
}

namespace {

class RewriteStatepointsForGCLegacyPass : public ModulePass {
  RewriteStatepointsForGC Impl;

public:
  static char ID; // Pass identification, replacement for typeid

  RewriteStatepointsForGCLegacyPass() : ModulePass(ID), Impl() {
    initializeRewriteStatepointsForGCLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  bool runOnModule(Module &M) override {
    bool Changed = false;
    for (Function &F : M) {
      // Nothing to do for declarations.
      if (F.isDeclaration() || F.empty())
        continue;

      // Policy choice says not to rewrite - the most common reason is that
      // we're compiling code without a GCStrategy.
      if (!shouldRewriteStatepointsIn(F))
        continue;

      TargetTransformInfo &TTI =
          getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
      const TargetLibraryInfo &TLI =
          getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
      auto &DT = getAnalysis<DominatorTreeWrapperPass>(F).getDomTree();

      Changed |= Impl.runOnFunction(F, DT, TTI, TLI);
    }

    if (!Changed)
      return false;

    // stripNonValidData asserts that shouldRewriteStatepointsIn
    // returns true for at least one function in the module.  Since at least
    // one function changed, we know that the precondition is satisfied.
    stripNonValidData(M);
    return true;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // We add and rewrite a bunch of instructions, but don't really do much
    // else.  We could in theory preserve a lot more analyses here.
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
  }
};

} // end anonymous namespace

char RewriteStatepointsForGCLegacyPass::ID = 0;

ModulePass *llvm::createRewriteStatepointsForGCLegacyPass() {
  return new RewriteStatepointsForGCLegacyPass();
}

INITIALIZE_PASS_BEGIN(RewriteStatepointsForGCLegacyPass,
                      "rewrite-statepoints-for-gc",
                      "Make relocations explicit at statepoints", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(RewriteStatepointsForGCLegacyPass,
                    "rewrite-statepoints-for-gc",
                    "Make relocations explicit at statepoints", false, false)

namespace {

struct GCPtrLivenessData {
  /// Values defined in this block.
  MapVector<BasicBlock *, SetVector<Value *>> KillSet;

  /// Values used in this block (and thus live); does not include values
  /// killed within this block.
  MapVector<BasicBlock *, SetVector<Value *>> LiveSet;

  /// Values live into this basic block (i.e. used by any
  /// instruction in this basic block or ones reachable from here)
  MapVector<BasicBlock *, SetVector<Value *>> LiveIn;

  /// Values live out of this basic block (i.e. live into
  /// any successor block)
  MapVector<BasicBlock *, SetVector<Value *>> LiveOut;
};
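
// For reference, the liveness computation below is a standard backward
// dataflow over these sets (a sketch of the usual equations, not code from
// this file):
//
//   LiveOut(BB) = union over successors S of LiveIn(S)
//   LiveIn(BB)  = LiveSet(BB) union (LiveOut(BB) - KillSet(BB))
//
// iterated until a fixed point is reached.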

// The type of the internal cache used inside the findBasePointers family
// of functions.  From the caller's perspective, this is an opaque type and
// should not be inspected.
//
// In the actual implementation this caches two relations:
// - The base relation itself (i.e. this pointer is based on that one)
// - The base defining value relation (i.e. before base_phi insertion)
// Generally, after the execution of a full findBasePointer call, only the
// base relation will remain.  Internally, we add a mixture of the two
// types, then update all entries of the second type to the first type.
using DefiningValueMapTy = MapVector<Value *, Value *>;
using StatepointLiveSetTy = SetVector<Value *>;
using RematerializedValueMapTy =
    MapVector<AssertingVH<Instruction>, AssertingVH<Value>>;

struct PartiallyConstructedSafepointRecord {
  /// The set of values known to be live across this safepoint
  StatepointLiveSetTy LiveSet;

  /// Mapping from live pointers to a base-defining-value
  MapVector<Value *, Value *> PointerToBase;

  /// The *new* gc.statepoint instruction itself.  This produces the token
  /// that normal path gc.relocates and the gc.result are tied to.
  GCStatepointInst *StatepointToken;

  /// Instruction to which exceptional gc relocates are attached
  /// Makes it easier to iterate through them during relocationViaAlloca.
  Instruction *UnwindToken;

  /// Record live values that we rematerialized instead of relocating.
  /// They are not included in the 'LiveSet' field.
  /// Maps each rematerialized copy to its original value.
  RematerializedValueMapTy RematerializedValues;
};

} // end anonymous namespace

static ArrayRef<Use> GetDeoptBundleOperands(const CallBase *Call) {
  Optional<OperandBundleUse> DeoptBundle =
      Call->getOperandBundle(LLVMContext::OB_deopt);

  if (!DeoptBundle.hasValue()) {
    assert(AllowStatepointWithNoDeoptInfo &&
           "Found non-leaf call without deopt info!");
    return None;
  }

  return DeoptBundle.getValue().Inputs;
}

/// Compute the live-in set for every basic block in the function
static void computeLiveInValues(DominatorTree &DT, Function &F,
                                GCPtrLivenessData &Data);

/// Given results from the dataflow liveness computation, find the set of live
/// Values at a particular instruction.
static void findLiveSetAtInst(Instruction *inst, GCPtrLivenessData &Data,
                              StatepointLiveSetTy &out);

// TODO: Once we can get to the GCStrategy, this becomes
// Optional<bool> isGCManagedPointer(const Type *Ty) const override {

static bool isGCPointerType(Type *T) {
  if (auto *PT = dyn_cast<PointerType>(T))
    // For the sake of this example GC, we arbitrarily pick addrspace(1) as our
    // GC managed heap.  We know that a pointer into this heap needs to be
    // updated and that no other pointer does.
    return PT->getAddressSpace() == 1;
  return false;
}

// Return true if this type is one which a) is a gc pointer or contains a GC
// pointer and b) is of a type this code expects to encounter as a live value.
// (The insertion code will assert that a type which matches (a) and not (b)
// is not encountered.)
static bool isHandledGCPointerType(Type *T) {
  // We fully support gc pointers
  if (isGCPointerType(T))
    return true;
  // We partially support vectors of gc pointers.  The code will assert if it
  // can't handle something.
  if (auto VT = dyn_cast<VectorType>(T))
    if (isGCPointerType(VT->getElementType()))
      return true;
  return false;
}

#ifndef NDEBUG
/// Returns true if this type contains a gc pointer whether we know how to
/// handle that type or not.
static bool containsGCPtrType(Type *Ty) {
  if (isGCPointerType(Ty))
    return true;
  if (VectorType *VT = dyn_cast<VectorType>(Ty))
    return isGCPointerType(VT->getScalarType());
  if (ArrayType *AT = dyn_cast<ArrayType>(Ty))
    return containsGCPtrType(AT->getElementType());
  if (StructType *ST = dyn_cast<StructType>(Ty))
    return llvm::any_of(ST->elements(), containsGCPtrType);
  return false;
}

// Returns true if this is a type which a) is a gc pointer or contains a GC
// pointer and b) is of a type which the code doesn't expect (i.e. first class
// aggregates).  Used to trip assertions.
static bool isUnhandledGCPointerType(Type *Ty) {
  return containsGCPtrType(Ty) && !isHandledGCPointerType(Ty);
}
#endif

// Return the name of the value suffixed with the given suffix, or, if the
// value didn't have a name, the specified default name.
static std::string suffixed_name_or(Value *V, StringRef Suffix,
                                    StringRef DefaultName) {
  return V->hasName() ? (V->getName() + Suffix).str() : DefaultName.str();
}

// Conservatively identifies any definitions which might be live at the
// given instruction.  The analysis is performed immediately before the
// given instruction.  Values defined by that instruction are not considered
// live.  Values used by that instruction are considered live.
static void analyzeParsePointLiveness(
    DominatorTree &DT, GCPtrLivenessData &OriginalLivenessData, CallBase *Call,
    PartiallyConstructedSafepointRecord &Result) {
  StatepointLiveSetTy LiveSet;
  findLiveSetAtInst(Call, OriginalLivenessData, LiveSet);

  if (PrintLiveSet) {
    dbgs() << "Live Variables:\n";
    for (Value *V : LiveSet)
      dbgs() << " " << V->getName() << " " << *V << "\n";
  }
  if (PrintLiveSetSize) {
    dbgs() << "Safepoint For: " << Call->getCalledOperand()->getName() << "\n";
    dbgs() << "Number live values: " << LiveSet.size() << "\n";
  }
  Result.LiveSet = LiveSet;
}

// Returns true if V is a known base result.
static bool isKnownBaseResult(Value *V);

// Returns true if V is a BaseResult that already exists in the IR, i.e. it is
// not created by the findBasePointers algorithm.
static bool isOriginalBaseResult(Value *V);

namespace {

/// A single base defining value - An immediate base defining value for an
/// instruction 'Def' is an input to 'Def' whose base is also a base of 'Def'.
/// For instructions which have multiple pointer [vector] inputs or that
/// transition between vector and scalar types, there is no immediate base
/// defining value.  The 'base defining value' for 'Def' is the transitive
/// closure of this relation stopping at the first instruction which has no
/// immediate base defining value.  The b.d.v. might itself be a base pointer,
/// but it can also be an arbitrary derived pointer.
struct BaseDefiningValueResult {
  /// Contains the value which is the base defining value.
  Value * const BDV;

  /// True if the base defining value is also known to be an actual base
  /// pointer.
  const bool IsKnownBase;

  BaseDefiningValueResult(Value *BDV, bool IsKnownBase)
      : BDV(BDV), IsKnownBase(IsKnownBase) {
#ifndef NDEBUG
    // Check consistency between new and old means of checking whether a BDV is
    // a base.
    bool MustBeBase = isKnownBaseResult(BDV);
    assert(!MustBeBase || MustBeBase == IsKnownBase);
#endif
  }
};

} // end anonymous namespace
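
// As a small worked example (illustrative, not taken from a test case): given
//
//   %p = phi i8 addrspace(1)* [ %a, %left ], [ %b, %right ]
//   %d = getelementptr i8, i8 addrspace(1)* %p, i64 16
//
// the immediate base defining value of %d is %p (a gep's base is its pointer
// operand's base), and since the phi has no immediate base defining value of
// its own, %p is also the base defining value of %d -- but it is not known to
// be a base pointer, so findBasePointer below must still resolve it.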

static BaseDefiningValueResult findBaseDefiningValue(Value *I);

/// Return a base defining value for the given vector instruction 'I'.  As an
/// optimization, this method will try to determine when the element (or the
/// entire vector) is known to already be a base pointer.  If this can be
/// established, the IsKnownBase field of the returned result will be true.
/// Note that either a vector or a pointer typed value can be returned.  For
/// the former, the vector returned is a BDV (and possibly a base) of the
/// entire vector 'I'.  If the latter, the returned pointer is a BDV (or
/// possibly a base) for the particular element in 'I'.
static BaseDefiningValueResult
findBaseDefiningValueOfVector(Value *I) {
  // Each case parallels findBaseDefiningValue below, see that code for
  // detailed motivation.

  if (isa<Argument>(I))
    // An incoming argument to the function is a base pointer
    return BaseDefiningValueResult(I, true);

  if (isa<Constant>(I))
    // Base of constant vector consists only of constant null pointers.
    // For reasoning see similar case inside 'findBaseDefiningValue' function.
    return BaseDefiningValueResult(ConstantAggregateZero::get(I->getType()),
                                   true);

  if (isa<LoadInst>(I))
    return BaseDefiningValueResult(I, true);

  if (isa<InsertElementInst>(I))
    // We don't know whether this vector contains entirely base pointers or
    // not.  To be conservatively correct, we treat it as a BDV and will
    // duplicate code as needed to construct a parallel vector of bases.
    return BaseDefiningValueResult(I, false);

  if (isa<ShuffleVectorInst>(I))
    // We don't know whether this vector contains entirely base pointers or
    // not.  To be conservatively correct, we treat it as a BDV and will
    // duplicate code as needed to construct a parallel vector of bases.
    // TODO: There are a number of local optimizations which could be applied
    // here for particular shufflevector patterns.
    return BaseDefiningValueResult(I, false);

  // The behavior of getelementptr instructions is the same for vector and
  // non-vector data types.
  if (auto *GEP = dyn_cast<GetElementPtrInst>(I))
    return findBaseDefiningValue(GEP->getPointerOperand());

  // If the pointer comes through a bitcast of a vector of pointers to
  // a vector of another type of pointer, then look through the bitcast
  if (auto *BC = dyn_cast<BitCastInst>(I))
    return findBaseDefiningValue(BC->getOperand(0));

  // We assume that functions in the source language only return base
  // pointers.  This should probably be generalized via attributes to support
  // both source language and internal functions.
  if (isa<CallInst>(I) || isa<InvokeInst>(I))
    return BaseDefiningValueResult(I, true);

  // A PHI or Select is a base defining value.  The outer findBasePointer
  // algorithm is responsible for constructing a base value for this BDV.
  assert((isa<SelectInst>(I) || isa<PHINode>(I)) &&
         "unknown vector instruction - no base found for vector element");
  return BaseDefiningValueResult(I, false);
}

/// Helper function for findBasePointer - Will return a value which either a)
/// defines the base pointer for the input, b) blocks the simple search
/// (i.e. a PHI or Select of two derived pointers), or c) involves a change
/// from pointer to vector type or back.
static BaseDefiningValueResult findBaseDefiningValue(Value *I) {
  assert(I->getType()->isPtrOrPtrVectorTy() &&
         "Illegal to ask for the base pointer of a non-pointer type");

  if (I->getType()->isVectorTy())
    return findBaseDefiningValueOfVector(I);

  if (isa<Argument>(I))
    // An incoming argument to the function is a base pointer
    // We should have never reached here if this argument isn't a gc value
    return BaseDefiningValueResult(I, true);

  if (isa<Constant>(I)) {
    // We assume that objects with a constant base (e.g. a global) can't move
    // and don't need to be reported to the collector because they are always
    // live.  Besides global references, all kinds of constants (e.g. undef,
    // constant expressions, null pointers) can be introduced by the inliner or
    // the optimizer, especially on dynamically dead paths.
    // Here we treat all of them as having a single null base.  By doing this
    // we try to avoid problems reporting various conflicts in a form of
    // "phi (const1, const2)" or "phi (const, regular gc ptr)".
    // See constant.ll file for relevant test cases.

    return BaseDefiningValueResult(
        ConstantPointerNull::get(cast<PointerType>(I->getType())), true);
  }

  if (CastInst *CI = dyn_cast<CastInst>(I)) {
    Value *Def = CI->stripPointerCasts();
    // If stripping pointer casts changes the address space there is an
    // addrspacecast in between.
    assert(cast<PointerType>(Def->getType())->getAddressSpace() ==
               cast<PointerType>(CI->getType())->getAddressSpace() &&
           "unsupported addrspacecast");
    // If we find a cast instruction here, it means we've found a cast which is
    // not simply a pointer cast (i.e. an inttoptr).  We don't know how to
    // handle int->ptr conversion.
    assert(!isa<CastInst>(Def) && "shouldn't find another cast here");
    return findBaseDefiningValue(Def);
  }

  if (isa<LoadInst>(I))
    // The value loaded is a gc base itself
    return BaseDefiningValueResult(I, true);

  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I))
    // The base of this GEP is the base
    return findBaseDefiningValue(GEP->getPointerOperand());

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default:
      // fall through to general call handling
      break;
    case Intrinsic::experimental_gc_statepoint:
      llvm_unreachable("statepoints don't produce pointers");
    case Intrinsic::experimental_gc_relocate:
      // Rerunning safepoint insertion after safepoints are already
      // inserted is not supported.  It could probably be made to work,
      // but why are you doing this?  There's no good reason.
      llvm_unreachable("repeat safepoint insertion is not supported");
    case Intrinsic::gcroot:
      // Currently, this mechanism hasn't been extended to work with gcroot.
      // There's no reason it couldn't be, but I haven't thought about the
      // implications much.
      llvm_unreachable(
          "interaction with the gcroot mechanism is not supported");
    }
  }
  // We assume that functions in the source language only return base
  // pointers.  This should probably be generalized via attributes to support
  // both source language and internal functions.
  if (isa<CallInst>(I) || isa<InvokeInst>(I))
    return BaseDefiningValueResult(I, true);

  // TODO: I have absolutely no idea how to implement this part yet.  It's not
  // necessarily hard, I just haven't really looked at it yet.
  assert(!isa<LandingPadInst>(I) && "Landing Pad is unimplemented");

  if (isa<AtomicCmpXchgInst>(I))
    // A CAS is effectively an atomic store and load combined under a
    // predicate.  From the perspective of base pointers, we just treat it
    // like a load.
    return BaseDefiningValueResult(I, true);

  assert(!isa<AtomicRMWInst>(I) && "Xchg handled above, all others are "
                                   "binary ops which don't apply to pointers");

  // The aggregate ops.  Aggregates can either be in the heap or on the
  // stack, but in either case, this is simply a field load.  As a result,
  // this is a defining definition of the base just like a load is.
  if (isa<ExtractValueInst>(I))
    return BaseDefiningValueResult(I, true);

  // We should never see an insert vector since that would require we be
  // tracing back a struct value not a pointer value.
  assert(!isa<InsertValueInst>(I) &&
         "Base pointer for a struct is meaningless");

  // An extractelement produces a base result exactly when its input does.
  // We may need to insert a parallel instruction to extract the appropriate
  // element out of the base vector corresponding to the input.  Given this,
  // it's analogous to the phi and select case even though it's not a merge.
  if (isa<ExtractElementInst>(I))
    // Note: There are a lot of obvious peephole cases here.  These are
    // deliberately handled after the main base pointer inference algorithm to
    // make writing test cases to exercise that code easier.
    return BaseDefiningValueResult(I, false);

  // The last two cases here don't return a base pointer.  Instead, they
  // return a value which dynamically selects from among several base
  // derived pointers (each with its own base potentially).  It's the job of
  // the caller to resolve these.
  assert((isa<SelectInst>(I) || isa<PHINode>(I)) &&
         "missing instruction case in findBaseDefiningValue");
  return BaseDefiningValueResult(I, false);
}
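
// A note on the cache (an informal summary of the DefiningValueMapTy comment
// above): findBaseDefiningValueCached records the BDV relation, e.g.
// Cache[%d] == %p for a derived pointer %d whose BDV is the phi %p, while
// findBasePointer later records the base relation for the BDV itself, e.g.
// Cache[%p] == %p.base.  findBaseOrBDV therefore performs a second lookup on
// the BDV to pick up that base once it is available.  (%d, %p and %p.base are
// hypothetical names used only for illustration.)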

/// Returns the base defining value for this value.
static Value *findBaseDefiningValueCached(Value *I, DefiningValueMapTy &Cache) {
  Value *&Cached = Cache[I];
  if (!Cached) {
    Cached = findBaseDefiningValue(I).BDV;
    LLVM_DEBUG(dbgs() << "fBDV-cached: " << I->getName() << " -> "
                      << Cached->getName() << "\n");
  }
  assert(Cache[I] != nullptr);
  return Cached;
}

/// Return a base pointer for this value if known.  Otherwise, return its
/// base defining value.
static Value *findBaseOrBDV(Value *I, DefiningValueMapTy &Cache) {
  Value *Def = findBaseDefiningValueCached(I, Cache);
  auto Found = Cache.find(Def);
  if (Found != Cache.end()) {
    // Either a base-of relation, or a self reference.  Caller must check.
    return Found->second;
  }
  // Only a BDV available
  return Def;
}

/// This value is a base pointer that is not generated by RS4GC, i.e. it
/// already exists in the code.
static bool isOriginalBaseResult(Value *V) {
  // no recursion possible
  return !isa<PHINode>(V) && !isa<SelectInst>(V) &&
         !isa<ExtractElementInst>(V) && !isa<InsertElementInst>(V) &&
         !isa<ShuffleVectorInst>(V);
}

/// Given the result of a call to findBaseDefiningValue, or findBaseOrBDV,
/// is it known to be a base pointer?  Or do we need to continue searching.
static bool isKnownBaseResult(Value *V) {
  if (isOriginalBaseResult(V))
    return true;
  if (isa<Instruction>(V) &&
      cast<Instruction>(V)->getMetadata("is_base_value")) {
    // This is a previously inserted base phi or select.  We know
    // that this is a base value.
    return true;
  }

  // We need to keep searching
  return false;
}

// Returns true if First and Second values are both scalar or both vector.
static bool areBothVectorOrScalar(Value *First, Value *Second) {
  return isa<VectorType>(First->getType()) ==
         isa<VectorType>(Second->getType());
}

namespace {

/// Models the state of a single base defining value in the findBasePointer
/// algorithm for determining where a new instruction is needed to propagate
/// the base of this BDV.
class BDVState {
public:
  enum StatusTy {
    // Starting state of lattice
    Unknown,
    // Some specific base value -- does *not* mean that instruction
    // propagates the base of the object
    // ex: gep %arg, 16 -> %arg is the base value
    Base,
    // Need to insert a node to represent a merge.
    Conflict
  };

  BDVState() {
    llvm_unreachable("missing state in map");
  }

  explicit BDVState(Value *OriginalValue)
      : OriginalValue(OriginalValue) {}
  explicit BDVState(Value *OriginalValue, StatusTy Status,
                    Value *BaseValue = nullptr)
      : OriginalValue(OriginalValue), Status(Status), BaseValue(BaseValue) {
    assert(Status != Base || BaseValue);
  }

  StatusTy getStatus() const { return Status; }
  Value *getOriginalValue() const { return OriginalValue; }
  Value *getBaseValue() const { return BaseValue; }

  bool isBase() const { return getStatus() == Base; }
  bool isUnknown() const { return getStatus() == Unknown; }
  bool isConflict() const { return getStatus() == Conflict; }

  // Values of type BDVState form a lattice, and this function implements the
  // meet operation.
  void meet(const BDVState &Other) {
    auto markConflict = [&]() {
      Status = BDVState::Conflict;
      BaseValue = nullptr;
    };
    // Conflict is a final state.
    if (isConflict())
      return;
    // If we are not known - just take the other state.
    if (isUnknown()) {
      Status = Other.getStatus();
      BaseValue = Other.getBaseValue();
      return;
    }
    // We are base.
    assert(isBase() && "Unknown state");
    // If other is unknown - just keep our state.
    if (Other.isUnknown())
      return;
    // If other is conflict - it is a final state.
    if (Other.isConflict())
      return markConflict();
    // Other is base as well.
    assert(Other.isBase() && "Unknown state");
    // If bases are different - Conflict.
    if (getBaseValue() != Other.getBaseValue())
      return markConflict();
    // We are identical, do nothing.
  }

  bool operator==(const BDVState &Other) const {
    return OriginalValue == Other.OriginalValue &&
           BaseValue == Other.BaseValue && Status == Other.Status;
  }

  bool operator!=(const BDVState &Other) const { return !(*this == Other); }

  LLVM_DUMP_METHOD
  void dump() const {
    print(dbgs());
    dbgs() << '\n';
  }

  void print(raw_ostream &OS) const {
    switch (getStatus()) {
    case Unknown:
      OS << "U";
      break;
    case Base:
      OS << "B";
      break;
    case Conflict:
      OS << "C";
      break;
    }
    OS << " (base " << getBaseValue() << " - "
       << (getBaseValue() ? getBaseValue()->getName() : "nullptr") << ")"
       << " for " << OriginalValue->getName() << ":";
  }

private:
  AssertingVH<Value> OriginalValue; // instruction this state corresponds to
  StatusTy Status = Unknown;
  AssertingVH<Value> BaseValue = nullptr; // Non-null only if Status == Base.
};

} // end anonymous namespace

#ifndef NDEBUG
static raw_ostream &operator<<(raw_ostream &OS, const BDVState &State) {
  State.print(OS);
  return OS;
}
#endif

/// For a given value or instruction, figure out what base ptr it's derived
/// from.  For gc objects, this is simply itself.  On success, returns a value
/// which is the base pointer.  (This is reliable and can be used for
/// relocation.)  On failure, returns nullptr.
static Value *findBasePointer(Value *I, DefiningValueMapTy &Cache) {
  Value *Def = findBaseOrBDV(I, Cache);

  if (isKnownBaseResult(Def) && areBothVectorOrScalar(Def, I))
    return Def;

  // Here's the rough algorithm:
  // - For every SSA value, construct a mapping to either an actual base
  //   pointer or a PHI which obscures the base pointer.
  // - Construct a mapping from PHI to unknown TOP state.  Use an
  //   optimistic algorithm to propagate base pointer information.  Lattice
  //   looks like:
  //   UNKNOWN
  //   b1 b2 b3 b4
  //   CONFLICT
  //   When algorithm terminates, all PHIs will either have a single concrete
  //   base or be in a conflict state.
  // - For every conflict, insert a dummy PHI node without arguments.  Add
  //   these to the base[Instruction] = BasePtr mapping.  For every
  //   non-conflict, add the actual base.
  // - For every conflict, add arguments for the base[a] of each input
  //   arguments.
  //
  // Note: A simpler form of this would be to add the conflict form of all
  // PHIs without running the optimistic algorithm.  This would be
  // analogous to pessimistic data flow and would likely lead to an
  // overall worse solution.
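
  // Worked example (illustrative, hypothetical values): given
  //   %d1 = getelementptr i8, i8 addrspace(1)* %a, i64 8   ; base %a
  //   %d2 = getelementptr i8, i8 addrspace(1)* %b, i64 8   ; base %b
  //   %p  = phi i8 addrspace(1)* [ %d1, %left ], [ %d2, %right ]
  // the phi's operands have the distinct bases %a and %b, so the meet lands
  // on Conflict and a parallel phi
  //   %p.base = phi i8 addrspace(1)* [ %a, %left ], [ %b, %right ]
  // is inserted and recorded as the base of %p.  If instead both operands had
  // shared the base %a, %p would simply have been assigned base %a with no
  // new instructions.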

#ifndef NDEBUG
  auto isExpectedBDVType = [](Value *BDV) {
    return isa<PHINode>(BDV) || isa<SelectInst>(BDV) ||
           isa<ExtractElementInst>(BDV) || isa<InsertElementInst>(BDV) ||
           isa<ShuffleVectorInst>(BDV);
  };
#endif

  // Once populated, will contain a mapping from each potentially non-base BDV
  // to a lattice value (described above) which corresponds to that BDV.
  // We use the order of insertion (DFS over the def/use graph) to provide a
  // stable deterministic ordering for visiting DenseMaps (which are unordered)
  // below.  This is important for deterministic compilation.
  MapVector<Value *, BDVState> States;

#ifndef NDEBUG
  auto VerifyStates = [&]() {
    for (auto &Entry : States) {
      assert(Entry.first == Entry.second.getOriginalValue());
    }
  };
#endif

  auto visitBDVOperands = [](Value *BDV, std::function<void (Value*)> F) {
    if (PHINode *PN = dyn_cast<PHINode>(BDV)) {
      for (Value *InVal : PN->incoming_values())
        F(InVal);
    } else if (SelectInst *SI = dyn_cast<SelectInst>(BDV)) {
      F(SI->getTrueValue());
      F(SI->getFalseValue());
    } else if (auto *EE = dyn_cast<ExtractElementInst>(BDV)) {
      F(EE->getVectorOperand());
    } else if (auto *IE = dyn_cast<InsertElementInst>(BDV)) {
      F(IE->getOperand(0));
      F(IE->getOperand(1));
    } else if (auto *SV = dyn_cast<ShuffleVectorInst>(BDV)) {
      // For a canonical broadcast, ignore the undef argument
      // (without this, we insert a parallel base shuffle for every broadcast)
      F(SV->getOperand(0));
      if (!SV->isZeroEltSplat())
        F(SV->getOperand(1));
    } else {
      llvm_unreachable("unexpected BDV type");
    }
  };

  // Recursively fill in all base defining values reachable from the initial
  // one for which we don't already know a definite base value.
  /* scope */ {
    SmallVector<Value*, 16> Worklist;
    Worklist.push_back(Def);
    States.insert({Def, BDVState(Def)});
    while (!Worklist.empty()) {
      Value *Current = Worklist.pop_back_val();
      assert(!isOriginalBaseResult(Current) && "why did it get added?");

      auto visitIncomingValue = [&](Value *InVal) {
        Value *Base = findBaseOrBDV(InVal, Cache);
        if (isKnownBaseResult(Base) && areBothVectorOrScalar(Base, InVal))
          // Known bases won't need new instructions introduced and can be
          // ignored safely.  However, this can only be done when InVal and
          // Base are both scalar or both vector.  Otherwise, we need to find
          // a correct BDV for InVal, by creating an entry in the lattice
          // (States).
          return;
        assert(isExpectedBDVType(Base) && "the only non-base values "
               "we see should be base defining values");
        if (States.insert(std::make_pair(Base, BDVState(Base))).second)
          Worklist.push_back(Base);
      };

      visitBDVOperands(Current, visitIncomingValue);
    }
  }

#ifndef NDEBUG
  VerifyStates();
  LLVM_DEBUG(dbgs() << "States after initialization:\n");
  for (auto Pair : States) {
    LLVM_DEBUG(dbgs() << " " << Pair.second << " for " << *Pair.first << "\n");
  }
#endif

  // Iterate forward through the value graph pruning any node from the state
  // list where all of the inputs are base pointers.  The purpose of this is to
  // reuse existing values when the derived pointer we were asked to
  // materialize a base pointer for happens to be a base pointer itself.  (Or
  // a sub-graph feeding it does.)
  SmallVector<Value *> ToRemove;
  do {
    ToRemove.clear();
    for (auto Pair : States) {
      Value *BDV = Pair.first;
      auto canPruneInput = [&](Value *V) {
        Value *BDV = findBaseOrBDV(V, Cache);
        if (V->stripPointerCasts() != BDV)
          return false;
        // The assumption is that anything not in the state list
        // propagates a base pointer.
        return States.count(BDV) == 0;
      };

      bool CanPrune = true;
      visitBDVOperands(BDV, [&](Value *Op) {
        CanPrune = CanPrune && canPruneInput(Op);
      });
      if (CanPrune)
        ToRemove.push_back(BDV);
    }
    for (Value *V : ToRemove) {
      States.erase(V);
      // Cache the fact V is its own base for later usage.
      Cache[V] = V;
    }
  } while (!ToRemove.empty());

  // Did we manage to prove that Def itself must be a base pointer?
  if (!States.count(Def))
    return Def;

  // Return a phi state for a base defining value.  We'll generate a new
  // base state for known bases and expect to find a cached state otherwise.
  auto GetStateForBDV = [&](Value *BaseValue, Value *Input) {
    auto I = States.find(BaseValue);
    if (I != States.end())
      return I->second;
    assert(areBothVectorOrScalar(BaseValue, Input));
    return BDVState(BaseValue, BDVState::Base, BaseValue);
  };

  bool Progress = true;
  while (Progress) {
#ifndef NDEBUG
    const size_t OldSize = States.size();
#endif
    Progress = false;
    // We're only changing values in this loop, thus safe to keep iterators.
    // Since this is computing a fixed point, the order of visit does not
    // affect the result.  TODO: We could use a worklist here and make this run
    // much faster.
    for (auto Pair : States) {
      Value *BDV = Pair.first;
      // Only values that do not have known bases or those that have differing
      // type (scalar versus vector) from a possible known base should be in
      // the lattice.
      assert((!isKnownBaseResult(BDV) ||
              !areBothVectorOrScalar(BDV, Pair.second.getBaseValue())) &&
             "why did it get added?");

      BDVState NewState(BDV);
      visitBDVOperands(BDV, [&](Value *Op) {
        Value *BDV = findBaseOrBDV(Op, Cache);
        auto OpState = GetStateForBDV(BDV, Op);
        NewState.meet(OpState);
      });

      BDVState OldState = States[BDV];
      if (OldState != NewState) {
        Progress = true;
        States[BDV] = NewState;
      }
    }

    assert(OldSize == States.size() &&
           "fixed point shouldn't be adding any new nodes to state");
  }

#ifndef NDEBUG
  VerifyStates();
  LLVM_DEBUG(dbgs() << "States after meet iteration:\n");
  for (auto Pair : States) {
    LLVM_DEBUG(dbgs() << " " << Pair.second << " for " << *Pair.first << "\n");
  }
#endif

  // Handle all instructions that have a vector BDV, but the instruction itself
  // is of scalar type.
  for (auto Pair : States) {
    Instruction *I = cast<Instruction>(Pair.first);
    BDVState State = Pair.second;
    auto *BaseValue = State.getBaseValue();
    // Only values that do not have known bases or those that have differing
    // type (scalar versus vector) from a possible known base should be in the
    // lattice.
    assert((!isKnownBaseResult(I) || !areBothVectorOrScalar(I, BaseValue)) &&
           "why did it get added?");
    assert(!State.isUnknown() && "Optimistic algorithm didn't complete!");

    if (!State.isBase() || !isa<VectorType>(BaseValue->getType()))
      continue;
    // extractelement instructions are a bit special in that we may need to
    // insert an extract even when we know an exact base for the instruction.
    // The problem is that we need to convert from a vector base to a scalar
    // base for the particular index we're interested in.
    if (isa<ExtractElementInst>(I)) {
      auto *EE = cast<ExtractElementInst>(I);
      // TODO: In many cases, the new instruction is just EE itself.  We should
      // exploit this, but can't do it here since it would break the invariant
      // about the BDV not being known to be a base.
      auto *BaseInst = ExtractElementInst::Create(
          State.getBaseValue(), EE->getIndexOperand(), "base_ee", EE);
      BaseInst->setMetadata("is_base_value", MDNode::get(I->getContext(), {}));
      States[I] = BDVState(I, BDVState::Base, BaseInst);
    } else if (!isa<VectorType>(I->getType())) {
      // We need to handle cases that have a vector base but the instruction is
      // a scalar type (these could be phis or selects or any instruction that
      // are of scalar type, but the base can be a vector type).  We
      // conservatively set this as conflict.  Setting the base value for these
      // conflicts is handled in the next loop which traverses States.
      States[I] = BDVState(I, BDVState::Conflict);
    }
  }

#ifndef NDEBUG
  VerifyStates();
#endif

  // Insert Phis for all conflicts
  // TODO: adjust naming patterns to avoid this order of iteration dependency
  for (auto Pair : States) {
    Instruction *I = cast<Instruction>(Pair.first);
    BDVState State = Pair.second;
    // Only values that do not have known bases or those that have differing
    // type (scalar versus vector) from a possible known base should be in the
    // lattice.
    assert((!isKnownBaseResult(I) ||
            !areBothVectorOrScalar(I, State.getBaseValue())) &&
           "why did it get added?");
    assert(!State.isUnknown() && "Optimistic algorithm didn't complete!");

    // Since we're joining a vector and scalar base, they can never be the
    // same.  As a result, we should always see insert element having reached
    // the conflict state.
    assert(!isa<InsertElementInst>(I) || State.isConflict());

    if (!State.isConflict())
      continue;

    auto getMangledName = [](Instruction *I) -> std::string {
      if (isa<PHINode>(I)) {
        return suffixed_name_or(I, ".base", "base_phi");
      } else if (isa<SelectInst>(I)) {
        return suffixed_name_or(I, ".base", "base_select");
      } else if (isa<ExtractElementInst>(I)) {
        return suffixed_name_or(I, ".base", "base_ee");
      } else if (isa<InsertElementInst>(I)) {
        return suffixed_name_or(I, ".base", "base_ie");
      } else {
        return suffixed_name_or(I, ".base", "base_sv");
      }
    };

    Instruction *BaseInst = I->clone();
    BaseInst->insertBefore(I);
    BaseInst->setName(getMangledName(I));
    // Add metadata marking this as a base value
    BaseInst->setMetadata("is_base_value", MDNode::get(I->getContext(), {}));
    States[I] = BDVState(I, BDVState::Conflict, BaseInst);
  }

#ifndef NDEBUG
  VerifyStates();
#endif

  // Returns an instruction which produces the base pointer for a given
  // instruction.  The instruction is assumed to be an input to one of the BDVs
  // seen in the inference algorithm above.  As such, we must either already
  // know its base defining value is a base, or have inserted a new
  // instruction to propagate the base of its BDV and have entered that newly
  // introduced instruction into the state table.  In either case, we are
  // assured to be able to determine an instruction which produces its base
  // pointer.
  auto getBaseForInput = [&](Value *Input, Instruction *InsertPt) {
    Value *BDV = findBaseOrBDV(Input, Cache);
    Value *Base = nullptr;
    if (!States.count(BDV)) {
      assert(areBothVectorOrScalar(BDV, Input));
      Base = BDV;
    } else {
      // Either conflict or base.
      assert(States.count(BDV));
      Base = States[BDV].getBaseValue();
    }
    assert(Base && "Can't be null");
    // The cast is needed since base traversal may strip away bitcasts
    if (Base->getType() != Input->getType() && InsertPt)
      Base = new BitCastInst(Base, Input->getType(), "cast", InsertPt);
    return Base;
  };

  // Fixup all the inputs of the new PHIs.  Visit order needs to be
  // deterministic and predictable because we're naming newly created
  // instructions.
  for (auto Pair : States) {
    Instruction *BDV = cast<Instruction>(Pair.first);
    BDVState State = Pair.second;

    // Only values that do not have known bases or those that have differing
    // type (scalar versus vector) from a possible known base should be in the
    // lattice.
    assert((!isKnownBaseResult(BDV) ||
            !areBothVectorOrScalar(BDV, State.getBaseValue())) &&
           "why did it get added?");
    assert(!State.isUnknown() && "Optimistic algorithm didn't complete!");
    if (!State.isConflict())
      continue;

    if (PHINode *BasePHI = dyn_cast<PHINode>(State.getBaseValue())) {
      PHINode *PN = cast<PHINode>(BDV);
      const unsigned NumPHIValues = PN->getNumIncomingValues();

      // The IR verifier requires phi nodes with multiple entries from the
      // same basic block to have the same incoming value for each of those
      // entries.  Since we're inserting bitcasts in the loop, make sure we
      // do so at least once per incoming block.
      DenseMap<BasicBlock *, Value *> BlockToValue;
      for (unsigned i = 0; i < NumPHIValues; i++) {
        Value *InVal = PN->getIncomingValue(i);
        BasicBlock *InBB = PN->getIncomingBlock(i);
        if (!BlockToValue.count(InBB))
          BlockToValue[InBB] = getBaseForInput(InVal, InBB->getTerminator());
        else {
#ifndef NDEBUG
          Value *OldBase = BlockToValue[InBB];
          Value *Base = getBaseForInput(InVal, nullptr);
          // In essence this assert states: the only way two values
          // incoming from the same basic block may be different is by
          // being different bitcasts of the same value.  A cleanup
          // that remains TODO is changing findBaseOrBDV to return an
          // llvm::Value of the correct type (and still remain pure).
          // This will remove the need to add bitcasts.
          assert(Base->stripPointerCasts() == OldBase->stripPointerCasts() &&
                 "Sanity -- findBaseOrBDV should be pure!");
#endif
        }
        Value *Base = BlockToValue[InBB];
        BasePHI->setIncomingValue(i, Base);
      }
    } else if (SelectInst *BaseSI =
                   dyn_cast<SelectInst>(State.getBaseValue())) {
      SelectInst *SI = cast<SelectInst>(BDV);

      // Find the instruction which produces the base for each input.
      // We may need to insert a bitcast.
      BaseSI->setTrueValue(getBaseForInput(SI->getTrueValue(), BaseSI));
      BaseSI->setFalseValue(getBaseForInput(SI->getFalseValue(), BaseSI));
    } else if (auto *BaseEE =
                   dyn_cast<ExtractElementInst>(State.getBaseValue())) {
      Value *InVal = cast<ExtractElementInst>(BDV)->getVectorOperand();
      // Find the instruction which produces the base for each input.  We may
      // need to insert a bitcast.
      BaseEE->setOperand(0, getBaseForInput(InVal, BaseEE));
    } else if (auto *BaseIE =
                   dyn_cast<InsertElementInst>(State.getBaseValue())) {
      auto *BdvIE = cast<InsertElementInst>(BDV);
      auto UpdateOperand = [&](int OperandIdx) {
        Value *InVal = BdvIE->getOperand(OperandIdx);
        Value *Base = getBaseForInput(InVal, BaseIE);
        BaseIE->setOperand(OperandIdx, Base);
      };
      UpdateOperand(0); // vector operand
      UpdateOperand(1); // scalar operand
    } else {
      auto *BaseSV = cast<ShuffleVectorInst>(State.getBaseValue());
      auto *BdvSV = cast<ShuffleVectorInst>(BDV);
      auto UpdateOperand = [&](int OperandIdx) {
        Value *InVal = BdvSV->getOperand(OperandIdx);
        Value *Base = getBaseForInput(InVal, BaseSV);
        BaseSV->setOperand(OperandIdx, Base);
      };
      UpdateOperand(0); // vector operand
      if (!BdvSV->isZeroEltSplat())
        UpdateOperand(1); // vector operand
      else {
        // Never read, so just use undef
        Value *InVal = BdvSV->getOperand(1);
        BaseSV->setOperand(1, UndefValue::get(InVal->getType()));
      }
    }
  }

#ifndef NDEBUG
  VerifyStates();
#endif

  // Cache all of our results so we can cheaply reuse them
  // NOTE: This is actually two caches: one of the base defining value
  // relation and one of the base pointer relation!  FIXME
  for (auto Pair : States) {
    auto *BDV = Pair.first;
    Value *Base = Pair.second.getBaseValue();
    assert(BDV && Base);
    // Only values that do not have known bases or those that have differing
    // type (scalar versus vector) from a possible known base should be in the
    // lattice.
    assert((!isKnownBaseResult(BDV) || !areBothVectorOrScalar(BDV, Base)) &&
           "why did it get added?");

    LLVM_DEBUG(
        dbgs() << "Updating base value cache"
               << " for: " << BDV->getName() << " from: "
               << (Cache.count(BDV) ? Cache[BDV]->getName().str() : "none")
               << " to: " << Base->getName() << "\n");

    Cache[BDV] = Base;
  }
  assert(Cache.count(Def));
  return Cache[Def];
}

// For a set of live pointers (base and/or derived), identify the base
// pointer of the object which they are derived from.  This routine will
// mutate the IR graph as needed to make the 'base' pointer live at the
// definition site of 'derived'.  This ensures that any use of 'derived' can
// also use 'base'.  This may involve the insertion of a number of
// additional PHI nodes.
//
// preconditions: live is a set of pointer type Values
//
// side effects: may insert PHI nodes into the existing CFG, will preserve
// CFG, will not remove or mutate any existing nodes
//
// post condition: PointerToBase contains one (derived, base) pair for every
// pointer in live.  Note that derived can be equal to base if the original
// pointer was a base pointer.
static void
findBasePointers(const StatepointLiveSetTy &live,
                 MapVector<Value *, Value *> &PointerToBase,
                 DominatorTree *DT, DefiningValueMapTy &DVCache) {
  for (Value *ptr : live) {
    Value *base = findBasePointer(ptr, DVCache);
    assert(base && "failed to find base pointer");
    PointerToBase[ptr] = base;
    assert((!isa<Instruction>(base) || !isa<Instruction>(ptr) ||
            DT->dominates(cast<Instruction>(base)->getParent(),
                          cast<Instruction>(ptr)->getParent())) &&
           "The base we found better dominate the derived pointer");
  }
}

/// Find the required base pointers (and adjust the live set) for the given
/// parse point.
static void findBasePointers(DominatorTree &DT, DefiningValueMapTy &DVCache,
                             CallBase *Call,
                             PartiallyConstructedSafepointRecord &result) {
  MapVector<Value *, Value *> PointerToBase;
  StatepointLiveSetTy PotentiallyDerivedPointers = result.LiveSet;
  // We assume that all pointers passed to deopt are base pointers; as an
  // optimization, we can use this to avoid separately materializing the base
  // pointer graph.  This is only relevant since we're very conservative about
  // generating new conflict nodes during base pointer insertion.  If we were
  // smarter there, this would be irrelevant.
  if (auto Opt = Call->getOperandBundle(LLVMContext::OB_deopt))
    for (Value *V : Opt->Inputs) {
      if (!PotentiallyDerivedPointers.count(V))
        continue;
      PotentiallyDerivedPointers.remove(V);
      PointerToBase[V] = V;
    }
  findBasePointers(PotentiallyDerivedPointers, PointerToBase, &DT, DVCache);

  if (PrintBasePointers) {
    errs() << "Base Pairs (w/o Relocation):\n";
    for (auto &Pair : PointerToBase) {
      errs() << " derived ";
      Pair.first->printAsOperand(errs(), false);
      errs() << " base ";
      Pair.second->printAsOperand(errs(), false);
      errs() << "\n";
    }
  }

  result.PointerToBase = PointerToBase;
}

/// Given an updated version of the dataflow liveness results, update the
/// liveset and base pointer maps for the given call site.
static void recomputeLiveInValues(GCPtrLivenessData &RevisedLivenessData,
                                  CallBase *Call,
                                  PartiallyConstructedSafepointRecord &result);

static void recomputeLiveInValues(
    Function &F, DominatorTree &DT, ArrayRef<CallBase *> toUpdate,
    MutableArrayRef<struct PartiallyConstructedSafepointRecord> records) {
  // TODO-PERF: reuse the original liveness, then simply run the dataflow
  // again.  The old values are still live and will help it stabilize quickly.
  GCPtrLivenessData RevisedLivenessData;
  computeLiveInValues(DT, F, RevisedLivenessData);
  for (size_t i = 0; i < records.size(); i++) {
    struct PartiallyConstructedSafepointRecord &info = records[i];
    recomputeLiveInValues(RevisedLivenessData, toUpdate[i], info);
  }
}

// When inserting gc.relocate and gc.result calls, we need to ensure there are
// no uses of the original value / return value between the gc.statepoint and
// the gc.relocate / gc.result call.  One case which can arise is a phi node
// at the start of one of the successor blocks.  We also need to be able to
// insert the gc.relocates only on the path which goes through the statepoint.
// We might need to split an edge to make this possible.
static BasicBlock *
normalizeForInvokeSafepoint(BasicBlock *BB, BasicBlock *InvokeParent,
                            DominatorTree &DT) {
  BasicBlock *Ret = BB;
  if (!BB->getUniquePredecessor())
    Ret = SplitBlockPredecessors(BB, InvokeParent, "", &DT);

  // Now that 'Ret' has a unique predecessor we can safely remove all phi
  // nodes from it
  FoldSingleEntryPHINodes(Ret);
  assert(!isa<PHINode>(Ret->begin()) &&
         "All PHI nodes should have been removed!");

  // At this point, we can safely insert a gc.relocate or gc.result as the
  // first instruction in Ret if needed.
  return Ret;
}

// Create new attribute set containing only attributes which can be transferred
// from the original call to the safepoint.
static AttributeList legalizeCallAttributes(LLVMContext &Ctx,
                                            AttributeList AL) {
  if (AL.isEmpty())
    return AL;

  // Remove the readonly, readnone, and statepoint function attributes.
  AttrBuilder FnAttrs = AL.getFnAttributes();
  for (auto Attr : {Attribute::ReadNone, Attribute::ReadOnly,
                    Attribute::NoSync, Attribute::NoFree})
    FnAttrs.removeAttribute(Attr);

  for (Attribute A : AL.getFnAttributes()) {
    if (isStatepointDirectiveAttr(A))
      FnAttrs.remove(A);
  }

  // Just skip parameter and return attributes for now
  return AttributeList::get(Ctx, AttributeList::FunctionIndex,
                            AttributeSet::get(Ctx, FnAttrs));
}

/// Helper function to place all gc relocates necessary for the given
/// statepoint.
/// Inputs:
///   liveVariables - list of variables to be relocated.
///   basePtrs - base pointers.
///   statepointToken - statepoint instruction to which relocates should be
///   bound.
///   Builder - LLVM IR builder to be used to construct new calls.
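///
/// For example (sketch with hypothetical values): if LiveVariables is
/// [%base, %derived] and BasePtrs is [%base, %base], the relocate emitted for
/// %derived looks roughly like
///   %derived.relocated = call coldcc i8 addrspace(1)*
///       @llvm.experimental.gc.relocate.p1i8(token %statepoint_token,
///                                           i32 0, i32 1)
/// where the two trailing indices identify the base and the derived pointer
/// within the statepoint's gc live operand list.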
static void CreateGCRelocates(ArrayRef<Value *> LiveVariables,
                              ArrayRef<Value *> BasePtrs,
                              Instruction *StatepointToken,
                              IRBuilder<> &Builder) {
  if (LiveVariables.empty())
    return;

  auto FindIndex = [](ArrayRef<Value *> LiveVec, Value *Val) {
    auto ValIt = llvm::find(LiveVec, Val);
    assert(ValIt != LiveVec.end() && "Val not found in LiveVec!");
    size_t Index = std::distance(LiveVec.begin(), ValIt);
    assert(Index < LiveVec.size() && "Bug in std::find?");
    return Index;
  };
  Module *M = StatepointToken->getModule();

  // All gc_relocate are generated as i8 addrspace(1)* (or a vector type whose
  // element type is i8 addrspace(1)*).  We originally generated unique
  // declarations for each pointer type, but this proved problematic because
  // the intrinsic mangling code is incomplete and fragile.  Since we're moving
  // towards a single unified pointer type anyways, we can just cast everything
  // to an i8* of the right address space.  A bitcast is added later to convert
  // gc_relocate to the actual value's type.
  auto getGCRelocateDecl = [&](Type *Ty) {
    assert(isHandledGCPointerType(Ty));
    auto AS = Ty->getScalarType()->getPointerAddressSpace();
    Type *NewTy = Type::getInt8PtrTy(M->getContext(), AS);
    if (auto *VT = dyn_cast<VectorType>(Ty))
      NewTy = FixedVectorType::get(NewTy,
                                   cast<FixedVectorType>(VT)->getNumElements());
    return Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_relocate,
                                     {NewTy});
  };

  // Lazily populated map from input types to the canonicalized form mentioned
  // in the comment above.  This should probably be cached somewhere more
  // broadly.
  DenseMap<Type *, Function *> TypeToDeclMap;

  for (unsigned i = 0; i < LiveVariables.size(); i++) {
    // Generate the gc.relocate call and save the result
    Value *BaseIdx = Builder.getInt32(FindIndex(LiveVariables, BasePtrs[i]));
    Value *LiveIdx = Builder.getInt32(i);

    Type *Ty = LiveVariables[i]->getType();
    if (!TypeToDeclMap.count(Ty))
      TypeToDeclMap[Ty] = getGCRelocateDecl(Ty);
    Function *GCRelocateDecl = TypeToDeclMap[Ty];

    // only specify a debug name if we can give a useful one
    CallInst *Reloc = Builder.CreateCall(
        GCRelocateDecl, {StatepointToken, BaseIdx, LiveIdx},
        suffixed_name_or(LiveVariables[i], ".relocated", ""));
    // Trick CodeGen into thinking there are lots of free registers at this
    // fake call.
    Reloc->setCallingConv(CallingConv::Cold);
  }
}

namespace {

/// This class is used to defer RAUWs and `eraseFromParent`s.  Using this
/// avoids having to worry about keeping around dangling pointers to Values.
class DeferredReplacement {
  AssertingVH<Instruction> Old;
  AssertingVH<Instruction> New;
  bool IsDeoptimize = false;

  DeferredReplacement() = default;

public:
  static DeferredReplacement createRAUW(Instruction *Old, Instruction *New) {
    assert(Old != New && Old && New &&
           "Cannot RAUW equal values or to / from null!");

    DeferredReplacement D;
    D.Old = Old;
    D.New = New;
    return D;
  }

  static DeferredReplacement createDelete(Instruction *ToErase) {
    DeferredReplacement D;
    D.Old = ToErase;
    return D;
  }

  static DeferredReplacement createDeoptimizeReplacement(Instruction *Old) {
#ifndef NDEBUG
    auto *F = cast<CallInst>(Old)->getCalledFunction();
    assert(F && F->getIntrinsicID() == Intrinsic::experimental_deoptimize &&
           "Only way to construct a deoptimize deferred replacement");
#endif
    DeferredReplacement D;
    D.Old = Old;
    D.IsDeoptimize = true;
    return D;
  }

  /// Does the task represented by this instance.
  void doReplacement() {
    Instruction *OldI = Old;
    Instruction *NewI = New;

    assert(OldI != NewI && "Disallowed at construction?!");
    assert((!IsDeoptimize || !New) &&
           "Deoptimize intrinsics are not replaced!");

    Old = nullptr;
    New = nullptr;

    if (NewI)
      OldI->replaceAllUsesWith(NewI);

    if (IsDeoptimize) {
      // Note: we've inserted instructions, so the call to llvm.deoptimize may
      // not necessarily be followed by the matching return.
      auto *RI = cast<ReturnInst>(OldI->getParent()->getTerminator());
      new UnreachableInst(RI->getContext(), RI);
      RI->eraseFromParent();
    }

    OldI->eraseFromParent();
  }
};

} // end anonymous namespace

static StringRef getDeoptLowering(CallBase *Call) {
  const char *DeoptLowering = "deopt-lowering";
  if (Call->hasFnAttr(DeoptLowering)) {
    // FIXME: Calls have a *really* confusing interface around attributes
    // with values.
1500 const AttributeList &CSAS = Call->getAttributes(); 1501 if (CSAS.hasAttribute(AttributeList::FunctionIndex, DeoptLowering)) 1502 return CSAS.getAttribute(AttributeList::FunctionIndex, DeoptLowering) 1503 .getValueAsString(); 1504 Function *F = Call->getCalledFunction(); 1505 assert(F && F->hasFnAttribute(DeoptLowering)); 1506 return F->getFnAttribute(DeoptLowering).getValueAsString(); 1507 } 1508 return "live-through"; 1509 } 1510 1511 static void 1512 makeStatepointExplicitImpl(CallBase *Call, /* to replace */ 1513 const SmallVectorImpl<Value *> &BasePtrs, 1514 const SmallVectorImpl<Value *> &LiveVariables, 1515 PartiallyConstructedSafepointRecord &Result, 1516 std::vector<DeferredReplacement> &Replacements) { 1517 assert(BasePtrs.size() == LiveVariables.size()); 1518 1519 // Then go ahead and use the builder do actually do the inserts. We insert 1520 // immediately before the previous instruction under the assumption that all 1521 // arguments will be available here. We can't insert afterwards since we may 1522 // be replacing a terminator. 1523 IRBuilder<> Builder(Call); 1524 1525 ArrayRef<Value *> GCArgs(LiveVariables); 1526 uint64_t StatepointID = StatepointDirectives::DefaultStatepointID; 1527 uint32_t NumPatchBytes = 0; 1528 uint32_t Flags = uint32_t(StatepointFlags::None); 1529 1530 SmallVector<Value *, 8> CallArgs(Call->args()); 1531 Optional<ArrayRef<Use>> DeoptArgs; 1532 if (auto Bundle = Call->getOperandBundle(LLVMContext::OB_deopt)) 1533 DeoptArgs = Bundle->Inputs; 1534 Optional<ArrayRef<Use>> TransitionArgs; 1535 if (auto Bundle = Call->getOperandBundle(LLVMContext::OB_gc_transition)) { 1536 TransitionArgs = Bundle->Inputs; 1537 // TODO: This flag no longer serves a purpose and can be removed later 1538 Flags |= uint32_t(StatepointFlags::GCTransition); 1539 } 1540 1541 // Instead of lowering calls to @llvm.experimental.deoptimize as normal calls 1542 // with a return value, we lower then as never returning calls to 1543 // __llvm_deoptimize that are followed by unreachable to get better codegen. 1544 bool IsDeoptimize = false; 1545 1546 StatepointDirectives SD = 1547 parseStatepointDirectivesFromAttrs(Call->getAttributes()); 1548 if (SD.NumPatchBytes) 1549 NumPatchBytes = *SD.NumPatchBytes; 1550 if (SD.StatepointID) 1551 StatepointID = *SD.StatepointID; 1552 1553 // Pass through the requested lowering if any. The default is live-through. 1554 StringRef DeoptLowering = getDeoptLowering(Call); 1555 if (DeoptLowering.equals("live-in")) 1556 Flags |= uint32_t(StatepointFlags::DeoptLiveIn); 1557 else { 1558 assert(DeoptLowering.equals("live-through") && "Unsupported value!"); 1559 } 1560 1561 Value *CallTarget = Call->getCalledOperand(); 1562 if (Function *F = dyn_cast<Function>(CallTarget)) { 1563 auto IID = F->getIntrinsicID(); 1564 if (IID == Intrinsic::experimental_deoptimize) { 1565 // Calls to llvm.experimental.deoptimize are lowered to calls to the 1566 // __llvm_deoptimize symbol. We want to resolve this now, since the 1567 // verifier does not allow taking the address of an intrinsic function. 1568 1569 SmallVector<Type *, 8> DomainTy; 1570 for (Value *Arg : CallArgs) 1571 DomainTy.push_back(Arg->getType()); 1572 auto *FTy = FunctionType::get(Type::getVoidTy(F->getContext()), DomainTy, 1573 /* isVarArg = */ false); 1574 1575 // Note: CallTarget can be a bitcast instruction of a symbol if there are 1576 // calls to @llvm.experimental.deoptimize with different argument types in 1577 // the same module. 
This is fine -- we assume the frontend knew what it 1578 // was doing when generating this kind of IR. 1579 CallTarget = F->getParent() 1580 ->getOrInsertFunction("__llvm_deoptimize", FTy) 1581 .getCallee(); 1582 1583 IsDeoptimize = true; 1584 } else if (IID == Intrinsic::memcpy_element_unordered_atomic || 1585 IID == Intrinsic::memmove_element_unordered_atomic) { 1586 // Unordered atomic memcpy and memmove intrinsics which are not explicitly 1587 // marked as "gc-leaf-function" should be lowered in a GC parseable way. 1588 // Specifically, these calls should be lowered to the 1589 // __llvm_{memcpy|memmove}_element_unordered_atomic_safepoint symbols. 1590 // Similarly to __llvm_deoptimize we want to resolve this now, since the 1591 // verifier does not allow taking the address of an intrinsic function. 1592 // 1593 // Moreover we need to shuffle the arguments for the call in order to 1594 // accommodate GC. The underlying source and destination objects might be 1595 // relocated during copy operation should the GC occur. To relocate the 1596 // derived source and destination pointers the implementation of the 1597 // intrinsic should know the corresponding base pointers. 1598 // 1599 // To make the base pointers available pass them explicitly as arguments: 1600 // memcpy(dest_derived, source_derived, ...) => 1601 // memcpy(dest_base, dest_offset, source_base, source_offset, ...) 1602 auto &Context = Call->getContext(); 1603 auto &DL = Call->getModule()->getDataLayout(); 1604 auto GetBaseAndOffset = [&](Value *Derived) { 1605 assert(Result.PointerToBase.count(Derived)); 1606 unsigned AddressSpace = Derived->getType()->getPointerAddressSpace(); 1607 unsigned IntPtrSize = DL.getPointerSizeInBits(AddressSpace); 1608 Value *Base = Result.PointerToBase.find(Derived)->second; 1609 Value *Base_int = Builder.CreatePtrToInt( 1610 Base, Type::getIntNTy(Context, IntPtrSize)); 1611 Value *Derived_int = Builder.CreatePtrToInt( 1612 Derived, Type::getIntNTy(Context, IntPtrSize)); 1613 return std::make_pair(Base, Builder.CreateSub(Derived_int, Base_int)); 1614 }; 1615 1616 auto *Dest = CallArgs[0]; 1617 Value *DestBase, *DestOffset; 1618 std::tie(DestBase, DestOffset) = GetBaseAndOffset(Dest); 1619 1620 auto *Source = CallArgs[1]; 1621 Value *SourceBase, *SourceOffset; 1622 std::tie(SourceBase, SourceOffset) = GetBaseAndOffset(Source); 1623 1624 auto *LengthInBytes = CallArgs[2]; 1625 auto *ElementSizeCI = cast<ConstantInt>(CallArgs[3]); 1626 1627 CallArgs.clear(); 1628 CallArgs.push_back(DestBase); 1629 CallArgs.push_back(DestOffset); 1630 CallArgs.push_back(SourceBase); 1631 CallArgs.push_back(SourceOffset); 1632 CallArgs.push_back(LengthInBytes); 1633 1634 SmallVector<Type *, 8> DomainTy; 1635 for (Value *Arg : CallArgs) 1636 DomainTy.push_back(Arg->getType()); 1637 auto *FTy = FunctionType::get(Type::getVoidTy(F->getContext()), DomainTy, 1638 /* isVarArg = */ false); 1639 1640 auto GetFunctionName = [](Intrinsic::ID IID, ConstantInt *ElementSizeCI) { 1641 uint64_t ElementSize = ElementSizeCI->getZExtValue(); 1642 if (IID == Intrinsic::memcpy_element_unordered_atomic) { 1643 switch (ElementSize) { 1644 case 1: 1645 return "__llvm_memcpy_element_unordered_atomic_safepoint_1"; 1646 case 2: 1647 return "__llvm_memcpy_element_unordered_atomic_safepoint_2"; 1648 case 4: 1649 return "__llvm_memcpy_element_unordered_atomic_safepoint_4"; 1650 case 8: 1651 return "__llvm_memcpy_element_unordered_atomic_safepoint_8"; 1652 case 16: 1653 return "__llvm_memcpy_element_unordered_atomic_safepoint_16"; 1654 default: 
1655 llvm_unreachable("unexpected element size!"); 1656 } 1657 } 1658 assert(IID == Intrinsic::memmove_element_unordered_atomic); 1659 switch (ElementSize) { 1660 case 1: 1661 return "__llvm_memmove_element_unordered_atomic_safepoint_1"; 1662 case 2: 1663 return "__llvm_memmove_element_unordered_atomic_safepoint_2"; 1664 case 4: 1665 return "__llvm_memmove_element_unordered_atomic_safepoint_4"; 1666 case 8: 1667 return "__llvm_memmove_element_unordered_atomic_safepoint_8"; 1668 case 16: 1669 return "__llvm_memmove_element_unordered_atomic_safepoint_16"; 1670 default: 1671 llvm_unreachable("unexpected element size!"); 1672 } 1673 }; 1674 1675 CallTarget = 1676 F->getParent() 1677 ->getOrInsertFunction(GetFunctionName(IID, ElementSizeCI), FTy) 1678 .getCallee(); 1679 } 1680 } 1681 1682 // Create the statepoint given all the arguments 1683 GCStatepointInst *Token = nullptr; 1684 if (auto *CI = dyn_cast<CallInst>(Call)) { 1685 CallInst *SPCall = Builder.CreateGCStatepointCall( 1686 StatepointID, NumPatchBytes, CallTarget, Flags, CallArgs, 1687 TransitionArgs, DeoptArgs, GCArgs, "safepoint_token"); 1688 1689 SPCall->setTailCallKind(CI->getTailCallKind()); 1690 SPCall->setCallingConv(CI->getCallingConv()); 1691 1692 // Currently we will fail on parameter attributes and on certain 1693 // function attributes. In case if we can handle this set of attributes - 1694 // set up function attrs directly on statepoint and return attrs later for 1695 // gc_result intrinsic. 1696 SPCall->setAttributes( 1697 legalizeCallAttributes(CI->getContext(), CI->getAttributes())); 1698 1699 Token = cast<GCStatepointInst>(SPCall); 1700 1701 // Put the following gc_result and gc_relocate calls immediately after the 1702 // the old call (which we're about to delete) 1703 assert(CI->getNextNode() && "Not a terminator, must have next!"); 1704 Builder.SetInsertPoint(CI->getNextNode()); 1705 Builder.SetCurrentDebugLocation(CI->getNextNode()->getDebugLoc()); 1706 } else { 1707 auto *II = cast<InvokeInst>(Call); 1708 1709 // Insert the new invoke into the old block. We'll remove the old one in a 1710 // moment at which point this will become the new terminator for the 1711 // original block. 1712 InvokeInst *SPInvoke = Builder.CreateGCStatepointInvoke( 1713 StatepointID, NumPatchBytes, CallTarget, II->getNormalDest(), 1714 II->getUnwindDest(), Flags, CallArgs, TransitionArgs, DeoptArgs, GCArgs, 1715 "statepoint_token"); 1716 1717 SPInvoke->setCallingConv(II->getCallingConv()); 1718 1719 // Currently we will fail on parameter attributes and on certain 1720 // function attributes. In case if we can handle this set of attributes - 1721 // set up function attrs directly on statepoint and return attrs later for 1722 // gc_result intrinsic. 1723 SPInvoke->setAttributes( 1724 legalizeCallAttributes(II->getContext(), II->getAttributes())); 1725 1726 Token = cast<GCStatepointInst>(SPInvoke); 1727 1728 // Generate gc relocates in exceptional path 1729 BasicBlock *UnwindBlock = II->getUnwindDest(); 1730 assert(!isa<PHINode>(UnwindBlock->begin()) && 1731 UnwindBlock->getUniquePredecessor() && 1732 "can't safely insert in this block!"); 1733 1734 Builder.SetInsertPoint(&*UnwindBlock->getFirstInsertionPt()); 1735 Builder.SetCurrentDebugLocation(II->getDebugLoc()); 1736 1737 // Attach exceptional gc relocates to the landingpad. 
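    // There is no usable statepoint token on the unwind path, so the relocates
    // emitted below are bound to the landingpad of the unwind destination
    // instead; for a gc.statepoint invoke the landingpad produces a token
    // which gc.relocate consumes in place of the statepoint token.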
1738 Instruction *ExceptionalToken = UnwindBlock->getLandingPadInst(); 1739 Result.UnwindToken = ExceptionalToken; 1740 1741 CreateGCRelocates(LiveVariables, BasePtrs, ExceptionalToken, Builder); 1742 1743 // Generate gc relocates and returns for normal block 1744 BasicBlock *NormalDest = II->getNormalDest(); 1745 assert(!isa<PHINode>(NormalDest->begin()) && 1746 NormalDest->getUniquePredecessor() && 1747 "can't safely insert in this block!"); 1748 1749 Builder.SetInsertPoint(&*NormalDest->getFirstInsertionPt()); 1750 1751 // gc relocates will be generated later as if it were regular call 1752 // statepoint 1753 } 1754 assert(Token && "Should be set in one of the above branches!"); 1755 1756 if (IsDeoptimize) { 1757 // If we're wrapping an @llvm.experimental.deoptimize in a statepoint, we 1758 // transform the tail-call like structure to a call to a void function 1759 // followed by unreachable to get better codegen. 1760 Replacements.push_back( 1761 DeferredReplacement::createDeoptimizeReplacement(Call)); 1762 } else { 1763 Token->setName("statepoint_token"); 1764 if (!Call->getType()->isVoidTy() && !Call->use_empty()) { 1765 StringRef Name = Call->hasName() ? Call->getName() : ""; 1766 CallInst *GCResult = Builder.CreateGCResult(Token, Call->getType(), Name); 1767 GCResult->setAttributes( 1768 AttributeList::get(GCResult->getContext(), AttributeList::ReturnIndex, 1769 Call->getAttributes().getRetAttributes())); 1770 1771 // We cannot RAUW or delete CS.getInstruction() because it could be in the 1772 // live set of some other safepoint, in which case that safepoint's 1773 // PartiallyConstructedSafepointRecord will hold a raw pointer to this 1774 // llvm::Instruction. Instead, we defer the replacement and deletion to 1775 // after the live sets have been made explicit in the IR, and we no longer 1776 // have raw pointers to worry about. 1777 Replacements.emplace_back( 1778 DeferredReplacement::createRAUW(Call, GCResult)); 1779 } else { 1780 Replacements.emplace_back(DeferredReplacement::createDelete(Call)); 1781 } 1782 } 1783 1784 Result.StatepointToken = Token; 1785 1786 // Second, create a gc.relocate for every live variable 1787 CreateGCRelocates(LiveVariables, BasePtrs, Token, Builder); 1788 } 1789 1790 // Replace an existing gc.statepoint with a new one and a set of gc.relocates 1791 // which make the relocations happening at this safepoint explicit. 1792 // 1793 // WARNING: Does not do any fixup to adjust users of the original live 1794 // values. That's the callers responsibility. 1795 static void 1796 makeStatepointExplicit(DominatorTree &DT, CallBase *Call, 1797 PartiallyConstructedSafepointRecord &Result, 1798 std::vector<DeferredReplacement> &Replacements) { 1799 const auto &LiveSet = Result.LiveSet; 1800 const auto &PointerToBase = Result.PointerToBase; 1801 1802 // Convert to vector for efficient cross referencing. 1803 SmallVector<Value *, 64> BaseVec, LiveVec; 1804 LiveVec.reserve(LiveSet.size()); 1805 BaseVec.reserve(LiveSet.size()); 1806 for (Value *L : LiveSet) { 1807 LiveVec.push_back(L); 1808 assert(PointerToBase.count(L)); 1809 Value *Base = PointerToBase.find(L)->second; 1810 BaseVec.push_back(Base); 1811 } 1812 assert(LiveVec.size() == BaseVec.size()); 1813 1814 // Do the actual rewriting and delete the old statepoint 1815 makeStatepointExplicitImpl(Call, BaseVec, LiveVec, Result, Replacements); 1816 } 1817 1818 // Helper function for the relocationViaAlloca. 
//
// It receives an iterator over the statepoint gc relocates and emits a store
// to the assigned location (via allocaMap) for each one of them. It adds the
// visited values to the visitedLiveValues set, which we will later use for
// sanity checking.
static void
insertRelocationStores(iterator_range<Value::user_iterator> GCRelocs,
                       DenseMap<Value *, AllocaInst *> &AllocaMap,
                       DenseSet<Value *> &VisitedLiveValues) {
  for (User *U : GCRelocs) {
    GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(U);
    if (!Relocate)
      continue;

    Value *OriginalValue = Relocate->getDerivedPtr();
    assert(AllocaMap.count(OriginalValue));
    Value *Alloca = AllocaMap[OriginalValue];

    // Emit store into the related alloca.
    // All gc_relocates are i8 addrspace(1)* typed, so the relocate must be
    // bitcast to the allocated type of the corresponding alloca first.
    assert(Relocate->getNextNode() &&
           "Should always have one since it's not a terminator");
    IRBuilder<> Builder(Relocate->getNextNode());
    Value *CastedRelocatedValue =
      Builder.CreateBitCast(Relocate,
                            cast<AllocaInst>(Alloca)->getAllocatedType(),
                            suffixed_name_or(Relocate, ".casted", ""));

    new StoreInst(CastedRelocatedValue, Alloca,
                  cast<Instruction>(CastedRelocatedValue)->getNextNode());

#ifndef NDEBUG
    VisitedLiveValues.insert(OriginalValue);
#endif
  }
}

// Helper function for the "relocationViaAlloca". Similar to
// "insertRelocationStores" but works for rematerialized values.
static void insertRematerializationStores(
    const RematerializedValueMapTy &RematerializedValues,
    DenseMap<Value *, AllocaInst *> &AllocaMap,
    DenseSet<Value *> &VisitedLiveValues) {
  for (auto RematerializedValuePair : RematerializedValues) {
    Instruction *RematerializedValue = RematerializedValuePair.first;
    Value *OriginalValue = RematerializedValuePair.second;

    assert(AllocaMap.count(OriginalValue) &&
           "Can not find alloca for rematerialized value");
    Value *Alloca = AllocaMap[OriginalValue];

    new StoreInst(RematerializedValue, Alloca,
                  RematerializedValue->getNextNode());

#ifndef NDEBUG
    VisitedLiveValues.insert(OriginalValue);
#endif
  }
}

/// Do all the relocation update via allocas and mem2reg
static void relocationViaAlloca(
    Function &F, DominatorTree &DT, ArrayRef<Value *> Live,
    ArrayRef<PartiallyConstructedSafepointRecord> Records) {
#ifndef NDEBUG
  // Record the initial number of (static) allocas; we'll check we have the
  // same number when we get done.
  int InitialAllocaNum = 0;
  for (Instruction &I : F.getEntryBlock())
    if (isa<AllocaInst>(I))
      InitialAllocaNum++;
#endif

  // TODO-PERF: change data structures, reserve
  DenseMap<Value *, AllocaInst *> AllocaMap;
  SmallVector<AllocaInst *, 200> PromotableAllocas;
  // Used later to check that we have enough allocas to store all values
  std::size_t NumRematerializedValues = 0;
  PromotableAllocas.reserve(Live.size());

  // Emit alloca for "LiveValue" and record it in "allocaMap" and
  // "PromotableAllocas"
  const DataLayout &DL = F.getParent()->getDataLayout();
  auto emitAllocaFor = [&](Value *LiveValue) {
    AllocaInst *Alloca = new AllocaInst(LiveValue->getType(),
                                        DL.getAllocaAddrSpace(), "",
                                        F.getEntryBlock().getFirstNonPHI());
    AllocaMap[LiveValue] = Alloca;
    PromotableAllocas.push_back(Alloca);
  };

  // Emit alloca for each live gc pointer
  for (Value *V : Live)
    emitAllocaFor(V);

  // Emit allocas for rematerialized values
  for (const auto &Info : Records)
    for (auto RematerializedValuePair : Info.RematerializedValues) {
      Value *OriginalValue = RematerializedValuePair.second;
      if (AllocaMap.count(OriginalValue) != 0)
        continue;

      emitAllocaFor(OriginalValue);
      ++NumRematerializedValues;
    }

  // The next two loops are part of the same conceptual operation. We need to
  // insert a store to the alloca after the original def and at each
  // redefinition. We need to insert a load before each use. These are split
  // into distinct loops for performance reasons.

  // Update gc pointer after each statepoint: either store a relocated value or
  // null (if no relocated value was found for this gc pointer and it is not a
  // gc_result). This must happen before we update the statepoint with load of
  // alloca, otherwise we lose the link between statepoint and old def.
  for (const auto &Info : Records) {
    Value *Statepoint = Info.StatepointToken;

    // This will be used for consistency check
    DenseSet<Value *> VisitedLiveValues;

    // Insert stores for normal statepoint gc relocates
    insertRelocationStores(Statepoint->users(), AllocaMap, VisitedLiveValues);

    // If it was an invoke statepoint, we also insert stores for the
    // exceptional path gc relocates.
    if (isa<InvokeInst>(Statepoint)) {
      insertRelocationStores(Info.UnwindToken->users(), AllocaMap,
                             VisitedLiveValues);
    }

    // Do the same thing for rematerialized values
    insertRematerializationStores(Info.RematerializedValues, AllocaMap,
                                  VisitedLiveValues);

    if (ClobberNonLive) {
      // As a debugging aid, pretend that an unrelocated pointer becomes null
      // at the gc.statepoint. This will turn some subtle GC problems into
      // slightly easier to debug SEGVs. Note that on large IR files with
      // lots of gc.statepoints this is extremely costly in both memory and
      // time.
1961 SmallVector<AllocaInst *, 64> ToClobber; 1962 for (auto Pair : AllocaMap) { 1963 Value *Def = Pair.first; 1964 AllocaInst *Alloca = Pair.second; 1965 1966 // This value was relocated 1967 if (VisitedLiveValues.count(Def)) { 1968 continue; 1969 } 1970 ToClobber.push_back(Alloca); 1971 } 1972 1973 auto InsertClobbersAt = [&](Instruction *IP) { 1974 for (auto *AI : ToClobber) { 1975 auto PT = cast<PointerType>(AI->getAllocatedType()); 1976 Constant *CPN = ConstantPointerNull::get(PT); 1977 new StoreInst(CPN, AI, IP); 1978 } 1979 }; 1980 1981 // Insert the clobbering stores. These may get intermixed with the 1982 // gc.results and gc.relocates, but that's fine. 1983 if (auto II = dyn_cast<InvokeInst>(Statepoint)) { 1984 InsertClobbersAt(&*II->getNormalDest()->getFirstInsertionPt()); 1985 InsertClobbersAt(&*II->getUnwindDest()->getFirstInsertionPt()); 1986 } else { 1987 InsertClobbersAt(cast<Instruction>(Statepoint)->getNextNode()); 1988 } 1989 } 1990 } 1991 1992 // Update use with load allocas and add store for gc_relocated. 1993 for (auto Pair : AllocaMap) { 1994 Value *Def = Pair.first; 1995 AllocaInst *Alloca = Pair.second; 1996 1997 // We pre-record the uses of allocas so that we dont have to worry about 1998 // later update that changes the user information.. 1999 2000 SmallVector<Instruction *, 20> Uses; 2001 // PERF: trade a linear scan for repeated reallocation 2002 Uses.reserve(Def->getNumUses()); 2003 for (User *U : Def->users()) { 2004 if (!isa<ConstantExpr>(U)) { 2005 // If the def has a ConstantExpr use, then the def is either a 2006 // ConstantExpr use itself or null. In either case 2007 // (recursively in the first, directly in the second), the oop 2008 // it is ultimately dependent on is null and this particular 2009 // use does not need to be fixed up. 2010 Uses.push_back(cast<Instruction>(U)); 2011 } 2012 } 2013 2014 llvm::sort(Uses); 2015 auto Last = std::unique(Uses.begin(), Uses.end()); 2016 Uses.erase(Last, Uses.end()); 2017 2018 for (Instruction *Use : Uses) { 2019 if (isa<PHINode>(Use)) { 2020 PHINode *Phi = cast<PHINode>(Use); 2021 for (unsigned i = 0; i < Phi->getNumIncomingValues(); i++) { 2022 if (Def == Phi->getIncomingValue(i)) { 2023 LoadInst *Load = 2024 new LoadInst(Alloca->getAllocatedType(), Alloca, "", 2025 Phi->getIncomingBlock(i)->getTerminator()); 2026 Phi->setIncomingValue(i, Load); 2027 } 2028 } 2029 } else { 2030 LoadInst *Load = 2031 new LoadInst(Alloca->getAllocatedType(), Alloca, "", Use); 2032 Use->replaceUsesOfWith(Def, Load); 2033 } 2034 } 2035 2036 // Emit store for the initial gc value. Store must be inserted after load, 2037 // otherwise store will be in alloca's use list and an extra load will be 2038 // inserted before it. 2039 StoreInst *Store = new StoreInst(Def, Alloca, /*volatile*/ false, 2040 DL.getABITypeAlign(Def->getType())); 2041 if (Instruction *Inst = dyn_cast<Instruction>(Def)) { 2042 if (InvokeInst *Invoke = dyn_cast<InvokeInst>(Inst)) { 2043 // InvokeInst is a terminator so the store need to be inserted into its 2044 // normal destination block. 
2045 BasicBlock *NormalDest = Invoke->getNormalDest(); 2046 Store->insertBefore(NormalDest->getFirstNonPHI()); 2047 } else { 2048 assert(!Inst->isTerminator() && 2049 "The only terminator that can produce a value is " 2050 "InvokeInst which is handled above."); 2051 Store->insertAfter(Inst); 2052 } 2053 } else { 2054 assert(isa<Argument>(Def)); 2055 Store->insertAfter(cast<Instruction>(Alloca)); 2056 } 2057 } 2058 2059 assert(PromotableAllocas.size() == Live.size() + NumRematerializedValues && 2060 "we must have the same allocas with lives"); 2061 if (!PromotableAllocas.empty()) { 2062 // Apply mem2reg to promote alloca to SSA 2063 PromoteMemToReg(PromotableAllocas, DT); 2064 } 2065 2066 #ifndef NDEBUG 2067 for (auto &I : F.getEntryBlock()) 2068 if (isa<AllocaInst>(I)) 2069 InitialAllocaNum--; 2070 assert(InitialAllocaNum == 0 && "We must not introduce any extra allocas"); 2071 #endif 2072 } 2073 2074 /// Implement a unique function which doesn't require we sort the input 2075 /// vector. Doing so has the effect of changing the output of a couple of 2076 /// tests in ways which make them less useful in testing fused safepoints. 2077 template <typename T> static void unique_unsorted(SmallVectorImpl<T> &Vec) { 2078 SmallSet<T, 8> Seen; 2079 erase_if(Vec, [&](const T &V) { return !Seen.insert(V).second; }); 2080 } 2081 2082 /// Insert holders so that each Value is obviously live through the entire 2083 /// lifetime of the call. 2084 static void insertUseHolderAfter(CallBase *Call, const ArrayRef<Value *> Values, 2085 SmallVectorImpl<CallInst *> &Holders) { 2086 if (Values.empty()) 2087 // No values to hold live, might as well not insert the empty holder 2088 return; 2089 2090 Module *M = Call->getModule(); 2091 // Use a dummy vararg function to actually hold the values live 2092 FunctionCallee Func = M->getOrInsertFunction( 2093 "__tmp_use", FunctionType::get(Type::getVoidTy(M->getContext()), true)); 2094 if (isa<CallInst>(Call)) { 2095 // For call safepoints insert dummy calls right after safepoint 2096 Holders.push_back( 2097 CallInst::Create(Func, Values, "", &*++Call->getIterator())); 2098 return; 2099 } 2100 // For invoke safepooints insert dummy calls both in normal and 2101 // exceptional destination blocks 2102 auto *II = cast<InvokeInst>(Call); 2103 Holders.push_back(CallInst::Create( 2104 Func, Values, "", &*II->getNormalDest()->getFirstInsertionPt())); 2105 Holders.push_back(CallInst::Create( 2106 Func, Values, "", &*II->getUnwindDest()->getFirstInsertionPt())); 2107 } 2108 2109 static void findLiveReferences( 2110 Function &F, DominatorTree &DT, ArrayRef<CallBase *> toUpdate, 2111 MutableArrayRef<struct PartiallyConstructedSafepointRecord> records) { 2112 GCPtrLivenessData OriginalLivenessData; 2113 computeLiveInValues(DT, F, OriginalLivenessData); 2114 for (size_t i = 0; i < records.size(); i++) { 2115 struct PartiallyConstructedSafepointRecord &info = records[i]; 2116 analyzeParsePointLiveness(DT, OriginalLivenessData, toUpdate[i], info); 2117 } 2118 } 2119 2120 // Helper function for the "rematerializeLiveValues". It walks use chain 2121 // starting from the "CurrentValue" until it reaches the root of the chain, i.e. 2122 // the base or a value it cannot process. Only "simple" values are processed 2123 // (currently it is GEP's and casts). The returned root is examined by the 2124 // callers of findRematerializableChainToBasePointer. Fills "ChainToBase" array 2125 // with all visited values. 
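// For example (illustrative IR), given
//   %cast = bitcast i32 addrspace(1)* %base to i8 addrspace(1)*
//   %derived = getelementptr i8, i8 addrspace(1)* %cast, i64 16
// a walk starting at %derived fills ChainToBase with [%derived, %cast] and
// returns %base as the root of the chain.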
2126 static Value* findRematerializableChainToBasePointer( 2127 SmallVectorImpl<Instruction*> &ChainToBase, 2128 Value *CurrentValue) { 2129 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(CurrentValue)) { 2130 ChainToBase.push_back(GEP); 2131 return findRematerializableChainToBasePointer(ChainToBase, 2132 GEP->getPointerOperand()); 2133 } 2134 2135 if (CastInst *CI = dyn_cast<CastInst>(CurrentValue)) { 2136 if (!CI->isNoopCast(CI->getModule()->getDataLayout())) 2137 return CI; 2138 2139 ChainToBase.push_back(CI); 2140 return findRematerializableChainToBasePointer(ChainToBase, 2141 CI->getOperand(0)); 2142 } 2143 2144 // We have reached the root of the chain, which is either equal to the base or 2145 // is the first unsupported value along the use chain. 2146 return CurrentValue; 2147 } 2148 2149 // Helper function for the "rematerializeLiveValues". Compute cost of the use 2150 // chain we are going to rematerialize. 2151 static InstructionCost 2152 chainToBasePointerCost(SmallVectorImpl<Instruction *> &Chain, 2153 TargetTransformInfo &TTI) { 2154 InstructionCost Cost = 0; 2155 2156 for (Instruction *Instr : Chain) { 2157 if (CastInst *CI = dyn_cast<CastInst>(Instr)) { 2158 assert(CI->isNoopCast(CI->getModule()->getDataLayout()) && 2159 "non noop cast is found during rematerialization"); 2160 2161 Type *SrcTy = CI->getOperand(0)->getType(); 2162 Cost += TTI.getCastInstrCost(CI->getOpcode(), CI->getType(), SrcTy, 2163 TTI::getCastContextHint(CI), 2164 TargetTransformInfo::TCK_SizeAndLatency, CI); 2165 2166 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Instr)) { 2167 // Cost of the address calculation 2168 Type *ValTy = GEP->getSourceElementType(); 2169 Cost += TTI.getAddressComputationCost(ValTy); 2170 2171 // And cost of the GEP itself 2172 // TODO: Use TTI->getGEPCost here (it exists, but appears to be not 2173 // allowed for the external usage) 2174 if (!GEP->hasAllConstantIndices()) 2175 Cost += 2; 2176 2177 } else { 2178 llvm_unreachable("unsupported instruction type during rematerialization"); 2179 } 2180 } 2181 2182 return Cost; 2183 } 2184 2185 static bool AreEquivalentPhiNodes(PHINode &OrigRootPhi, PHINode &AlternateRootPhi) { 2186 unsigned PhiNum = OrigRootPhi.getNumIncomingValues(); 2187 if (PhiNum != AlternateRootPhi.getNumIncomingValues() || 2188 OrigRootPhi.getParent() != AlternateRootPhi.getParent()) 2189 return false; 2190 // Map of incoming values and their corresponding basic blocks of 2191 // OrigRootPhi. 2192 SmallDenseMap<Value *, BasicBlock *, 8> CurrentIncomingValues; 2193 for (unsigned i = 0; i < PhiNum; i++) 2194 CurrentIncomingValues[OrigRootPhi.getIncomingValue(i)] = 2195 OrigRootPhi.getIncomingBlock(i); 2196 2197 // Both current and base PHIs should have same incoming values and 2198 // the same basic blocks corresponding to the incoming values. 2199 for (unsigned i = 0; i < PhiNum; i++) { 2200 auto CIVI = 2201 CurrentIncomingValues.find(AlternateRootPhi.getIncomingValue(i)); 2202 if (CIVI == CurrentIncomingValues.end()) 2203 return false; 2204 BasicBlock *CurrentIncomingBB = CIVI->second; 2205 if (CurrentIncomingBB != AlternateRootPhi.getIncomingBlock(i)) 2206 return false; 2207 } 2208 return true; 2209 } 2210 2211 // From the statepoint live set pick values that are cheaper to recompute then 2212 // to relocate. Remove this values from the live set, rematerialize them after 2213 // statepoint and record them in "Info" structure. Note that similar to 2214 // relocated values we don't do any user adjustments here. 
static void rematerializeLiveValues(CallBase *Call,
                                    PartiallyConstructedSafepointRecord &Info,
                                    TargetTransformInfo &TTI) {
  const unsigned int ChainLengthThreshold = 10;

  // Record values we are going to delete from this statepoint live set.
  // We cannot do this in the following loop due to iterator invalidation.
  SmallVector<Value *, 32> LiveValuesToBeDeleted;

  for (Value *LiveValue : Info.LiveSet) {
    // For each live pointer find its defining chain.
    SmallVector<Instruction *, 3> ChainToBase;
    assert(Info.PointerToBase.count(LiveValue));
    Value *RootOfChain =
      findRematerializableChainToBasePointer(ChainToBase,
                                             LiveValue);

    // Nothing to do, or chain is too long
    if (ChainToBase.size() == 0 ||
        ChainToBase.size() > ChainLengthThreshold)
      continue;

    // Handle the scenario where the RootOfChain is not equal to the
    // Base Value, but they are essentially the same phi values.
    if (RootOfChain != Info.PointerToBase[LiveValue]) {
      PHINode *OrigRootPhi = dyn_cast<PHINode>(RootOfChain);
      PHINode *AlternateRootPhi =
          dyn_cast<PHINode>(Info.PointerToBase[LiveValue]);
      if (!OrigRootPhi || !AlternateRootPhi)
        continue;
      // PHI nodes that have the same incoming values, and belong to the same
      // basic block, are essentially the same SSA value. When the original phi
      // has incoming values with different base pointers, the original phi is
      // marked as conflict, and an additional `AlternateRootPhi` with the same
      // incoming values gets generated by the findBasePointer function. We
      // need to identify that the newly generated AlternateRootPhi (.base
      // version of the phi) and RootOfChain (the original phi node itself) are
      // the same, so that we can rematerialize the gep and casts. This is a
      // workaround for the deficiency in the findBasePointer algorithm.
      if (!AreEquivalentPhiNodes(*OrigRootPhi, *AlternateRootPhi))
        continue;
      // Now that the phi nodes are proven to be the same, assert that
      // findBasePointer's newly generated AlternateRootPhi is present in the
      // liveset of the call.
      assert(Info.LiveSet.count(AlternateRootPhi));
    }
    // Compute cost of this chain.
    InstructionCost Cost = chainToBasePointerCost(ChainToBase, TTI);
    // TODO: We can also account for cases when we will be able to remove some
    //       of the rematerialized values by later optimization passes, e.g. if
    //       we rematerialized several intersecting chains, or if the original
    //       values don't have any uses besides this statepoint.

    // For invokes we need to rematerialize each chain twice - for normal and
    // for unwind basic blocks. Model this by multiplying cost by two.
    if (isa<InvokeInst>(Call)) {
      Cost *= 2;
    }
    // If it's too expensive - skip it.
    if (Cost >= RematerializationThreshold)
      continue;

    // Remove value from the live set.
    LiveValuesToBeDeleted.push_back(LiveValue);

    // Clone instructions and record them inside "Info" structure.

    // Walk backwards to visit top-most instructions first.
    std::reverse(ChainToBase.begin(), ChainToBase.end());

    // Utility function which clones all instructions from "ChainToBase"
    // and inserts them before "InsertBefore". Returns the rematerialized value
    // which should be used after the statepoint.
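    // For example (illustrative), if the live value is
    //   %derived = getelementptr i8, i8 addrspace(1)* %base, i64 16
    // the lambda emits a copy
    //   %derived.remat = getelementptr i8, i8 addrspace(1)* %base, i64 16
    // at InsertBefore; the later alloca-based rewriting redirects
    // post-statepoint uses of %derived to this copy, so %derived itself no
    // longer needs to be relocated.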
2287 auto rematerializeChain = [&ChainToBase]( 2288 Instruction *InsertBefore, Value *RootOfChain, Value *AlternateLiveBase) { 2289 Instruction *LastClonedValue = nullptr; 2290 Instruction *LastValue = nullptr; 2291 for (Instruction *Instr: ChainToBase) { 2292 // Only GEP's and casts are supported as we need to be careful to not 2293 // introduce any new uses of pointers not in the liveset. 2294 // Note that it's fine to introduce new uses of pointers which were 2295 // otherwise not used after this statepoint. 2296 assert(isa<GetElementPtrInst>(Instr) || isa<CastInst>(Instr)); 2297 2298 Instruction *ClonedValue = Instr->clone(); 2299 ClonedValue->insertBefore(InsertBefore); 2300 ClonedValue->setName(Instr->getName() + ".remat"); 2301 2302 // If it is not first instruction in the chain then it uses previously 2303 // cloned value. We should update it to use cloned value. 2304 if (LastClonedValue) { 2305 assert(LastValue); 2306 ClonedValue->replaceUsesOfWith(LastValue, LastClonedValue); 2307 #ifndef NDEBUG 2308 for (auto OpValue : ClonedValue->operand_values()) { 2309 // Assert that cloned instruction does not use any instructions from 2310 // this chain other than LastClonedValue 2311 assert(!is_contained(ChainToBase, OpValue) && 2312 "incorrect use in rematerialization chain"); 2313 // Assert that the cloned instruction does not use the RootOfChain 2314 // or the AlternateLiveBase. 2315 assert(OpValue != RootOfChain && OpValue != AlternateLiveBase); 2316 } 2317 #endif 2318 } else { 2319 // For the first instruction, replace the use of unrelocated base i.e. 2320 // RootOfChain/OrigRootPhi, with the corresponding PHI present in the 2321 // live set. They have been proved to be the same PHI nodes. Note 2322 // that the *only* use of the RootOfChain in the ChainToBase list is 2323 // the first Value in the list. 2324 if (RootOfChain != AlternateLiveBase) 2325 ClonedValue->replaceUsesOfWith(RootOfChain, AlternateLiveBase); 2326 } 2327 2328 LastClonedValue = ClonedValue; 2329 LastValue = Instr; 2330 } 2331 assert(LastClonedValue); 2332 return LastClonedValue; 2333 }; 2334 2335 // Different cases for calls and invokes. For invokes we need to clone 2336 // instructions both on normal and unwind path. 
2337 if (isa<CallInst>(Call)) { 2338 Instruction *InsertBefore = Call->getNextNode(); 2339 assert(InsertBefore); 2340 Instruction *RematerializedValue = rematerializeChain( 2341 InsertBefore, RootOfChain, Info.PointerToBase[LiveValue]); 2342 Info.RematerializedValues[RematerializedValue] = LiveValue; 2343 } else { 2344 auto *Invoke = cast<InvokeInst>(Call); 2345 2346 Instruction *NormalInsertBefore = 2347 &*Invoke->getNormalDest()->getFirstInsertionPt(); 2348 Instruction *UnwindInsertBefore = 2349 &*Invoke->getUnwindDest()->getFirstInsertionPt(); 2350 2351 Instruction *NormalRematerializedValue = rematerializeChain( 2352 NormalInsertBefore, RootOfChain, Info.PointerToBase[LiveValue]); 2353 Instruction *UnwindRematerializedValue = rematerializeChain( 2354 UnwindInsertBefore, RootOfChain, Info.PointerToBase[LiveValue]); 2355 2356 Info.RematerializedValues[NormalRematerializedValue] = LiveValue; 2357 Info.RematerializedValues[UnwindRematerializedValue] = LiveValue; 2358 } 2359 } 2360 2361 // Remove rematerializaed values from the live set 2362 for (auto LiveValue: LiveValuesToBeDeleted) { 2363 Info.LiveSet.remove(LiveValue); 2364 } 2365 } 2366 2367 static bool insertParsePoints(Function &F, DominatorTree &DT, 2368 TargetTransformInfo &TTI, 2369 SmallVectorImpl<CallBase *> &ToUpdate) { 2370 #ifndef NDEBUG 2371 // sanity check the input 2372 std::set<CallBase *> Uniqued; 2373 Uniqued.insert(ToUpdate.begin(), ToUpdate.end()); 2374 assert(Uniqued.size() == ToUpdate.size() && "no duplicates please!"); 2375 2376 for (CallBase *Call : ToUpdate) 2377 assert(Call->getFunction() == &F); 2378 #endif 2379 2380 // When inserting gc.relocates for invokes, we need to be able to insert at 2381 // the top of the successor blocks. See the comment on 2382 // normalForInvokeSafepoint on exactly what is needed. Note that this step 2383 // may restructure the CFG. 2384 for (CallBase *Call : ToUpdate) { 2385 auto *II = dyn_cast<InvokeInst>(Call); 2386 if (!II) 2387 continue; 2388 normalizeForInvokeSafepoint(II->getNormalDest(), II->getParent(), DT); 2389 normalizeForInvokeSafepoint(II->getUnwindDest(), II->getParent(), DT); 2390 } 2391 2392 // A list of dummy calls added to the IR to keep various values obviously 2393 // live in the IR. We'll remove all of these when done. 2394 SmallVector<CallInst *, 64> Holders; 2395 2396 // Insert a dummy call with all of the deopt operands we'll need for the 2397 // actual safepoint insertion as arguments. This ensures reference operands 2398 // in the deopt argument list are considered live through the safepoint (and 2399 // thus makes sure they get relocated.) 2400 for (CallBase *Call : ToUpdate) { 2401 SmallVector<Value *, 64> DeoptValues; 2402 2403 for (Value *Arg : GetDeoptBundleOperands(Call)) { 2404 assert(!isUnhandledGCPointerType(Arg->getType()) && 2405 "support for FCA unimplemented"); 2406 if (isHandledGCPointerType(Arg->getType())) 2407 DeoptValues.push_back(Arg); 2408 } 2409 2410 insertUseHolderAfter(Call, DeoptValues, Holders); 2411 } 2412 2413 SmallVector<PartiallyConstructedSafepointRecord, 64> Records(ToUpdate.size()); 2414 2415 // A) Identify all gc pointers which are statically live at the given call 2416 // site. 2417 findLiveReferences(F, DT, ToUpdate, Records); 2418 2419 // B) Find the base pointers for each live pointer 2420 /* scope for caching */ { 2421 // Cache the 'defining value' relation used in the computation and 2422 // insertion of base phis and selects. This ensures that we don't insert 2423 // large numbers of duplicate base_phis. 
2424 DefiningValueMapTy DVCache; 2425 2426 for (size_t i = 0; i < Records.size(); i++) { 2427 PartiallyConstructedSafepointRecord &info = Records[i]; 2428 findBasePointers(DT, DVCache, ToUpdate[i], info); 2429 } 2430 } // end of cache scope 2431 2432 // The base phi insertion logic (for any safepoint) may have inserted new 2433 // instructions which are now live at some safepoint. The simplest such 2434 // example is: 2435 // loop: 2436 // phi a <-- will be a new base_phi here 2437 // safepoint 1 <-- that needs to be live here 2438 // gep a + 1 2439 // safepoint 2 2440 // br loop 2441 // We insert some dummy calls after each safepoint to definitely hold live 2442 // the base pointers which were identified for that safepoint. We'll then 2443 // ask liveness for _every_ base inserted to see what is now live. Then we 2444 // remove the dummy calls. 2445 Holders.reserve(Holders.size() + Records.size()); 2446 for (size_t i = 0; i < Records.size(); i++) { 2447 PartiallyConstructedSafepointRecord &Info = Records[i]; 2448 2449 SmallVector<Value *, 128> Bases; 2450 for (auto Pair : Info.PointerToBase) 2451 Bases.push_back(Pair.second); 2452 2453 insertUseHolderAfter(ToUpdate[i], Bases, Holders); 2454 } 2455 2456 // By selecting base pointers, we've effectively inserted new uses. Thus, we 2457 // need to rerun liveness. We may *also* have inserted new defs, but that's 2458 // not the key issue. 2459 recomputeLiveInValues(F, DT, ToUpdate, Records); 2460 2461 if (PrintBasePointers) { 2462 for (auto &Info : Records) { 2463 errs() << "Base Pairs: (w/Relocation)\n"; 2464 for (auto Pair : Info.PointerToBase) { 2465 errs() << " derived "; 2466 Pair.first->printAsOperand(errs(), false); 2467 errs() << " base "; 2468 Pair.second->printAsOperand(errs(), false); 2469 errs() << "\n"; 2470 } 2471 } 2472 } 2473 2474 // It is possible that non-constant live variables have a constant base. For 2475 // example, a GEP with a variable offset from a global. In this case we can 2476 // remove it from the liveset. We already don't add constants to the liveset 2477 // because we assume they won't move at runtime and the GC doesn't need to be 2478 // informed about them. The same reasoning applies if the base is constant. 2479 // Note that the relocation placement code relies on this filtering for 2480 // correctness as it expects the base to be in the liveset, which isn't true 2481 // if the base is constant. 2482 for (auto &Info : Records) 2483 for (auto &BasePair : Info.PointerToBase) 2484 if (isa<Constant>(BasePair.second)) 2485 Info.LiveSet.remove(BasePair.first); 2486 2487 for (CallInst *CI : Holders) 2488 CI->eraseFromParent(); 2489 2490 Holders.clear(); 2491 2492 // In order to reduce live set of statepoint we might choose to rematerialize 2493 // some values instead of relocating them. This is purely an optimization and 2494 // does not influence correctness. 2495 for (size_t i = 0; i < Records.size(); i++) 2496 rematerializeLiveValues(ToUpdate[i], Records[i], TTI); 2497 2498 // We need this to safely RAUW and delete call or invoke return values that 2499 // may themselves be live over a statepoint. For details, please see usage in 2500 // makeStatepointExplicitImpl. 2501 std::vector<DeferredReplacement> Replacements; 2502 2503 // Now run through and replace the existing statepoints with new ones with 2504 // the live variables listed. We do not yet update uses of the values being 2505 // relocated. We have references to live variables that need to 2506 // survive to the last iteration of this loop. 
  // (By construction, the previous statepoint cannot be a live variable, thus
  // we can remove the old statepoint calls as we go.)
  for (size_t i = 0; i < Records.size(); i++)
    makeStatepointExplicit(DT, ToUpdate[i], Records[i], Replacements);

  ToUpdate.clear(); // prevent accidental use of invalid calls.

  for (auto &PR : Replacements)
    PR.doReplacement();

  Replacements.clear();

  for (auto &Info : Records) {
    // These live sets may contain stale Value pointers, since we replaced
    // calls with operand bundles with calls wrapped in gc.statepoint, and
    // some of those calls may have been def'ing live gc pointers. Clear these
    // out to avoid accidentally using them.
    //
    // TODO: We should create a separate data structure that does not contain
    // these live sets, and migrate to using that data structure from this
    // point onward.
    Info.LiveSet.clear();
    Info.PointerToBase.clear();
  }

  // Do all the fixups of the original live variables to their relocated selves
  SmallVector<Value *, 128> Live;
  for (size_t i = 0; i < Records.size(); i++) {
    PartiallyConstructedSafepointRecord &Info = Records[i];

    // We can't simply save the live set from the original insertion. One of
    // the live values might be the result of a call which needs a safepoint.
    // That Value* no longer exists and we need to use the new gc_result.
    // Thankfully, the live set is embedded in the statepoint (and updated), so
    // we just grab that.
    llvm::append_range(Live, Info.StatepointToken->gc_args());
#ifndef NDEBUG
    // Do some basic sanity checks on our liveness results before performing
    // relocation. Relocation can and will turn mistakes in liveness results
    // into nonsensical code which is much harder to debug.
    // TODO: It would be nice to test consistency as well.
    assert(DT.isReachableFromEntry(Info.StatepointToken->getParent()) &&
           "statepoint must be reachable or liveness is meaningless");
    for (Value *V : Info.StatepointToken->gc_args()) {
      if (!isa<Instruction>(V))
        // Non-instruction values trivially dominate all possible uses.
        continue;
      auto *LiveInst = cast<Instruction>(V);
      assert(DT.isReachableFromEntry(LiveInst->getParent()) &&
             "unreachable values should never be live");
      assert(DT.dominates(LiveInst, Info.StatepointToken) &&
             "basic SSA liveness expectation violated by liveness analysis");
    }
#endif
  }
  unique_unsorted(Live);

#ifndef NDEBUG
  // Sanity check.
  for (auto *Ptr : Live)
    assert(isHandledGCPointerType(Ptr->getType()) &&
           "must be a gc pointer type");
#endif

  relocationViaAlloca(F, DT, Live, Records);
  return !Records.empty();
}

// Handles both return values and arguments for Functions and calls.
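// Currently this strips dereferenceable and dereferenceable_or_null (together
// with their byte counts) as well as noalias and nofree from the given index.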
2576 template <typename AttrHolder> 2577 static void RemoveNonValidAttrAtIndex(LLVMContext &Ctx, AttrHolder &AH, 2578 unsigned Index) { 2579 AttrBuilder R; 2580 if (AH.getDereferenceableBytes(Index)) 2581 R.addAttribute(Attribute::get(Ctx, Attribute::Dereferenceable, 2582 AH.getDereferenceableBytes(Index))); 2583 if (AH.getDereferenceableOrNullBytes(Index)) 2584 R.addAttribute(Attribute::get(Ctx, Attribute::DereferenceableOrNull, 2585 AH.getDereferenceableOrNullBytes(Index))); 2586 for (auto Attr : {Attribute::NoAlias, Attribute::NoFree}) 2587 if (AH.getAttributes().hasAttribute(Index, Attr)) 2588 R.addAttribute(Attr); 2589 2590 if (!R.empty()) 2591 AH.setAttributes(AH.getAttributes().removeAttributes(Ctx, Index, R)); 2592 } 2593 2594 static void stripNonValidAttributesFromPrototype(Function &F) { 2595 LLVMContext &Ctx = F.getContext(); 2596 2597 // Intrinsics are very delicate. Lowering sometimes depends the presence 2598 // of certain attributes for correctness, but we may have also inferred 2599 // additional ones in the abstract machine model which need stripped. This 2600 // assumes that the attributes defined in Intrinsic.td are conservatively 2601 // correct for both physical and abstract model. 2602 if (Intrinsic::ID id = F.getIntrinsicID()) { 2603 F.setAttributes(Intrinsic::getAttributes(Ctx, id)); 2604 return; 2605 } 2606 2607 for (Argument &A : F.args()) 2608 if (isa<PointerType>(A.getType())) 2609 RemoveNonValidAttrAtIndex(Ctx, F, 2610 A.getArgNo() + AttributeList::FirstArgIndex); 2611 2612 if (isa<PointerType>(F.getReturnType())) 2613 RemoveNonValidAttrAtIndex(Ctx, F, AttributeList::ReturnIndex); 2614 2615 for (auto Attr : {Attribute::NoSync, Attribute::NoFree}) 2616 F.removeFnAttr(Attr); 2617 } 2618 2619 /// Certain metadata on instructions are invalid after running RS4GC. 2620 /// Optimizations that run after RS4GC can incorrectly use this metadata to 2621 /// optimize functions. We drop such metadata on the instruction. 2622 static void stripInvalidMetadataFromInstruction(Instruction &I) { 2623 if (!isa<LoadInst>(I) && !isa<StoreInst>(I)) 2624 return; 2625 // These are the attributes that are still valid on loads and stores after 2626 // RS4GC. 2627 // The metadata implying dereferenceability and noalias are (conservatively) 2628 // dropped. This is because semantically, after RewriteStatepointsForGC runs, 2629 // all calls to gc.statepoint "free" the entire heap. Also, gc.statepoint can 2630 // touch the entire heap including noalias objects. Note: The reasoning is 2631 // same as stripping the dereferenceability and noalias attributes that are 2632 // analogous to the metadata counterparts. 2633 // We also drop the invariant.load metadata on the load because that metadata 2634 // implies the address operand to the load points to memory that is never 2635 // changed once it became dereferenceable. This is no longer true after RS4GC. 2636 // Similar reasoning applies to invariant.group metadata, which applies to 2637 // loads within a group. 2638 unsigned ValidMetadataAfterRS4GC[] = {LLVMContext::MD_tbaa, 2639 LLVMContext::MD_range, 2640 LLVMContext::MD_alias_scope, 2641 LLVMContext::MD_nontemporal, 2642 LLVMContext::MD_nonnull, 2643 LLVMContext::MD_align, 2644 LLVMContext::MD_type}; 2645 2646 // Drops all metadata on the instruction other than ValidMetadataAfterRS4GC. 
2647 I.dropUnknownNonDebugMetadata(ValidMetadataAfterRS4GC); 2648 } 2649 2650 static void stripNonValidDataFromBody(Function &F) { 2651 if (F.empty()) 2652 return; 2653 2654 LLVMContext &Ctx = F.getContext(); 2655 MDBuilder Builder(Ctx); 2656 2657 // Set of invariantstart instructions that we need to remove. 2658 // Use this to avoid invalidating the instruction iterator. 2659 SmallVector<IntrinsicInst*, 12> InvariantStartInstructions; 2660 2661 for (Instruction &I : instructions(F)) { 2662 // invariant.start on memory location implies that the referenced memory 2663 // location is constant and unchanging. This is no longer true after 2664 // RewriteStatepointsForGC runs because there can be calls to gc.statepoint 2665 // which frees the entire heap and the presence of invariant.start allows 2666 // the optimizer to sink the load of a memory location past a statepoint, 2667 // which is incorrect. 2668 if (auto *II = dyn_cast<IntrinsicInst>(&I)) 2669 if (II->getIntrinsicID() == Intrinsic::invariant_start) { 2670 InvariantStartInstructions.push_back(II); 2671 continue; 2672 } 2673 2674 if (MDNode *Tag = I.getMetadata(LLVMContext::MD_tbaa)) { 2675 MDNode *MutableTBAA = Builder.createMutableTBAAAccessTag(Tag); 2676 I.setMetadata(LLVMContext::MD_tbaa, MutableTBAA); 2677 } 2678 2679 stripInvalidMetadataFromInstruction(I); 2680 2681 if (auto *Call = dyn_cast<CallBase>(&I)) { 2682 for (int i = 0, e = Call->arg_size(); i != e; i++) 2683 if (isa<PointerType>(Call->getArgOperand(i)->getType())) 2684 RemoveNonValidAttrAtIndex(Ctx, *Call, 2685 i + AttributeList::FirstArgIndex); 2686 if (isa<PointerType>(Call->getType())) 2687 RemoveNonValidAttrAtIndex(Ctx, *Call, AttributeList::ReturnIndex); 2688 } 2689 } 2690 2691 // Delete the invariant.start instructions and RAUW undef. 2692 for (auto *II : InvariantStartInstructions) { 2693 II->replaceAllUsesWith(UndefValue::get(II->getType())); 2694 II->eraseFromParent(); 2695 } 2696 } 2697 2698 /// Returns true if this function should be rewritten by this pass. The main 2699 /// point of this function is as an extension point for custom logic. 2700 static bool shouldRewriteStatepointsIn(Function &F) { 2701 // TODO: This should check the GCStrategy 2702 if (F.hasGC()) { 2703 const auto &FunctionGCName = F.getGC(); 2704 const StringRef StatepointExampleName("statepoint-example"); 2705 const StringRef CoreCLRName("coreclr"); 2706 return (StatepointExampleName == FunctionGCName) || 2707 (CoreCLRName == FunctionGCName); 2708 } else 2709 return false; 2710 } 2711 2712 static void stripNonValidData(Module &M) { 2713 #ifndef NDEBUG 2714 assert(llvm::any_of(M, shouldRewriteStatepointsIn) && "precondition!"); 2715 #endif 2716 2717 for (Function &F : M) 2718 stripNonValidAttributesFromPrototype(F); 2719 2720 for (Function &F : M) 2721 stripNonValidDataFromBody(F); 2722 } 2723 2724 bool RewriteStatepointsForGC::runOnFunction(Function &F, DominatorTree &DT, 2725 TargetTransformInfo &TTI, 2726 const TargetLibraryInfo &TLI) { 2727 assert(!F.isDeclaration() && !F.empty() && 2728 "need function body to rewrite statepoints in"); 2729 assert(shouldRewriteStatepointsIn(F) && "mismatch in rewrite decision"); 2730 2731 auto NeedsRewrite = [&TLI](Instruction &I) { 2732 if (const auto *Call = dyn_cast<CallBase>(&I)) { 2733 if (isa<GCStatepointInst>(Call)) 2734 return false; 2735 if (callsGCLeafFunction(Call, TLI)) 2736 return false; 2737 2738 // Normally it's up to the frontend to make sure that non-leaf calls also 2739 // have proper deopt state if it is required. 
We make an exception for 2740 // element atomic memcpy/memmove intrinsics here. Unlike other intrinsics 2741 // these are non-leaf by default. They might be generated by the optimizer 2742 // which doesn't know how to produce a proper deopt state. So if we see a 2743 // non-leaf memcpy/memmove without deopt state just treat it as a leaf 2744 // copy and don't produce a statepoint. 2745 if (!AllowStatepointWithNoDeoptInfo && 2746 !Call->getOperandBundle(LLVMContext::OB_deopt)) { 2747 assert((isa<AtomicMemCpyInst>(Call) || isa<AtomicMemMoveInst>(Call)) && 2748 "Don't expect any other calls here!"); 2749 return false; 2750 } 2751 return true; 2752 } 2753 return false; 2754 }; 2755 2756 // Delete any unreachable statepoints so that we don't have unrewritten 2757 // statepoints surviving this pass. This makes testing easier and the 2758 // resulting IR less confusing to human readers. 2759 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy); 2760 bool MadeChange = removeUnreachableBlocks(F, &DTU); 2761 // Flush the Dominator Tree. 2762 DTU.getDomTree(); 2763 2764 // Gather all the statepoints which need rewritten. Be careful to only 2765 // consider those in reachable code since we need to ask dominance queries 2766 // when rewriting. We'll delete the unreachable ones in a moment. 2767 SmallVector<CallBase *, 64> ParsePointNeeded; 2768 for (Instruction &I : instructions(F)) { 2769 // TODO: only the ones with the flag set! 2770 if (NeedsRewrite(I)) { 2771 // NOTE removeUnreachableBlocks() is stronger than 2772 // DominatorTree::isReachableFromEntry(). In other words 2773 // removeUnreachableBlocks can remove some blocks for which 2774 // isReachableFromEntry() returns true. 2775 assert(DT.isReachableFromEntry(I.getParent()) && 2776 "no unreachable blocks expected"); 2777 ParsePointNeeded.push_back(cast<CallBase>(&I)); 2778 } 2779 } 2780 2781 // Return early if no work to do. 2782 if (ParsePointNeeded.empty()) 2783 return MadeChange; 2784 2785 // As a prepass, go ahead and aggressively destroy single entry phi nodes. 2786 // These are created by LCSSA. They have the effect of increasing the size 2787 // of liveness sets for no good reason. It may be harder to do this post 2788 // insertion since relocations and base phis can confuse things. 2789 for (BasicBlock &BB : F) 2790 if (BB.getUniquePredecessor()) 2791 MadeChange |= FoldSingleEntryPHINodes(&BB); 2792 2793 // Before we start introducing relocations, we want to tweak the IR a bit to 2794 // avoid unfortunate code generation effects. The main example is that we 2795 // want to try to make sure the comparison feeding a branch is after any 2796 // safepoints. Otherwise, we end up with a comparison of pre-relocation 2797 // values feeding a branch after relocation. This is semantically correct, 2798 // but results in extra register pressure since both the pre-relocation and 2799 // post-relocation copies must be available in registers. For code without 2800 // relocations this is handled elsewhere, but teaching the scheduler to 2801 // reverse the transform we're about to do would be slightly complex. 2802 // Note: This may extend the live range of the inputs to the icmp and thus 2803 // increase the liveset of any statepoint we move over. This is profitable 2804 // as long as all statepoints are in rare blocks. If we had in-register 2805 // lowering for live values this would be a much safer transform. 
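  // For example (illustrative), given
  //   %cmp = icmp eq i8 addrspace(1)* %a, %b
  //   call void @foo()                     ; will become a statepoint
  //   br i1 %cmp, label %taken, label %untaken
  // the loop below moves %cmp to immediately before the branch so that the
  // comparison is emitted after any statepoint inserted in this block.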
2806 auto getConditionInst = [](Instruction *TI) -> Instruction * { 2807 if (auto *BI = dyn_cast<BranchInst>(TI)) 2808 if (BI->isConditional()) 2809 return dyn_cast<Instruction>(BI->getCondition()); 2810 // TODO: Extend this to handle switches 2811 return nullptr; 2812 }; 2813 for (BasicBlock &BB : F) { 2814 Instruction *TI = BB.getTerminator(); 2815 if (auto *Cond = getConditionInst(TI)) 2816 // TODO: Handle more than just ICmps here. We should be able to move 2817 // most instructions without side effects or memory access. 2818 if (isa<ICmpInst>(Cond) && Cond->hasOneUse()) { 2819 MadeChange = true; 2820 Cond->moveBefore(TI); 2821 } 2822 } 2823 2824 // Nasty workaround - The base computation code in the main algorithm doesn't 2825 // consider the fact that a GEP can be used to convert a scalar to a vector. 2826 // The right fix for this is to integrate GEPs into the base rewriting 2827 // algorithm properly, this is just a short term workaround to prevent 2828 // crashes by canonicalizing such GEPs into fully vector GEPs. 2829 for (Instruction &I : instructions(F)) { 2830 if (!isa<GetElementPtrInst>(I)) 2831 continue; 2832 2833 unsigned VF = 0; 2834 for (unsigned i = 0; i < I.getNumOperands(); i++) 2835 if (auto *OpndVTy = dyn_cast<VectorType>(I.getOperand(i)->getType())) { 2836 assert(VF == 0 || 2837 VF == cast<FixedVectorType>(OpndVTy)->getNumElements()); 2838 VF = cast<FixedVectorType>(OpndVTy)->getNumElements(); 2839 } 2840 2841 // It's the vector to scalar traversal through the pointer operand which 2842 // confuses base pointer rewriting, so limit ourselves to that case. 2843 if (!I.getOperand(0)->getType()->isVectorTy() && VF != 0) { 2844 IRBuilder<> B(&I); 2845 auto *Splat = B.CreateVectorSplat(VF, I.getOperand(0)); 2846 I.setOperand(0, Splat); 2847 MadeChange = true; 2848 } 2849 } 2850 2851 MadeChange |= insertParsePoints(F, DT, TTI, ParsePointNeeded); 2852 return MadeChange; 2853 } 2854 2855 // liveness computation via standard dataflow 2856 // ------------------------------------------------------------------- 2857 2858 // TODO: Consider using bitvectors for liveness, the set of potentially 2859 // interesting values should be small and easy to pre-compute. 2860 2861 /// Compute the live-in set for the location rbegin starting from 2862 /// the live-out set of the basic block 2863 static void computeLiveInValues(BasicBlock::reverse_iterator Begin, 2864 BasicBlock::reverse_iterator End, 2865 SetVector<Value *> &LiveTmp) { 2866 for (auto &I : make_range(Begin, End)) { 2867 // KILL/Def - Remove this definition from LiveIn 2868 LiveTmp.remove(&I); 2869 2870 // Don't consider *uses* in PHI nodes, we handle their contribution to 2871 // predecessor blocks when we seed the LiveOut sets 2872 if (isa<PHINode>(I)) 2873 continue; 2874 2875 // USE - Add to the LiveIn set for this instruction 2876 for (Value *V : I.operands()) { 2877 assert(!isUnhandledGCPointerType(V->getType()) && 2878 "support for FCA unimplemented"); 2879 if (isHandledGCPointerType(V->getType()) && !isa<Constant>(V)) { 2880 // The choice to exclude all things constant here is slightly subtle. 2881 // There are two independent reasons: 2882 // - We assume that things which are constant (from LLVM's definition) 2883 // do not move at runtime. For example, the address of a global 2884 // variable is fixed, even though it's contents may not be. 2885 // - Second, we can't disallow arbitrary inttoptr constants even 2886 // if the language frontend does. 
        //   Optimization passes are free to
        //   locally exploit facts without respect to global reachability. This
        //   can create sections of code which are dynamically unreachable and
        //   contain just about anything. (see constants.ll in tests)
        LiveTmp.insert(V);
      }
    }
  }
}

static void computeLiveOutSeed(BasicBlock *BB, SetVector<Value *> &LiveTmp) {
  for (BasicBlock *Succ : successors(BB)) {
    for (auto &I : *Succ) {
      PHINode *PN = dyn_cast<PHINode>(&I);
      if (!PN)
        break;

      Value *V = PN->getIncomingValueForBlock(BB);
      assert(!isUnhandledGCPointerType(V->getType()) &&
             "support for FCA unimplemented");
      if (isHandledGCPointerType(V->getType()) && !isa<Constant>(V))
        LiveTmp.insert(V);
    }
  }
}

static SetVector<Value *> computeKillSet(BasicBlock *BB) {
  SetVector<Value *> KillSet;
  for (Instruction &I : *BB)
    if (isHandledGCPointerType(I.getType()))
      KillSet.insert(&I);
  return KillSet;
}

#ifndef NDEBUG
/// Check that the items in 'Live' dominate 'TI'. This is used as a basic
/// sanity check for the liveness computation.
static void checkBasicSSA(DominatorTree &DT, SetVector<Value *> &Live,
                          Instruction *TI, bool TermOkay = false) {
  for (Value *V : Live) {
    if (auto *I = dyn_cast<Instruction>(V)) {
      // The terminator can be a member of the LiveOut set. LLVM's definition
      // of instruction dominance states that V does not dominate itself. As
      // such, we need to special case this to allow it.
      if (TermOkay && TI == I)
        continue;
      assert(DT.dominates(I, TI) &&
             "basic SSA liveness expectation violated by liveness analysis");
    }
  }
}

/// Check that all the liveness sets used during the computation of liveness
/// obey basic SSA properties. This is useful for finding cases where we miss
/// a def.
static void checkBasicSSA(DominatorTree &DT, GCPtrLivenessData &Data,
                          BasicBlock &BB) {
  checkBasicSSA(DT, Data.LiveSet[&BB], BB.getTerminator());
  checkBasicSSA(DT, Data.LiveOut[&BB], BB.getTerminator(), true);
  checkBasicSSA(DT, Data.LiveIn[&BB], BB.getTerminator());
}
#endif

static void computeLiveInValues(DominatorTree &DT, Function &F,
                                GCPtrLivenessData &Data) {
  SmallSetVector<BasicBlock *, 32> Worklist;

  // Seed the liveness for each individual block
  for (BasicBlock &BB : F) {
    Data.KillSet[&BB] = computeKillSet(&BB);
    Data.LiveSet[&BB].clear();
    computeLiveInValues(BB.rbegin(), BB.rend(), Data.LiveSet[&BB]);

#ifndef NDEBUG
    for (Value *Kill : Data.KillSet[&BB])
      assert(!Data.LiveSet[&BB].count(Kill) && "live set contains kill");
#endif

    Data.LiveOut[&BB] = SetVector<Value *>();
    computeLiveOutSeed(&BB, Data.LiveOut[&BB]);
    Data.LiveIn[&BB] = Data.LiveSet[&BB];
    Data.LiveIn[&BB].set_union(Data.LiveOut[&BB]);
    Data.LiveIn[&BB].set_subtract(Data.KillSet[&BB]);
    if (!Data.LiveIn[&BB].empty())
      Worklist.insert(pred_begin(&BB), pred_end(&BB));
  }

  // Propagate that liveness until stable
  while (!Worklist.empty()) {
    BasicBlock *BB = Worklist.pop_back_val();

    // Compute our new liveout set, then exit early if it hasn't changed
    // despite the contribution of our successors.
    SetVector<Value *> LiveOut = Data.LiveOut[BB];
    const auto OldLiveOutSize = LiveOut.size();
    for (BasicBlock *Succ : successors(BB)) {
      assert(Data.LiveIn.count(Succ));
      LiveOut.set_union(Data.LiveIn[Succ]);
    }
    // assert: OldLiveOut is a subset of LiveOut
    if (OldLiveOutSize == LiveOut.size()) {
      // If the sets are the same size, then we didn't actually add anything
      // when unioning our successors' LiveIn. Thus, the LiveIn of this block
      // hasn't changed.
      continue;
    }
    Data.LiveOut[BB] = LiveOut;

    // Apply the effects of this basic block
    SetVector<Value *> LiveTmp = LiveOut;
    LiveTmp.set_union(Data.LiveSet[BB]);
    LiveTmp.set_subtract(Data.KillSet[BB]);

    assert(Data.LiveIn.count(BB));
    const SetVector<Value *> &OldLiveIn = Data.LiveIn[BB];
    // assert: OldLiveIn is a subset of LiveTmp
    if (OldLiveIn.size() != LiveTmp.size()) {
      Data.LiveIn[BB] = LiveTmp;
      Worklist.insert(pred_begin(BB), pred_end(BB));
    }
  } // while (!Worklist.empty())

#ifndef NDEBUG
  // Sanity check our output against SSA properties. This helps catch any
  // missing kills during the above iteration.
  for (BasicBlock &BB : F)
    checkBasicSSA(DT, Data, BB);
#endif
}

static void findLiveSetAtInst(Instruction *Inst, GCPtrLivenessData &Data,
                              StatepointLiveSetTy &Out) {
  BasicBlock *BB = Inst->getParent();

  // Note: The copy is intentional and required
  assert(Data.LiveOut.count(BB));
  SetVector<Value *> LiveOut = Data.LiveOut[BB];

  // We want to handle the statepoint itself oddly. Its
  // call result is not live (normal), nor are its arguments
  // (unless they're used again later). This adjustment is
  // specifically what we need to relocate
  computeLiveInValues(BB->rbegin(), ++Inst->getIterator().getReverse(),
                      LiveOut);
  LiveOut.remove(Inst);
  Out.insert(LiveOut.begin(), LiveOut.end());
}

static void recomputeLiveInValues(GCPtrLivenessData &RevisedLivenessData,
                                  CallBase *Call,
                                  PartiallyConstructedSafepointRecord &Info) {
  StatepointLiveSetTy Updated;
  findLiveSetAtInst(Call, RevisedLivenessData, Updated);

  // We may have base pointers which are now live that weren't before. We need
  // to update the PointerToBase structure to reflect this.
  for (auto V : Updated)
    Info.PointerToBase.insert({V, V});

#ifndef NDEBUG
  for (auto V : Updated)
    assert(Info.PointerToBase.count(V) &&
           "Must be able to find base for live value!");
#endif

  // Remove any stale base mappings - this can happen since our liveness is
  // more precise than the one inherent in the base pointer analysis.
  DenseSet<Value *> ToErase;
  for (auto KVPair : Info.PointerToBase)
    if (!Updated.count(KVPair.first))
      ToErase.insert(KVPair.first);

  for (auto *V : ToErase)
    Info.PointerToBase.erase(V);

#ifndef NDEBUG
  for (auto KVPair : Info.PointerToBase)
    assert(Updated.count(KVPair.first) && "record for non-live value");
#endif

  Info.LiveSet = Updated;
}
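// For reference, the liveness routines above implement the standard backward
// dataflow (descriptive names below, not identifiers from this file):
//
//   LiveOut(BB) = union of LiveIn(S) over each successor S of BB
//                 (plus the values BB feeds into successor PHIs, which are
//                 seeded separately by computeLiveOutSeed)
//   LiveIn(BB)  = (Gen(BB) union LiveOut(BB)) - Kill(BB)
//
// where Gen(BB) corresponds to Data.LiveSet[BB], the GC pointer uses collected
// by the per-block backward walk, and Kill(BB) corresponds to
// Data.KillSet[BB], the GC pointer values defined in BB. computeLiveInValues
// iterates these equations over a worklist until a fixed point is reached.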