1 //===- RewriteStatepointsForGC.cpp - Make GC relocations explicit ---------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // Rewrite call/invoke instructions so as to make potential relocations 10 // performed by the garbage collector explicit in the IR. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "llvm/Transforms/Scalar/RewriteStatepointsForGC.h" 15 16 #include "llvm/ADT/ArrayRef.h" 17 #include "llvm/ADT/DenseMap.h" 18 #include "llvm/ADT/DenseSet.h" 19 #include "llvm/ADT/MapVector.h" 20 #include "llvm/ADT/None.h" 21 #include "llvm/ADT/Optional.h" 22 #include "llvm/ADT/STLExtras.h" 23 #include "llvm/ADT/SetVector.h" 24 #include "llvm/ADT/SmallSet.h" 25 #include "llvm/ADT/SmallVector.h" 26 #include "llvm/ADT/StringRef.h" 27 #include "llvm/ADT/iterator_range.h" 28 #include "llvm/Analysis/DomTreeUpdater.h" 29 #include "llvm/Analysis/TargetLibraryInfo.h" 30 #include "llvm/Analysis/TargetTransformInfo.h" 31 #include "llvm/IR/Argument.h" 32 #include "llvm/IR/Attributes.h" 33 #include "llvm/IR/BasicBlock.h" 34 #include "llvm/IR/CallingConv.h" 35 #include "llvm/IR/Constant.h" 36 #include "llvm/IR/Constants.h" 37 #include "llvm/IR/DataLayout.h" 38 #include "llvm/IR/DerivedTypes.h" 39 #include "llvm/IR/Dominators.h" 40 #include "llvm/IR/Function.h" 41 #include "llvm/IR/IRBuilder.h" 42 #include "llvm/IR/InstIterator.h" 43 #include "llvm/IR/InstrTypes.h" 44 #include "llvm/IR/Instruction.h" 45 #include "llvm/IR/Instructions.h" 46 #include "llvm/IR/IntrinsicInst.h" 47 #include "llvm/IR/Intrinsics.h" 48 #include "llvm/IR/LLVMContext.h" 49 #include "llvm/IR/MDBuilder.h" 50 #include "llvm/IR/Metadata.h" 51 #include "llvm/IR/Module.h" 52 #include "llvm/IR/Statepoint.h" 53 #include "llvm/IR/Type.h" 54 #include "llvm/IR/User.h" 55 #include "llvm/IR/Value.h" 56 #include "llvm/IR/ValueHandle.h" 57 #include "llvm/InitializePasses.h" 58 #include "llvm/Pass.h" 59 #include "llvm/Support/Casting.h" 60 #include "llvm/Support/CommandLine.h" 61 #include "llvm/Support/Compiler.h" 62 #include "llvm/Support/Debug.h" 63 #include "llvm/Support/ErrorHandling.h" 64 #include "llvm/Support/raw_ostream.h" 65 #include "llvm/Transforms/Scalar.h" 66 #include "llvm/Transforms/Utils/BasicBlockUtils.h" 67 #include "llvm/Transforms/Utils/Local.h" 68 #include "llvm/Transforms/Utils/PromoteMemToReg.h" 69 #include <algorithm> 70 #include <cassert> 71 #include <cstddef> 72 #include <cstdint> 73 #include <iterator> 74 #include <set> 75 #include <string> 76 #include <utility> 77 #include <vector> 78 79 #define DEBUG_TYPE "rewrite-statepoints-for-gc" 80 81 using namespace llvm; 82 83 // Print the liveset found at the insert location 84 static cl::opt<bool> PrintLiveSet("spp-print-liveset", cl::Hidden, 85 cl::init(false)); 86 static cl::opt<bool> PrintLiveSetSize("spp-print-liveset-size", cl::Hidden, 87 cl::init(false)); 88 89 // Print out the base pointers for debugging 90 static cl::opt<bool> PrintBasePointers("spp-print-base-pointers", cl::Hidden, 91 cl::init(false)); 92 93 // Cost threshold measuring when it is profitable to rematerialize value instead 94 // of relocating it 95 static cl::opt<unsigned> 96 RematerializationThreshold("spp-rematerialization-threshold", cl::Hidden, 97 cl::init(6)); 98 99 #ifdef 
EXPENSIVE_CHECKS 100 static bool ClobberNonLive = true; 101 #else 102 static bool ClobberNonLive = false; 103 #endif 104 105 static cl::opt<bool, true> ClobberNonLiveOverride("rs4gc-clobber-non-live", 106 cl::location(ClobberNonLive), 107 cl::Hidden); 108 109 static cl::opt<bool> 110 AllowStatepointWithNoDeoptInfo("rs4gc-allow-statepoint-with-no-deopt-info", 111 cl::Hidden, cl::init(true)); 112 113 /// The IR fed into RewriteStatepointsForGC may have had attributes and 114 /// metadata implying dereferenceability that are no longer valid/correct after 115 /// RewriteStatepointsForGC has run. This is because semantically, after 116 /// RewriteStatepointsForGC runs, all calls to gc.statepoint "free" the entire 117 /// heap. stripNonValidData (conservatively) restores 118 /// correctness by erasing all attributes in the module that externally imply 119 /// dereferenceability. Similar reasoning also applies to the noalias 120 /// attributes and metadata. gc.statepoint can touch the entire heap including 121 /// noalias objects. 122 /// Apart from attributes and metadata, we also remove instructions that imply 123 /// constant physical memory: llvm.invariant.start. 124 static void stripNonValidData(Module &M); 125 126 static bool shouldRewriteStatepointsIn(Function &F); 127 128 PreservedAnalyses RewriteStatepointsForGC::run(Module &M, 129 ModuleAnalysisManager &AM) { 130 bool Changed = false; 131 auto &FAM = AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager(); 132 for (Function &F : M) { 133 // Nothing to do for declarations. 134 if (F.isDeclaration() || F.empty()) 135 continue; 136 137 // Policy choice says not to rewrite - the most common reason is that we're 138 // compiling code without a GCStrategy. 139 if (!shouldRewriteStatepointsIn(F)) 140 continue; 141 142 auto &DT = FAM.getResult<DominatorTreeAnalysis>(F); 143 auto &TTI = FAM.getResult<TargetIRAnalysis>(F); 144 auto &TLI = FAM.getResult<TargetLibraryAnalysis>(F); 145 Changed |= runOnFunction(F, DT, TTI, TLI); 146 } 147 if (!Changed) 148 return PreservedAnalyses::all(); 149 150 // stripNonValidData asserts that shouldRewriteStatepointsIn 151 // returns true for at least one function in the module. Since at least 152 // one function changed, we know that the precondition is satisfied. 153 stripNonValidData(M); 154 155 PreservedAnalyses PA; 156 PA.preserve<TargetIRAnalysis>(); 157 PA.preserve<TargetLibraryAnalysis>(); 158 return PA; 159 } 160 161 namespace { 162 163 class RewriteStatepointsForGCLegacyPass : public ModulePass { 164 RewriteStatepointsForGC Impl; 165 166 public: 167 static char ID; // Pass identification, replacement for typeid 168 169 RewriteStatepointsForGCLegacyPass() : ModulePass(ID), Impl() { 170 initializeRewriteStatepointsForGCLegacyPassPass( 171 *PassRegistry::getPassRegistry()); 172 } 173 174 bool runOnModule(Module &M) override { 175 bool Changed = false; 176 for (Function &F : M) { 177 // Nothing to do for declarations. 178 if (F.isDeclaration() || F.empty()) 179 continue; 180 181 // Policy choice says not to rewrite - the most common reason is that 182 // we're compiling code without a GCStrategy. 
183 if (!shouldRewriteStatepointsIn(F)) 184 continue; 185 186 TargetTransformInfo &TTI = 187 getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 188 const TargetLibraryInfo &TLI = 189 getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F); 190 auto &DT = getAnalysis<DominatorTreeWrapperPass>(F).getDomTree(); 191 192 Changed |= Impl.runOnFunction(F, DT, TTI, TLI); 193 } 194 195 if (!Changed) 196 return false; 197 198 // stripNonValidData asserts that shouldRewriteStatepointsIn 199 // returns true for at least one function in the module. Since at least 200 // one function changed, we know that the precondition is satisfied. 201 stripNonValidData(M); 202 return true; 203 } 204 205 void getAnalysisUsage(AnalysisUsage &AU) const override { 206 // We add and rewrite a bunch of instructions, but don't really do much 207 // else. We could in theory preserve a lot more analyses here. 208 AU.addRequired<DominatorTreeWrapperPass>(); 209 AU.addRequired<TargetTransformInfoWrapperPass>(); 210 AU.addRequired<TargetLibraryInfoWrapperPass>(); 211 } 212 }; 213 214 } // end anonymous namespace 215 216 char RewriteStatepointsForGCLegacyPass::ID = 0; 217 218 ModulePass *llvm::createRewriteStatepointsForGCLegacyPass() { 219 return new RewriteStatepointsForGCLegacyPass(); 220 } 221 222 INITIALIZE_PASS_BEGIN(RewriteStatepointsForGCLegacyPass, 223 "rewrite-statepoints-for-gc", 224 "Make relocations explicit at statepoints", false, false) 225 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 226 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 227 INITIALIZE_PASS_END(RewriteStatepointsForGCLegacyPass, 228 "rewrite-statepoints-for-gc", 229 "Make relocations explicit at statepoints", false, false) 230 231 namespace { 232 233 struct GCPtrLivenessData { 234 /// Values defined in this block. 235 MapVector<BasicBlock *, SetVector<Value *>> KillSet; 236 237 /// Values used in this block (and thus live); does not included values 238 /// killed within this block. 239 MapVector<BasicBlock *, SetVector<Value *>> LiveSet; 240 241 /// Values live into this basic block (i.e. used by any 242 /// instruction in this basic block or ones reachable from here) 243 MapVector<BasicBlock *, SetVector<Value *>> LiveIn; 244 245 /// Values live out of this basic block (i.e. live into 246 /// any successor block) 247 MapVector<BasicBlock *, SetVector<Value *>> LiveOut; 248 }; 249 250 // The type of the internal cache used inside the findBasePointers family 251 // of functions. From the callers perspective, this is an opaque type and 252 // should not be inspected. 253 // 254 // In the actual implementation this caches two relations: 255 // - The base relation itself (i.e. this pointer is based on that one) 256 // - The base defining value relation (i.e. before base_phi insertion) 257 // Generally, after the execution of a full findBasePointer call, only the 258 // base relation will remain. Internally, we add a mixture of the two 259 // types, then update all the second type to the first type 260 using DefiningValueMapTy = MapVector<Value *, Value *>; 261 using PointerToBaseTy = MapVector<Value *, Value *>; 262 using StatepointLiveSetTy = SetVector<Value *>; 263 using RematerializedValueMapTy = 264 MapVector<AssertingVH<Instruction>, AssertingVH<Value>>; 265 266 struct PartiallyConstructedSafepointRecord { 267 /// The set of values known to be live across this safepoint 268 StatepointLiveSetTy LiveSet; 269 270 /// The *new* gc.statepoint instruction itself. 
  /// This produces the token
  /// that normal path gc.relocates and the gc.result are tied to.
  GCStatepointInst *StatepointToken;

  /// Instruction to which exceptional gc relocates are attached.
  /// Makes it easier to iterate through them during relocationViaAlloca.
  Instruction *UnwindToken;

  /// Record live values that are rematerialized instead of relocated.
  /// They are not included in the 'LiveSet' field.
  /// Maps each rematerialized copy to its original value.
  RematerializedValueMapTy RematerializedValues;
};

struct RematerizlizationCandidateRecord {
  // Chain from derived pointer to base.
  SmallVector<Instruction *, 3> ChainToBase;
  // Original base.
  Value *RootOfChain;
  // Cost of chain.
  InstructionCost Cost;
};
using RematCandTy = MapVector<Value *, RematerizlizationCandidateRecord>;

} // end anonymous namespace

static ArrayRef<Use> GetDeoptBundleOperands(const CallBase *Call) {
  Optional<OperandBundleUse> DeoptBundle =
      Call->getOperandBundle(LLVMContext::OB_deopt);

  if (!DeoptBundle.hasValue()) {
    assert(AllowStatepointWithNoDeoptInfo &&
           "Found non-leaf call without deopt info!");
    return None;
  }

  return DeoptBundle.getValue().Inputs;
}

/// Compute the live-in set for every basic block in the function
static void computeLiveInValues(DominatorTree &DT, Function &F,
                                GCPtrLivenessData &Data);

/// Given results from the dataflow liveness computation, find the set of live
/// Values at a particular instruction.
static void findLiveSetAtInst(Instruction *inst, GCPtrLivenessData &Data,
                              StatepointLiveSetTy &out);

// TODO: Once we can get to the GCStrategy, this becomes
// Optional<bool> isGCManagedPointer(const Type *Ty) const override {

static bool isGCPointerType(Type *T) {
  if (auto *PT = dyn_cast<PointerType>(T))
    // For the sake of this example GC, we arbitrarily pick addrspace(1) as our
    // GC managed heap. We know that a pointer into this heap needs to be
    // updated and that no other pointer does.
    return PT->getAddressSpace() == 1;
  return false;
}

// Return true if this type is one which a) is a gc pointer or contains a GC
// pointer and b) is of a type this code expects to encounter as a live value.
// (The insertion code will assert that a type which matches (a) and not (b)
// is not encountered.)
static bool isHandledGCPointerType(Type *T) {
  // We fully support gc pointers
  if (isGCPointerType(T))
    return true;
  // We partially support vectors of gc pointers. The code will assert if it
  // can't handle something.
  if (auto VT = dyn_cast<VectorType>(T))
    if (isGCPointerType(VT->getElementType()))
      return true;
  return false;
}

#ifndef NDEBUG
/// Returns true if this type contains a gc pointer whether we know how to
/// handle that type or not.
349 static bool containsGCPtrType(Type *Ty) { 350 if (isGCPointerType(Ty)) 351 return true; 352 if (VectorType *VT = dyn_cast<VectorType>(Ty)) 353 return isGCPointerType(VT->getScalarType()); 354 if (ArrayType *AT = dyn_cast<ArrayType>(Ty)) 355 return containsGCPtrType(AT->getElementType()); 356 if (StructType *ST = dyn_cast<StructType>(Ty)) 357 return llvm::any_of(ST->elements(), containsGCPtrType); 358 return false; 359 } 360 361 // Returns true if this is a type which a) is a gc pointer or contains a GC 362 // pointer and b) is of a type which the code doesn't expect (i.e. first class 363 // aggregates). Used to trip assertions. 364 static bool isUnhandledGCPointerType(Type *Ty) { 365 return containsGCPtrType(Ty) && !isHandledGCPointerType(Ty); 366 } 367 #endif 368 369 // Return the name of the value suffixed with the provided value, or if the 370 // value didn't have a name, the default value specified. 371 static std::string suffixed_name_or(Value *V, StringRef Suffix, 372 StringRef DefaultName) { 373 return V->hasName() ? (V->getName() + Suffix).str() : DefaultName.str(); 374 } 375 376 // Conservatively identifies any definitions which might be live at the 377 // given instruction. The analysis is performed immediately before the 378 // given instruction. Values defined by that instruction are not considered 379 // live. Values used by that instruction are considered live. 380 static void analyzeParsePointLiveness( 381 DominatorTree &DT, GCPtrLivenessData &OriginalLivenessData, CallBase *Call, 382 PartiallyConstructedSafepointRecord &Result) { 383 StatepointLiveSetTy LiveSet; 384 findLiveSetAtInst(Call, OriginalLivenessData, LiveSet); 385 386 if (PrintLiveSet) { 387 dbgs() << "Live Variables:\n"; 388 for (Value *V : LiveSet) 389 dbgs() << " " << V->getName() << " " << *V << "\n"; 390 } 391 if (PrintLiveSetSize) { 392 dbgs() << "Safepoint For: " << Call->getCalledOperand()->getName() << "\n"; 393 dbgs() << "Number live values: " << LiveSet.size() << "\n"; 394 } 395 Result.LiveSet = LiveSet; 396 } 397 398 // Returns true is V is a knownBaseResult. 399 static bool isKnownBaseResult(Value *V); 400 401 // Returns true if V is a BaseResult that already exists in the IR, i.e. it is 402 // not created by the findBasePointers algorithm. 403 static bool isOriginalBaseResult(Value *V); 404 405 namespace { 406 407 /// A single base defining value - An immediate base defining value for an 408 /// instruction 'Def' is an input to 'Def' whose base is also a base of 'Def'. 409 /// For instructions which have multiple pointer [vector] inputs or that 410 /// transition between vector and scalar types, there is no immediate base 411 /// defining value. The 'base defining value' for 'Def' is the transitive 412 /// closure of this relation stopping at the first instruction which has no 413 /// immediate base defining value. The b.d.v. might itself be a base pointer, 414 /// but it can also be an arbitrary derived pointer. 415 struct BaseDefiningValueResult { 416 /// Contains the value which is the base defining value. 417 Value * const BDV; 418 419 /// True if the base defining value is also known to be an actual base 420 /// pointer. 421 const bool IsKnownBase; 422 423 BaseDefiningValueResult(Value *BDV, bool IsKnownBase) 424 : BDV(BDV), IsKnownBase(IsKnownBase) { 425 #ifndef NDEBUG 426 // Check consistency between new and old means of checking whether a BDV is 427 // a base. 
    bool MustBeBase = isKnownBaseResult(BDV);
    assert(!MustBeBase || MustBeBase == IsKnownBase);
#endif
  }
};

} // end anonymous namespace

static BaseDefiningValueResult findBaseDefiningValue(Value *I);

/// Return a base defining value for the given vector instruction 'I'. As an
/// optimization, this method will try to determine when the value is already
/// known to be a base pointer. If this can be established, the IsKnownBase
/// field of the returned result will be true. Note that either a vector or a
/// pointer typed value can be returned. For the former, the vector returned
/// is a BDV (and possibly a base) of the entire vector 'I'. If the latter,
/// the returned pointer is a BDV (or possibly a base) for the particular
/// element in 'I'.
static BaseDefiningValueResult
findBaseDefiningValueOfVector(Value *I) {
  // Each case parallels findBaseDefiningValue below, see that code for
  // detailed motivation.

  if (isa<Argument>(I))
    // An incoming argument to the function is a base pointer
    return BaseDefiningValueResult(I, true);

  if (isa<Constant>(I))
    // Base of constant vector consists only of constant null pointers.
    // For reasoning see similar case inside 'findBaseDefiningValue' function.
    return BaseDefiningValueResult(ConstantAggregateZero::get(I->getType()),
                                   true);

  if (isa<LoadInst>(I))
    return BaseDefiningValueResult(I, true);

  if (isa<InsertElementInst>(I))
    // We don't know whether this vector contains entirely base pointers or
    // not. To be conservatively correct, we treat it as a BDV and will
    // duplicate code as needed to construct a parallel vector of bases.
    return BaseDefiningValueResult(I, false);

  if (isa<ShuffleVectorInst>(I))
    // We don't know whether this vector contains entirely base pointers or
    // not. To be conservatively correct, we treat it as a BDV and will
    // duplicate code as needed to construct a parallel vector of bases.
    // TODO: There are a number of local optimizations which could be applied
    // here for particular shufflevector patterns.
    return BaseDefiningValueResult(I, false);

  // The behavior of getelementptr instructions is the same for vector and
  // non-vector data types.
  if (auto *GEP = dyn_cast<GetElementPtrInst>(I))
    return findBaseDefiningValue(GEP->getPointerOperand());

  // If the pointer comes through a bitcast of a vector of pointers to
  // a vector of another type of pointer, then look through the bitcast
  if (auto *BC = dyn_cast<BitCastInst>(I))
    return findBaseDefiningValue(BC->getOperand(0));

  // We assume that functions in the source language only return base
  // pointers. This should probably be generalized via attributes to support
  // both source language and internal functions.
  if (isa<CallInst>(I) || isa<InvokeInst>(I))
    return BaseDefiningValueResult(I, true);

  // A PHI or Select is a base defining value. The outer findBasePointer
  // algorithm is responsible for constructing a base value for this BDV.
497 assert((isa<SelectInst>(I) || isa<PHINode>(I)) && 498 "unknown vector instruction - no base found for vector element"); 499 return BaseDefiningValueResult(I, false); 500 } 501 502 /// Helper function for findBasePointer - Will return a value which either a) 503 /// defines the base pointer for the input, b) blocks the simple search 504 /// (i.e. a PHI or Select of two derived pointers), or c) involves a change 505 /// from pointer to vector type or back. 506 static BaseDefiningValueResult findBaseDefiningValue(Value *I) { 507 assert(I->getType()->isPtrOrPtrVectorTy() && 508 "Illegal to ask for the base pointer of a non-pointer type"); 509 510 if (I->getType()->isVectorTy()) 511 return findBaseDefiningValueOfVector(I); 512 513 if (isa<Argument>(I)) 514 // An incoming argument to the function is a base pointer 515 // We should have never reached here if this argument isn't an gc value 516 return BaseDefiningValueResult(I, true); 517 518 if (isa<Constant>(I)) { 519 // We assume that objects with a constant base (e.g. a global) can't move 520 // and don't need to be reported to the collector because they are always 521 // live. Besides global references, all kinds of constants (e.g. undef, 522 // constant expressions, null pointers) can be introduced by the inliner or 523 // the optimizer, especially on dynamically dead paths. 524 // Here we treat all of them as having single null base. By doing this we 525 // trying to avoid problems reporting various conflicts in a form of 526 // "phi (const1, const2)" or "phi (const, regular gc ptr)". 527 // See constant.ll file for relevant test cases. 528 529 return BaseDefiningValueResult( 530 ConstantPointerNull::get(cast<PointerType>(I->getType())), true); 531 } 532 533 // inttoptrs in an integral address space are currently ill-defined. We 534 // treat them as defining base pointers here for consistency with the 535 // constant rule above and because we don't really have a better semantic 536 // to give them. Note that the optimizer is always free to insert undefined 537 // behavior on dynamically dead paths as well. 538 if (isa<IntToPtrInst>(I)) 539 return BaseDefiningValueResult(I, true); 540 541 if (CastInst *CI = dyn_cast<CastInst>(I)) { 542 Value *Def = CI->stripPointerCasts(); 543 // If stripping pointer casts changes the address space there is an 544 // addrspacecast in between. 545 assert(cast<PointerType>(Def->getType())->getAddressSpace() == 546 cast<PointerType>(CI->getType())->getAddressSpace() && 547 "unsupported addrspacecast"); 548 // If we find a cast instruction here, it means we've found a cast which is 549 // not simply a pointer cast (i.e. an inttoptr). We don't know how to 550 // handle int->ptr conversion. 551 assert(!isa<CastInst>(Def) && "shouldn't find another cast here"); 552 return findBaseDefiningValue(Def); 553 } 554 555 if (isa<LoadInst>(I)) 556 // The value loaded is an gc base itself 557 return BaseDefiningValueResult(I, true); 558 559 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) 560 // The base of this GEP is the base 561 return findBaseDefiningValue(GEP->getPointerOperand()); 562 563 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { 564 switch (II->getIntrinsicID()) { 565 default: 566 // fall through to general call handling 567 break; 568 case Intrinsic::experimental_gc_statepoint: 569 llvm_unreachable("statepoints don't produce pointers"); 570 case Intrinsic::experimental_gc_relocate: 571 // Rerunning safepoint insertion after safepoints are already 572 // inserted is not supported. 
      // It could probably be made to work,
      // but why are you doing this? There's no good reason.
      llvm_unreachable("repeat safepoint insertion is not supported");
    case Intrinsic::gcroot:
      // Currently, this mechanism hasn't been extended to work with gcroot.
      // There's no reason it couldn't be, but I haven't thought about the
      // implications much.
      llvm_unreachable(
          "interaction with the gcroot mechanism is not supported");
    case Intrinsic::experimental_gc_get_pointer_base:
      return findBaseDefiningValue(II->getOperand(0));
    }
  }
  // We assume that functions in the source language only return base
  // pointers. This should probably be generalized via attributes to support
  // both source language and internal functions.
  if (isa<CallInst>(I) || isa<InvokeInst>(I))
    return BaseDefiningValueResult(I, true);

  // TODO: I have absolutely no idea how to implement this part yet. It's not
  // necessarily hard, I just haven't really looked at it yet.
  assert(!isa<LandingPadInst>(I) && "Landing Pad is unimplemented");

  if (isa<AtomicCmpXchgInst>(I))
    // A CAS is effectively an atomic store and load combined under a
    // predicate. From the perspective of base pointers, we just treat it
    // like a load.
    return BaseDefiningValueResult(I, true);

  assert(!isa<AtomicRMWInst>(I) && "Xchg handled above, all others are "
                                   "binary ops which don't apply to pointers");

  // The aggregate ops. Aggregates can either be in the heap or on the
  // stack, but in either case, this is simply a field load. As a result,
  // this is a definition of the base just like a load is.
  if (isa<ExtractValueInst>(I))
    return BaseDefiningValueResult(I, true);

  // We should never see an insert vector since that would require we be
  // tracing back a struct value not a pointer value.
  assert(!isa<InsertValueInst>(I) &&
         "Base pointer for a struct is meaningless");

  // This value might have been generated by findBasePointer() called when
  // substituting gc.get.pointer.base() intrinsic.
  bool IsKnownBase =
      isa<Instruction>(I) && cast<Instruction>(I)->getMetadata("is_base_value");

  // An extractelement produces a base result exactly when its input does.
  // We may need to insert a parallel instruction to extract the appropriate
  // element out of the base vector corresponding to the input. Given this,
  // it's analogous to the phi and select case even though it's not a merge.
  if (isa<ExtractElementInst>(I))
    // Note: There are a lot of obvious peephole cases here. These are
    // deliberately handled after the main base pointer inference algorithm to
    // make writing test cases to exercise that code easier.
    return BaseDefiningValueResult(I, IsKnownBase);

  // The last two cases here don't return a base pointer. Instead, they
  // return a value which dynamically selects from among several base
  // derived pointers (each with its own base potentially). It's the job of
  // the caller to resolve these.
  assert((isa<SelectInst>(I) || isa<PHINode>(I)) &&
         "missing instruction case in findBaseDefiningValue");
  return BaseDefiningValueResult(I, IsKnownBase);
}

/// Returns the base defining value for this value.
640 static Value *findBaseDefiningValueCached(Value *I, DefiningValueMapTy &Cache) { 641 Value *&Cached = Cache[I]; 642 if (!Cached) { 643 Cached = findBaseDefiningValue(I).BDV; 644 LLVM_DEBUG(dbgs() << "fBDV-cached: " << I->getName() << " -> " 645 << Cached->getName() << "\n"); 646 } 647 assert(Cache[I] != nullptr); 648 return Cached; 649 } 650 651 /// Return a base pointer for this value if known. Otherwise, return it's 652 /// base defining value. 653 static Value *findBaseOrBDV(Value *I, DefiningValueMapTy &Cache) { 654 Value *Def = findBaseDefiningValueCached(I, Cache); 655 auto Found = Cache.find(Def); 656 if (Found != Cache.end()) { 657 // Either a base-of relation, or a self reference. Caller must check. 658 return Found->second; 659 } 660 // Only a BDV available 661 return Def; 662 } 663 664 /// This value is a base pointer that is not generated by RS4GC, i.e. it already 665 /// exists in the code. 666 static bool isOriginalBaseResult(Value *V) { 667 // no recursion possible 668 return !isa<PHINode>(V) && !isa<SelectInst>(V) && 669 !isa<ExtractElementInst>(V) && !isa<InsertElementInst>(V) && 670 !isa<ShuffleVectorInst>(V); 671 } 672 673 /// Given the result of a call to findBaseDefiningValue, or findBaseOrBDV, 674 /// is it known to be a base pointer? Or do we need to continue searching. 675 static bool isKnownBaseResult(Value *V) { 676 if (isOriginalBaseResult(V)) 677 return true; 678 if (isa<Instruction>(V) && 679 cast<Instruction>(V)->getMetadata("is_base_value")) { 680 // This is a previously inserted base phi or select. We know 681 // that this is a base value. 682 return true; 683 } 684 685 // We need to keep searching 686 return false; 687 } 688 689 // Returns true if First and Second values are both scalar or both vector. 690 static bool areBothVectorOrScalar(Value *First, Value *Second) { 691 return isa<VectorType>(First->getType()) == 692 isa<VectorType>(Second->getType()); 693 } 694 695 namespace { 696 697 /// Models the state of a single base defining value in the findBasePointer 698 /// algorithm for determining where a new instruction is needed to propagate 699 /// the base of this BDV. 700 class BDVState { 701 public: 702 enum StatusTy { 703 // Starting state of lattice 704 Unknown, 705 // Some specific base value -- does *not* mean that instruction 706 // propagates the base of the object 707 // ex: gep %arg, 16 -> %arg is the base value 708 Base, 709 // Need to insert a node to represent a merge. 710 Conflict 711 }; 712 713 BDVState() { 714 llvm_unreachable("missing state in map"); 715 } 716 717 explicit BDVState(Value *OriginalValue) 718 : OriginalValue(OriginalValue) {} 719 explicit BDVState(Value *OriginalValue, StatusTy Status, Value *BaseValue = nullptr) 720 : OriginalValue(OriginalValue), Status(Status), BaseValue(BaseValue) { 721 assert(Status != Base || BaseValue); 722 } 723 724 StatusTy getStatus() const { return Status; } 725 Value *getOriginalValue() const { return OriginalValue; } 726 Value *getBaseValue() const { return BaseValue; } 727 728 bool isBase() const { return getStatus() == Base; } 729 bool isUnknown() const { return getStatus() == Unknown; } 730 bool isConflict() const { return getStatus() == Conflict; } 731 732 // Values of type BDVState form a lattice, and this function implements the 733 // meet 734 // operation. 735 void meet(const BDVState &Other) { 736 auto markConflict = [&]() { 737 Status = BDVState::Conflict; 738 BaseValue = nullptr; 739 }; 740 // Conflict is a final state. 
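    // In short, the meet below behaves as follows:
    //   meet(Conflict, x)       = Conflict
    //   meet(Unknown, x)        = x
    //   meet(Base(a), Unknown)  = Base(a)
    //   meet(Base(a), Base(a))  = Base(a)
    //   meet(Base(a), Base(b))  = Conflict   (a != b)
    //   meet(Base(a), Conflict) = Conflict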
741 if (isConflict()) 742 return; 743 // if we are not known - just take other state. 744 if (isUnknown()) { 745 Status = Other.getStatus(); 746 BaseValue = Other.getBaseValue(); 747 return; 748 } 749 // We are base. 750 assert(isBase() && "Unknown state"); 751 // If other is unknown - just keep our state. 752 if (Other.isUnknown()) 753 return; 754 // If other is conflict - it is a final state. 755 if (Other.isConflict()) 756 return markConflict(); 757 // Other is base as well. 758 assert(Other.isBase() && "Unknown state"); 759 // If bases are different - Conflict. 760 if (getBaseValue() != Other.getBaseValue()) 761 return markConflict(); 762 // We are identical, do nothing. 763 } 764 765 bool operator==(const BDVState &Other) const { 766 return OriginalValue == Other.OriginalValue && BaseValue == Other.BaseValue && 767 Status == Other.Status; 768 } 769 770 bool operator!=(const BDVState &other) const { return !(*this == other); } 771 772 LLVM_DUMP_METHOD 773 void dump() const { 774 print(dbgs()); 775 dbgs() << '\n'; 776 } 777 778 void print(raw_ostream &OS) const { 779 switch (getStatus()) { 780 case Unknown: 781 OS << "U"; 782 break; 783 case Base: 784 OS << "B"; 785 break; 786 case Conflict: 787 OS << "C"; 788 break; 789 } 790 OS << " (base " << getBaseValue() << " - " 791 << (getBaseValue() ? getBaseValue()->getName() : "nullptr") << ")" 792 << " for " << OriginalValue->getName() << ":"; 793 } 794 795 private: 796 AssertingVH<Value> OriginalValue; // instruction this state corresponds to 797 StatusTy Status = Unknown; 798 AssertingVH<Value> BaseValue = nullptr; // Non-null only if Status == Base. 799 }; 800 801 } // end anonymous namespace 802 803 #ifndef NDEBUG 804 static raw_ostream &operator<<(raw_ostream &OS, const BDVState &State) { 805 State.print(OS); 806 return OS; 807 } 808 #endif 809 810 /// For a given value or instruction, figure out what base ptr its derived from. 811 /// For gc objects, this is simply itself. On success, returns a value which is 812 /// the base pointer. (This is reliable and can be used for relocation.) On 813 /// failure, returns nullptr. 814 static Value *findBasePointer(Value *I, DefiningValueMapTy &Cache) { 815 Value *Def = findBaseOrBDV(I, Cache); 816 817 if (isKnownBaseResult(Def) && areBothVectorOrScalar(Def, I)) 818 return Def; 819 820 // Here's the rough algorithm: 821 // - For every SSA value, construct a mapping to either an actual base 822 // pointer or a PHI which obscures the base pointer. 823 // - Construct a mapping from PHI to unknown TOP state. Use an 824 // optimistic algorithm to propagate base pointer information. Lattice 825 // looks like: 826 // UNKNOWN 827 // b1 b2 b3 b4 828 // CONFLICT 829 // When algorithm terminates, all PHIs will either have a single concrete 830 // base or be in a conflict state. 831 // - For every conflict, insert a dummy PHI node without arguments. Add 832 // these to the base[Instruction] = BasePtr mapping. For every 833 // non-conflict, add the actual base. 834 // - For every conflict, add arguments for the base[a] of each input 835 // arguments. 836 // 837 // Note: A simpler form of this would be to add the conflict form of all 838 // PHIs without running the optimistic algorithm. This would be 839 // analogous to pessimistic data flow and would likely lead to an 840 // overall worse solution. 
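  // Worked example (illustrative IR only, not taken from this file's tests):
  //   merge:
  //     %ptr = phi i8 addrspace(1)* [ %d1, %left ], [ %d2, %right ]
  // where %d1 = gep %base1, 8 and %d2 = gep %base2, 16, and both %base1 and
  // %base2 are arguments (known bases). The meet over %ptr's inputs yields
  // Conflict, so the algorithm clones the phi as
  //   %ptr.base = phi i8 addrspace(1)* [ %base1, %left ], [ %base2, %right ]
  // marks it with !is_base_value metadata, and records it as %ptr's base.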
841 842 #ifndef NDEBUG 843 auto isExpectedBDVType = [](Value *BDV) { 844 return isa<PHINode>(BDV) || isa<SelectInst>(BDV) || 845 isa<ExtractElementInst>(BDV) || isa<InsertElementInst>(BDV) || 846 isa<ShuffleVectorInst>(BDV); 847 }; 848 #endif 849 850 // Once populated, will contain a mapping from each potentially non-base BDV 851 // to a lattice value (described above) which corresponds to that BDV. 852 // We use the order of insertion (DFS over the def/use graph) to provide a 853 // stable deterministic ordering for visiting DenseMaps (which are unordered) 854 // below. This is important for deterministic compilation. 855 MapVector<Value *, BDVState> States; 856 857 #ifndef NDEBUG 858 auto VerifyStates = [&]() { 859 for (auto &Entry : States) { 860 assert(Entry.first == Entry.second.getOriginalValue()); 861 } 862 }; 863 #endif 864 865 auto visitBDVOperands = [](Value *BDV, std::function<void (Value*)> F) { 866 if (PHINode *PN = dyn_cast<PHINode>(BDV)) { 867 for (Value *InVal : PN->incoming_values()) 868 F(InVal); 869 } else if (SelectInst *SI = dyn_cast<SelectInst>(BDV)) { 870 F(SI->getTrueValue()); 871 F(SI->getFalseValue()); 872 } else if (auto *EE = dyn_cast<ExtractElementInst>(BDV)) { 873 F(EE->getVectorOperand()); 874 } else if (auto *IE = dyn_cast<InsertElementInst>(BDV)) { 875 F(IE->getOperand(0)); 876 F(IE->getOperand(1)); 877 } else if (auto *SV = dyn_cast<ShuffleVectorInst>(BDV)) { 878 // For a canonical broadcast, ignore the undef argument 879 // (without this, we insert a parallel base shuffle for every broadcast) 880 F(SV->getOperand(0)); 881 if (!SV->isZeroEltSplat()) 882 F(SV->getOperand(1)); 883 } else { 884 llvm_unreachable("unexpected BDV type"); 885 } 886 }; 887 888 889 // Recursively fill in all base defining values reachable from the initial 890 // one for which we don't already know a definite base value for 891 /* scope */ { 892 SmallVector<Value*, 16> Worklist; 893 Worklist.push_back(Def); 894 States.insert({Def, BDVState(Def)}); 895 while (!Worklist.empty()) { 896 Value *Current = Worklist.pop_back_val(); 897 assert(!isOriginalBaseResult(Current) && "why did it get added?"); 898 899 auto visitIncomingValue = [&](Value *InVal) { 900 Value *Base = findBaseOrBDV(InVal, Cache); 901 if (isKnownBaseResult(Base) && areBothVectorOrScalar(Base, InVal)) 902 // Known bases won't need new instructions introduced and can be 903 // ignored safely. However, this can only be done when InVal and Base 904 // are both scalar or both vector. Otherwise, we need to find a 905 // correct BDV for InVal, by creating an entry in the lattice 906 // (States). 907 return; 908 assert(isExpectedBDVType(Base) && "the only non-base values " 909 "we see should be base defining values"); 910 if (States.insert(std::make_pair(Base, BDVState(Base))).second) 911 Worklist.push_back(Base); 912 }; 913 914 visitBDVOperands(Current, visitIncomingValue); 915 } 916 } 917 918 #ifndef NDEBUG 919 VerifyStates(); 920 LLVM_DEBUG(dbgs() << "States after initialization:\n"); 921 for (const auto &Pair : States) { 922 LLVM_DEBUG(dbgs() << " " << Pair.second << " for " << *Pair.first << "\n"); 923 } 924 #endif 925 926 // Iterate forward through the value graph pruning any node from the state 927 // list where all of the inputs are base pointers. The purpose of this is to 928 // reuse existing values when the derived pointer we were asked to materialize 929 // a base pointer for happens to be a base pointer itself. (Or a sub-graph 930 // feeding it does.) 
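  // For instance, a phi whose incoming values are all function arguments is
  // itself a base pointer: it gets dropped from States here and cached as its
  // own base, so no parallel base phi is created for it.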
931 SmallVector<Value *> ToRemove; 932 do { 933 ToRemove.clear(); 934 for (auto Pair : States) { 935 Value *BDV = Pair.first; 936 auto canPruneInput = [&](Value *V) { 937 Value *BDV = findBaseOrBDV(V, Cache); 938 if (V->stripPointerCasts() != BDV) 939 return false; 940 // The assumption is that anything not in the state list is 941 // propagates a base pointer. 942 return States.count(BDV) == 0; 943 }; 944 945 bool CanPrune = true; 946 visitBDVOperands(BDV, [&](Value *Op) { 947 CanPrune = CanPrune && canPruneInput(Op); 948 }); 949 if (CanPrune) 950 ToRemove.push_back(BDV); 951 } 952 for (Value *V : ToRemove) { 953 States.erase(V); 954 // Cache the fact V is it's own base for later usage. 955 Cache[V] = V; 956 } 957 } while (!ToRemove.empty()); 958 959 // Did we manage to prove that Def itself must be a base pointer? 960 if (!States.count(Def)) 961 return Def; 962 963 // Return a phi state for a base defining value. We'll generate a new 964 // base state for known bases and expect to find a cached state otherwise. 965 auto GetStateForBDV = [&](Value *BaseValue, Value *Input) { 966 auto I = States.find(BaseValue); 967 if (I != States.end()) 968 return I->second; 969 assert(areBothVectorOrScalar(BaseValue, Input)); 970 return BDVState(BaseValue, BDVState::Base, BaseValue); 971 }; 972 973 bool Progress = true; 974 while (Progress) { 975 #ifndef NDEBUG 976 const size_t OldSize = States.size(); 977 #endif 978 Progress = false; 979 // We're only changing values in this loop, thus safe to keep iterators. 980 // Since this is computing a fixed point, the order of visit does not 981 // effect the result. TODO: We could use a worklist here and make this run 982 // much faster. 983 for (auto Pair : States) { 984 Value *BDV = Pair.first; 985 // Only values that do not have known bases or those that have differing 986 // type (scalar versus vector) from a possible known base should be in the 987 // lattice. 988 assert((!isKnownBaseResult(BDV) || 989 !areBothVectorOrScalar(BDV, Pair.second.getBaseValue())) && 990 "why did it get added?"); 991 992 BDVState NewState(BDV); 993 visitBDVOperands(BDV, [&](Value *Op) { 994 Value *BDV = findBaseOrBDV(Op, Cache); 995 auto OpState = GetStateForBDV(BDV, Op); 996 NewState.meet(OpState); 997 }); 998 999 BDVState OldState = States[BDV]; 1000 if (OldState != NewState) { 1001 Progress = true; 1002 States[BDV] = NewState; 1003 } 1004 } 1005 1006 assert(OldSize == States.size() && 1007 "fixed point shouldn't be adding any new nodes to state"); 1008 } 1009 1010 #ifndef NDEBUG 1011 VerifyStates(); 1012 LLVM_DEBUG(dbgs() << "States after meet iteration:\n"); 1013 for (const auto &Pair : States) { 1014 LLVM_DEBUG(dbgs() << " " << Pair.second << " for " << *Pair.first << "\n"); 1015 } 1016 #endif 1017 1018 // Handle all instructions that have a vector BDV, but the instruction itself 1019 // is of scalar type. 1020 for (auto Pair : States) { 1021 Instruction *I = cast<Instruction>(Pair.first); 1022 BDVState State = Pair.second; 1023 auto *BaseValue = State.getBaseValue(); 1024 // Only values that do not have known bases or those that have differing 1025 // type (scalar versus vector) from a possible known base should be in the 1026 // lattice. 
    assert((!isKnownBaseResult(I) || !areBothVectorOrScalar(I, BaseValue)) &&
           "why did it get added?");
    assert(!State.isUnknown() && "Optimistic algorithm didn't complete!");

    if (!State.isBase() || !isa<VectorType>(BaseValue->getType()))
      continue;
    // extractelement instructions are a bit special in that we may need to
    // insert an extract even when we know an exact base for the instruction.
    // The problem is that we need to convert from a vector base to a scalar
    // base for the particular index we're interested in.
    if (isa<ExtractElementInst>(I)) {
      auto *EE = cast<ExtractElementInst>(I);
      // TODO: In many cases, the new instruction is just EE itself. We should
      // exploit this, but can't do it here since it would break the invariant
      // about the BDV not being known to be a base.
      auto *BaseInst = ExtractElementInst::Create(
          State.getBaseValue(), EE->getIndexOperand(), "base_ee", EE);
      BaseInst->setMetadata("is_base_value", MDNode::get(I->getContext(), {}));
      States[I] = BDVState(I, BDVState::Base, BaseInst);
    } else if (!isa<VectorType>(I->getType())) {
      // We need to handle cases that have a vector base but the instruction is
      // a scalar type (these could be phis or selects or any instruction that
      // is of scalar type, but whose base can be a vector type). We
      // conservatively set this as conflict. Setting the base value for these
      // conflicts is handled in the next loop which traverses States.
      States[I] = BDVState(I, BDVState::Conflict);
    }
  }

#ifndef NDEBUG
  VerifyStates();
#endif

  // Insert Phis for all conflicts
  // TODO: adjust naming patterns to avoid this order of iteration dependency
  for (auto Pair : States) {
    Instruction *I = cast<Instruction>(Pair.first);
    BDVState State = Pair.second;
    // Only values that do not have known bases or those that have differing
    // type (scalar versus vector) from a possible known base should be in the
    // lattice.
    assert((!isKnownBaseResult(I) ||
            !areBothVectorOrScalar(I, State.getBaseValue())) &&
           "why did it get added?");
    assert(!State.isUnknown() && "Optimistic algorithm didn't complete!");

    // Since we're joining a vector and scalar base, they can never be the
    // same. As a result, we should always see insert element having reached
    // the conflict state.
1075 assert(!isa<InsertElementInst>(I) || State.isConflict()); 1076 1077 if (!State.isConflict()) 1078 continue; 1079 1080 auto getMangledName = [](Instruction *I) -> std::string { 1081 if (isa<PHINode>(I)) { 1082 return suffixed_name_or(I, ".base", "base_phi"); 1083 } else if (isa<SelectInst>(I)) { 1084 return suffixed_name_or(I, ".base", "base_select"); 1085 } else if (isa<ExtractElementInst>(I)) { 1086 return suffixed_name_or(I, ".base", "base_ee"); 1087 } else if (isa<InsertElementInst>(I)) { 1088 return suffixed_name_or(I, ".base", "base_ie"); 1089 } else { 1090 return suffixed_name_or(I, ".base", "base_sv"); 1091 } 1092 }; 1093 1094 Instruction *BaseInst = I->clone(); 1095 BaseInst->insertBefore(I); 1096 BaseInst->setName(getMangledName(I)); 1097 // Add metadata marking this as a base value 1098 BaseInst->setMetadata("is_base_value", MDNode::get(I->getContext(), {})); 1099 States[I] = BDVState(I, BDVState::Conflict, BaseInst); 1100 } 1101 1102 #ifndef NDEBUG 1103 VerifyStates(); 1104 #endif 1105 1106 // Returns a instruction which produces the base pointer for a given 1107 // instruction. The instruction is assumed to be an input to one of the BDVs 1108 // seen in the inference algorithm above. As such, we must either already 1109 // know it's base defining value is a base, or have inserted a new 1110 // instruction to propagate the base of it's BDV and have entered that newly 1111 // introduced instruction into the state table. In either case, we are 1112 // assured to be able to determine an instruction which produces it's base 1113 // pointer. 1114 auto getBaseForInput = [&](Value *Input, Instruction *InsertPt) { 1115 Value *BDV = findBaseOrBDV(Input, Cache); 1116 Value *Base = nullptr; 1117 if (!States.count(BDV)) { 1118 assert(areBothVectorOrScalar(BDV, Input)); 1119 Base = BDV; 1120 } else { 1121 // Either conflict or base. 1122 assert(States.count(BDV)); 1123 Base = States[BDV].getBaseValue(); 1124 } 1125 assert(Base && "Can't be null"); 1126 // The cast is needed since base traversal may strip away bitcasts 1127 if (Base->getType() != Input->getType() && InsertPt) 1128 Base = new BitCastInst(Base, Input->getType(), "cast", InsertPt); 1129 return Base; 1130 }; 1131 1132 // Fixup all the inputs of the new PHIs. Visit order needs to be 1133 // deterministic and predictable because we're naming newly created 1134 // instructions. 1135 for (auto Pair : States) { 1136 Instruction *BDV = cast<Instruction>(Pair.first); 1137 BDVState State = Pair.second; 1138 1139 // Only values that do not have known bases or those that have differing 1140 // type (scalar versus vector) from a possible known base should be in the 1141 // lattice. 1142 assert((!isKnownBaseResult(BDV) || 1143 !areBothVectorOrScalar(BDV, State.getBaseValue())) && 1144 "why did it get added?"); 1145 assert(!State.isUnknown() && "Optimistic algorithm didn't complete!"); 1146 if (!State.isConflict()) 1147 continue; 1148 1149 if (PHINode *BasePHI = dyn_cast<PHINode>(State.getBaseValue())) { 1150 PHINode *PN = cast<PHINode>(BDV); 1151 const unsigned NumPHIValues = PN->getNumIncomingValues(); 1152 1153 // The IR verifier requires phi nodes with multiple entries from the 1154 // same basic block to have the same incoming value for each of those 1155 // entries. Since we're inserting bitcasts in the loop, make sure we 1156 // do so at least once per incoming block. 
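      // For instance, a switch with two cases that branch to the same
      // successor contributes two identical (predecessor, value) edges to the
      // phi; both entries must receive the exact same base Value, so we create
      // at most one bitcast per predecessor block and reuse it below.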
1157 DenseMap<BasicBlock *, Value*> BlockToValue; 1158 for (unsigned i = 0; i < NumPHIValues; i++) { 1159 Value *InVal = PN->getIncomingValue(i); 1160 BasicBlock *InBB = PN->getIncomingBlock(i); 1161 if (!BlockToValue.count(InBB)) 1162 BlockToValue[InBB] = getBaseForInput(InVal, InBB->getTerminator()); 1163 else { 1164 #ifndef NDEBUG 1165 Value *OldBase = BlockToValue[InBB]; 1166 Value *Base = getBaseForInput(InVal, nullptr); 1167 // In essence this assert states: the only way two values 1168 // incoming from the same basic block may be different is by 1169 // being different bitcasts of the same value. A cleanup 1170 // that remains TODO is changing findBaseOrBDV to return an 1171 // llvm::Value of the correct type (and still remain pure). 1172 // This will remove the need to add bitcasts. 1173 assert(Base->stripPointerCasts() == OldBase->stripPointerCasts() && 1174 "findBaseOrBDV should be pure!"); 1175 #endif 1176 } 1177 Value *Base = BlockToValue[InBB]; 1178 BasePHI->setIncomingValue(i, Base); 1179 } 1180 } else if (SelectInst *BaseSI = 1181 dyn_cast<SelectInst>(State.getBaseValue())) { 1182 SelectInst *SI = cast<SelectInst>(BDV); 1183 1184 // Find the instruction which produces the base for each input. 1185 // We may need to insert a bitcast. 1186 BaseSI->setTrueValue(getBaseForInput(SI->getTrueValue(), BaseSI)); 1187 BaseSI->setFalseValue(getBaseForInput(SI->getFalseValue(), BaseSI)); 1188 } else if (auto *BaseEE = 1189 dyn_cast<ExtractElementInst>(State.getBaseValue())) { 1190 Value *InVal = cast<ExtractElementInst>(BDV)->getVectorOperand(); 1191 // Find the instruction which produces the base for each input. We may 1192 // need to insert a bitcast. 1193 BaseEE->setOperand(0, getBaseForInput(InVal, BaseEE)); 1194 } else if (auto *BaseIE = dyn_cast<InsertElementInst>(State.getBaseValue())){ 1195 auto *BdvIE = cast<InsertElementInst>(BDV); 1196 auto UpdateOperand = [&](int OperandIdx) { 1197 Value *InVal = BdvIE->getOperand(OperandIdx); 1198 Value *Base = getBaseForInput(InVal, BaseIE); 1199 BaseIE->setOperand(OperandIdx, Base); 1200 }; 1201 UpdateOperand(0); // vector operand 1202 UpdateOperand(1); // scalar operand 1203 } else { 1204 auto *BaseSV = cast<ShuffleVectorInst>(State.getBaseValue()); 1205 auto *BdvSV = cast<ShuffleVectorInst>(BDV); 1206 auto UpdateOperand = [&](int OperandIdx) { 1207 Value *InVal = BdvSV->getOperand(OperandIdx); 1208 Value *Base = getBaseForInput(InVal, BaseSV); 1209 BaseSV->setOperand(OperandIdx, Base); 1210 }; 1211 UpdateOperand(0); // vector operand 1212 if (!BdvSV->isZeroEltSplat()) 1213 UpdateOperand(1); // vector operand 1214 else { 1215 // Never read, so just use undef 1216 Value *InVal = BdvSV->getOperand(1); 1217 BaseSV->setOperand(1, UndefValue::get(InVal->getType())); 1218 } 1219 } 1220 } 1221 1222 #ifndef NDEBUG 1223 VerifyStates(); 1224 #endif 1225 1226 // Cache all of our results so we can cheaply reuse them 1227 // NOTE: This is actually two caches: one of the base defining value 1228 // relation and one of the base pointer relation! FIXME 1229 for (auto Pair : States) { 1230 auto *BDV = Pair.first; 1231 Value *Base = Pair.second.getBaseValue(); 1232 assert(BDV && Base); 1233 // Only values that do not have known bases or those that have differing 1234 // type (scalar versus vector) from a possible known base should be in the 1235 // lattice. 
1236 assert((!isKnownBaseResult(BDV) || !areBothVectorOrScalar(BDV, Base)) && 1237 "why did it get added?"); 1238 1239 LLVM_DEBUG( 1240 dbgs() << "Updating base value cache" 1241 << " for: " << BDV->getName() << " from: " 1242 << (Cache.count(BDV) ? Cache[BDV]->getName().str() : "none") 1243 << " to: " << Base->getName() << "\n"); 1244 1245 Cache[BDV] = Base; 1246 } 1247 assert(Cache.count(Def)); 1248 return Cache[Def]; 1249 } 1250 1251 // For a set of live pointers (base and/or derived), identify the base 1252 // pointer of the object which they are derived from. This routine will 1253 // mutate the IR graph as needed to make the 'base' pointer live at the 1254 // definition site of 'derived'. This ensures that any use of 'derived' can 1255 // also use 'base'. This may involve the insertion of a number of 1256 // additional PHI nodes. 1257 // 1258 // preconditions: live is a set of pointer type Values 1259 // 1260 // side effects: may insert PHI nodes into the existing CFG, will preserve 1261 // CFG, will not remove or mutate any existing nodes 1262 // 1263 // post condition: PointerToBase contains one (derived, base) pair for every 1264 // pointer in live. Note that derived can be equal to base if the original 1265 // pointer was a base pointer. 1266 static void findBasePointers(const StatepointLiveSetTy &live, 1267 PointerToBaseTy &PointerToBase, DominatorTree *DT, 1268 DefiningValueMapTy &DVCache) { 1269 for (Value *ptr : live) { 1270 Value *base = findBasePointer(ptr, DVCache); 1271 assert(base && "failed to find base pointer"); 1272 PointerToBase[ptr] = base; 1273 assert((!isa<Instruction>(base) || !isa<Instruction>(ptr) || 1274 DT->dominates(cast<Instruction>(base)->getParent(), 1275 cast<Instruction>(ptr)->getParent())) && 1276 "The base we found better dominate the derived pointer"); 1277 } 1278 } 1279 1280 /// Find the required based pointers (and adjust the live set) for the given 1281 /// parse point. 1282 static void findBasePointers(DominatorTree &DT, DefiningValueMapTy &DVCache, 1283 CallBase *Call, 1284 PartiallyConstructedSafepointRecord &result, 1285 PointerToBaseTy &PointerToBase) { 1286 StatepointLiveSetTy PotentiallyDerivedPointers = result.LiveSet; 1287 // We assume that all pointers passed to deopt are base pointers; as an 1288 // optimization, we can use this to avoid seperately materializing the base 1289 // pointer graph. This is only relevant since we're very conservative about 1290 // generating new conflict nodes during base pointer insertion. If we were 1291 // smarter there, this would be irrelevant. 1292 if (auto Opt = Call->getOperandBundle(LLVMContext::OB_deopt)) 1293 for (Value *V : Opt->Inputs) { 1294 if (!PotentiallyDerivedPointers.count(V)) 1295 continue; 1296 PotentiallyDerivedPointers.remove(V); 1297 PointerToBase[V] = V; 1298 } 1299 findBasePointers(PotentiallyDerivedPointers, PointerToBase, &DT, DVCache); 1300 } 1301 1302 /// Given an updated version of the dataflow liveness results, update the 1303 /// liveset and base pointer maps for the call site CS. 
1304 static void recomputeLiveInValues(GCPtrLivenessData &RevisedLivenessData, 1305 CallBase *Call, 1306 PartiallyConstructedSafepointRecord &result, 1307 PointerToBaseTy &PointerToBase); 1308 1309 static void recomputeLiveInValues( 1310 Function &F, DominatorTree &DT, ArrayRef<CallBase *> toUpdate, 1311 MutableArrayRef<struct PartiallyConstructedSafepointRecord> records, 1312 PointerToBaseTy &PointerToBase) { 1313 // TODO-PERF: reuse the original liveness, then simply run the dataflow 1314 // again. The old values are still live and will help it stabilize quickly. 1315 GCPtrLivenessData RevisedLivenessData; 1316 computeLiveInValues(DT, F, RevisedLivenessData); 1317 for (size_t i = 0; i < records.size(); i++) { 1318 struct PartiallyConstructedSafepointRecord &info = records[i]; 1319 recomputeLiveInValues(RevisedLivenessData, toUpdate[i], info, 1320 PointerToBase); 1321 } 1322 } 1323 1324 // When inserting gc.relocate and gc.result calls, we need to ensure there are 1325 // no uses of the original value / return value between the gc.statepoint and 1326 // the gc.relocate / gc.result call. One case which can arise is a phi node 1327 // starting one of the successor blocks. We also need to be able to insert the 1328 // gc.relocates only on the path which goes through the statepoint. We might 1329 // need to split an edge to make this possible. 1330 static BasicBlock * 1331 normalizeForInvokeSafepoint(BasicBlock *BB, BasicBlock *InvokeParent, 1332 DominatorTree &DT) { 1333 BasicBlock *Ret = BB; 1334 if (!BB->getUniquePredecessor()) 1335 Ret = SplitBlockPredecessors(BB, InvokeParent, "", &DT); 1336 1337 // Now that 'Ret' has unique predecessor we can safely remove all phi nodes 1338 // from it 1339 FoldSingleEntryPHINodes(Ret); 1340 assert(!isa<PHINode>(Ret->begin()) && 1341 "All PHI nodes should have been removed!"); 1342 1343 // At this point, we can safely insert a gc.relocate or gc.result as the first 1344 // instruction in Ret if needed. 1345 return Ret; 1346 } 1347 1348 // List of all function attributes which must be stripped when lowering from 1349 // abstract machine model to physical machine model. Essentially, these are 1350 // all the effects a safepoint might have which we ignored in the abstract 1351 // machine model for purposes of optimization. We have to strip these on 1352 // both function declarations and call sites. 1353 static constexpr Attribute::AttrKind FnAttrsToStrip[] = 1354 {Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly, 1355 Attribute::ArgMemOnly, Attribute::InaccessibleMemOnly, 1356 Attribute::InaccessibleMemOrArgMemOnly, 1357 Attribute::NoSync, Attribute::NoFree}; 1358 1359 // Create new attribute set containing only attributes which can be transferred 1360 // from original call to the safepoint. 1361 static AttributeList legalizeCallAttributes(LLVMContext &Ctx, 1362 AttributeList OrigAL, 1363 AttributeList StatepointAL) { 1364 if (OrigAL.isEmpty()) 1365 return StatepointAL; 1366 1367 // Remove the readonly, readnone, and statepoint function attributes. 1368 AttrBuilder FnAttrs(Ctx, OrigAL.getFnAttrs()); 1369 for (auto Attr : FnAttrsToStrip) 1370 FnAttrs.removeAttribute(Attr); 1371 1372 for (Attribute A : OrigAL.getFnAttrs()) { 1373 if (isStatepointDirectiveAttr(A)) 1374 FnAttrs.removeAttribute(A); 1375 } 1376 1377 // Just skip parameter and return attributes for now 1378 return StatepointAL.addFnAttributes(Ctx, FnAttrs); 1379 } 1380 1381 /// Helper function to place all gc relocates necessary for the given 1382 /// statepoint. 
1383 /// Inputs: 1384 /// liveVariables - list of variables to be relocated. 1385 /// basePtrs - base pointers. 1386 /// statepointToken - statepoint instruction to which relocates should be 1387 /// bound. 1388 /// Builder - Llvm IR builder to be used to construct new calls. 1389 static void CreateGCRelocates(ArrayRef<Value *> LiveVariables, 1390 ArrayRef<Value *> BasePtrs, 1391 Instruction *StatepointToken, 1392 IRBuilder<> &Builder) { 1393 if (LiveVariables.empty()) 1394 return; 1395 1396 auto FindIndex = [](ArrayRef<Value *> LiveVec, Value *Val) { 1397 auto ValIt = llvm::find(LiveVec, Val); 1398 assert(ValIt != LiveVec.end() && "Val not found in LiveVec!"); 1399 size_t Index = std::distance(LiveVec.begin(), ValIt); 1400 assert(Index < LiveVec.size() && "Bug in std::find?"); 1401 return Index; 1402 }; 1403 Module *M = StatepointToken->getModule(); 1404 1405 // All gc_relocate are generated as i8 addrspace(1)* (or a vector type whose 1406 // element type is i8 addrspace(1)*). We originally generated unique 1407 // declarations for each pointer type, but this proved problematic because 1408 // the intrinsic mangling code is incomplete and fragile. Since we're moving 1409 // towards a single unified pointer type anyways, we can just cast everything 1410 // to an i8* of the right address space. A bitcast is added later to convert 1411 // gc_relocate to the actual value's type. 1412 auto getGCRelocateDecl = [&] (Type *Ty) { 1413 assert(isHandledGCPointerType(Ty)); 1414 auto AS = Ty->getScalarType()->getPointerAddressSpace(); 1415 Type *NewTy = Type::getInt8PtrTy(M->getContext(), AS); 1416 if (auto *VT = dyn_cast<VectorType>(Ty)) 1417 NewTy = FixedVectorType::get(NewTy, 1418 cast<FixedVectorType>(VT)->getNumElements()); 1419 return Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_relocate, 1420 {NewTy}); 1421 }; 1422 1423 // Lazily populated map from input types to the canonicalized form mentioned 1424 // in the comment above. This should probably be cached somewhere more 1425 // broadly. 1426 DenseMap<Type *, Function *> TypeToDeclMap; 1427 1428 for (unsigned i = 0; i < LiveVariables.size(); i++) { 1429 // Generate the gc.relocate call and save the result 1430 Value *BaseIdx = Builder.getInt32(FindIndex(LiveVariables, BasePtrs[i])); 1431 Value *LiveIdx = Builder.getInt32(i); 1432 1433 Type *Ty = LiveVariables[i]->getType(); 1434 if (!TypeToDeclMap.count(Ty)) 1435 TypeToDeclMap[Ty] = getGCRelocateDecl(Ty); 1436 Function *GCRelocateDecl = TypeToDeclMap[Ty]; 1437 1438 // only specify a debug name if we can give a useful one 1439 CallInst *Reloc = Builder.CreateCall( 1440 GCRelocateDecl, {StatepointToken, BaseIdx, LiveIdx}, 1441 suffixed_name_or(LiveVariables[i], ".relocated", "")); 1442 // Trick CodeGen into thinking there are lots of free registers at this 1443 // fake call. 1444 Reloc->setCallingConv(CallingConv::Cold); 1445 } 1446 } 1447 1448 namespace { 1449 1450 /// This struct is used to defer RAUWs and `eraseFromParent` s. Using this 1451 /// avoids having to worry about keeping around dangling pointers to Values. 
1452 class DeferredReplacement { 1453 AssertingVH<Instruction> Old; 1454 AssertingVH<Instruction> New; 1455 bool IsDeoptimize = false; 1456 1457 DeferredReplacement() = default; 1458 1459 public: 1460 static DeferredReplacement createRAUW(Instruction *Old, Instruction *New) { 1461 assert(Old != New && Old && New && 1462 "Cannot RAUW equal values or to / from null!"); 1463 1464 DeferredReplacement D; 1465 D.Old = Old; 1466 D.New = New; 1467 return D; 1468 } 1469 1470 static DeferredReplacement createDelete(Instruction *ToErase) { 1471 DeferredReplacement D; 1472 D.Old = ToErase; 1473 return D; 1474 } 1475 1476 static DeferredReplacement createDeoptimizeReplacement(Instruction *Old) { 1477 #ifndef NDEBUG 1478 auto *F = cast<CallInst>(Old)->getCalledFunction(); 1479 assert(F && F->getIntrinsicID() == Intrinsic::experimental_deoptimize && 1480 "Only way to construct a deoptimize deferred replacement"); 1481 #endif 1482 DeferredReplacement D; 1483 D.Old = Old; 1484 D.IsDeoptimize = true; 1485 return D; 1486 } 1487 1488 /// Does the task represented by this instance. 1489 void doReplacement() { 1490 Instruction *OldI = Old; 1491 Instruction *NewI = New; 1492 1493 assert(OldI != NewI && "Disallowed at construction?!"); 1494 assert((!IsDeoptimize || !New) && 1495 "Deoptimize intrinsics are not replaced!"); 1496 1497 Old = nullptr; 1498 New = nullptr; 1499 1500 if (NewI) 1501 OldI->replaceAllUsesWith(NewI); 1502 1503 if (IsDeoptimize) { 1504 // Note: we've inserted instructions, so the call to llvm.deoptimize may 1505 // not necessarily be followed by the matching return. 1506 auto *RI = cast<ReturnInst>(OldI->getParent()->getTerminator()); 1507 new UnreachableInst(RI->getContext(), RI); 1508 RI->eraseFromParent(); 1509 } 1510 1511 OldI->eraseFromParent(); 1512 } 1513 }; 1514 1515 } // end anonymous namespace 1516 1517 static StringRef getDeoptLowering(CallBase *Call) { 1518 const char *DeoptLowering = "deopt-lowering"; 1519 if (Call->hasFnAttr(DeoptLowering)) { 1520 // FIXME: Calls have a *really* confusing interface around attributes 1521 // with values. 1522 const AttributeList &CSAS = Call->getAttributes(); 1523 if (CSAS.hasFnAttr(DeoptLowering)) 1524 return CSAS.getFnAttr(DeoptLowering).getValueAsString(); 1525 Function *F = Call->getCalledFunction(); 1526 assert(F && F->hasFnAttribute(DeoptLowering)); 1527 return F->getFnAttribute(DeoptLowering).getValueAsString(); 1528 } 1529 return "live-through"; 1530 } 1531 1532 static void 1533 makeStatepointExplicitImpl(CallBase *Call, /* to replace */ 1534 const SmallVectorImpl<Value *> &BasePtrs, 1535 const SmallVectorImpl<Value *> &LiveVariables, 1536 PartiallyConstructedSafepointRecord &Result, 1537 std::vector<DeferredReplacement> &Replacements, 1538 const PointerToBaseTy &PointerToBase) { 1539 assert(BasePtrs.size() == LiveVariables.size()); 1540 1541 // Then go ahead and use the builder do actually do the inserts. We insert 1542 // immediately before the previous instruction under the assumption that all 1543 // arguments will be available here. We can't insert afterwards since we may 1544 // be replacing a terminator. 
1545 IRBuilder<> Builder(Call);
1546
1547 ArrayRef<Value *> GCArgs(LiveVariables);
1548 uint64_t StatepointID = StatepointDirectives::DefaultStatepointID;
1549 uint32_t NumPatchBytes = 0;
1550 uint32_t Flags = uint32_t(StatepointFlags::None);
1551
1552 SmallVector<Value *, 8> CallArgs(Call->args());
1553 Optional<ArrayRef<Use>> DeoptArgs;
1554 if (auto Bundle = Call->getOperandBundle(LLVMContext::OB_deopt))
1555 DeoptArgs = Bundle->Inputs;
1556 Optional<ArrayRef<Use>> TransitionArgs;
1557 if (auto Bundle = Call->getOperandBundle(LLVMContext::OB_gc_transition)) {
1558 TransitionArgs = Bundle->Inputs;
1559 // TODO: This flag no longer serves a purpose and can be removed later
1560 Flags |= uint32_t(StatepointFlags::GCTransition);
1561 }
1562
1563 // Instead of lowering calls to @llvm.experimental.deoptimize as normal calls
1564 // with a return value, we lower them as never returning calls to
1565 // __llvm_deoptimize that are followed by unreachable to get better codegen.
1566 bool IsDeoptimize = false;
1567
1568 StatepointDirectives SD =
1569 parseStatepointDirectivesFromAttrs(Call->getAttributes());
1570 if (SD.NumPatchBytes)
1571 NumPatchBytes = *SD.NumPatchBytes;
1572 if (SD.StatepointID)
1573 StatepointID = *SD.StatepointID;
1574
1575 // Pass through the requested lowering if any. The default is live-through.
1576 StringRef DeoptLowering = getDeoptLowering(Call);
1577 if (DeoptLowering.equals("live-in"))
1578 Flags |= uint32_t(StatepointFlags::DeoptLiveIn);
1579 else {
1580 assert(DeoptLowering.equals("live-through") && "Unsupported value!");
1581 }
1582
1583 FunctionCallee CallTarget(Call->getFunctionType(), Call->getCalledOperand());
1584 if (Function *F = dyn_cast<Function>(CallTarget.getCallee())) {
1585 auto IID = F->getIntrinsicID();
1586 if (IID == Intrinsic::experimental_deoptimize) {
1587 // Calls to llvm.experimental.deoptimize are lowered to calls to the
1588 // __llvm_deoptimize symbol. We want to resolve this now, since the
1589 // verifier does not allow taking the address of an intrinsic function.
1590
1591 SmallVector<Type *, 8> DomainTy;
1592 for (Value *Arg : CallArgs)
1593 DomainTy.push_back(Arg->getType());
1594 auto *FTy = FunctionType::get(Type::getVoidTy(F->getContext()), DomainTy,
1595 /* isVarArg = */ false);
1596
1597 // Note: CallTarget can be a bitcast instruction of a symbol if there are
1598 // calls to @llvm.experimental.deoptimize with different argument types in
1599 // the same module. This is fine -- we assume the frontend knew what it
1600 // was doing when generating this kind of IR.
1601 CallTarget = F->getParent()
1602 ->getOrInsertFunction("__llvm_deoptimize", FTy);
1603
1604 IsDeoptimize = true;
1605 } else if (IID == Intrinsic::memcpy_element_unordered_atomic ||
1606 IID == Intrinsic::memmove_element_unordered_atomic) {
1607 // Unordered atomic memcpy and memmove intrinsics which are not explicitly
1608 // marked as "gc-leaf-function" should be lowered in a GC parseable way.
1609 // Specifically, these calls should be lowered to the
1610 // __llvm_{memcpy|memmove}_element_unordered_atomic_safepoint symbols.
1611 // Similarly to __llvm_deoptimize we want to resolve this now, since the
1612 // verifier does not allow taking the address of an intrinsic function.
1613 //
1614 // Moreover we need to shuffle the arguments for the call in order to
1615 // accommodate GC. The underlying source and destination objects might be
1616 // relocated during the copy operation should the GC occur.
To relocate the 1617 // derived source and destination pointers the implementation of the 1618 // intrinsic should know the corresponding base pointers. 1619 // 1620 // To make the base pointers available pass them explicitly as arguments: 1621 // memcpy(dest_derived, source_derived, ...) => 1622 // memcpy(dest_base, dest_offset, source_base, source_offset, ...) 1623 auto &Context = Call->getContext(); 1624 auto &DL = Call->getModule()->getDataLayout(); 1625 auto GetBaseAndOffset = [&](Value *Derived) { 1626 assert(PointerToBase.count(Derived)); 1627 unsigned AddressSpace = Derived->getType()->getPointerAddressSpace(); 1628 unsigned IntPtrSize = DL.getPointerSizeInBits(AddressSpace); 1629 Value *Base = PointerToBase.find(Derived)->second; 1630 Value *Base_int = Builder.CreatePtrToInt( 1631 Base, Type::getIntNTy(Context, IntPtrSize)); 1632 Value *Derived_int = Builder.CreatePtrToInt( 1633 Derived, Type::getIntNTy(Context, IntPtrSize)); 1634 return std::make_pair(Base, Builder.CreateSub(Derived_int, Base_int)); 1635 }; 1636 1637 auto *Dest = CallArgs[0]; 1638 Value *DestBase, *DestOffset; 1639 std::tie(DestBase, DestOffset) = GetBaseAndOffset(Dest); 1640 1641 auto *Source = CallArgs[1]; 1642 Value *SourceBase, *SourceOffset; 1643 std::tie(SourceBase, SourceOffset) = GetBaseAndOffset(Source); 1644 1645 auto *LengthInBytes = CallArgs[2]; 1646 auto *ElementSizeCI = cast<ConstantInt>(CallArgs[3]); 1647 1648 CallArgs.clear(); 1649 CallArgs.push_back(DestBase); 1650 CallArgs.push_back(DestOffset); 1651 CallArgs.push_back(SourceBase); 1652 CallArgs.push_back(SourceOffset); 1653 CallArgs.push_back(LengthInBytes); 1654 1655 SmallVector<Type *, 8> DomainTy; 1656 for (Value *Arg : CallArgs) 1657 DomainTy.push_back(Arg->getType()); 1658 auto *FTy = FunctionType::get(Type::getVoidTy(F->getContext()), DomainTy, 1659 /* isVarArg = */ false); 1660 1661 auto GetFunctionName = [](Intrinsic::ID IID, ConstantInt *ElementSizeCI) { 1662 uint64_t ElementSize = ElementSizeCI->getZExtValue(); 1663 if (IID == Intrinsic::memcpy_element_unordered_atomic) { 1664 switch (ElementSize) { 1665 case 1: 1666 return "__llvm_memcpy_element_unordered_atomic_safepoint_1"; 1667 case 2: 1668 return "__llvm_memcpy_element_unordered_atomic_safepoint_2"; 1669 case 4: 1670 return "__llvm_memcpy_element_unordered_atomic_safepoint_4"; 1671 case 8: 1672 return "__llvm_memcpy_element_unordered_atomic_safepoint_8"; 1673 case 16: 1674 return "__llvm_memcpy_element_unordered_atomic_safepoint_16"; 1675 default: 1676 llvm_unreachable("unexpected element size!"); 1677 } 1678 } 1679 assert(IID == Intrinsic::memmove_element_unordered_atomic); 1680 switch (ElementSize) { 1681 case 1: 1682 return "__llvm_memmove_element_unordered_atomic_safepoint_1"; 1683 case 2: 1684 return "__llvm_memmove_element_unordered_atomic_safepoint_2"; 1685 case 4: 1686 return "__llvm_memmove_element_unordered_atomic_safepoint_4"; 1687 case 8: 1688 return "__llvm_memmove_element_unordered_atomic_safepoint_8"; 1689 case 16: 1690 return "__llvm_memmove_element_unordered_atomic_safepoint_16"; 1691 default: 1692 llvm_unreachable("unexpected element size!"); 1693 } 1694 }; 1695 1696 CallTarget = 1697 F->getParent() 1698 ->getOrInsertFunction(GetFunctionName(IID, ElementSizeCI), FTy); 1699 } 1700 } 1701 1702 // Create the statepoint given all the arguments 1703 GCStatepointInst *Token = nullptr; 1704 if (auto *CI = dyn_cast<CallInst>(Call)) { 1705 CallInst *SPCall = Builder.CreateGCStatepointCall( 1706 StatepointID, NumPatchBytes, CallTarget, Flags, CallArgs, 1707 
TransitionArgs, DeoptArgs, GCArgs, "safepoint_token");
1708
1709 SPCall->setTailCallKind(CI->getTailCallKind());
1710 SPCall->setCallingConv(CI->getCallingConv());
1711
1712 // Currently we will fail on parameter attributes and on certain
1713 // function attributes. For the set of attributes we can handle, set up the
1714 // function attrs directly on the statepoint and set the return attrs later,
1715 // on the gc_result intrinsic.
1716 SPCall->setAttributes(legalizeCallAttributes(
1717 CI->getContext(), CI->getAttributes(), SPCall->getAttributes()));
1718
1719 Token = cast<GCStatepointInst>(SPCall);
1720
1721 // Put the following gc_result and gc_relocate calls immediately after
1722 // the old call (which we're about to delete).
1723 assert(CI->getNextNode() && "Not a terminator, must have next!");
1724 Builder.SetInsertPoint(CI->getNextNode());
1725 Builder.SetCurrentDebugLocation(CI->getNextNode()->getDebugLoc());
1726 } else {
1727 auto *II = cast<InvokeInst>(Call);
1728
1729 // Insert the new invoke into the old block. We'll remove the old one in a
1730 // moment at which point this will become the new terminator for the
1731 // original block.
1732 InvokeInst *SPInvoke = Builder.CreateGCStatepointInvoke(
1733 StatepointID, NumPatchBytes, CallTarget, II->getNormalDest(),
1734 II->getUnwindDest(), Flags, CallArgs, TransitionArgs, DeoptArgs, GCArgs,
1735 "statepoint_token");
1736
1737 SPInvoke->setCallingConv(II->getCallingConv());
1738
1739 // Currently we will fail on parameter attributes and on certain
1740 // function attributes. For the set of attributes we can handle, set up the
1741 // function attrs directly on the statepoint and set the return attrs later,
1742 // on the gc_result intrinsic.
1743 SPInvoke->setAttributes(legalizeCallAttributes(
1744 II->getContext(), II->getAttributes(), SPInvoke->getAttributes()));
1745
1746 Token = cast<GCStatepointInst>(SPInvoke);
1747
1748 // Generate gc relocates in the exceptional path
1749 BasicBlock *UnwindBlock = II->getUnwindDest();
1750 assert(!isa<PHINode>(UnwindBlock->begin()) &&
1751 UnwindBlock->getUniquePredecessor() &&
1752 "can't safely insert in this block!");
1753
1754 Builder.SetInsertPoint(&*UnwindBlock->getFirstInsertionPt());
1755 Builder.SetCurrentDebugLocation(II->getDebugLoc());
1756
1757 // Attach exceptional gc relocates to the landingpad.
1758 Instruction *ExceptionalToken = UnwindBlock->getLandingPadInst();
1759 Result.UnwindToken = ExceptionalToken;
1760
1761 CreateGCRelocates(LiveVariables, BasePtrs, ExceptionalToken, Builder);
1762
1763 // Generate gc relocates and returns for the normal block
1764 BasicBlock *NormalDest = II->getNormalDest();
1765 assert(!isa<PHINode>(NormalDest->begin()) &&
1766 NormalDest->getUniquePredecessor() &&
1767 "can't safely insert in this block!");
1768
1769 Builder.SetInsertPoint(&*NormalDest->getFirstInsertionPt());
1770
1771 // gc relocates will be generated later as if it were a regular call
1772 // statepoint
1773 }
1774 assert(Token && "Should be set in one of the above branches!");
1775
1776 if (IsDeoptimize) {
1777 // If we're wrapping an @llvm.experimental.deoptimize in a statepoint, we
1778 // transform the tail-call-like structure to a call to a void function
1779 // followed by unreachable to get better codegen.
1780 Replacements.push_back(
1781 DeferredReplacement::createDeoptimizeReplacement(Call));
1782 } else {
1783 Token->setName("statepoint_token");
1784 if (!Call->getType()->isVoidTy() && !Call->use_empty()) {
1785 StringRef Name = Call->hasName() ? Call->getName() : "";
1786 CallInst *GCResult = Builder.CreateGCResult(Token, Call->getType(), Name);
1787 GCResult->setAttributes(
1788 AttributeList::get(GCResult->getContext(), AttributeList::ReturnIndex,
1789 Call->getAttributes().getRetAttrs()));
1790
1791 // We cannot RAUW or delete the original call because it could be in the
1792 // live set of some other safepoint, in which case that safepoint's
1793 // PartiallyConstructedSafepointRecord will hold a raw pointer to this
1794 // llvm::Instruction. Instead, we defer the replacement and deletion to
1795 // after the live sets have been made explicit in the IR, and we no longer
1796 // have raw pointers to worry about.
1797 Replacements.emplace_back(
1798 DeferredReplacement::createRAUW(Call, GCResult));
1799 } else {
1800 Replacements.emplace_back(DeferredReplacement::createDelete(Call));
1801 }
1802 }
1803
1804 Result.StatepointToken = Token;
1805
1806 // Second, create a gc.relocate for every live variable
1807 CreateGCRelocates(LiveVariables, BasePtrs, Token, Builder);
1808 }
1809
1810 // Replace an existing gc.statepoint with a new one and a set of gc.relocates
1811 // which make the relocations happening at this safepoint explicit.
1812 //
1813 // WARNING: Does not do any fixup to adjust users of the original live
1814 // values. That's the caller's responsibility.
1815 static void
1816 makeStatepointExplicit(DominatorTree &DT, CallBase *Call,
1817 PartiallyConstructedSafepointRecord &Result,
1818 std::vector<DeferredReplacement> &Replacements,
1819 const PointerToBaseTy &PointerToBase) {
1820 const auto &LiveSet = Result.LiveSet;
1821
1822 // Convert to vector for efficient cross referencing.
1823 SmallVector<Value *, 64> BaseVec, LiveVec;
1824 LiveVec.reserve(LiveSet.size());
1825 BaseVec.reserve(LiveSet.size());
1826 for (Value *L : LiveSet) {
1827 LiveVec.push_back(L);
1828 assert(PointerToBase.count(L));
1829 Value *Base = PointerToBase.find(L)->second;
1830 BaseVec.push_back(Base);
1831 }
1832 assert(LiveVec.size() == BaseVec.size());
1833
1834 // Do the actual rewriting and delete the old statepoint
1835 makeStatepointExplicitImpl(Call, BaseVec, LiveVec, Result, Replacements,
1836 PointerToBase);
1837 }
1838
1839 // Helper function for relocationViaAlloca.
1840 //
1841 // It receives an iterator range over the statepoint gc relocates and emits a
1842 // store to the assigned location (via allocaMap) for each one of them. It adds
1843 // the visited values into the visitedLiveValues set, which we will later use
1844 // for validation checking.
1845 static void
1846 insertRelocationStores(iterator_range<Value::user_iterator> GCRelocs,
1847 DenseMap<Value *, AllocaInst *> &AllocaMap,
1848 DenseSet<Value *> &VisitedLiveValues) {
1849 for (User *U : GCRelocs) {
1850 GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(U);
1851 if (!Relocate)
1852 continue;
1853
1854 Value *OriginalValue = Relocate->getDerivedPtr();
1855 assert(AllocaMap.count(OriginalValue));
1856 Value *Alloca = AllocaMap[OriginalValue];
1857
1858 // Emit store into the related alloca.
1859 // All gc_relocates are i8 addrspace(1)* typed and must be bitcast to the
1860 // correct type according to the alloca.
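// Roughly, the emitted pattern looks like this (illustrative IR only; the
// value names and %ty are made up):
//   %val.relocated.casted = bitcast i8 addrspace(1)* %val.relocated
//                                   to %ty addrspace(1)*
//   store %ty addrspace(1)* %val.relocated.casted, %ty addrspace(1)** %alloca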
1861 assert(Relocate->getNextNode() && 1862 "Should always have one since it's not a terminator"); 1863 IRBuilder<> Builder(Relocate->getNextNode()); 1864 Value *CastedRelocatedValue = 1865 Builder.CreateBitCast(Relocate, 1866 cast<AllocaInst>(Alloca)->getAllocatedType(), 1867 suffixed_name_or(Relocate, ".casted", "")); 1868 1869 new StoreInst(CastedRelocatedValue, Alloca, 1870 cast<Instruction>(CastedRelocatedValue)->getNextNode()); 1871 1872 #ifndef NDEBUG 1873 VisitedLiveValues.insert(OriginalValue); 1874 #endif 1875 } 1876 } 1877 1878 // Helper function for the "relocationViaAlloca". Similar to the 1879 // "insertRelocationStores" but works for rematerialized values. 1880 static void insertRematerializationStores( 1881 const RematerializedValueMapTy &RematerializedValues, 1882 DenseMap<Value *, AllocaInst *> &AllocaMap, 1883 DenseSet<Value *> &VisitedLiveValues) { 1884 for (auto RematerializedValuePair: RematerializedValues) { 1885 Instruction *RematerializedValue = RematerializedValuePair.first; 1886 Value *OriginalValue = RematerializedValuePair.second; 1887 1888 assert(AllocaMap.count(OriginalValue) && 1889 "Can not find alloca for rematerialized value"); 1890 Value *Alloca = AllocaMap[OriginalValue]; 1891 1892 new StoreInst(RematerializedValue, Alloca, 1893 RematerializedValue->getNextNode()); 1894 1895 #ifndef NDEBUG 1896 VisitedLiveValues.insert(OriginalValue); 1897 #endif 1898 } 1899 } 1900 1901 /// Do all the relocation update via allocas and mem2reg 1902 static void relocationViaAlloca( 1903 Function &F, DominatorTree &DT, ArrayRef<Value *> Live, 1904 ArrayRef<PartiallyConstructedSafepointRecord> Records) { 1905 #ifndef NDEBUG 1906 // record initial number of (static) allocas; we'll check we have the same 1907 // number when we get done. 1908 int InitialAllocaNum = 0; 1909 for (Instruction &I : F.getEntryBlock()) 1910 if (isa<AllocaInst>(I)) 1911 InitialAllocaNum++; 1912 #endif 1913 1914 // TODO-PERF: change data structures, reserve 1915 DenseMap<Value *, AllocaInst *> AllocaMap; 1916 SmallVector<AllocaInst *, 200> PromotableAllocas; 1917 // Used later to chack that we have enough allocas to store all values 1918 std::size_t NumRematerializedValues = 0; 1919 PromotableAllocas.reserve(Live.size()); 1920 1921 // Emit alloca for "LiveValue" and record it in "allocaMap" and 1922 // "PromotableAllocas" 1923 const DataLayout &DL = F.getParent()->getDataLayout(); 1924 auto emitAllocaFor = [&](Value *LiveValue) { 1925 AllocaInst *Alloca = new AllocaInst(LiveValue->getType(), 1926 DL.getAllocaAddrSpace(), "", 1927 F.getEntryBlock().getFirstNonPHI()); 1928 AllocaMap[LiveValue] = Alloca; 1929 PromotableAllocas.push_back(Alloca); 1930 }; 1931 1932 // Emit alloca for each live gc pointer 1933 for (Value *V : Live) 1934 emitAllocaFor(V); 1935 1936 // Emit allocas for rematerialized values 1937 for (const auto &Info : Records) 1938 for (auto RematerializedValuePair : Info.RematerializedValues) { 1939 Value *OriginalValue = RematerializedValuePair.second; 1940 if (AllocaMap.count(OriginalValue) != 0) 1941 continue; 1942 1943 emitAllocaFor(OriginalValue); 1944 ++NumRematerializedValues; 1945 } 1946 1947 // The next two loops are part of the same conceptual operation. We need to 1948 // insert a store to the alloca after the original def and at each 1949 // redefinition. We need to insert a load before each use. These are split 1950 // into distinct loops for performance reasons. 
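// Sketch of the overall rewrite (illustrative pseudo-IR only; %p is a live gc
// pointer and %p.alloca is the stack slot created for it above):
//   %p = ...                            ; original def
//   store %p, %p.alloca                 ; store after the def
//   ... statepoint ...
//   %p.relocated = gc.relocate(...)     ; redefinition after the statepoint
//   store %p.relocated, %p.alloca       ; store after the redefinition
//   ...
//   %p.reload = load %p.alloca          ; load inserted before each use
//   use(%p.reload)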
1951
1952 // Update the gc pointer after each statepoint: either store a relocated
1953 // value or null (if no relocated value was found for this gc pointer and it
1954 // is not a gc_result). This must happen before we update the statepoint with
1955 // a load of the alloca, otherwise we lose the link between statepoint and old def.
1956 for (const auto &Info : Records) {
1957 Value *Statepoint = Info.StatepointToken;
1958
1959 // This will be used for a consistency check
1960 DenseSet<Value *> VisitedLiveValues;
1961
1962 // Insert stores for normal statepoint gc relocates
1963 insertRelocationStores(Statepoint->users(), AllocaMap, VisitedLiveValues);
1964
1965 // If it was an invoke statepoint
1966 // we will also insert stores for the exceptional path gc relocates.
1967 if (isa<InvokeInst>(Statepoint)) {
1968 insertRelocationStores(Info.UnwindToken->users(), AllocaMap,
1969 VisitedLiveValues);
1970 }
1971
1972 // Do the same thing for rematerialized values
1973 insertRematerializationStores(Info.RematerializedValues, AllocaMap,
1974 VisitedLiveValues);
1975
1976 if (ClobberNonLive) {
1977 // As a debugging aid, pretend that an unrelocated pointer becomes null at
1978 // the gc.statepoint. This will turn some subtle GC problems into
1979 // slightly easier to debug SEGVs. Note that on large IR files with
1980 // lots of gc.statepoints this is extremely costly in both memory and
1981 // time.
1982 SmallVector<AllocaInst *, 64> ToClobber;
1983 for (auto Pair : AllocaMap) {
1984 Value *Def = Pair.first;
1985 AllocaInst *Alloca = Pair.second;
1986
1987 // This value was relocated
1988 if (VisitedLiveValues.count(Def)) {
1989 continue;
1990 }
1991 ToClobber.push_back(Alloca);
1992 }
1993
1994 auto InsertClobbersAt = [&](Instruction *IP) {
1995 for (auto *AI : ToClobber) {
1996 auto PT = cast<PointerType>(AI->getAllocatedType());
1997 Constant *CPN = ConstantPointerNull::get(PT);
1998 new StoreInst(CPN, AI, IP);
1999 }
2000 };
2001
2002 // Insert the clobbering stores. These may get intermixed with the
2003 // gc.results and gc.relocates, but that's fine.
2004 if (auto II = dyn_cast<InvokeInst>(Statepoint)) {
2005 InsertClobbersAt(&*II->getNormalDest()->getFirstInsertionPt());
2006 InsertClobbersAt(&*II->getUnwindDest()->getFirstInsertionPt());
2007 } else {
2008 InsertClobbersAt(cast<Instruction>(Statepoint)->getNextNode());
2009 }
2010 }
2011 }
2012
2013 // Update uses with loads from the allocas and add a store for the initial gc value.
2014 for (auto Pair : AllocaMap) {
2015 Value *Def = Pair.first;
2016 AllocaInst *Alloca = Pair.second;
2017
2018 // We pre-record the uses of allocas so that we don't have to worry about
2019 // a later update that changes the user information.
2020
2021 SmallVector<Instruction *, 20> Uses;
2022 // PERF: trade a linear scan for repeated reallocation
2023 Uses.reserve(Def->getNumUses());
2024 for (User *U : Def->users()) {
2025 if (!isa<ConstantExpr>(U)) {
2026 // If the def has a ConstantExpr use, then the def is either a
2027 // ConstantExpr use itself or null. In either case
2028 // (recursively in the first, directly in the second), the oop
2029 // it is ultimately dependent on is null and this particular
2030 // use does not need to be fixed up.
2031 Uses.push_back(cast<Instruction>(U)); 2032 } 2033 } 2034 2035 llvm::sort(Uses); 2036 auto Last = std::unique(Uses.begin(), Uses.end()); 2037 Uses.erase(Last, Uses.end()); 2038 2039 for (Instruction *Use : Uses) { 2040 if (isa<PHINode>(Use)) { 2041 PHINode *Phi = cast<PHINode>(Use); 2042 for (unsigned i = 0; i < Phi->getNumIncomingValues(); i++) { 2043 if (Def == Phi->getIncomingValue(i)) { 2044 LoadInst *Load = 2045 new LoadInst(Alloca->getAllocatedType(), Alloca, "", 2046 Phi->getIncomingBlock(i)->getTerminator()); 2047 Phi->setIncomingValue(i, Load); 2048 } 2049 } 2050 } else { 2051 LoadInst *Load = 2052 new LoadInst(Alloca->getAllocatedType(), Alloca, "", Use); 2053 Use->replaceUsesOfWith(Def, Load); 2054 } 2055 } 2056 2057 // Emit store for the initial gc value. Store must be inserted after load, 2058 // otherwise store will be in alloca's use list and an extra load will be 2059 // inserted before it. 2060 StoreInst *Store = new StoreInst(Def, Alloca, /*volatile*/ false, 2061 DL.getABITypeAlign(Def->getType())); 2062 if (Instruction *Inst = dyn_cast<Instruction>(Def)) { 2063 if (InvokeInst *Invoke = dyn_cast<InvokeInst>(Inst)) { 2064 // InvokeInst is a terminator so the store need to be inserted into its 2065 // normal destination block. 2066 BasicBlock *NormalDest = Invoke->getNormalDest(); 2067 Store->insertBefore(NormalDest->getFirstNonPHI()); 2068 } else { 2069 assert(!Inst->isTerminator() && 2070 "The only terminator that can produce a value is " 2071 "InvokeInst which is handled above."); 2072 Store->insertAfter(Inst); 2073 } 2074 } else { 2075 assert(isa<Argument>(Def)); 2076 Store->insertAfter(cast<Instruction>(Alloca)); 2077 } 2078 } 2079 2080 assert(PromotableAllocas.size() == Live.size() + NumRematerializedValues && 2081 "we must have the same allocas with lives"); 2082 if (!PromotableAllocas.empty()) { 2083 // Apply mem2reg to promote alloca to SSA 2084 PromoteMemToReg(PromotableAllocas, DT); 2085 } 2086 2087 #ifndef NDEBUG 2088 for (auto &I : F.getEntryBlock()) 2089 if (isa<AllocaInst>(I)) 2090 InitialAllocaNum--; 2091 assert(InitialAllocaNum == 0 && "We must not introduce any extra allocas"); 2092 #endif 2093 } 2094 2095 /// Implement a unique function which doesn't require we sort the input 2096 /// vector. Doing so has the effect of changing the output of a couple of 2097 /// tests in ways which make them less useful in testing fused safepoints. 2098 template <typename T> static void unique_unsorted(SmallVectorImpl<T> &Vec) { 2099 SmallSet<T, 8> Seen; 2100 erase_if(Vec, [&](const T &V) { return !Seen.insert(V).second; }); 2101 } 2102 2103 /// Insert holders so that each Value is obviously live through the entire 2104 /// lifetime of the call. 
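/// Conceptually, for values %a and %b this inserts something like the
/// following right after a call safepoint (and at the start of both
/// destination blocks for an invoke safepoint); illustrative IR only:
///   call void (...) @__tmp_use(i8 addrspace(1)* %a, i8 addrspace(1)* %b)
/// The dummy calls are collected in Holders and erased once liveness has
/// been recomputed.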
2105 static void insertUseHolderAfter(CallBase *Call, const ArrayRef<Value *> Values,
2106 SmallVectorImpl<CallInst *> &Holders) {
2107 if (Values.empty())
2108 // No values to hold live, might as well not insert the empty holder
2109 return;
2110
2111 Module *M = Call->getModule();
2112 // Use a dummy vararg function to actually hold the values live
2113 FunctionCallee Func = M->getOrInsertFunction(
2114 "__tmp_use", FunctionType::get(Type::getVoidTy(M->getContext()), true));
2115 if (isa<CallInst>(Call)) {
2116 // For call safepoints, insert dummy calls right after the safepoint
2117 Holders.push_back(
2118 CallInst::Create(Func, Values, "", &*++Call->getIterator()));
2119 return;
2120 }
2121 // For invoke safepoints, insert dummy calls in both the normal and
2122 // exceptional destination blocks
2123 auto *II = cast<InvokeInst>(Call);
2124 Holders.push_back(CallInst::Create(
2125 Func, Values, "", &*II->getNormalDest()->getFirstInsertionPt()));
2126 Holders.push_back(CallInst::Create(
2127 Func, Values, "", &*II->getUnwindDest()->getFirstInsertionPt()));
2128 }
2129
2130 static void findLiveReferences(
2131 Function &F, DominatorTree &DT, ArrayRef<CallBase *> toUpdate,
2132 MutableArrayRef<struct PartiallyConstructedSafepointRecord> records) {
2133 GCPtrLivenessData OriginalLivenessData;
2134 computeLiveInValues(DT, F, OriginalLivenessData);
2135 for (size_t i = 0; i < records.size(); i++) {
2136 struct PartiallyConstructedSafepointRecord &info = records[i];
2137 analyzeParsePointLiveness(DT, OriginalLivenessData, toUpdate[i], info);
2138 }
2139 }
2140
2141 // Helper function for "rematerializeLiveValues". It walks the use chain
2142 // starting from "CurrentValue" until it reaches the root of the chain, i.e.
2143 // the base or a value it cannot process. Only "simple" values are processed
2144 // (currently GEPs and casts). The returned root is examined by the
2145 // callers of findRematerializableChainToBasePointer. Fills the "ChainToBase"
2146 // array with all visited values.
2147 static Value* findRematerializableChainToBasePointer(
2148 SmallVectorImpl<Instruction*> &ChainToBase,
2149 Value *CurrentValue) {
2150 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(CurrentValue)) {
2151 ChainToBase.push_back(GEP);
2152 return findRematerializableChainToBasePointer(ChainToBase,
2153 GEP->getPointerOperand());
2154 }
2155
2156 if (CastInst *CI = dyn_cast<CastInst>(CurrentValue)) {
2157 if (!CI->isNoopCast(CI->getModule()->getDataLayout()))
2158 return CI;
2159
2160 ChainToBase.push_back(CI);
2161 return findRematerializableChainToBasePointer(ChainToBase,
2162 CI->getOperand(0));
2163 }
2164
2165 // We have reached the root of the chain, which is either equal to the base or
2166 // is the first unsupported value along the use chain.
2167 return CurrentValue;
2168 }
2169
2170 // Helper function for "rematerializeLiveValues". Computes the cost of the use
2171 // chain we are going to rematerialize.
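// For example (illustrative only), a chain consisting of a no-op cast followed
// by a GEP with non-constant indices would be costed roughly as:
//   cost(cast)                  ; via TTI.getCastInstrCost
//   + address computation cost  ; via TTI.getAddressComputationCost
//   + 2                         ; flat charge for a GEP with non-constant indices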
2172 static InstructionCost 2173 chainToBasePointerCost(SmallVectorImpl<Instruction *> &Chain, 2174 TargetTransformInfo &TTI) { 2175 InstructionCost Cost = 0; 2176 2177 for (Instruction *Instr : Chain) { 2178 if (CastInst *CI = dyn_cast<CastInst>(Instr)) { 2179 assert(CI->isNoopCast(CI->getModule()->getDataLayout()) && 2180 "non noop cast is found during rematerialization"); 2181 2182 Type *SrcTy = CI->getOperand(0)->getType(); 2183 Cost += TTI.getCastInstrCost(CI->getOpcode(), CI->getType(), SrcTy, 2184 TTI::getCastContextHint(CI), 2185 TargetTransformInfo::TCK_SizeAndLatency, CI); 2186 2187 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Instr)) { 2188 // Cost of the address calculation 2189 Type *ValTy = GEP->getSourceElementType(); 2190 Cost += TTI.getAddressComputationCost(ValTy); 2191 2192 // And cost of the GEP itself 2193 // TODO: Use TTI->getGEPCost here (it exists, but appears to be not 2194 // allowed for the external usage) 2195 if (!GEP->hasAllConstantIndices()) 2196 Cost += 2; 2197 2198 } else { 2199 llvm_unreachable("unsupported instruction type during rematerialization"); 2200 } 2201 } 2202 2203 return Cost; 2204 } 2205 2206 static bool AreEquivalentPhiNodes(PHINode &OrigRootPhi, PHINode &AlternateRootPhi) { 2207 unsigned PhiNum = OrigRootPhi.getNumIncomingValues(); 2208 if (PhiNum != AlternateRootPhi.getNumIncomingValues() || 2209 OrigRootPhi.getParent() != AlternateRootPhi.getParent()) 2210 return false; 2211 // Map of incoming values and their corresponding basic blocks of 2212 // OrigRootPhi. 2213 SmallDenseMap<Value *, BasicBlock *, 8> CurrentIncomingValues; 2214 for (unsigned i = 0; i < PhiNum; i++) 2215 CurrentIncomingValues[OrigRootPhi.getIncomingValue(i)] = 2216 OrigRootPhi.getIncomingBlock(i); 2217 2218 // Both current and base PHIs should have same incoming values and 2219 // the same basic blocks corresponding to the incoming values. 2220 for (unsigned i = 0; i < PhiNum; i++) { 2221 auto CIVI = 2222 CurrentIncomingValues.find(AlternateRootPhi.getIncomingValue(i)); 2223 if (CIVI == CurrentIncomingValues.end()) 2224 return false; 2225 BasicBlock *CurrentIncomingBB = CIVI->second; 2226 if (CurrentIncomingBB != AlternateRootPhi.getIncomingBlock(i)) 2227 return false; 2228 } 2229 return true; 2230 } 2231 2232 // Find derived pointers that can be recomputed cheap enough and fill 2233 // RematerizationCandidates with such candidates. 2234 static void 2235 findRematerializationCandidates(PointerToBaseTy PointerToBase, 2236 RematCandTy &RematerizationCandidates, 2237 TargetTransformInfo &TTI) { 2238 const unsigned int ChainLengthThreshold = 10; 2239 2240 for (auto P2B : PointerToBase) { 2241 auto *Derived = P2B.first; 2242 auto *Base = P2B.second; 2243 // Consider only derived pointers. 2244 if (Derived == Base) 2245 continue; 2246 2247 // For each live pointer find its defining chain. 2248 SmallVector<Instruction *, 3> ChainToBase; 2249 Value *RootOfChain = 2250 findRematerializableChainToBasePointer(ChainToBase, Derived); 2251 2252 // Nothing to do, or chain is too long 2253 if ( ChainToBase.size() == 0 || 2254 ChainToBase.size() > ChainLengthThreshold) 2255 continue; 2256 2257 // Handle the scenario where the RootOfChain is not equal to the 2258 // Base Value, but they are essentially the same phi values. 
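    // For example (illustrative only), findBasePointer may have introduced a
    // ".base" copy of an originally conflicting phi:
    //   %root      = phi i8 addrspace(1)* [ %p, %bb1 ], [ %q, %bb2 ]
    //   %root.base = phi i8 addrspace(1)* [ %p, %bb1 ], [ %q, %bb2 ]
    // Both phis have identical incoming values and blocks, so for our purposes
    // they are the same SSA value and the chain rooted at %root can still be
    // rematerialized against %root.base.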
2259 if (RootOfChain != PointerToBase[Derived]) {
2260 PHINode *OrigRootPhi = dyn_cast<PHINode>(RootOfChain);
2261 PHINode *AlternateRootPhi = dyn_cast<PHINode>(PointerToBase[Derived]);
2262 if (!OrigRootPhi || !AlternateRootPhi)
2263 continue;
2264 // PHI nodes that have the same incoming values and belong to the same
2265 // basic block are essentially the same SSA value. When the original phi
2266 // has incoming values with different base pointers, the original phi is
2267 // marked as a conflict, and an additional `AlternateRootPhi` with the same
2268 // incoming values gets generated by the findBasePointer function. We need
2269 // to identify that the newly generated AlternateRootPhi (the .base version
2270 // of the phi) and RootOfChain (the original phi node itself) are the same,
2271 // so that we can rematerialize the geps and casts. This is a workaround for
2272 // the deficiency in the findBasePointer algorithm.
2273 if (!AreEquivalentPhiNodes(*OrigRootPhi, *AlternateRootPhi))
2274 continue;
2275 }
2276 // Compute the cost of this chain.
2277 InstructionCost Cost = chainToBasePointerCost(ChainToBase, TTI);
2278 // TODO: We can also account for cases when we will be able to remove some
2279 // of the rematerialized values by later optimization passes, e.g. if
2280 // we rematerialized several intersecting chains, or if the original values
2281 // don't have any uses besides this statepoint.
2282
2283 // Ok, there is a candidate.
2284 RematerizlizationCandidateRecord Record;
2285 Record.ChainToBase = ChainToBase;
2286 Record.RootOfChain = RootOfChain;
2287 Record.Cost = Cost;
2288 RematerizationCandidates.insert({ Derived, Record });
2289 }
2290 }
2291
2292 // From the statepoint live set pick values that are cheaper to recompute than
2293 // to relocate. Remove these values from the live set, rematerialize them after
2294 // the statepoint and record them in the "Info" structure. Note that similar to
2295 // relocated values we don't do any user adjustments here.
2296 static void rematerializeLiveValues(CallBase *Call,
2297 PartiallyConstructedSafepointRecord &Info,
2298 PointerToBaseTy &PointerToBase,
2299 RematCandTy &RematerizationCandidates,
2300 TargetTransformInfo &TTI) {
2301 // Record values we are going to delete from this statepoint live set.
2302 // We cannot do this in the following loop due to iterator invalidation.
2303 SmallVector<Value *, 32> LiveValuesToBeDeleted;
2304
2305 for (Value *LiveValue : Info.LiveSet) {
2306 auto It = RematerizationCandidates.find(LiveValue);
2307 if (It == RematerizationCandidates.end())
2308 continue;
2309
2310 RematerizlizationCandidateRecord &Record = It->second;
2311
2312 InstructionCost Cost = Record.Cost;
2313 // For invokes we need to rematerialize each chain twice - for the normal
2314 // and for the unwind basic blocks. Model this by multiplying the cost by two.
2315 if (isa<InvokeInst>(Call))
2316 Cost *= 2;
2317
2318 // If it's too expensive, skip it.
2319 if (Cost >= RematerializationThreshold)
2320 continue;
2321
2322 // Remove the value from the live set
2323 LiveValuesToBeDeleted.push_back(LiveValue);
2324
2325 // Clone instructions and record them inside the "Info" structure.
2326
2327 // For each live pointer, find its defining chain.
2328 SmallVector<Instruction *, 3> ChainToBase = Record.ChainToBase;
2329 // Walk backwards to visit the top-most instructions first.
2330 std::reverse(ChainToBase.begin(), ChainToBase.end());
2331
2332 // Utility function which clones all instructions from "ChainToBase"
2333 // and inserts them before "InsertBefore". Returns the rematerialized value
2334 // which should be used after the statepoint.
2335 auto rematerializeChain = [&ChainToBase](
2336 Instruction *InsertBefore, Value *RootOfChain, Value *AlternateLiveBase) {
2337 Instruction *LastClonedValue = nullptr;
2338 Instruction *LastValue = nullptr;
2339 for (Instruction *Instr: ChainToBase) {
2340 // Only GEPs and casts are supported as we need to be careful to not
2341 // introduce any new uses of pointers not in the liveset.
2342 // Note that it's fine to introduce new uses of pointers which were
2343 // otherwise not used after this statepoint.
2344 assert(isa<GetElementPtrInst>(Instr) || isa<CastInst>(Instr));
2345
2346 Instruction *ClonedValue = Instr->clone();
2347 ClonedValue->insertBefore(InsertBefore);
2348 ClonedValue->setName(Instr->getName() + ".remat");
2349
2350 // If it is not the first instruction in the chain then it uses the
2351 // previously cloned value. We should update it to use the cloned value.
2352 if (LastClonedValue) {
2353 assert(LastValue);
2354 ClonedValue->replaceUsesOfWith(LastValue, LastClonedValue);
2355 #ifndef NDEBUG
2356 for (auto OpValue : ClonedValue->operand_values()) {
2357 // Assert that the cloned instruction does not use any instructions
2358 // from this chain other than LastClonedValue.
2359 assert(!is_contained(ChainToBase, OpValue) &&
2360 "incorrect use in rematerialization chain");
2361 // Assert that the cloned instruction does not use the RootOfChain
2362 // or the AlternateLiveBase.
2363 assert(OpValue != RootOfChain && OpValue != AlternateLiveBase);
2364 }
2365 #endif
2366 } else {
2367 // For the first instruction, replace the use of the unrelocated base, i.e.
2368 // RootOfChain/OrigRootPhi, with the corresponding PHI present in the
2369 // live set. They have been proven to be the same PHI nodes. Note
2370 // that the *only* use of the RootOfChain in the ChainToBase list is
2371 // the first Value in the list.
2372 if (RootOfChain != AlternateLiveBase)
2373 ClonedValue->replaceUsesOfWith(RootOfChain, AlternateLiveBase);
2374 }
2375
2376 LastClonedValue = ClonedValue;
2377 LastValue = Instr;
2378 }
2379 assert(LastClonedValue);
2380 return LastClonedValue;
2381 };
2382
2383 // Different cases for calls and invokes. For invokes we need to clone
2384 // instructions on both the normal and unwind paths.
2385 if (isa<CallInst>(Call)) { 2386 Instruction *InsertBefore = Call->getNextNode(); 2387 assert(InsertBefore); 2388 Instruction *RematerializedValue = rematerializeChain( 2389 InsertBefore, Record.RootOfChain, PointerToBase[LiveValue]); 2390 Info.RematerializedValues[RematerializedValue] = LiveValue; 2391 } else { 2392 auto *Invoke = cast<InvokeInst>(Call); 2393 2394 Instruction *NormalInsertBefore = 2395 &*Invoke->getNormalDest()->getFirstInsertionPt(); 2396 Instruction *UnwindInsertBefore = 2397 &*Invoke->getUnwindDest()->getFirstInsertionPt(); 2398 2399 Instruction *NormalRematerializedValue = rematerializeChain( 2400 NormalInsertBefore, Record.RootOfChain, PointerToBase[LiveValue]); 2401 Instruction *UnwindRematerializedValue = rematerializeChain( 2402 UnwindInsertBefore, Record.RootOfChain, PointerToBase[LiveValue]); 2403 2404 Info.RematerializedValues[NormalRematerializedValue] = LiveValue; 2405 Info.RematerializedValues[UnwindRematerializedValue] = LiveValue; 2406 } 2407 } 2408 2409 // Remove rematerializaed values from the live set 2410 for (auto LiveValue: LiveValuesToBeDeleted) { 2411 Info.LiveSet.remove(LiveValue); 2412 } 2413 } 2414 2415 static bool inlineGetBaseAndOffset(Function &F, 2416 SmallVectorImpl<CallInst *> &Intrinsics, 2417 DefiningValueMapTy &DVCache) { 2418 auto &Context = F.getContext(); 2419 auto &DL = F.getParent()->getDataLayout(); 2420 bool Changed = false; 2421 2422 for (auto *Callsite : Intrinsics) 2423 switch (Callsite->getIntrinsicID()) { 2424 case Intrinsic::experimental_gc_get_pointer_base: { 2425 Changed = true; 2426 Value *Base = findBasePointer(Callsite->getOperand(0), DVCache); 2427 assert(!DVCache.count(Callsite)); 2428 auto *BaseBC = IRBuilder<>(Callsite).CreateBitCast( 2429 Base, Callsite->getType(), suffixed_name_or(Base, ".cast", "")); 2430 if (BaseBC != Base) 2431 DVCache[BaseBC] = Base; 2432 Callsite->replaceAllUsesWith(BaseBC); 2433 if (!BaseBC->hasName()) 2434 BaseBC->takeName(Callsite); 2435 Callsite->eraseFromParent(); 2436 break; 2437 } 2438 case Intrinsic::experimental_gc_get_pointer_offset: { 2439 Changed = true; 2440 Value *Derived = Callsite->getOperand(0); 2441 Value *Base = findBasePointer(Derived, DVCache); 2442 assert(!DVCache.count(Callsite)); 2443 unsigned AddressSpace = Derived->getType()->getPointerAddressSpace(); 2444 unsigned IntPtrSize = DL.getPointerSizeInBits(AddressSpace); 2445 IRBuilder<> Builder(Callsite); 2446 Value *BaseInt = 2447 Builder.CreatePtrToInt(Base, Type::getIntNTy(Context, IntPtrSize), 2448 suffixed_name_or(Base, ".int", "")); 2449 Value *DerivedInt = 2450 Builder.CreatePtrToInt(Derived, Type::getIntNTy(Context, IntPtrSize), 2451 suffixed_name_or(Derived, ".int", "")); 2452 Value *Offset = Builder.CreateSub(DerivedInt, BaseInt); 2453 Callsite->replaceAllUsesWith(Offset); 2454 Offset->takeName(Callsite); 2455 Callsite->eraseFromParent(); 2456 break; 2457 } 2458 default: 2459 llvm_unreachable("Unknown intrinsic"); 2460 } 2461 2462 return Changed; 2463 } 2464 2465 static bool insertParsePoints(Function &F, DominatorTree &DT, 2466 TargetTransformInfo &TTI, 2467 SmallVectorImpl<CallBase *> &ToUpdate, 2468 DefiningValueMapTy &DVCache) { 2469 #ifndef NDEBUG 2470 // Validate the input 2471 std::set<CallBase *> Uniqued; 2472 Uniqued.insert(ToUpdate.begin(), ToUpdate.end()); 2473 assert(Uniqued.size() == ToUpdate.size() && "no duplicates please!"); 2474 2475 for (CallBase *Call : ToUpdate) 2476 assert(Call->getFunction() == &F); 2477 #endif 2478 2479 // When inserting gc.relocates for invokes, we need to be able 
to insert at
2480 // the top of the successor blocks. See the comment on
2481 // normalizeForInvokeSafepoint for exactly what is needed. Note that this step
2482 // may restructure the CFG.
2483 for (CallBase *Call : ToUpdate) {
2484 auto *II = dyn_cast<InvokeInst>(Call);
2485 if (!II)
2486 continue;
2487 normalizeForInvokeSafepoint(II->getNormalDest(), II->getParent(), DT);
2488 normalizeForInvokeSafepoint(II->getUnwindDest(), II->getParent(), DT);
2489 }
2490
2491 // A list of dummy calls added to the IR to keep various values obviously
2492 // live in the IR. We'll remove all of these when done.
2493 SmallVector<CallInst *, 64> Holders;
2494
2495 // Insert a dummy call with all of the deopt operands we'll need for the
2496 // actual safepoint insertion as arguments. This ensures reference operands
2497 // in the deopt argument list are considered live through the safepoint (and
2498 // thus makes sure they get relocated.)
2499 for (CallBase *Call : ToUpdate) {
2500 SmallVector<Value *, 64> DeoptValues;
2501
2502 for (Value *Arg : GetDeoptBundleOperands(Call)) {
2503 assert(!isUnhandledGCPointerType(Arg->getType()) &&
2504 "support for FCA unimplemented");
2505 if (isHandledGCPointerType(Arg->getType()))
2506 DeoptValues.push_back(Arg);
2507 }
2508
2509 insertUseHolderAfter(Call, DeoptValues, Holders);
2510 }
2511
2512 SmallVector<PartiallyConstructedSafepointRecord, 64> Records(ToUpdate.size());
2513
2514 // A) Identify all gc pointers which are statically live at the given call
2515 // site.
2516 findLiveReferences(F, DT, ToUpdate, Records);
2517
2518 /// Global mapping from live pointers to a base-defining-value.
2519 PointerToBaseTy PointerToBase;
2520
2521 // B) Find the base pointers for each live pointer
2522 for (size_t i = 0; i < Records.size(); i++) {
2523 PartiallyConstructedSafepointRecord &info = Records[i];
2524 findBasePointers(DT, DVCache, ToUpdate[i], info, PointerToBase);
2525 }
2526 if (PrintBasePointers) {
2527 errs() << "Base Pairs (w/o Relocation):\n";
2528 for (auto &Pair : PointerToBase) {
2529 errs() << " derived ";
2530 Pair.first->printAsOperand(errs(), false);
2531 errs() << " base ";
2532 Pair.second->printAsOperand(errs(), false);
2533 errs() << "\n";
2534
2535 }
2536 }
2537
2538 // The base phi insertion logic (for any safepoint) may have inserted new
2539 // instructions which are now live at some safepoint. The simplest such
2540 // example is:
2541 // loop:
2542 // phi a <-- will be a new base_phi here
2543 // safepoint 1 <-- that needs to be live here
2544 // gep a + 1
2545 // safepoint 2
2546 // br loop
2547 // We insert some dummy calls after each safepoint to definitely hold live
2548 // the base pointers which were identified for that safepoint. We'll then
2549 // ask liveness for _every_ base inserted to see what is now live. Then we
2550 // remove the dummy calls.
2551 Holders.reserve(Holders.size() + Records.size());
2552 for (size_t i = 0; i < Records.size(); i++) {
2553 PartiallyConstructedSafepointRecord &Info = Records[i];
2554
2555 SmallVector<Value *, 128> Bases;
2556 for (auto *Derived : Info.LiveSet) {
2557 assert(PointerToBase.count(Derived) && "Missed base for derived pointer");
2558 Bases.push_back(PointerToBase[Derived]);
2559 }
2560
2561 insertUseHolderAfter(ToUpdate[i], Bases, Holders);
2562 }
2563
2564 // By selecting base pointers, we've effectively inserted new uses. Thus, we
2565 // need to rerun liveness. We may *also* have inserted new defs, but that's
2566 // not the key issue.
2567 recomputeLiveInValues(F, DT, ToUpdate, Records, PointerToBase);
2568
2569 if (PrintBasePointers) {
2570 errs() << "Base Pairs: (w/Relocation)\n";
2571 for (auto Pair : PointerToBase) {
2572 errs() << " derived ";
2573 Pair.first->printAsOperand(errs(), false);
2574 errs() << " base ";
2575 Pair.second->printAsOperand(errs(), false);
2576 errs() << "\n";
2577 }
2578 }
2579
2580 // It is possible that non-constant live variables have a constant base. For
2581 // example, a GEP with a variable offset from a global. In this case we can
2582 // remove it from the liveset. We already don't add constants to the liveset
2583 // because we assume they won't move at runtime and the GC doesn't need to be
2584 // informed about them. The same reasoning applies if the base is constant.
2585 // Note that the relocation placement code relies on this filtering for
2586 // correctness as it expects the base to be in the liveset, which isn't true
2587 // if the base is constant.
2588 for (auto &Info : Records) {
2589 Info.LiveSet.remove_if([&](Value *LiveV) {
2590 assert(PointerToBase.count(LiveV) && "Missed base for derived pointer");
2591 return isa<Constant>(PointerToBase[LiveV]);
2592 });
2593 }
2594
2595 for (CallInst *CI : Holders)
2596 CI->eraseFromParent();
2597
2598 Holders.clear();
2599
2600 // Compute the cost of possible re-materialization of derived pointers.
2601 RematCandTy RematerizationCandidates;
2602 findRematerializationCandidates(PointerToBase, RematerizationCandidates, TTI);
2603
2604 // In order to reduce the live set of a statepoint we might choose to
2605 // rematerialize some values instead of relocating them. This is purely an
2606 // optimization and does not influence correctness.
2607 for (size_t i = 0; i < Records.size(); i++)
2608 rematerializeLiveValues(ToUpdate[i], Records[i], PointerToBase,
2609 RematerizationCandidates, TTI);
2610
2611 // We need this to safely RAUW and delete call or invoke return values that
2612 // may themselves be live over a statepoint. For details, please see usage in
2613 // makeStatepointExplicitImpl.
2614 std::vector<DeferredReplacement> Replacements;
2615
2616 // Now run through and replace the existing statepoints with new ones with
2617 // the live variables listed. We do not yet update uses of the values being
2618 // relocated. We have references to live variables that need to
2619 // survive to the last iteration of this loop. (By construction, the
2620 // previous statepoint cannot be a live variable, thus we can and do remove
2621 // the old statepoint calls as we go.)
2622 for (size_t i = 0; i < Records.size(); i++)
2623 makeStatepointExplicit(DT, ToUpdate[i], Records[i], Replacements,
2624 PointerToBase);
2625
2626 ToUpdate.clear(); // prevent accidental use of invalid calls.
2627
2628 for (auto &PR : Replacements)
2629 PR.doReplacement();
2630
2631 Replacements.clear();
2632
2633 for (auto &Info : Records) {
2634 // These live sets may contain stale Value pointers, since we replaced calls
2635 // with operand bundles with calls wrapped in gc.statepoint, and some of
2636 // those calls may have been def'ing live gc pointers. Clear these out to
2637 // avoid accidentally using them.
2638 //
2639 // TODO: We should create a separate data structure that does not contain
2640 // these live sets, and migrate to using that data structure from this point
2641 // onward.
2642 Info.LiveSet.clear();
2643 }
2644 PointerToBase.clear();
2645
2646 // Do all the fixups of the original live variables to their relocated selves
2647 SmallVector<Value *, 128> Live;
2648 for (size_t i = 0; i < Records.size(); i++) {
2649 PartiallyConstructedSafepointRecord &Info = Records[i];
2650
2651 // We can't simply save the live set from the original insertion. One of
2652 // the live values might be the result of a call which needs a safepoint.
2653 // That Value* no longer exists and we need to use the new gc_result.
2654 // Thankfully, the live set is embedded in the statepoint (and updated), so
2655 // we just grab that.
2656 llvm::append_range(Live, Info.StatepointToken->gc_args());
2657 #ifndef NDEBUG
2658 // Do some basic validation checking on our liveness results before
2659 // performing relocation. Relocation can and will turn mistakes in liveness
2660 // results into nonsensical code which is much harder to debug.
2661 // TODO: It would be nice to test consistency as well
2662 assert(DT.isReachableFromEntry(Info.StatepointToken->getParent()) &&
2663 "statepoint must be reachable or liveness is meaningless");
2664 for (Value *V : Info.StatepointToken->gc_args()) {
2665 if (!isa<Instruction>(V))
2666 // Non-instruction values trivially dominate all possible uses
2667 continue;
2668 auto *LiveInst = cast<Instruction>(V);
2669 assert(DT.isReachableFromEntry(LiveInst->getParent()) &&
2670 "unreachable values should never be live");
2671 assert(DT.dominates(LiveInst, Info.StatepointToken) &&
2672 "basic SSA liveness expectation violated by liveness analysis");
2673 }
2674 #endif
2675 }
2676 unique_unsorted(Live);
2677
2678 #ifndef NDEBUG
2679 // Validation check
2680 for (auto *Ptr : Live)
2681 assert(isHandledGCPointerType(Ptr->getType()) &&
2682 "must be a gc pointer type");
2683 #endif
2684
2685 relocationViaAlloca(F, DT, Live, Records);
2686 return !Records.empty();
2687 }
2688
2689 // List of all parameter and return attributes which must be stripped when
2690 // lowering from the abstract machine model. Note that we list attributes
2691 // here which aren't valid as return attributes; that is okay.
2692 static AttributeMask getParamAndReturnAttributesToRemove() {
2693 AttributeMask R;
2694 R.addAttribute(Attribute::Dereferenceable);
2695 R.addAttribute(Attribute::DereferenceableOrNull);
2696 R.addAttribute(Attribute::ReadNone);
2697 R.addAttribute(Attribute::ReadOnly);
2698 R.addAttribute(Attribute::WriteOnly);
2699 R.addAttribute(Attribute::NoAlias);
2700 R.addAttribute(Attribute::NoFree);
2701 return R;
2702 }
2703
2704 static void stripNonValidAttributesFromPrototype(Function &F) {
2705 LLVMContext &Ctx = F.getContext();
2706
2707 // Intrinsics are very delicate. Lowering sometimes depends on the presence
2708 // of certain attributes for correctness, but we may also have inferred
2709 // additional ones in the abstract machine model which need to be stripped.
2710 // This assumes that the attributes defined in Intrinsic.td are conservatively
2711 // correct for both the physical and abstract models.
2712 if (Intrinsic::ID id = F.getIntrinsicID()) { 2713 F.setAttributes(Intrinsic::getAttributes(Ctx, id)); 2714 return; 2715 } 2716 2717 AttributeMask R = getParamAndReturnAttributesToRemove(); 2718 for (Argument &A : F.args()) 2719 if (isa<PointerType>(A.getType())) 2720 F.removeParamAttrs(A.getArgNo(), R); 2721 2722 if (isa<PointerType>(F.getReturnType())) 2723 F.removeRetAttrs(R); 2724 2725 for (auto Attr : FnAttrsToStrip) 2726 F.removeFnAttr(Attr); 2727 } 2728 2729 /// Certain metadata on instructions are invalid after running RS4GC. 2730 /// Optimizations that run after RS4GC can incorrectly use this metadata to 2731 /// optimize functions. We drop such metadata on the instruction. 2732 static void stripInvalidMetadataFromInstruction(Instruction &I) { 2733 if (!isa<LoadInst>(I) && !isa<StoreInst>(I)) 2734 return; 2735 // These are the attributes that are still valid on loads and stores after 2736 // RS4GC. 2737 // The metadata implying dereferenceability and noalias are (conservatively) 2738 // dropped. This is because semantically, after RewriteStatepointsForGC runs, 2739 // all calls to gc.statepoint "free" the entire heap. Also, gc.statepoint can 2740 // touch the entire heap including noalias objects. Note: The reasoning is 2741 // same as stripping the dereferenceability and noalias attributes that are 2742 // analogous to the metadata counterparts. 2743 // We also drop the invariant.load metadata on the load because that metadata 2744 // implies the address operand to the load points to memory that is never 2745 // changed once it became dereferenceable. This is no longer true after RS4GC. 2746 // Similar reasoning applies to invariant.group metadata, which applies to 2747 // loads within a group. 2748 unsigned ValidMetadataAfterRS4GC[] = {LLVMContext::MD_tbaa, 2749 LLVMContext::MD_range, 2750 LLVMContext::MD_alias_scope, 2751 LLVMContext::MD_nontemporal, 2752 LLVMContext::MD_nonnull, 2753 LLVMContext::MD_align, 2754 LLVMContext::MD_type}; 2755 2756 // Drops all metadata on the instruction other than ValidMetadataAfterRS4GC. 2757 I.dropUnknownNonDebugMetadata(ValidMetadataAfterRS4GC); 2758 } 2759 2760 static void stripNonValidDataFromBody(Function &F) { 2761 if (F.empty()) 2762 return; 2763 2764 LLVMContext &Ctx = F.getContext(); 2765 MDBuilder Builder(Ctx); 2766 2767 // Set of invariantstart instructions that we need to remove. 2768 // Use this to avoid invalidating the instruction iterator. 2769 SmallVector<IntrinsicInst*, 12> InvariantStartInstructions; 2770 2771 for (Instruction &I : instructions(F)) { 2772 // invariant.start on memory location implies that the referenced memory 2773 // location is constant and unchanging. This is no longer true after 2774 // RewriteStatepointsForGC runs because there can be calls to gc.statepoint 2775 // which frees the entire heap and the presence of invariant.start allows 2776 // the optimizer to sink the load of a memory location past a statepoint, 2777 // which is incorrect. 
2778 if (auto *II = dyn_cast<IntrinsicInst>(&I)) 2779 if (II->getIntrinsicID() == Intrinsic::invariant_start) { 2780 InvariantStartInstructions.push_back(II); 2781 continue; 2782 } 2783 2784 if (MDNode *Tag = I.getMetadata(LLVMContext::MD_tbaa)) { 2785 MDNode *MutableTBAA = Builder.createMutableTBAAAccessTag(Tag); 2786 I.setMetadata(LLVMContext::MD_tbaa, MutableTBAA); 2787 } 2788 2789 stripInvalidMetadataFromInstruction(I); 2790 2791 AttributeMask R = getParamAndReturnAttributesToRemove(); 2792 if (auto *Call = dyn_cast<CallBase>(&I)) { 2793 for (int i = 0, e = Call->arg_size(); i != e; i++) 2794 if (isa<PointerType>(Call->getArgOperand(i)->getType())) 2795 Call->removeParamAttrs(i, R); 2796 if (isa<PointerType>(Call->getType())) 2797 Call->removeRetAttrs(R); 2798 } 2799 } 2800 2801 // Delete the invariant.start instructions and RAUW undef. 2802 for (auto *II : InvariantStartInstructions) { 2803 II->replaceAllUsesWith(UndefValue::get(II->getType())); 2804 II->eraseFromParent(); 2805 } 2806 } 2807 2808 /// Returns true if this function should be rewritten by this pass. The main 2809 /// point of this function is as an extension point for custom logic. 2810 static bool shouldRewriteStatepointsIn(Function &F) { 2811 // TODO: This should check the GCStrategy 2812 if (F.hasGC()) { 2813 const auto &FunctionGCName = F.getGC(); 2814 const StringRef StatepointExampleName("statepoint-example"); 2815 const StringRef CoreCLRName("coreclr"); 2816 return (StatepointExampleName == FunctionGCName) || 2817 (CoreCLRName == FunctionGCName); 2818 } else 2819 return false; 2820 } 2821 2822 static void stripNonValidData(Module &M) { 2823 #ifndef NDEBUG 2824 assert(llvm::any_of(M, shouldRewriteStatepointsIn) && "precondition!"); 2825 #endif 2826 2827 for (Function &F : M) 2828 stripNonValidAttributesFromPrototype(F); 2829 2830 for (Function &F : M) 2831 stripNonValidDataFromBody(F); 2832 } 2833 2834 bool RewriteStatepointsForGC::runOnFunction(Function &F, DominatorTree &DT, 2835 TargetTransformInfo &TTI, 2836 const TargetLibraryInfo &TLI) { 2837 assert(!F.isDeclaration() && !F.empty() && 2838 "need function body to rewrite statepoints in"); 2839 assert(shouldRewriteStatepointsIn(F) && "mismatch in rewrite decision"); 2840 2841 auto NeedsRewrite = [&TLI](Instruction &I) { 2842 if (const auto *Call = dyn_cast<CallBase>(&I)) { 2843 if (isa<GCStatepointInst>(Call)) 2844 return false; 2845 if (callsGCLeafFunction(Call, TLI)) 2846 return false; 2847 2848 // Normally it's up to the frontend to make sure that non-leaf calls also 2849 // have proper deopt state if it is required. We make an exception for 2850 // element atomic memcpy/memmove intrinsics here. Unlike other intrinsics 2851 // these are non-leaf by default. They might be generated by the optimizer 2852 // which doesn't know how to produce a proper deopt state. So if we see a 2853 // non-leaf memcpy/memmove without deopt state just treat it as a leaf 2854 // copy and don't produce a statepoint. 2855 if (!AllowStatepointWithNoDeoptInfo && 2856 !Call->getOperandBundle(LLVMContext::OB_deopt)) { 2857 assert((isa<AtomicMemCpyInst>(Call) || isa<AtomicMemMoveInst>(Call)) && 2858 "Don't expect any other calls here!"); 2859 return false; 2860 } 2861 return true; 2862 } 2863 return false; 2864 }; 2865 2866 // Delete any unreachable statepoints so that we don't have unrewritten 2867 // statepoints surviving this pass. This makes testing easier and the 2868 // resulting IR less confusing to human readers. 

  // Delete any unreachable statepoints so that we don't have unrewritten
  // statepoints surviving this pass.  This makes testing easier and the
  // resulting IR less confusing to human readers.
  DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
  bool MadeChange = removeUnreachableBlocks(F, &DTU);
  // Flush the Dominator Tree.
  DTU.getDomTree();

  // Gather all the statepoints which need to be rewritten.  Be careful to only
  // consider those in reachable code since we need to ask dominance queries
  // when rewriting.  We'll delete the unreachable ones in a moment.
  SmallVector<CallBase *, 64> ParsePointNeeded;
  SmallVector<CallInst *, 64> Intrinsics;
  for (Instruction &I : instructions(F)) {
    // TODO: only the ones with the flag set!
    if (NeedsRewrite(I)) {
      // NOTE: removeUnreachableBlocks() is stronger than
      // DominatorTree::isReachableFromEntry(). In other words,
      // removeUnreachableBlocks can remove some blocks for which
      // isReachableFromEntry() returns true.
      assert(DT.isReachableFromEntry(I.getParent()) &&
             "no unreachable blocks expected");
      ParsePointNeeded.push_back(cast<CallBase>(&I));
    }
    if (auto *CI = dyn_cast<CallInst>(&I))
      if (CI->getIntrinsicID() == Intrinsic::experimental_gc_get_pointer_base ||
          CI->getIntrinsicID() == Intrinsic::experimental_gc_get_pointer_offset)
        Intrinsics.emplace_back(CI);
  }

  // Return early if no work to do.
  if (ParsePointNeeded.empty() && Intrinsics.empty())
    return MadeChange;

  // As a prepass, go ahead and aggressively destroy single entry phi nodes.
  // These are created by LCSSA.  They have the effect of increasing the size
  // of liveness sets for no good reason.  It may be harder to do this post
  // insertion since relocations and base phis can confuse things.
  for (BasicBlock &BB : F)
    if (BB.getUniquePredecessor())
      MadeChange |= FoldSingleEntryPHINodes(&BB);

  // Before we start introducing relocations, we want to tweak the IR a bit to
  // avoid unfortunate code generation effects.  The main example is that we
  // want to try to make sure the comparison feeding a branch is after any
  // safepoints.  Otherwise, we end up with a comparison of pre-relocation
  // values feeding a branch after relocation.  This is semantically correct,
  // but results in extra register pressure since both the pre-relocation and
  // post-relocation copies must be available in registers.  For code without
  // relocations this is handled elsewhere, but teaching the scheduler to
  // reverse the transform we're about to do would be slightly complex.
  // Note: This may extend the live range of the inputs to the icmp and thus
  // increase the liveset of any statepoint we move over.  This is profitable
  // as long as all statepoints are in rare blocks.  If we had in-register
  // lowering for live values this would be a much safer transform.
  // An illustrative before/after sketch follows the loop below.
  auto getConditionInst = [](Instruction *TI) -> Instruction * {
    if (auto *BI = dyn_cast<BranchInst>(TI))
      if (BI->isConditional())
        return dyn_cast<Instruction>(BI->getCondition());
    // TODO: Extend this to handle switches
    return nullptr;
  };
  for (BasicBlock &BB : F) {
    Instruction *TI = BB.getTerminator();
    if (auto *Cond = getConditionInst(TI))
      // TODO: Handle more than just ICmps here.  We should be able to move
      // most instructions without side effects or memory access.
      if (isa<ICmpInst>(Cond) && Cond->hasOneUse()) {
        MadeChange = true;
        Cond->moveBefore(TI);
      }
  }
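
  // Illustrative before/after sketch of the tweak above (hypothetical IR; %p,
  // @foo, and the labels are not from this file).  Before:
  //   %c = icmp eq ptr %p, null
  //   call void @foo()                       ; will become a safepoint
  //   br i1 %c, label %taken, label %untaken
  // After moving the compare down to the branch:
  //   call void @foo()                       ; will become a safepoint
  //   %c = icmp eq ptr %p, null
  //   br i1 %c, label %taken, label %untaken
  // Once the statepoint for @foo is inserted, the compare reads the relocated
  // %p, so only the post-relocation copy has to be kept live across the call.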

  // Nasty workaround - The base computation code in the main algorithm doesn't
  // consider the fact that a GEP can be used to convert a scalar to a vector.
  // The right fix for this is to integrate GEPs into the base rewriting
  // algorithm properly; this is just a short term workaround to prevent
  // crashes by canonicalizing such GEPs into fully vector GEPs.
  // A worked example follows the loop below.
  for (Instruction &I : instructions(F)) {
    if (!isa<GetElementPtrInst>(I))
      continue;

    unsigned VF = 0;
    for (unsigned i = 0; i < I.getNumOperands(); i++)
      if (auto *OpndVTy = dyn_cast<VectorType>(I.getOperand(i)->getType())) {
        assert(VF == 0 ||
               VF == cast<FixedVectorType>(OpndVTy)->getNumElements());
        VF = cast<FixedVectorType>(OpndVTy)->getNumElements();
      }

    // It's the vector to scalar traversal through the pointer operand which
    // confuses base pointer rewriting, so limit ourselves to that case.
    if (!I.getOperand(0)->getType()->isVectorTy() && VF != 0) {
      IRBuilder<> B(&I);
      auto *Splat = B.CreateVectorSplat(VF, I.getOperand(0));
      I.setOperand(0, Splat);
      MadeChange = true;
    }
  }
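
  // For example (hypothetical IR, illustrative only), a GEP with a scalar
  // pointer operand but vector indices such as
  //   %g = getelementptr i32, ptr %base, <2 x i64> %offs
  // is canonicalized by the loop above into roughly
  //   %b.ins   = insertelement <2 x ptr> poison, ptr %base, i64 0
  //   %b.splat = shufflevector <2 x ptr> %b.ins, <2 x ptr> poison,
  //                            <2 x i32> zeroinitializer
  //   %g       = getelementptr i32, <2 x ptr> %b.splat, <2 x i64> %offs
  // so that base pointer rewriting only ever sees fully vector GEPs.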

  // Cache the 'defining value' relation used in the computation and
  // insertion of base phis and selects.  This ensures that we don't insert
  // large numbers of duplicate base_phis.  Use one cache for both
  // inlineGetBaseAndOffset() and insertParsePoints().
  DefiningValueMapTy DVCache;

  if (!Intrinsics.empty())
    // Inline @gc.get.pointer.base() and @gc.get.pointer.offset() before
    // finding live references.
    MadeChange |= inlineGetBaseAndOffset(F, Intrinsics, DVCache);

  if (!ParsePointNeeded.empty())
    MadeChange |= insertParsePoints(F, DT, TTI, ParsePointNeeded, DVCache);

  return MadeChange;
}

// liveness computation via standard dataflow
// -------------------------------------------------------------------

// TODO: Consider using bitvectors for liveness, the set of potentially
// interesting values should be small and easy to pre-compute.

/// Compute the live-in set for the location rbegin starting from
/// the live-out set of the basic block
static void computeLiveInValues(BasicBlock::reverse_iterator Begin,
                                BasicBlock::reverse_iterator End,
                                SetVector<Value *> &LiveTmp) {
  for (auto &I : make_range(Begin, End)) {
    // KILL/Def - Remove this definition from LiveIn
    LiveTmp.remove(&I);

    // Don't consider *uses* in PHI nodes, we handle their contribution to
    // predecessor blocks when we seed the LiveOut sets
    if (isa<PHINode>(I))
      continue;

    // USE - Add to the LiveIn set for this instruction
    for (Value *V : I.operands()) {
      assert(!isUnhandledGCPointerType(V->getType()) &&
             "support for FCA unimplemented");
      if (isHandledGCPointerType(V->getType()) && !isa<Constant>(V)) {
        // The choice to exclude all things constant here is slightly subtle.
        // There are two independent reasons:
        // - We assume that things which are constant (from LLVM's definition)
        //   do not move at runtime.  For example, the address of a global
        //   variable is fixed, even though its contents may not be.
        // - Second, we can't disallow arbitrary inttoptr constants even
        //   if the language frontend does.  Optimization passes are free to
        //   locally exploit facts without respect to global reachability.  This
        //   can create sections of code which are dynamically unreachable and
        //   contain just about anything.  (see constants.ll in tests)
        LiveTmp.insert(V);
      }
    }
  }
}

static void computeLiveOutSeed(BasicBlock *BB, SetVector<Value *> &LiveTmp) {
  for (BasicBlock *Succ : successors(BB)) {
    for (auto &I : *Succ) {
      PHINode *PN = dyn_cast<PHINode>(&I);
      if (!PN)
        break;

      Value *V = PN->getIncomingValueForBlock(BB);
      assert(!isUnhandledGCPointerType(V->getType()) &&
             "support for FCA unimplemented");
      if (isHandledGCPointerType(V->getType()) && !isa<Constant>(V))
        LiveTmp.insert(V);
    }
  }
}

static SetVector<Value *> computeKillSet(BasicBlock *BB) {
  SetVector<Value *> KillSet;
  for (Instruction &I : *BB)
    if (isHandledGCPointerType(I.getType()))
      KillSet.insert(&I);
  return KillSet;
}

#ifndef NDEBUG
/// Check that the items in 'Live' dominate 'TI'.  This is used as a basic
/// validation check for the liveness computation.
static void checkBasicSSA(DominatorTree &DT, SetVector<Value *> &Live,
                          Instruction *TI, bool TermOkay = false) {
  for (Value *V : Live) {
    if (auto *I = dyn_cast<Instruction>(V)) {
      // The terminator can be a member of the LiveOut set.  LLVM's definition
      // of instruction dominance states that V does not dominate itself.  As
      // such, we need to special case this to allow it.
      if (TermOkay && TI == I)
        continue;
      assert(DT.dominates(I, TI) &&
             "basic SSA liveness expectation violated by liveness analysis");
    }
  }
}

/// Check that all the liveness sets used during the computation of liveness
/// obey basic SSA properties.  This is useful for finding cases where we miss
/// a def.
static void checkBasicSSA(DominatorTree &DT, GCPtrLivenessData &Data,
                          BasicBlock &BB) {
  checkBasicSSA(DT, Data.LiveSet[&BB], BB.getTerminator());
  checkBasicSSA(DT, Data.LiveOut[&BB], BB.getTerminator(), true);
  checkBasicSSA(DT, Data.LiveIn[&BB], BB.getTerminator());
}
#endif

static void computeLiveInValues(DominatorTree &DT, Function &F,
                                GCPtrLivenessData &Data) {
  SmallSetVector<BasicBlock *, 32> Worklist;

  // Seed the liveness for each individual block
  for (BasicBlock &BB : F) {
    Data.KillSet[&BB] = computeKillSet(&BB);
    Data.LiveSet[&BB].clear();
    computeLiveInValues(BB.rbegin(), BB.rend(), Data.LiveSet[&BB]);

#ifndef NDEBUG
    for (Value *Kill : Data.KillSet[&BB])
      assert(!Data.LiveSet[&BB].count(Kill) && "live set contains kill");
#endif

    Data.LiveOut[&BB] = SetVector<Value *>();
    computeLiveOutSeed(&BB, Data.LiveOut[&BB]);
    Data.LiveIn[&BB] = Data.LiveSet[&BB];
    Data.LiveIn[&BB].set_union(Data.LiveOut[&BB]);
    Data.LiveIn[&BB].set_subtract(Data.KillSet[&BB]);
    if (!Data.LiveIn[&BB].empty())
      Worklist.insert(pred_begin(&BB), pred_end(&BB));
  }

  // Propagate that liveness until stable
  while (!Worklist.empty()) {
    BasicBlock *BB = Worklist.pop_back_val();

    // Compute our new liveout set, then exit early if it hasn't changed
    // despite the contribution of our successors.
    SetVector<Value *> LiveOut = Data.LiveOut[BB];
    const auto OldLiveOutSize = LiveOut.size();
    for (BasicBlock *Succ : successors(BB)) {
      assert(Data.LiveIn.count(Succ));
      LiveOut.set_union(Data.LiveIn[Succ]);
    }
    // assert: the old LiveOut is a subset of LiveOut
    if (OldLiveOutSize == LiveOut.size()) {
      // If the sets are the same size, then we didn't actually add anything
      // when unioning our successors' LiveIn.  Thus, the LiveIn of this block
      // hasn't changed.
      continue;
    }
    Data.LiveOut[BB] = LiveOut;

    // Apply the effects of this basic block
    SetVector<Value *> LiveTmp = LiveOut;
    LiveTmp.set_union(Data.LiveSet[BB]);
    LiveTmp.set_subtract(Data.KillSet[BB]);

    assert(Data.LiveIn.count(BB));
    const SetVector<Value *> &OldLiveIn = Data.LiveIn[BB];
    // assert: OldLiveIn is a subset of LiveTmp
    if (OldLiveIn.size() != LiveTmp.size()) {
      Data.LiveIn[BB] = LiveTmp;
      Worklist.insert(pred_begin(BB), pred_end(BB));
    }
  } // while (!Worklist.empty())

#ifndef NDEBUG
  // Verify our output against SSA properties.  This helps catch any
  // missing kills during the above iteration.
  for (BasicBlock &BB : F)
    checkBasicSSA(DT, Data, BB);
#endif
}
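
// In equation form, the fixed point computed above is standard backward
// liveness dataflow (names match the GCPtrLivenessData fields; this is a
// sketch of the relations, not additional code):
//
//   LiveOut(BB) = PhiSeed(BB) U ( union of LiveIn(S) over each successor S )
//   LiveIn(BB)  = ( LiveSet(BB) U LiveOut(BB) ) \ KillSet(BB)
//
// where PhiSeed(BB) is the successor-PHI contribution added by
// computeLiveOutSeed, LiveSet(BB) is the block-local set of upward-exposed GC
// pointer uses, and KillSet(BB) is the set of GC pointer definitions in BB.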

static void findLiveSetAtInst(Instruction *Inst, GCPtrLivenessData &Data,
                              StatepointLiveSetTy &Out) {
  BasicBlock *BB = Inst->getParent();

  // Note: The copy is intentional and required
  assert(Data.LiveOut.count(BB));
  SetVector<Value *> LiveOut = Data.LiveOut[BB];

  // We want to handle the statepoint itself oddly.  Its call result is not
  // live (normal), nor are its arguments (unless they're used again later).
  // This adjustment yields exactly the set we need to relocate.
  computeLiveInValues(BB->rbegin(), ++Inst->getIterator().getReverse(),
                      LiveOut);
  LiveOut.remove(Inst);
  Out.insert(LiveOut.begin(), LiveOut.end());
}

static void recomputeLiveInValues(GCPtrLivenessData &RevisedLivenessData,
                                  CallBase *Call,
                                  PartiallyConstructedSafepointRecord &Info,
                                  PointerToBaseTy &PointerToBase) {
  StatepointLiveSetTy Updated;
  findLiveSetAtInst(Call, RevisedLivenessData, Updated);

  // We may have base pointers which are now live that weren't before.  We
  // need to update the PointerToBase structure to reflect this.
  for (auto V : Updated)
    PointerToBase.insert({V, V});

  Info.LiveSet = Updated;
}
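
// A minimal usage sketch of the liveness helpers above (illustrative only;
// `DT`, `F`, and `StatepointCall` are hypothetical locals, not names defined
// here), roughly the shape in which the rest of this pass drives them:
//
//   GCPtrLivenessData OriginalLivenessData;
//   computeLiveInValues(DT, F, OriginalLivenessData);
//   StatepointLiveSetTy LiveAtCall;
//   findLiveSetAtInst(StatepointCall, OriginalLivenessData, LiveAtCall);
//   // LiveAtCall now contains the GC pointers that must be relocated across
//   // StatepointCall.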