//===- RewriteStatepointsForGC.cpp - Make GC relocations explicit ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Rewrite call/invoke instructions so as to make potential relocations
// performed by the garbage collector explicit in the IR.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/RewriteStatepointsForGC.h"

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <set>
#include <string>
#include <utility>
#include <vector>

#define DEBUG_TYPE "rewrite-statepoints-for-gc"

using namespace llvm;

// Print the liveset found at the insert location
static cl::opt<bool> PrintLiveSet("spp-print-liveset", cl::Hidden,
                                  cl::init(false));
static cl::opt<bool> PrintLiveSetSize("spp-print-liveset-size", cl::Hidden,
                                      cl::init(false));

// Print out the base pointers for debugging
static cl::opt<bool> PrintBasePointers("spp-print-base-pointers", cl::Hidden,
                                       cl::init(false));

// Cost threshold measuring when it is profitable to rematerialize a value
// instead of relocating it
static cl::opt<unsigned>
    RematerializationThreshold("spp-rematerialization-threshold", cl::Hidden,
                               cl::init(6));

#ifdef EXPENSIVE_CHECKS
static bool ClobberNonLive = true;
#else
static bool ClobberNonLive = false;
#endif

static cl::opt<bool, true> ClobberNonLiveOverride("rs4gc-clobber-non-live",
                                                  cl::location(ClobberNonLive),
                                                  cl::Hidden);

static cl::opt<bool>
    AllowStatepointWithNoDeoptInfo("rs4gc-allow-statepoint-with-no-deopt-info",
                                   cl::Hidden, cl::init(true));

/// The IR fed into RewriteStatepointsForGC may have had attributes and
/// metadata implying dereferenceability that are no longer valid/correct after
/// RewriteStatepointsForGC has run. This is because, semantically, after
/// RewriteStatepointsForGC runs, all calls to gc.statepoint "free" the entire
/// heap. stripNonValidData (conservatively) restores correctness by erasing
/// all attributes in the module that externally imply dereferenceability.
/// Similar reasoning also applies to the noalias attributes and metadata:
/// gc.statepoint can touch the entire heap, including noalias objects.
/// Apart from attributes and metadata, we also remove instructions that imply
/// constant physical memory: llvm.invariant.start.
static void stripNonValidData(Module &M);

static bool shouldRewriteStatepointsIn(Function &F);

PreservedAnalyses RewriteStatepointsForGC::run(Module &M,
                                               ModuleAnalysisManager &AM) {
  bool Changed = false;
  auto &FAM = AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
  for (Function &F : M) {
    // Nothing to do for declarations.
    if (F.isDeclaration() || F.empty())
      continue;

    // Policy choice says not to rewrite - the most common reason is that we're
    // compiling code without a GCStrategy.
    if (!shouldRewriteStatepointsIn(F))
      continue;

    auto &DT = FAM.getResult<DominatorTreeAnalysis>(F);
    auto &TTI = FAM.getResult<TargetIRAnalysis>(F);
    auto &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
    Changed |= runOnFunction(F, DT, TTI, TLI);
  }
  if (!Changed)
    return PreservedAnalyses::all();

  // stripNonValidData asserts that shouldRewriteStatepointsIn
  // returns true for at least one function in the module.  Since at least
  // one function changed, we know that the precondition is satisfied.
  stripNonValidData(M);

  PreservedAnalyses PA;
  PA.preserve<TargetIRAnalysis>();
  PA.preserve<TargetLibraryAnalysis>();
  return PA;
}

namespace {

class RewriteStatepointsForGCLegacyPass : public ModulePass {
  RewriteStatepointsForGC Impl;

public:
  static char ID; // Pass identification, replacement for typeid

  RewriteStatepointsForGCLegacyPass() : ModulePass(ID), Impl() {
    initializeRewriteStatepointsForGCLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  bool runOnModule(Module &M) override {
    bool Changed = false;
    for (Function &F : M) {
      // Nothing to do for declarations.
      if (F.isDeclaration() || F.empty())
        continue;

      // Policy choice says not to rewrite - the most common reason is that
      // we're compiling code without a GCStrategy.
      if (!shouldRewriteStatepointsIn(F))
        continue;

      TargetTransformInfo &TTI =
          getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
      const TargetLibraryInfo &TLI =
          getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
      auto &DT = getAnalysis<DominatorTreeWrapperPass>(F).getDomTree();

      Changed |= Impl.runOnFunction(F, DT, TTI, TLI);
    }

    if (!Changed)
      return false;

    // stripNonValidData asserts that shouldRewriteStatepointsIn
    // returns true for at least one function in the module.  Since at least
    // one function changed, we know that the precondition is satisfied.
    stripNonValidData(M);
    return true;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // We add and rewrite a bunch of instructions, but don't really do much
    // else.  We could in theory preserve a lot more analyses here.
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
  }
};

} // end anonymous namespace

char RewriteStatepointsForGCLegacyPass::ID = 0;

ModulePass *llvm::createRewriteStatepointsForGCLegacyPass() {
  return new RewriteStatepointsForGCLegacyPass();
}

INITIALIZE_PASS_BEGIN(RewriteStatepointsForGCLegacyPass,
                      "rewrite-statepoints-for-gc",
                      "Make relocations explicit at statepoints", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(RewriteStatepointsForGCLegacyPass,
                    "rewrite-statepoints-for-gc",
                    "Make relocations explicit at statepoints", false, false)

namespace {

struct GCPtrLivenessData {
  /// Values defined in this block.
  MapVector<BasicBlock *, SetVector<Value *>> KillSet;

  /// Values used in this block (and thus live); does not include values
  /// killed within this block.
  MapVector<BasicBlock *, SetVector<Value *>> LiveSet;

  /// Values live into this basic block (i.e. used by any
  /// instruction in this basic block or ones reachable from here)
  MapVector<BasicBlock *, SetVector<Value *>> LiveIn;

  /// Values live out of this basic block (i.e. live into
  /// any successor block)
  MapVector<BasicBlock *, SetVector<Value *>> LiveOut;
};

// The type of the internal cache used inside the findBasePointers family
// of functions.  From the caller's perspective, this is an opaque type and
// should not be inspected.
//
// In the actual implementation this caches two relations:
// - The base relation itself (i.e. this pointer is based on that one)
// - The base defining value relation (i.e. before base_phi insertion)
// Generally, after the execution of a full findBasePointer call, only the
// base relation will remain.  Internally, we add a mixture of the two
// types, then update all the second type to the first type
using DefiningValueMapTy = MapVector<Value *, Value *>;
using PointerToBaseTy = MapVector<Value *, Value *>;
using StatepointLiveSetTy = SetVector<Value *>;
using RematerializedValueMapTy =
    MapVector<AssertingVH<Instruction>, AssertingVH<Value>>;

struct PartiallyConstructedSafepointRecord {
  /// The set of values known to be live across this safepoint
  StatepointLiveSetTy LiveSet;

  /// The *new* gc.statepoint instruction itself.  This produces the token
  /// that normal path gc.relocates and the gc.result are tied to.
  GCStatepointInst *StatepointToken;

  /// Instruction to which exceptional gc relocates are attached.
  /// Makes it easier to iterate through them during relocationViaAlloca.
  Instruction *UnwindToken;

  /// Records live values we rematerialize instead of relocating.
  /// They are not included in the 'LiveSet' field.
  /// Maps a rematerialized copy to its original value.
  RematerializedValueMapTy RematerializedValues;
};

struct RematerizlizationCandidateRecord {
  // Chain from derived pointer to base.
  SmallVector<Instruction *, 3> ChainToBase;
  // Original base.
  Value *RootOfChain;
  // Cost of chain.
  InstructionCost Cost;
};
using RematCandTy = MapVector<Value *, RematerizlizationCandidateRecord>;

} // end anonymous namespace

static ArrayRef<Use> GetDeoptBundleOperands(const CallBase *Call) {
  Optional<OperandBundleUse> DeoptBundle =
      Call->getOperandBundle(LLVMContext::OB_deopt);

  if (!DeoptBundle.hasValue()) {
    assert(AllowStatepointWithNoDeoptInfo &&
           "Found non-leaf call without deopt info!");
    return None;
  }

  return DeoptBundle.getValue().Inputs;
}

/// Compute the live-in set for every basic block in the function
static void computeLiveInValues(DominatorTree &DT, Function &F,
                                GCPtrLivenessData &Data);

/// Given results from the dataflow liveness computation, find the set of live
/// Values at a particular instruction.
static void findLiveSetAtInst(Instruction *inst, GCPtrLivenessData &Data,
                              StatepointLiveSetTy &out);

// TODO: Once we can get to the GCStrategy, this becomes
// Optional<bool> isGCManagedPointer(const Type *Ty) const override {

static bool isGCPointerType(Type *T) {
  if (auto *PT = dyn_cast<PointerType>(T))
    // For the sake of this example GC, we arbitrarily pick addrspace(1) as our
    // GC managed heap.  We know that a pointer into this heap needs to be
    // updated and that no other pointer does.
    return PT->getAddressSpace() == 1;
  return false;
}

// Return true if this type is one which a) is a gc pointer or contains a GC
// pointer and b) is of a type this code expects to encounter as a live value.
// (The insertion code will assert that a type which matches (a) and not (b)
// is not encountered.)
static bool isHandledGCPointerType(Type *T) {
  // We fully support gc pointers
  if (isGCPointerType(T))
    return true;
  // We partially support vectors of gc pointers.  The code will assert if it
  // can't handle something.
  if (auto VT = dyn_cast<VectorType>(T))
    if (isGCPointerType(VT->getElementType()))
      return true;
  return false;
}
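
// Illustrative sketch of the addrspace(1) convention above (hypothetical IR,
// not taken from the pass's tests): %obj is GC-managed and must be reported
// and relocated, %raw is not.
//
//   %obj = call i8 addrspace(1)* @allocate_object()  ; isGCPointerType -> true
//   %raw = alloca i8                                  ; addrspace(0) -> false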

#ifndef NDEBUG
/// Returns true if this type contains a gc pointer whether we know how to
/// handle that type or not.
static bool containsGCPtrType(Type *Ty) {
  if (isGCPointerType(Ty))
    return true;
  if (VectorType *VT = dyn_cast<VectorType>(Ty))
    return isGCPointerType(VT->getScalarType());
  if (ArrayType *AT = dyn_cast<ArrayType>(Ty))
    return containsGCPtrType(AT->getElementType());
  if (StructType *ST = dyn_cast<StructType>(Ty))
    return llvm::any_of(ST->elements(), containsGCPtrType);
  return false;
}

// Returns true if this is a type which a) is a gc pointer or contains a GC
// pointer and b) is of a type which the code doesn't expect (i.e. first class
// aggregates).  Used to trip assertions.
static bool isUnhandledGCPointerType(Type *Ty) {
  return containsGCPtrType(Ty) && !isHandledGCPointerType(Ty);
}
#endif

// Return the name of the value suffixed with the provided value, or if the
// value didn't have a name, the default value specified.
static std::string suffixed_name_or(Value *V, StringRef Suffix,
                                    StringRef DefaultName) {
  return V->hasName() ? (V->getName() + Suffix).str() : DefaultName.str();
}

// Conservatively identifies any definitions which might be live at the
// given instruction.  The analysis is performed immediately before the
// given instruction.  Values defined by that instruction are not considered
// live.  Values used by that instruction are considered live.
static void analyzeParsePointLiveness(
    DominatorTree &DT, GCPtrLivenessData &OriginalLivenessData, CallBase *Call,
    PartiallyConstructedSafepointRecord &Result) {
  StatepointLiveSetTy LiveSet;
  findLiveSetAtInst(Call, OriginalLivenessData, LiveSet);

  if (PrintLiveSet) {
    dbgs() << "Live Variables:\n";
    for (Value *V : LiveSet)
      dbgs() << " " << V->getName() << " " << *V << "\n";
  }
  if (PrintLiveSetSize) {
    dbgs() << "Safepoint For: " << Call->getCalledOperand()->getName() << "\n";
    dbgs() << "Number live values: " << LiveSet.size() << "\n";
  }
  Result.LiveSet = LiveSet;
}

// Returns true if V is a known base result.
static bool isKnownBaseResult(Value *V);

// Returns true if V is a BaseResult that already exists in the IR, i.e. it is
// not created by the findBasePointers algorithm.
static bool isOriginalBaseResult(Value *V);

namespace {

/// A single base defining value - An immediate base defining value for an
/// instruction 'Def' is an input to 'Def' whose base is also a base of 'Def'.
/// For instructions which have multiple pointer [vector] inputs or that
/// transition between vector and scalar types, there is no immediate base
/// defining value.  The 'base defining value' for 'Def' is the transitive
/// closure of this relation stopping at the first instruction which has no
/// immediate base defining value.  The b.d.v. might itself be a base pointer,
/// but it can also be an arbitrary derived pointer.
struct BaseDefiningValueResult {
  /// Contains the value which is the base defining value.
  Value * const BDV;

  /// True if the base defining value is also known to be an actual base
  /// pointer.
  const bool IsKnownBase;

  BaseDefiningValueResult(Value *BDV, bool IsKnownBase)
      : BDV(BDV), IsKnownBase(IsKnownBase) {
#ifndef NDEBUG
    // Check consistency between new and old means of checking whether a BDV
    // is a base.
    bool MustBeBase = isKnownBaseResult(BDV);
    assert(!MustBeBase || MustBeBase == IsKnownBase);
#endif
  }
};

} // end anonymous namespace

static BaseDefiningValueResult findBaseDefiningValue(Value *I);

/// Return a base defining value for the 'Index' element of the given vector
/// instruction 'I'.  If Index is null, returns a BDV for the entire vector
/// 'I'.  As an optimization, this method will try to determine when the
/// element is known to already be a base pointer.  If this can be established,
/// the second value in the returned pair will be true.  Note that either a
/// vector or a pointer typed value can be returned.  For the former, the
/// vector returned is a BDV (and possibly a base) of the entire vector 'I'.
/// If the latter, the return pointer is a BDV (or possibly a base) for the
/// particular element in 'I'.
static BaseDefiningValueResult
findBaseDefiningValueOfVector(Value *I) {
  // Each case parallels findBaseDefiningValue below, see that code for
  // detailed motivation.

  if (isa<Argument>(I))
    // An incoming argument to the function is a base pointer
    return BaseDefiningValueResult(I, true);

  if (isa<Constant>(I))
    // Base of constant vector consists only of constant null pointers.
    // For reasoning see similar case inside 'findBaseDefiningValue' function.
    return BaseDefiningValueResult(ConstantAggregateZero::get(I->getType()),
                                   true);

  if (isa<LoadInst>(I))
    return BaseDefiningValueResult(I, true);

  if (isa<InsertElementInst>(I))
    // We don't know whether this vector contains entirely base pointers or
    // not.  To be conservatively correct, we treat it as a BDV and will
    // duplicate code as needed to construct a parallel vector of bases.
    return BaseDefiningValueResult(I, false);

  if (isa<ShuffleVectorInst>(I))
    // We don't know whether this vector contains entirely base pointers or
    // not.  To be conservatively correct, we treat it as a BDV and will
    // duplicate code as needed to construct a parallel vector of bases.
    // TODO: There are a number of local optimizations which could be applied
    // here for particular shufflevector patterns.
    return BaseDefiningValueResult(I, false);

  // The behavior of getelementptr instructions is the same for vector and
  // non-vector data types.
  if (auto *GEP = dyn_cast<GetElementPtrInst>(I))
    return findBaseDefiningValue(GEP->getPointerOperand());

  // If the pointer comes through a bitcast of a vector of pointers to
  // a vector of another type of pointer, then look through the bitcast
  if (auto *BC = dyn_cast<BitCastInst>(I))
    return findBaseDefiningValue(BC->getOperand(0));

  // We assume that functions in the source language only return base
  // pointers.  This should probably be generalized via attributes to support
  // both source language and internal functions.
  if (isa<CallInst>(I) || isa<InvokeInst>(I))
    return BaseDefiningValueResult(I, true);

  // A PHI or Select is a base defining value.  The outer findBasePointer
  // algorithm is responsible for constructing a base value for this BDV.
  assert((isa<SelectInst>(I) || isa<PHINode>(I)) &&
         "unknown vector instruction - no base found for vector element");
  return BaseDefiningValueResult(I, false);
}

/// Helper function for findBasePointer - Will return a value which either a)
/// defines the base pointer for the input, b) blocks the simple search
/// (i.e. a PHI or Select of two derived pointers), or c) involves a change
/// from pointer to vector type or back.
static BaseDefiningValueResult findBaseDefiningValue(Value *I) {
  assert(I->getType()->isPtrOrPtrVectorTy() &&
         "Illegal to ask for the base pointer of a non-pointer type");

  if (I->getType()->isVectorTy())
    return findBaseDefiningValueOfVector(I);

  if (isa<Argument>(I))
    // An incoming argument to the function is a base pointer
    // We should have never reached here if this argument isn't a gc value
    return BaseDefiningValueResult(I, true);

  if (isa<Constant>(I)) {
    // We assume that objects with a constant base (e.g. a global) can't move
    // and don't need to be reported to the collector because they are always
    // live.  Besides global references, all kinds of constants (e.g. undef,
    // constant expressions, null pointers) can be introduced by the inliner
    // or the optimizer, especially on dynamically dead paths.
    // Here we treat all of them as having a single null base.  By doing this
    // we try to avoid problems reporting various conflicts in the form of
    // "phi (const1, const2)" or "phi (const, regular gc ptr)".
    // See constant.ll file for relevant test cases.

    return BaseDefiningValueResult(
        ConstantPointerNull::get(cast<PointerType>(I->getType())), true);
  }

  // inttoptrs in an integral address space are currently ill-defined.  We
  // treat them as defining base pointers here for consistency with the
  // constant rule above and because we don't really have a better semantic
  // to give them.  Note that the optimizer is always free to insert undefined
  // behavior on dynamically dead paths as well.
  if (isa<IntToPtrInst>(I))
    return BaseDefiningValueResult(I, true);

  if (CastInst *CI = dyn_cast<CastInst>(I)) {
    Value *Def = CI->stripPointerCasts();
    // If stripping pointer casts changes the address space there is an
    // addrspacecast in between.
    assert(cast<PointerType>(Def->getType())->getAddressSpace() ==
               cast<PointerType>(CI->getType())->getAddressSpace() &&
           "unsupported addrspacecast");
    // If we find a cast instruction here, it means we've found a cast which
    // is not simply a pointer cast (i.e. an inttoptr).  We don't know how to
    // handle int->ptr conversion.
    assert(!isa<CastInst>(Def) && "shouldn't find another cast here");
    return findBaseDefiningValue(Def);
  }

  if (isa<LoadInst>(I))
    // The value loaded is a gc base itself
    return BaseDefiningValueResult(I, true);

  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I))
    // The base of this GEP is the base
    return findBaseDefiningValue(GEP->getPointerOperand());

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default:
      // fall through to general call handling
      break;
    case Intrinsic::experimental_gc_statepoint:
      llvm_unreachable("statepoints don't produce pointers");
    case Intrinsic::experimental_gc_relocate:
      // Rerunning safepoint insertion after safepoints are already
      // inserted is not supported.  It could probably be made to work,
      // but why are you doing this?  There's no good reason.
      llvm_unreachable("repeat safepoint insertion is not supported");
    case Intrinsic::gcroot:
      // Currently, this mechanism hasn't been extended to work with gcroot.
      // There's no reason it couldn't be, but I haven't thought about the
      // implications much.
      llvm_unreachable(
          "interaction with the gcroot mechanism is not supported");
    case Intrinsic::experimental_gc_get_pointer_base:
      return findBaseDefiningValue(II->getOperand(0));
    }
  }
  // We assume that functions in the source language only return base
  // pointers.  This should probably be generalized via attributes to support
  // both source language and internal functions.
  if (isa<CallInst>(I) || isa<InvokeInst>(I))
    return BaseDefiningValueResult(I, true);

  // TODO: I have absolutely no idea how to implement this part yet.  It's not
  // necessarily hard, I just haven't really looked at it yet.
  assert(!isa<LandingPadInst>(I) && "Landing Pad is unimplemented");

  if (isa<AtomicCmpXchgInst>(I))
    // A CAS is effectively an atomic store and load combined under a
    // predicate.  From the perspective of base pointers, we just treat it
    // like a load.
    return BaseDefiningValueResult(I, true);

  assert(!isa<AtomicRMWInst>(I) && "Xchg handled above, all others are "
                                   "binary ops which don't apply to pointers");

  // The aggregate ops.  Aggregates can either be in the heap or on the
  // stack, but in either case, this is simply a field load.  As a result,
  // this is a defining definition of the base just like a load is.
  if (isa<ExtractValueInst>(I))
    return BaseDefiningValueResult(I, true);

  // We should never see an insert vector since that would require we be
  // tracing back a struct value not a pointer value.
  assert(!isa<InsertValueInst>(I) &&
         "Base pointer for a struct is meaningless");

  // This value might have been generated by findBasePointer() called when
  // substituting gc.get.pointer.base() intrinsic.
  bool IsKnownBase =
      isa<Instruction>(I) && cast<Instruction>(I)->getMetadata("is_base_value");

  // An extractelement produces a base result exactly when its input does.
  // We may need to insert a parallel instruction to extract the appropriate
  // element out of the base vector corresponding to the input.  Given this,
  // it's analogous to the phi and select case even though it's not a merge.
  if (isa<ExtractElementInst>(I))
    // Note: There are a lot of obvious peephole cases here.  These are
    // deliberately handled after the main base pointer inference algorithm to
    // make writing test cases to exercise that code easier.
    return BaseDefiningValueResult(I, IsKnownBase);

  // The last two cases here don't return a base pointer.  Instead, they
  // return a value which dynamically selects from among several base
  // derived pointers (each with its own base potentially).  It's the job of
  // the caller to resolve these.
  assert((isa<SelectInst>(I) || isa<PHINode>(I)) &&
         "missing instruction case in findBaseDefiningValue");
  return BaseDefiningValueResult(I, IsKnownBase);
}
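
// Illustrative walk of findBaseDefiningValue (hypothetical IR, names made up
// for this sketch): given
//
//   %merge = phi i8 addrspace(1)* [ %a, %bb1 ], [ %b.gep, %bb2 ]
//   %d     = getelementptr i8, i8 addrspace(1)* %merge, i64 16
//
// findBaseDefiningValue(%d) looks through the GEP and stops at %merge, which
// is returned as the BDV with IsKnownBase == false; the outer findBasePointer
// algorithm is then responsible for resolving %merge to an actual base.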

/// Returns the base defining value for this value.
static Value *findBaseDefiningValueCached(Value *I, DefiningValueMapTy &Cache) {
  Value *&Cached = Cache[I];
  if (!Cached) {
    Cached = findBaseDefiningValue(I).BDV;
    LLVM_DEBUG(dbgs() << "fBDV-cached: " << I->getName() << " -> "
                      << Cached->getName() << "\n");
  }
  assert(Cache[I] != nullptr);
  return Cached;
}

/// Return a base pointer for this value if known.  Otherwise, return its
/// base defining value.
static Value *findBaseOrBDV(Value *I, DefiningValueMapTy &Cache) {
  Value *Def = findBaseDefiningValueCached(I, Cache);
  auto Found = Cache.find(Def);
  if (Found != Cache.end()) {
    // Either a base-of relation, or a self reference.  Caller must check.
    return Found->second;
  }
  // Only a BDV available
  return Def;
}

/// This value is a base pointer that is not generated by RS4GC, i.e. it
/// already exists in the code.
static bool isOriginalBaseResult(Value *V) {
  // no recursion possible
  return !isa<PHINode>(V) && !isa<SelectInst>(V) &&
         !isa<ExtractElementInst>(V) && !isa<InsertElementInst>(V) &&
         !isa<ShuffleVectorInst>(V);
}

/// Given the result of a call to findBaseDefiningValue, or findBaseOrBDV,
/// is it known to be a base pointer?  Or do we need to continue searching.
static bool isKnownBaseResult(Value *V) {
  if (isOriginalBaseResult(V))
    return true;
  if (isa<Instruction>(V) &&
      cast<Instruction>(V)->getMetadata("is_base_value")) {
    // This is a previously inserted base phi or select.  We know
    // that this is a base value.
    return true;
  }

  // We need to keep searching
  return false;
}

// Returns true if First and Second values are both scalar or both vector.
static bool areBothVectorOrScalar(Value *First, Value *Second) {
  return isa<VectorType>(First->getType()) ==
         isa<VectorType>(Second->getType());
}

namespace {

/// Models the state of a single base defining value in the findBasePointer
/// algorithm for determining where a new instruction is needed to propagate
/// the base of this BDV.
class BDVState {
public:
  enum StatusTy {
    // Starting state of lattice
    Unknown,
    // Some specific base value -- does *not* mean that instruction
    // propagates the base of the object
    // ex: gep %arg, 16 -> %arg is the base value
    Base,
    // Need to insert a node to represent a merge.
    Conflict
  };

  BDVState() {
    llvm_unreachable("missing state in map");
  }

  explicit BDVState(Value *OriginalValue)
      : OriginalValue(OriginalValue) {}
  explicit BDVState(Value *OriginalValue, StatusTy Status,
                    Value *BaseValue = nullptr)
      : OriginalValue(OriginalValue), Status(Status), BaseValue(BaseValue) {
    assert(Status != Base || BaseValue);
  }

  StatusTy getStatus() const { return Status; }
  Value *getOriginalValue() const { return OriginalValue; }
  Value *getBaseValue() const { return BaseValue; }

  bool isBase() const { return getStatus() == Base; }
  bool isUnknown() const { return getStatus() == Unknown; }
  bool isConflict() const { return getStatus() == Conflict; }

  // Values of type BDVState form a lattice, and this function implements the
  // meet operation.
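  // Illustrative behaviour of meet (hypothetical values, for the sketch only):
  //   Unknown  meet Base(%a)  -> Base(%a)
  //   Base(%a) meet Base(%a)  -> Base(%a)
  //   Base(%a) meet Base(%b)  -> Conflict   (different bases)
  //   Conflict meet anything  -> Conflict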
  void meet(const BDVState &Other) {
    auto markConflict = [&]() {
      Status = BDVState::Conflict;
      BaseValue = nullptr;
    };
    // Conflict is a final state.
    if (isConflict())
      return;
    // If we are not known - just take the other state.
    if (isUnknown()) {
      Status = Other.getStatus();
      BaseValue = Other.getBaseValue();
      return;
    }
    // We are base.
    assert(isBase() && "Unknown state");
    // If other is unknown - just keep our state.
    if (Other.isUnknown())
      return;
    // If other is conflict - it is a final state.
    if (Other.isConflict())
      return markConflict();
    // Other is base as well.
    assert(Other.isBase() && "Unknown state");
    // If bases are different - Conflict.
    if (getBaseValue() != Other.getBaseValue())
      return markConflict();
    // We are identical, do nothing.
  }

  bool operator==(const BDVState &Other) const {
    return OriginalValue == Other.OriginalValue &&
           BaseValue == Other.BaseValue && Status == Other.Status;
  }

  bool operator!=(const BDVState &other) const { return !(*this == other); }

  LLVM_DUMP_METHOD
  void dump() const {
    print(dbgs());
    dbgs() << '\n';
  }

  void print(raw_ostream &OS) const {
    switch (getStatus()) {
    case Unknown:
      OS << "U";
      break;
    case Base:
      OS << "B";
      break;
    case Conflict:
      OS << "C";
      break;
    }
    OS << " (base " << getBaseValue() << " - "
       << (getBaseValue() ? getBaseValue()->getName() : "nullptr") << ")"
       << " for " << OriginalValue->getName() << ":";
  }

private:
  AssertingVH<Value> OriginalValue; // instruction this state corresponds to
  StatusTy Status = Unknown;
  AssertingVH<Value> BaseValue = nullptr; // Non-null only if Status == Base.
};

} // end anonymous namespace

#ifndef NDEBUG
static raw_ostream &operator<<(raw_ostream &OS, const BDVState &State) {
  State.print(OS);
  return OS;
}
#endif

/// For a given value or instruction, figure out what base pointer it's
/// derived from.  For gc objects, this is simply itself.  On success, returns
/// a value which is the base pointer.  (This is reliable and can be used for
/// relocation.)  On failure, returns nullptr.
static Value *findBasePointer(Value *I, DefiningValueMapTy &Cache) {
  Value *Def = findBaseOrBDV(I, Cache);

  if (isKnownBaseResult(Def) && areBothVectorOrScalar(Def, I))
    return Def;

  // Here's the rough algorithm:
  // - For every SSA value, construct a mapping to either an actual base
  //   pointer or a PHI which obscures the base pointer.
  // - Construct a mapping from PHI to unknown TOP state.  Use an
  //   optimistic algorithm to propagate base pointer information.  Lattice
  //   looks like:
  //   UNKNOWN
  //   b1 b2 b3 b4
  //   CONFLICT
  //   When algorithm terminates, all PHIs will either have a single concrete
  //   base or be in a conflict state.
  // - For every conflict, insert a dummy PHI node without arguments.  Add
  //   these to the base[Instruction] = BasePtr mapping.  For every
  //   non-conflict, add the actual base.
  // - For every conflict, add arguments for the base[a] of each input
  //   arguments.
  //
  // Note: A simpler form of this would be to add the conflict form of all
  // PHIs without running the optimistic algorithm.  This would be
  // analogous to pessimistic data flow and would likely lead to an
  // overall worse solution.
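  //
  // Illustrative example of the end result (hypothetical IR): given
  //   %d = phi i8 addrspace(1)* [ %a.gep, %bb1 ], [ %b.gep, %bb2 ]
  // where %a.gep and %b.gep are derived from bases %a and %b, %d reaches the
  // Conflict state and a parallel base phi is materialized:
  //   %d.base = phi i8 addrspace(1)* [ %a, %bb1 ], [ %b, %bb2 ]
  // marked with "is_base_value" metadata so later queries treat it as a base.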

#ifndef NDEBUG
  auto isExpectedBDVType = [](Value *BDV) {
    return isa<PHINode>(BDV) || isa<SelectInst>(BDV) ||
           isa<ExtractElementInst>(BDV) || isa<InsertElementInst>(BDV) ||
           isa<ShuffleVectorInst>(BDV);
  };
#endif

  // Once populated, will contain a mapping from each potentially non-base BDV
  // to a lattice value (described above) which corresponds to that BDV.
  // We use the order of insertion (DFS over the def/use graph) to provide a
  // stable deterministic ordering for visiting DenseMaps (which are unordered)
  // below.  This is important for deterministic compilation.
  MapVector<Value *, BDVState> States;

#ifndef NDEBUG
  auto VerifyStates = [&]() {
    for (auto &Entry : States) {
      assert(Entry.first == Entry.second.getOriginalValue());
    }
  };
#endif

  auto visitBDVOperands = [](Value *BDV, std::function<void (Value*)> F) {
    if (PHINode *PN = dyn_cast<PHINode>(BDV)) {
      for (Value *InVal : PN->incoming_values())
        F(InVal);
    } else if (SelectInst *SI = dyn_cast<SelectInst>(BDV)) {
      F(SI->getTrueValue());
      F(SI->getFalseValue());
    } else if (auto *EE = dyn_cast<ExtractElementInst>(BDV)) {
      F(EE->getVectorOperand());
    } else if (auto *IE = dyn_cast<InsertElementInst>(BDV)) {
      F(IE->getOperand(0));
      F(IE->getOperand(1));
    } else if (auto *SV = dyn_cast<ShuffleVectorInst>(BDV)) {
      // For a canonical broadcast, ignore the undef argument
      // (without this, we insert a parallel base shuffle for every broadcast)
      F(SV->getOperand(0));
      if (!SV->isZeroEltSplat())
        F(SV->getOperand(1));
    } else {
      llvm_unreachable("unexpected BDV type");
    }
  };

  // Recursively fill in all base defining values reachable from the initial
  // one for which we don't already know a definite base value for
  /* scope */ {
    SmallVector<Value*, 16> Worklist;
    Worklist.push_back(Def);
    States.insert({Def, BDVState(Def)});
    while (!Worklist.empty()) {
      Value *Current = Worklist.pop_back_val();
      assert(!isOriginalBaseResult(Current) && "why did it get added?");

      auto visitIncomingValue = [&](Value *InVal) {
        Value *Base = findBaseOrBDV(InVal, Cache);
        if (isKnownBaseResult(Base) && areBothVectorOrScalar(Base, InVal))
          // Known bases won't need new instructions introduced and can be
          // ignored safely.  However, this can only be done when InVal and
          // Base are both scalar or both vector.  Otherwise, we need to find
          // a correct BDV for InVal, by creating an entry in the lattice
          // (States).
          return;
        assert(isExpectedBDVType(Base) && "the only non-base values "
               "we see should be base defining values");
        if (States.insert(std::make_pair(Base, BDVState(Base))).second)
          Worklist.push_back(Base);
      };

      visitBDVOperands(Current, visitIncomingValue);
    }
  }

#ifndef NDEBUG
  VerifyStates();
  LLVM_DEBUG(dbgs() << "States after initialization:\n");
  for (const auto &Pair : States) {
    LLVM_DEBUG(dbgs() << " " << Pair.second << " for " << *Pair.first << "\n");
  }
#endif

  // Iterate forward through the value graph pruning any node from the state
  // list where all of the inputs are base pointers.  The purpose of this is
  // to reuse existing values when the derived pointer we were asked to
  // materialize a base pointer for happens to be a base pointer itself.  (Or
  // a sub-graph feeding it does.)
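  //
  // For example (illustrative): a select whose true and false inputs are both
  // already known bases needs no parallel base instruction; it is erased from
  // States and cached below as being its own base.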
  SmallVector<Value *> ToRemove;
  do {
    ToRemove.clear();
    for (auto Pair : States) {
      Value *BDV = Pair.first;
      auto canPruneInput = [&](Value *V) {
        // If the input of the BDV is the BDV itself we can prune it.  This is
        // only possible if the BDV is a PHI node.
        if (V->stripPointerCasts() == BDV)
          return true;
        Value *VBDV = findBaseOrBDV(V, Cache);
        if (V->stripPointerCasts() != VBDV)
          return false;
        // The assumption is that anything not in the state list
        // propagates a base pointer.
        return States.count(VBDV) == 0;
      };

      bool CanPrune = true;
      visitBDVOperands(BDV, [&](Value *Op) {
        CanPrune = CanPrune && canPruneInput(Op);
      });
      if (CanPrune)
        ToRemove.push_back(BDV);
    }
    for (Value *V : ToRemove) {
      States.erase(V);
      // Cache the fact V is its own base for later usage.
      Cache[V] = V;
    }
  } while (!ToRemove.empty());

  // Did we manage to prove that Def itself must be a base pointer?
  if (!States.count(Def))
    return Def;

  // Return a phi state for a base defining value.  We'll generate a new
  // base state for known bases and expect to find a cached state otherwise.
  auto GetStateForBDV = [&](Value *BaseValue, Value *Input) {
    auto I = States.find(BaseValue);
    if (I != States.end())
      return I->second;
    assert(areBothVectorOrScalar(BaseValue, Input));
    return BDVState(BaseValue, BDVState::Base, BaseValue);
  };

  bool Progress = true;
  while (Progress) {
#ifndef NDEBUG
    const size_t OldSize = States.size();
#endif
    Progress = false;
    // We're only changing values in this loop, thus safe to keep iterators.
    // Since this is computing a fixed point, the order of visit does not
    // affect the result.  TODO: We could use a worklist here and make this
    // run much faster.
    for (auto Pair : States) {
      Value *BDV = Pair.first;
      // Only values that do not have known bases or those that have differing
      // type (scalar versus vector) from a possible known base should be in
      // the lattice.
      assert((!isKnownBaseResult(BDV) ||
              !areBothVectorOrScalar(BDV, Pair.second.getBaseValue())) &&
             "why did it get added?");

      BDVState NewState(BDV);
      visitBDVOperands(BDV, [&](Value *Op) {
        Value *BDV = findBaseOrBDV(Op, Cache);
        auto OpState = GetStateForBDV(BDV, Op);
        NewState.meet(OpState);
      });

      BDVState OldState = States[BDV];
      if (OldState != NewState) {
        Progress = true;
        States[BDV] = NewState;
      }
    }

    assert(OldSize == States.size() &&
           "fixed point shouldn't be adding any new nodes to state");
  }

#ifndef NDEBUG
  VerifyStates();
  LLVM_DEBUG(dbgs() << "States after meet iteration:\n");
  for (const auto &Pair : States) {
    LLVM_DEBUG(dbgs() << " " << Pair.second << " for " << *Pair.first << "\n");
  }
#endif

  // Handle all instructions that have a vector BDV, but the instruction
  // itself is of scalar type.
  for (auto Pair : States) {
    Instruction *I = cast<Instruction>(Pair.first);
    BDVState State = Pair.second;
    auto *BaseValue = State.getBaseValue();
    // Only values that do not have known bases or those that have differing
    // type (scalar versus vector) from a possible known base should be in the
    // lattice.
    assert((!isKnownBaseResult(I) || !areBothVectorOrScalar(I, BaseValue)) &&
           "why did it get added?");
    assert(!State.isUnknown() && "Optimistic algorithm didn't complete!");

    if (!State.isBase() || !isa<VectorType>(BaseValue->getType()))
      continue;
    // extractelement instructions are a bit special in that we may need to
    // insert an extract even when we know an exact base for the instruction.
    // The problem is that we need to convert from a vector base to a scalar
    // base for the particular index we're interested in.
    if (isa<ExtractElementInst>(I)) {
      auto *EE = cast<ExtractElementInst>(I);
      // TODO: In many cases, the new instruction is just EE itself.  We
      // should exploit this, but can't do it here since it would break the
      // invariant about the BDV not being known to be a base.
      auto *BaseInst = ExtractElementInst::Create(
          State.getBaseValue(), EE->getIndexOperand(), "base_ee", EE);
      BaseInst->setMetadata("is_base_value", MDNode::get(I->getContext(), {}));
      States[I] = BDVState(I, BDVState::Base, BaseInst);
    } else if (!isa<VectorType>(I->getType())) {
      // We need to handle cases that have a vector base but the instruction
      // is a scalar type (these could be phis or selects or any instruction
      // that are of scalar type, but the base can be a vector type).  We
      // conservatively set this as conflict.  Setting the base value for
      // these conflicts is handled in the next loop which traverses States.
      States[I] = BDVState(I, BDVState::Conflict);
    }
  }

#ifndef NDEBUG
  VerifyStates();
#endif

  // Insert Phis for all conflicts
  // TODO: adjust naming patterns to avoid this order of iteration dependency
  for (auto Pair : States) {
    Instruction *I = cast<Instruction>(Pair.first);
    BDVState State = Pair.second;
    // Only values that do not have known bases or those that have differing
    // type (scalar versus vector) from a possible known base should be in the
    // lattice.
    assert((!isKnownBaseResult(I) ||
            !areBothVectorOrScalar(I, State.getBaseValue())) &&
           "why did it get added?");
    assert(!State.isUnknown() && "Optimistic algorithm didn't complete!");

    // Since we're joining a vector and scalar base, they can never be the
    // same.  As a result, we should always see insert element having reached
    // the conflict state.
    assert(!isa<InsertElementInst>(I) || State.isConflict());

    if (!State.isConflict())
      continue;

    auto getMangledName = [](Instruction *I) -> std::string {
      if (isa<PHINode>(I)) {
        return suffixed_name_or(I, ".base", "base_phi");
      } else if (isa<SelectInst>(I)) {
        return suffixed_name_or(I, ".base", "base_select");
      } else if (isa<ExtractElementInst>(I)) {
        return suffixed_name_or(I, ".base", "base_ee");
      } else if (isa<InsertElementInst>(I)) {
        return suffixed_name_or(I, ".base", "base_ie");
      } else {
        return suffixed_name_or(I, ".base", "base_sv");
      }
    };

    Instruction *BaseInst = I->clone();
    BaseInst->insertBefore(I);
    BaseInst->setName(getMangledName(I));
    // Add metadata marking this as a base value
    BaseInst->setMetadata("is_base_value", MDNode::get(I->getContext(), {}));
    States[I] = BDVState(I, BDVState::Conflict, BaseInst);
  }

#ifndef NDEBUG
  VerifyStates();
#endif

  // Returns an instruction which produces the base pointer for a given
  // instruction.  The instruction is assumed to be an input to one of the
  // BDVs seen in the inference algorithm above.  As such, we must either
  // already know its base defining value is a base, or have inserted a new
  // instruction to propagate the base of its BDV and have entered that newly
  // introduced instruction into the state table.  In either case, we are
  // assured to be able to determine an instruction which produces its base
  // pointer.
  auto getBaseForInput = [&](Value *Input, Instruction *InsertPt) {
    Value *BDV = findBaseOrBDV(Input, Cache);
    Value *Base = nullptr;
    if (!States.count(BDV)) {
      assert(areBothVectorOrScalar(BDV, Input));
      Base = BDV;
    } else {
      // Either conflict or base.
      assert(States.count(BDV));
      Base = States[BDV].getBaseValue();
    }
    assert(Base && "Can't be null");
    // The cast is needed since base traversal may strip away bitcasts
    if (Base->getType() != Input->getType() && InsertPt)
      Base = new BitCastInst(Base, Input->getType(), "cast", InsertPt);
    return Base;
  };

  // Fixup all the inputs of the new PHIs.  Visit order needs to be
  // deterministic and predictable because we're naming newly created
  // instructions.
  for (auto Pair : States) {
    Instruction *BDV = cast<Instruction>(Pair.first);
    BDVState State = Pair.second;

    // Only values that do not have known bases or those that have differing
    // type (scalar versus vector) from a possible known base should be in the
    // lattice.
    assert((!isKnownBaseResult(BDV) ||
            !areBothVectorOrScalar(BDV, State.getBaseValue())) &&
           "why did it get added?");
    assert(!State.isUnknown() && "Optimistic algorithm didn't complete!");
    if (!State.isConflict())
      continue;

    if (PHINode *BasePHI = dyn_cast<PHINode>(State.getBaseValue())) {
      PHINode *PN = cast<PHINode>(BDV);
      const unsigned NumPHIValues = PN->getNumIncomingValues();

      // The IR verifier requires phi nodes with multiple entries from the
      // same basic block to have the same incoming value for each of those
      // entries.  Since we're inserting bitcasts in the loop, make sure we
      // do so at least once per incoming block.
      DenseMap<BasicBlock *, Value *> BlockToValue;
      for (unsigned i = 0; i < NumPHIValues; i++) {
        Value *InVal = PN->getIncomingValue(i);
        BasicBlock *InBB = PN->getIncomingBlock(i);
        if (!BlockToValue.count(InBB))
          BlockToValue[InBB] = getBaseForInput(InVal, InBB->getTerminator());
        else {
#ifndef NDEBUG
          Value *OldBase = BlockToValue[InBB];
          Value *Base = getBaseForInput(InVal, nullptr);

          // We can't use `stripPointerCasts` instead of this function because
          // `stripPointerCasts` doesn't handle vectors of pointers.
          auto StripBitCasts = [](Value *V) -> Value * {
            while (auto *BC = dyn_cast<BitCastInst>(V))
              V = BC->getOperand(0);
            return V;
          };
          // In essence this assert states: the only way two values
          // incoming from the same basic block may be different is by
          // being different bitcasts of the same value.  A cleanup
          // that remains TODO is changing findBaseOrBDV to return an
          // llvm::Value of the correct type (and still remain pure).
          // This will remove the need to add bitcasts.
          assert(StripBitCasts(Base) == StripBitCasts(OldBase) &&
                 "findBaseOrBDV should be pure!");
#endif
        }
        Value *Base = BlockToValue[InBB];
        BasePHI->setIncomingValue(i, Base);
      }
    } else if (SelectInst *BaseSI =
                   dyn_cast<SelectInst>(State.getBaseValue())) {
      SelectInst *SI = cast<SelectInst>(BDV);

      // Find the instruction which produces the base for each input.
      // We may need to insert a bitcast.
      BaseSI->setTrueValue(getBaseForInput(SI->getTrueValue(), BaseSI));
      BaseSI->setFalseValue(getBaseForInput(SI->getFalseValue(), BaseSI));
    } else if (auto *BaseEE =
                   dyn_cast<ExtractElementInst>(State.getBaseValue())) {
      Value *InVal = cast<ExtractElementInst>(BDV)->getVectorOperand();
      // Find the instruction which produces the base for each input.  We may
      // need to insert a bitcast.
      BaseEE->setOperand(0, getBaseForInput(InVal, BaseEE));
    } else if (auto *BaseIE =
                   dyn_cast<InsertElementInst>(State.getBaseValue())) {
      auto *BdvIE = cast<InsertElementInst>(BDV);
      auto UpdateOperand = [&](int OperandIdx) {
        Value *InVal = BdvIE->getOperand(OperandIdx);
        Value *Base = getBaseForInput(InVal, BaseIE);
        BaseIE->setOperand(OperandIdx, Base);
      };
      UpdateOperand(0); // vector operand
      UpdateOperand(1); // scalar operand
    } else {
      auto *BaseSV = cast<ShuffleVectorInst>(State.getBaseValue());
      auto *BdvSV = cast<ShuffleVectorInst>(BDV);
      auto UpdateOperand = [&](int OperandIdx) {
        Value *InVal = BdvSV->getOperand(OperandIdx);
        Value *Base = getBaseForInput(InVal, BaseSV);
        BaseSV->setOperand(OperandIdx, Base);
      };
      UpdateOperand(0); // vector operand
      if (!BdvSV->isZeroEltSplat())
        UpdateOperand(1); // vector operand
      else {
        // Never read, so just use undef
        Value *InVal = BdvSV->getOperand(1);
        BaseSV->setOperand(1, UndefValue::get(InVal->getType()));
      }
    }
  }

#ifndef NDEBUG
  VerifyStates();
#endif

  // Cache all of our results so we can cheaply reuse them
  // NOTE: This is actually two caches: one of the base defining value
  // relation and one of the base pointer relation!  FIXME
  for (auto Pair : States) {
    auto *BDV = Pair.first;
    Value *Base = Pair.second.getBaseValue();
    assert(BDV && Base);
    // Only values that do not have known bases or those that have differing
    // type (scalar versus vector) from a possible known base should be in the
    // lattice.
    assert((!isKnownBaseResult(BDV) || !areBothVectorOrScalar(BDV, Base)) &&
           "why did it get added?");

    LLVM_DEBUG(
        dbgs() << "Updating base value cache"
               << " for: " << BDV->getName() << " from: "
               << (Cache.count(BDV) ? Cache[BDV]->getName().str() : "none")
               << " to: " << Base->getName() << "\n");

    Cache[BDV] = Base;
  }
  assert(Cache.count(Def));
  return Cache[Def];
}

// For a set of live pointers (base and/or derived), identify the base
// pointer of the object which they are derived from.  This routine will
// mutate the IR graph as needed to make the 'base' pointer live at the
// definition site of 'derived'.  This ensures that any use of 'derived' can
// also use 'base'.  This may involve the insertion of a number of
// additional PHI nodes.
//
// preconditions: live is a set of pointer type Values
//
// side effects: may insert PHI nodes into the existing CFG, will preserve
// CFG, will not remove or mutate any existing nodes
//
// post condition: PointerToBase contains one (derived, base) pair for every
// pointer in live.  Note that derived can be equal to base if the original
// pointer was a base pointer.
static void findBasePointers(const StatepointLiveSetTy &live,
                             PointerToBaseTy &PointerToBase, DominatorTree *DT,
                             DefiningValueMapTy &DVCache) {
  for (Value *ptr : live) {
    Value *base = findBasePointer(ptr, DVCache);
    assert(base && "failed to find base pointer");
    PointerToBase[ptr] = base;
    assert((!isa<Instruction>(base) || !isa<Instruction>(ptr) ||
            DT->dominates(cast<Instruction>(base)->getParent(),
                          cast<Instruction>(ptr)->getParent())) &&
           "The base we found better dominate the derived pointer");
  }
}

/// Find the required base pointers (and adjust the live set) for the given
/// parse point.
static void findBasePointers(DominatorTree &DT, DefiningValueMapTy &DVCache,
                             CallBase *Call,
                             PartiallyConstructedSafepointRecord &result,
                             PointerToBaseTy &PointerToBase) {
  StatepointLiveSetTy PotentiallyDerivedPointers = result.LiveSet;
  // We assume that all pointers passed to deopt are base pointers; as an
  // optimization, we can use this to avoid separately materializing the base
  // pointer graph.  This is only relevant since we're very conservative about
  // generating new conflict nodes during base pointer insertion.  If we were
  // smarter there, this would be irrelevant.
  if (auto Opt = Call->getOperandBundle(LLVMContext::OB_deopt))
    for (Value *V : Opt->Inputs) {
      if (!PotentiallyDerivedPointers.count(V))
        continue;
      PotentiallyDerivedPointers.remove(V);
      PointerToBase[V] = V;
    }
  findBasePointers(PotentiallyDerivedPointers, PointerToBase, &DT, DVCache);
}

/// Given an updated version of the dataflow liveness results, update the
/// liveset and base pointer maps for the call site CS.
static void recomputeLiveInValues(GCPtrLivenessData &RevisedLivenessData,
                                  CallBase *Call,
                                  PartiallyConstructedSafepointRecord &result,
                                  PointerToBaseTy &PointerToBase);

static void recomputeLiveInValues(
    Function &F, DominatorTree &DT, ArrayRef<CallBase *> toUpdate,
    MutableArrayRef<struct PartiallyConstructedSafepointRecord> records,
    PointerToBaseTy &PointerToBase) {
  // TODO-PERF: reuse the original liveness, then simply run the dataflow
  // again.  The old values are still live and will help it stabilize quickly.
  GCPtrLivenessData RevisedLivenessData;
  computeLiveInValues(DT, F, RevisedLivenessData);
  for (size_t i = 0; i < records.size(); i++) {
    struct PartiallyConstructedSafepointRecord &info = records[i];
    recomputeLiveInValues(RevisedLivenessData, toUpdate[i], info,
                          PointerToBase);
  }
}

// When inserting gc.relocate and gc.result calls, we need to ensure there are
// no uses of the original value / return value between the gc.statepoint and
// the gc.relocate / gc.result call.  One case which can arise is a phi node
// starting one of the successor blocks.  We also need to be able to insert
// the gc.relocates only on the path which goes through the statepoint.  We
// might need to split an edge to make this possible.
static BasicBlock *
normalizeForInvokeSafepoint(BasicBlock *BB, BasicBlock *InvokeParent,
                            DominatorTree &DT) {
  BasicBlock *Ret = BB;
  if (!BB->getUniquePredecessor())
    Ret = SplitBlockPredecessors(BB, InvokeParent, "", &DT);

  // Now that 'Ret' has a unique predecessor we can safely remove all phi
  // nodes from it
  FoldSingleEntryPHINodes(Ret);
  assert(!isa<PHINode>(Ret->begin()) &&
         "All PHI nodes should have been removed!");

  // At this point, we can safely insert a gc.relocate or gc.result as the
  // first instruction in Ret if needed.
  return Ret;
}

// List of all function attributes which must be stripped when lowering from
// the abstract machine model to the physical machine model.  Essentially,
// these are all the effects a safepoint might have which we ignored in the
// abstract machine model for purposes of optimization.  We have to strip
// these on both function declarations and call sites.
static constexpr Attribute::AttrKind FnAttrsToStrip[] =
    {Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly,
     Attribute::ArgMemOnly, Attribute::InaccessibleMemOnly,
     Attribute::InaccessibleMemOrArgMemOnly,
     Attribute::NoSync, Attribute::NoFree};

// Create a new attribute set containing only attributes which can be
// transferred from the original call to the safepoint.
static AttributeList legalizeCallAttributes(LLVMContext &Ctx,
                                            AttributeList OrigAL,
                                            AttributeList StatepointAL) {
  if (OrigAL.isEmpty())
    return StatepointAL;

  // Remove the readonly, readnone, and statepoint function attributes.
  AttrBuilder FnAttrs(Ctx, OrigAL.getFnAttrs());
  for (auto Attr : FnAttrsToStrip)
    FnAttrs.removeAttribute(Attr);

  for (Attribute A : OrigAL.getFnAttrs()) {
    if (isStatepointDirectiveAttr(A))
      FnAttrs.removeAttribute(A);
  }

  // Just skip parameter and return attributes for now
  return StatepointAL.addFnAttributes(Ctx, FnAttrs);
}

/// Helper function to place all gc relocates necessary for the given
/// statepoint.
/// Inputs:
///   liveVariables - list of variables to be relocated.
///   basePtrs - base pointers.
///   statepointToken - statepoint instruction to which relocates should be
///     bound.
///   Builder - LLVM IR builder to be used to construct new calls.
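///
/// Illustrative shape of the emitted calls (hypothetical value names): each
/// live pointer %p whose base is %b becomes
///   %p.relocated = call coldcc i8 addrspace(1)*
///       @llvm.experimental.gc.relocate.p1i8(token %statepoint_token,
///                                           i32 <index of %b>, i32 <index of %p>)
/// with a bitcast back to %p's original type added later if needed.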
static void CreateGCRelocates(ArrayRef<Value *> LiveVariables,
                              ArrayRef<Value *> BasePtrs,
                              Instruction *StatepointToken,
                              IRBuilder<> &Builder) {
  if (LiveVariables.empty())
    return;

  auto FindIndex = [](ArrayRef<Value *> LiveVec, Value *Val) {
    auto ValIt = llvm::find(LiveVec, Val);
    assert(ValIt != LiveVec.end() && "Val not found in LiveVec!");
    size_t Index = std::distance(LiveVec.begin(), ValIt);
    assert(Index < LiveVec.size() && "Bug in std::find?");
    return Index;
  };
  Module *M = StatepointToken->getModule();

  // All gc_relocate are generated as i8 addrspace(1)* (or a vector type whose
  // element type is i8 addrspace(1)*).  We originally generated unique
  // declarations for each pointer type, but this proved problematic because
  // the intrinsic mangling code is incomplete and fragile.  Since we're
  // moving towards a single unified pointer type anyways, we can just cast
  // everything to an i8* of the right address space.  A bitcast is added
  // later to convert gc_relocate to the actual value's type.
  auto getGCRelocateDecl = [&](Type *Ty) {
    assert(isHandledGCPointerType(Ty));
    auto AS = Ty->getScalarType()->getPointerAddressSpace();
    Type *NewTy = Type::getInt8PtrTy(M->getContext(), AS);
    if (auto *VT = dyn_cast<VectorType>(Ty))
      NewTy = FixedVectorType::get(NewTy,
                                   cast<FixedVectorType>(VT)->getNumElements());
    return Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_relocate,
                                     {NewTy});
  };

  // Lazily populated map from input types to the canonicalized form mentioned
  // in the comment above.  This should probably be cached somewhere more
  // broadly.
  DenseMap<Type *, Function *> TypeToDeclMap;

  for (unsigned i = 0; i < LiveVariables.size(); i++) {
    // Generate the gc.relocate call and save the result
    Value *BaseIdx = Builder.getInt32(FindIndex(LiveVariables, BasePtrs[i]));
    Value *LiveIdx = Builder.getInt32(i);

    Type *Ty = LiveVariables[i]->getType();
    if (!TypeToDeclMap.count(Ty))
      TypeToDeclMap[Ty] = getGCRelocateDecl(Ty);
    Function *GCRelocateDecl = TypeToDeclMap[Ty];

    // only specify a debug name if we can give a useful one
    CallInst *Reloc = Builder.CreateCall(
        GCRelocateDecl, {StatepointToken, BaseIdx, LiveIdx},
        suffixed_name_or(LiveVariables[i], ".relocated", ""));
    // Trick CodeGen into thinking there are lots of free registers at this
    // fake call.
    Reloc->setCallingConv(CallingConv::Cold);
  }
}

namespace {

/// This struct is used to defer RAUWs and `eraseFromParent`s.  Using this
/// avoids having to worry about keeping around dangling pointers to Values.
1464 class DeferredReplacement { 1465 AssertingVH<Instruction> Old; 1466 AssertingVH<Instruction> New; 1467 bool IsDeoptimize = false; 1468 1469 DeferredReplacement() = default; 1470 1471 public: 1472 static DeferredReplacement createRAUW(Instruction *Old, Instruction *New) { 1473 assert(Old != New && Old && New && 1474 "Cannot RAUW equal values or to / from null!"); 1475 1476 DeferredReplacement D; 1477 D.Old = Old; 1478 D.New = New; 1479 return D; 1480 } 1481 1482 static DeferredReplacement createDelete(Instruction *ToErase) { 1483 DeferredReplacement D; 1484 D.Old = ToErase; 1485 return D; 1486 } 1487 1488 static DeferredReplacement createDeoptimizeReplacement(Instruction *Old) { 1489 #ifndef NDEBUG 1490 auto *F = cast<CallInst>(Old)->getCalledFunction(); 1491 assert(F && F->getIntrinsicID() == Intrinsic::experimental_deoptimize && 1492 "Only way to construct a deoptimize deferred replacement"); 1493 #endif 1494 DeferredReplacement D; 1495 D.Old = Old; 1496 D.IsDeoptimize = true; 1497 return D; 1498 } 1499 1500 /// Does the task represented by this instance. 1501 void doReplacement() { 1502 Instruction *OldI = Old; 1503 Instruction *NewI = New; 1504 1505 assert(OldI != NewI && "Disallowed at construction?!"); 1506 assert((!IsDeoptimize || !New) && 1507 "Deoptimize intrinsics are not replaced!"); 1508 1509 Old = nullptr; 1510 New = nullptr; 1511 1512 if (NewI) 1513 OldI->replaceAllUsesWith(NewI); 1514 1515 if (IsDeoptimize) { 1516 // Note: we've inserted instructions, so the call to llvm.deoptimize may 1517 // not necessarily be followed by the matching return. 1518 auto *RI = cast<ReturnInst>(OldI->getParent()->getTerminator()); 1519 new UnreachableInst(RI->getContext(), RI); 1520 RI->eraseFromParent(); 1521 } 1522 1523 OldI->eraseFromParent(); 1524 } 1525 }; 1526 1527 } // end anonymous namespace 1528 1529 static StringRef getDeoptLowering(CallBase *Call) { 1530 const char *DeoptLowering = "deopt-lowering"; 1531 if (Call->hasFnAttr(DeoptLowering)) { 1532 // FIXME: Calls have a *really* confusing interface around attributes 1533 // with values. 1534 const AttributeList &CSAS = Call->getAttributes(); 1535 if (CSAS.hasFnAttr(DeoptLowering)) 1536 return CSAS.getFnAttr(DeoptLowering).getValueAsString(); 1537 Function *F = Call->getCalledFunction(); 1538 assert(F && F->hasFnAttribute(DeoptLowering)); 1539 return F->getFnAttribute(DeoptLowering).getValueAsString(); 1540 } 1541 return "live-through"; 1542 } 1543 1544 static void 1545 makeStatepointExplicitImpl(CallBase *Call, /* to replace */ 1546 const SmallVectorImpl<Value *> &BasePtrs, 1547 const SmallVectorImpl<Value *> &LiveVariables, 1548 PartiallyConstructedSafepointRecord &Result, 1549 std::vector<DeferredReplacement> &Replacements, 1550 const PointerToBaseTy &PointerToBase) { 1551 assert(BasePtrs.size() == LiveVariables.size()); 1552 1553 // Then go ahead and use the builder do actually do the inserts. We insert 1554 // immediately before the previous instruction under the assumption that all 1555 // arguments will be available here. We can't insert afterwards since we may 1556 // be replacing a terminator. 
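// Roughly speaking (the exact operand encoding and intrinsic name mangling
// vary between LLVM versions and between the trailing-argument and
// operand-bundle forms), a call such as
//   %ret = call i32 @foo(i32 addrspace(1)* %obj)
// is rewritten into something like
//   %tok = call token @llvm.experimental.gc.statepoint(i64 ID, i32 0, @foo,
//                                                      i32 1, i32 Flags,
//                                                      i32 addrspace(1)* %obj, ...)
//   %ret = call i32 @llvm.experimental.gc.result(token %tok)
//   %obj.relocated = call coldcc i8 addrspace(1)*
//       @llvm.experimental.gc.relocate(token %tok, i32 BaseIdx, i32 DerivedIdx)
// with the gc.result / gc.relocate calls emitted below and by
// CreateGCRelocates() above.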
1557 IRBuilder<> Builder(Call); 1558 1559 ArrayRef<Value *> GCArgs(LiveVariables); 1560 uint64_t StatepointID = StatepointDirectives::DefaultStatepointID; 1561 uint32_t NumPatchBytes = 0; 1562 uint32_t Flags = uint32_t(StatepointFlags::None); 1563 1564 SmallVector<Value *, 8> CallArgs(Call->args()); 1565 Optional<ArrayRef<Use>> DeoptArgs; 1566 if (auto Bundle = Call->getOperandBundle(LLVMContext::OB_deopt)) 1567 DeoptArgs = Bundle->Inputs; 1568 Optional<ArrayRef<Use>> TransitionArgs; 1569 if (auto Bundle = Call->getOperandBundle(LLVMContext::OB_gc_transition)) { 1570 TransitionArgs = Bundle->Inputs; 1571 // TODO: This flag no longer serves a purpose and can be removed later 1572 Flags |= uint32_t(StatepointFlags::GCTransition); 1573 } 1574 1575 // Instead of lowering calls to @llvm.experimental.deoptimize as normal calls 1576 // with a return value, we lower then as never returning calls to 1577 // __llvm_deoptimize that are followed by unreachable to get better codegen. 1578 bool IsDeoptimize = false; 1579 1580 StatepointDirectives SD = 1581 parseStatepointDirectivesFromAttrs(Call->getAttributes()); 1582 if (SD.NumPatchBytes) 1583 NumPatchBytes = *SD.NumPatchBytes; 1584 if (SD.StatepointID) 1585 StatepointID = *SD.StatepointID; 1586 1587 // Pass through the requested lowering if any. The default is live-through. 1588 StringRef DeoptLowering = getDeoptLowering(Call); 1589 if (DeoptLowering.equals("live-in")) 1590 Flags |= uint32_t(StatepointFlags::DeoptLiveIn); 1591 else { 1592 assert(DeoptLowering.equals("live-through") && "Unsupported value!"); 1593 } 1594 1595 FunctionCallee CallTarget(Call->getFunctionType(), Call->getCalledOperand()); 1596 if (Function *F = dyn_cast<Function>(CallTarget.getCallee())) { 1597 auto IID = F->getIntrinsicID(); 1598 if (IID == Intrinsic::experimental_deoptimize) { 1599 // Calls to llvm.experimental.deoptimize are lowered to calls to the 1600 // __llvm_deoptimize symbol. We want to resolve this now, since the 1601 // verifier does not allow taking the address of an intrinsic function. 1602 1603 SmallVector<Type *, 8> DomainTy; 1604 for (Value *Arg : CallArgs) 1605 DomainTy.push_back(Arg->getType()); 1606 auto *FTy = FunctionType::get(Type::getVoidTy(F->getContext()), DomainTy, 1607 /* isVarArg = */ false); 1608 1609 // Note: CallTarget can be a bitcast instruction of a symbol if there are 1610 // calls to @llvm.experimental.deoptimize with different argument types in 1611 // the same module. This is fine -- we assume the frontend knew what it 1612 // was doing when generating this kind of IR. 1613 CallTarget = F->getParent() 1614 ->getOrInsertFunction("__llvm_deoptimize", FTy); 1615 1616 IsDeoptimize = true; 1617 } else if (IID == Intrinsic::memcpy_element_unordered_atomic || 1618 IID == Intrinsic::memmove_element_unordered_atomic) { 1619 // Unordered atomic memcpy and memmove intrinsics which are not explicitly 1620 // marked as "gc-leaf-function" should be lowered in a GC parseable way. 1621 // Specifically, these calls should be lowered to the 1622 // __llvm_{memcpy|memmove}_element_unordered_atomic_safepoint symbols. 1623 // Similarly to __llvm_deoptimize we want to resolve this now, since the 1624 // verifier does not allow taking the address of an intrinsic function. 1625 // 1626 // Moreover we need to shuffle the arguments for the call in order to 1627 // accommodate GC. The underlying source and destination objects might be 1628 // relocated during copy operation should the GC occur. 
To relocate the 1629 // derived source and destination pointers the implementation of the 1630 // intrinsic should know the corresponding base pointers. 1631 // 1632 // To make the base pointers available pass them explicitly as arguments: 1633 // memcpy(dest_derived, source_derived, ...) => 1634 // memcpy(dest_base, dest_offset, source_base, source_offset, ...) 1635 auto &Context = Call->getContext(); 1636 auto &DL = Call->getModule()->getDataLayout(); 1637 auto GetBaseAndOffset = [&](Value *Derived) { 1638 assert(PointerToBase.count(Derived)); 1639 unsigned AddressSpace = Derived->getType()->getPointerAddressSpace(); 1640 unsigned IntPtrSize = DL.getPointerSizeInBits(AddressSpace); 1641 Value *Base = PointerToBase.find(Derived)->second; 1642 Value *Base_int = Builder.CreatePtrToInt( 1643 Base, Type::getIntNTy(Context, IntPtrSize)); 1644 Value *Derived_int = Builder.CreatePtrToInt( 1645 Derived, Type::getIntNTy(Context, IntPtrSize)); 1646 return std::make_pair(Base, Builder.CreateSub(Derived_int, Base_int)); 1647 }; 1648 1649 auto *Dest = CallArgs[0]; 1650 Value *DestBase, *DestOffset; 1651 std::tie(DestBase, DestOffset) = GetBaseAndOffset(Dest); 1652 1653 auto *Source = CallArgs[1]; 1654 Value *SourceBase, *SourceOffset; 1655 std::tie(SourceBase, SourceOffset) = GetBaseAndOffset(Source); 1656 1657 auto *LengthInBytes = CallArgs[2]; 1658 auto *ElementSizeCI = cast<ConstantInt>(CallArgs[3]); 1659 1660 CallArgs.clear(); 1661 CallArgs.push_back(DestBase); 1662 CallArgs.push_back(DestOffset); 1663 CallArgs.push_back(SourceBase); 1664 CallArgs.push_back(SourceOffset); 1665 CallArgs.push_back(LengthInBytes); 1666 1667 SmallVector<Type *, 8> DomainTy; 1668 for (Value *Arg : CallArgs) 1669 DomainTy.push_back(Arg->getType()); 1670 auto *FTy = FunctionType::get(Type::getVoidTy(F->getContext()), DomainTy, 1671 /* isVarArg = */ false); 1672 1673 auto GetFunctionName = [](Intrinsic::ID IID, ConstantInt *ElementSizeCI) { 1674 uint64_t ElementSize = ElementSizeCI->getZExtValue(); 1675 if (IID == Intrinsic::memcpy_element_unordered_atomic) { 1676 switch (ElementSize) { 1677 case 1: 1678 return "__llvm_memcpy_element_unordered_atomic_safepoint_1"; 1679 case 2: 1680 return "__llvm_memcpy_element_unordered_atomic_safepoint_2"; 1681 case 4: 1682 return "__llvm_memcpy_element_unordered_atomic_safepoint_4"; 1683 case 8: 1684 return "__llvm_memcpy_element_unordered_atomic_safepoint_8"; 1685 case 16: 1686 return "__llvm_memcpy_element_unordered_atomic_safepoint_16"; 1687 default: 1688 llvm_unreachable("unexpected element size!"); 1689 } 1690 } 1691 assert(IID == Intrinsic::memmove_element_unordered_atomic); 1692 switch (ElementSize) { 1693 case 1: 1694 return "__llvm_memmove_element_unordered_atomic_safepoint_1"; 1695 case 2: 1696 return "__llvm_memmove_element_unordered_atomic_safepoint_2"; 1697 case 4: 1698 return "__llvm_memmove_element_unordered_atomic_safepoint_4"; 1699 case 8: 1700 return "__llvm_memmove_element_unordered_atomic_safepoint_8"; 1701 case 16: 1702 return "__llvm_memmove_element_unordered_atomic_safepoint_16"; 1703 default: 1704 llvm_unreachable("unexpected element size!"); 1705 } 1706 }; 1707 1708 CallTarget = 1709 F->getParent() 1710 ->getOrInsertFunction(GetFunctionName(IID, ElementSizeCI), FTy); 1711 } 1712 } 1713 1714 // Create the statepoint given all the arguments 1715 GCStatepointInst *Token = nullptr; 1716 if (auto *CI = dyn_cast<CallInst>(Call)) { 1717 CallInst *SPCall = Builder.CreateGCStatepointCall( 1718 StatepointID, NumPatchBytes, CallTarget, Flags, CallArgs, 1719 
TransitionArgs, DeoptArgs, GCArgs, "safepoint_token"); 1720 1721 SPCall->setTailCallKind(CI->getTailCallKind()); 1722 SPCall->setCallingConv(CI->getCallingConv()); 1723 1724 // Currently we will fail on parameter attributes and on certain 1725 // function attributes. In case if we can handle this set of attributes - 1726 // set up function attrs directly on statepoint and return attrs later for 1727 // gc_result intrinsic. 1728 SPCall->setAttributes(legalizeCallAttributes( 1729 CI->getContext(), CI->getAttributes(), SPCall->getAttributes())); 1730 1731 Token = cast<GCStatepointInst>(SPCall); 1732 1733 // Put the following gc_result and gc_relocate calls immediately after the 1734 // the old call (which we're about to delete) 1735 assert(CI->getNextNode() && "Not a terminator, must have next!"); 1736 Builder.SetInsertPoint(CI->getNextNode()); 1737 Builder.SetCurrentDebugLocation(CI->getNextNode()->getDebugLoc()); 1738 } else { 1739 auto *II = cast<InvokeInst>(Call); 1740 1741 // Insert the new invoke into the old block. We'll remove the old one in a 1742 // moment at which point this will become the new terminator for the 1743 // original block. 1744 InvokeInst *SPInvoke = Builder.CreateGCStatepointInvoke( 1745 StatepointID, NumPatchBytes, CallTarget, II->getNormalDest(), 1746 II->getUnwindDest(), Flags, CallArgs, TransitionArgs, DeoptArgs, GCArgs, 1747 "statepoint_token"); 1748 1749 SPInvoke->setCallingConv(II->getCallingConv()); 1750 1751 // Currently we will fail on parameter attributes and on certain 1752 // function attributes. In case if we can handle this set of attributes - 1753 // set up function attrs directly on statepoint and return attrs later for 1754 // gc_result intrinsic. 1755 SPInvoke->setAttributes(legalizeCallAttributes( 1756 II->getContext(), II->getAttributes(), SPInvoke->getAttributes())); 1757 1758 Token = cast<GCStatepointInst>(SPInvoke); 1759 1760 // Generate gc relocates in exceptional path 1761 BasicBlock *UnwindBlock = II->getUnwindDest(); 1762 assert(!isa<PHINode>(UnwindBlock->begin()) && 1763 UnwindBlock->getUniquePredecessor() && 1764 "can't safely insert in this block!"); 1765 1766 Builder.SetInsertPoint(&*UnwindBlock->getFirstInsertionPt()); 1767 Builder.SetCurrentDebugLocation(II->getDebugLoc()); 1768 1769 // Attach exceptional gc relocates to the landingpad. 1770 Instruction *ExceptionalToken = UnwindBlock->getLandingPadInst(); 1771 Result.UnwindToken = ExceptionalToken; 1772 1773 CreateGCRelocates(LiveVariables, BasePtrs, ExceptionalToken, Builder); 1774 1775 // Generate gc relocates and returns for normal block 1776 BasicBlock *NormalDest = II->getNormalDest(); 1777 assert(!isa<PHINode>(NormalDest->begin()) && 1778 NormalDest->getUniquePredecessor() && 1779 "can't safely insert in this block!"); 1780 1781 Builder.SetInsertPoint(&*NormalDest->getFirstInsertionPt()); 1782 1783 // gc relocates will be generated later as if it were regular call 1784 // statepoint 1785 } 1786 assert(Token && "Should be set in one of the above branches!"); 1787 1788 if (IsDeoptimize) { 1789 // If we're wrapping an @llvm.experimental.deoptimize in a statepoint, we 1790 // transform the tail-call like structure to a call to a void function 1791 // followed by unreachable to get better codegen. 1792 Replacements.push_back( 1793 DeferredReplacement::createDeoptimizeReplacement(Call)); 1794 } else { 1795 Token->setName("statepoint_token"); 1796 if (!Call->getType()->isVoidTy() && !Call->use_empty()) { 1797 StringRef Name = Call->hasName() ? 
Call->getName() : ""; 1798 CallInst *GCResult = Builder.CreateGCResult(Token, Call->getType(), Name); 1799 GCResult->setAttributes( 1800 AttributeList::get(GCResult->getContext(), AttributeList::ReturnIndex, 1801 Call->getAttributes().getRetAttrs())); 1802 1803 // We cannot RAUW or delete CS.getInstruction() because it could be in the 1804 // live set of some other safepoint, in which case that safepoint's 1805 // PartiallyConstructedSafepointRecord will hold a raw pointer to this 1806 // llvm::Instruction. Instead, we defer the replacement and deletion to 1807 // after the live sets have been made explicit in the IR, and we no longer 1808 // have raw pointers to worry about. 1809 Replacements.emplace_back( 1810 DeferredReplacement::createRAUW(Call, GCResult)); 1811 } else { 1812 Replacements.emplace_back(DeferredReplacement::createDelete(Call)); 1813 } 1814 } 1815 1816 Result.StatepointToken = Token; 1817 1818 // Second, create a gc.relocate for every live variable 1819 CreateGCRelocates(LiveVariables, BasePtrs, Token, Builder); 1820 } 1821 1822 // Replace an existing gc.statepoint with a new one and a set of gc.relocates 1823 // which make the relocations happening at this safepoint explicit. 1824 // 1825 // WARNING: Does not do any fixup to adjust users of the original live 1826 // values. That's the caller's responsibility. 1827 static void 1828 makeStatepointExplicit(DominatorTree &DT, CallBase *Call, 1829 PartiallyConstructedSafepointRecord &Result, 1830 std::vector<DeferredReplacement> &Replacements, 1831 const PointerToBaseTy &PointerToBase) { 1832 const auto &LiveSet = Result.LiveSet; 1833 1834 // Convert to vector for efficient cross referencing. 1835 SmallVector<Value *, 64> BaseVec, LiveVec; 1836 LiveVec.reserve(LiveSet.size()); 1837 BaseVec.reserve(LiveSet.size()); 1838 for (Value *L : LiveSet) { 1839 LiveVec.push_back(L); 1840 assert(PointerToBase.count(L)); 1841 Value *Base = PointerToBase.find(L)->second; 1842 BaseVec.push_back(Base); 1843 } 1844 assert(LiveVec.size() == BaseVec.size()); 1845 1846 // Do the actual rewriting and delete the old statepoint 1847 makeStatepointExplicitImpl(Call, BaseVec, LiveVec, Result, Replacements, 1848 PointerToBase); 1849 } 1850 1851 // Helper function for relocationViaAlloca. 1852 // 1853 // It receives an iterator over the statepoint gc relocates and emits a store 1854 // to the assigned location (via allocaMap) for each one of them. It adds the 1855 // visited values into the visitedLiveValues set, which we will later use 1856 // for validation checking. 1857 static void 1858 insertRelocationStores(iterator_range<Value::user_iterator> GCRelocs, 1859 DenseMap<Value *, AllocaInst *> &AllocaMap, 1860 DenseSet<Value *> &VisitedLiveValues) { 1861 for (User *U : GCRelocs) { 1862 GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(U); 1863 if (!Relocate) 1864 continue; 1865 1866 Value *OriginalValue = Relocate->getDerivedPtr(); 1867 assert(AllocaMap.count(OriginalValue)); 1868 Value *Alloca = AllocaMap[OriginalValue]; 1869 1870 // Emit store into the related alloca 1871 // All gc_relocates are i8 addrspace(1)* typed; each must be bitcast to 1872 // the correct type according to its alloca.
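// For illustration (value names are made up; suffixed_name_or() only names
// values that already had names), if %p was assigned %p.alloca this emits
// roughly:
//   %p.relocated.casted = bitcast i8 addrspace(1)* %p.relocated to <type of %p>
//   store <type of %p> %p.relocated.casted, <type of %p>* %p.alloca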
1873 assert(Relocate->getNextNode() && 1874 "Should always have one since it's not a terminator"); 1875 IRBuilder<> Builder(Relocate->getNextNode()); 1876 Value *CastedRelocatedValue = 1877 Builder.CreateBitCast(Relocate, 1878 cast<AllocaInst>(Alloca)->getAllocatedType(), 1879 suffixed_name_or(Relocate, ".casted", "")); 1880 1881 new StoreInst(CastedRelocatedValue, Alloca, 1882 cast<Instruction>(CastedRelocatedValue)->getNextNode()); 1883 1884 #ifndef NDEBUG 1885 VisitedLiveValues.insert(OriginalValue); 1886 #endif 1887 } 1888 } 1889 1890 // Helper function for the "relocationViaAlloca". Similar to the 1891 // "insertRelocationStores" but works for rematerialized values. 1892 static void insertRematerializationStores( 1893 const RematerializedValueMapTy &RematerializedValues, 1894 DenseMap<Value *, AllocaInst *> &AllocaMap, 1895 DenseSet<Value *> &VisitedLiveValues) { 1896 for (auto RematerializedValuePair: RematerializedValues) { 1897 Instruction *RematerializedValue = RematerializedValuePair.first; 1898 Value *OriginalValue = RematerializedValuePair.second; 1899 1900 assert(AllocaMap.count(OriginalValue) && 1901 "Can not find alloca for rematerialized value"); 1902 Value *Alloca = AllocaMap[OriginalValue]; 1903 1904 new StoreInst(RematerializedValue, Alloca, 1905 RematerializedValue->getNextNode()); 1906 1907 #ifndef NDEBUG 1908 VisitedLiveValues.insert(OriginalValue); 1909 #endif 1910 } 1911 } 1912 1913 /// Do all the relocation update via allocas and mem2reg 1914 static void relocationViaAlloca( 1915 Function &F, DominatorTree &DT, ArrayRef<Value *> Live, 1916 ArrayRef<PartiallyConstructedSafepointRecord> Records) { 1917 #ifndef NDEBUG 1918 // record initial number of (static) allocas; we'll check we have the same 1919 // number when we get done. 1920 int InitialAllocaNum = 0; 1921 for (Instruction &I : F.getEntryBlock()) 1922 if (isa<AllocaInst>(I)) 1923 InitialAllocaNum++; 1924 #endif 1925 1926 // TODO-PERF: change data structures, reserve 1927 DenseMap<Value *, AllocaInst *> AllocaMap; 1928 SmallVector<AllocaInst *, 200> PromotableAllocas; 1929 // Used later to chack that we have enough allocas to store all values 1930 std::size_t NumRematerializedValues = 0; 1931 PromotableAllocas.reserve(Live.size()); 1932 1933 // Emit alloca for "LiveValue" and record it in "allocaMap" and 1934 // "PromotableAllocas" 1935 const DataLayout &DL = F.getParent()->getDataLayout(); 1936 auto emitAllocaFor = [&](Value *LiveValue) { 1937 AllocaInst *Alloca = new AllocaInst(LiveValue->getType(), 1938 DL.getAllocaAddrSpace(), "", 1939 F.getEntryBlock().getFirstNonPHI()); 1940 AllocaMap[LiveValue] = Alloca; 1941 PromotableAllocas.push_back(Alloca); 1942 }; 1943 1944 // Emit alloca for each live gc pointer 1945 for (Value *V : Live) 1946 emitAllocaFor(V); 1947 1948 // Emit allocas for rematerialized values 1949 for (const auto &Info : Records) 1950 for (auto RematerializedValuePair : Info.RematerializedValues) { 1951 Value *OriginalValue = RematerializedValuePair.second; 1952 if (AllocaMap.count(OriginalValue) != 0) 1953 continue; 1954 1955 emitAllocaFor(OriginalValue); 1956 ++NumRematerializedValues; 1957 } 1958 1959 // The next two loops are part of the same conceptual operation. We need to 1960 // insert a store to the alloca after the original def and at each 1961 // redefinition. We need to insert a load before each use. These are split 1962 // into distinct loops for performance reasons. 
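// Sketching the overall shape for one live pointer %p (names illustrative):
//   %p.alloca = alloca <type of %p>          ; in the entry block
//   store %p, %p.alloca                      ; after the original def
//   <gc.statepoint / gc.relocate>
//   store %p.relocated, %p.alloca            ; after each relocation
//   %p.reload = load %p.alloca               ; before each original use
// PromoteMemToReg() then folds these allocas back into SSA form at the end of
// relocationViaAlloca.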
1963 1964 // Update gc pointer after each statepoint: either store a relocated value or 1965 // null (if no relocated value was found for this gc pointer and it is not a 1966 // gc_result). This must happen before we update the statepoint with load of 1967 // alloca otherwise we lose the link between statepoint and old def. 1968 for (const auto &Info : Records) { 1969 Value *Statepoint = Info.StatepointToken; 1970 1971 // This will be used for consistency check 1972 DenseSet<Value *> VisitedLiveValues; 1973 1974 // Insert stores for normal statepoint gc relocates 1975 insertRelocationStores(Statepoint->users(), AllocaMap, VisitedLiveValues); 1976 1977 // In case if it was invoke statepoint 1978 // we will insert stores for exceptional path gc relocates. 1979 if (isa<InvokeInst>(Statepoint)) { 1980 insertRelocationStores(Info.UnwindToken->users(), AllocaMap, 1981 VisitedLiveValues); 1982 } 1983 1984 // Do similar thing with rematerialized values 1985 insertRematerializationStores(Info.RematerializedValues, AllocaMap, 1986 VisitedLiveValues); 1987 1988 if (ClobberNonLive) { 1989 // As a debugging aid, pretend that an unrelocated pointer becomes null at 1990 // the gc.statepoint. This will turn some subtle GC problems into 1991 // slightly easier to debug SEGVs. Note that on large IR files with 1992 // lots of gc.statepoints this is extremely costly both memory and time 1993 // wise. 1994 SmallVector<AllocaInst *, 64> ToClobber; 1995 for (auto Pair : AllocaMap) { 1996 Value *Def = Pair.first; 1997 AllocaInst *Alloca = Pair.second; 1998 1999 // This value was relocated 2000 if (VisitedLiveValues.count(Def)) { 2001 continue; 2002 } 2003 ToClobber.push_back(Alloca); 2004 } 2005 2006 auto InsertClobbersAt = [&](Instruction *IP) { 2007 for (auto *AI : ToClobber) { 2008 auto PT = cast<PointerType>(AI->getAllocatedType()); 2009 Constant *CPN = ConstantPointerNull::get(PT); 2010 new StoreInst(CPN, AI, IP); 2011 } 2012 }; 2013 2014 // Insert the clobbering stores. These may get intermixed with the 2015 // gc.results and gc.relocates, but that's fine. 2016 if (auto II = dyn_cast<InvokeInst>(Statepoint)) { 2017 InsertClobbersAt(&*II->getNormalDest()->getFirstInsertionPt()); 2018 InsertClobbersAt(&*II->getUnwindDest()->getFirstInsertionPt()); 2019 } else { 2020 InsertClobbersAt(cast<Instruction>(Statepoint)->getNextNode()); 2021 } 2022 } 2023 } 2024 2025 // Update use with load allocas and add store for gc_relocated. 2026 for (auto Pair : AllocaMap) { 2027 Value *Def = Pair.first; 2028 AllocaInst *Alloca = Pair.second; 2029 2030 // We pre-record the uses of allocas so that we dont have to worry about 2031 // later update that changes the user information.. 2032 2033 SmallVector<Instruction *, 20> Uses; 2034 // PERF: trade a linear scan for repeated reallocation 2035 Uses.reserve(Def->getNumUses()); 2036 for (User *U : Def->users()) { 2037 if (!isa<ConstantExpr>(U)) { 2038 // If the def has a ConstantExpr use, then the def is either a 2039 // ConstantExpr use itself or null. In either case 2040 // (recursively in the first, directly in the second), the oop 2041 // it is ultimately dependent on is null and this particular 2042 // use does not need to be fixed up. 
2043 Uses.push_back(cast<Instruction>(U)); 2044 } 2045 } 2046 2047 llvm::sort(Uses); 2048 auto Last = std::unique(Uses.begin(), Uses.end()); 2049 Uses.erase(Last, Uses.end()); 2050 2051 for (Instruction *Use : Uses) { 2052 if (isa<PHINode>(Use)) { 2053 PHINode *Phi = cast<PHINode>(Use); 2054 for (unsigned i = 0; i < Phi->getNumIncomingValues(); i++) { 2055 if (Def == Phi->getIncomingValue(i)) { 2056 LoadInst *Load = 2057 new LoadInst(Alloca->getAllocatedType(), Alloca, "", 2058 Phi->getIncomingBlock(i)->getTerminator()); 2059 Phi->setIncomingValue(i, Load); 2060 } 2061 } 2062 } else { 2063 LoadInst *Load = 2064 new LoadInst(Alloca->getAllocatedType(), Alloca, "", Use); 2065 Use->replaceUsesOfWith(Def, Load); 2066 } 2067 } 2068 2069 // Emit store for the initial gc value. Store must be inserted after load, 2070 // otherwise store will be in alloca's use list and an extra load will be 2071 // inserted before it. 2072 StoreInst *Store = new StoreInst(Def, Alloca, /*volatile*/ false, 2073 DL.getABITypeAlign(Def->getType())); 2074 if (Instruction *Inst = dyn_cast<Instruction>(Def)) { 2075 if (InvokeInst *Invoke = dyn_cast<InvokeInst>(Inst)) { 2076 // InvokeInst is a terminator so the store need to be inserted into its 2077 // normal destination block. 2078 BasicBlock *NormalDest = Invoke->getNormalDest(); 2079 Store->insertBefore(NormalDest->getFirstNonPHI()); 2080 } else { 2081 assert(!Inst->isTerminator() && 2082 "The only terminator that can produce a value is " 2083 "InvokeInst which is handled above."); 2084 Store->insertAfter(Inst); 2085 } 2086 } else { 2087 assert(isa<Argument>(Def)); 2088 Store->insertAfter(cast<Instruction>(Alloca)); 2089 } 2090 } 2091 2092 assert(PromotableAllocas.size() == Live.size() + NumRematerializedValues && 2093 "we must have the same allocas with lives"); 2094 (void) NumRematerializedValues; 2095 if (!PromotableAllocas.empty()) { 2096 // Apply mem2reg to promote alloca to SSA 2097 PromoteMemToReg(PromotableAllocas, DT); 2098 } 2099 2100 #ifndef NDEBUG 2101 for (auto &I : F.getEntryBlock()) 2102 if (isa<AllocaInst>(I)) 2103 InitialAllocaNum--; 2104 assert(InitialAllocaNum == 0 && "We must not introduce any extra allocas"); 2105 #endif 2106 } 2107 2108 /// Implement a unique function which doesn't require we sort the input 2109 /// vector. Doing so has the effect of changing the output of a couple of 2110 /// tests in ways which make them less useful in testing fused safepoints. 2111 template <typename T> static void unique_unsorted(SmallVectorImpl<T> &Vec) { 2112 SmallSet<T, 8> Seen; 2113 erase_if(Vec, [&](const T &V) { return !Seen.insert(V).second; }); 2114 } 2115 2116 /// Insert holders so that each Value is obviously live through the entire 2117 /// lifetime of the call. 
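/// This is done by calling a dummy vararg "__tmp_use" function with the values
/// as arguments, which forces liveness to treat them as live across the
/// safepoint. All holder calls are erased again once liveness has been
/// recomputed.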
2118 static void insertUseHolderAfter(CallBase *Call, const ArrayRef<Value *> Values, 2119 SmallVectorImpl<CallInst *> &Holders) { 2120 if (Values.empty()) 2121 // No values to hold live, might as well not insert the empty holder 2122 return; 2123 2124 Module *M = Call->getModule(); 2125 // Use a dummy vararg function to actually hold the values live 2126 FunctionCallee Func = M->getOrInsertFunction( 2127 "__tmp_use", FunctionType::get(Type::getVoidTy(M->getContext()), true)); 2128 if (isa<CallInst>(Call)) { 2129 // For call safepoints insert dummy calls right after safepoint 2130 Holders.push_back( 2131 CallInst::Create(Func, Values, "", &*++Call->getIterator())); 2132 return; 2133 } 2134 // For invoke safepooints insert dummy calls both in normal and 2135 // exceptional destination blocks 2136 auto *II = cast<InvokeInst>(Call); 2137 Holders.push_back(CallInst::Create( 2138 Func, Values, "", &*II->getNormalDest()->getFirstInsertionPt())); 2139 Holders.push_back(CallInst::Create( 2140 Func, Values, "", &*II->getUnwindDest()->getFirstInsertionPt())); 2141 } 2142 2143 static void findLiveReferences( 2144 Function &F, DominatorTree &DT, ArrayRef<CallBase *> toUpdate, 2145 MutableArrayRef<struct PartiallyConstructedSafepointRecord> records) { 2146 GCPtrLivenessData OriginalLivenessData; 2147 computeLiveInValues(DT, F, OriginalLivenessData); 2148 for (size_t i = 0; i < records.size(); i++) { 2149 struct PartiallyConstructedSafepointRecord &info = records[i]; 2150 analyzeParsePointLiveness(DT, OriginalLivenessData, toUpdate[i], info); 2151 } 2152 } 2153 2154 // Helper function for the "rematerializeLiveValues". It walks use chain 2155 // starting from the "CurrentValue" until it reaches the root of the chain, i.e. 2156 // the base or a value it cannot process. Only "simple" values are processed 2157 // (currently it is GEP's and casts). The returned root is examined by the 2158 // callers of findRematerializableChainToBasePointer. Fills "ChainToBase" array 2159 // with all visited values. 2160 static Value* findRematerializableChainToBasePointer( 2161 SmallVectorImpl<Instruction*> &ChainToBase, 2162 Value *CurrentValue) { 2163 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(CurrentValue)) { 2164 ChainToBase.push_back(GEP); 2165 return findRematerializableChainToBasePointer(ChainToBase, 2166 GEP->getPointerOperand()); 2167 } 2168 2169 if (CastInst *CI = dyn_cast<CastInst>(CurrentValue)) { 2170 if (!CI->isNoopCast(CI->getModule()->getDataLayout())) 2171 return CI; 2172 2173 ChainToBase.push_back(CI); 2174 return findRematerializableChainToBasePointer(ChainToBase, 2175 CI->getOperand(0)); 2176 } 2177 2178 // We have reached the root of the chain, which is either equal to the base or 2179 // is the first unsupported value along the use chain. 2180 return CurrentValue; 2181 } 2182 2183 // Helper function for the "rematerializeLiveValues". Compute cost of the use 2184 // chain we are going to rematerialize. 
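// For instance, a chain made of a no-op bitcast plus a GEP with all-constant
// indices is costed as the (typically free) cast plus the GEP's
// address-computation cost; a GEP with non-constant indices adds a flat 2 on
// top. Chains whose cost reaches RematerializationThreshold are relocated
// instead of rematerialized.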
2185 static InstructionCost 2186 chainToBasePointerCost(SmallVectorImpl<Instruction *> &Chain, 2187 TargetTransformInfo &TTI) { 2188 InstructionCost Cost = 0; 2189 2190 for (Instruction *Instr : Chain) { 2191 if (CastInst *CI = dyn_cast<CastInst>(Instr)) { 2192 assert(CI->isNoopCast(CI->getModule()->getDataLayout()) && 2193 "non noop cast is found during rematerialization"); 2194 2195 Type *SrcTy = CI->getOperand(0)->getType(); 2196 Cost += TTI.getCastInstrCost(CI->getOpcode(), CI->getType(), SrcTy, 2197 TTI::getCastContextHint(CI), 2198 TargetTransformInfo::TCK_SizeAndLatency, CI); 2199 2200 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Instr)) { 2201 // Cost of the address calculation 2202 Type *ValTy = GEP->getSourceElementType(); 2203 Cost += TTI.getAddressComputationCost(ValTy); 2204 2205 // And cost of the GEP itself 2206 // TODO: Use TTI->getGEPCost here (it exists, but appears to be not 2207 // allowed for the external usage) 2208 if (!GEP->hasAllConstantIndices()) 2209 Cost += 2; 2210 2211 } else { 2212 llvm_unreachable("unsupported instruction type during rematerialization"); 2213 } 2214 } 2215 2216 return Cost; 2217 } 2218 2219 static bool AreEquivalentPhiNodes(PHINode &OrigRootPhi, PHINode &AlternateRootPhi) { 2220 unsigned PhiNum = OrigRootPhi.getNumIncomingValues(); 2221 if (PhiNum != AlternateRootPhi.getNumIncomingValues() || 2222 OrigRootPhi.getParent() != AlternateRootPhi.getParent()) 2223 return false; 2224 // Map of incoming values and their corresponding basic blocks of 2225 // OrigRootPhi. 2226 SmallDenseMap<Value *, BasicBlock *, 8> CurrentIncomingValues; 2227 for (unsigned i = 0; i < PhiNum; i++) 2228 CurrentIncomingValues[OrigRootPhi.getIncomingValue(i)] = 2229 OrigRootPhi.getIncomingBlock(i); 2230 2231 // Both current and base PHIs should have same incoming values and 2232 // the same basic blocks corresponding to the incoming values. 2233 for (unsigned i = 0; i < PhiNum; i++) { 2234 auto CIVI = 2235 CurrentIncomingValues.find(AlternateRootPhi.getIncomingValue(i)); 2236 if (CIVI == CurrentIncomingValues.end()) 2237 return false; 2238 BasicBlock *CurrentIncomingBB = CIVI->second; 2239 if (CurrentIncomingBB != AlternateRootPhi.getIncomingBlock(i)) 2240 return false; 2241 } 2242 return true; 2243 } 2244 2245 // Find derived pointers that can be recomputed cheap enough and fill 2246 // RematerizationCandidates with such candidates. 2247 static void 2248 findRematerializationCandidates(PointerToBaseTy PointerToBase, 2249 RematCandTy &RematerizationCandidates, 2250 TargetTransformInfo &TTI) { 2251 const unsigned int ChainLengthThreshold = 10; 2252 2253 for (auto P2B : PointerToBase) { 2254 auto *Derived = P2B.first; 2255 auto *Base = P2B.second; 2256 // Consider only derived pointers. 2257 if (Derived == Base) 2258 continue; 2259 2260 // For each live pointer find its defining chain. 2261 SmallVector<Instruction *, 3> ChainToBase; 2262 Value *RootOfChain = 2263 findRematerializableChainToBasePointer(ChainToBase, Derived); 2264 2265 // Nothing to do, or chain is too long 2266 if ( ChainToBase.size() == 0 || 2267 ChainToBase.size() > ChainLengthThreshold) 2268 continue; 2269 2270 // Handle the scenario where the RootOfChain is not equal to the 2271 // Base Value, but they are essentially the same phi values. 
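// Sketch of the case handled below (names illustrative): the chain may be
// rooted at the original phi
//   %phi      = phi i8 addrspace(1)* [ %a, %left ], [ %b, %right ]
// while findBasePointer recorded a separately created
//   %phi.base = phi i8 addrspace(1)* [ %a, %left ], [ %b, %right ]
// as the base. The two phis are structurally identical, so rematerializing the
// GEP/cast chain against %phi.base is still sound.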
2272 if (RootOfChain != PointerToBase[Derived]) { 2273 PHINode *OrigRootPhi = dyn_cast<PHINode>(RootOfChain); 2274 PHINode *AlternateRootPhi = dyn_cast<PHINode>(PointerToBase[Derived]); 2275 if (!OrigRootPhi || !AlternateRootPhi) 2276 continue; 2277 // PHI nodes that have the same incoming values, and belong to the same 2278 // basic block, are essentially the same SSA value. When the original phi 2279 // has incoming values with different base pointers, the original phi is 2280 // marked as conflict, and an additional `AlternateRootPhi` with the same 2281 // incoming values gets generated by the findBasePointer function. We need 2282 // to identify that the newly generated AlternateRootPhi (.base version of 2283 // the phi) and RootOfChain (the original phi node itself) are the same, so 2284 // that we can rematerialize the gep and casts. This is a workaround for a 2285 // deficiency in the findBasePointer algorithm. 2286 if (!AreEquivalentPhiNodes(*OrigRootPhi, *AlternateRootPhi)) 2287 continue; 2288 } 2289 // Compute cost of this chain. 2290 InstructionCost Cost = chainToBasePointerCost(ChainToBase, TTI); 2291 // TODO: We can also account for cases when we will be able to remove some 2292 // of the rematerialized values by later optimization passes. E.g. if 2293 // we rematerialized several intersecting chains. Or if original values 2294 // don't have any uses besides this statepoint. 2295 2296 // Ok, there is a candidate. 2297 RematerizlizationCandidateRecord Record; 2298 Record.ChainToBase = ChainToBase; 2299 Record.RootOfChain = RootOfChain; 2300 Record.Cost = Cost; 2301 RematerizationCandidates.insert({ Derived, Record }); 2302 } 2303 } 2304 2305 // From the statepoint live set pick values that are cheaper to recompute than 2306 // to relocate. Remove these values from the live set, rematerialize them after 2307 // the statepoint, and record them in the "Info" structure. Note that, similar 2308 // to relocated values, we don't do any user adjustments here. 2309 static void rematerializeLiveValues(CallBase *Call, 2310 PartiallyConstructedSafepointRecord &Info, 2311 PointerToBaseTy &PointerToBase, 2312 RematCandTy &RematerizationCandidates, 2313 TargetTransformInfo &TTI) { 2314 // Record values we are going to delete from this statepoint live set. 2315 // We cannot do this in the following loop due to iterator invalidation. 2316 SmallVector<Value *, 32> LiveValuesToBeDeleted; 2317 2318 for (Value *LiveValue : Info.LiveSet) { 2319 auto It = RematerizationCandidates.find(LiveValue); 2320 if (It == RematerizationCandidates.end()) 2321 continue; 2322 2323 RematerizlizationCandidateRecord &Record = It->second; 2324 2325 InstructionCost Cost = Record.Cost; 2326 // For invokes we need to rematerialize each chain twice - for normal and 2327 // for unwind basic blocks. Model this by multiplying cost by two. 2328 if (isa<InvokeInst>(Call)) 2329 Cost *= 2; 2330 2331 // If it's too expensive - skip it. 2332 if (Cost >= RematerializationThreshold) 2333 continue; 2334 2335 // Remove value from the live set 2336 LiveValuesToBeDeleted.push_back(LiveValue); 2337 2338 // Clone instructions and record them inside "Info" structure. 2339 2340 // For each live pointer find its defining chain. 2341 SmallVector<Instruction *, 3> ChainToBase = Record.ChainToBase; 2342 // Walk backwards to visit top-most instructions first. 2343 std::reverse(ChainToBase.begin(), ChainToBase.end()); 2344 2345 // Utility function which clones all instructions from "ChainToBase" 2346 // and inserts them before "InsertBefore".
Returns rematerialized value 2347 // which should be used after statepoint. 2348 auto rematerializeChain = [&ChainToBase]( 2349 Instruction *InsertBefore, Value *RootOfChain, Value *AlternateLiveBase) { 2350 Instruction *LastClonedValue = nullptr; 2351 Instruction *LastValue = nullptr; 2352 for (Instruction *Instr: ChainToBase) { 2353 // Only GEP's and casts are supported as we need to be careful to not 2354 // introduce any new uses of pointers not in the liveset. 2355 // Note that it's fine to introduce new uses of pointers which were 2356 // otherwise not used after this statepoint. 2357 assert(isa<GetElementPtrInst>(Instr) || isa<CastInst>(Instr)); 2358 2359 Instruction *ClonedValue = Instr->clone(); 2360 ClonedValue->insertBefore(InsertBefore); 2361 ClonedValue->setName(Instr->getName() + ".remat"); 2362 2363 // If it is not first instruction in the chain then it uses previously 2364 // cloned value. We should update it to use cloned value. 2365 if (LastClonedValue) { 2366 assert(LastValue); 2367 ClonedValue->replaceUsesOfWith(LastValue, LastClonedValue); 2368 #ifndef NDEBUG 2369 for (auto OpValue : ClonedValue->operand_values()) { 2370 // Assert that cloned instruction does not use any instructions from 2371 // this chain other than LastClonedValue 2372 assert(!is_contained(ChainToBase, OpValue) && 2373 "incorrect use in rematerialization chain"); 2374 // Assert that the cloned instruction does not use the RootOfChain 2375 // or the AlternateLiveBase. 2376 assert(OpValue != RootOfChain && OpValue != AlternateLiveBase); 2377 } 2378 #endif 2379 } else { 2380 // For the first instruction, replace the use of unrelocated base i.e. 2381 // RootOfChain/OrigRootPhi, with the corresponding PHI present in the 2382 // live set. They have been proved to be the same PHI nodes. Note 2383 // that the *only* use of the RootOfChain in the ChainToBase list is 2384 // the first Value in the list. 2385 if (RootOfChain != AlternateLiveBase) 2386 ClonedValue->replaceUsesOfWith(RootOfChain, AlternateLiveBase); 2387 } 2388 2389 LastClonedValue = ClonedValue; 2390 LastValue = Instr; 2391 } 2392 assert(LastClonedValue); 2393 return LastClonedValue; 2394 }; 2395 2396 // Different cases for calls and invokes. For invokes we need to clone 2397 // instructions both on normal and unwind path. 
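// E.g. for a candidate whose chain is a single GEP, the call case below emits
// one "<name>.remat" clone right after the statepoint, while the invoke case
// emits one clone at the first insertion point of the normal destination and
// one at the first insertion point of the unwind destination, each recorded in
// Info.RematerializedValues.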
2398 if (isa<CallInst>(Call)) { 2399 Instruction *InsertBefore = Call->getNextNode(); 2400 assert(InsertBefore); 2401 Instruction *RematerializedValue = rematerializeChain( 2402 InsertBefore, Record.RootOfChain, PointerToBase[LiveValue]); 2403 Info.RematerializedValues[RematerializedValue] = LiveValue; 2404 } else { 2405 auto *Invoke = cast<InvokeInst>(Call); 2406 2407 Instruction *NormalInsertBefore = 2408 &*Invoke->getNormalDest()->getFirstInsertionPt(); 2409 Instruction *UnwindInsertBefore = 2410 &*Invoke->getUnwindDest()->getFirstInsertionPt(); 2411 2412 Instruction *NormalRematerializedValue = rematerializeChain( 2413 NormalInsertBefore, Record.RootOfChain, PointerToBase[LiveValue]); 2414 Instruction *UnwindRematerializedValue = rematerializeChain( 2415 UnwindInsertBefore, Record.RootOfChain, PointerToBase[LiveValue]); 2416 2417 Info.RematerializedValues[NormalRematerializedValue] = LiveValue; 2418 Info.RematerializedValues[UnwindRematerializedValue] = LiveValue; 2419 } 2420 } 2421 2422 // Remove rematerializaed values from the live set 2423 for (auto LiveValue: LiveValuesToBeDeleted) { 2424 Info.LiveSet.remove(LiveValue); 2425 } 2426 } 2427 2428 static bool inlineGetBaseAndOffset(Function &F, 2429 SmallVectorImpl<CallInst *> &Intrinsics, 2430 DefiningValueMapTy &DVCache) { 2431 auto &Context = F.getContext(); 2432 auto &DL = F.getParent()->getDataLayout(); 2433 bool Changed = false; 2434 2435 for (auto *Callsite : Intrinsics) 2436 switch (Callsite->getIntrinsicID()) { 2437 case Intrinsic::experimental_gc_get_pointer_base: { 2438 Changed = true; 2439 Value *Base = findBasePointer(Callsite->getOperand(0), DVCache); 2440 assert(!DVCache.count(Callsite)); 2441 auto *BaseBC = IRBuilder<>(Callsite).CreateBitCast( 2442 Base, Callsite->getType(), suffixed_name_or(Base, ".cast", "")); 2443 if (BaseBC != Base) 2444 DVCache[BaseBC] = Base; 2445 Callsite->replaceAllUsesWith(BaseBC); 2446 if (!BaseBC->hasName()) 2447 BaseBC->takeName(Callsite); 2448 Callsite->eraseFromParent(); 2449 break; 2450 } 2451 case Intrinsic::experimental_gc_get_pointer_offset: { 2452 Changed = true; 2453 Value *Derived = Callsite->getOperand(0); 2454 Value *Base = findBasePointer(Derived, DVCache); 2455 assert(!DVCache.count(Callsite)); 2456 unsigned AddressSpace = Derived->getType()->getPointerAddressSpace(); 2457 unsigned IntPtrSize = DL.getPointerSizeInBits(AddressSpace); 2458 IRBuilder<> Builder(Callsite); 2459 Value *BaseInt = 2460 Builder.CreatePtrToInt(Base, Type::getIntNTy(Context, IntPtrSize), 2461 suffixed_name_or(Base, ".int", "")); 2462 Value *DerivedInt = 2463 Builder.CreatePtrToInt(Derived, Type::getIntNTy(Context, IntPtrSize), 2464 suffixed_name_or(Derived, ".int", "")); 2465 Value *Offset = Builder.CreateSub(DerivedInt, BaseInt); 2466 Callsite->replaceAllUsesWith(Offset); 2467 Offset->takeName(Callsite); 2468 Callsite->eraseFromParent(); 2469 break; 2470 } 2471 default: 2472 llvm_unreachable("Unknown intrinsic"); 2473 } 2474 2475 return Changed; 2476 } 2477 2478 static bool insertParsePoints(Function &F, DominatorTree &DT, 2479 TargetTransformInfo &TTI, 2480 SmallVectorImpl<CallBase *> &ToUpdate, 2481 DefiningValueMapTy &DVCache) { 2482 #ifndef NDEBUG 2483 // Validate the input 2484 std::set<CallBase *> Uniqued; 2485 Uniqued.insert(ToUpdate.begin(), ToUpdate.end()); 2486 assert(Uniqued.size() == ToUpdate.size() && "no duplicates please!"); 2487 2488 for (CallBase *Call : ToUpdate) 2489 assert(Call->getFunction() == &F); 2490 #endif 2491 2492 // When inserting gc.relocates for invokes, we need to be able 
to insert at 2493 // the top of the successor blocks. See the comment on 2494 // normalForInvokeSafepoint on exactly what is needed. Note that this step 2495 // may restructure the CFG. 2496 for (CallBase *Call : ToUpdate) { 2497 auto *II = dyn_cast<InvokeInst>(Call); 2498 if (!II) 2499 continue; 2500 normalizeForInvokeSafepoint(II->getNormalDest(), II->getParent(), DT); 2501 normalizeForInvokeSafepoint(II->getUnwindDest(), II->getParent(), DT); 2502 } 2503 2504 // A list of dummy calls added to the IR to keep various values obviously 2505 // live in the IR. We'll remove all of these when done. 2506 SmallVector<CallInst *, 64> Holders; 2507 2508 // Insert a dummy call with all of the deopt operands we'll need for the 2509 // actual safepoint insertion as arguments. This ensures reference operands 2510 // in the deopt argument list are considered live through the safepoint (and 2511 // thus makes sure they get relocated.) 2512 for (CallBase *Call : ToUpdate) { 2513 SmallVector<Value *, 64> DeoptValues; 2514 2515 for (Value *Arg : GetDeoptBundleOperands(Call)) { 2516 assert(!isUnhandledGCPointerType(Arg->getType()) && 2517 "support for FCA unimplemented"); 2518 if (isHandledGCPointerType(Arg->getType())) 2519 DeoptValues.push_back(Arg); 2520 } 2521 2522 insertUseHolderAfter(Call, DeoptValues, Holders); 2523 } 2524 2525 SmallVector<PartiallyConstructedSafepointRecord, 64> Records(ToUpdate.size()); 2526 2527 // A) Identify all gc pointers which are statically live at the given call 2528 // site. 2529 findLiveReferences(F, DT, ToUpdate, Records); 2530 2531 /// Global mapping from live pointers to a base-defining-value. 2532 PointerToBaseTy PointerToBase; 2533 2534 // B) Find the base pointers for each live pointer 2535 for (size_t i = 0; i < Records.size(); i++) { 2536 PartiallyConstructedSafepointRecord &info = Records[i]; 2537 findBasePointers(DT, DVCache, ToUpdate[i], info, PointerToBase); 2538 } 2539 if (PrintBasePointers) { 2540 errs() << "Base Pairs (w/o Relocation):\n"; 2541 for (auto &Pair : PointerToBase) { 2542 errs() << " derived "; 2543 Pair.first->printAsOperand(errs(), false); 2544 errs() << " base "; 2545 Pair.second->printAsOperand(errs(), false); 2546 errs() << "\n"; 2547 ; 2548 } 2549 } 2550 2551 // The base phi insertion logic (for any safepoint) may have inserted new 2552 // instructions which are now live at some safepoint. The simplest such 2553 // example is: 2554 // loop: 2555 // phi a <-- will be a new base_phi here 2556 // safepoint 1 <-- that needs to be live here 2557 // gep a + 1 2558 // safepoint 2 2559 // br loop 2560 // We insert some dummy calls after each safepoint to definitely hold live 2561 // the base pointers which were identified for that safepoint. We'll then 2562 // ask liveness for _every_ base inserted to see what is now live. Then we 2563 // remove the dummy calls. 2564 Holders.reserve(Holders.size() + Records.size()); 2565 for (size_t i = 0; i < Records.size(); i++) { 2566 PartiallyConstructedSafepointRecord &Info = Records[i]; 2567 2568 SmallVector<Value *, 128> Bases; 2569 for (auto *Derived : Info.LiveSet) { 2570 assert(PointerToBase.count(Derived) && "Missed base for derived pointer"); 2571 Bases.push_back(PointerToBase[Derived]); 2572 } 2573 2574 insertUseHolderAfter(ToUpdate[i], Bases, Holders); 2575 } 2576 2577 // By selecting base pointers, we've effectively inserted new uses. Thus, we 2578 // need to rerun liveness. We may *also* have inserted new defs, but that's 2579 // not the key issue. 
2580 recomputeLiveInValues(F, DT, ToUpdate, Records, PointerToBase); 2581 2582 if (PrintBasePointers) { 2583 errs() << "Base Pairs: (w/Relocation)\n"; 2584 for (auto Pair : PointerToBase) { 2585 errs() << " derived "; 2586 Pair.first->printAsOperand(errs(), false); 2587 errs() << " base "; 2588 Pair.second->printAsOperand(errs(), false); 2589 errs() << "\n"; 2590 } 2591 } 2592 2593 // It is possible that non-constant live variables have a constant base. For 2594 // example, a GEP with a variable offset from a global. In this case we can 2595 // remove it from the liveset. We already don't add constants to the liveset 2596 // because we assume they won't move at runtime and the GC doesn't need to be 2597 // informed about them. The same reasoning applies if the base is constant. 2598 // Note that the relocation placement code relies on this filtering for 2599 // correctness as it expects the base to be in the liveset, which isn't true 2600 // if the base is constant. 2601 for (auto &Info : Records) { 2602 Info.LiveSet.remove_if([&](Value *LiveV) { 2603 assert(PointerToBase.count(LiveV) && "Missed base for derived pointer"); 2604 return isa<Constant>(PointerToBase[LiveV]); 2605 }); 2606 } 2607 2608 for (CallInst *CI : Holders) 2609 CI->eraseFromParent(); 2610 2611 Holders.clear(); 2612 2613 // Compute the cost of possible re-materialization of derived pointers. 2614 RematCandTy RematerizationCandidates; 2615 findRematerializationCandidates(PointerToBase, RematerizationCandidates, TTI); 2616 2617 // In order to reduce the live set of a statepoint we might choose to 2618 // rematerialize some values instead of relocating them. This is purely an 2619 // optimization and does not influence correctness. 2620 for (size_t i = 0; i < Records.size(); i++) 2621 rematerializeLiveValues(ToUpdate[i], Records[i], PointerToBase, 2622 RematerizationCandidates, TTI); 2623 2624 // We need this to safely RAUW and delete call or invoke return values that 2625 // may themselves be live over a statepoint. For details, please see usage in 2626 // makeStatepointExplicitImpl. 2627 std::vector<DeferredReplacement> Replacements; 2628 2629 // Now run through and replace the existing statepoints with new ones with 2630 // the live variables listed. We do not yet update uses of the values being 2631 // relocated. We have references to live variables that need to 2632 // survive to the last iteration of this loop. (By construction, the 2633 // previous statepoint cannot be a live variable, thus we can and do remove 2634 // the old statepoint calls as we go.) 2635 for (size_t i = 0; i < Records.size(); i++) 2636 makeStatepointExplicit(DT, ToUpdate[i], Records[i], Replacements, 2637 PointerToBase); 2638 2639 ToUpdate.clear(); // prevent accidental use of invalid calls. 2640 2641 for (auto &PR : Replacements) 2642 PR.doReplacement(); 2643 2644 Replacements.clear(); 2645 2646 for (auto &Info : Records) { 2647 // These live sets may contain stale Value pointers, since we replaced calls 2648 // with operand bundles with calls wrapped in gc.statepoint, and some of 2649 // those calls may have been def'ing live gc pointers. Clear these out to 2650 // avoid accidentally using them. 2651 // 2652 // TODO: We should create a separate data structure that does not contain 2653 // these live sets, and migrate to using that data structure from this point 2654 // onward.
2655 Info.LiveSet.clear(); 2656 } 2657 PointerToBase.clear(); 2658 2659 // Do all the fixups of the original live variables to their relocated selves 2660 SmallVector<Value *, 128> Live; 2661 for (size_t i = 0; i < Records.size(); i++) { 2662 PartiallyConstructedSafepointRecord &Info = Records[i]; 2663 2664 // We can't simply save the live set from the original insertion. One of 2665 // the live values might be the result of a call which needs a safepoint. 2666 // That Value* no longer exists and we need to use the new gc_result. 2667 // Thankfully, the live set is embedded in the statepoint (and updated), so 2668 // we just grab that. 2669 llvm::append_range(Live, Info.StatepointToken->gc_args()); 2670 #ifndef NDEBUG 2671 // Do some basic validation checking on our liveness results before 2672 // performing relocation. Relocation can and will turn mistakes in liveness 2673 // results into nonsensical code which is much harder to debug. 2674 // TODO: It would be nice to test consistency as well 2675 assert(DT.isReachableFromEntry(Info.StatepointToken->getParent()) && 2676 "statepoint must be reachable or liveness is meaningless"); 2677 for (Value *V : Info.StatepointToken->gc_args()) { 2678 if (!isa<Instruction>(V)) 2679 // Non-instruction values trivially dominate all possible uses 2680 continue; 2681 auto *LiveInst = cast<Instruction>(V); 2682 assert(DT.isReachableFromEntry(LiveInst->getParent()) && 2683 "unreachable values should never be live"); 2684 assert(DT.dominates(LiveInst, Info.StatepointToken) && 2685 "basic SSA liveness expectation violated by liveness analysis"); 2686 } 2687 #endif 2688 } 2689 unique_unsorted(Live); 2690 2691 #ifndef NDEBUG 2692 // Validation check 2693 for (auto *Ptr : Live) 2694 assert(isHandledGCPointerType(Ptr->getType()) && 2695 "must be a gc pointer type"); 2696 #endif 2697 2698 relocationViaAlloca(F, DT, Live, Records); 2699 return !Records.empty(); 2700 } 2701 2702 // List of all parameter and return attributes which must be stripped when 2703 // lowering from the abstract machine model. Note that we list attributes 2704 // here which aren't valid as return attributes; that is okay. 2705 static AttributeMask getParamAndReturnAttributesToRemove() { 2706 AttributeMask R; 2707 R.addAttribute(Attribute::Dereferenceable); 2708 R.addAttribute(Attribute::DereferenceableOrNull); 2709 R.addAttribute(Attribute::ReadNone); 2710 R.addAttribute(Attribute::ReadOnly); 2711 R.addAttribute(Attribute::WriteOnly); 2712 R.addAttribute(Attribute::NoAlias); 2713 R.addAttribute(Attribute::NoFree); 2714 return R; 2715 } 2716 2717 static void stripNonValidAttributesFromPrototype(Function &F) { 2718 LLVMContext &Ctx = F.getContext(); 2719 2720 // Intrinsics are very delicate. Lowering sometimes depends on the presence 2721 // of certain attributes for correctness, but we may have also inferred 2722 // additional ones in the abstract machine model which need to be stripped. 2723 // This assumes that the attributes defined in Intrinsic.td are conservatively 2724 // correct for both physical and abstract model.
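// For a non-intrinsic declaration this means, e.g., that
//   declare dereferenceable(8) i8 addrspace(1)* @f(i8 addrspace(1)* readonly)
// loses both the dereferenceable(8) return attribute and the readonly
// parameter attribute below, since a statepoint may conceptually move or free
// the underlying object.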
2725 if (Intrinsic::ID id = F.getIntrinsicID()) { 2726 F.setAttributes(Intrinsic::getAttributes(Ctx, id)); 2727 return; 2728 } 2729 2730 AttributeMask R = getParamAndReturnAttributesToRemove(); 2731 for (Argument &A : F.args()) 2732 if (isa<PointerType>(A.getType())) 2733 F.removeParamAttrs(A.getArgNo(), R); 2734 2735 if (isa<PointerType>(F.getReturnType())) 2736 F.removeRetAttrs(R); 2737 2738 for (auto Attr : FnAttrsToStrip) 2739 F.removeFnAttr(Attr); 2740 } 2741 2742 /// Certain metadata on instructions are invalid after running RS4GC. 2743 /// Optimizations that run after RS4GC can incorrectly use this metadata to 2744 /// optimize functions. We drop such metadata on the instruction. 2745 static void stripInvalidMetadataFromInstruction(Instruction &I) { 2746 if (!isa<LoadInst>(I) && !isa<StoreInst>(I)) 2747 return; 2748 // These are the attributes that are still valid on loads and stores after 2749 // RS4GC. 2750 // The metadata implying dereferenceability and noalias are (conservatively) 2751 // dropped. This is because semantically, after RewriteStatepointsForGC runs, 2752 // all calls to gc.statepoint "free" the entire heap. Also, gc.statepoint can 2753 // touch the entire heap including noalias objects. Note: The reasoning is 2754 // same as stripping the dereferenceability and noalias attributes that are 2755 // analogous to the metadata counterparts. 2756 // We also drop the invariant.load metadata on the load because that metadata 2757 // implies the address operand to the load points to memory that is never 2758 // changed once it became dereferenceable. This is no longer true after RS4GC. 2759 // Similar reasoning applies to invariant.group metadata, which applies to 2760 // loads within a group. 2761 unsigned ValidMetadataAfterRS4GC[] = {LLVMContext::MD_tbaa, 2762 LLVMContext::MD_range, 2763 LLVMContext::MD_alias_scope, 2764 LLVMContext::MD_nontemporal, 2765 LLVMContext::MD_nonnull, 2766 LLVMContext::MD_align, 2767 LLVMContext::MD_type}; 2768 2769 // Drops all metadata on the instruction other than ValidMetadataAfterRS4GC. 2770 I.dropUnknownNonDebugMetadata(ValidMetadataAfterRS4GC); 2771 } 2772 2773 static void stripNonValidDataFromBody(Function &F) { 2774 if (F.empty()) 2775 return; 2776 2777 LLVMContext &Ctx = F.getContext(); 2778 MDBuilder Builder(Ctx); 2779 2780 // Set of invariantstart instructions that we need to remove. 2781 // Use this to avoid invalidating the instruction iterator. 2782 SmallVector<IntrinsicInst*, 12> InvariantStartInstructions; 2783 2784 for (Instruction &I : instructions(F)) { 2785 // invariant.start on memory location implies that the referenced memory 2786 // location is constant and unchanging. This is no longer true after 2787 // RewriteStatepointsForGC runs because there can be calls to gc.statepoint 2788 // which frees the entire heap and the presence of invariant.start allows 2789 // the optimizer to sink the load of a memory location past a statepoint, 2790 // which is incorrect. 
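// E.g. a
//   %inv = call {}* @llvm.invariant.start.p0i8(i64 8, i8* %ptr)
// is collected here and erased further down (its uses replaced with undef),
// because the "this memory never changes" promise does not survive a
// collection.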
2791 if (auto *II = dyn_cast<IntrinsicInst>(&I)) 2792 if (II->getIntrinsicID() == Intrinsic::invariant_start) { 2793 InvariantStartInstructions.push_back(II); 2794 continue; 2795 } 2796 2797 if (MDNode *Tag = I.getMetadata(LLVMContext::MD_tbaa)) { 2798 MDNode *MutableTBAA = Builder.createMutableTBAAAccessTag(Tag); 2799 I.setMetadata(LLVMContext::MD_tbaa, MutableTBAA); 2800 } 2801 2802 stripInvalidMetadataFromInstruction(I); 2803 2804 AttributeMask R = getParamAndReturnAttributesToRemove(); 2805 if (auto *Call = dyn_cast<CallBase>(&I)) { 2806 for (int i = 0, e = Call->arg_size(); i != e; i++) 2807 if (isa<PointerType>(Call->getArgOperand(i)->getType())) 2808 Call->removeParamAttrs(i, R); 2809 if (isa<PointerType>(Call->getType())) 2810 Call->removeRetAttrs(R); 2811 } 2812 } 2813 2814 // Delete the invariant.start instructions and RAUW undef. 2815 for (auto *II : InvariantStartInstructions) { 2816 II->replaceAllUsesWith(UndefValue::get(II->getType())); 2817 II->eraseFromParent(); 2818 } 2819 } 2820 2821 /// Returns true if this function should be rewritten by this pass. The main 2822 /// point of this function is as an extension point for custom logic. 2823 static bool shouldRewriteStatepointsIn(Function &F) { 2824 // TODO: This should check the GCStrategy 2825 if (F.hasGC()) { 2826 const auto &FunctionGCName = F.getGC(); 2827 const StringRef StatepointExampleName("statepoint-example"); 2828 const StringRef CoreCLRName("coreclr"); 2829 return (StatepointExampleName == FunctionGCName) || 2830 (CoreCLRName == FunctionGCName); 2831 } else 2832 return false; 2833 } 2834 2835 static void stripNonValidData(Module &M) { 2836 #ifndef NDEBUG 2837 assert(llvm::any_of(M, shouldRewriteStatepointsIn) && "precondition!"); 2838 #endif 2839 2840 for (Function &F : M) 2841 stripNonValidAttributesFromPrototype(F); 2842 2843 for (Function &F : M) 2844 stripNonValidDataFromBody(F); 2845 } 2846 2847 bool RewriteStatepointsForGC::runOnFunction(Function &F, DominatorTree &DT, 2848 TargetTransformInfo &TTI, 2849 const TargetLibraryInfo &TLI) { 2850 assert(!F.isDeclaration() && !F.empty() && 2851 "need function body to rewrite statepoints in"); 2852 assert(shouldRewriteStatepointsIn(F) && "mismatch in rewrite decision"); 2853 2854 auto NeedsRewrite = [&TLI](Instruction &I) { 2855 if (const auto *Call = dyn_cast<CallBase>(&I)) { 2856 if (isa<GCStatepointInst>(Call)) 2857 return false; 2858 if (callsGCLeafFunction(Call, TLI)) 2859 return false; 2860 2861 // Normally it's up to the frontend to make sure that non-leaf calls also 2862 // have proper deopt state if it is required. We make an exception for 2863 // element atomic memcpy/memmove intrinsics here. Unlike other intrinsics 2864 // these are non-leaf by default. They might be generated by the optimizer 2865 // which doesn't know how to produce a proper deopt state. So if we see a 2866 // non-leaf memcpy/memmove without deopt state just treat it as a leaf 2867 // copy and don't produce a statepoint. 2868 if (!AllowStatepointWithNoDeoptInfo && 2869 !Call->getOperandBundle(LLVMContext::OB_deopt)) { 2870 assert((isa<AtomicMemCpyInst>(Call) || isa<AtomicMemMoveInst>(Call)) && 2871 "Don't expect any other calls here!"); 2872 return false; 2873 } 2874 return true; 2875 } 2876 return false; 2877 }; 2878 2879 // Delete any unreachable statepoints so that we don't have unrewritten 2880 // statepoints surviving this pass. This makes testing easier and the 2881 // resulting IR less confusing to human readers. 
  DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
  bool MadeChange = removeUnreachableBlocks(F, &DTU);
  // Flush the Dominator Tree.
  DTU.getDomTree();

  // Gather all the statepoints which need to be rewritten. Be careful to only
  // consider those in reachable code since we need to ask dominance queries
  // when rewriting. We'll delete the unreachable ones in a moment.
  SmallVector<CallBase *, 64> ParsePointNeeded;
  SmallVector<CallInst *, 64> Intrinsics;
  for (Instruction &I : instructions(F)) {
    // TODO: only the ones with the flag set!
    if (NeedsRewrite(I)) {
      // NOTE removeUnreachableBlocks() is stronger than
      // DominatorTree::isReachableFromEntry(). In other words
      // removeUnreachableBlocks can remove some blocks for which
      // isReachableFromEntry() returns true.
      assert(DT.isReachableFromEntry(I.getParent()) &&
             "no unreachable blocks expected");
      ParsePointNeeded.push_back(cast<CallBase>(&I));
    }
    if (auto *CI = dyn_cast<CallInst>(&I))
      if (CI->getIntrinsicID() == Intrinsic::experimental_gc_get_pointer_base ||
          CI->getIntrinsicID() == Intrinsic::experimental_gc_get_pointer_offset)
        Intrinsics.emplace_back(CI);
  }

  // Return early if no work to do.
  if (ParsePointNeeded.empty() && Intrinsics.empty())
    return MadeChange;

  // As a prepass, go ahead and aggressively destroy single entry phi nodes.
  // These are created by LCSSA. They have the effect of increasing the size
  // of liveness sets for no good reason. It may be harder to do this post
  // insertion since relocations and base phis can confuse things.
  for (BasicBlock &BB : F)
    if (BB.getUniquePredecessor())
      MadeChange |= FoldSingleEntryPHINodes(&BB);

  // Before we start introducing relocations, we want to tweak the IR a bit to
  // avoid unfortunate code generation effects. The main example is that we
  // want to try to make sure the comparison feeding a branch is after any
  // safepoints. Otherwise, we end up with a comparison of pre-relocation
  // values feeding a branch after relocation. This is semantically correct,
  // but results in extra register pressure since both the pre-relocation and
  // post-relocation copies must be available in registers. For code without
  // relocations this is handled elsewhere, but teaching the scheduler to
  // reverse the transform we're about to do would be slightly complex.
  // Note: This may extend the live range of the inputs to the icmp and thus
  // increase the liveset of any statepoint we move over. This is profitable
  // as long as all statepoints are in rare blocks. If we had in-register
  // lowering for live values this would be a much safer transform.
  auto getConditionInst = [](Instruction *TI) -> Instruction * {
    if (auto *BI = dyn_cast<BranchInst>(TI))
      if (BI->isConditional())
        return dyn_cast<Instruction>(BI->getCondition());
    // TODO: Extend this to handle switches
    return nullptr;
  };
  for (BasicBlock &BB : F) {
    Instruction *TI = BB.getTerminator();
    if (auto *Cond = getConditionInst(TI))
      // TODO: Handle more than just ICmps here. We should be able to move
      // most instructions without side effects or memory access.
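      // For example (illustrative): in a block such as
      //   %c = icmp eq i8 addrspace(1)* %p, null
      //   call void @bar()               ; will become a statepoint
      //   br i1 %c, label %taken, label %untaken
      // the icmp is moved down to just before the branch so that, once the
      // statepoint is inserted, the compare reads the relocated %p rather
      // than a pre-relocation copy kept live across the safepoint.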
      if (isa<ICmpInst>(Cond) && Cond->hasOneUse()) {
        MadeChange = true;
        Cond->moveBefore(TI);
      }
  }

  // Nasty workaround - The base computation code in the main algorithm doesn't
  // consider the fact that a GEP can be used to convert a scalar to a vector.
  // The right fix for this is to integrate GEPs into the base rewriting
  // algorithm properly; this is just a short-term workaround to prevent
  // crashes by canonicalizing such GEPs into fully vector GEPs.
  for (Instruction &I : instructions(F)) {
    if (!isa<GetElementPtrInst>(I))
      continue;

    unsigned VF = 0;
    for (unsigned i = 0; i < I.getNumOperands(); i++)
      if (auto *OpndVTy = dyn_cast<VectorType>(I.getOperand(i)->getType())) {
        assert(VF == 0 ||
               VF == cast<FixedVectorType>(OpndVTy)->getNumElements());
        VF = cast<FixedVectorType>(OpndVTy)->getNumElements();
      }

    // It's the vector to scalar traversal through the pointer operand which
    // confuses base pointer rewriting, so limit ourselves to that case.
    if (!I.getOperand(0)->getType()->isVectorTy() && VF != 0) {
      IRBuilder<> B(&I);
      auto *Splat = B.CreateVectorSplat(VF, I.getOperand(0));
      I.setOperand(0, Splat);
      MadeChange = true;
    }
  }

  // Cache the 'defining value' relation used in the computation and
  // insertion of base phis and selects. This ensures that we don't insert
  // large numbers of duplicate base_phis. Use one cache for both
  // inlineGetBaseAndOffset() and insertParsePoints().
  DefiningValueMapTy DVCache;

  if (!Intrinsics.empty())
    // Inline @gc.get.pointer.base() and @gc.get.pointer.offset() before
    // finding live references.
    MadeChange |= inlineGetBaseAndOffset(F, Intrinsics, DVCache);

  if (!ParsePointNeeded.empty())
    MadeChange |= insertParsePoints(F, DT, TTI, ParsePointNeeded, DVCache);

  return MadeChange;
}

// Liveness computation via standard dataflow
// -------------------------------------------------------------------

// TODO: Consider using bitvectors for liveness; the set of potentially
// interesting values should be small and easy to pre-compute.

/// Compute the live-in set for the location Begin, starting from
/// the live-out set of the basic block.
static void computeLiveInValues(BasicBlock::reverse_iterator Begin,
                                BasicBlock::reverse_iterator End,
                                SetVector<Value *> &LiveTmp) {
  for (auto &I : make_range(Begin, End)) {
    // KILL/Def - Remove this definition from LiveIn
    LiveTmp.remove(&I);

    // Don't consider *uses* in PHI nodes; we handle their contribution to
    // predecessor blocks when we seed the LiveOut sets.
    if (isa<PHINode>(I))
      continue;

    // USE - Add to the LiveIn set for this instruction
    for (Value *V : I.operands()) {
      assert(!isUnhandledGCPointerType(V->getType()) &&
             "support for FCA unimplemented");
      if (isHandledGCPointerType(V->getType()) && !isa<Constant>(V)) {
        // The choice to exclude all things constant here is slightly subtle.
        // There are two independent reasons:
        // - We assume that things which are constant (from LLVM's definition)
        //   do not move at runtime. For example, the address of a global
        //   variable is fixed, even though its contents may not be.
        // - Second, we can't disallow arbitrary inttoptr constants even
        //   if the language frontend does.
        //   Optimization passes are free to locally exploit facts without
        //   respect to global reachability. This can create sections of code
        //   which are dynamically unreachable and contain just about anything.
        //   (see constants.ll in tests)
        LiveTmp.insert(V);
      }
    }
  }
}

static void computeLiveOutSeed(BasicBlock *BB, SetVector<Value *> &LiveTmp) {
  for (BasicBlock *Succ : successors(BB)) {
    for (auto &I : *Succ) {
      PHINode *PN = dyn_cast<PHINode>(&I);
      if (!PN)
        break;

      Value *V = PN->getIncomingValueForBlock(BB);
      assert(!isUnhandledGCPointerType(V->getType()) &&
             "support for FCA unimplemented");
      if (isHandledGCPointerType(V->getType()) && !isa<Constant>(V))
        LiveTmp.insert(V);
    }
  }
}

static SetVector<Value *> computeKillSet(BasicBlock *BB) {
  SetVector<Value *> KillSet;
  for (Instruction &I : *BB)
    if (isHandledGCPointerType(I.getType()))
      KillSet.insert(&I);
  return KillSet;
}

#ifndef NDEBUG
/// Check that the items in 'Live' dominate 'TI'. This is used as a basic
/// validation check for the liveness computation.
static void checkBasicSSA(DominatorTree &DT, SetVector<Value *> &Live,
                          Instruction *TI, bool TermOkay = false) {
  for (Value *V : Live) {
    if (auto *I = dyn_cast<Instruction>(V)) {
      // The terminator can be a member of the LiveOut set. LLVM's definition
      // of instruction dominance states that an instruction does not dominate
      // itself. As such, we need to special case this to allow it.
      if (TermOkay && TI == I)
        continue;
      assert(DT.dominates(I, TI) &&
             "basic SSA liveness expectation violated by liveness analysis");
    }
  }
}

/// Check that all the liveness sets used during the computation of liveness
/// obey basic SSA properties. This is useful for finding cases where we miss
/// a def.
static void checkBasicSSA(DominatorTree &DT, GCPtrLivenessData &Data,
                          BasicBlock &BB) {
  checkBasicSSA(DT, Data.LiveSet[&BB], BB.getTerminator());
  checkBasicSSA(DT, Data.LiveOut[&BB], BB.getTerminator(), true);
  checkBasicSSA(DT, Data.LiveIn[&BB], BB.getTerminator());
}
#endif

static void computeLiveInValues(DominatorTree &DT, Function &F,
                                GCPtrLivenessData &Data) {
  SmallSetVector<BasicBlock *, 32> Worklist;

  // Seed the liveness for each individual block.
  for (BasicBlock &BB : F) {
    Data.KillSet[&BB] = computeKillSet(&BB);
    Data.LiveSet[&BB].clear();
    computeLiveInValues(BB.rbegin(), BB.rend(), Data.LiveSet[&BB]);

#ifndef NDEBUG
    for (Value *Kill : Data.KillSet[&BB])
      assert(!Data.LiveSet[&BB].count(Kill) && "live set contains kill");
#endif

    Data.LiveOut[&BB] = SetVector<Value *>();
    computeLiveOutSeed(&BB, Data.LiveOut[&BB]);
    Data.LiveIn[&BB] = Data.LiveSet[&BB];
    Data.LiveIn[&BB].set_union(Data.LiveOut[&BB]);
    Data.LiveIn[&BB].set_subtract(Data.KillSet[&BB]);
    if (!Data.LiveIn[&BB].empty())
      Worklist.insert(pred_begin(&BB), pred_end(&BB));
  }

  // Propagate that liveness until stable.
  while (!Worklist.empty()) {
    BasicBlock *BB = Worklist.pop_back_val();

    // Compute our new liveout set, then exit early if it hasn't changed
    // despite the contribution of our successors.
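    // Restated for reference, these are the two standard backward-dataflow
    // updates applied below:
    //   LiveOut(BB) = union over successors S of LiveIn(S)
    //   LiveIn(BB)  = (LiveSet(BB) union LiveOut(BB)) minus KillSet(BB)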
    SetVector<Value *> LiveOut = Data.LiveOut[BB];
    const auto OldLiveOutSize = LiveOut.size();
    for (BasicBlock *Succ : successors(BB)) {
      assert(Data.LiveIn.count(Succ));
      LiveOut.set_union(Data.LiveIn[Succ]);
    }
    // assert: the old LiveOut is a subset of LiveOut
    if (OldLiveOutSize == LiveOut.size()) {
      // If the sets are the same size, then we didn't actually add anything
      // when unioning our successors' LiveIn sets. Thus, the LiveIn of this
      // block hasn't changed.
      continue;
    }
    Data.LiveOut[BB] = LiveOut;

    // Apply the effects of this basic block.
    SetVector<Value *> LiveTmp = LiveOut;
    LiveTmp.set_union(Data.LiveSet[BB]);
    LiveTmp.set_subtract(Data.KillSet[BB]);

    assert(Data.LiveIn.count(BB));
    const SetVector<Value *> &OldLiveIn = Data.LiveIn[BB];
    // assert: OldLiveIn is a subset of LiveTmp
    if (OldLiveIn.size() != LiveTmp.size()) {
      Data.LiveIn[BB] = LiveTmp;
      Worklist.insert(pred_begin(BB), pred_end(BB));
    }
  } // while (!Worklist.empty())

#ifndef NDEBUG
  // Verify our output against SSA properties. This helps catch any
  // missing kills during the above iteration.
  for (BasicBlock &BB : F)
    checkBasicSSA(DT, Data, BB);
#endif
}

static void findLiveSetAtInst(Instruction *Inst, GCPtrLivenessData &Data,
                              StatepointLiveSetTy &Out) {
  BasicBlock *BB = Inst->getParent();

  // Note: The copy is intentional and required.
  assert(Data.LiveOut.count(BB));
  SetVector<Value *> LiveOut = Data.LiveOut[BB];

  // We want to handle the statepoint itself oddly. Its call result is not
  // live (normal), nor are its arguments (unless they're used again later).
  // The resulting set is specifically the set of values we need to relocate.
  computeLiveInValues(BB->rbegin(), ++Inst->getIterator().getReverse(),
                      LiveOut);
  LiveOut.remove(Inst);
  Out.insert(LiveOut.begin(), LiveOut.end());
}

static void recomputeLiveInValues(GCPtrLivenessData &RevisedLivenessData,
                                  CallBase *Call,
                                  PartiallyConstructedSafepointRecord &Info,
                                  PointerToBaseTy &PointerToBase) {
  StatepointLiveSetTy Updated;
  findLiveSetAtInst(Call, RevisedLivenessData, Updated);

  // We may have base pointers which are now live that weren't before. We
  // need to update the PointerToBase structure to reflect this.
  for (auto V : Updated)
    PointerToBase.insert({V, V});

  Info.LiveSet = Updated;
}