1 //===- RewriteStatepointsForGC.cpp - Make GC relocations explicit ---------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // Rewrite call/invoke instructions so as to make potential relocations 10 // performed by the garbage collector explicit in the IR. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "llvm/Transforms/Scalar/RewriteStatepointsForGC.h" 15 16 #include "llvm/ADT/ArrayRef.h" 17 #include "llvm/ADT/DenseMap.h" 18 #include "llvm/ADT/DenseSet.h" 19 #include "llvm/ADT/MapVector.h" 20 #include "llvm/ADT/None.h" 21 #include "llvm/ADT/Optional.h" 22 #include "llvm/ADT/STLExtras.h" 23 #include "llvm/ADT/SetVector.h" 24 #include "llvm/ADT/SmallSet.h" 25 #include "llvm/ADT/SmallVector.h" 26 #include "llvm/ADT/StringRef.h" 27 #include "llvm/ADT/iterator_range.h" 28 #include "llvm/Analysis/DomTreeUpdater.h" 29 #include "llvm/Analysis/TargetLibraryInfo.h" 30 #include "llvm/Analysis/TargetTransformInfo.h" 31 #include "llvm/IR/Argument.h" 32 #include "llvm/IR/Attributes.h" 33 #include "llvm/IR/BasicBlock.h" 34 #include "llvm/IR/CallingConv.h" 35 #include "llvm/IR/Constant.h" 36 #include "llvm/IR/Constants.h" 37 #include "llvm/IR/DataLayout.h" 38 #include "llvm/IR/DerivedTypes.h" 39 #include "llvm/IR/Dominators.h" 40 #include "llvm/IR/Function.h" 41 #include "llvm/IR/IRBuilder.h" 42 #include "llvm/IR/InstIterator.h" 43 #include "llvm/IR/InstrTypes.h" 44 #include "llvm/IR/Instruction.h" 45 #include "llvm/IR/Instructions.h" 46 #include "llvm/IR/IntrinsicInst.h" 47 #include "llvm/IR/Intrinsics.h" 48 #include "llvm/IR/LLVMContext.h" 49 #include "llvm/IR/MDBuilder.h" 50 #include "llvm/IR/Metadata.h" 51 #include "llvm/IR/Module.h" 52 #include "llvm/IR/Statepoint.h" 53 #include "llvm/IR/Type.h" 54 #include "llvm/IR/User.h" 55 #include "llvm/IR/Value.h" 56 #include "llvm/IR/ValueHandle.h" 57 #include "llvm/InitializePasses.h" 58 #include "llvm/Pass.h" 59 #include "llvm/Support/Casting.h" 60 #include "llvm/Support/CommandLine.h" 61 #include "llvm/Support/Compiler.h" 62 #include "llvm/Support/Debug.h" 63 #include "llvm/Support/ErrorHandling.h" 64 #include "llvm/Support/raw_ostream.h" 65 #include "llvm/Transforms/Scalar.h" 66 #include "llvm/Transforms/Utils/BasicBlockUtils.h" 67 #include "llvm/Transforms/Utils/Local.h" 68 #include "llvm/Transforms/Utils/PromoteMemToReg.h" 69 #include <algorithm> 70 #include <cassert> 71 #include <cstddef> 72 #include <cstdint> 73 #include <iterator> 74 #include <set> 75 #include <string> 76 #include <utility> 77 #include <vector> 78 79 #define DEBUG_TYPE "rewrite-statepoints-for-gc" 80 81 using namespace llvm; 82 83 // Print the liveset found at the insert location 84 static cl::opt<bool> PrintLiveSet("spp-print-liveset", cl::Hidden, 85 cl::init(false)); 86 static cl::opt<bool> PrintLiveSetSize("spp-print-liveset-size", cl::Hidden, 87 cl::init(false)); 88 89 // Print out the base pointers for debugging 90 static cl::opt<bool> PrintBasePointers("spp-print-base-pointers", cl::Hidden, 91 cl::init(false)); 92 93 // Cost threshold measuring when it is profitable to rematerialize value instead 94 // of relocating it 95 static cl::opt<unsigned> 96 RematerializationThreshold("spp-rematerialization-threshold", cl::Hidden, 97 cl::init(6)); 98 99 #ifdef 
EXPENSIVE_CHECKS 100 static bool ClobberNonLive = true; 101 #else 102 static bool ClobberNonLive = false; 103 #endif 104 105 static cl::opt<bool, true> ClobberNonLiveOverride("rs4gc-clobber-non-live", 106 cl::location(ClobberNonLive), 107 cl::Hidden); 108 109 static cl::opt<bool> 110 AllowStatepointWithNoDeoptInfo("rs4gc-allow-statepoint-with-no-deopt-info", 111 cl::Hidden, cl::init(true)); 112 113 /// The IR fed into RewriteStatepointsForGC may have had attributes and 114 /// metadata implying dereferenceability that are no longer valid/correct after 115 /// RewriteStatepointsForGC has run. This is because semantically, after 116 /// RewriteStatepointsForGC runs, all calls to gc.statepoint "free" the entire 117 /// heap. stripNonValidData (conservatively) restores 118 /// correctness by erasing all attributes in the module that externally imply 119 /// dereferenceability. Similar reasoning also applies to the noalias 120 /// attributes and metadata. gc.statepoint can touch the entire heap including 121 /// noalias objects. 122 /// Apart from attributes and metadata, we also remove instructions that imply 123 /// constant physical memory: llvm.invariant.start. 124 static void stripNonValidData(Module &M); 125 126 static bool shouldRewriteStatepointsIn(Function &F); 127 128 PreservedAnalyses RewriteStatepointsForGC::run(Module &M, 129 ModuleAnalysisManager &AM) { 130 bool Changed = false; 131 auto &FAM = AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager(); 132 for (Function &F : M) { 133 // Nothing to do for declarations. 134 if (F.isDeclaration() || F.empty()) 135 continue; 136 137 // Policy choice says not to rewrite - the most common reason is that we're 138 // compiling code without a GCStrategy. 139 if (!shouldRewriteStatepointsIn(F)) 140 continue; 141 142 auto &DT = FAM.getResult<DominatorTreeAnalysis>(F); 143 auto &TTI = FAM.getResult<TargetIRAnalysis>(F); 144 auto &TLI = FAM.getResult<TargetLibraryAnalysis>(F); 145 Changed |= runOnFunction(F, DT, TTI, TLI); 146 } 147 if (!Changed) 148 return PreservedAnalyses::all(); 149 150 // stripNonValidData asserts that shouldRewriteStatepointsIn 151 // returns true for at least one function in the module. Since at least 152 // one function changed, we know that the precondition is satisfied. 153 stripNonValidData(M); 154 155 PreservedAnalyses PA; 156 PA.preserve<TargetIRAnalysis>(); 157 PA.preserve<TargetLibraryAnalysis>(); 158 return PA; 159 } 160 161 namespace { 162 163 class RewriteStatepointsForGCLegacyPass : public ModulePass { 164 RewriteStatepointsForGC Impl; 165 166 public: 167 static char ID; // Pass identification, replacement for typeid 168 169 RewriteStatepointsForGCLegacyPass() : ModulePass(ID), Impl() { 170 initializeRewriteStatepointsForGCLegacyPassPass( 171 *PassRegistry::getPassRegistry()); 172 } 173 174 bool runOnModule(Module &M) override { 175 bool Changed = false; 176 for (Function &F : M) { 177 // Nothing to do for declarations. 178 if (F.isDeclaration() || F.empty()) 179 continue; 180 181 // Policy choice says not to rewrite - the most common reason is that 182 // we're compiling code without a GCStrategy. 
183 if (!shouldRewriteStatepointsIn(F)) 184 continue; 185 186 TargetTransformInfo &TTI = 187 getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 188 const TargetLibraryInfo &TLI = 189 getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F); 190 auto &DT = getAnalysis<DominatorTreeWrapperPass>(F).getDomTree(); 191 192 Changed |= Impl.runOnFunction(F, DT, TTI, TLI); 193 } 194 195 if (!Changed) 196 return false; 197 198 // stripNonValidData asserts that shouldRewriteStatepointsIn 199 // returns true for at least one function in the module. Since at least 200 // one function changed, we know that the precondition is satisfied. 201 stripNonValidData(M); 202 return true; 203 } 204 205 void getAnalysisUsage(AnalysisUsage &AU) const override { 206 // We add and rewrite a bunch of instructions, but don't really do much 207 // else. We could in theory preserve a lot more analyses here. 208 AU.addRequired<DominatorTreeWrapperPass>(); 209 AU.addRequired<TargetTransformInfoWrapperPass>(); 210 AU.addRequired<TargetLibraryInfoWrapperPass>(); 211 } 212 }; 213 214 } // end anonymous namespace 215 216 char RewriteStatepointsForGCLegacyPass::ID = 0; 217 218 ModulePass *llvm::createRewriteStatepointsForGCLegacyPass() { 219 return new RewriteStatepointsForGCLegacyPass(); 220 } 221 222 INITIALIZE_PASS_BEGIN(RewriteStatepointsForGCLegacyPass, 223 "rewrite-statepoints-for-gc", 224 "Make relocations explicit at statepoints", false, false) 225 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 226 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 227 INITIALIZE_PASS_END(RewriteStatepointsForGCLegacyPass, 228 "rewrite-statepoints-for-gc", 229 "Make relocations explicit at statepoints", false, false) 230 231 namespace { 232 233 struct GCPtrLivenessData { 234 /// Values defined in this block. 235 MapVector<BasicBlock *, SetVector<Value *>> KillSet; 236 237 /// Values used in this block (and thus live); does not included values 238 /// killed within this block. 239 MapVector<BasicBlock *, SetVector<Value *>> LiveSet; 240 241 /// Values live into this basic block (i.e. used by any 242 /// instruction in this basic block or ones reachable from here) 243 MapVector<BasicBlock *, SetVector<Value *>> LiveIn; 244 245 /// Values live out of this basic block (i.e. live into 246 /// any successor block) 247 MapVector<BasicBlock *, SetVector<Value *>> LiveOut; 248 }; 249 250 // The type of the internal cache used inside the findBasePointers family 251 // of functions. From the callers perspective, this is an opaque type and 252 // should not be inspected. 253 // 254 // In the actual implementation this caches two relations: 255 // - The base relation itself (i.e. this pointer is based on that one) 256 // - The base defining value relation (i.e. before base_phi insertion) 257 // Generally, after the execution of a full findBasePointer call, only the 258 // base relation will remain. Internally, we add a mixture of the two 259 // types, then update all the second type to the first type 260 using DefiningValueMapTy = MapVector<Value *, Value *>; 261 using PointerToBaseTy = MapVector<Value *, Value *>; 262 using StatepointLiveSetTy = SetVector<Value *>; 263 using RematerializedValueMapTy = 264 MapVector<AssertingVH<Instruction>, AssertingVH<Value>>; 265 266 struct PartiallyConstructedSafepointRecord { 267 /// The set of values known to be live across this safepoint 268 StatepointLiveSetTy LiveSet; 269 270 /// The *new* gc.statepoint instruction itself. 
This produces the token 271 /// that normal path gc.relocates and the gc.result are tied to. 272 GCStatepointInst *StatepointToken; 273 274 /// Instruction to which exceptional gc relocates are attached 275 /// Makes it easier to iterate through them during relocationViaAlloca. 276 Instruction *UnwindToken; 277 278 /// Record live values we are rematerialized instead of relocating. 279 /// They are not included into 'LiveSet' field. 280 /// Maps rematerialized copy to it's original value. 281 RematerializedValueMapTy RematerializedValues; 282 }; 283 284 struct RematerizlizationCandidateRecord { 285 // Chain from derived pointer to base. 286 SmallVector<Instruction *, 3> ChainToBase; 287 // Original base. 288 Value *RootOfChain; 289 // Cost of chain. 290 InstructionCost Cost; 291 }; 292 using RematCandTy = MapVector<Value *, RematerizlizationCandidateRecord>; 293 294 } // end anonymous namespace 295 296 static ArrayRef<Use> GetDeoptBundleOperands(const CallBase *Call) { 297 Optional<OperandBundleUse> DeoptBundle = 298 Call->getOperandBundle(LLVMContext::OB_deopt); 299 300 if (!DeoptBundle.hasValue()) { 301 assert(AllowStatepointWithNoDeoptInfo && 302 "Found non-leaf call without deopt info!"); 303 return None; 304 } 305 306 return DeoptBundle.getValue().Inputs; 307 } 308 309 /// Compute the live-in set for every basic block in the function 310 static void computeLiveInValues(DominatorTree &DT, Function &F, 311 GCPtrLivenessData &Data); 312 313 /// Given results from the dataflow liveness computation, find the set of live 314 /// Values at a particular instruction. 315 static void findLiveSetAtInst(Instruction *inst, GCPtrLivenessData &Data, 316 StatepointLiveSetTy &out); 317 318 // TODO: Once we can get to the GCStrategy, this becomes 319 // Optional<bool> isGCManagedPointer(const Type *Ty) const override { 320 321 static bool isGCPointerType(Type *T) { 322 if (auto *PT = dyn_cast<PointerType>(T)) 323 // For the sake of this example GC, we arbitrarily pick addrspace(1) as our 324 // GC managed heap. We know that a pointer into this heap needs to be 325 // updated and that no other pointer does. 326 return PT->getAddressSpace() == 1; 327 return false; 328 } 329 330 // Return true if this type is one which a) is a gc pointer or contains a GC 331 // pointer and b) is of a type this code expects to encounter as a live value. 332 // (The insertion code will assert that a type which matches (a) and not (b) 333 // is not encountered.) 334 static bool isHandledGCPointerType(Type *T) { 335 // We fully support gc pointers 336 if (isGCPointerType(T)) 337 return true; 338 // We partially support vectors of gc pointers. The code will assert if it 339 // can't handle something. 340 if (auto VT = dyn_cast<VectorType>(T)) 341 if (isGCPointerType(VT->getElementType())) 342 return true; 343 return false; 344 } 345 346 #ifndef NDEBUG 347 /// Returns true if this type contains a gc pointer whether we know how to 348 /// handle that type or not. 
349 static bool containsGCPtrType(Type *Ty) { 350 if (isGCPointerType(Ty)) 351 return true; 352 if (VectorType *VT = dyn_cast<VectorType>(Ty)) 353 return isGCPointerType(VT->getScalarType()); 354 if (ArrayType *AT = dyn_cast<ArrayType>(Ty)) 355 return containsGCPtrType(AT->getElementType()); 356 if (StructType *ST = dyn_cast<StructType>(Ty)) 357 return llvm::any_of(ST->elements(), containsGCPtrType); 358 return false; 359 } 360 361 // Returns true if this is a type which a) is a gc pointer or contains a GC 362 // pointer and b) is of a type which the code doesn't expect (i.e. first class 363 // aggregates). Used to trip assertions. 364 static bool isUnhandledGCPointerType(Type *Ty) { 365 return containsGCPtrType(Ty) && !isHandledGCPointerType(Ty); 366 } 367 #endif 368 369 // Return the name of the value suffixed with the provided value, or if the 370 // value didn't have a name, the default value specified. 371 static std::string suffixed_name_or(Value *V, StringRef Suffix, 372 StringRef DefaultName) { 373 return V->hasName() ? (V->getName() + Suffix).str() : DefaultName.str(); 374 } 375 376 // Conservatively identifies any definitions which might be live at the 377 // given instruction. The analysis is performed immediately before the 378 // given instruction. Values defined by that instruction are not considered 379 // live. Values used by that instruction are considered live. 380 static void analyzeParsePointLiveness( 381 DominatorTree &DT, GCPtrLivenessData &OriginalLivenessData, CallBase *Call, 382 PartiallyConstructedSafepointRecord &Result) { 383 StatepointLiveSetTy LiveSet; 384 findLiveSetAtInst(Call, OriginalLivenessData, LiveSet); 385 386 if (PrintLiveSet) { 387 dbgs() << "Live Variables:\n"; 388 for (Value *V : LiveSet) 389 dbgs() << " " << V->getName() << " " << *V << "\n"; 390 } 391 if (PrintLiveSetSize) { 392 dbgs() << "Safepoint For: " << Call->getCalledOperand()->getName() << "\n"; 393 dbgs() << "Number live values: " << LiveSet.size() << "\n"; 394 } 395 Result.LiveSet = LiveSet; 396 } 397 398 // Returns true is V is a knownBaseResult. 399 static bool isKnownBaseResult(Value *V); 400 401 // Returns true if V is a BaseResult that already exists in the IR, i.e. it is 402 // not created by the findBasePointers algorithm. 403 static bool isOriginalBaseResult(Value *V); 404 405 namespace { 406 407 /// A single base defining value - An immediate base defining value for an 408 /// instruction 'Def' is an input to 'Def' whose base is also a base of 'Def'. 409 /// For instructions which have multiple pointer [vector] inputs or that 410 /// transition between vector and scalar types, there is no immediate base 411 /// defining value. The 'base defining value' for 'Def' is the transitive 412 /// closure of this relation stopping at the first instruction which has no 413 /// immediate base defining value. The b.d.v. might itself be a base pointer, 414 /// but it can also be an arbitrary derived pointer. 415 struct BaseDefiningValueResult { 416 /// Contains the value which is the base defining value. 417 Value * const BDV; 418 419 /// True if the base defining value is also known to be an actual base 420 /// pointer. 421 const bool IsKnownBase; 422 423 BaseDefiningValueResult(Value *BDV, bool IsKnownBase) 424 : BDV(BDV), IsKnownBase(IsKnownBase) { 425 #ifndef NDEBUG 426 // Check consistency between new and old means of checking whether a BDV is 427 // a base. 
    bool MustBeBase = isKnownBaseResult(BDV);
    assert(!MustBeBase || MustBeBase == IsKnownBase);
#endif
  }
};

} // end anonymous namespace

static BaseDefiningValueResult findBaseDefiningValue(Value *I);

/// Return a base defining value for the given vector instruction 'I'. As an
/// optimization, this method will try to determine when the element is known
/// to already be a base pointer. If this can be established, the IsKnownBase
/// field of the returned result will be true. Note that either a vector or a
/// pointer typed value can be returned. For the former, the vector returned
/// is a BDV (and possibly a base) of the entire vector 'I'. If the latter,
/// the returned pointer is a BDV (or possibly a base) for the particular
/// element in 'I'.
static BaseDefiningValueResult
findBaseDefiningValueOfVector(Value *I) {
  // Each case parallels findBaseDefiningValue below, see that code for
  // detailed motivation.

  if (isa<Argument>(I))
    // An incoming argument to the function is a base pointer
    return BaseDefiningValueResult(I, true);

  if (isa<Constant>(I))
    // Base of constant vector consists only of constant null pointers.
    // For reasoning see similar case inside 'findBaseDefiningValue' function.
    return BaseDefiningValueResult(ConstantAggregateZero::get(I->getType()),
                                   true);

  if (isa<LoadInst>(I))
    return BaseDefiningValueResult(I, true);

  if (isa<InsertElementInst>(I))
    // We don't know whether this vector contains entirely base pointers or
    // not. To be conservatively correct, we treat it as a BDV and will
    // duplicate code as needed to construct a parallel vector of bases.
    return BaseDefiningValueResult(I, false);

  if (isa<ShuffleVectorInst>(I))
    // We don't know whether this vector contains entirely base pointers or
    // not. To be conservatively correct, we treat it as a BDV and will
    // duplicate code as needed to construct a parallel vector of bases.
    // TODO: There are a number of local optimizations which could be applied
    // here for particular shufflevector patterns.
    return BaseDefiningValueResult(I, false);

  // The behavior of getelementptr instructions is the same for vector and
  // non-vector data types.
  if (auto *GEP = dyn_cast<GetElementPtrInst>(I))
    return findBaseDefiningValue(GEP->getPointerOperand());

  // If the pointer comes through a bitcast of a vector of pointers to
  // a vector of another type of pointer, then look through the bitcast
  if (auto *BC = dyn_cast<BitCastInst>(I))
    return findBaseDefiningValue(BC->getOperand(0));

  // We assume that functions in the source language only return base
  // pointers. This should probably be generalized via attributes to support
  // both source language and internal functions.
  if (isa<CallInst>(I) || isa<InvokeInst>(I))
    return BaseDefiningValueResult(I, true);

  // A PHI or Select is a base defining value. The outer findBasePointer
  // algorithm is responsible for constructing a base value for this BDV.
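  // Illustrative sketch only (hypothetical IR, names invented for this
  // comment): given
  //   %merged = phi <2 x i8 addrspace(1)*> [ %vec.a, %bb1 ], [ %vec.b, %bb2 ]
  // we return %merged itself with IsKnownBase = false; if %vec.a and %vec.b
  // turn out to have different bases, findBasePointer later materializes a
  // parallel "base_phi" over their base vectors.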
497 assert((isa<SelectInst>(I) || isa<PHINode>(I)) && 498 "unknown vector instruction - no base found for vector element"); 499 return BaseDefiningValueResult(I, false); 500 } 501 502 /// Helper function for findBasePointer - Will return a value which either a) 503 /// defines the base pointer for the input, b) blocks the simple search 504 /// (i.e. a PHI or Select of two derived pointers), or c) involves a change 505 /// from pointer to vector type or back. 506 static BaseDefiningValueResult findBaseDefiningValue(Value *I) { 507 assert(I->getType()->isPtrOrPtrVectorTy() && 508 "Illegal to ask for the base pointer of a non-pointer type"); 509 510 if (I->getType()->isVectorTy()) 511 return findBaseDefiningValueOfVector(I); 512 513 if (isa<Argument>(I)) 514 // An incoming argument to the function is a base pointer 515 // We should have never reached here if this argument isn't an gc value 516 return BaseDefiningValueResult(I, true); 517 518 if (isa<Constant>(I)) { 519 // We assume that objects with a constant base (e.g. a global) can't move 520 // and don't need to be reported to the collector because they are always 521 // live. Besides global references, all kinds of constants (e.g. undef, 522 // constant expressions, null pointers) can be introduced by the inliner or 523 // the optimizer, especially on dynamically dead paths. 524 // Here we treat all of them as having single null base. By doing this we 525 // trying to avoid problems reporting various conflicts in a form of 526 // "phi (const1, const2)" or "phi (const, regular gc ptr)". 527 // See constant.ll file for relevant test cases. 528 529 return BaseDefiningValueResult( 530 ConstantPointerNull::get(cast<PointerType>(I->getType())), true); 531 } 532 533 // inttoptrs in an integral address space are currently ill-defined. We 534 // treat them as defining base pointers here for consistency with the 535 // constant rule above and because we don't really have a better semantic 536 // to give them. Note that the optimizer is always free to insert undefined 537 // behavior on dynamically dead paths as well. 538 if (isa<IntToPtrInst>(I)) 539 return BaseDefiningValueResult(I, true); 540 541 if (CastInst *CI = dyn_cast<CastInst>(I)) { 542 Value *Def = CI->stripPointerCasts(); 543 // If stripping pointer casts changes the address space there is an 544 // addrspacecast in between. 545 assert(cast<PointerType>(Def->getType())->getAddressSpace() == 546 cast<PointerType>(CI->getType())->getAddressSpace() && 547 "unsupported addrspacecast"); 548 // If we find a cast instruction here, it means we've found a cast which is 549 // not simply a pointer cast (i.e. an inttoptr). We don't know how to 550 // handle int->ptr conversion. 
551 assert(!isa<CastInst>(Def) && "shouldn't find another cast here"); 552 return findBaseDefiningValue(Def); 553 } 554 555 if (isa<LoadInst>(I)) 556 // The value loaded is an gc base itself 557 return BaseDefiningValueResult(I, true); 558 559 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) 560 // The base of this GEP is the base 561 return findBaseDefiningValue(GEP->getPointerOperand()); 562 563 if (auto *Freeze = dyn_cast<FreezeInst>(I)) 564 return findBaseDefiningValue(Freeze->getOperand(0)); 565 566 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { 567 switch (II->getIntrinsicID()) { 568 default: 569 // fall through to general call handling 570 break; 571 case Intrinsic::experimental_gc_statepoint: 572 llvm_unreachable("statepoints don't produce pointers"); 573 case Intrinsic::experimental_gc_relocate: 574 // Rerunning safepoint insertion after safepoints are already 575 // inserted is not supported. It could probably be made to work, 576 // but why are you doing this? There's no good reason. 577 llvm_unreachable("repeat safepoint insertion is not supported"); 578 case Intrinsic::gcroot: 579 // Currently, this mechanism hasn't been extended to work with gcroot. 580 // There's no reason it couldn't be, but I haven't thought about the 581 // implications much. 582 llvm_unreachable( 583 "interaction with the gcroot mechanism is not supported"); 584 case Intrinsic::experimental_gc_get_pointer_base: 585 return findBaseDefiningValue(II->getOperand(0)); 586 } 587 } 588 // We assume that functions in the source language only return base 589 // pointers. This should probably be generalized via attributes to support 590 // both source language and internal functions. 591 if (isa<CallInst>(I) || isa<InvokeInst>(I)) 592 return BaseDefiningValueResult(I, true); 593 594 // TODO: I have absolutely no idea how to implement this part yet. It's not 595 // necessarily hard, I just haven't really looked at it yet. 596 assert(!isa<LandingPadInst>(I) && "Landing Pad is unimplemented"); 597 598 if (isa<AtomicCmpXchgInst>(I)) 599 // A CAS is effectively a atomic store and load combined under a 600 // predicate. From the perspective of base pointers, we just treat it 601 // like a load. 602 return BaseDefiningValueResult(I, true); 603 604 assert(!isa<AtomicRMWInst>(I) && "Xchg handled above, all others are " 605 "binary ops which don't apply to pointers"); 606 607 // The aggregate ops. Aggregates can either be in the heap or on the 608 // stack, but in either case, this is simply a field load. As a result, 609 // this is a defining definition of the base just like a load is. 610 if (isa<ExtractValueInst>(I)) 611 return BaseDefiningValueResult(I, true); 612 613 // We should never see an insert vector since that would require we be 614 // tracing back a struct value not a pointer value. 615 assert(!isa<InsertValueInst>(I) && 616 "Base pointer for a struct is meaningless"); 617 618 // This value might have been generated by findBasePointer() called when 619 // substituting gc.get.pointer.base() intrinsic. 620 bool IsKnownBase = 621 isa<Instruction>(I) && cast<Instruction>(I)->getMetadata("is_base_value"); 622 623 // An extractelement produces a base result exactly when it's input does. 624 // We may need to insert a parallel instruction to extract the appropriate 625 // element out of the base vector corresponding to the input. Given this, 626 // it's analogous to the phi and select case even though it's not a merge. 
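  // For illustration only (hypothetical IR): for
  //   %p = extractelement <2 x i8 addrspace(1)*> %derived.vec, i32 0
  // the base of %p is an extractelement of the same index from whatever base
  // vector the algorithm computes for %derived.vec.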
627 if (isa<ExtractElementInst>(I)) 628 // Note: There a lot of obvious peephole cases here. This are deliberately 629 // handled after the main base pointer inference algorithm to make writing 630 // test cases to exercise that code easier. 631 return BaseDefiningValueResult(I, IsKnownBase); 632 633 // The last two cases here don't return a base pointer. Instead, they 634 // return a value which dynamically selects from among several base 635 // derived pointers (each with it's own base potentially). It's the job of 636 // the caller to resolve these. 637 assert((isa<SelectInst>(I) || isa<PHINode>(I)) && 638 "missing instruction case in findBaseDefiningValue"); 639 return BaseDefiningValueResult(I, IsKnownBase); 640 } 641 642 /// Returns the base defining value for this value. 643 static Value *findBaseDefiningValueCached(Value *I, DefiningValueMapTy &Cache) { 644 Value *&Cached = Cache[I]; 645 if (!Cached) { 646 Cached = findBaseDefiningValue(I).BDV; 647 LLVM_DEBUG(dbgs() << "fBDV-cached: " << I->getName() << " -> " 648 << Cached->getName() << "\n"); 649 } 650 assert(Cache[I] != nullptr); 651 return Cached; 652 } 653 654 /// Return a base pointer for this value if known. Otherwise, return it's 655 /// base defining value. 656 static Value *findBaseOrBDV(Value *I, DefiningValueMapTy &Cache) { 657 Value *Def = findBaseDefiningValueCached(I, Cache); 658 auto Found = Cache.find(Def); 659 if (Found != Cache.end()) { 660 // Either a base-of relation, or a self reference. Caller must check. 661 return Found->second; 662 } 663 // Only a BDV available 664 return Def; 665 } 666 667 /// This value is a base pointer that is not generated by RS4GC, i.e. it already 668 /// exists in the code. 669 static bool isOriginalBaseResult(Value *V) { 670 // no recursion possible 671 return !isa<PHINode>(V) && !isa<SelectInst>(V) && 672 !isa<ExtractElementInst>(V) && !isa<InsertElementInst>(V) && 673 !isa<ShuffleVectorInst>(V); 674 } 675 676 /// Given the result of a call to findBaseDefiningValue, or findBaseOrBDV, 677 /// is it known to be a base pointer? Or do we need to continue searching. 678 static bool isKnownBaseResult(Value *V) { 679 if (isOriginalBaseResult(V)) 680 return true; 681 if (isa<Instruction>(V) && 682 cast<Instruction>(V)->getMetadata("is_base_value")) { 683 // This is a previously inserted base phi or select. We know 684 // that this is a base value. 685 return true; 686 } 687 688 // We need to keep searching 689 return false; 690 } 691 692 // Returns true if First and Second values are both scalar or both vector. 693 static bool areBothVectorOrScalar(Value *First, Value *Second) { 694 return isa<VectorType>(First->getType()) == 695 isa<VectorType>(Second->getType()); 696 } 697 698 namespace { 699 700 /// Models the state of a single base defining value in the findBasePointer 701 /// algorithm for determining where a new instruction is needed to propagate 702 /// the base of this BDV. 703 class BDVState { 704 public: 705 enum StatusTy { 706 // Starting state of lattice 707 Unknown, 708 // Some specific base value -- does *not* mean that instruction 709 // propagates the base of the object 710 // ex: gep %arg, 16 -> %arg is the base value 711 Base, 712 // Need to insert a node to represent a merge. 
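    // ex (hypothetical): phi(%gep.a, %gep.b) where the incoming values have
    // different bases -> a new base phi over those bases must be inserted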
713 Conflict 714 }; 715 716 BDVState() { 717 llvm_unreachable("missing state in map"); 718 } 719 720 explicit BDVState(Value *OriginalValue) 721 : OriginalValue(OriginalValue) {} 722 explicit BDVState(Value *OriginalValue, StatusTy Status, Value *BaseValue = nullptr) 723 : OriginalValue(OriginalValue), Status(Status), BaseValue(BaseValue) { 724 assert(Status != Base || BaseValue); 725 } 726 727 StatusTy getStatus() const { return Status; } 728 Value *getOriginalValue() const { return OriginalValue; } 729 Value *getBaseValue() const { return BaseValue; } 730 731 bool isBase() const { return getStatus() == Base; } 732 bool isUnknown() const { return getStatus() == Unknown; } 733 bool isConflict() const { return getStatus() == Conflict; } 734 735 // Values of type BDVState form a lattice, and this function implements the 736 // meet 737 // operation. 738 void meet(const BDVState &Other) { 739 auto markConflict = [&]() { 740 Status = BDVState::Conflict; 741 BaseValue = nullptr; 742 }; 743 // Conflict is a final state. 744 if (isConflict()) 745 return; 746 // if we are not known - just take other state. 747 if (isUnknown()) { 748 Status = Other.getStatus(); 749 BaseValue = Other.getBaseValue(); 750 return; 751 } 752 // We are base. 753 assert(isBase() && "Unknown state"); 754 // If other is unknown - just keep our state. 755 if (Other.isUnknown()) 756 return; 757 // If other is conflict - it is a final state. 758 if (Other.isConflict()) 759 return markConflict(); 760 // Other is base as well. 761 assert(Other.isBase() && "Unknown state"); 762 // If bases are different - Conflict. 763 if (getBaseValue() != Other.getBaseValue()) 764 return markConflict(); 765 // We are identical, do nothing. 766 } 767 768 bool operator==(const BDVState &Other) const { 769 return OriginalValue == Other.OriginalValue && BaseValue == Other.BaseValue && 770 Status == Other.Status; 771 } 772 773 bool operator!=(const BDVState &other) const { return !(*this == other); } 774 775 LLVM_DUMP_METHOD 776 void dump() const { 777 print(dbgs()); 778 dbgs() << '\n'; 779 } 780 781 void print(raw_ostream &OS) const { 782 switch (getStatus()) { 783 case Unknown: 784 OS << "U"; 785 break; 786 case Base: 787 OS << "B"; 788 break; 789 case Conflict: 790 OS << "C"; 791 break; 792 } 793 OS << " (base " << getBaseValue() << " - " 794 << (getBaseValue() ? getBaseValue()->getName() : "nullptr") << ")" 795 << " for " << OriginalValue->getName() << ":"; 796 } 797 798 private: 799 AssertingVH<Value> OriginalValue; // instruction this state corresponds to 800 StatusTy Status = Unknown; 801 AssertingVH<Value> BaseValue = nullptr; // Non-null only if Status == Base. 802 }; 803 804 } // end anonymous namespace 805 806 #ifndef NDEBUG 807 static raw_ostream &operator<<(raw_ostream &OS, const BDVState &State) { 808 State.print(OS); 809 return OS; 810 } 811 #endif 812 813 /// For a given value or instruction, figure out what base ptr its derived from. 814 /// For gc objects, this is simply itself. On success, returns a value which is 815 /// the base pointer. (This is reliable and can be used for relocation.) On 816 /// failure, returns nullptr. 817 static Value *findBasePointer(Value *I, DefiningValueMapTy &Cache) { 818 Value *Def = findBaseOrBDV(I, Cache); 819 820 if (isKnownBaseResult(Def) && areBothVectorOrScalar(Def, I)) 821 return Def; 822 823 // Here's the rough algorithm: 824 // - For every SSA value, construct a mapping to either an actual base 825 // pointer or a PHI which obscures the base pointer. 
826 // - Construct a mapping from PHI to unknown TOP state. Use an 827 // optimistic algorithm to propagate base pointer information. Lattice 828 // looks like: 829 // UNKNOWN 830 // b1 b2 b3 b4 831 // CONFLICT 832 // When algorithm terminates, all PHIs will either have a single concrete 833 // base or be in a conflict state. 834 // - For every conflict, insert a dummy PHI node without arguments. Add 835 // these to the base[Instruction] = BasePtr mapping. For every 836 // non-conflict, add the actual base. 837 // - For every conflict, add arguments for the base[a] of each input 838 // arguments. 839 // 840 // Note: A simpler form of this would be to add the conflict form of all 841 // PHIs without running the optimistic algorithm. This would be 842 // analogous to pessimistic data flow and would likely lead to an 843 // overall worse solution. 844 845 #ifndef NDEBUG 846 auto isExpectedBDVType = [](Value *BDV) { 847 return isa<PHINode>(BDV) || isa<SelectInst>(BDV) || 848 isa<ExtractElementInst>(BDV) || isa<InsertElementInst>(BDV) || 849 isa<ShuffleVectorInst>(BDV); 850 }; 851 #endif 852 853 // Once populated, will contain a mapping from each potentially non-base BDV 854 // to a lattice value (described above) which corresponds to that BDV. 855 // We use the order of insertion (DFS over the def/use graph) to provide a 856 // stable deterministic ordering for visiting DenseMaps (which are unordered) 857 // below. This is important for deterministic compilation. 858 MapVector<Value *, BDVState> States; 859 860 #ifndef NDEBUG 861 auto VerifyStates = [&]() { 862 for (auto &Entry : States) { 863 assert(Entry.first == Entry.second.getOriginalValue()); 864 } 865 }; 866 #endif 867 868 auto visitBDVOperands = [](Value *BDV, std::function<void (Value*)> F) { 869 if (PHINode *PN = dyn_cast<PHINode>(BDV)) { 870 for (Value *InVal : PN->incoming_values()) 871 F(InVal); 872 } else if (SelectInst *SI = dyn_cast<SelectInst>(BDV)) { 873 F(SI->getTrueValue()); 874 F(SI->getFalseValue()); 875 } else if (auto *EE = dyn_cast<ExtractElementInst>(BDV)) { 876 F(EE->getVectorOperand()); 877 } else if (auto *IE = dyn_cast<InsertElementInst>(BDV)) { 878 F(IE->getOperand(0)); 879 F(IE->getOperand(1)); 880 } else if (auto *SV = dyn_cast<ShuffleVectorInst>(BDV)) { 881 // For a canonical broadcast, ignore the undef argument 882 // (without this, we insert a parallel base shuffle for every broadcast) 883 F(SV->getOperand(0)); 884 if (!SV->isZeroEltSplat()) 885 F(SV->getOperand(1)); 886 } else { 887 llvm_unreachable("unexpected BDV type"); 888 } 889 }; 890 891 892 // Recursively fill in all base defining values reachable from the initial 893 // one for which we don't already know a definite base value for 894 /* scope */ { 895 SmallVector<Value*, 16> Worklist; 896 Worklist.push_back(Def); 897 States.insert({Def, BDVState(Def)}); 898 while (!Worklist.empty()) { 899 Value *Current = Worklist.pop_back_val(); 900 assert(!isOriginalBaseResult(Current) && "why did it get added?"); 901 902 auto visitIncomingValue = [&](Value *InVal) { 903 Value *Base = findBaseOrBDV(InVal, Cache); 904 if (isKnownBaseResult(Base) && areBothVectorOrScalar(Base, InVal)) 905 // Known bases won't need new instructions introduced and can be 906 // ignored safely. However, this can only be done when InVal and Base 907 // are both scalar or both vector. Otherwise, we need to find a 908 // correct BDV for InVal, by creating an entry in the lattice 909 // (States). 
910 return; 911 assert(isExpectedBDVType(Base) && "the only non-base values " 912 "we see should be base defining values"); 913 if (States.insert(std::make_pair(Base, BDVState(Base))).second) 914 Worklist.push_back(Base); 915 }; 916 917 visitBDVOperands(Current, visitIncomingValue); 918 } 919 } 920 921 #ifndef NDEBUG 922 VerifyStates(); 923 LLVM_DEBUG(dbgs() << "States after initialization:\n"); 924 for (const auto &Pair : States) { 925 LLVM_DEBUG(dbgs() << " " << Pair.second << " for " << *Pair.first << "\n"); 926 } 927 #endif 928 929 // Iterate forward through the value graph pruning any node from the state 930 // list where all of the inputs are base pointers. The purpose of this is to 931 // reuse existing values when the derived pointer we were asked to materialize 932 // a base pointer for happens to be a base pointer itself. (Or a sub-graph 933 // feeding it does.) 934 SmallVector<Value *> ToRemove; 935 do { 936 ToRemove.clear(); 937 for (auto Pair : States) { 938 Value *BDV = Pair.first; 939 auto canPruneInput = [&](Value *V) { 940 // If the input of the BDV is the BDV itself we can prune it. This is 941 // only possible if the BDV is a PHI node. 942 if (V->stripPointerCasts() == BDV) 943 return true; 944 Value *VBDV = findBaseOrBDV(V, Cache); 945 if (V->stripPointerCasts() != VBDV) 946 return false; 947 // The assumption is that anything not in the state list is 948 // propagates a base pointer. 949 return States.count(VBDV) == 0; 950 }; 951 952 bool CanPrune = true; 953 visitBDVOperands(BDV, [&](Value *Op) { 954 CanPrune = CanPrune && canPruneInput(Op); 955 }); 956 if (CanPrune) 957 ToRemove.push_back(BDV); 958 } 959 for (Value *V : ToRemove) { 960 States.erase(V); 961 // Cache the fact V is it's own base for later usage. 962 Cache[V] = V; 963 } 964 } while (!ToRemove.empty()); 965 966 // Did we manage to prove that Def itself must be a base pointer? 967 if (!States.count(Def)) 968 return Def; 969 970 // Return a phi state for a base defining value. We'll generate a new 971 // base state for known bases and expect to find a cached state otherwise. 972 auto GetStateForBDV = [&](Value *BaseValue, Value *Input) { 973 auto I = States.find(BaseValue); 974 if (I != States.end()) 975 return I->second; 976 assert(areBothVectorOrScalar(BaseValue, Input)); 977 return BDVState(BaseValue, BDVState::Base, BaseValue); 978 }; 979 980 bool Progress = true; 981 while (Progress) { 982 #ifndef NDEBUG 983 const size_t OldSize = States.size(); 984 #endif 985 Progress = false; 986 // We're only changing values in this loop, thus safe to keep iterators. 987 // Since this is computing a fixed point, the order of visit does not 988 // effect the result. TODO: We could use a worklist here and make this run 989 // much faster. 990 for (auto Pair : States) { 991 Value *BDV = Pair.first; 992 // Only values that do not have known bases or those that have differing 993 // type (scalar versus vector) from a possible known base should be in the 994 // lattice. 
995 assert((!isKnownBaseResult(BDV) || 996 !areBothVectorOrScalar(BDV, Pair.second.getBaseValue())) && 997 "why did it get added?"); 998 999 BDVState NewState(BDV); 1000 visitBDVOperands(BDV, [&](Value *Op) { 1001 Value *BDV = findBaseOrBDV(Op, Cache); 1002 auto OpState = GetStateForBDV(BDV, Op); 1003 NewState.meet(OpState); 1004 }); 1005 1006 BDVState OldState = States[BDV]; 1007 if (OldState != NewState) { 1008 Progress = true; 1009 States[BDV] = NewState; 1010 } 1011 } 1012 1013 assert(OldSize == States.size() && 1014 "fixed point shouldn't be adding any new nodes to state"); 1015 } 1016 1017 #ifndef NDEBUG 1018 VerifyStates(); 1019 LLVM_DEBUG(dbgs() << "States after meet iteration:\n"); 1020 for (const auto &Pair : States) { 1021 LLVM_DEBUG(dbgs() << " " << Pair.second << " for " << *Pair.first << "\n"); 1022 } 1023 #endif 1024 1025 // Handle all instructions that have a vector BDV, but the instruction itself 1026 // is of scalar type. 1027 for (auto Pair : States) { 1028 Instruction *I = cast<Instruction>(Pair.first); 1029 BDVState State = Pair.second; 1030 auto *BaseValue = State.getBaseValue(); 1031 // Only values that do not have known bases or those that have differing 1032 // type (scalar versus vector) from a possible known base should be in the 1033 // lattice. 1034 assert((!isKnownBaseResult(I) || !areBothVectorOrScalar(I, BaseValue)) && 1035 "why did it get added?"); 1036 assert(!State.isUnknown() && "Optimistic algorithm didn't complete!"); 1037 1038 if (!State.isBase() || !isa<VectorType>(BaseValue->getType())) 1039 continue; 1040 // extractelement instructions are a bit special in that we may need to 1041 // insert an extract even when we know an exact base for the instruction. 1042 // The problem is that we need to convert from a vector base to a scalar 1043 // base for the particular indice we're interested in. 1044 if (isa<ExtractElementInst>(I)) { 1045 auto *EE = cast<ExtractElementInst>(I); 1046 // TODO: In many cases, the new instruction is just EE itself. We should 1047 // exploit this, but can't do it here since it would break the invariant 1048 // about the BDV not being known to be a base. 1049 auto *BaseInst = ExtractElementInst::Create( 1050 State.getBaseValue(), EE->getIndexOperand(), "base_ee", EE); 1051 BaseInst->setMetadata("is_base_value", MDNode::get(I->getContext(), {})); 1052 States[I] = BDVState(I, BDVState::Base, BaseInst); 1053 } else if (!isa<VectorType>(I->getType())) { 1054 // We need to handle cases that have a vector base but the instruction is 1055 // a scalar type (these could be phis or selects or any instruction that 1056 // are of scalar type, but the base can be a vector type). We 1057 // conservatively set this as conflict. Setting the base value for these 1058 // conflicts is handled in the next loop which traverses States. 1059 States[I] = BDVState(I, BDVState::Conflict); 1060 } 1061 } 1062 1063 #ifndef NDEBUG 1064 VerifyStates(); 1065 #endif 1066 1067 // Insert Phis for all conflicts 1068 // TODO: adjust naming patterns to avoid this order of iteration dependency 1069 for (auto Pair : States) { 1070 Instruction *I = cast<Instruction>(Pair.first); 1071 BDVState State = Pair.second; 1072 // Only values that do not have known bases or those that have differing 1073 // type (scalar versus vector) from a possible known base should be in the 1074 // lattice. 
    assert((!isKnownBaseResult(I) ||
            !areBothVectorOrScalar(I, State.getBaseValue())) &&
           "why did it get added?");
    assert(!State.isUnknown() && "Optimistic algorithm didn't complete!");

    // Since we're joining a vector and scalar base, they can never be the
    // same. As a result, we should always see insert element having reached
    // the conflict state.
    assert(!isa<InsertElementInst>(I) || State.isConflict());

    if (!State.isConflict())
      continue;

    auto getMangledName = [](Instruction *I) -> std::string {
      if (isa<PHINode>(I)) {
        return suffixed_name_or(I, ".base", "base_phi");
      } else if (isa<SelectInst>(I)) {
        return suffixed_name_or(I, ".base", "base_select");
      } else if (isa<ExtractElementInst>(I)) {
        return suffixed_name_or(I, ".base", "base_ee");
      } else if (isa<InsertElementInst>(I)) {
        return suffixed_name_or(I, ".base", "base_ie");
      } else {
        return suffixed_name_or(I, ".base", "base_sv");
      }
    };

    Instruction *BaseInst = I->clone();
    BaseInst->insertBefore(I);
    BaseInst->setName(getMangledName(I));
    // Add metadata marking this as a base value
    BaseInst->setMetadata("is_base_value", MDNode::get(I->getContext(), {}));
    States[I] = BDVState(I, BDVState::Conflict, BaseInst);
  }

#ifndef NDEBUG
  VerifyStates();
#endif

  // Returns an instruction which produces the base pointer for a given
  // instruction. The instruction is assumed to be an input to one of the BDVs
  // seen in the inference algorithm above. As such, we must either already
  // know its base defining value is a base, or have inserted a new
  // instruction to propagate the base of its BDV and have entered that newly
  // introduced instruction into the state table. In either case, we are
  // assured to be able to determine an instruction which produces its base
  // pointer.
  auto getBaseForInput = [&](Value *Input, Instruction *InsertPt) {
    Value *BDV = findBaseOrBDV(Input, Cache);
    Value *Base = nullptr;
    if (!States.count(BDV)) {
      assert(areBothVectorOrScalar(BDV, Input));
      Base = BDV;
    } else {
      // Either conflict or base.
      assert(States.count(BDV));
      Base = States[BDV].getBaseValue();
    }
    assert(Base && "Can't be null");
    // The cast is needed since base traversal may strip away bitcasts
    if (Base->getType() != Input->getType() && InsertPt)
      Base = new BitCastInst(Base, Input->getType(), "cast", InsertPt);
    return Base;
  };

  // Fix up all the inputs of the new PHIs. Visit order needs to be
  // deterministic and predictable because we're naming newly created
  // instructions.
  for (auto Pair : States) {
    Instruction *BDV = cast<Instruction>(Pair.first);
    BDVState State = Pair.second;

    // Only values that do not have known bases or those that have differing
    // type (scalar versus vector) from a possible known base should be in the
    // lattice.
1149 assert((!isKnownBaseResult(BDV) || 1150 !areBothVectorOrScalar(BDV, State.getBaseValue())) && 1151 "why did it get added?"); 1152 assert(!State.isUnknown() && "Optimistic algorithm didn't complete!"); 1153 if (!State.isConflict()) 1154 continue; 1155 1156 if (PHINode *BasePHI = dyn_cast<PHINode>(State.getBaseValue())) { 1157 PHINode *PN = cast<PHINode>(BDV); 1158 const unsigned NumPHIValues = PN->getNumIncomingValues(); 1159 1160 // The IR verifier requires phi nodes with multiple entries from the 1161 // same basic block to have the same incoming value for each of those 1162 // entries. Since we're inserting bitcasts in the loop, make sure we 1163 // do so at least once per incoming block. 1164 DenseMap<BasicBlock *, Value*> BlockToValue; 1165 for (unsigned i = 0; i < NumPHIValues; i++) { 1166 Value *InVal = PN->getIncomingValue(i); 1167 BasicBlock *InBB = PN->getIncomingBlock(i); 1168 if (!BlockToValue.count(InBB)) 1169 BlockToValue[InBB] = getBaseForInput(InVal, InBB->getTerminator()); 1170 else { 1171 #ifndef NDEBUG 1172 Value *OldBase = BlockToValue[InBB]; 1173 Value *Base = getBaseForInput(InVal, nullptr); 1174 1175 // We can't use `stripPointerCasts` instead of this function because 1176 // `stripPointerCasts` doesn't handle vectors of pointers. 1177 auto StripBitCasts = [](Value *V) -> Value * { 1178 while (auto *BC = dyn_cast<BitCastInst>(V)) 1179 V = BC->getOperand(0); 1180 return V; 1181 }; 1182 // In essence this assert states: the only way two values 1183 // incoming from the same basic block may be different is by 1184 // being different bitcasts of the same value. A cleanup 1185 // that remains TODO is changing findBaseOrBDV to return an 1186 // llvm::Value of the correct type (and still remain pure). 1187 // This will remove the need to add bitcasts. 1188 assert(StripBitCasts(Base) == StripBitCasts(OldBase) && 1189 "findBaseOrBDV should be pure!"); 1190 #endif 1191 } 1192 Value *Base = BlockToValue[InBB]; 1193 BasePHI->setIncomingValue(i, Base); 1194 } 1195 } else if (SelectInst *BaseSI = 1196 dyn_cast<SelectInst>(State.getBaseValue())) { 1197 SelectInst *SI = cast<SelectInst>(BDV); 1198 1199 // Find the instruction which produces the base for each input. 1200 // We may need to insert a bitcast. 1201 BaseSI->setTrueValue(getBaseForInput(SI->getTrueValue(), BaseSI)); 1202 BaseSI->setFalseValue(getBaseForInput(SI->getFalseValue(), BaseSI)); 1203 } else if (auto *BaseEE = 1204 dyn_cast<ExtractElementInst>(State.getBaseValue())) { 1205 Value *InVal = cast<ExtractElementInst>(BDV)->getVectorOperand(); 1206 // Find the instruction which produces the base for each input. We may 1207 // need to insert a bitcast. 
1208 BaseEE->setOperand(0, getBaseForInput(InVal, BaseEE)); 1209 } else if (auto *BaseIE = dyn_cast<InsertElementInst>(State.getBaseValue())){ 1210 auto *BdvIE = cast<InsertElementInst>(BDV); 1211 auto UpdateOperand = [&](int OperandIdx) { 1212 Value *InVal = BdvIE->getOperand(OperandIdx); 1213 Value *Base = getBaseForInput(InVal, BaseIE); 1214 BaseIE->setOperand(OperandIdx, Base); 1215 }; 1216 UpdateOperand(0); // vector operand 1217 UpdateOperand(1); // scalar operand 1218 } else { 1219 auto *BaseSV = cast<ShuffleVectorInst>(State.getBaseValue()); 1220 auto *BdvSV = cast<ShuffleVectorInst>(BDV); 1221 auto UpdateOperand = [&](int OperandIdx) { 1222 Value *InVal = BdvSV->getOperand(OperandIdx); 1223 Value *Base = getBaseForInput(InVal, BaseSV); 1224 BaseSV->setOperand(OperandIdx, Base); 1225 }; 1226 UpdateOperand(0); // vector operand 1227 if (!BdvSV->isZeroEltSplat()) 1228 UpdateOperand(1); // vector operand 1229 else { 1230 // Never read, so just use undef 1231 Value *InVal = BdvSV->getOperand(1); 1232 BaseSV->setOperand(1, UndefValue::get(InVal->getType())); 1233 } 1234 } 1235 } 1236 1237 #ifndef NDEBUG 1238 VerifyStates(); 1239 #endif 1240 1241 // Cache all of our results so we can cheaply reuse them 1242 // NOTE: This is actually two caches: one of the base defining value 1243 // relation and one of the base pointer relation! FIXME 1244 for (auto Pair : States) { 1245 auto *BDV = Pair.first; 1246 Value *Base = Pair.second.getBaseValue(); 1247 assert(BDV && Base); 1248 // Only values that do not have known bases or those that have differing 1249 // type (scalar versus vector) from a possible known base should be in the 1250 // lattice. 1251 assert((!isKnownBaseResult(BDV) || !areBothVectorOrScalar(BDV, Base)) && 1252 "why did it get added?"); 1253 1254 LLVM_DEBUG( 1255 dbgs() << "Updating base value cache" 1256 << " for: " << BDV->getName() << " from: " 1257 << (Cache.count(BDV) ? Cache[BDV]->getName().str() : "none") 1258 << " to: " << Base->getName() << "\n"); 1259 1260 Cache[BDV] = Base; 1261 } 1262 assert(Cache.count(Def)); 1263 return Cache[Def]; 1264 } 1265 1266 // For a set of live pointers (base and/or derived), identify the base 1267 // pointer of the object which they are derived from. This routine will 1268 // mutate the IR graph as needed to make the 'base' pointer live at the 1269 // definition site of 'derived'. This ensures that any use of 'derived' can 1270 // also use 'base'. This may involve the insertion of a number of 1271 // additional PHI nodes. 1272 // 1273 // preconditions: live is a set of pointer type Values 1274 // 1275 // side effects: may insert PHI nodes into the existing CFG, will preserve 1276 // CFG, will not remove or mutate any existing nodes 1277 // 1278 // post condition: PointerToBase contains one (derived, base) pair for every 1279 // pointer in live. Note that derived can be equal to base if the original 1280 // pointer was a base pointer. 
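//
// example (hypothetical values): for a live derived pointer
//   %d = getelementptr i8, i8 addrspace(1)* %obj, i64 16
// the expected entry is PointerToBase[%d] = %obj, while a pointer that is
// already a base simply maps to itself.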
static void findBasePointers(const StatepointLiveSetTy &live,
                             PointerToBaseTy &PointerToBase, DominatorTree *DT,
                             DefiningValueMapTy &DVCache) {
  for (Value *ptr : live) {
    Value *base = findBasePointer(ptr, DVCache);
    assert(base && "failed to find base pointer");
    PointerToBase[ptr] = base;
    assert((!isa<Instruction>(base) || !isa<Instruction>(ptr) ||
            DT->dominates(cast<Instruction>(base)->getParent(),
                          cast<Instruction>(ptr)->getParent())) &&
           "The base we found better dominate the derived pointer");
  }
}

/// Find the required base pointers (and adjust the live set) for the given
/// parse point.
static void findBasePointers(DominatorTree &DT, DefiningValueMapTy &DVCache,
                             CallBase *Call,
                             PartiallyConstructedSafepointRecord &result,
                             PointerToBaseTy &PointerToBase) {
  StatepointLiveSetTy PotentiallyDerivedPointers = result.LiveSet;
  // We assume that all pointers passed to deopt are base pointers; as an
  // optimization, we can use this to avoid separately materializing the base
  // pointer graph. This is only relevant since we're very conservative about
  // generating new conflict nodes during base pointer insertion. If we were
  // smarter there, this would be irrelevant.
  if (auto Opt = Call->getOperandBundle(LLVMContext::OB_deopt))
    for (Value *V : Opt->Inputs) {
      if (!PotentiallyDerivedPointers.count(V))
        continue;
      PotentiallyDerivedPointers.remove(V);
      PointerToBase[V] = V;
    }
  findBasePointers(PotentiallyDerivedPointers, PointerToBase, &DT, DVCache);
}

/// Given an updated version of the dataflow liveness results, update the
/// liveset and base pointer maps for the given call site.
static void recomputeLiveInValues(GCPtrLivenessData &RevisedLivenessData,
                                  CallBase *Call,
                                  PartiallyConstructedSafepointRecord &result,
                                  PointerToBaseTy &PointerToBase);

static void recomputeLiveInValues(
    Function &F, DominatorTree &DT, ArrayRef<CallBase *> toUpdate,
    MutableArrayRef<struct PartiallyConstructedSafepointRecord> records,
    PointerToBaseTy &PointerToBase) {
  // TODO-PERF: reuse the original liveness, then simply run the dataflow
  // again. The old values are still live and will help it stabilize quickly.
  GCPtrLivenessData RevisedLivenessData;
  computeLiveInValues(DT, F, RevisedLivenessData);
  for (size_t i = 0; i < records.size(); i++) {
    struct PartiallyConstructedSafepointRecord &info = records[i];
    recomputeLiveInValues(RevisedLivenessData, toUpdate[i], info,
                          PointerToBase);
  }
}

// When inserting gc.relocate and gc.result calls, we need to ensure there are
// no uses of the original value / return value between the gc.statepoint and
// the gc.relocate / gc.result call. One case which can arise is a phi node
// at the start of one of the successor blocks. We also need to be able to
// insert the gc.relocates only on the path which goes through the statepoint.
// We might need to split an edge to make this possible.
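// Sketch of the idea (hypothetical CFG): if an invoke's normal destination
// has a second predecessor, SplitBlockPredecessors below gives the invoke
// edge its own block, so the gc.relocates inserted at the top of that block
// are only visible on the path coming from the statepoint.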
1345 static BasicBlock * 1346 normalizeForInvokeSafepoint(BasicBlock *BB, BasicBlock *InvokeParent, 1347 DominatorTree &DT) { 1348 BasicBlock *Ret = BB; 1349 if (!BB->getUniquePredecessor()) 1350 Ret = SplitBlockPredecessors(BB, InvokeParent, "", &DT); 1351 1352 // Now that 'Ret' has unique predecessor we can safely remove all phi nodes 1353 // from it 1354 FoldSingleEntryPHINodes(Ret); 1355 assert(!isa<PHINode>(Ret->begin()) && 1356 "All PHI nodes should have been removed!"); 1357 1358 // At this point, we can safely insert a gc.relocate or gc.result as the first 1359 // instruction in Ret if needed. 1360 return Ret; 1361 } 1362 1363 // List of all function attributes which must be stripped when lowering from 1364 // abstract machine model to physical machine model. Essentially, these are 1365 // all the effects a safepoint might have which we ignored in the abstract 1366 // machine model for purposes of optimization. We have to strip these on 1367 // both function declarations and call sites. 1368 static constexpr Attribute::AttrKind FnAttrsToStrip[] = 1369 {Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly, 1370 Attribute::ArgMemOnly, Attribute::InaccessibleMemOnly, 1371 Attribute::InaccessibleMemOrArgMemOnly, 1372 Attribute::NoSync, Attribute::NoFree}; 1373 1374 // Create new attribute set containing only attributes which can be transferred 1375 // from original call to the safepoint. 1376 static AttributeList legalizeCallAttributes(LLVMContext &Ctx, 1377 AttributeList OrigAL, 1378 AttributeList StatepointAL) { 1379 if (OrigAL.isEmpty()) 1380 return StatepointAL; 1381 1382 // Remove the readonly, readnone, and statepoint function attributes. 1383 AttrBuilder FnAttrs(Ctx, OrigAL.getFnAttrs()); 1384 for (auto Attr : FnAttrsToStrip) 1385 FnAttrs.removeAttribute(Attr); 1386 1387 for (Attribute A : OrigAL.getFnAttrs()) { 1388 if (isStatepointDirectiveAttr(A)) 1389 FnAttrs.removeAttribute(A); 1390 } 1391 1392 // Just skip parameter and return attributes for now 1393 return StatepointAL.addFnAttributes(Ctx, FnAttrs); 1394 } 1395 1396 /// Helper function to place all gc relocates necessary for the given 1397 /// statepoint. 1398 /// Inputs: 1399 /// liveVariables - list of variables to be relocated. 1400 /// basePtrs - base pointers. 1401 /// statepointToken - statepoint instruction to which relocates should be 1402 /// bound. 1403 /// Builder - Llvm IR builder to be used to construct new calls. 1404 static void CreateGCRelocates(ArrayRef<Value *> LiveVariables, 1405 ArrayRef<Value *> BasePtrs, 1406 Instruction *StatepointToken, 1407 IRBuilder<> &Builder) { 1408 if (LiveVariables.empty()) 1409 return; 1410 1411 auto FindIndex = [](ArrayRef<Value *> LiveVec, Value *Val) { 1412 auto ValIt = llvm::find(LiveVec, Val); 1413 assert(ValIt != LiveVec.end() && "Val not found in LiveVec!"); 1414 size_t Index = std::distance(LiveVec.begin(), ValIt); 1415 assert(Index < LiveVec.size() && "Bug in std::find?"); 1416 return Index; 1417 }; 1418 Module *M = StatepointToken->getModule(); 1419 1420 // All gc_relocate are generated as i8 addrspace(1)* (or a vector type whose 1421 // element type is i8 addrspace(1)*). We originally generated unique 1422 // declarations for each pointer type, but this proved problematic because 1423 // the intrinsic mangling code is incomplete and fragile. Since we're moving 1424 // towards a single unified pointer type anyways, we can just cast everything 1425 // to an i8* of the right address space. 
A bitcast is added later to convert 1426 // gc_relocate to the actual value's type. 1427 auto getGCRelocateDecl = [&] (Type *Ty) { 1428 assert(isHandledGCPointerType(Ty)); 1429 auto AS = Ty->getScalarType()->getPointerAddressSpace(); 1430 Type *NewTy = Type::getInt8PtrTy(M->getContext(), AS); 1431 if (auto *VT = dyn_cast<VectorType>(Ty)) 1432 NewTy = FixedVectorType::get(NewTy, 1433 cast<FixedVectorType>(VT)->getNumElements()); 1434 return Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_relocate, 1435 {NewTy}); 1436 }; 1437 1438 // Lazily populated map from input types to the canonicalized form mentioned 1439 // in the comment above. This should probably be cached somewhere more 1440 // broadly. 1441 DenseMap<Type *, Function *> TypeToDeclMap; 1442 1443 for (unsigned i = 0; i < LiveVariables.size(); i++) { 1444 // Generate the gc.relocate call and save the result 1445 Value *BaseIdx = Builder.getInt32(FindIndex(LiveVariables, BasePtrs[i])); 1446 Value *LiveIdx = Builder.getInt32(i); 1447 1448 Type *Ty = LiveVariables[i]->getType(); 1449 if (!TypeToDeclMap.count(Ty)) 1450 TypeToDeclMap[Ty] = getGCRelocateDecl(Ty); 1451 Function *GCRelocateDecl = TypeToDeclMap[Ty]; 1452 1453 // only specify a debug name if we can give a useful one 1454 CallInst *Reloc = Builder.CreateCall( 1455 GCRelocateDecl, {StatepointToken, BaseIdx, LiveIdx}, 1456 suffixed_name_or(LiveVariables[i], ".relocated", "")); 1457 // Trick CodeGen into thinking there are lots of free registers at this 1458 // fake call. 1459 Reloc->setCallingConv(CallingConv::Cold); 1460 } 1461 } 1462 1463 namespace { 1464 1465 /// This struct is used to defer RAUWs and `eraseFromParent` s. Using this 1466 /// avoids having to worry about keeping around dangling pointers to Values. 1467 class DeferredReplacement { 1468 AssertingVH<Instruction> Old; 1469 AssertingVH<Instruction> New; 1470 bool IsDeoptimize = false; 1471 1472 DeferredReplacement() = default; 1473 1474 public: 1475 static DeferredReplacement createRAUW(Instruction *Old, Instruction *New) { 1476 assert(Old != New && Old && New && 1477 "Cannot RAUW equal values or to / from null!"); 1478 1479 DeferredReplacement D; 1480 D.Old = Old; 1481 D.New = New; 1482 return D; 1483 } 1484 1485 static DeferredReplacement createDelete(Instruction *ToErase) { 1486 DeferredReplacement D; 1487 D.Old = ToErase; 1488 return D; 1489 } 1490 1491 static DeferredReplacement createDeoptimizeReplacement(Instruction *Old) { 1492 #ifndef NDEBUG 1493 auto *F = cast<CallInst>(Old)->getCalledFunction(); 1494 assert(F && F->getIntrinsicID() == Intrinsic::experimental_deoptimize && 1495 "Only way to construct a deoptimize deferred replacement"); 1496 #endif 1497 DeferredReplacement D; 1498 D.Old = Old; 1499 D.IsDeoptimize = true; 1500 return D; 1501 } 1502 1503 /// Does the task represented by this instance. 1504 void doReplacement() { 1505 Instruction *OldI = Old; 1506 Instruction *NewI = New; 1507 1508 assert(OldI != NewI && "Disallowed at construction?!"); 1509 assert((!IsDeoptimize || !New) && 1510 "Deoptimize intrinsics are not replaced!"); 1511 1512 Old = nullptr; 1513 New = nullptr; 1514 1515 if (NewI) 1516 OldI->replaceAllUsesWith(NewI); 1517 1518 if (IsDeoptimize) { 1519 // Note: we've inserted instructions, so the call to llvm.deoptimize may 1520 // not necessarily be followed by the matching return. 
1521 auto *RI = cast<ReturnInst>(OldI->getParent()->getTerminator()); 1522 new UnreachableInst(RI->getContext(), RI); 1523 RI->eraseFromParent(); 1524 } 1525 1526 OldI->eraseFromParent(); 1527 } 1528 }; 1529 1530 } // end anonymous namespace 1531 1532 static StringRef getDeoptLowering(CallBase *Call) { 1533 const char *DeoptLowering = "deopt-lowering"; 1534 if (Call->hasFnAttr(DeoptLowering)) { 1535 // FIXME: Calls have a *really* confusing interface around attributes 1536 // with values. 1537 const AttributeList &CSAS = Call->getAttributes(); 1538 if (CSAS.hasFnAttr(DeoptLowering)) 1539 return CSAS.getFnAttr(DeoptLowering).getValueAsString(); 1540 Function *F = Call->getCalledFunction(); 1541 assert(F && F->hasFnAttribute(DeoptLowering)); 1542 return F->getFnAttribute(DeoptLowering).getValueAsString(); 1543 } 1544 return "live-through"; 1545 } 1546 1547 static void 1548 makeStatepointExplicitImpl(CallBase *Call, /* to replace */ 1549 const SmallVectorImpl<Value *> &BasePtrs, 1550 const SmallVectorImpl<Value *> &LiveVariables, 1551 PartiallyConstructedSafepointRecord &Result, 1552 std::vector<DeferredReplacement> &Replacements, 1553 const PointerToBaseTy &PointerToBase) { 1554 assert(BasePtrs.size() == LiveVariables.size()); 1555 1556 // Then go ahead and use the builder do actually do the inserts. We insert 1557 // immediately before the previous instruction under the assumption that all 1558 // arguments will be available here. We can't insert afterwards since we may 1559 // be replacing a terminator. 1560 IRBuilder<> Builder(Call); 1561 1562 ArrayRef<Value *> GCArgs(LiveVariables); 1563 uint64_t StatepointID = StatepointDirectives::DefaultStatepointID; 1564 uint32_t NumPatchBytes = 0; 1565 uint32_t Flags = uint32_t(StatepointFlags::None); 1566 1567 SmallVector<Value *, 8> CallArgs(Call->args()); 1568 Optional<ArrayRef<Use>> DeoptArgs; 1569 if (auto Bundle = Call->getOperandBundle(LLVMContext::OB_deopt)) 1570 DeoptArgs = Bundle->Inputs; 1571 Optional<ArrayRef<Use>> TransitionArgs; 1572 if (auto Bundle = Call->getOperandBundle(LLVMContext::OB_gc_transition)) { 1573 TransitionArgs = Bundle->Inputs; 1574 // TODO: This flag no longer serves a purpose and can be removed later 1575 Flags |= uint32_t(StatepointFlags::GCTransition); 1576 } 1577 1578 // Instead of lowering calls to @llvm.experimental.deoptimize as normal calls 1579 // with a return value, we lower then as never returning calls to 1580 // __llvm_deoptimize that are followed by unreachable to get better codegen. 1581 bool IsDeoptimize = false; 1582 1583 StatepointDirectives SD = 1584 parseStatepointDirectivesFromAttrs(Call->getAttributes()); 1585 if (SD.NumPatchBytes) 1586 NumPatchBytes = *SD.NumPatchBytes; 1587 if (SD.StatepointID) 1588 StatepointID = *SD.StatepointID; 1589 1590 // Pass through the requested lowering if any. The default is live-through. 1591 StringRef DeoptLowering = getDeoptLowering(Call); 1592 if (DeoptLowering.equals("live-in")) 1593 Flags |= uint32_t(StatepointFlags::DeoptLiveIn); 1594 else { 1595 assert(DeoptLowering.equals("live-through") && "Unsupported value!"); 1596 } 1597 1598 FunctionCallee CallTarget(Call->getFunctionType(), Call->getCalledOperand()); 1599 if (Function *F = dyn_cast<Function>(CallTarget.getCallee())) { 1600 auto IID = F->getIntrinsicID(); 1601 if (IID == Intrinsic::experimental_deoptimize) { 1602 // Calls to llvm.experimental.deoptimize are lowered to calls to the 1603 // __llvm_deoptimize symbol. 
We want to resolve this now, since the 1604 // verifier does not allow taking the address of an intrinsic function. 1605 1606 SmallVector<Type *, 8> DomainTy; 1607 for (Value *Arg : CallArgs) 1608 DomainTy.push_back(Arg->getType()); 1609 auto *FTy = FunctionType::get(Type::getVoidTy(F->getContext()), DomainTy, 1610 /* isVarArg = */ false); 1611 1612 // Note: CallTarget can be a bitcast instruction of a symbol if there are 1613 // calls to @llvm.experimental.deoptimize with different argument types in 1614 // the same module. This is fine -- we assume the frontend knew what it 1615 // was doing when generating this kind of IR. 1616 CallTarget = F->getParent() 1617 ->getOrInsertFunction("__llvm_deoptimize", FTy); 1618 1619 IsDeoptimize = true; 1620 } else if (IID == Intrinsic::memcpy_element_unordered_atomic || 1621 IID == Intrinsic::memmove_element_unordered_atomic) { 1622 // Unordered atomic memcpy and memmove intrinsics which are not explicitly 1623 // marked as "gc-leaf-function" should be lowered in a GC parseable way. 1624 // Specifically, these calls should be lowered to the 1625 // __llvm_{memcpy|memmove}_element_unordered_atomic_safepoint symbols. 1626 // Similarly to __llvm_deoptimize we want to resolve this now, since the 1627 // verifier does not allow taking the address of an intrinsic function. 1628 // 1629 // Moreover we need to shuffle the arguments for the call in order to 1630 // accommodate GC. The underlying source and destination objects might be 1631 // relocated during copy operation should the GC occur. To relocate the 1632 // derived source and destination pointers the implementation of the 1633 // intrinsic should know the corresponding base pointers. 1634 // 1635 // To make the base pointers available pass them explicitly as arguments: 1636 // memcpy(dest_derived, source_derived, ...) => 1637 // memcpy(dest_base, dest_offset, source_base, source_offset, ...) 
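// As an illustrative sketch (value names are hypothetical), a call such as
//   call void @llvm.memcpy.element.unordered.atomic.p1i8.p1i8.i64(
//       i8 addrspace(1)* %dest.derived, i8 addrspace(1)* %src.derived,
//       i64 %len, i32 8)
// ends up targeting __llvm_memcpy_element_unordered_atomic_safepoint_8 with
// the argument list (%dest.base, %dest.offset, %src.base, %src.offset, %len),
// where each offset is computed below as ptrtoint(derived) - ptrtoint(base).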
1638 auto &Context = Call->getContext(); 1639 auto &DL = Call->getModule()->getDataLayout(); 1640 auto GetBaseAndOffset = [&](Value *Derived) { 1641 assert(PointerToBase.count(Derived)); 1642 unsigned AddressSpace = Derived->getType()->getPointerAddressSpace(); 1643 unsigned IntPtrSize = DL.getPointerSizeInBits(AddressSpace); 1644 Value *Base = PointerToBase.find(Derived)->second; 1645 Value *Base_int = Builder.CreatePtrToInt( 1646 Base, Type::getIntNTy(Context, IntPtrSize)); 1647 Value *Derived_int = Builder.CreatePtrToInt( 1648 Derived, Type::getIntNTy(Context, IntPtrSize)); 1649 return std::make_pair(Base, Builder.CreateSub(Derived_int, Base_int)); 1650 }; 1651 1652 auto *Dest = CallArgs[0]; 1653 Value *DestBase, *DestOffset; 1654 std::tie(DestBase, DestOffset) = GetBaseAndOffset(Dest); 1655 1656 auto *Source = CallArgs[1]; 1657 Value *SourceBase, *SourceOffset; 1658 std::tie(SourceBase, SourceOffset) = GetBaseAndOffset(Source); 1659 1660 auto *LengthInBytes = CallArgs[2]; 1661 auto *ElementSizeCI = cast<ConstantInt>(CallArgs[3]); 1662 1663 CallArgs.clear(); 1664 CallArgs.push_back(DestBase); 1665 CallArgs.push_back(DestOffset); 1666 CallArgs.push_back(SourceBase); 1667 CallArgs.push_back(SourceOffset); 1668 CallArgs.push_back(LengthInBytes); 1669 1670 SmallVector<Type *, 8> DomainTy; 1671 for (Value *Arg : CallArgs) 1672 DomainTy.push_back(Arg->getType()); 1673 auto *FTy = FunctionType::get(Type::getVoidTy(F->getContext()), DomainTy, 1674 /* isVarArg = */ false); 1675 1676 auto GetFunctionName = [](Intrinsic::ID IID, ConstantInt *ElementSizeCI) { 1677 uint64_t ElementSize = ElementSizeCI->getZExtValue(); 1678 if (IID == Intrinsic::memcpy_element_unordered_atomic) { 1679 switch (ElementSize) { 1680 case 1: 1681 return "__llvm_memcpy_element_unordered_atomic_safepoint_1"; 1682 case 2: 1683 return "__llvm_memcpy_element_unordered_atomic_safepoint_2"; 1684 case 4: 1685 return "__llvm_memcpy_element_unordered_atomic_safepoint_4"; 1686 case 8: 1687 return "__llvm_memcpy_element_unordered_atomic_safepoint_8"; 1688 case 16: 1689 return "__llvm_memcpy_element_unordered_atomic_safepoint_16"; 1690 default: 1691 llvm_unreachable("unexpected element size!"); 1692 } 1693 } 1694 assert(IID == Intrinsic::memmove_element_unordered_atomic); 1695 switch (ElementSize) { 1696 case 1: 1697 return "__llvm_memmove_element_unordered_atomic_safepoint_1"; 1698 case 2: 1699 return "__llvm_memmove_element_unordered_atomic_safepoint_2"; 1700 case 4: 1701 return "__llvm_memmove_element_unordered_atomic_safepoint_4"; 1702 case 8: 1703 return "__llvm_memmove_element_unordered_atomic_safepoint_8"; 1704 case 16: 1705 return "__llvm_memmove_element_unordered_atomic_safepoint_16"; 1706 default: 1707 llvm_unreachable("unexpected element size!"); 1708 } 1709 }; 1710 1711 CallTarget = 1712 F->getParent() 1713 ->getOrInsertFunction(GetFunctionName(IID, ElementSizeCI), FTy); 1714 } 1715 } 1716 1717 // Create the statepoint given all the arguments 1718 GCStatepointInst *Token = nullptr; 1719 if (auto *CI = dyn_cast<CallInst>(Call)) { 1720 CallInst *SPCall = Builder.CreateGCStatepointCall( 1721 StatepointID, NumPatchBytes, CallTarget, Flags, CallArgs, 1722 TransitionArgs, DeoptArgs, GCArgs, "safepoint_token"); 1723 1724 SPCall->setTailCallKind(CI->getTailCallKind()); 1725 SPCall->setCallingConv(CI->getCallingConv()); 1726 1727 // Currently we will fail on parameter attributes and on certain 1728 // function attributes. 
In case if we can handle this set of attributes - 1729 // set up function attrs directly on statepoint and return attrs later for 1730 // gc_result intrinsic. 1731 SPCall->setAttributes(legalizeCallAttributes( 1732 CI->getContext(), CI->getAttributes(), SPCall->getAttributes())); 1733 1734 Token = cast<GCStatepointInst>(SPCall); 1735 1736 // Put the following gc_result and gc_relocate calls immediately after the 1737 // the old call (which we're about to delete) 1738 assert(CI->getNextNode() && "Not a terminator, must have next!"); 1739 Builder.SetInsertPoint(CI->getNextNode()); 1740 Builder.SetCurrentDebugLocation(CI->getNextNode()->getDebugLoc()); 1741 } else { 1742 auto *II = cast<InvokeInst>(Call); 1743 1744 // Insert the new invoke into the old block. We'll remove the old one in a 1745 // moment at which point this will become the new terminator for the 1746 // original block. 1747 InvokeInst *SPInvoke = Builder.CreateGCStatepointInvoke( 1748 StatepointID, NumPatchBytes, CallTarget, II->getNormalDest(), 1749 II->getUnwindDest(), Flags, CallArgs, TransitionArgs, DeoptArgs, GCArgs, 1750 "statepoint_token"); 1751 1752 SPInvoke->setCallingConv(II->getCallingConv()); 1753 1754 // Currently we will fail on parameter attributes and on certain 1755 // function attributes. In case if we can handle this set of attributes - 1756 // set up function attrs directly on statepoint and return attrs later for 1757 // gc_result intrinsic. 1758 SPInvoke->setAttributes(legalizeCallAttributes( 1759 II->getContext(), II->getAttributes(), SPInvoke->getAttributes())); 1760 1761 Token = cast<GCStatepointInst>(SPInvoke); 1762 1763 // Generate gc relocates in exceptional path 1764 BasicBlock *UnwindBlock = II->getUnwindDest(); 1765 assert(!isa<PHINode>(UnwindBlock->begin()) && 1766 UnwindBlock->getUniquePredecessor() && 1767 "can't safely insert in this block!"); 1768 1769 Builder.SetInsertPoint(&*UnwindBlock->getFirstInsertionPt()); 1770 Builder.SetCurrentDebugLocation(II->getDebugLoc()); 1771 1772 // Attach exceptional gc relocates to the landingpad. 1773 Instruction *ExceptionalToken = UnwindBlock->getLandingPadInst(); 1774 Result.UnwindToken = ExceptionalToken; 1775 1776 CreateGCRelocates(LiveVariables, BasePtrs, ExceptionalToken, Builder); 1777 1778 // Generate gc relocates and returns for normal block 1779 BasicBlock *NormalDest = II->getNormalDest(); 1780 assert(!isa<PHINode>(NormalDest->begin()) && 1781 NormalDest->getUniquePredecessor() && 1782 "can't safely insert in this block!"); 1783 1784 Builder.SetInsertPoint(&*NormalDest->getFirstInsertionPt()); 1785 1786 // gc relocates will be generated later as if it were regular call 1787 // statepoint 1788 } 1789 assert(Token && "Should be set in one of the above branches!"); 1790 1791 if (IsDeoptimize) { 1792 // If we're wrapping an @llvm.experimental.deoptimize in a statepoint, we 1793 // transform the tail-call like structure to a call to a void function 1794 // followed by unreachable to get better codegen. 1795 Replacements.push_back( 1796 DeferredReplacement::createDeoptimizeReplacement(Call)); 1797 } else { 1798 Token->setName("statepoint_token"); 1799 if (!Call->getType()->isVoidTy() && !Call->use_empty()) { 1800 StringRef Name = Call->hasName() ? 
Call->getName() : ""; 1801 CallInst *GCResult = Builder.CreateGCResult(Token, Call->getType(), Name); 1802 GCResult->setAttributes( 1803 AttributeList::get(GCResult->getContext(), AttributeList::ReturnIndex, 1804 Call->getAttributes().getRetAttrs())); 1805 1806 // We cannot RAUW or delete CS.getInstruction() because it could be in the 1807 // live set of some other safepoint, in which case that safepoint's 1808 // PartiallyConstructedSafepointRecord will hold a raw pointer to this 1809 // llvm::Instruction. Instead, we defer the replacement and deletion to 1810 // after the live sets have been made explicit in the IR, and we no longer 1811 // have raw pointers to worry about. 1812 Replacements.emplace_back( 1813 DeferredReplacement::createRAUW(Call, GCResult)); 1814 } else { 1815 Replacements.emplace_back(DeferredReplacement::createDelete(Call)); 1816 } 1817 } 1818 1819 Result.StatepointToken = Token; 1820 1821 // Second, create a gc.relocate for every live variable 1822 CreateGCRelocates(LiveVariables, BasePtrs, Token, Builder); 1823 } 1824 1825 // Replace an existing gc.statepoint with a new one and a set of gc.relocates 1826 // which make the relocations happening at this safepoint explicit. 1827 // 1828 // WARNING: Does not do any fixup to adjust users of the original live 1829 // values. That's the callers responsibility. 1830 static void 1831 makeStatepointExplicit(DominatorTree &DT, CallBase *Call, 1832 PartiallyConstructedSafepointRecord &Result, 1833 std::vector<DeferredReplacement> &Replacements, 1834 const PointerToBaseTy &PointerToBase) { 1835 const auto &LiveSet = Result.LiveSet; 1836 1837 // Convert to vector for efficient cross referencing. 1838 SmallVector<Value *, 64> BaseVec, LiveVec; 1839 LiveVec.reserve(LiveSet.size()); 1840 BaseVec.reserve(LiveSet.size()); 1841 for (Value *L : LiveSet) { 1842 LiveVec.push_back(L); 1843 assert(PointerToBase.count(L)); 1844 Value *Base = PointerToBase.find(L)->second; 1845 BaseVec.push_back(Base); 1846 } 1847 assert(LiveVec.size() == BaseVec.size()); 1848 1849 // Do the actual rewriting and delete the old statepoint 1850 makeStatepointExplicitImpl(Call, BaseVec, LiveVec, Result, Replacements, 1851 PointerToBase); 1852 } 1853 1854 // Helper function for the relocationViaAlloca. 1855 // 1856 // It receives iterator to the statepoint gc relocates and emits a store to the 1857 // assigned location (via allocaMap) for the each one of them. It adds the 1858 // visited values into the visitedLiveValues set, which we will later use them 1859 // for validation checking. 1860 static void 1861 insertRelocationStores(iterator_range<Value::user_iterator> GCRelocs, 1862 DenseMap<Value *, AllocaInst *> &AllocaMap, 1863 DenseSet<Value *> &VisitedLiveValues) { 1864 for (User *U : GCRelocs) { 1865 GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(U); 1866 if (!Relocate) 1867 continue; 1868 1869 Value *OriginalValue = Relocate->getDerivedPtr(); 1870 assert(AllocaMap.count(OriginalValue)); 1871 Value *Alloca = AllocaMap[OriginalValue]; 1872 1873 // Emit store into the related alloca 1874 // All gc_relocates are i8 addrspace(1)* typed, and it must be bitcasted to 1875 // the correct type according to alloca. 
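// Illustrative sketch of the emitted sequence (names are hypothetical):
//   %p.relocated = call coldcc i8 addrspace(1)*
//       @llvm.experimental.gc.relocate.p1i8(token %statepoint_token, i32 7, i32 7)
//   %p.casted = bitcast i8 addrspace(1)* %p.relocated to i64 addrspace(1)*
//   store i64 addrspace(1)* %p.casted, i64 addrspace(1)** %p.alloca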
1876 assert(Relocate->getNextNode() && 1877 "Should always have one since it's not a terminator"); 1878 IRBuilder<> Builder(Relocate->getNextNode()); 1879 Value *CastedRelocatedValue = 1880 Builder.CreateBitCast(Relocate, 1881 cast<AllocaInst>(Alloca)->getAllocatedType(), 1882 suffixed_name_or(Relocate, ".casted", "")); 1883 1884 new StoreInst(CastedRelocatedValue, Alloca, 1885 cast<Instruction>(CastedRelocatedValue)->getNextNode()); 1886 1887 #ifndef NDEBUG 1888 VisitedLiveValues.insert(OriginalValue); 1889 #endif 1890 } 1891 } 1892 1893 // Helper function for the "relocationViaAlloca". Similar to the 1894 // "insertRelocationStores" but works for rematerialized values. 1895 static void insertRematerializationStores( 1896 const RematerializedValueMapTy &RematerializedValues, 1897 DenseMap<Value *, AllocaInst *> &AllocaMap, 1898 DenseSet<Value *> &VisitedLiveValues) { 1899 for (auto RematerializedValuePair: RematerializedValues) { 1900 Instruction *RematerializedValue = RematerializedValuePair.first; 1901 Value *OriginalValue = RematerializedValuePair.second; 1902 1903 assert(AllocaMap.count(OriginalValue) && 1904 "Can not find alloca for rematerialized value"); 1905 Value *Alloca = AllocaMap[OriginalValue]; 1906 1907 new StoreInst(RematerializedValue, Alloca, 1908 RematerializedValue->getNextNode()); 1909 1910 #ifndef NDEBUG 1911 VisitedLiveValues.insert(OriginalValue); 1912 #endif 1913 } 1914 } 1915 1916 /// Do all the relocation update via allocas and mem2reg 1917 static void relocationViaAlloca( 1918 Function &F, DominatorTree &DT, ArrayRef<Value *> Live, 1919 ArrayRef<PartiallyConstructedSafepointRecord> Records) { 1920 #ifndef NDEBUG 1921 // record initial number of (static) allocas; we'll check we have the same 1922 // number when we get done. 1923 int InitialAllocaNum = 0; 1924 for (Instruction &I : F.getEntryBlock()) 1925 if (isa<AllocaInst>(I)) 1926 InitialAllocaNum++; 1927 #endif 1928 1929 // TODO-PERF: change data structures, reserve 1930 DenseMap<Value *, AllocaInst *> AllocaMap; 1931 SmallVector<AllocaInst *, 200> PromotableAllocas; 1932 // Used later to chack that we have enough allocas to store all values 1933 std::size_t NumRematerializedValues = 0; 1934 PromotableAllocas.reserve(Live.size()); 1935 1936 // Emit alloca for "LiveValue" and record it in "allocaMap" and 1937 // "PromotableAllocas" 1938 const DataLayout &DL = F.getParent()->getDataLayout(); 1939 auto emitAllocaFor = [&](Value *LiveValue) { 1940 AllocaInst *Alloca = new AllocaInst(LiveValue->getType(), 1941 DL.getAllocaAddrSpace(), "", 1942 F.getEntryBlock().getFirstNonPHI()); 1943 AllocaMap[LiveValue] = Alloca; 1944 PromotableAllocas.push_back(Alloca); 1945 }; 1946 1947 // Emit alloca for each live gc pointer 1948 for (Value *V : Live) 1949 emitAllocaFor(V); 1950 1951 // Emit allocas for rematerialized values 1952 for (const auto &Info : Records) 1953 for (auto RematerializedValuePair : Info.RematerializedValues) { 1954 Value *OriginalValue = RematerializedValuePair.second; 1955 if (AllocaMap.count(OriginalValue) != 0) 1956 continue; 1957 1958 emitAllocaFor(OriginalValue); 1959 ++NumRematerializedValues; 1960 } 1961 1962 // The next two loops are part of the same conceptual operation. We need to 1963 // insert a store to the alloca after the original def and at each 1964 // redefinition. We need to insert a load before each use. These are split 1965 // into distinct loops for performance reasons. 
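// An illustrative sketch of the overall scheme (value names are hypothetical):
// for a live gc pointer %p that must survive a statepoint, the code below
// produces roughly
//   %p.alloca = alloca i8 addrspace(1)*          ; in the entry block
//   store i8 addrspace(1)* %p, i8 addrspace(1)** %p.alloca            ; after def
//   ... gc.statepoint / gc.relocate ...
//   store i8 addrspace(1)* %p.relocated, i8 addrspace(1)** %p.alloca
//   %p.reload = load i8 addrspace(1)*, i8 addrspace(1)** %p.alloca    ; before use
// and PromoteMemToReg at the end of this function folds the allocas back into
// SSA form.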
1966 1967 // Update gc pointer after each statepoint: either store a relocated value or 1968 // null (if no relocated value was found for this gc pointer and it is not a 1969 // gc_result). This must happen before we update the statepoint with load of 1970 // alloca otherwise we lose the link between statepoint and old def. 1971 for (const auto &Info : Records) { 1972 Value *Statepoint = Info.StatepointToken; 1973 1974 // This will be used for consistency check 1975 DenseSet<Value *> VisitedLiveValues; 1976 1977 // Insert stores for normal statepoint gc relocates 1978 insertRelocationStores(Statepoint->users(), AllocaMap, VisitedLiveValues); 1979 1980 // In case if it was invoke statepoint 1981 // we will insert stores for exceptional path gc relocates. 1982 if (isa<InvokeInst>(Statepoint)) { 1983 insertRelocationStores(Info.UnwindToken->users(), AllocaMap, 1984 VisitedLiveValues); 1985 } 1986 1987 // Do similar thing with rematerialized values 1988 insertRematerializationStores(Info.RematerializedValues, AllocaMap, 1989 VisitedLiveValues); 1990 1991 if (ClobberNonLive) { 1992 // As a debugging aid, pretend that an unrelocated pointer becomes null at 1993 // the gc.statepoint. This will turn some subtle GC problems into 1994 // slightly easier to debug SEGVs. Note that on large IR files with 1995 // lots of gc.statepoints this is extremely costly both memory and time 1996 // wise. 1997 SmallVector<AllocaInst *, 64> ToClobber; 1998 for (auto Pair : AllocaMap) { 1999 Value *Def = Pair.first; 2000 AllocaInst *Alloca = Pair.second; 2001 2002 // This value was relocated 2003 if (VisitedLiveValues.count(Def)) { 2004 continue; 2005 } 2006 ToClobber.push_back(Alloca); 2007 } 2008 2009 auto InsertClobbersAt = [&](Instruction *IP) { 2010 for (auto *AI : ToClobber) { 2011 auto PT = cast<PointerType>(AI->getAllocatedType()); 2012 Constant *CPN = ConstantPointerNull::get(PT); 2013 new StoreInst(CPN, AI, IP); 2014 } 2015 }; 2016 2017 // Insert the clobbering stores. These may get intermixed with the 2018 // gc.results and gc.relocates, but that's fine. 2019 if (auto II = dyn_cast<InvokeInst>(Statepoint)) { 2020 InsertClobbersAt(&*II->getNormalDest()->getFirstInsertionPt()); 2021 InsertClobbersAt(&*II->getUnwindDest()->getFirstInsertionPt()); 2022 } else { 2023 InsertClobbersAt(cast<Instruction>(Statepoint)->getNextNode()); 2024 } 2025 } 2026 } 2027 2028 // Update use with load allocas and add store for gc_relocated. 2029 for (auto Pair : AllocaMap) { 2030 Value *Def = Pair.first; 2031 AllocaInst *Alloca = Pair.second; 2032 2033 // We pre-record the uses of allocas so that we dont have to worry about 2034 // later update that changes the user information.. 2035 2036 SmallVector<Instruction *, 20> Uses; 2037 // PERF: trade a linear scan for repeated reallocation 2038 Uses.reserve(Def->getNumUses()); 2039 for (User *U : Def->users()) { 2040 if (!isa<ConstantExpr>(U)) { 2041 // If the def has a ConstantExpr use, then the def is either a 2042 // ConstantExpr use itself or null. In either case 2043 // (recursively in the first, directly in the second), the oop 2044 // it is ultimately dependent on is null and this particular 2045 // use does not need to be fixed up. 
2046 Uses.push_back(cast<Instruction>(U)); 2047 } 2048 } 2049 2050 llvm::sort(Uses); 2051 auto Last = std::unique(Uses.begin(), Uses.end()); 2052 Uses.erase(Last, Uses.end()); 2053 2054 for (Instruction *Use : Uses) { 2055 if (isa<PHINode>(Use)) { 2056 PHINode *Phi = cast<PHINode>(Use); 2057 for (unsigned i = 0; i < Phi->getNumIncomingValues(); i++) { 2058 if (Def == Phi->getIncomingValue(i)) { 2059 LoadInst *Load = 2060 new LoadInst(Alloca->getAllocatedType(), Alloca, "", 2061 Phi->getIncomingBlock(i)->getTerminator()); 2062 Phi->setIncomingValue(i, Load); 2063 } 2064 } 2065 } else { 2066 LoadInst *Load = 2067 new LoadInst(Alloca->getAllocatedType(), Alloca, "", Use); 2068 Use->replaceUsesOfWith(Def, Load); 2069 } 2070 } 2071 2072 // Emit store for the initial gc value. Store must be inserted after load, 2073 // otherwise store will be in alloca's use list and an extra load will be 2074 // inserted before it. 2075 StoreInst *Store = new StoreInst(Def, Alloca, /*volatile*/ false, 2076 DL.getABITypeAlign(Def->getType())); 2077 if (Instruction *Inst = dyn_cast<Instruction>(Def)) { 2078 if (InvokeInst *Invoke = dyn_cast<InvokeInst>(Inst)) { 2079 // InvokeInst is a terminator so the store need to be inserted into its 2080 // normal destination block. 2081 BasicBlock *NormalDest = Invoke->getNormalDest(); 2082 Store->insertBefore(NormalDest->getFirstNonPHI()); 2083 } else { 2084 assert(!Inst->isTerminator() && 2085 "The only terminator that can produce a value is " 2086 "InvokeInst which is handled above."); 2087 Store->insertAfter(Inst); 2088 } 2089 } else { 2090 assert(isa<Argument>(Def)); 2091 Store->insertAfter(cast<Instruction>(Alloca)); 2092 } 2093 } 2094 2095 assert(PromotableAllocas.size() == Live.size() + NumRematerializedValues && 2096 "we must have the same allocas with lives"); 2097 (void) NumRematerializedValues; 2098 if (!PromotableAllocas.empty()) { 2099 // Apply mem2reg to promote alloca to SSA 2100 PromoteMemToReg(PromotableAllocas, DT); 2101 } 2102 2103 #ifndef NDEBUG 2104 for (auto &I : F.getEntryBlock()) 2105 if (isa<AllocaInst>(I)) 2106 InitialAllocaNum--; 2107 assert(InitialAllocaNum == 0 && "We must not introduce any extra allocas"); 2108 #endif 2109 } 2110 2111 /// Implement a unique function which doesn't require we sort the input 2112 /// vector. Doing so has the effect of changing the output of a couple of 2113 /// tests in ways which make them less useful in testing fused safepoints. 2114 template <typename T> static void unique_unsorted(SmallVectorImpl<T> &Vec) { 2115 SmallSet<T, 8> Seen; 2116 erase_if(Vec, [&](const T &V) { return !Seen.insert(V).second; }); 2117 } 2118 2119 /// Insert holders so that each Value is obviously live through the entire 2120 /// lifetime of the call. 
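/// As an illustrative sketch (the values held are whatever the caller passes
/// in), the holder inserted right after a call safepoint looks like
///   call void (...) @__tmp_use(i8 addrspace(1)* %a, i8 addrspace(1)* %b)
/// which keeps %a and %b obviously live across the safepoint while liveness is
/// recomputed; all holders are erased again once that is done.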
2121 static void insertUseHolderAfter(CallBase *Call, const ArrayRef<Value *> Values, 2122 SmallVectorImpl<CallInst *> &Holders) { 2123 if (Values.empty()) 2124 // No values to hold live, might as well not insert the empty holder 2125 return; 2126 2127 Module *M = Call->getModule(); 2128 // Use a dummy vararg function to actually hold the values live 2129 FunctionCallee Func = M->getOrInsertFunction( 2130 "__tmp_use", FunctionType::get(Type::getVoidTy(M->getContext()), true)); 2131 if (isa<CallInst>(Call)) { 2132 // For call safepoints insert dummy calls right after safepoint 2133 Holders.push_back( 2134 CallInst::Create(Func, Values, "", &*++Call->getIterator())); 2135 return; 2136 } 2137 // For invoke safepooints insert dummy calls both in normal and 2138 // exceptional destination blocks 2139 auto *II = cast<InvokeInst>(Call); 2140 Holders.push_back(CallInst::Create( 2141 Func, Values, "", &*II->getNormalDest()->getFirstInsertionPt())); 2142 Holders.push_back(CallInst::Create( 2143 Func, Values, "", &*II->getUnwindDest()->getFirstInsertionPt())); 2144 } 2145 2146 static void findLiveReferences( 2147 Function &F, DominatorTree &DT, ArrayRef<CallBase *> toUpdate, 2148 MutableArrayRef<struct PartiallyConstructedSafepointRecord> records) { 2149 GCPtrLivenessData OriginalLivenessData; 2150 computeLiveInValues(DT, F, OriginalLivenessData); 2151 for (size_t i = 0; i < records.size(); i++) { 2152 struct PartiallyConstructedSafepointRecord &info = records[i]; 2153 analyzeParsePointLiveness(DT, OriginalLivenessData, toUpdate[i], info); 2154 } 2155 } 2156 2157 // Helper function for the "rematerializeLiveValues". It walks use chain 2158 // starting from the "CurrentValue" until it reaches the root of the chain, i.e. 2159 // the base or a value it cannot process. Only "simple" values are processed 2160 // (currently it is GEP's and casts). The returned root is examined by the 2161 // callers of findRematerializableChainToBasePointer. Fills "ChainToBase" array 2162 // with all visited values. 2163 static Value* findRematerializableChainToBasePointer( 2164 SmallVectorImpl<Instruction*> &ChainToBase, 2165 Value *CurrentValue) { 2166 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(CurrentValue)) { 2167 ChainToBase.push_back(GEP); 2168 return findRematerializableChainToBasePointer(ChainToBase, 2169 GEP->getPointerOperand()); 2170 } 2171 2172 if (CastInst *CI = dyn_cast<CastInst>(CurrentValue)) { 2173 if (!CI->isNoopCast(CI->getModule()->getDataLayout())) 2174 return CI; 2175 2176 ChainToBase.push_back(CI); 2177 return findRematerializableChainToBasePointer(ChainToBase, 2178 CI->getOperand(0)); 2179 } 2180 2181 // We have reached the root of the chain, which is either equal to the base or 2182 // is the first unsupported value along the use chain. 2183 return CurrentValue; 2184 } 2185 2186 // Helper function for the "rematerializeLiveValues". Compute cost of the use 2187 // chain we are going to rematerialize. 
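// For example (illustrative IR, hypothetical names), for a derived pointer
//   %d1 = getelementptr inbounds i64, i64 addrspace(1)* %base, i64 1
//   %d2 = getelementptr inbounds i64, i64 addrspace(1)* %d1, i64 %idx
// the chain for %d2 is the two GEPs rooted at %base; the cost returned below is
// the summed TTI cost of re-executing those GEPs (and any no-op casts) after
// the statepoint instead of relocating %d2.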
2188 static InstructionCost 2189 chainToBasePointerCost(SmallVectorImpl<Instruction *> &Chain, 2190 TargetTransformInfo &TTI) { 2191 InstructionCost Cost = 0; 2192 2193 for (Instruction *Instr : Chain) { 2194 if (CastInst *CI = dyn_cast<CastInst>(Instr)) { 2195 assert(CI->isNoopCast(CI->getModule()->getDataLayout()) && 2196 "non noop cast is found during rematerialization"); 2197 2198 Type *SrcTy = CI->getOperand(0)->getType(); 2199 Cost += TTI.getCastInstrCost(CI->getOpcode(), CI->getType(), SrcTy, 2200 TTI::getCastContextHint(CI), 2201 TargetTransformInfo::TCK_SizeAndLatency, CI); 2202 2203 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Instr)) { 2204 // Cost of the address calculation 2205 Type *ValTy = GEP->getSourceElementType(); 2206 Cost += TTI.getAddressComputationCost(ValTy); 2207 2208 // And cost of the GEP itself 2209 // TODO: Use TTI->getGEPCost here (it exists, but appears to be not 2210 // allowed for the external usage) 2211 if (!GEP->hasAllConstantIndices()) 2212 Cost += 2; 2213 2214 } else { 2215 llvm_unreachable("unsupported instruction type during rematerialization"); 2216 } 2217 } 2218 2219 return Cost; 2220 } 2221 2222 static bool AreEquivalentPhiNodes(PHINode &OrigRootPhi, PHINode &AlternateRootPhi) { 2223 unsigned PhiNum = OrigRootPhi.getNumIncomingValues(); 2224 if (PhiNum != AlternateRootPhi.getNumIncomingValues() || 2225 OrigRootPhi.getParent() != AlternateRootPhi.getParent()) 2226 return false; 2227 // Map of incoming values and their corresponding basic blocks of 2228 // OrigRootPhi. 2229 SmallDenseMap<Value *, BasicBlock *, 8> CurrentIncomingValues; 2230 for (unsigned i = 0; i < PhiNum; i++) 2231 CurrentIncomingValues[OrigRootPhi.getIncomingValue(i)] = 2232 OrigRootPhi.getIncomingBlock(i); 2233 2234 // Both current and base PHIs should have same incoming values and 2235 // the same basic blocks corresponding to the incoming values. 2236 for (unsigned i = 0; i < PhiNum; i++) { 2237 auto CIVI = 2238 CurrentIncomingValues.find(AlternateRootPhi.getIncomingValue(i)); 2239 if (CIVI == CurrentIncomingValues.end()) 2240 return false; 2241 BasicBlock *CurrentIncomingBB = CIVI->second; 2242 if (CurrentIncomingBB != AlternateRootPhi.getIncomingBlock(i)) 2243 return false; 2244 } 2245 return true; 2246 } 2247 2248 // Find derived pointers that can be recomputed cheap enough and fill 2249 // RematerizationCandidates with such candidates. 2250 static void 2251 findRematerializationCandidates(PointerToBaseTy PointerToBase, 2252 RematCandTy &RematerizationCandidates, 2253 TargetTransformInfo &TTI) { 2254 const unsigned int ChainLengthThreshold = 10; 2255 2256 for (auto P2B : PointerToBase) { 2257 auto *Derived = P2B.first; 2258 auto *Base = P2B.second; 2259 // Consider only derived pointers. 2260 if (Derived == Base) 2261 continue; 2262 2263 // For each live pointer find its defining chain. 2264 SmallVector<Instruction *, 3> ChainToBase; 2265 Value *RootOfChain = 2266 findRematerializableChainToBasePointer(ChainToBase, Derived); 2267 2268 // Nothing to do, or chain is too long 2269 if ( ChainToBase.size() == 0 || 2270 ChainToBase.size() > ChainLengthThreshold) 2271 continue; 2272 2273 // Handle the scenario where the RootOfChain is not equal to the 2274 // Base Value, but they are essentially the same phi values. 
2275     if (RootOfChain != PointerToBase[Derived]) {
2276       PHINode *OrigRootPhi = dyn_cast<PHINode>(RootOfChain);
2277       PHINode *AlternateRootPhi = dyn_cast<PHINode>(PointerToBase[Derived]);
2278       if (!OrigRootPhi || !AlternateRootPhi)
2279         continue;
2280       // PHI nodes that have the same incoming values, and belong to the same
2281       // basic block, are essentially the same SSA value. When the original phi
2282       // has incoming values with different base pointers, the original phi is
2283       // marked as a conflict, and an additional `AlternateRootPhi` with the same
2284       // incoming values gets generated by the findBasePointer function. We need
2285       // to identify that the newly generated AlternateRootPhi (the .base version
2286       // of the phi) and RootOfChain (the original phi node itself) are the same,
2287       // so that we can rematerialize the gep and casts. This is a workaround for
2288       // a deficiency in the findBasePointer algorithm.
2289       if (!AreEquivalentPhiNodes(*OrigRootPhi, *AlternateRootPhi))
2290         continue;
2291     }
2292     // Compute cost of this chain.
2293     InstructionCost Cost = chainToBasePointerCost(ChainToBase, TTI);
2294     // TODO: We can also account for cases when we will be able to remove some
2295     // of the rematerialized values by later optimization passes, e.g. if we
2296     // rematerialized several intersecting chains, or if the original values
2297     // don't have any uses besides this statepoint.
2298
2299     // Ok, there is a candidate.
2300     RematerizlizationCandidateRecord Record;
2301     Record.ChainToBase = ChainToBase;
2302     Record.RootOfChain = RootOfChain;
2303     Record.Cost = Cost;
2304     RematerizationCandidates.insert({ Derived, Record });
2305   }
2306 }
2307
2308 // From the statepoint live set pick values that are cheaper to recompute than
2309 // to relocate. Remove these values from the live set, rematerialize them after
2310 // the statepoint, and record them in the "Info" structure. Note that, as with
2311 // relocated values, we don't do any user adjustments here.
2312 static void rematerializeLiveValues(CallBase *Call,
2313                                     PartiallyConstructedSafepointRecord &Info,
2314                                     PointerToBaseTy &PointerToBase,
2315                                     RematCandTy &RematerizationCandidates,
2316                                     TargetTransformInfo &TTI) {
2317   // Record values we are going to delete from this statepoint live set.
2318   // We cannot do this in the following loop due to iterator invalidation.
2319   SmallVector<Value *, 32> LiveValuesToBeDeleted;
2320
2321   for (Value *LiveValue : Info.LiveSet) {
2322     auto It = RematerizationCandidates.find(LiveValue);
2323     if (It == RematerizationCandidates.end())
2324       continue;
2325
2326     RematerizlizationCandidateRecord &Record = It->second;
2327
2328     InstructionCost Cost = Record.Cost;
2329     // For invokes we need to rematerialize each chain twice - for normal and
2330     // for unwind basic blocks. Model this by multiplying cost by two.
2331     if (isa<InvokeInst>(Call))
2332       Cost *= 2;
2333
2334     // If it's too expensive - skip it.
2335     if (Cost >= RematerializationThreshold)
2336       continue;
2337
2338     // Remove value from the live set.
2339     LiveValuesToBeDeleted.push_back(LiveValue);
2340
2341     // Clone instructions and record them inside "Info" structure.
2342
2343     // For each live pointer, find its defining chain.
2344     SmallVector<Instruction *, 3> ChainToBase = Record.ChainToBase;
2345     // Walk backwards to visit top-most instructions first.
2346     std::reverse(ChainToBase.begin(), ChainToBase.end());
2347
2348     // Utility function which clones all instructions from "ChainToBase"
2349     // and inserts them before "InsertBefore".
Returns rematerialized value 2350 // which should be used after statepoint. 2351 auto rematerializeChain = [&ChainToBase]( 2352 Instruction *InsertBefore, Value *RootOfChain, Value *AlternateLiveBase) { 2353 Instruction *LastClonedValue = nullptr; 2354 Instruction *LastValue = nullptr; 2355 for (Instruction *Instr: ChainToBase) { 2356 // Only GEP's and casts are supported as we need to be careful to not 2357 // introduce any new uses of pointers not in the liveset. 2358 // Note that it's fine to introduce new uses of pointers which were 2359 // otherwise not used after this statepoint. 2360 assert(isa<GetElementPtrInst>(Instr) || isa<CastInst>(Instr)); 2361 2362 Instruction *ClonedValue = Instr->clone(); 2363 ClonedValue->insertBefore(InsertBefore); 2364 ClonedValue->setName(Instr->getName() + ".remat"); 2365 2366 // If it is not first instruction in the chain then it uses previously 2367 // cloned value. We should update it to use cloned value. 2368 if (LastClonedValue) { 2369 assert(LastValue); 2370 ClonedValue->replaceUsesOfWith(LastValue, LastClonedValue); 2371 #ifndef NDEBUG 2372 for (auto OpValue : ClonedValue->operand_values()) { 2373 // Assert that cloned instruction does not use any instructions from 2374 // this chain other than LastClonedValue 2375 assert(!is_contained(ChainToBase, OpValue) && 2376 "incorrect use in rematerialization chain"); 2377 // Assert that the cloned instruction does not use the RootOfChain 2378 // or the AlternateLiveBase. 2379 assert(OpValue != RootOfChain && OpValue != AlternateLiveBase); 2380 } 2381 #endif 2382 } else { 2383 // For the first instruction, replace the use of unrelocated base i.e. 2384 // RootOfChain/OrigRootPhi, with the corresponding PHI present in the 2385 // live set. They have been proved to be the same PHI nodes. Note 2386 // that the *only* use of the RootOfChain in the ChainToBase list is 2387 // the first Value in the list. 2388 if (RootOfChain != AlternateLiveBase) 2389 ClonedValue->replaceUsesOfWith(RootOfChain, AlternateLiveBase); 2390 } 2391 2392 LastClonedValue = ClonedValue; 2393 LastValue = Instr; 2394 } 2395 assert(LastClonedValue); 2396 return LastClonedValue; 2397 }; 2398 2399 // Different cases for calls and invokes. For invokes we need to clone 2400 // instructions both on normal and unwind path. 
2401 if (isa<CallInst>(Call)) { 2402 Instruction *InsertBefore = Call->getNextNode(); 2403 assert(InsertBefore); 2404 Instruction *RematerializedValue = rematerializeChain( 2405 InsertBefore, Record.RootOfChain, PointerToBase[LiveValue]); 2406 Info.RematerializedValues[RematerializedValue] = LiveValue; 2407 } else { 2408 auto *Invoke = cast<InvokeInst>(Call); 2409 2410 Instruction *NormalInsertBefore = 2411 &*Invoke->getNormalDest()->getFirstInsertionPt(); 2412 Instruction *UnwindInsertBefore = 2413 &*Invoke->getUnwindDest()->getFirstInsertionPt(); 2414 2415 Instruction *NormalRematerializedValue = rematerializeChain( 2416 NormalInsertBefore, Record.RootOfChain, PointerToBase[LiveValue]); 2417 Instruction *UnwindRematerializedValue = rematerializeChain( 2418 UnwindInsertBefore, Record.RootOfChain, PointerToBase[LiveValue]); 2419 2420 Info.RematerializedValues[NormalRematerializedValue] = LiveValue; 2421 Info.RematerializedValues[UnwindRematerializedValue] = LiveValue; 2422 } 2423 } 2424 2425 // Remove rematerializaed values from the live set 2426 for (auto LiveValue: LiveValuesToBeDeleted) { 2427 Info.LiveSet.remove(LiveValue); 2428 } 2429 } 2430 2431 static bool inlineGetBaseAndOffset(Function &F, 2432 SmallVectorImpl<CallInst *> &Intrinsics, 2433 DefiningValueMapTy &DVCache) { 2434 auto &Context = F.getContext(); 2435 auto &DL = F.getParent()->getDataLayout(); 2436 bool Changed = false; 2437 2438 for (auto *Callsite : Intrinsics) 2439 switch (Callsite->getIntrinsicID()) { 2440 case Intrinsic::experimental_gc_get_pointer_base: { 2441 Changed = true; 2442 Value *Base = findBasePointer(Callsite->getOperand(0), DVCache); 2443 assert(!DVCache.count(Callsite)); 2444 auto *BaseBC = IRBuilder<>(Callsite).CreateBitCast( 2445 Base, Callsite->getType(), suffixed_name_or(Base, ".cast", "")); 2446 if (BaseBC != Base) 2447 DVCache[BaseBC] = Base; 2448 Callsite->replaceAllUsesWith(BaseBC); 2449 if (!BaseBC->hasName()) 2450 BaseBC->takeName(Callsite); 2451 Callsite->eraseFromParent(); 2452 break; 2453 } 2454 case Intrinsic::experimental_gc_get_pointer_offset: { 2455 Changed = true; 2456 Value *Derived = Callsite->getOperand(0); 2457 Value *Base = findBasePointer(Derived, DVCache); 2458 assert(!DVCache.count(Callsite)); 2459 unsigned AddressSpace = Derived->getType()->getPointerAddressSpace(); 2460 unsigned IntPtrSize = DL.getPointerSizeInBits(AddressSpace); 2461 IRBuilder<> Builder(Callsite); 2462 Value *BaseInt = 2463 Builder.CreatePtrToInt(Base, Type::getIntNTy(Context, IntPtrSize), 2464 suffixed_name_or(Base, ".int", "")); 2465 Value *DerivedInt = 2466 Builder.CreatePtrToInt(Derived, Type::getIntNTy(Context, IntPtrSize), 2467 suffixed_name_or(Derived, ".int", "")); 2468 Value *Offset = Builder.CreateSub(DerivedInt, BaseInt); 2469 Callsite->replaceAllUsesWith(Offset); 2470 Offset->takeName(Callsite); 2471 Callsite->eraseFromParent(); 2472 break; 2473 } 2474 default: 2475 llvm_unreachable("Unknown intrinsic"); 2476 } 2477 2478 return Changed; 2479 } 2480 2481 static bool insertParsePoints(Function &F, DominatorTree &DT, 2482 TargetTransformInfo &TTI, 2483 SmallVectorImpl<CallBase *> &ToUpdate, 2484 DefiningValueMapTy &DVCache) { 2485 #ifndef NDEBUG 2486 // Validate the input 2487 std::set<CallBase *> Uniqued; 2488 Uniqued.insert(ToUpdate.begin(), ToUpdate.end()); 2489 assert(Uniqued.size() == ToUpdate.size() && "no duplicates please!"); 2490 2491 for (CallBase *Call : ToUpdate) 2492 assert(Call->getFunction() == &F); 2493 #endif 2494 2495 // When inserting gc.relocates for invokes, we need to be able 
to insert at 2496 // the top of the successor blocks. See the comment on 2497 // normalForInvokeSafepoint on exactly what is needed. Note that this step 2498 // may restructure the CFG. 2499 for (CallBase *Call : ToUpdate) { 2500 auto *II = dyn_cast<InvokeInst>(Call); 2501 if (!II) 2502 continue; 2503 normalizeForInvokeSafepoint(II->getNormalDest(), II->getParent(), DT); 2504 normalizeForInvokeSafepoint(II->getUnwindDest(), II->getParent(), DT); 2505 } 2506 2507 // A list of dummy calls added to the IR to keep various values obviously 2508 // live in the IR. We'll remove all of these when done. 2509 SmallVector<CallInst *, 64> Holders; 2510 2511 // Insert a dummy call with all of the deopt operands we'll need for the 2512 // actual safepoint insertion as arguments. This ensures reference operands 2513 // in the deopt argument list are considered live through the safepoint (and 2514 // thus makes sure they get relocated.) 2515 for (CallBase *Call : ToUpdate) { 2516 SmallVector<Value *, 64> DeoptValues; 2517 2518 for (Value *Arg : GetDeoptBundleOperands(Call)) { 2519 assert(!isUnhandledGCPointerType(Arg->getType()) && 2520 "support for FCA unimplemented"); 2521 if (isHandledGCPointerType(Arg->getType())) 2522 DeoptValues.push_back(Arg); 2523 } 2524 2525 insertUseHolderAfter(Call, DeoptValues, Holders); 2526 } 2527 2528 SmallVector<PartiallyConstructedSafepointRecord, 64> Records(ToUpdate.size()); 2529 2530 // A) Identify all gc pointers which are statically live at the given call 2531 // site. 2532 findLiveReferences(F, DT, ToUpdate, Records); 2533 2534 /// Global mapping from live pointers to a base-defining-value. 2535 PointerToBaseTy PointerToBase; 2536 2537 // B) Find the base pointers for each live pointer 2538 for (size_t i = 0; i < Records.size(); i++) { 2539 PartiallyConstructedSafepointRecord &info = Records[i]; 2540 findBasePointers(DT, DVCache, ToUpdate[i], info, PointerToBase); 2541 } 2542 if (PrintBasePointers) { 2543 errs() << "Base Pairs (w/o Relocation):\n"; 2544 for (auto &Pair : PointerToBase) { 2545 errs() << " derived "; 2546 Pair.first->printAsOperand(errs(), false); 2547 errs() << " base "; 2548 Pair.second->printAsOperand(errs(), false); 2549 errs() << "\n"; 2550 ; 2551 } 2552 } 2553 2554 // The base phi insertion logic (for any safepoint) may have inserted new 2555 // instructions which are now live at some safepoint. The simplest such 2556 // example is: 2557 // loop: 2558 // phi a <-- will be a new base_phi here 2559 // safepoint 1 <-- that needs to be live here 2560 // gep a + 1 2561 // safepoint 2 2562 // br loop 2563 // We insert some dummy calls after each safepoint to definitely hold live 2564 // the base pointers which were identified for that safepoint. We'll then 2565 // ask liveness for _every_ base inserted to see what is now live. Then we 2566 // remove the dummy calls. 2567 Holders.reserve(Holders.size() + Records.size()); 2568 for (size_t i = 0; i < Records.size(); i++) { 2569 PartiallyConstructedSafepointRecord &Info = Records[i]; 2570 2571 SmallVector<Value *, 128> Bases; 2572 for (auto *Derived : Info.LiveSet) { 2573 assert(PointerToBase.count(Derived) && "Missed base for derived pointer"); 2574 Bases.push_back(PointerToBase[Derived]); 2575 } 2576 2577 insertUseHolderAfter(ToUpdate[i], Bases, Holders); 2578 } 2579 2580 // By selecting base pointers, we've effectively inserted new uses. Thus, we 2581 // need to rerun liveness. We may *also* have inserted new defs, but that's 2582 // not the key issue. 
2583   recomputeLiveInValues(F, DT, ToUpdate, Records, PointerToBase);
2584
2585   if (PrintBasePointers) {
2586     errs() << "Base Pairs: (w/Relocation)\n";
2587     for (auto Pair : PointerToBase) {
2588       errs() << " derived ";
2589       Pair.first->printAsOperand(errs(), false);
2590       errs() << " base ";
2591       Pair.second->printAsOperand(errs(), false);
2592       errs() << "\n";
2593     }
2594   }
2595
2596   // It is possible that non-constant live variables have a constant base. For
2597   // example, a GEP with a variable offset from a global. In this case we can
2598   // remove it from the liveset. We already don't add constants to the liveset
2599   // because we assume they won't move at runtime and the GC doesn't need to be
2600   // informed about them. The same reasoning applies if the base is constant.
2601   // Note that the relocation placement code relies on this filtering for
2602   // correctness as it expects the base to be in the liveset, which isn't true
2603   // if the base is constant.
2604   for (auto &Info : Records) {
2605     Info.LiveSet.remove_if([&](Value *LiveV) {
2606       assert(PointerToBase.count(LiveV) && "Missed base for derived pointer");
2607       return isa<Constant>(PointerToBase[LiveV]);
2608     });
2609   }
2610
2611   for (CallInst *CI : Holders)
2612     CI->eraseFromParent();
2613
2614   Holders.clear();
2615
2616   // Compute the cost of possible re-materialization of derived pointers.
2617   RematCandTy RematerizationCandidates;
2618   findRematerializationCandidates(PointerToBase, RematerizationCandidates, TTI);
2619
2620   // In order to reduce the live set of a statepoint we might choose to
2621   // rematerialize some values instead of relocating them. This is purely an
2622   // optimization and does not influence correctness.
2623   for (size_t i = 0; i < Records.size(); i++)
2624     rematerializeLiveValues(ToUpdate[i], Records[i], PointerToBase,
2625                             RematerizationCandidates, TTI);
2626
2627   // We need this to safely RAUW and delete call or invoke return values that
2628   // may themselves be live over a statepoint. For details, please see usage in
2629   // makeStatepointExplicitImpl.
2630   std::vector<DeferredReplacement> Replacements;
2631
2632   // Now run through and replace the existing statepoints with new ones with
2633   // the live variables listed. We do not yet update uses of the values being
2634   // relocated. We have references to live variables that need to
2635   // survive to the last iteration of this loop. (By construction, the
2636   // previous statepoint can not be a live variable, thus we can and do remove
2637   // the old statepoint calls as we go.)
2638   for (size_t i = 0; i < Records.size(); i++)
2639     makeStatepointExplicit(DT, ToUpdate[i], Records[i], Replacements,
2640                            PointerToBase);
2641
2642   ToUpdate.clear(); // prevent accidental use of invalid calls.
2643
2644   for (auto &PR : Replacements)
2645     PR.doReplacement();
2646
2647   Replacements.clear();
2648
2649   for (auto &Info : Records) {
2650     // These live sets may contain stale Value pointers, since we replaced calls
2651     // with operand bundles with calls wrapped in gc.statepoint, and some of
2652     // those calls may have been def'ing live gc pointers. Clear these out to
2653     // avoid accidentally using them.
2654     //
2655     // TODO: We should create a separate data structure that does not contain
2656     // these live sets, and migrate to using that data structure from this point
2657     // onward.
2658     Info.LiveSet.clear();
2659   }
2660   PointerToBase.clear();
2661
2662   // Do all the fixups of the original live variables to their relocated selves
2663   SmallVector<Value *, 128> Live;
2664   for (size_t i = 0; i < Records.size(); i++) {
2665     PartiallyConstructedSafepointRecord &Info = Records[i];
2666
2667     // We can't simply save the live set from the original insertion. One of
2668     // the live values might be the result of a call which needs a safepoint.
2669     // That Value* no longer exists and we need to use the new gc_result.
2670     // Thankfully, the live set is embedded in the statepoint (and updated), so
2671     // we just grab that.
2672     llvm::append_range(Live, Info.StatepointToken->gc_args());
2673 #ifndef NDEBUG
2674     // Do some basic validation checking on our liveness results before
2675     // performing relocation. Relocation can and will turn mistakes in liveness
2676     // results into nonsensical code which is much harder to debug.
2677     // TODO: It would be nice to test consistency as well
2678     assert(DT.isReachableFromEntry(Info.StatepointToken->getParent()) &&
2679            "statepoint must be reachable or liveness is meaningless");
2680     for (Value *V : Info.StatepointToken->gc_args()) {
2681       if (!isa<Instruction>(V))
2682         // Non-instruction values trivially dominate all possible uses
2683         continue;
2684       auto *LiveInst = cast<Instruction>(V);
2685       assert(DT.isReachableFromEntry(LiveInst->getParent()) &&
2686              "unreachable values should never be live");
2687       assert(DT.dominates(LiveInst, Info.StatepointToken) &&
2688              "basic SSA liveness expectation violated by liveness analysis");
2689     }
2690 #endif
2691   }
2692   unique_unsorted(Live);
2693
2694 #ifndef NDEBUG
2695   // Validation check
2696   for (auto *Ptr : Live)
2697     assert(isHandledGCPointerType(Ptr->getType()) &&
2698            "must be a gc pointer type");
2699 #endif
2700
2701   relocationViaAlloca(F, DT, Live, Records);
2702   return !Records.empty();
2703 }
2704
2705 // List of all parameter and return attributes which must be stripped when
2706 // lowering from the abstract machine model. Note that it is okay to list
2707 // attributes here which aren't valid as return attributes.
2708 static AttributeMask getParamAndReturnAttributesToRemove() {
2709   AttributeMask R;
2710   R.addAttribute(Attribute::Dereferenceable);
2711   R.addAttribute(Attribute::DereferenceableOrNull);
2712   R.addAttribute(Attribute::ReadNone);
2713   R.addAttribute(Attribute::ReadOnly);
2714   R.addAttribute(Attribute::WriteOnly);
2715   R.addAttribute(Attribute::NoAlias);
2716   R.addAttribute(Attribute::NoFree);
2717   return R;
2718 }
2719
2720 static void stripNonValidAttributesFromPrototype(Function &F) {
2721   LLVMContext &Ctx = F.getContext();
2722
2723   // Intrinsics are very delicate. Lowering sometimes depends on the presence
2724   // of certain attributes for correctness, but we may have also inferred
2725   // additional ones in the abstract machine model which need to be stripped.
2726   // This assumes that the attributes defined in Intrinsic.td are conservatively
2727   // correct for both the physical and the abstract model.
2728 if (Intrinsic::ID id = F.getIntrinsicID()) { 2729 F.setAttributes(Intrinsic::getAttributes(Ctx, id)); 2730 return; 2731 } 2732 2733 AttributeMask R = getParamAndReturnAttributesToRemove(); 2734 for (Argument &A : F.args()) 2735 if (isa<PointerType>(A.getType())) 2736 F.removeParamAttrs(A.getArgNo(), R); 2737 2738 if (isa<PointerType>(F.getReturnType())) 2739 F.removeRetAttrs(R); 2740 2741 for (auto Attr : FnAttrsToStrip) 2742 F.removeFnAttr(Attr); 2743 } 2744 2745 /// Certain metadata on instructions are invalid after running RS4GC. 2746 /// Optimizations that run after RS4GC can incorrectly use this metadata to 2747 /// optimize functions. We drop such metadata on the instruction. 2748 static void stripInvalidMetadataFromInstruction(Instruction &I) { 2749 if (!isa<LoadInst>(I) && !isa<StoreInst>(I)) 2750 return; 2751 // These are the attributes that are still valid on loads and stores after 2752 // RS4GC. 2753 // The metadata implying dereferenceability and noalias are (conservatively) 2754 // dropped. This is because semantically, after RewriteStatepointsForGC runs, 2755 // all calls to gc.statepoint "free" the entire heap. Also, gc.statepoint can 2756 // touch the entire heap including noalias objects. Note: The reasoning is 2757 // same as stripping the dereferenceability and noalias attributes that are 2758 // analogous to the metadata counterparts. 2759 // We also drop the invariant.load metadata on the load because that metadata 2760 // implies the address operand to the load points to memory that is never 2761 // changed once it became dereferenceable. This is no longer true after RS4GC. 2762 // Similar reasoning applies to invariant.group metadata, which applies to 2763 // loads within a group. 2764 unsigned ValidMetadataAfterRS4GC[] = {LLVMContext::MD_tbaa, 2765 LLVMContext::MD_range, 2766 LLVMContext::MD_alias_scope, 2767 LLVMContext::MD_nontemporal, 2768 LLVMContext::MD_nonnull, 2769 LLVMContext::MD_align, 2770 LLVMContext::MD_type}; 2771 2772 // Drops all metadata on the instruction other than ValidMetadataAfterRS4GC. 2773 I.dropUnknownNonDebugMetadata(ValidMetadataAfterRS4GC); 2774 } 2775 2776 static void stripNonValidDataFromBody(Function &F) { 2777 if (F.empty()) 2778 return; 2779 2780 LLVMContext &Ctx = F.getContext(); 2781 MDBuilder Builder(Ctx); 2782 2783 // Set of invariantstart instructions that we need to remove. 2784 // Use this to avoid invalidating the instruction iterator. 2785 SmallVector<IntrinsicInst*, 12> InvariantStartInstructions; 2786 2787 for (Instruction &I : instructions(F)) { 2788 // invariant.start on memory location implies that the referenced memory 2789 // location is constant and unchanging. This is no longer true after 2790 // RewriteStatepointsForGC runs because there can be calls to gc.statepoint 2791 // which frees the entire heap and the presence of invariant.start allows 2792 // the optimizer to sink the load of a memory location past a statepoint, 2793 // which is incorrect. 
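// Illustrative sketch (hypothetical values): given
//   %inv = call {}* @llvm.invariant.start.p0i8(i64 1, i8* %addr)
//   %v = load i8, i8* %addr
//   ... gc.statepoint ...
// the invariant.start would otherwise let a later pass sink or rematerialize
// the load below the statepoint, even though the statepoint may change the
// referenced memory.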
2794 if (auto *II = dyn_cast<IntrinsicInst>(&I)) 2795 if (II->getIntrinsicID() == Intrinsic::invariant_start) { 2796 InvariantStartInstructions.push_back(II); 2797 continue; 2798 } 2799 2800 if (MDNode *Tag = I.getMetadata(LLVMContext::MD_tbaa)) { 2801 MDNode *MutableTBAA = Builder.createMutableTBAAAccessTag(Tag); 2802 I.setMetadata(LLVMContext::MD_tbaa, MutableTBAA); 2803 } 2804 2805 stripInvalidMetadataFromInstruction(I); 2806 2807 AttributeMask R = getParamAndReturnAttributesToRemove(); 2808 if (auto *Call = dyn_cast<CallBase>(&I)) { 2809 for (int i = 0, e = Call->arg_size(); i != e; i++) 2810 if (isa<PointerType>(Call->getArgOperand(i)->getType())) 2811 Call->removeParamAttrs(i, R); 2812 if (isa<PointerType>(Call->getType())) 2813 Call->removeRetAttrs(R); 2814 } 2815 } 2816 2817 // Delete the invariant.start instructions and RAUW undef. 2818 for (auto *II : InvariantStartInstructions) { 2819 II->replaceAllUsesWith(UndefValue::get(II->getType())); 2820 II->eraseFromParent(); 2821 } 2822 } 2823 2824 /// Returns true if this function should be rewritten by this pass. The main 2825 /// point of this function is as an extension point for custom logic. 2826 static bool shouldRewriteStatepointsIn(Function &F) { 2827 // TODO: This should check the GCStrategy 2828 if (F.hasGC()) { 2829 const auto &FunctionGCName = F.getGC(); 2830 const StringRef StatepointExampleName("statepoint-example"); 2831 const StringRef CoreCLRName("coreclr"); 2832 return (StatepointExampleName == FunctionGCName) || 2833 (CoreCLRName == FunctionGCName); 2834 } else 2835 return false; 2836 } 2837 2838 static void stripNonValidData(Module &M) { 2839 #ifndef NDEBUG 2840 assert(llvm::any_of(M, shouldRewriteStatepointsIn) && "precondition!"); 2841 #endif 2842 2843 for (Function &F : M) 2844 stripNonValidAttributesFromPrototype(F); 2845 2846 for (Function &F : M) 2847 stripNonValidDataFromBody(F); 2848 } 2849 2850 bool RewriteStatepointsForGC::runOnFunction(Function &F, DominatorTree &DT, 2851 TargetTransformInfo &TTI, 2852 const TargetLibraryInfo &TLI) { 2853 assert(!F.isDeclaration() && !F.empty() && 2854 "need function body to rewrite statepoints in"); 2855 assert(shouldRewriteStatepointsIn(F) && "mismatch in rewrite decision"); 2856 2857 auto NeedsRewrite = [&TLI](Instruction &I) { 2858 if (const auto *Call = dyn_cast<CallBase>(&I)) { 2859 if (isa<GCStatepointInst>(Call)) 2860 return false; 2861 if (callsGCLeafFunction(Call, TLI)) 2862 return false; 2863 2864 // Normally it's up to the frontend to make sure that non-leaf calls also 2865 // have proper deopt state if it is required. We make an exception for 2866 // element atomic memcpy/memmove intrinsics here. Unlike other intrinsics 2867 // these are non-leaf by default. They might be generated by the optimizer 2868 // which doesn't know how to produce a proper deopt state. So if we see a 2869 // non-leaf memcpy/memmove without deopt state just treat it as a leaf 2870 // copy and don't produce a statepoint. 2871 if (!AllowStatepointWithNoDeoptInfo && 2872 !Call->getOperandBundle(LLVMContext::OB_deopt)) { 2873 assert((isa<AtomicMemCpyInst>(Call) || isa<AtomicMemMoveInst>(Call)) && 2874 "Don't expect any other calls here!"); 2875 return false; 2876 } 2877 return true; 2878 } 2879 return false; 2880 }; 2881 2882 // Delete any unreachable statepoints so that we don't have unrewritten 2883 // statepoints surviving this pass. This makes testing easier and the 2884 // resulting IR less confusing to human readers. 
  DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
  bool MadeChange = removeUnreachableBlocks(F, &DTU);
  // Flush the Dominator Tree.
  DTU.getDomTree();

  // Gather all the statepoints which need to be rewritten. Be careful to only
  // consider those in reachable code since we need to ask dominance queries
  // when rewriting. We'll delete the unreachable ones in a moment.
  SmallVector<CallBase *, 64> ParsePointNeeded;
  SmallVector<CallInst *, 64> Intrinsics;
  for (Instruction &I : instructions(F)) {
    // TODO: only the ones with the flag set!
    if (NeedsRewrite(I)) {
      // NOTE: removeUnreachableBlocks() is stronger than
      // DominatorTree::isReachableFromEntry(). In other words,
      // removeUnreachableBlocks() can remove some blocks for which
      // isReachableFromEntry() returns true.
      assert(DT.isReachableFromEntry(I.getParent()) &&
             "no unreachable blocks expected");
      ParsePointNeeded.push_back(cast<CallBase>(&I));
    }
    if (auto *CI = dyn_cast<CallInst>(&I))
      if (CI->getIntrinsicID() == Intrinsic::experimental_gc_get_pointer_base ||
          CI->getIntrinsicID() == Intrinsic::experimental_gc_get_pointer_offset)
        Intrinsics.emplace_back(CI);
  }

  // Return early if there is no work to do.
  if (ParsePointNeeded.empty() && Intrinsics.empty())
    return MadeChange;

  // As a prepass, go ahead and aggressively destroy single-entry phi nodes.
  // These are created by LCSSA. They have the effect of increasing the size
  // of liveness sets for no good reason. It may be harder to do this
  // post-insertion since relocations and base phis can confuse things.
  for (BasicBlock &BB : F)
    if (BB.getUniquePredecessor())
      MadeChange |= FoldSingleEntryPHINodes(&BB);

  // Before we start introducing relocations, we want to tweak the IR a bit to
  // avoid unfortunate code generation effects. The main example is that we
  // want to try to make sure the comparison feeding a branch is after any
  // safepoints. Otherwise, we end up with a comparison of pre-relocation
  // values feeding a branch after relocation. This is semantically correct,
  // but results in extra register pressure since both the pre-relocation and
  // post-relocation copies must be available in registers. For code without
  // relocations this is handled elsewhere, but teaching the scheduler to
  // reverse the transform we're about to do would be slightly complex.
  // Note: This may extend the live range of the inputs to the icmp and thus
  // increase the liveset of any statepoint we move over. This is profitable
  // as long as all statepoints are in rare blocks. If we had in-register
  // lowering for live values, this would be a much safer transform.
  auto getConditionInst = [](Instruction *TI) -> Instruction * {
    if (auto *BI = dyn_cast<BranchInst>(TI))
      if (BI->isConditional())
        return dyn_cast<Instruction>(BI->getCondition());
    // TODO: Extend this to handle switches.
    return nullptr;
  };
  for (BasicBlock &BB : F) {
    Instruction *TI = BB.getTerminator();
    if (auto *Cond = getConditionInst(TI))
      // TODO: Handle more than just ICmps here. We should be able to move
      // most instructions without side effects or memory access.
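      // As a rough illustration (hypothetical IR), a block such as
      //   %c = icmp eq i8 addrspace(1)* %p, %q
      //   call void @foo()           ; will be rewritten into a statepoint
      //   br i1 %c, label %taken, label %untaken
      // has its icmp sunk to just before the br, so that once relocations are
      // inserted the compare reads the relocated pointers rather than
      // comparing pre-relocation values while the relocated copies are also
      // live (the register-pressure problem described above).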
      if (isa<ICmpInst>(Cond) && Cond->hasOneUse()) {
        MadeChange = true;
        Cond->moveBefore(TI);
      }
  }

  // Nasty workaround - The base computation code in the main algorithm doesn't
  // consider the fact that a GEP can be used to convert a scalar to a vector.
  // The right fix for this is to integrate GEPs into the base rewriting
  // algorithm properly; this is just a short-term workaround to prevent
  // crashes by canonicalizing such GEPs into fully vector GEPs.
  for (Instruction &I : instructions(F)) {
    if (!isa<GetElementPtrInst>(I))
      continue;

    unsigned VF = 0;
    for (unsigned i = 0; i < I.getNumOperands(); i++)
      if (auto *OpndVTy = dyn_cast<VectorType>(I.getOperand(i)->getType())) {
        assert(VF == 0 ||
               VF == cast<FixedVectorType>(OpndVTy)->getNumElements());
        VF = cast<FixedVectorType>(OpndVTy)->getNumElements();
      }

    // It's the vector-to-scalar traversal through the pointer operand which
    // confuses base pointer rewriting, so limit ourselves to that case.
    if (!I.getOperand(0)->getType()->isVectorTy() && VF != 0) {
      IRBuilder<> B(&I);
      auto *Splat = B.CreateVectorSplat(VF, I.getOperand(0));
      I.setOperand(0, Splat);
      MadeChange = true;
    }
  }

  // Cache the 'defining value' relation used in the computation and
  // insertion of base phis and selects. This ensures that we don't insert
  // large numbers of duplicate base_phis. Use one cache for both
  // inlineGetBaseAndOffset() and insertParsePoints().
  DefiningValueMapTy DVCache;

  if (!Intrinsics.empty())
    // Inline @gc.get.pointer.base() and @gc.get.pointer.offset() before
    // finding live references.
    MadeChange |= inlineGetBaseAndOffset(F, Intrinsics, DVCache);

  if (!ParsePointNeeded.empty())
    MadeChange |= insertParsePoints(F, DT, TTI, ParsePointNeeded, DVCache);

  return MadeChange;
}

// Liveness computation via standard dataflow
// -------------------------------------------------------------------

// TODO: Consider using bitvectors for liveness; the set of potentially
// interesting values should be small and easy to pre-compute.

/// Compute the live-in set for the reverse range [Begin, End) starting from
/// the live-out set of the basic block.
static void computeLiveInValues(BasicBlock::reverse_iterator Begin,
                                BasicBlock::reverse_iterator End,
                                SetVector<Value *> &LiveTmp) {
  for (auto &I : make_range(Begin, End)) {
    // KILL/Def - Remove this definition from LiveIn.
    LiveTmp.remove(&I);

    // Don't consider *uses* in PHI nodes; we handle their contribution to
    // predecessor blocks when we seed the LiveOut sets.
    if (isa<PHINode>(I))
      continue;

    // USE - Add to the LiveIn set for this instruction.
    for (Value *V : I.operands()) {
      assert(!isUnhandledGCPointerType(V->getType()) &&
             "support for FCA unimplemented");
      if (isHandledGCPointerType(V->getType()) && !isa<Constant>(V)) {
        // The choice to exclude all things constant here is slightly subtle.
        // There are two independent reasons:
        // - First, we assume that things which are constant (from LLVM's
        //   definition) do not move at runtime. For example, the address of a
        //   global variable is fixed, even though its contents may not be.
        // - Second, we can't disallow arbitrary inttoptr constants even
        //   if the language frontend does.
        //   Optimization passes are free to locally exploit facts without
        //   respect to global reachability. This can create sections of code
        //   which are dynamically unreachable and contain just about anything.
        //   (See constants.ll in the tests.)
        LiveTmp.insert(V);
      }
    }
  }
}

static void computeLiveOutSeed(BasicBlock *BB, SetVector<Value *> &LiveTmp) {
  for (BasicBlock *Succ : successors(BB)) {
    for (auto &I : *Succ) {
      PHINode *PN = dyn_cast<PHINode>(&I);
      if (!PN)
        break;

      Value *V = PN->getIncomingValueForBlock(BB);
      assert(!isUnhandledGCPointerType(V->getType()) &&
             "support for FCA unimplemented");
      if (isHandledGCPointerType(V->getType()) && !isa<Constant>(V))
        LiveTmp.insert(V);
    }
  }
}

static SetVector<Value *> computeKillSet(BasicBlock *BB) {
  SetVector<Value *> KillSet;
  for (Instruction &I : *BB)
    if (isHandledGCPointerType(I.getType()))
      KillSet.insert(&I);
  return KillSet;
}

#ifndef NDEBUG
/// Check that the items in 'Live' dominate 'TI'. This is used as a basic
/// validation check for the liveness computation.
static void checkBasicSSA(DominatorTree &DT, SetVector<Value *> &Live,
                          Instruction *TI, bool TermOkay = false) {
  for (Value *V : Live) {
    if (auto *I = dyn_cast<Instruction>(V)) {
      // The terminator can be a member of the LiveOut set. LLVM's definition
      // of instruction dominance states that V does not dominate itself. As
      // such, we need to special case this to allow it.
      if (TermOkay && TI == I)
        continue;
      assert(DT.dominates(I, TI) &&
             "basic SSA liveness expectation violated by liveness analysis");
    }
  }
}

/// Check that all the liveness sets used during the computation of liveness
/// obey basic SSA properties. This is useful for finding cases where we miss
/// a def.
static void checkBasicSSA(DominatorTree &DT, GCPtrLivenessData &Data,
                          BasicBlock &BB) {
  checkBasicSSA(DT, Data.LiveSet[&BB], BB.getTerminator());
  checkBasicSSA(DT, Data.LiveOut[&BB], BB.getTerminator(), true);
  checkBasicSSA(DT, Data.LiveIn[&BB], BB.getTerminator());
}
#endif

static void computeLiveInValues(DominatorTree &DT, Function &F,
                                GCPtrLivenessData &Data) {
  SmallSetVector<BasicBlock *, 32> Worklist;

  // Seed the liveness for each individual block.
  for (BasicBlock &BB : F) {
    Data.KillSet[&BB] = computeKillSet(&BB);
    Data.LiveSet[&BB].clear();
    computeLiveInValues(BB.rbegin(), BB.rend(), Data.LiveSet[&BB]);

#ifndef NDEBUG
    for (Value *Kill : Data.KillSet[&BB])
      assert(!Data.LiveSet[&BB].count(Kill) && "live set contains kill");
#endif

    Data.LiveOut[&BB] = SetVector<Value *>();
    computeLiveOutSeed(&BB, Data.LiveOut[&BB]);
    Data.LiveIn[&BB] = Data.LiveSet[&BB];
    Data.LiveIn[&BB].set_union(Data.LiveOut[&BB]);
    Data.LiveIn[&BB].set_subtract(Data.KillSet[&BB]);
    if (!Data.LiveIn[&BB].empty())
      Worklist.insert(pred_begin(&BB), pred_end(&BB));
  }

  // Propagate that liveness until stable.
  while (!Worklist.empty()) {
    BasicBlock *BB = Worklist.pop_back_val();

    // Compute our new liveout set, then exit early if it hasn't changed
    // despite the contribution of our successors.
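    // This is the standard backward dataflow step, i.e.:
    //   LiveOut(BB) = union over successors S of LiveIn(S)
    //                 (plus the phi-use seed already in Data.LiveOut[BB])
    //   LiveIn(BB)  = (LiveOut(BB) U LiveSet(BB)) \ KillSet(BB)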
    SetVector<Value *> LiveOut = Data.LiveOut[BB];
    const auto OldLiveOutSize = LiveOut.size();
    for (BasicBlock *Succ : successors(BB)) {
      assert(Data.LiveIn.count(Succ));
      LiveOut.set_union(Data.LiveIn[Succ]);
    }
    // assert: OldLiveOut is a subset of LiveOut
    if (OldLiveOutSize == LiveOut.size()) {
      // If the sets are the same size, then we didn't actually add anything
      // when unioning our successors' LiveIn. Thus, the LiveIn of this block
      // hasn't changed.
      continue;
    }
    Data.LiveOut[BB] = LiveOut;

    // Apply the effects of this basic block.
    SetVector<Value *> LiveTmp = LiveOut;
    LiveTmp.set_union(Data.LiveSet[BB]);
    LiveTmp.set_subtract(Data.KillSet[BB]);

    assert(Data.LiveIn.count(BB));
    const SetVector<Value *> &OldLiveIn = Data.LiveIn[BB];
    // assert: OldLiveIn is a subset of LiveTmp
    if (OldLiveIn.size() != LiveTmp.size()) {
      Data.LiveIn[BB] = LiveTmp;
      Worklist.insert(pred_begin(BB), pred_end(BB));
    }
  } // while (!Worklist.empty())

#ifndef NDEBUG
  // Verify our output against SSA properties. This helps catch any
  // missing kills during the above iteration.
  for (BasicBlock &BB : F)
    checkBasicSSA(DT, Data, BB);
#endif
}

static void findLiveSetAtInst(Instruction *Inst, GCPtrLivenessData &Data,
                              StatepointLiveSetTy &Out) {
  BasicBlock *BB = Inst->getParent();

  // Note: The copy is intentional and required.
  assert(Data.LiveOut.count(BB));
  SetVector<Value *> LiveOut = Data.LiveOut[BB];

  // We want to handle the statepoint itself oddly. Its call result is not
  // live (as normal), nor are its arguments (unless they're used again
  // later). This adjustment is specifically what we need to relocate.
  computeLiveInValues(BB->rbegin(), ++Inst->getIterator().getReverse(),
                      LiveOut);
  LiveOut.remove(Inst);
  Out.insert(LiveOut.begin(), LiveOut.end());
}

static void recomputeLiveInValues(GCPtrLivenessData &RevisedLivenessData,
                                  CallBase *Call,
                                  PartiallyConstructedSafepointRecord &Info,
                                  PointerToBaseTy &PointerToBase) {
  StatepointLiveSetTy Updated;
  findLiveSetAtInst(Call, RevisedLivenessData, Updated);

  // We may have base pointers which are now live that weren't before. We need
  // to update the PointerToBase structure to reflect this.
  for (auto V : Updated)
    PointerToBase.insert({V, V});

  Info.LiveSet = Updated;
}