//===- RewriteStatepointsForGC.cpp - Make GC relocations explicit --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Rewrite an existing set of gc.statepoints such that they make potential
// relocations performed by the garbage collector explicit in the IR.
//
//===----------------------------------------------------------------------===//

#include "llvm/Pass.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <algorithm>

#define DEBUG_TYPE "rewrite-statepoints-for-gc"

using namespace llvm;

// Print tracing output
static cl::opt<bool> TraceLSP("trace-rewrite-statepoints", cl::Hidden,
                              cl::init(false));

// Print the liveset found at the insert location
static cl::opt<bool> PrintLiveSet("spp-print-liveset", cl::Hidden,
                                  cl::init(false));
static cl::opt<bool> PrintLiveSetSize("spp-print-liveset-size", cl::Hidden,
                                      cl::init(false));
// Print out the base pointers for debugging
static cl::opt<bool> PrintBasePointers("spp-print-base-pointers", cl::Hidden,
                                       cl::init(false));

namespace {
struct RewriteStatepointsForGC : public FunctionPass {
  static char ID; // Pass identification, replacement for typeid

  RewriteStatepointsForGC() : FunctionPass(ID) {
    initializeRewriteStatepointsForGCPass(*PassRegistry::getPassRegistry());
  }
  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // We add and rewrite a bunch of instructions, but don't really do much
    // else. We could in theory preserve a lot more analyses here.
    AU.addRequired<DominatorTreeWrapperPass>();
  }
};
} // namespace

char RewriteStatepointsForGC::ID = 0;

FunctionPass *llvm::createRewriteStatepointsForGCPass() {
  return new RewriteStatepointsForGC();
}

INITIALIZE_PASS_BEGIN(RewriteStatepointsForGC, "rewrite-statepoints-for-gc",
                      "Make relocations explicit at statepoints", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(RewriteStatepointsForGC, "rewrite-statepoints-for-gc",
                    "Make relocations explicit at statepoints", false, false)

namespace {
struct GCPtrLivenessData {
  /// Values defined in this block.
  DenseMap<BasicBlock *, DenseSet<Value *>> KillSet;
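  // These sets feed a standard backward liveness dataflow. As a sketch of
  // what computeLiveInValues solves (not code from this file):
  //   LiveIn(BB)  = LiveSet(BB) U (LiveOut(BB) - KillSet(BB))
  //   LiveOut(BB) = union of LiveIn(S) over all successors S of BB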
  /// Values used in this block (and thus live); does not include values
  /// killed within this block.
  DenseMap<BasicBlock *, DenseSet<Value *>> LiveSet;

  /// Values live into this basic block (i.e. used by any
  /// instruction in this basic block or ones reachable from here)
  DenseMap<BasicBlock *, DenseSet<Value *>> LiveIn;

  /// Values live out of this basic block (i.e. live into
  /// any successor block)
  DenseMap<BasicBlock *, DenseSet<Value *>> LiveOut;
};

// The type of the internal cache used inside the findBasePointers family
// of functions. From the caller's perspective, this is an opaque type and
// should not be inspected.
//
// In the actual implementation this caches two relations:
// - The base relation itself (i.e. this pointer is based on that one)
// - The base defining value relation (i.e. before base_phi insertion)
// Generally, after the execution of a full findBasePointer call, only the
// base relation will remain. Internally, we add a mixture of the two
// types, then update all entries of the second type to the first type.
typedef DenseMap<Value *, Value *> DefiningValueMapTy;
typedef DenseSet<llvm::Value *> StatepointLiveSetTy;

struct PartiallyConstructedSafepointRecord {
  /// The set of values known to be live across this safepoint
  StatepointLiveSetTy liveset;

  /// Mapping from live pointers to a base-defining-value
  DenseMap<llvm::Value *, llvm::Value *> PointerToBase;

  /// Any new values which were added to the IR during base pointer analysis
  /// for this safepoint
  DenseSet<llvm::Value *> NewInsertedDefs;

  /// The *new* gc.statepoint instruction itself. This produces the token
  /// that normal path gc.relocates and the gc.result are tied to.
  Instruction *StatepointToken;

  /// Instruction to which exceptional gc relocates are attached.
  /// Makes it easier to iterate through them during relocationViaAlloca.
  Instruction *UnwindToken;
};
}

/// Compute the live-in set for every basic block in the function
static void computeLiveInValues(DominatorTree &DT, Function &F,
                                GCPtrLivenessData &Data);

/// Given results from the dataflow liveness computation, find the set of live
/// Values at a particular instruction.
static void findLiveSetAtInst(Instruction *inst, GCPtrLivenessData &Data,
                              StatepointLiveSetTy &out);

// TODO: Once we can get to the GCStrategy, this becomes
// Optional<bool> isGCManagedPointer(const Value *V) const override {

static bool isGCPointerType(const Type *T) {
  if (const PointerType *PT = dyn_cast<PointerType>(T))
    // For the sake of this example GC, we arbitrarily pick addrspace(1) as
    // our GC managed heap. We know that a pointer into this heap needs to
    // be updated and that no other pointer does.
    return (1 == PT->getAddressSpace());
  return false;
}
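
// For example (illustrative IR, not taken from the test suite), given
//   %managed   = load i8 addrspace(1)** %p   ; pointer into the GC heap
//   %unmanaged = load i8** %q                ; not GC managed
// only %managed satisfies isGCPointerType and participates in relocation.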

// Return true if this type is one which a) is a gc pointer or contains a GC
// pointer and b) is of a type this code expects to encounter as a live value.
// (The insertion code will assert that a type which matches (a) and not (b)
// is not encountered.)
static bool isHandledGCPointerType(Type *T) {
  // We fully support gc pointers
  if (isGCPointerType(T))
    return true;
  // We partially support vectors of gc pointers. The code will assert if it
  // can't handle something.
  if (auto VT = dyn_cast<VectorType>(T))
    if (isGCPointerType(VT->getElementType()))
      return true;
  return false;
}

#ifndef NDEBUG
/// Returns true if this type contains a gc pointer whether we know how to
/// handle that type or not.
static bool containsGCPtrType(Type *Ty) {
  if (isGCPointerType(Ty))
    return true;
  if (VectorType *VT = dyn_cast<VectorType>(Ty))
    return isGCPointerType(VT->getScalarType());
  if (ArrayType *AT = dyn_cast<ArrayType>(Ty))
    return containsGCPtrType(AT->getElementType());
  if (StructType *ST = dyn_cast<StructType>(Ty))
    return std::any_of(
        ST->subtypes().begin(), ST->subtypes().end(),
        [](Type *SubType) { return containsGCPtrType(SubType); });
  return false;
}

// Returns true if this is a type which a) is a gc pointer or contains a GC
// pointer and b) is of a type which the code doesn't expect (i.e. first class
// aggregates). Used to trip assertions.
static bool isUnhandledGCPointerType(Type *Ty) {
  return containsGCPtrType(Ty) && !isHandledGCPointerType(Ty);
}
#endif

static bool order_by_name(llvm::Value *a, llvm::Value *b) {
  if (a->hasName() && b->hasName()) {
    return -1 == a->getName().compare(b->getName());
  } else if (a->hasName() && !b->hasName()) {
    return true;
  } else if (!a->hasName() && b->hasName()) {
    return false;
  } else {
    // Better than nothing, but not stable
    return a < b;
  }
}

// Conservatively identifies any definitions which might be live at the
// given instruction. The analysis is performed immediately before the
// given instruction. Values defined by that instruction are not considered
// live. Values used by that instruction are considered live.
static void analyzeParsePointLiveness(
    DominatorTree &DT, GCPtrLivenessData &OriginalLivenessData,
    const CallSite &CS, PartiallyConstructedSafepointRecord &result) {
  Instruction *inst = CS.getInstruction();

  StatepointLiveSetTy liveset;
  findLiveSetAtInst(inst, OriginalLivenessData, liveset);

  if (PrintLiveSet) {
    // Note: This output is used by several of the test cases.
    // The order of elements in a set is not stable; put them in a vector
    // and sort by name.
    SmallVector<Value *, 64> temp;
    temp.insert(temp.end(), liveset.begin(), liveset.end());
    std::sort(temp.begin(), temp.end(), order_by_name);
    errs() << "Live Variables:\n";
    for (Value *V : temp) {
      errs() << " " << V->getName(); // no newline
      V->dump();
    }
  }
  if (PrintLiveSetSize) {
    errs() << "Safepoint For: " << CS.getCalledValue()->getName() << "\n";
    errs() << "Number live values: " << liveset.size() << "\n";
  }
  result.liveset = liveset;
}
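
// Terminology, with an illustrative snippet (names invented):
//   %base    = call i8 addrspace(1)* @new_object()
//   %derived = getelementptr i8 addrspace(1)* %base, i64 16
// %derived is an interior ("derived") pointer; %base is the start of the
// object the collector actually relocates. The routines below recover a base
// (or a value standing in for one) from an arbitrary derived pointer.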

/// If we can trivially determine that this vector contains only base
/// pointers, return the base instruction.
static Value *findBaseOfVector(Value *I) {
  assert(I->getType()->isVectorTy() &&
         cast<VectorType>(I->getType())->getElementType()->isPointerTy() &&
         "Illegal to ask for the base pointer of a non-pointer type");

  // Each case parallels findBaseDefiningValue below, see that code for
  // detailed motivation.

  if (isa<Argument>(I))
    // An incoming argument to the function is a base pointer
    return I;

  // We shouldn't see the address of a global as a vector value.
  assert(!isa<GlobalVariable>(I) &&
         "unexpected global variable found in base of vector");

  // Inlining could possibly introduce phi nodes that contain
  // undef if the callee has multiple returns.
  if (isa<UndefValue>(I))
    // Utterly meaningless, but useful for dealing with partially optimized
    // code.
    return I;

  // Due to inheritance, this must be _after_ the global variable and undef
  // checks
  if (Constant *Con = dyn_cast<Constant>(I)) {
    assert(!isa<GlobalVariable>(I) && !isa<UndefValue>(I) &&
           "order of checks wrong!");
    assert(Con->isNullValue() && "null is the only case which makes sense");
    return Con;
  }

  if (isa<LoadInst>(I))
    return I;

  // Note: This code is currently rather incomplete. We are essentially only
  // handling cases where the vector element is trivially a base pointer. We
  // need to update the entire base pointer construction algorithm to know
  // how to track vector elements and potentially scalarize, but the case
  // which would motivate the work hasn't shown up in real workloads yet.
  llvm_unreachable("no base found for vector element");
}

/// Helper function for findBasePointer - Will return a value which either a)
/// defines the base pointer for the input or b) blocks the simple search
/// (i.e. a PHI or Select of two derived pointers)
static Value *findBaseDefiningValue(Value *I) {
  assert(I->getType()->isPointerTy() &&
         "Illegal to ask for the base pointer of a non-pointer type");

  // This case is a bit of a hack - it only handles extracts from vectors
  // which trivially contain only base pointers. See note inside the
  // function for how to improve this.
  if (auto *EEI = dyn_cast<ExtractElementInst>(I)) {
    Value *VectorOperand = EEI->getVectorOperand();
    Value *VectorBase = findBaseOfVector(VectorOperand);
    (void)VectorBase;
    assert(VectorBase && "extract element not known to be a trivial base");
    return EEI;
  }

  if (isa<Argument>(I))
    // An incoming argument to the function is a base pointer.
    // We should never have reached here if this argument isn't a GC value.
    return I;

  if (isa<GlobalVariable>(I))
    // base case
    return I;

  // Inlining could possibly introduce phi nodes that contain
  // undef if the callee has multiple returns.
  if (isa<UndefValue>(I))
    // Utterly meaningless, but useful for dealing with
    // partially optimized code.
    return I;

  // Due to inheritance, this must be _after_ the global variable and undef
  // checks
  if (Constant *Con = dyn_cast<Constant>(I)) {
    assert(!isa<GlobalVariable>(I) && !isa<UndefValue>(I) &&
           "order of checks wrong!");
    // Note: Finding a constant base for something marked for relocation
    // doesn't really make sense. The most likely case is either a) someone
    // screwed up the address space usage or b) you're validating against
    // compiled C++ code w/o the proper separation. The only real exception
    // is a null pointer. You could have generic code written to index off
    // a potentially null value and have proven it null. We also use
    // null pointers in dead paths of relocation phis (which we might later
    // want to find a base pointer for).
    assert(isa<ConstantPointerNull>(Con) &&
           "null is the only case which makes sense");
    return Con;
  }

  if (CastInst *CI = dyn_cast<CastInst>(I)) {
    Value *Def = CI->stripPointerCasts();
    // If we find a cast instruction here, it means we've found a cast which
    // is not simply a pointer cast (i.e. an inttoptr). We don't know how to
    // handle int->ptr conversion.
    assert(!isa<CastInst>(Def) && "shouldn't find another cast here");
    return findBaseDefiningValue(Def);
  }

  if (isa<LoadInst>(I))
    return I; // The value loaded is a GC base itself

  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I))
    // The base of this GEP is the base
    return findBaseDefiningValue(GEP->getPointerOperand());

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::experimental_gc_result_ptr:
    default:
      // fall through to general call handling
      break;
    case Intrinsic::experimental_gc_statepoint:
    case Intrinsic::experimental_gc_result_float:
    case Intrinsic::experimental_gc_result_int:
      llvm_unreachable("these don't produce pointers");
    case Intrinsic::experimental_gc_relocate: {
      // Rerunning safepoint insertion after safepoints are already
      // inserted is not supported. It could probably be made to work,
      // but why are you doing this? There's no good reason.
      llvm_unreachable("repeat safepoint insertion is not supported");
    }
    case Intrinsic::gcroot:
      // Currently, this mechanism hasn't been extended to work with gcroot.
      // There's no reason it couldn't be, but I haven't thought about the
      // implications much.
      llvm_unreachable(
          "interaction with the gcroot mechanism is not supported");
    }
  }
  // We assume that functions in the source language only return base
  // pointers. This should probably be generalized via attributes to support
  // both source language and internal functions.
  if (isa<CallInst>(I) || isa<InvokeInst>(I))
    return I;

  // I have absolutely no idea how to implement this part yet. It's not
  // necessarily hard, I just haven't really looked at it yet.
  assert(!isa<LandingPadInst>(I) && "Landing Pad is unimplemented");

  if (isa<AtomicCmpXchgInst>(I))
    // A CAS is effectively an atomic store and load combined under a
    // predicate. From the perspective of base pointers, we just treat it
    // like a load.
    return I;

  assert(!isa<AtomicRMWInst>(I) && "Xchg handled above, all others are "
                                   "binary ops which don't apply to pointers");

  // The aggregate ops. Aggregates can either be in the heap or on the
  // stack, but in either case, this is simply a field load. As a result,
  // this counts as a definition of the base, just like a load does.
  if (isa<ExtractValueInst>(I))
    return I;

  // We should never see an insertvalue since that would require we be
  // tracing back a struct value, not a pointer value.
  assert(!isa<InsertValueInst>(I) &&
         "Base pointer for a struct is meaningless");

  // The last two cases here don't return a base pointer. Instead, they
  // return a value which dynamically selects from among several base
  // derived pointers (each potentially with its own base). It's the job of
  // the caller to resolve these.
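  // For instance (illustrative IR):
  //   %merged = phi i8 addrspace(1)* [ %a, %left ], [ %b, %right ]
  // blocks the simple search; we return %merged itself and let the caller
  // (findBasePointer) work out a base for each incoming value.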
  assert((isa<SelectInst>(I) || isa<PHINode>(I)) &&
         "missing instruction case in findBaseDefiningValue");
  return I;
}

/// Returns the base defining value for this value.
static Value *findBaseDefiningValueCached(Value *I, DefiningValueMapTy &Cache) {
  Value *&Cached = Cache[I];
  if (!Cached) {
    Cached = findBaseDefiningValue(I);
  }
  assert(Cache[I] != nullptr);

  if (TraceLSP) {
    dbgs() << "fBDV-cached: " << I->getName() << " -> " << Cached->getName()
           << "\n";
  }
  return Cached;
}

/// Return a base pointer for this value if known. Otherwise, return its
/// base defining value.
static Value *findBaseOrBDV(Value *I, DefiningValueMapTy &Cache) {
  Value *Def = findBaseDefiningValueCached(I, Cache);
  auto Found = Cache.find(Def);
  if (Found != Cache.end()) {
    // Either a base-of relation, or a self reference. Caller must check.
    return Found->second;
  }
  // Only a BDV available
  return Def;
}

/// Given the result of a call to findBaseDefiningValue, or findBaseOrBDV,
/// is it known to be a base pointer? Or do we need to continue searching?
static bool isKnownBaseResult(Value *V) {
  if (!isa<PHINode>(V) && !isa<SelectInst>(V)) {
    // no recursion possible
    return true;
  }
  if (isa<Instruction>(V) &&
      cast<Instruction>(V)->getMetadata("is_base_value")) {
    // This is a previously inserted base phi or select. We know
    // that this is a base value.
    return true;
  }

  // We need to keep searching
  return false;
}

// TODO: find a better name for this
namespace {
class PhiState {
public:
  enum Status { Unknown, Base, Conflict };

  PhiState(Status s, Value *b = nullptr) : status(s), base(b) {
    assert(status != Base || b);
  }
  PhiState(Value *b) : status(Base), base(b) {}
  PhiState() : status(Unknown), base(nullptr) {}

  Status getStatus() const { return status; }
  Value *getBase() const { return base; }

  bool isBase() const { return getStatus() == Base; }
  bool isUnknown() const { return getStatus() == Unknown; }
  bool isConflict() const { return getStatus() == Conflict; }

  bool operator==(const PhiState &other) const {
    return base == other.base && status == other.status;
  }

  bool operator!=(const PhiState &other) const { return !(*this == other); }

  void dump() {
    errs() << status << " (" << base << " - "
           << (base ? base->getName() : "nullptr") << "): ";
  }

private:
  Status status;
  Value *base; // non-null only if status == Base
};

typedef DenseMap<Value *, PhiState> ConflictStateMapTy;
// Values of type PhiState form a lattice, and this is a helper
// class that implements the meet operation. The meat of the meet
// operation is implemented in MeetPhiStates::pureMeet
class MeetPhiStates {
public:
  // phiStates is a mapping from PHINodes and SelectInst's to PhiStates.
  explicit MeetPhiStates(const ConflictStateMapTy &phiStates)
      : phiStates(phiStates) {}
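
  // As a quick reference, the meet implemented by pureMeet below behaves as:
  //   Unknown  ^ X        = X
  //   Base(b)  ^ Base(b)  = Base(b)
  //   Base(b1) ^ Base(b2) = Conflict   (when b1 != b2)
  //   Conflict ^ X        = Conflict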

  // Destructively meet the current result with the base V. V can
  // either be a merge instruction (SelectInst / PHINode), in which
  // case its status is looked up in the phiStates map; or a regular
  // SSA value, in which case it is assumed to be a base.
  void meetWith(Value *V) {
    PhiState otherState = getStateForBDV(V);
    assert((MeetPhiStates::pureMeet(otherState, currentResult) ==
            MeetPhiStates::pureMeet(currentResult, otherState)) &&
           "math is wrong: meet does not commute!");
    currentResult = MeetPhiStates::pureMeet(otherState, currentResult);
  }

  PhiState getResult() const { return currentResult; }

private:
  const ConflictStateMapTy &phiStates;
  PhiState currentResult;

  /// Return a phi state for a base defining value. We'll generate a new
  /// base state for known bases and expect to find a cached state otherwise.
  PhiState getStateForBDV(Value *baseValue) {
    if (isKnownBaseResult(baseValue)) {
      return PhiState(baseValue);
    } else {
      return lookupFromMap(baseValue);
    }
  }

  PhiState lookupFromMap(Value *V) {
    auto I = phiStates.find(V);
    assert(I != phiStates.end() && "lookup failed!");
    return I->second;
  }

  static PhiState pureMeet(const PhiState &stateA, const PhiState &stateB) {
    switch (stateA.getStatus()) {
    case PhiState::Unknown:
      return stateB;

    case PhiState::Base:
      assert(stateA.getBase() && "can't be null");
      if (stateB.isUnknown())
        return stateA;

      if (stateB.isBase()) {
        if (stateA.getBase() == stateB.getBase()) {
          assert(stateA == stateB && "equality broken!");
          return stateA;
        }
        return PhiState(PhiState::Conflict);
      }
      assert(stateB.isConflict() && "only three states!");
      return PhiState(PhiState::Conflict);

    case PhiState::Conflict:
      return stateA;
    }
    llvm_unreachable("only three states!");
  }
};
}

/// For a given value or instruction, figure out what base ptr it's derived
/// from. For gc objects, this is simply itself. On success, returns a value
/// which is the base pointer. (This is reliable and can be used for
/// relocation.) On failure, returns nullptr.
static Value *findBasePointer(Value *I, DefiningValueMapTy &cache,
                              DenseSet<llvm::Value *> &NewInsertedDefs) {
  Value *def = findBaseOrBDV(I, cache);

  if (isKnownBaseResult(def)) {
    return def;
  }

  // Here's the rough algorithm:
  // - For every SSA value, construct a mapping to either an actual base
  //   pointer or a PHI which obscures the base pointer.
  // - Construct a mapping from PHI to unknown TOP state. Use an
  //   optimistic algorithm to propagate base pointer information. Lattice
  //   looks like:
  //        UNKNOWN
  //     b1  b2  b3  b4
  //        CONFLICT
  //   When the algorithm terminates, all PHIs will either have a single
  //   concrete base or be in a conflict state.
  // - For every conflict, insert a dummy PHI node without arguments. Add
  //   these to the base[Instruction] = BasePtr mapping. For every
  //   non-conflict, add the actual base.
  // - For every conflict, add arguments for the base[a] of each input
  //   argument.
  //
  // Note: A simpler form of this would be to add the conflict form of all
  // PHIs without running the optimistic algorithm. This would be
  // analogous to pessimistic data flow and would likely lead to an
  // overall worse solution.
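  //
  // Worked example (illustrative, names invented): given
  //   %p = phi i8 addrspace(1)* [ %a, %L ], [ %b, %R ]
  // where %a and %b are distinct known bases, %p meets
  // Base(%a) ^ Base(%b) = Conflict, so we materialize
  //   %base_phi = phi i8 addrspace(1)* [ %a, %L ], [ %b, %R ]
  // (tagged with is_base_value metadata) and record base[%p] = %base_phi.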

  ConflictStateMapTy states;
  states[def] = PhiState();
  // Recursively fill in all phis & selects reachable from the initial one
  // for which we don't already know a definite base value.
  // TODO: This should be rewritten with a worklist
  bool done = false;
  while (!done) {
    done = true;
    // Since we're adding elements to 'states' as we run, we can't keep
    // iterators into the set.
    SmallVector<Value *, 16> Keys;
    Keys.reserve(states.size());
    for (auto Pair : states) {
      Value *V = Pair.first;
      Keys.push_back(V);
    }
    for (Value *v : Keys) {
      assert(!isKnownBaseResult(v) && "why did it get added?");
      if (PHINode *phi = dyn_cast<PHINode>(v)) {
        assert(phi->getNumIncomingValues() > 0 &&
               "zero input phis are illegal");
        for (Value *InVal : phi->incoming_values()) {
          Value *local = findBaseOrBDV(InVal, cache);
          if (!isKnownBaseResult(local) &&
              states.find(local) == states.end()) {
            states[local] = PhiState();
            done = false;
          }
        }
      } else if (SelectInst *sel = dyn_cast<SelectInst>(v)) {
        Value *local = findBaseOrBDV(sel->getTrueValue(), cache);
        if (!isKnownBaseResult(local) && states.find(local) == states.end()) {
          states[local] = PhiState();
          done = false;
        }
        local = findBaseOrBDV(sel->getFalseValue(), cache);
        if (!isKnownBaseResult(local) && states.find(local) == states.end()) {
          states[local] = PhiState();
          done = false;
        }
      }
    }
  }

  if (TraceLSP) {
    errs() << "States after initialization:\n";
    for (auto Pair : states) {
      Instruction *v = cast<Instruction>(Pair.first);
      PhiState state = Pair.second;
      state.dump();
      v->dump();
    }
  }

  // TODO: come back and revisit the state transitions around inputs which
  // have reached conflict state. The current version seems too conservative.

  bool progress = true;
  while (progress) {
#ifndef NDEBUG
    size_t oldSize = states.size();
#endif
    progress = false;
    // We're only changing values in this loop (not inserting keys), thus
    // it's safe to keep iterators.
    for (auto Pair : states) {
      MeetPhiStates calculateMeet(states);
      Value *v = Pair.first;
      assert(!isKnownBaseResult(v) && "why did it get added?");
      if (SelectInst *select = dyn_cast<SelectInst>(v)) {
        calculateMeet.meetWith(findBaseOrBDV(select->getTrueValue(), cache));
        calculateMeet.meetWith(findBaseOrBDV(select->getFalseValue(), cache));
      } else
        for (Value *Val : cast<PHINode>(v)->incoming_values())
          calculateMeet.meetWith(findBaseOrBDV(Val, cache));

      PhiState oldState = states[v];
      PhiState newState = calculateMeet.getResult();
      if (oldState != newState) {
        progress = true;
        states[v] = newState;
      }
    }

    assert(oldSize <= states.size());
    assert(oldSize == states.size() || progress);
  }

  if (TraceLSP) {
    errs() << "States after meet iteration:\n";
    for (auto Pair : states) {
      Instruction *v = cast<Instruction>(Pair.first);
      PhiState state = Pair.second;
      state.dump();
      v->dump();
    }
  }
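
  // The PHIs and selects materialized below are tagged with "is_base_value"
  // metadata so that isKnownBaseResult recognizes them on later queries. In
  // textual IR this looks roughly like (illustrative):
  //   %base_phi = phi i8 addrspace(1)* ..., !is_base_value !0
  //   !0 = !{i32 1}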

  // Insert Phis for all conflicts.
  // We want to keep naming deterministic in the loop that follows, so
  // sort the keys before iteration. This is useful in allowing us to
  // write stable tests. Note that there is no invalidation issue here.
  SmallVector<Value *, 16> Keys;
  Keys.reserve(states.size());
  for (auto Pair : states) {
    Value *V = Pair.first;
    Keys.push_back(V);
  }
  std::sort(Keys.begin(), Keys.end(), order_by_name);
  // TODO: adjust naming patterns to avoid this order of iteration dependency
  for (Value *V : Keys) {
    Instruction *v = cast<Instruction>(V);
    PhiState state = states[V];
    assert(!isKnownBaseResult(v) && "why did it get added?");
    assert(!state.isUnknown() && "Optimistic algorithm didn't complete!");
    if (!state.isConflict())
      continue;

    if (isa<PHINode>(v)) {
      int num_preds =
          std::distance(pred_begin(v->getParent()), pred_end(v->getParent()));
      assert(num_preds > 0 && "how did we reach here");
      PHINode *phi = PHINode::Create(v->getType(), num_preds, "base_phi", v);
      NewInsertedDefs.insert(phi);
      // Add metadata marking this as a base value
      auto *const_1 = ConstantInt::get(
          Type::getInt32Ty(
              v->getParent()->getParent()->getParent()->getContext()),
          1);
      auto MDConst = ConstantAsMetadata::get(const_1);
      MDNode *md = MDNode::get(
          v->getParent()->getParent()->getParent()->getContext(), MDConst);
      phi->setMetadata("is_base_value", md);
      states[v] = PhiState(PhiState::Conflict, phi);
    } else {
      SelectInst *sel = cast<SelectInst>(v);
      // The undef will be replaced later
      UndefValue *undef = UndefValue::get(sel->getType());
      SelectInst *basesel = SelectInst::Create(sel->getCondition(), undef,
                                               undef, "base_select", sel);
      NewInsertedDefs.insert(basesel);
      // Add metadata marking this as a base value
      auto *const_1 = ConstantInt::get(
          Type::getInt32Ty(
              v->getParent()->getParent()->getParent()->getContext()),
          1);
      auto MDConst = ConstantAsMetadata::get(const_1);
      MDNode *md = MDNode::get(
          v->getParent()->getParent()->getParent()->getContext(), MDConst);
      basesel->setMetadata("is_base_value", md);
      states[v] = PhiState(PhiState::Conflict, basesel);
    }
  }

  // Fixup all the inputs of the new PHIs
  for (auto Pair : states) {
    Instruction *v = cast<Instruction>(Pair.first);
    PhiState state = Pair.second;

    assert(!isKnownBaseResult(v) && "why did it get added?");
    assert(!state.isUnknown() && "Optimistic algorithm didn't complete!");
    if (!state.isConflict())
      continue;

    if (PHINode *basephi = dyn_cast<PHINode>(state.getBase())) {
      PHINode *phi = cast<PHINode>(v);
      unsigned NumPHIValues = phi->getNumIncomingValues();
      for (unsigned i = 0; i < NumPHIValues; i++) {
        Value *InVal = phi->getIncomingValue(i);
        BasicBlock *InBB = phi->getIncomingBlock(i);

        // If we've already seen InBB, add the same incoming value
        // we added for it earlier. The IR verifier requires phi
        // nodes with multiple entries from the same basic block
        // to have the same incoming value for each of those
        // entries. If we don't do this check here and basephi
        // has a different type than base, we'll end up adding two
        // bitcasts (and hence two distinct values) as incoming
        // values for the same basic block.

        int blockIndex = basephi->getBasicBlockIndex(InBB);
        if (blockIndex != -1) {
          Value *oldBase = basephi->getIncomingValue(blockIndex);
          basephi->addIncoming(oldBase, InBB);
#ifndef NDEBUG
          Value *base = findBaseOrBDV(InVal, cache);
          if (!isKnownBaseResult(base)) {
            // Either conflict or base.
            assert(states.count(base));
            base = states[base].getBase();
            assert(base != nullptr && "unknown PhiState!");
            assert(NewInsertedDefs.count(base) &&
                   "should have already added this in a prev. iteration!");
          }

          // In essence this assert states: the only way two
          // values incoming from the same basic block may be
          // different is by being different bitcasts of the same
          // value. A cleanup that remains TODO is changing
          // findBaseOrBDV to return an llvm::Value of the correct
          // type (and still remain pure). This will remove the
          // need to add bitcasts.
          assert(base->stripPointerCasts() == oldBase->stripPointerCasts() &&
                 "sanity -- findBaseOrBDV should be pure!");
#endif
          continue;
        }

        // Find either the defining value for the PHI or the normal base for
        // a non-phi node.
        Value *base = findBaseOrBDV(InVal, cache);
        if (!isKnownBaseResult(base)) {
          // Either conflict or base.
          assert(states.count(base));
          base = states[base].getBase();
          assert(base != nullptr && "unknown PhiState!");
        }
        assert(base && "can't be null");
        // Must use the original input BB since base may not be an
        // Instruction. The cast is needed since base traversal may strip
        // away bitcasts.
        if (base->getType() != basephi->getType()) {
          base = new BitCastInst(base, basephi->getType(), "cast",
                                 InBB->getTerminator());
          NewInsertedDefs.insert(base);
        }
        basephi->addIncoming(base, InBB);
      }
      assert(basephi->getNumIncomingValues() == NumPHIValues);
    } else {
      SelectInst *basesel = cast<SelectInst>(state.getBase());
      SelectInst *sel = cast<SelectInst>(v);
      // Operands 1 & 2 are the true and false paths respectively. TODO:
      // refactor to something more safe and less hacky.
      for (int i = 1; i <= 2; i++) {
        Value *InVal = sel->getOperand(i);
        // Find either the defining value for the PHI or the normal base for
        // a non-phi node.
        Value *base = findBaseOrBDV(InVal, cache);
        if (!isKnownBaseResult(base)) {
          // Either conflict or base.
          assert(states.count(base));
          base = states[base].getBase();
          assert(base != nullptr && "unknown PhiState!");
        }
        assert(base && "can't be null");
        // Must use the original input BB since base may not be an
        // Instruction. The cast is needed since base traversal may strip
        // away bitcasts.
        if (base->getType() != basesel->getType()) {
          base = new BitCastInst(base, basesel->getType(), "cast", basesel);
          NewInsertedDefs.insert(base);
        }
        basesel->setOperand(i, base);
      }
    }
  }

  // Cache all of our results so we can cheaply reuse them.
  // NOTE: This is actually two caches: one of the base defining value
  // relation and one of the base pointer relation! FIXME
  for (auto item : states) {
    Value *v = item.first;
    Value *base = item.second.getBase();
    assert(v && base);
    assert(!isKnownBaseResult(v) && "why did it get added?");

    if (TraceLSP) {
      std::string fromstr =
          cache.count(v) ? (cache[v]->hasName() ? cache[v]->getName() : "")
                         : "none";
      errs() << "Updating base value cache"
             << " for: " << (v->hasName() ? v->getName() : "")
             << " from: " << fromstr
             << " to: " << (base->hasName() ? base->getName() : "") << "\n";
    }

    assert(isKnownBaseResult(base) &&
           "must be something we 'know' is a base pointer");
    if (cache.count(v)) {
      // Once we transition from the BDV relation being stored in the cache
      // to the base relation being stored, it must be stable.
      assert((!isKnownBaseResult(cache[v]) || cache[v] == base) &&
             "base relation should be stable");
    }
    cache[v] = base;
  }
  assert(cache.find(def) != cache.end());
  return cache[def];
}

// For a set of live pointers (base and/or derived), identify the base
// pointer of the object which they are derived from. This routine will
// mutate the IR graph as needed to make the 'base' pointer live at the
// definition site of 'derived'. This ensures that any use of 'derived' can
// also use 'base'. This may involve the insertion of a number of
// additional PHI nodes.
//
// preconditions: live is a set of pointer type Values
//
// side effects: may insert PHI nodes into the existing CFG, will preserve
// CFG, will not remove or mutate any existing nodes
//
// post condition: PointerToBase contains one (derived, base) pair for every
// pointer in live. Note that derived can be equal to base if the original
// pointer was a base pointer.
static void
findBasePointers(const StatepointLiveSetTy &live,
                 DenseMap<llvm::Value *, llvm::Value *> &PointerToBase,
                 DominatorTree *DT, DefiningValueMapTy &DVCache,
                 DenseSet<llvm::Value *> &NewInsertedDefs) {
  // For the naming of values inserted to be deterministic - which makes for
  // much cleaner and more stable tests - we need to assign an order to the
  // live values. DenseSets do not provide a deterministic order across runs.
  SmallVector<Value *, 64> Temp;
  Temp.insert(Temp.end(), live.begin(), live.end());
  std::sort(Temp.begin(), Temp.end(), order_by_name);
  for (Value *ptr : Temp) {
    Value *base = findBasePointer(ptr, DVCache, NewInsertedDefs);
    assert(base && "failed to find base pointer");
    PointerToBase[ptr] = base;
    assert((!isa<Instruction>(base) || !isa<Instruction>(ptr) ||
            DT->dominates(cast<Instruction>(base)->getParent(),
                          cast<Instruction>(ptr)->getParent())) &&
           "The base we found better dominate the derived pointer");

    // If you see this trip and like to live really dangerously, the code
    // should be correct, just with idioms the verifier can't handle. You
    // can try disabling the verifier at your own substantial risk.
    assert(!isa<ConstantPointerNull>(base) &&
           "the relocation code needs adjustment to handle the relocation of "
           "a null pointer constant without causing false positives in the "
           "safepoint ir verifier.");
  }
}

/// Find the required base pointers (and adjust the live set) for the given
/// parse point.
static void findBasePointers(DominatorTree &DT, DefiningValueMapTy &DVCache,
                             const CallSite &CS,
                             PartiallyConstructedSafepointRecord &result) {
  DenseMap<llvm::Value *, llvm::Value *> PointerToBase;
  DenseSet<llvm::Value *> NewInsertedDefs;
  findBasePointers(result.liveset, PointerToBase, &DT, DVCache,
                   NewInsertedDefs);
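
  // With -spp-print-base-pointers, the pairs found above are printed in the
  // form (illustrative values):
  //   derived %next base %obj
  // where derived == base for values which are themselves bases.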
952 errs() << "Base Pairs (w/o Relocation):\n"; 953 SmallVector<Value *, 64> Temp; 954 Temp.reserve(PointerToBase.size()); 955 for (auto Pair : PointerToBase) { 956 Temp.push_back(Pair.first); 957 } 958 std::sort(Temp.begin(), Temp.end(), order_by_name); 959 for (Value *Ptr : Temp) { 960 Value *Base = PointerToBase[Ptr]; 961 errs() << " derived %" << Ptr->getName() << " base %" << Base->getName() 962 << "\n"; 963 } 964 } 965 966 result.PointerToBase = PointerToBase; 967 result.NewInsertedDefs = NewInsertedDefs; 968 } 969 970 /// Given an updated version of the dataflow liveness results, update the 971 /// liveset and base pointer maps for the call site CS. 972 static void recomputeLiveInValues(GCPtrLivenessData &RevisedLivenessData, 973 const CallSite &CS, 974 PartiallyConstructedSafepointRecord &result); 975 976 static void recomputeLiveInValues( 977 Function &F, DominatorTree &DT, Pass *P, ArrayRef<CallSite> toUpdate, 978 MutableArrayRef<struct PartiallyConstructedSafepointRecord> records) { 979 // TODO-PERF: reuse the original liveness, then simply run the dataflow 980 // again. The old values are still live and will help it stablize quickly. 981 GCPtrLivenessData RevisedLivenessData; 982 computeLiveInValues(DT, F, RevisedLivenessData); 983 for (size_t i = 0; i < records.size(); i++) { 984 struct PartiallyConstructedSafepointRecord &info = records[i]; 985 const CallSite &CS = toUpdate[i]; 986 recomputeLiveInValues(RevisedLivenessData, CS, info); 987 } 988 } 989 990 // Normalize basic block to make it ready to be target of invoke statepoint. 991 // It means spliting it to have single predecessor. Return newly created BB 992 // ready to be successor of invoke statepoint. 993 static BasicBlock *normalizeBBForInvokeSafepoint(BasicBlock *BB, 994 BasicBlock *InvokeParent, 995 Pass *P) { 996 BasicBlock *ret = BB; 997 998 if (!BB->getUniquePredecessor()) { 999 ret = SplitBlockPredecessors(BB, InvokeParent, ""); 1000 } 1001 1002 // Another requirement for such basic blocks is to not have any phi nodes. 1003 // Since we just ensured that new BB will have single predecessor, 1004 // all phi nodes in it will have one value. Here it would be naturall place 1005 // to 1006 // remove them all. But we can not do this because we are risking to remove 1007 // one of the values stored in liveset of another statepoint. We will do it 1008 // later after placing all safepoints. 1009 1010 return ret; 1011 } 1012 1013 static int find_index(ArrayRef<Value *> livevec, Value *val) { 1014 auto itr = std::find(livevec.begin(), livevec.end(), val); 1015 assert(livevec.end() != itr); 1016 size_t index = std::distance(livevec.begin(), itr); 1017 assert(index < livevec.size()); 1018 return index; 1019 } 1020 1021 // Create new attribute set containing only attributes which can be transfered 1022 // from original call to the safepoint. 1023 static AttributeSet legalizeCallAttributes(AttributeSet AS) { 1024 AttributeSet ret; 1025 1026 for (unsigned Slot = 0; Slot < AS.getNumSlots(); Slot++) { 1027 unsigned index = AS.getSlotIndex(Slot); 1028 1029 if (index == AttributeSet::ReturnIndex || 1030 index == AttributeSet::FunctionIndex) { 1031 1032 for (auto it = AS.begin(Slot), it_end = AS.end(Slot); it != it_end; 1033 ++it) { 1034 Attribute attr = *it; 1035 1036 // Do not allow certain attributes - just skip them 1037 // Safepoint can not be read only or read none. 

        // Do not allow certain attributes - just skip them.
        // A safepoint cannot be readonly or readnone, since it may relocate
        // objects (i.e. it writes memory).
        if (attr.hasAttribute(Attribute::ReadNone) ||
            attr.hasAttribute(Attribute::ReadOnly))
          continue;

        ret = ret.addAttributes(
            AS.getContext(), index,
            AttributeSet::get(AS.getContext(), index, AttrBuilder(attr)));
      }
    }

    // Just skip parameter attributes for now
  }

  return ret;
}

/// Helper function to place all gc relocates necessary for the given
/// statepoint.
/// Inputs:
///   liveVariables - list of variables to be relocated.
///   liveStart - index of the first live variable.
///   basePtrs - base pointers.
///   statepointToken - statepoint instruction to which relocates should be
///     bound.
///   Builder - LLVM IR builder to be used to construct new calls.
static void CreateGCRelocates(ArrayRef<llvm::Value *> liveVariables,
                              const int liveStart,
                              ArrayRef<llvm::Value *> basePtrs,
                              Instruction *statepointToken,
                              IRBuilder<> Builder) {
  SmallVector<Instruction *, 64> NewDefs;
  NewDefs.reserve(liveVariables.size());

  Module *M = statepointToken->getParent()->getParent()->getParent();

  for (unsigned i = 0; i < liveVariables.size(); i++) {
    // We generate a (potentially) unique declaration for every pointer type
    // combination. This results in some blowup in the function declarations
    // in the IR, but removes the need for argument bitcasts which shrinks
    // the IR greatly and makes it much more readable.
    SmallVector<Type *, 1> types;                 // one per 'any' type
    types.push_back(liveVariables[i]->getType()); // result type
    Value *gc_relocate_decl = Intrinsic::getDeclaration(
        M, Intrinsic::experimental_gc_relocate, types);

    // Generate the gc.relocate call and save the result
    Value *baseIdx =
        ConstantInt::get(Type::getInt32Ty(M->getContext()),
                         liveStart + find_index(liveVariables, basePtrs[i]));
    Value *liveIdx = ConstantInt::get(
        Type::getInt32Ty(M->getContext()),
        liveStart + find_index(liveVariables, liveVariables[i]));

    // Only specify a debug name if we can give a useful one.
    Value *reloc = Builder.CreateCall3(
        gc_relocate_decl, statepointToken, baseIdx, liveIdx,
        liveVariables[i]->hasName() ? liveVariables[i]->getName() + ".relocated"
                                    : "");
    // Trick CodeGen into thinking there are lots of free registers at this
    // fake call.
    cast<CallInst>(reloc)->setCallingConv(CallingConv::Cold);

    NewDefs.push_back(cast<Instruction>(reloc));
  }
  assert(NewDefs.size() == liveVariables.size() &&
         "missing or extra redefinition at safepoint");
}

static void
makeStatepointExplicitImpl(const CallSite &CS, /* to replace */
                           const SmallVectorImpl<llvm::Value *> &basePtrs,
                           const SmallVectorImpl<llvm::Value *> &liveVariables,
                           Pass *P,
                           PartiallyConstructedSafepointRecord &result) {
  assert(basePtrs.size() == liveVariables.size());
  assert(isStatepoint(CS) &&
         "This method expects to be rewriting a statepoint");

  BasicBlock *BB = CS.getInstruction()->getParent();
  assert(BB);
  Function *F = BB->getParent();
  assert(F && "must be set");
  Module *M = F->getParent();
  (void)M;
  assert(M && "must be set");
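
  // As a rough sketch (exact operand encoding aside), the rewritten
  // statepoint's argument list looks like:
  //   (target, <call args...>, <deopt args...>, <gc pointers...>)
  // where the gc pointers are appended below and live_start records the
  // index of the first of them, so each gc.relocate can name its base and
  // derived pointer by index into this list.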
  // We're not changing the function signature of the statepoint since the gc
  // arguments go into the var args section.
  Function *gc_statepoint_decl = CS.getCalledFunction();

  // Then go ahead and use the builder to actually do the inserts. We insert
  // immediately before the previous instruction under the assumption that
  // all arguments will be available here. We can't insert afterwards since
  // we may be replacing a terminator.
  Instruction *insertBefore = CS.getInstruction();
  IRBuilder<> Builder(insertBefore);
  // Copy all of the arguments from the original statepoint - this includes
  // the target, call args, and deopt args.
  SmallVector<llvm::Value *, 64> args;
  args.insert(args.end(), CS.arg_begin(), CS.arg_end());
  // TODO: Clear the 'needs rewrite' flag

  // Add all the pointers to be relocated (gc arguments).
  // Capture the start of the live variable list for use in the
  // gc_relocates.
  const int live_start = args.size();
  args.insert(args.end(), liveVariables.begin(), liveVariables.end());

  // Create the statepoint given all the arguments
  Instruction *token = nullptr;
  AttributeSet return_attributes;
  if (CS.isCall()) {
    CallInst *toReplace = cast<CallInst>(CS.getInstruction());
    CallInst *call =
        Builder.CreateCall(gc_statepoint_decl, args, "safepoint_token");
    call->setTailCall(toReplace->isTailCall());
    call->setCallingConv(toReplace->getCallingConv());

    // Currently we will fail on parameter attributes and on certain
    // function attributes.
    AttributeSet new_attrs = legalizeCallAttributes(toReplace->getAttributes());
    // If we can handle this set of attributes, set up the function attrs
    // directly on the statepoint and the return attrs later for the
    // gc_result intrinsic.
    call->setAttributes(new_attrs.getFnAttributes());
    return_attributes = new_attrs.getRetAttributes();

    token = call;

    // Put the following gc_result and gc_relocate calls immediately after
    // the old call (which we're about to delete).
    BasicBlock::iterator next(toReplace);
    assert(BB->end() != next && "not a terminator, must have next");
    next++;
    Instruction *IP = &*(next);
    Builder.SetInsertPoint(IP);
    Builder.SetCurrentDebugLocation(IP->getDebugLoc());

  } else {
    InvokeInst *toReplace = cast<InvokeInst>(CS.getInstruction());

    // Insert the new invoke into the old block. We'll remove the old one in
    // a moment at which point this will become the new terminator for the
    // original block.
    InvokeInst *invoke = InvokeInst::Create(
        gc_statepoint_decl, toReplace->getNormalDest(),
        toReplace->getUnwindDest(), args, "", toReplace->getParent());
    invoke->setCallingConv(toReplace->getCallingConv());

    // Currently we will fail on parameter attributes and on certain
    // function attributes.
    AttributeSet new_attrs = legalizeCallAttributes(toReplace->getAttributes());
    // If we can handle this set of attributes, set up the function attrs
    // directly on the statepoint and the return attrs later for the
    // gc_result intrinsic.
    invoke->setAttributes(new_attrs.getFnAttributes());
    return_attributes = new_attrs.getRetAttributes();

    token = invoke;

    // Generate gc relocates in the exceptional path.
    BasicBlock *unwindBlock = normalizeBBForInvokeSafepoint(
        toReplace->getUnwindDest(), invoke->getParent(), P);

    Instruction *IP = &*(unwindBlock->getFirstInsertionPt());
    Builder.SetInsertPoint(IP);
    Builder.SetCurrentDebugLocation(toReplace->getDebugLoc());

    // Extract the second element from the landingpad return value. We will
    // attach the exceptional gc relocates to it.
    const unsigned idx = 1;
    Instruction *exceptional_token =
        cast<Instruction>(Builder.CreateExtractValue(
            unwindBlock->getLandingPadInst(), idx, "relocate_token"));
    result.UnwindToken = exceptional_token;

    // Just throw away the return value. We will use the one we got for the
    // normal block.
    (void)CreateGCRelocates(liveVariables, live_start, basePtrs,
                            exceptional_token, Builder);

    // Generate gc relocates and returns for the normal block.
    BasicBlock *normalDest = normalizeBBForInvokeSafepoint(
        toReplace->getNormalDest(), invoke->getParent(), P);

    IP = &*(normalDest->getFirstInsertionPt());
    Builder.SetInsertPoint(IP);

    // gc relocates will be generated later as if it were a regular call
    // statepoint
  }
  assert(token);

  // Take the name of the original value call if it had one.
  token->takeName(CS.getInstruction());

  // The GCResult is already inserted, we just need to find it.
#ifndef NDEBUG
  Instruction *toReplace = CS.getInstruction();
  assert((toReplace->hasNUses(0) || toReplace->hasNUses(1)) &&
         "only valid use before rewrite is gc.result");
  assert(!toReplace->hasOneUse() ||
         isGCResult(cast<Instruction>(*toReplace->user_begin())));
#endif

  // Update the gc.result of the original statepoint (if any) to use the
  // newly inserted statepoint. This is safe to do here since the token
  // can't be considered a live reference.
  CS.getInstruction()->replaceAllUsesWith(token);

  result.StatepointToken = token;

  // Second, create a gc.relocate for every live variable
  CreateGCRelocates(liveVariables, live_start, basePtrs, token, Builder);
}

namespace {
struct name_ordering {
  Value *base;
  Value *derived;
  bool operator()(name_ordering const &a, name_ordering const &b) {
    return -1 == a.derived->getName().compare(b.derived->getName());
  }
};
}
static void stablize_order(SmallVectorImpl<Value *> &basevec,
                           SmallVectorImpl<Value *> &livevec) {
  assert(basevec.size() == livevec.size());

  SmallVector<name_ordering, 64> temp;
  for (size_t i = 0; i < basevec.size(); i++) {
    name_ordering v;
    v.base = basevec[i];
    v.derived = livevec[i];
    temp.push_back(v);
  }
  std::sort(temp.begin(), temp.end(), name_ordering());
  for (size_t i = 0; i < basevec.size(); i++) {
    basevec[i] = temp[i].base;
    livevec[i] = temp[i].derived;
  }
}
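
// For orientation, the net effect of the rewrite below on a simple call site
// looks roughly like (illustrative, names invented):
//   %tok = call i32 (...)* @llvm.experimental.gc.statepoint(..., %obj, %gep)
//   %obj.relocated = call coldcc i8 addrspace(1)*
//       @llvm.experimental.gc.relocate(i32 %tok, i32 <base idx>, i32 <gep idx>)
// Uses of the original live values after the safepoint are rewired to the
// relocated values later, by relocationViaAlloca.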

// Replace an existing gc.statepoint with a new one and a set of gc.relocates
// which make the relocations happening at this safepoint explicit.
//
// WARNING: Does not do any fixup to adjust users of the original live
// values. That's the caller's responsibility.
static void
makeStatepointExplicit(DominatorTree &DT, const CallSite &CS, Pass *P,
                       PartiallyConstructedSafepointRecord &result) {
  auto liveset = result.liveset;
  auto PointerToBase = result.PointerToBase;

  // Convert to vector for efficient cross referencing.
  SmallVector<Value *, 64> basevec, livevec;
  livevec.reserve(liveset.size());
  basevec.reserve(liveset.size());
  for (Value *L : liveset) {
    livevec.push_back(L);

    assert(PointerToBase.find(L) != PointerToBase.end());
    Value *base = PointerToBase[L];
    basevec.push_back(base);
  }
  assert(livevec.size() == basevec.size());

  // To make the output IR slightly more stable (for use in diffs), ensure a
  // fixed order of the values in the safepoint (by sorting the value names).
  // The order is otherwise meaningless.
  stablize_order(basevec, livevec);

  // Do the actual rewriting and delete the old statepoint
  makeStatepointExplicitImpl(CS, basevec, livevec, P, result);
  CS.getInstruction()->eraseFromParent();
}

// Helper function for relocationViaAlloca. It receives an iterator range
// over the statepoint's gc relocates and emits a store to the assigned
// location (via allocaMap) for each one of them. Visited values are added
// to the visitedLiveValues set, which we later use for a sanity check.
static void
insertRelocationStores(iterator_range<Value::user_iterator> gcRelocs,
                       DenseMap<Value *, Value *> &allocaMap,
                       DenseSet<Value *> &visitedLiveValues) {

  for (User *U : gcRelocs) {
    if (!isa<IntrinsicInst>(U))
      continue;

    IntrinsicInst *relocatedValue = cast<IntrinsicInst>(U);

    // We only care about relocates
    if (relocatedValue->getIntrinsicID() !=
        Intrinsic::experimental_gc_relocate) {
      continue;
    }

    GCRelocateOperands relocateOperands(relocatedValue);
    Value *originalValue = const_cast<Value *>(relocateOperands.derivedPtr());
    assert(allocaMap.count(originalValue));
    Value *alloca = allocaMap[originalValue];

    // Emit store into the related alloca
    StoreInst *store = new StoreInst(relocatedValue, alloca);
    store->insertAfter(relocatedValue);

#ifndef NDEBUG
    visitedLiveValues.insert(originalValue);
#endif
  }
}

/// Do all the relocation updates via allocas and mem2reg
static void relocationViaAlloca(
    Function &F, DominatorTree &DT, ArrayRef<Value *> live,
    ArrayRef<struct PartiallyConstructedSafepointRecord> records) {
#ifndef NDEBUG
  // Record the initial number of (static) allocas; we'll check that we have
  // the same number when we get done.
  int InitialAllocaNum = 0;
  for (auto I = F.getEntryBlock().begin(), E = F.getEntryBlock().end(); I != E;
       I++)
    if (isa<AllocaInst>(*I))
      InitialAllocaNum++;
#endif

  // TODO-PERF: change data structures, reserve
  DenseMap<Value *, Value *> allocaMap;
  SmallVector<AllocaInst *, 200> PromotableAllocas;
  PromotableAllocas.reserve(live.size());

  // Emit an alloca for each live gc pointer.
  for (unsigned i = 0; i < live.size(); i++) {
    Value *liveValue = live[i];
    AllocaInst *alloca = new AllocaInst(liveValue->getType(), "",
                                        F.getEntryBlock().getFirstNonPHI());
    allocaMap[liveValue] = alloca;
    PromotableAllocas.push_back(alloca);
  }
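
  // Sketch of the overall scheme (illustrative): for each live value %p we
  // now have a dedicated %p.alloca. Defs of %p (the original def plus every
  // gc.relocate of it) become stores to %p.alloca, and every use of %p
  // becomes a load from %p.alloca; mem2reg then folds the allocas back into
  // SSA form with the relocations made explicit.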

  // The next two loops are part of the same conceptual operation. We need
  // to insert a store to the alloca after the original def and at each
  // redefinition. We need to insert a load before each use. These are split
  // into distinct loops for performance reasons.

  // Update gc pointers after each statepoint: either store a relocated
  // value or null (if no relocated value was found for this gc pointer and
  // it is not a gc_result). This must happen before we update the statepoint
  // with a load of the alloca; otherwise we lose the link between the
  // statepoint and the old def.
  for (size_t i = 0; i < records.size(); i++) {
    const struct PartiallyConstructedSafepointRecord &info = records[i];
    Value *Statepoint = info.StatepointToken;

    // This will be used for a consistency check.
    DenseSet<Value *> visitedLiveValues;

    // Insert stores for normal statepoint gc relocates
    insertRelocationStores(Statepoint->users(), allocaMap, visitedLiveValues);

    // In case it was an invoke statepoint, we also insert stores for the
    // exceptional path gc relocates.
    if (isa<InvokeInst>(Statepoint)) {
      insertRelocationStores(info.UnwindToken->users(), allocaMap,
                             visitedLiveValues);
    }

#ifndef NDEBUG
    // As a debugging aid, pretend that an unrelocated pointer becomes null
    // at the gc.statepoint. This will turn some subtle GC problems into
    // slightly easier to debug SEGVs.
    SmallVector<AllocaInst *, 64> ToClobber;
    for (auto Pair : allocaMap) {
      Value *Def = Pair.first;
      AllocaInst *Alloca = cast<AllocaInst>(Pair.second);

      // This value was relocated
      if (visitedLiveValues.count(Def)) {
        continue;
      }
      ToClobber.push_back(Alloca);
    }

    auto InsertClobbersAt = [&](Instruction *IP) {
      for (auto *AI : ToClobber) {
        auto AIType = cast<PointerType>(AI->getType());
        auto PT = cast<PointerType>(AIType->getElementType());
        Constant *CPN = ConstantPointerNull::get(PT);
        StoreInst *store = new StoreInst(CPN, AI);
        store->insertBefore(IP);
      }
    };

    // Insert the clobbering stores. These may get intermixed with the
    // gc.results and gc.relocates, but that's fine.
    if (auto II = dyn_cast<InvokeInst>(Statepoint)) {
      InsertClobbersAt(II->getNormalDest()->getFirstInsertionPt());
      InsertClobbersAt(II->getUnwindDest()->getFirstInsertionPt());
    } else {
      BasicBlock::iterator Next(cast<CallInst>(Statepoint));
      Next++;
      InsertClobbersAt(Next);
    }
#endif
  }
  // Update uses with loads from the allocas (the stores for the relocated
  // values were added above).
  for (auto Pair : allocaMap) {
    Value *def = Pair.first;
    Value *alloca = Pair.second;

    // We pre-record the uses of allocas so that we don't have to worry
    // about later updates that change the user information.
    SmallVector<Instruction *, 20> uses;
    // PERF: trade a linear scan for repeated reallocation
    uses.reserve(std::distance(def->user_begin(), def->user_end()));
    for (User *U : def->users()) {
      if (!isa<ConstantExpr>(U)) {
        // If the def has a ConstantExpr use, then the def is either a
        // ConstantExpr use itself or null. In either case
        // (recursively in the first, directly in the second), the oop
        // it is ultimately dependent on is null and this particular
        // use does not need to be fixed up.
  // Update each use with a load from the alloca, and add a store for the
  // initial value of each def.
  for (auto Pair : allocaMap) {
    Value *def = Pair.first;
    Value *alloca = Pair.second;

    // We pre-record the uses of allocas so that we don't have to worry about
    // later updates that change the user information.
    SmallVector<Instruction *, 20> uses;
    // PERF: trade a linear scan for repeated reallocation
    uses.reserve(std::distance(def->user_begin(), def->user_end()));
    for (User *U : def->users()) {
      if (!isa<ConstantExpr>(U)) {
        // If the def has a ConstantExpr use, then the def is either a
        // ConstantExpr itself or null. In either case (recursively in the
        // first, directly in the second), the oop it is ultimately dependent
        // on is null and this particular use does not need to be fixed up.
        uses.push_back(cast<Instruction>(U));
      }
    }

    std::sort(uses.begin(), uses.end());
    auto last = std::unique(uses.begin(), uses.end());
    uses.erase(last, uses.end());

    for (Instruction *use : uses) {
      if (isa<PHINode>(use)) {
        PHINode *phi = cast<PHINode>(use);
        for (unsigned i = 0; i < phi->getNumIncomingValues(); i++) {
          if (def == phi->getIncomingValue(i)) {
            LoadInst *load = new LoadInst(
                alloca, "", phi->getIncomingBlock(i)->getTerminator());
            phi->setIncomingValue(i, load);
          }
        }
      } else {
        LoadInst *load = new LoadInst(alloca, "", use);
        use->replaceUsesOfWith(def, load);
      }
    }

    // Emit a store for the initial gc value. The store must be inserted
    // after the loads; otherwise the store would be in the alloca's use list
    // and an extra load would be inserted before it.
    StoreInst *store = new StoreInst(def, alloca);
    if (Instruction *inst = dyn_cast<Instruction>(def)) {
      if (InvokeInst *invoke = dyn_cast<InvokeInst>(inst)) {
        // InvokeInst is a TerminatorInst, so the store needs to be inserted
        // into its normal destination block.
        BasicBlock *normalDest = invoke->getNormalDest();
        store->insertBefore(normalDest->getFirstNonPHI());
      } else {
        assert(!inst->isTerminator() &&
               "The only TerminatorInst that can produce a value is "
               "InvokeInst which is handled above.");
        store->insertAfter(inst);
      }
    } else {
      assert((isa<Argument>(def) || isa<GlobalVariable>(def) ||
              isa<ConstantPointerNull>(def)) &&
             "Must be argument or global");
      store->insertAfter(cast<Instruction>(alloca));
    }
  }

  assert(PromotableAllocas.size() == live.size() &&
         "we must have the same number of allocas as live values");
  if (!PromotableAllocas.empty()) {
    // Apply mem2reg to promote each alloca back to SSA
    PromoteMemToReg(PromotableAllocas, DT);
  }

#ifndef NDEBUG
  for (auto I = F.getEntryBlock().begin(), E = F.getEntryBlock().end(); I != E;
       I++)
    if (isa<AllocaInst>(*I))
      InitialAllocaNum--;
  assert(InitialAllocaNum == 0 && "We must not introduce any extra allocas");
#endif
}

/// Implement a unique function which doesn't require that we sort the input
/// vector. Doing so has the effect of changing the output of a couple of
/// tests in ways which make them less useful in testing fused safepoints.
template <typename T> static void unique_unsorted(SmallVectorImpl<T> &Vec) {
  DenseSet<T> Seen;
  SmallVector<T, 128> TempVec;
  TempVec.reserve(Vec.size());
  for (auto Element : Vec)
    TempVec.push_back(Element);
  Vec.clear();
  for (auto V : TempVec) {
    if (Seen.insert(V).second) {
      Vec.push_back(V);
    }
  }
}

static Function *getUseHolder(Module &M) {
  FunctionType *ftype =
      FunctionType::get(Type::getVoidTy(M.getContext()), true);
  Function *Func = cast<Function>(M.getOrInsertFunction("__tmp_use", ftype));
  return Func;
}

/// Insert holders so that each Value is obviously live through the entire
/// lifetime of the call.
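/// The holders are calls to the dummy vararg function created by
/// getUseHolder; for a call safepoint, the result looks roughly like this
/// (illustrative IR only):
///   call ... @llvm.experimental.gc.statepoint(...)
///   call void (...)* @__tmp_use(i8 addrspace(1)* %p, i8 addrspace(1)* %q)
/// All holders are erased again before this pass finishes.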
static void insertUseHolderAfter(CallSite &CS, const ArrayRef<Value *> Values,
                                 SmallVectorImpl<CallInst *> &holders) {
  Module *M = CS.getInstruction()->getParent()->getParent()->getParent();
  Function *Func = getUseHolder(*M);
  if (CS.isCall()) {
    // For call safepoints, insert dummy calls right after the safepoint
    BasicBlock::iterator next(CS.getInstruction());
    next++;
    CallInst *base_holder = CallInst::Create(Func, Values, "", next);
    holders.push_back(base_holder);
  } else if (CS.isInvoke()) {
    // For invoke safepoints, insert dummy calls in both the normal and
    // exceptional destination blocks
    InvokeInst *invoke = cast<InvokeInst>(CS.getInstruction());
    CallInst *normal_holder = CallInst::Create(
        Func, Values, "", invoke->getNormalDest()->getFirstInsertionPt());
    CallInst *unwind_holder = CallInst::Create(
        Func, Values, "", invoke->getUnwindDest()->getFirstInsertionPt());
    holders.push_back(normal_holder);
    holders.push_back(unwind_holder);
  } else
    llvm_unreachable("unsupported call type");
}

static void findLiveReferences(
    Function &F, DominatorTree &DT, Pass *P, ArrayRef<CallSite> toUpdate,
    MutableArrayRef<struct PartiallyConstructedSafepointRecord> records) {
  GCPtrLivenessData OriginalLivenessData;
  computeLiveInValues(DT, F, OriginalLivenessData);
  for (size_t i = 0; i < records.size(); i++) {
    struct PartiallyConstructedSafepointRecord &info = records[i];
    const CallSite &CS = toUpdate[i];
    analyzeParsePointLiveness(DT, OriginalLivenessData, CS, info);
  }
}

/// Remove any vector of pointers from the liveset by scalarizing them over
/// the statepoint instruction. Adds the scalarized pieces to the liveset.
/// It would be preferable to include the vector in the statepoint itself,
/// but the lowering code currently does not handle that. Extending it would
/// be slightly non-trivial since it requires a format change. Given how
/// rare such cases are (for the moment?), scalarizing is an acceptable
/// compromise.
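/// Sketch of the rewrite on hypothetical IR: for a live
/// <2 x i8 addrspace(1)*> value %vec, we emit before the statepoint
///   %vec.0 = extractelement <2 x i8 addrspace(1)*> %vec, i32 0
///   %vec.1 = extractelement <2 x i8 addrspace(1)*> %vec, i32 1
/// add %vec.0 and %vec.1 to the live set in place of %vec, and rebuild the
/// vector with insertelement after the statepoint (in both destination
/// blocks for an invoke). Uses are then rewired through an alloca plus
/// mem2reg, much as in relocationViaAlloca.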
static void splitVectorValues(Instruction *StatepointInst,
                              StatepointLiveSetTy &LiveSet, DominatorTree &DT) {
  SmallVector<Value *, 16> ToSplit;
  for (Value *V : LiveSet)
    if (isa<VectorType>(V->getType()))
      ToSplit.push_back(V);

  if (ToSplit.empty())
    return;

  Function &F = *(StatepointInst->getParent()->getParent());

  DenseMap<Value *, AllocaInst *> AllocaMap;
  // First is the normal return, second is the exceptional return (invoke
  // only)
  DenseMap<Value *, std::pair<Value *, Value *>> Replacements;
  for (Value *V : ToSplit) {
    LiveSet.erase(V);

    AllocaInst *Alloca =
        new AllocaInst(V->getType(), "", F.getEntryBlock().getFirstNonPHI());
    AllocaMap[V] = Alloca;

    VectorType *VT = cast<VectorType>(V->getType());
    IRBuilder<> Builder(StatepointInst);
    SmallVector<Value *, 16> Elements;
    for (unsigned i = 0; i < VT->getNumElements(); i++)
      Elements.push_back(Builder.CreateExtractElement(V, Builder.getInt32(i)));
    LiveSet.insert(Elements.begin(), Elements.end());

    auto InsertVectorReform = [&](Instruction *IP) {
      Builder.SetInsertPoint(IP);
      Builder.SetCurrentDebugLocation(IP->getDebugLoc());
      Value *ResultVec = UndefValue::get(VT);
      for (unsigned i = 0; i < VT->getNumElements(); i++)
        ResultVec = Builder.CreateInsertElement(ResultVec, Elements[i],
                                                Builder.getInt32(i));
      return ResultVec;
    };

    if (isa<CallInst>(StatepointInst)) {
      BasicBlock::iterator Next(StatepointInst);
      Next++;
      Instruction *IP = &*(Next);
      Replacements[V].first = InsertVectorReform(IP);
      Replacements[V].second = nullptr;
    } else {
      InvokeInst *Invoke = cast<InvokeInst>(StatepointInst);
      // We've already normalized - check that we don't have shared
      // destination blocks
      BasicBlock *NormalDest = Invoke->getNormalDest();
      assert(!isa<PHINode>(NormalDest->begin()));
      BasicBlock *UnwindDest = Invoke->getUnwindDest();
      assert(!isa<PHINode>(UnwindDest->begin()));
      // Insert insertelement sequences in both successors
      Instruction *IP = &*(NormalDest->getFirstInsertionPt());
      Replacements[V].first = InsertVectorReform(IP);
      IP = &*(UnwindDest->getFirstInsertionPt());
      Replacements[V].second = InsertVectorReform(IP);
    }
  }
  for (Value *V : ToSplit) {
    AllocaInst *Alloca = AllocaMap[V];

    // Capture all users before we start mutating use lists
    SmallVector<Instruction *, 16> Users;
    for (User *U : V->users())
      Users.push_back(cast<Instruction>(U));

    for (Instruction *I : Users) {
      if (auto Phi = dyn_cast<PHINode>(I)) {
        for (unsigned i = 0; i < Phi->getNumIncomingValues(); i++)
          if (V == Phi->getIncomingValue(i)) {
            LoadInst *Load = new LoadInst(
                Alloca, "", Phi->getIncomingBlock(i)->getTerminator());
            Phi->setIncomingValue(i, Load);
          }
      } else {
        LoadInst *Load = new LoadInst(Alloca, "", I);
        I->replaceUsesOfWith(V, Load);
      }
    }

    // Store the original value and the replacement value into the alloca
    StoreInst *Store = new StoreInst(V, Alloca);
    if (auto I = dyn_cast<Instruction>(V))
      Store->insertAfter(I);
    else
      Store->insertAfter(Alloca);

    // Normal return for invoke, or call return
    Instruction *Replacement = cast<Instruction>(Replacements[V].first);
    (new StoreInst(Replacement, Alloca))->insertAfter(Replacement);
    // Unwind return for invoke only
    Replacement = cast_or_null<Instruction>(Replacements[V].second);
    if (Replacement)
      (new StoreInst(Replacement, Alloca))->insertAfter(Replacement);
  }

  // Apply mem2reg to promote each alloca back to SSA
  SmallVector<AllocaInst *, 16> Allocas;
  for (Value *V : ToSplit)
    Allocas.push_back(AllocaMap[V]);
  PromoteMemToReg(Allocas, DT);
}

static bool insertParsePoints(Function &F, DominatorTree &DT, Pass *P,
                              SmallVectorImpl<CallSite> &toUpdate) {
#ifndef NDEBUG
  // Sanity check the input
  std::set<CallSite> uniqued;
  uniqued.insert(toUpdate.begin(), toUpdate.end());
  assert(uniqued.size() == toUpdate.size() && "no duplicates please!");

  for (size_t i = 0; i < toUpdate.size(); i++) {
    CallSite &CS = toUpdate[i];
    assert(CS.getInstruction()->getParent()->getParent() == &F);
    assert(isStatepoint(CS) && "expected to already be a deopt statepoint");
  }
#endif

  // A list of dummy calls added to the IR to keep various values obviously
  // live in the IR. We'll remove all of these when done.
  SmallVector<CallInst *, 64> holders;

  // Insert a dummy call with all of the arguments to the vm_state we'll need
  // for the actual safepoint insertion. This ensures reference arguments in
  // the deopt argument list are considered live through the safepoint (and
  // thus makes sure they get relocated.)
  for (size_t i = 0; i < toUpdate.size(); i++) {
    CallSite &CS = toUpdate[i];
    Statepoint StatepointCS(CS);

    SmallVector<Value *, 64> DeoptValues;
    for (Use &U : StatepointCS.vm_state_args()) {
      Value *Arg = cast<Value>(&U);
      assert(!isUnhandledGCPointerType(Arg->getType()) &&
             "support for FCA unimplemented");
      if (isHandledGCPointerType(Arg->getType()))
        DeoptValues.push_back(Arg);
    }
    insertUseHolderAfter(CS, DeoptValues, holders);
  }

  SmallVector<struct PartiallyConstructedSafepointRecord, 64> records;
  records.reserve(toUpdate.size());
  for (size_t i = 0; i < toUpdate.size(); i++) {
    struct PartiallyConstructedSafepointRecord info;
    records.push_back(info);
  }
  assert(records.size() == toUpdate.size());

  // A) Identify all gc pointers which are statically live at the given call
  // site.
  findLiveReferences(F, DT, P, toUpdate, records);

  // Do a limited scalarization of any vector values live at the safepoint
  // which contain pointers. This enables this pass to run after
  // vectorization at the cost of some possible performance loss. TODO: it
  // would be nice to natively support vectors all the way through the
  // backend so we don't need to scalarize here.
  for (size_t i = 0; i < records.size(); i++) {
    struct PartiallyConstructedSafepointRecord &info = records[i];
    Instruction *statepoint = toUpdate[i].getInstruction();
    splitVectorValues(cast<Instruction>(statepoint), info.liveset, DT);
  }

  // B) Find the base pointers for each live pointer
  /* scope for caching */ {
    // Cache the 'defining value' relation used in the computation and
    // insertion of base phis and selects. This ensures that we don't insert
    // large numbers of duplicate base_phis.
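    // For example, two safepoints inside one loop whose live derived
    // pointers share a phi-dependent base would otherwise each synthesize
    // their own base_phi; with the cache, the second query reuses the
    // base_phi inserted for the first.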
    DefiningValueMapTy DVCache;

    for (size_t i = 0; i < records.size(); i++) {
      struct PartiallyConstructedSafepointRecord &info = records[i];
      CallSite &CS = toUpdate[i];
      findBasePointers(DT, DVCache, CS, info);
    }
  } // end of cache scope

  // The base phi insertion logic (for any safepoint) may have inserted new
  // instructions which are now live at some safepoint. The simplest such
  // example is:
  // loop:
  //   phi a  <-- will be a new base_phi here
  //   safepoint 1 <-- that needs to be live here
  //   gep a + 1
  //   safepoint 2
  //   br loop
  DenseSet<llvm::Value *> allInsertedDefs;
  for (size_t i = 0; i < records.size(); i++) {
    struct PartiallyConstructedSafepointRecord &info = records[i];
    allInsertedDefs.insert(info.NewInsertedDefs.begin(),
                           info.NewInsertedDefs.end());
  }

  // We insert some dummy calls after each safepoint to definitely hold live
  // the base pointers which were identified for that safepoint. We'll then
  // ask liveness for _every_ base inserted to see what is now live. Then we
  // remove the dummy calls.
  holders.reserve(holders.size() + records.size());
  for (size_t i = 0; i < records.size(); i++) {
    struct PartiallyConstructedSafepointRecord &info = records[i];
    CallSite &CS = toUpdate[i];

    SmallVector<Value *, 128> Bases;
    for (auto Pair : info.PointerToBase) {
      Bases.push_back(Pair.second);
    }
    insertUseHolderAfter(CS, Bases, holders);
  }

  // By selecting base pointers, we've effectively inserted new uses. Thus, we
  // need to rerun liveness. We may *also* have inserted new defs, but that's
  // not the key issue.
  recomputeLiveInValues(F, DT, P, toUpdate, records);

  if (PrintBasePointers) {
    for (size_t i = 0; i < records.size(); i++) {
      struct PartiallyConstructedSafepointRecord &info = records[i];
      errs() << "Base Pairs: (w/Relocation)\n";
      for (auto Pair : info.PointerToBase) {
        errs() << " derived %" << Pair.first->getName() << " base %"
               << Pair.second->getName() << "\n";
      }
    }
  }
  for (size_t i = 0; i < holders.size(); i++) {
    holders[i]->eraseFromParent();
    holders[i] = nullptr;
  }
  holders.clear();

  // Now run through and replace the existing statepoints with new ones which
  // have the live variables listed. We do not yet update uses of the values
  // being relocated. We have references to live variables that need to
  // survive to the last iteration of this loop. (By construction, the
  // previous statepoint cannot be a live variable, thus we can remove the
  // old statepoint calls as we go.)
  for (size_t i = 0; i < records.size(); i++) {
    struct PartiallyConstructedSafepointRecord &info = records[i];
    CallSite &CS = toUpdate[i];
    makeStatepointExplicit(DT, CS, P, info);
  }
  toUpdate.clear(); // prevent accidental use of the now-invalid CallSites

  // We may have inserted the relocates in a different basic block than the
  // original safepoint (this can happen for invokes), so we need to be sure
  // that the original values are not used in any of the phi nodes at the
  // beginning of the blocks containing the relocates. Because we know that
  // all such blocks will have a single predecessor, we can safely assume
  // that all phi nodes have a single entry (because of
  // normalizeBBForInvokeSafepoint). Just remove them all here.
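  // (Illustrative: a single-entry phi in the invoke's normal destination,
  //    %merge = phi i8 addrspace(1)* [ %p, %statepoint_block ]
  //  would make the alloca rewriting place its reload at the predecessor's
  //  terminator, i.e. before the relocation stores; folding the phi turns
  //  this into an ordinary in-block use of %p instead.)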
  for (size_t i = 0; i < records.size(); i++) {
    Instruction *I = records[i].StatepointToken;

    if (InvokeInst *invoke = dyn_cast<InvokeInst>(I)) {
      FoldSingleEntryPHINodes(invoke->getNormalDest());
      assert(!isa<PHINode>(invoke->getNormalDest()->begin()));

      FoldSingleEntryPHINodes(invoke->getUnwindDest());
      assert(!isa<PHINode>(invoke->getUnwindDest()->begin()));
    }
  }

  // Do all the fixups of the original live variables to their relocated selves
  SmallVector<Value *, 128> live;
  for (size_t i = 0; i < records.size(); i++) {
    struct PartiallyConstructedSafepointRecord &info = records[i];
    // We can't simply save the live set from the original insertion. One of
    // the live values might be the result of a call which needs a safepoint.
    // That Value* no longer exists and we need to use the new gc_result.
    // Thankfully, the liveset is embedded in the statepoint (and updated), so
    // we just grab that.
    Statepoint statepoint(info.StatepointToken);
    live.insert(live.end(), statepoint.gc_args_begin(),
                statepoint.gc_args_end());
  }
  unique_unsorted(live);

#ifndef NDEBUG
  // Sanity check
  for (auto ptr : live) {
    assert(isGCPointerType(ptr->getType()) && "must be a gc pointer type");
  }
#endif

  relocationViaAlloca(F, DT, live, records);
  return !records.empty();
}

/// Returns true if this function should be rewritten by this pass. The main
/// point of this function is as an extension point for custom logic.
static bool shouldRewriteStatepointsIn(Function &F) {
  // TODO: This should check the GCStrategy
  if (F.hasGC()) {
    const std::string StatepointExampleName("statepoint-example");
    return StatepointExampleName == F.getGC();
  } else
    return false;
}

bool RewriteStatepointsForGC::runOnFunction(Function &F) {
  // Nothing to do for declarations.
  if (F.isDeclaration() || F.empty())
    return false;

  // Policy choice says not to rewrite - the most common reason is that we're
  // compiling code without a GCStrategy.
  if (!shouldRewriteStatepointsIn(F))
    return false;

  DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();

  // Gather all the statepoints which need to be rewritten. Be careful to
  // only consider those in reachable code since we need to ask dominance
  // queries when rewriting. We'll delete the unreachable ones in a moment.
  SmallVector<CallSite, 64> ParsePointNeeded;
  bool HasUnreachableStatepoint = false;
  for (Instruction &I : inst_range(F)) {
    // TODO: only the ones with the flag set!
    if (isStatepoint(I)) {
      if (DT.isReachableFromEntry(I.getParent()))
        ParsePointNeeded.push_back(CallSite(&I));
      else
        HasUnreachableStatepoint = true;
    }
  }

  bool MadeChange = false;

  // Delete any unreachable statepoints so that we don't have unrewritten
  // statepoints surviving this pass. This makes testing easier and the
  // resulting IR less confusing to human readers. Rather than be fancy, we
  // just reuse a utility function which removes the unreachable blocks.
  if (HasUnreachableStatepoint)
    MadeChange |= removeUnreachableBlocks(F);

  // Return early if no work to do.
  if (ParsePointNeeded.empty())
    return MadeChange;

  // As a prepass, go ahead and aggressively destroy single-entry phi nodes.
  // These are created by LCSSA. They have the effect of increasing the size
  // of liveness sets for no good reason. It may be harder to do this post
  // insertion since relocations and base phis can confuse things.
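  // (Illustrative sketch: an LCSSA phi such as
  //    exit:
  //      %x.lcssa = phi i8 addrspace(1)* [ %x, %loop ]
  //  merely renames %x, but would drag both names into the liveness sets;
  //  folding it lets uses refer to %x directly.)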
  for (BasicBlock &BB : F)
    if (BB.getUniquePredecessor()) {
      MadeChange = true;
      FoldSingleEntryPHINodes(&BB);
    }

  MadeChange |= insertParsePoints(F, DT, this, ParsePointNeeded);
  return MadeChange;
}

// liveness computation via standard dataflow
// -------------------------------------------------------------------

// TODO: Consider using bitvectors for liveness, the set of potentially
// interesting values should be small and easy to pre-compute.

/// Is this value a constant consisting of entirely null values?
static bool isConstantNull(Value *V) {
  return isa<Constant>(V) && cast<Constant>(V)->isNullValue();
}

/// Compute the live-in set for the location rbegin starting from
/// the live-out set of the basic block
static void computeLiveInValues(BasicBlock::reverse_iterator rbegin,
                                BasicBlock::reverse_iterator rend,
                                DenseSet<Value *> &LiveTmp) {

  for (BasicBlock::reverse_iterator ritr = rbegin; ritr != rend; ritr++) {
    Instruction *I = &*ritr;

    // KILL/Def - Remove this definition from LiveIn
    LiveTmp.erase(I);

    // Don't consider *uses* in PHI nodes, we handle their contribution to
    // predecessor blocks when we seed the LiveOut sets
    if (isa<PHINode>(I))
      continue;

    // USE - Add to the LiveIn set for this instruction
    for (Value *V : I->operands()) {
      assert(!isUnhandledGCPointerType(V->getType()) &&
             "support for FCA unimplemented");
      if (isHandledGCPointerType(V->getType()) && !isConstantNull(V) &&
          !isa<UndefValue>(V)) {
        // The choice to exclude null and undef is arbitrary here. Reconsider?
        LiveTmp.insert(V);
      }
    }
  }
}

static void computeLiveOutSeed(BasicBlock *BB, DenseSet<Value *> &LiveTmp) {

  for (BasicBlock *Succ : successors(BB)) {
    const BasicBlock::iterator E(Succ->getFirstNonPHI());
    for (BasicBlock::iterator I = Succ->begin(); I != E; I++) {
      PHINode *Phi = cast<PHINode>(&*I);
      Value *V = Phi->getIncomingValueForBlock(BB);
      assert(!isUnhandledGCPointerType(V->getType()) &&
             "support for FCA unimplemented");
      if (isHandledGCPointerType(V->getType()) && !isConstantNull(V) &&
          !isa<UndefValue>(V)) {
        // The choice to exclude null and undef is arbitrary here. Reconsider?
        LiveTmp.insert(V);
      }
    }
  }
}

static DenseSet<Value *> computeKillSet(BasicBlock *BB) {
  DenseSet<Value *> KillSet;
  for (Instruction &I : *BB)
    if (isHandledGCPointerType(I.getType()))
      KillSet.insert(&I);
  return KillSet;
}
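// For reference, the fixed-point loop in computeLiveInValues below
// implements the usual backward dataflow equations over these per-block
// sets (a sketch, not normative):
//   LiveOut(BB) = union of LiveIn(S) over each successor S, seeded with
//                 the values that phi nodes in S read along the edge from BB
//   LiveIn(BB)  = (LiveSet(BB) union LiveOut(BB)) - KillSet(BB)
// Blocks whose LiveIn grows push their predecessors back onto the worklist.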
/// Check that the items in 'Live' dominate 'TI'. This is used as a basic
/// sanity check for the liveness computation.
static void checkBasicSSA(DominatorTree &DT, DenseSet<Value *> &Live,
                          TerminatorInst *TI, bool TermOkay = false) {
#ifndef NDEBUG
  for (Value *V : Live) {
    if (auto *I = dyn_cast<Instruction>(V)) {
      // The terminator can be a member of the LiveOut set. LLVM's definition
      // of instruction dominance states that V does not dominate itself. As
      // such, we need to special case this to allow it.
      if (TermOkay && TI == I)
        continue;
      assert(DT.dominates(I, TI) &&
             "basic SSA liveness expectation violated by liveness analysis");
    }
  }
#endif
}

/// Check that all the liveness sets used during the computation of liveness
/// obey basic SSA properties. This is useful for finding cases where we miss
/// a def.
static void checkBasicSSA(DominatorTree &DT, GCPtrLivenessData &Data,
                          BasicBlock &BB) {
  checkBasicSSA(DT, Data.LiveSet[&BB], BB.getTerminator());
  checkBasicSSA(DT, Data.LiveOut[&BB], BB.getTerminator(), true);
  checkBasicSSA(DT, Data.LiveIn[&BB], BB.getTerminator());
}

static void computeLiveInValues(DominatorTree &DT, Function &F,
                                GCPtrLivenessData &Data) {

  DenseSet<BasicBlock *> WorklistSet;
  SmallVector<BasicBlock *, 200> Worklist;
  auto AddPredsToWorklist = [&](BasicBlock *BB) {
    for (BasicBlock *Pred : predecessors(BB))
      if (WorklistSet.insert(Pred).second)
        Worklist.push_back(Pred);
  };
  auto NextItem = [&]() {
    BasicBlock *BB = Worklist.back();
    Worklist.pop_back();
    WorklistSet.erase(BB);
    return BB;
  };

  // Seed the liveness for each individual block
  for (BasicBlock &BB : F) {
    Data.KillSet[&BB] = computeKillSet(&BB);
    Data.LiveSet[&BB].clear();
    computeLiveInValues(BB.rbegin(), BB.rend(), Data.LiveSet[&BB]);

#ifndef NDEBUG
    for (Value *Kill : Data.KillSet[&BB])
      assert(!Data.LiveSet[&BB].count(Kill) && "live set contains kill");
#endif

    Data.LiveOut[&BB] = DenseSet<Value *>();
    computeLiveOutSeed(&BB, Data.LiveOut[&BB]);
    Data.LiveIn[&BB] = Data.LiveSet[&BB];
    set_union(Data.LiveIn[&BB], Data.LiveOut[&BB]);
    set_subtract(Data.LiveIn[&BB], Data.KillSet[&BB]);
    if (!Data.LiveIn[&BB].empty())
      AddPredsToWorklist(&BB);
  }

  // Propagate that liveness until stable
  while (!Worklist.empty()) {
    BasicBlock *BB = NextItem();

    // Compute our new liveout set, then exit early if it hasn't changed
    // despite the contribution of our successors.
    DenseSet<Value *> LiveOut = Data.LiveOut[BB];
    const auto OldLiveOutSize = LiveOut.size();
    for (BasicBlock *Succ : successors(BB)) {
      assert(Data.LiveIn.count(Succ));
      set_union(LiveOut, Data.LiveIn[Succ]);
    }
    // assert: OldLiveOut is a subset of LiveOut
    if (OldLiveOutSize == LiveOut.size()) {
      // If the sets are the same size, then we didn't actually add anything
      // when unioning our successors' LiveIn sets. Thus, the LiveIn of this
      // block hasn't changed.
      continue;
    }
    Data.LiveOut[BB] = LiveOut;

    // Apply the effects of this basic block
    DenseSet<Value *> LiveTmp = LiveOut;
    set_union(LiveTmp, Data.LiveSet[BB]);
    set_subtract(LiveTmp, Data.KillSet[BB]);

    assert(Data.LiveIn.count(BB));
    const DenseSet<Value *> &OldLiveIn = Data.LiveIn[BB];
    // assert: OldLiveIn is a subset of LiveTmp
    if (OldLiveIn.size() != LiveTmp.size()) {
      Data.LiveIn[BB] = LiveTmp;
      AddPredsToWorklist(BB);
    }
  } // while (!Worklist.empty())

#ifndef NDEBUG
  // Sanity check our output against SSA properties. This helps catch any
  // missing kills during the above iteration.
  for (BasicBlock &BB : F) {
    checkBasicSSA(DT, Data, BB);
  }
#endif
}

static void findLiveSetAtInst(Instruction *Inst, GCPtrLivenessData &Data,
                              StatepointLiveSetTy &Out) {

  BasicBlock *BB = Inst->getParent();

  // Note: The copy is intentional and required
  assert(Data.LiveOut.count(BB));
  DenseSet<Value *> LiveOut = Data.LiveOut[BB];

  // We want to handle the statepoint itself oddly. Its call result is not
  // live (normal), nor are its arguments (unless they're used again later).
  // This adjustment is specifically what we need to relocate.
  BasicBlock::reverse_iterator rend(Inst);
  computeLiveInValues(BB->rbegin(), rend, LiveOut);
  LiveOut.erase(Inst);
  Out.insert(LiveOut.begin(), LiveOut.end());
}

static void recomputeLiveInValues(GCPtrLivenessData &RevisedLivenessData,
                                  const CallSite &CS,
                                  PartiallyConstructedSafepointRecord &Info) {
  Instruction *Inst = CS.getInstruction();
  StatepointLiveSetTy Updated;
  findLiveSetAtInst(Inst, RevisedLivenessData, Updated);

#ifndef NDEBUG
  DenseSet<Value *> Bases;
  for (auto KVPair : Info.PointerToBase) {
    Bases.insert(KVPair.second);
  }
#endif
  // We may have base pointers which are now live that weren't before. We
  // need to update the PointerToBase structure to reflect this.
  for (auto V : Updated)
    if (!Info.PointerToBase.count(V)) {
      assert(Bases.count(V) && "can't find base for unexpected live value");
      Info.PointerToBase[V] = V;
      continue;
    }

#ifndef NDEBUG
  for (auto V : Updated) {
    assert(Info.PointerToBase.count(V) &&
           "must be able to find base for live value");
  }
#endif

  // Remove any stale base mappings - this can happen since our liveness is
  // more precise than the one inherent in the base pointer analysis.
  DenseSet<Value *> ToErase;
  for (auto KVPair : Info.PointerToBase)
    if (!Updated.count(KVPair.first))
      ToErase.insert(KVPair.first);
  for (auto V : ToErase)
    Info.PointerToBase.erase(V);

#ifndef NDEBUG
  for (auto KVPair : Info.PointerToBase)
    assert(Updated.count(KVPair.first) && "record for non-live value");
#endif

  Info.liveset = Updated;
}