//===- CloneFunction.cpp - Clone a function into another function --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the CloneFunctionInto interface, which is used as the
// low-level function cloner. This is used by the CloneFunction and function
// inliner to do the dirty work of copying the body of a function around.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/AttributeMask.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <map>
#include <optional>
using namespace llvm;

#define DEBUG_TYPE "clone-function"

/// See comments in Cloning.h.
BasicBlock *llvm::CloneBasicBlock(const BasicBlock *BB, ValueToValueMapTy &VMap,
                                  const Twine &NameSuffix, Function *F,
                                  ClonedCodeInfo *CodeInfo,
                                  DebugInfoFinder *DIFinder) {
  BasicBlock *NewBB = BasicBlock::Create(BB->getContext(), "", F);
  NewBB->IsNewDbgInfoFormat = BB->IsNewDbgInfoFormat;
  if (BB->hasName())
    NewBB->setName(BB->getName() + NameSuffix);

  bool hasCalls = false, hasDynamicAllocas = false, hasMemProfMetadata = false;
  Module *TheModule = F ? F->getParent() : nullptr;

  // Loop over all instructions, and copy them over.
  for (const Instruction &I : *BB) {
    if (DIFinder && TheModule)
      DIFinder->processInstruction(*TheModule, I);

    Instruction *NewInst = I.clone();
    if (I.hasName())
      NewInst->setName(I.getName() + NameSuffix);

    NewInst->insertBefore(*NewBB, NewBB->end());
    NewInst->cloneDebugInfoFrom(&I);

    VMap[&I] = NewInst; // Add instruction map to value.

    if (isa<CallInst>(I) && !I.isDebugOrPseudoInst()) {
      hasCalls = true;
      hasMemProfMetadata |= I.hasMetadata(LLVMContext::MD_memprof);
    }
    if (const AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
      if (!AI->isStaticAlloca()) {
        hasDynamicAllocas = true;
      }
    }
  }

  if (CodeInfo) {
    CodeInfo->ContainsCalls |= hasCalls;
    CodeInfo->ContainsMemProfMetadata |= hasMemProfMetadata;
    CodeInfo->ContainsDynamicAllocas |= hasDynamicAllocas;
  }
  return NewBB;
}

// Clone OldFunc into NewFunc, transforming the old arguments into references to
// VMap values.
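// The caller must already have added a VMap entry for every argument of
// OldFunc; cloned return instructions are appended to Returns.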
//
void llvm::CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
                             ValueToValueMapTy &VMap,
                             CloneFunctionChangeType Changes,
                             SmallVectorImpl<ReturnInst *> &Returns,
                             const char *NameSuffix, ClonedCodeInfo *CodeInfo,
                             ValueMapTypeRemapper *TypeMapper,
                             ValueMaterializer *Materializer) {
  NewFunc->setIsNewDbgInfoFormat(OldFunc->IsNewDbgInfoFormat);
  assert(NameSuffix && "NameSuffix cannot be null!");

#ifndef NDEBUG
  for (const Argument &I : OldFunc->args())
    assert(VMap.count(&I) && "No mapping from source argument specified!");
#endif

  bool ModuleLevelChanges = Changes > CloneFunctionChangeType::LocalChangesOnly;

  // Copy all attributes other than those stored in the AttributeList. We need
  // to remap the parameter indices of the AttributeList.
  AttributeList NewAttrs = NewFunc->getAttributes();
  NewFunc->copyAttributesFrom(OldFunc);
  NewFunc->setAttributes(NewAttrs);

  const RemapFlags FuncGlobalRefFlags =
      ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges;

  // Fix up the personality function that got copied over.
  if (OldFunc->hasPersonalityFn())
    NewFunc->setPersonalityFn(MapValue(OldFunc->getPersonalityFn(), VMap,
                                       FuncGlobalRefFlags, TypeMapper,
                                       Materializer));

  if (OldFunc->hasPrefixData()) {
    NewFunc->setPrefixData(MapValue(OldFunc->getPrefixData(), VMap,
                                    FuncGlobalRefFlags, TypeMapper,
                                    Materializer));
  }

  if (OldFunc->hasPrologueData()) {
    NewFunc->setPrologueData(MapValue(OldFunc->getPrologueData(), VMap,
                                      FuncGlobalRefFlags, TypeMapper,
                                      Materializer));
  }

  SmallVector<AttributeSet, 4> NewArgAttrs(NewFunc->arg_size());
  AttributeList OldAttrs = OldFunc->getAttributes();

  // Clone any argument attributes that are present in the VMap.
  for (const Argument &OldArg : OldFunc->args()) {
    if (Argument *NewArg = dyn_cast<Argument>(VMap[&OldArg])) {
      NewArgAttrs[NewArg->getArgNo()] =
          OldAttrs.getParamAttrs(OldArg.getArgNo());
    }
  }

  NewFunc->setAttributes(
      AttributeList::get(NewFunc->getContext(), OldAttrs.getFnAttrs(),
                         OldAttrs.getRetAttrs(), NewArgAttrs));

  // Everything else beyond this point deals with function instructions,
  // so if we are dealing with a function declaration, we're done.
  if (OldFunc->isDeclaration())
    return;

  // When we remap instructions within the same module, we want to avoid
  // duplicating inlined DISubprograms, so record all subprograms we find as we
  // duplicate instructions and then freeze them in the MD map. We also record
  // information about dbg.value and dbg.declare to avoid duplicating the
  // types.
  std::optional<DebugInfoFinder> DIFinder;

  // Track the subprogram attachment that needs to be cloned to fine-tune the
  // mapping within the same module.
  DISubprogram *SPClonedWithinModule = nullptr;
  if (Changes < CloneFunctionChangeType::DifferentModule) {
    assert((NewFunc->getParent() == nullptr ||
            NewFunc->getParent() == OldFunc->getParent()) &&
           "Expected NewFunc to have the same parent, or no parent");

    // Need to find subprograms, types, and compile units.
    DIFinder.emplace();

    SPClonedWithinModule = OldFunc->getSubprogram();
    if (SPClonedWithinModule)
      DIFinder->processSubprogram(SPClonedWithinModule);
  } else {
    assert((NewFunc->getParent() == nullptr ||
            NewFunc->getParent() != OldFunc->getParent()) &&
           "Expected NewFunc to have different parents, or no parent");

    if (Changes == CloneFunctionChangeType::DifferentModule) {
      assert(NewFunc->getParent() &&
             "Need parent of new function to maintain debug info invariants");

      // Need to find all the compile units.
      DIFinder.emplace();
    }
  }

  // Loop over all of the basic blocks in the function, cloning them as
  // appropriate. Note that the end iterator is captured before any blocks are
  // appended, which handles cloning of recursive functions into themselves.
  for (const BasicBlock &BB : *OldFunc) {

    // Create a new basic block and copy instructions into it!
    BasicBlock *CBB = CloneBasicBlock(&BB, VMap, NameSuffix, NewFunc, CodeInfo,
                                      DIFinder ? &*DIFinder : nullptr);

    // Add basic block mapping.
    VMap[&BB] = CBB;

    // It is only legal to clone a function if a block address within that
    // function is never referenced outside of the function. Given that, we
    // want to map block addresses from the old function to block addresses in
    // the clone. (This is different from the generic ValueMapper
    // implementation, which generates an invalid blockaddress when
    // cloning a function.)
    if (BB.hasAddressTaken()) {
      Constant *OldBBAddr = BlockAddress::get(const_cast<Function *>(OldFunc),
                                              const_cast<BasicBlock *>(&BB));
      VMap[OldBBAddr] = BlockAddress::get(NewFunc, CBB);
    }

    // Note return instructions for the caller.
    if (ReturnInst *RI = dyn_cast<ReturnInst>(CBB->getTerminator()))
      Returns.push_back(RI);
  }

  if (Changes < CloneFunctionChangeType::DifferentModule &&
      DIFinder->subprogram_count() > 0) {
    // Turn on module-level changes, since we need to clone (some of) the
    // debug info metadata.
    //
    // FIXME: Metadata effectively owned by a function should be made
    // local, and only that local metadata should be cloned.
    ModuleLevelChanges = true;

    auto mapToSelfIfNew = [&VMap](MDNode *N) {
      // Avoid clobbering an existing mapping.
      (void)VMap.MD().try_emplace(N, N);
    };

    // Avoid cloning types, compile units, and (other) subprograms.
    SmallPtrSet<const DISubprogram *, 16> MappedToSelfSPs;
    for (DISubprogram *ISP : DIFinder->subprograms()) {
      if (ISP != SPClonedWithinModule) {
        mapToSelfIfNew(ISP);
        MappedToSelfSPs.insert(ISP);
      }
    }

    // If a subprogram isn't going to be cloned, skip its lexical blocks as
    // well.
    for (DIScope *S : DIFinder->scopes()) {
      auto *LScope = dyn_cast<DILocalScope>(S);
      if (LScope && MappedToSelfSPs.count(LScope->getSubprogram()))
        mapToSelfIfNew(S);
    }

    for (DICompileUnit *CU : DIFinder->compile_units())
      mapToSelfIfNew(CU);

    for (DIType *Type : DIFinder->types())
      mapToSelfIfNew(Type);
  } else {
    assert(!SPClonedWithinModule &&
           "Subprogram should be in DIFinder->subprogram_count()...");
  }

  const auto RemapFlag = ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges;
  // Duplicate the metadata that is attached to the cloned function.
  // Subprograms/CUs/types that were already mapped to themselves won't be
  // duplicated.
  SmallVector<std::pair<unsigned, MDNode *>, 1> MDs;
  OldFunc->getAllMetadata(MDs);
  for (auto MD : MDs) {
    NewFunc->addMetadata(MD.first, *MapMetadata(MD.second, VMap, RemapFlag,
                                                TypeMapper, Materializer));
  }

  // Loop over all of the instructions in the new function, fixing up operand
  // references as we go. This uses VMap to do all the hard work.
  for (Function::iterator
           BB = cast<BasicBlock>(VMap[&OldFunc->front()])->getIterator(),
           BE = NewFunc->end();
       BB != BE; ++BB)
    // Loop over all instructions, fixing each one as we find it, and any
    // attached debug-info records.
    for (Instruction &II : *BB) {
      RemapInstruction(&II, VMap, RemapFlag, TypeMapper, Materializer);
      RemapDbgVariableRecordRange(II.getModule(), II.getDbgRecordRange(), VMap,
                                  RemapFlag, TypeMapper, Materializer);
    }

  // Only update !llvm.dbg.cu for DifferentModule (not CloneModule). In the
  // same module, the compile unit will already be listed (or not). When
  // cloning a module, CloneModule() will handle creating the named metadata.
  if (Changes != CloneFunctionChangeType::DifferentModule)
    return;

  // Update !llvm.dbg.cu with compile units added to the new module if this
  // function is being cloned in isolation.
  //
  // FIXME: This is making global / module-level changes, which doesn't seem
  // like the right encapsulation. Consider dropping the requirement to update
  // !llvm.dbg.cu (either obsoleting the node, or restricting it to
  // non-discardable compile units) instead of discovering compile units by
  // visiting the metadata attached to global values, which would allow this
  // code to be deleted. Alternatively, perhaps give responsibility for this
  // update to CloneFunctionInto's callers.
  auto *NewModule = NewFunc->getParent();
  auto *NMD = NewModule->getOrInsertNamedMetadata("llvm.dbg.cu");
  // Avoid multiple insertions of the same DICompileUnit to NMD.
  SmallPtrSet<const void *, 8> Visited;
  for (auto *Operand : NMD->operands())
    Visited.insert(Operand);
  for (auto *Unit : DIFinder->compile_units()) {
    MDNode *MappedUnit =
        MapMetadata(Unit, VMap, RF_None, TypeMapper, Materializer);
    if (Visited.insert(MappedUnit).second)
      NMD->addOperand(MappedUnit);
  }
}

/// Return a copy of the specified function and add it to that function's
/// module. Also, any references specified in the VMap are changed to refer to
/// their mapped value instead of the original one. If any of the arguments to
/// the function are in the VMap, the arguments are deleted from the resultant
/// function. The VMap is updated to include mappings from all of the
/// instructions and basic blocks in the function from their old to new values.
///
Function *llvm::CloneFunction(Function *F, ValueToValueMapTy &VMap,
                              ClonedCodeInfo *CodeInfo) {
  std::vector<Type *> ArgTypes;

  // The user might be deleting arguments to the function by specifying them in
  // the VMap. If so, we should not add those arguments to the arg ty vector.
  for (const Argument &I : F->args())
    if (VMap.count(&I) == 0) // Haven't mapped the argument to anything yet?
      ArgTypes.push_back(I.getType());

  // Create a new function type...
  FunctionType *FTy =
      FunctionType::get(F->getFunctionType()->getReturnType(), ArgTypes,
                        F->getFunctionType()->isVarArg());

  // Create the new function...
  Function *NewF = Function::Create(FTy, F->getLinkage(), F->getAddressSpace(),
                                    F->getName(), F->getParent());
  NewF->setIsNewDbgInfoFormat(F->IsNewDbgInfoFormat);

  // Loop over the arguments, copying the names of the mapped arguments over...
  Function::arg_iterator DestI = NewF->arg_begin();
  for (const Argument &I : F->args())
    if (VMap.count(&I) == 0) {     // Is this argument preserved?
      DestI->setName(I.getName()); // Copy the name over...
      VMap[&I] = &*DestI++;        // Add mapping to VMap
    }

  SmallVector<ReturnInst *, 8> Returns; // Ignore returns cloned.
  CloneFunctionInto(NewF, F, VMap, CloneFunctionChangeType::LocalChangesOnly,
                    Returns, "", CodeInfo);

  return NewF;
}

namespace {
/// This is a private class used to implement CloneAndPruneFunctionInto.
struct PruningFunctionCloner {
  Function *NewFunc;
  const Function *OldFunc;
  ValueToValueMapTy &VMap;
  bool ModuleLevelChanges;
  const char *NameSuffix;
  ClonedCodeInfo *CodeInfo;
  bool HostFuncIsStrictFP;

  Instruction *cloneInstruction(BasicBlock::const_iterator II);

public:
  PruningFunctionCloner(Function *newFunc, const Function *oldFunc,
                        ValueToValueMapTy &valueMap, bool moduleLevelChanges,
                        const char *nameSuffix, ClonedCodeInfo *codeInfo)
      : NewFunc(newFunc), OldFunc(oldFunc), VMap(valueMap),
        ModuleLevelChanges(moduleLevelChanges), NameSuffix(nameSuffix),
        CodeInfo(codeInfo) {
    HostFuncIsStrictFP =
        newFunc->getAttributes().hasFnAttr(Attribute::StrictFP);
  }

  /// The specified block was found to be reachable; clone it and
  /// anything that it can reach.
  void CloneBlock(const BasicBlock *BB, BasicBlock::const_iterator StartingInst,
                  std::vector<const BasicBlock *> &ToClone);
};
} // namespace

static bool hasRoundingModeOperand(Intrinsic::ID CIID) {
  switch (CIID) {
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:                                                   \
    return ROUND_MODE == 1;
#define FUNCTION INSTRUCTION
#include "llvm/IR/ConstrainedOps.def"
  default:
    llvm_unreachable("Unexpected constrained intrinsic id");
  }
}

Instruction *
PruningFunctionCloner::cloneInstruction(BasicBlock::const_iterator II) {
  const Instruction &OldInst = *II;
  Instruction *NewInst = nullptr;
  if (HostFuncIsStrictFP) {
    Intrinsic::ID CIID = getConstrainedIntrinsicID(OldInst);
    if (CIID != Intrinsic::not_intrinsic) {
      // Instead of cloning the instruction, a call to the corresponding
      // constrained intrinsic should be created.
      // Assume the first arguments of constrained intrinsics are the same as
      // the operands of the original instruction.

      // Determine overloaded types of the intrinsic.
      SmallVector<Type *, 2> TParams;
      SmallVector<Intrinsic::IITDescriptor, 8> Descriptor;
      getIntrinsicInfoTableEntries(CIID, Descriptor);
      for (unsigned I = 0, E = Descriptor.size(); I != E; ++I) {
        Intrinsic::IITDescriptor Operand = Descriptor[I];
        switch (Operand.Kind) {
        case Intrinsic::IITDescriptor::Argument:
          if (Operand.getArgumentKind() !=
              Intrinsic::IITDescriptor::AK_MatchType) {
            if (I == 0)
              TParams.push_back(OldInst.getType());
            else
              TParams.push_back(OldInst.getOperand(I - 1)->getType());
          }
          break;
        case Intrinsic::IITDescriptor::SameVecWidthArgument:
          ++I;
          break;
        default:
          break;
        }
      }

      // Create intrinsic call.
      LLVMContext &Ctx = NewFunc->getContext();
      Function *IFn =
          Intrinsic::getDeclaration(NewFunc->getParent(), CIID, TParams);
      SmallVector<Value *, 4> Args;
      unsigned NumOperands = OldInst.getNumOperands();
      if (isa<CallInst>(OldInst))
        --NumOperands;
      for (unsigned I = 0; I < NumOperands; ++I) {
        Value *Op = OldInst.getOperand(I);
        Args.push_back(Op);
      }
      if (const auto *CmpI = dyn_cast<FCmpInst>(&OldInst)) {
        FCmpInst::Predicate Pred = CmpI->getPredicate();
        StringRef PredName = FCmpInst::getPredicateName(Pred);
        Args.push_back(MetadataAsValue::get(Ctx, MDString::get(Ctx, PredName)));
      }

      // The last arguments of a constrained intrinsic are metadata that
      // represent rounding mode (absent in some intrinsics) and exception
      // behavior. The inlined function uses default settings.
      if (hasRoundingModeOperand(CIID))
        Args.push_back(
            MetadataAsValue::get(Ctx, MDString::get(Ctx, "round.tonearest")));
      Args.push_back(
          MetadataAsValue::get(Ctx, MDString::get(Ctx, "fpexcept.ignore")));

      NewInst = CallInst::Create(IFn, Args, OldInst.getName() + ".strict");
    }
  }
  if (!NewInst)
    NewInst = II->clone();
  return NewInst;
}

/// The specified block was found to be reachable; clone it and
/// anything that it can reach.
void PruningFunctionCloner::CloneBlock(
    const BasicBlock *BB, BasicBlock::const_iterator StartingInst,
    std::vector<const BasicBlock *> &ToClone) {
  WeakTrackingVH &BBEntry = VMap[BB];

  // Have we already cloned this block?
  if (BBEntry)
    return;

  // Nope, clone it now.
  BasicBlock *NewBB;
  Twine NewName(BB->hasName() ? Twine(BB->getName()) + NameSuffix : "");
  BBEntry = NewBB = BasicBlock::Create(BB->getContext(), NewName, NewFunc);
  NewBB->IsNewDbgInfoFormat = BB->IsNewDbgInfoFormat;

  // It is only legal to clone a function if a block address within that
  // function is never referenced outside of the function. Given that, we
  // want to map block addresses from the old function to block addresses in
  // the clone. (This is different from the generic ValueMapper
  // implementation, which generates an invalid blockaddress when
  // cloning a function.)
  //
  // Note that we don't need to fix the mapping for unreachable blocks;
  // the default mapping there is safe.
  if (BB->hasAddressTaken()) {
    Constant *OldBBAddr = BlockAddress::get(const_cast<Function *>(OldFunc),
                                            const_cast<BasicBlock *>(BB));
    VMap[OldBBAddr] = BlockAddress::get(NewFunc, NewBB);
  }

  bool hasCalls = false, hasDynamicAllocas = false, hasStaticAllocas = false;
  bool hasMemProfMetadata = false;

  // Keep a cursor pointing at the last place we cloned debug-info records from.
  BasicBlock::const_iterator DbgCursor = StartingInst;
  auto CloneDbgRecordsToHere =
      [NewBB, &DbgCursor](Instruction *NewInst, BasicBlock::const_iterator II) {
        if (!NewBB->IsNewDbgInfoFormat)
          return;

        // Clone debug-info records onto this instruction. Iterate through any
        // source-instructions we've cloned and then subsequently optimised
        // away, so that their debug-info doesn't go missing.
        for (; DbgCursor != II; ++DbgCursor)
          NewInst->cloneDebugInfoFrom(&*DbgCursor, std::nullopt, false);
        NewInst->cloneDebugInfoFrom(&*II);
        DbgCursor = std::next(II);
      };

  // Loop over all instructions, and copy them over, DCE'ing as we go. This
  // loop doesn't include the terminator.
  for (BasicBlock::const_iterator II = StartingInst, IE = --BB->end(); II != IE;
       ++II) {

    Instruction *NewInst = cloneInstruction(II);
    NewInst->insertInto(NewBB, NewBB->end());

    if (HostFuncIsStrictFP) {
      // All function calls in the inlined function must get 'strictfp'
      // attribute to prevent undesirable optimizations.
      if (auto *Call = dyn_cast<CallInst>(NewInst))
        Call->addFnAttr(Attribute::StrictFP);
    }

    // Eagerly remap operands to the newly cloned instruction, except for PHI
    // nodes for which we defer processing until we update the CFG. Also defer
    // debug intrinsic processing because they may contain use-before-defs.
    if (!isa<PHINode>(NewInst) && !isa<DbgVariableIntrinsic>(NewInst)) {
      RemapInstruction(NewInst, VMap,
                       ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges);

      // Eagerly constant fold the newly cloned instruction. If successful, add
      // a mapping to the new value. Non-constant operands may be incomplete at
      // this stage, thus instruction simplification is performed after
      // processing phi-nodes.
      if (Value *V = ConstantFoldInstruction(
              NewInst, BB->getModule()->getDataLayout())) {
        if (isInstructionTriviallyDead(NewInst)) {
          VMap[&*II] = V;
          NewInst->eraseFromParent();
          continue;
        }
      }
    }

    if (II->hasName())
      NewInst->setName(II->getName() + NameSuffix);
    VMap[&*II] = NewInst; // Add instruction map to value.
    if (isa<CallInst>(II) && !II->isDebugOrPseudoInst()) {
      hasCalls = true;
      hasMemProfMetadata |= II->hasMetadata(LLVMContext::MD_memprof);
    }

    CloneDbgRecordsToHere(NewInst, II);

    if (CodeInfo) {
      CodeInfo->OrigVMap[&*II] = NewInst;
      if (auto *CB = dyn_cast<CallBase>(&*II))
        if (CB->hasOperandBundles())
          CodeInfo->OperandBundleCallSites.push_back(NewInst);
    }

    if (const AllocaInst *AI = dyn_cast<AllocaInst>(II)) {
      if (isa<ConstantInt>(AI->getArraySize()))
        hasStaticAllocas = true;
      else
        hasDynamicAllocas = true;
    }
  }

  // Finally, clone over the terminator.
  const Instruction *OldTI = BB->getTerminator();
  bool TerminatorDone = false;
  if (const BranchInst *BI = dyn_cast<BranchInst>(OldTI)) {
    if (BI->isConditional()) {
      // If the condition was a known constant in the callee...
      ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition());
      // Or is a known constant in the caller...
      if (!Cond) {
        Value *V = VMap.lookup(BI->getCondition());
        Cond = dyn_cast_or_null<ConstantInt>(V);
      }

      // Constant fold to uncond branch!
      if (Cond) {
        BasicBlock *Dest = BI->getSuccessor(!Cond->getZExtValue());
        VMap[OldTI] = BranchInst::Create(Dest, NewBB);
        ToClone.push_back(Dest);
        TerminatorDone = true;
      }
    }
  } else if (const SwitchInst *SI = dyn_cast<SwitchInst>(OldTI)) {
    // If switching on a value that is a known constant in the caller.
    ConstantInt *Cond = dyn_cast<ConstantInt>(SI->getCondition());
    if (!Cond) { // Or known constant after constant prop in the callee...
      Value *V = VMap.lookup(SI->getCondition());
      Cond = dyn_cast_or_null<ConstantInt>(V);
    }
    if (Cond) { // Constant fold to uncond branch!
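      // findCaseValue returns the matching case, or the default case when the
      // constant matches no case, so a single successor is always chosen.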
      SwitchInst::ConstCaseHandle Case = *SI->findCaseValue(Cond);
      BasicBlock *Dest = const_cast<BasicBlock *>(Case.getCaseSuccessor());
      VMap[OldTI] = BranchInst::Create(Dest, NewBB);
      ToClone.push_back(Dest);
      TerminatorDone = true;
    }
  }

  if (!TerminatorDone) {
    Instruction *NewInst = OldTI->clone();
    if (OldTI->hasName())
      NewInst->setName(OldTI->getName() + NameSuffix);
    NewInst->insertInto(NewBB, NewBB->end());

    CloneDbgRecordsToHere(NewInst, OldTI->getIterator());

    VMap[OldTI] = NewInst; // Add instruction map to value.

    if (CodeInfo) {
      CodeInfo->OrigVMap[OldTI] = NewInst;
      if (auto *CB = dyn_cast<CallBase>(OldTI))
        if (CB->hasOperandBundles())
          CodeInfo->OperandBundleCallSites.push_back(NewInst);
    }

    // Recursively clone any reachable successor blocks.
    append_range(ToClone, successors(BB->getTerminator()));
  } else {
    // If we didn't create a new terminator, clone DbgVariableRecords from the
    // old terminator onto the new terminator.
    Instruction *NewInst = NewBB->getTerminator();
    assert(NewInst);

    CloneDbgRecordsToHere(NewInst, OldTI->getIterator());
  }

  if (CodeInfo) {
    CodeInfo->ContainsCalls |= hasCalls;
    CodeInfo->ContainsMemProfMetadata |= hasMemProfMetadata;
    CodeInfo->ContainsDynamicAllocas |= hasDynamicAllocas;
    CodeInfo->ContainsDynamicAllocas |=
        hasStaticAllocas && BB != &BB->getParent()->front();
  }
}

/// This works like CloneAndPruneFunctionInto, except that it does not clone the
/// entire function. Instead it starts at an instruction provided by the caller
/// and copies (and prunes) only the code reachable from that instruction.
void llvm::CloneAndPruneIntoFromInst(Function *NewFunc, const Function *OldFunc,
                                     const Instruction *StartingInst,
                                     ValueToValueMapTy &VMap,
                                     bool ModuleLevelChanges,
                                     SmallVectorImpl<ReturnInst *> &Returns,
                                     const char *NameSuffix,
                                     ClonedCodeInfo *CodeInfo) {
  assert(NameSuffix && "NameSuffix cannot be null!");

  ValueMapTypeRemapper *TypeMapper = nullptr;
  ValueMaterializer *Materializer = nullptr;

#ifndef NDEBUG
  // If the cloning starts at the beginning of the function, verify that
  // the function arguments are mapped.
  if (!StartingInst)
    for (const Argument &II : OldFunc->args())
      assert(VMap.count(&II) && "No mapping from source argument specified!");
#endif

  PruningFunctionCloner PFC(NewFunc, OldFunc, VMap, ModuleLevelChanges,
                            NameSuffix, CodeInfo);
  const BasicBlock *StartingBB;
  if (StartingInst)
    StartingBB = StartingInst->getParent();
  else {
    StartingBB = &OldFunc->getEntryBlock();
    StartingInst = &StartingBB->front();
  }

  // Collect debug intrinsics for remapping later.
  SmallVector<const DbgVariableIntrinsic *, 8> DbgIntrinsics;
  for (const auto &BB : *OldFunc) {
    for (const auto &I : BB) {
      if (const auto *DVI = dyn_cast<DbgVariableIntrinsic>(&I))
        DbgIntrinsics.push_back(DVI);
    }
  }

  // Clone the entry block, and anything recursively reachable from it.
  std::vector<const BasicBlock *> CloneWorklist;
  PFC.CloneBlock(StartingBB, StartingInst->getIterator(), CloneWorklist);
  while (!CloneWorklist.empty()) {
    const BasicBlock *BB = CloneWorklist.back();
    CloneWorklist.pop_back();
    PFC.CloneBlock(BB, BB->begin(), CloneWorklist);
  }

  // Loop over all of the basic blocks in the old function. If the block was
  // reachable, we have cloned it and the old block is now in the value map:
  // insert it into the new function in the right order. If not, ignore it.
  //
  // Defer PHI resolution until the rest of the function is resolved.
  SmallVector<const PHINode *, 16> PHIToResolve;
  for (const BasicBlock &BI : *OldFunc) {
    Value *V = VMap.lookup(&BI);
    BasicBlock *NewBB = cast_or_null<BasicBlock>(V);
    if (!NewBB)
      continue; // Dead block.

    // Move the new block to preserve the order in the original function.
    NewBB->moveBefore(NewFunc->end());

    // Handle PHI nodes specially, as we have to remove references to dead
    // blocks.
    for (const PHINode &PN : BI.phis()) {
      // PHI nodes may have been remapped to non-PHI nodes by the caller or
      // during the cloning process.
      if (isa<PHINode>(VMap[&PN]))
        PHIToResolve.push_back(&PN);
      else
        break;
    }

    // Finally, remap the terminator instructions, as those can't be remapped
    // until all BBs are mapped.
    RemapInstruction(NewBB->getTerminator(), VMap,
                     ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges,
                     TypeMapper, Materializer);
  }

  // Defer PHI resolution until the rest of the function is resolved; PHI
  // resolution requires the CFG to be up-to-date.
  for (unsigned phino = 0, e = PHIToResolve.size(); phino != e;) {
    const PHINode *OPN = PHIToResolve[phino];
    unsigned NumPreds = OPN->getNumIncomingValues();
    const BasicBlock *OldBB = OPN->getParent();
    BasicBlock *NewBB = cast<BasicBlock>(VMap[OldBB]);

    // Map operands for blocks that are live and remove operands for blocks
    // that are dead.
    for (; phino != PHIToResolve.size() &&
           PHIToResolve[phino]->getParent() == OldBB;
         ++phino) {
      OPN = PHIToResolve[phino];
      PHINode *PN = cast<PHINode>(VMap[OPN]);
      for (unsigned pred = 0, e = NumPreds; pred != e; ++pred) {
        Value *V = VMap.lookup(PN->getIncomingBlock(pred));
        if (BasicBlock *MappedBlock = cast_or_null<BasicBlock>(V)) {
          Value *InVal =
              MapValue(PN->getIncomingValue(pred), VMap,
                       ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges);
          assert(InVal && "Unknown input value?");
          PN->setIncomingValue(pred, InVal);
          PN->setIncomingBlock(pred, MappedBlock);
        } else {
          PN->removeIncomingValue(pred, false);
          --pred; // Revisit the next entry.
          --e;
        }
      }
    }

    // The loop above has removed PHI entries for those blocks that are dead
    // and has updated others. However, if a block is live (i.e. copied over)
    // but its terminator has been changed to not go to this block, then our
    // phi nodes will have invalid entries. Update the PHI nodes in this
    // case.
    PHINode *PN = cast<PHINode>(NewBB->begin());
    NumPreds = pred_size(NewBB);
    if (NumPreds != PN->getNumIncomingValues()) {
      assert(NumPreds < PN->getNumIncomingValues());
      // Count how many times each predecessor comes to this block.
      std::map<BasicBlock *, unsigned> PredCount;
      for (BasicBlock *Pred : predecessors(NewBB))
        --PredCount[Pred];

      // Figure out how many entries to remove from each PHI.
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
        ++PredCount[PN->getIncomingBlock(i)];

      // At this point, the excess predecessor entries are positive in the
      // map. Loop over all of the PHIs and remove excess predecessor
      // entries.
      BasicBlock::iterator I = NewBB->begin();
      for (; (PN = dyn_cast<PHINode>(I)); ++I) {
        for (const auto &PCI : PredCount) {
          BasicBlock *Pred = PCI.first;
          for (unsigned NumToRemove = PCI.second; NumToRemove; --NumToRemove)
            PN->removeIncomingValue(Pred, false);
        }
      }
    }

    // If the loops above have made these phi nodes have 0 or 1 operand,
    // replace them with poison or the input value. We must do this for
    // correctness, because 0-operand phis are not valid.
    PN = cast<PHINode>(NewBB->begin());
    if (PN->getNumIncomingValues() == 0) {
      BasicBlock::iterator I = NewBB->begin();
      BasicBlock::const_iterator OldI = OldBB->begin();
      while ((PN = dyn_cast<PHINode>(I++))) {
        Value *NV = PoisonValue::get(PN->getType());
        PN->replaceAllUsesWith(NV);
        assert(VMap[&*OldI] == PN && "VMap mismatch");
        VMap[&*OldI] = NV;
        PN->eraseFromParent();
        ++OldI;
      }
    }
  }

  // Drop all incompatible return attributes that cannot be applied to NewFunc
  // during cloning, so as to allow instruction simplification to reason on the
  // old state of the function. The original attributes are restored later.
  AttributeMask IncompatibleAttrs =
      AttributeFuncs::typeIncompatible(OldFunc->getReturnType());
  AttributeList Attrs = NewFunc->getAttributes();
  NewFunc->removeRetAttrs(IncompatibleAttrs);

  // As phi-nodes have now been remapped, allow incremental simplification of
  // newly-cloned instructions.
  const DataLayout &DL = NewFunc->getParent()->getDataLayout();
  for (const auto &BB : *OldFunc) {
    for (const auto &I : BB) {
      auto *NewI = dyn_cast_or_null<Instruction>(VMap.lookup(&I));
      if (!NewI)
        continue;

      // Skip over non-intrinsic callsites, we don't want to remove any nodes
      // from the CGSCC.
      CallBase *CB = dyn_cast<CallBase>(NewI);
      if (CB && CB->getCalledFunction() &&
          !CB->getCalledFunction()->isIntrinsic())
        continue;

      if (Value *V = simplifyInstruction(NewI, DL)) {
        NewI->replaceAllUsesWith(V);

        if (isInstructionTriviallyDead(NewI)) {
          NewI->eraseFromParent();
        } else {
          // Did not erase it? Restore the new instruction into VMap; the
          // mapping was previously dropped by `ValueIsRAUWd`.
          VMap[&I] = NewI;
        }
      }
    }
  }

  // Restore attributes.
  NewFunc->setAttributes(Attrs);

  // Remap debug intrinsic operands now that all values have been mapped.
  // Doing this now (late) preserves use-before-defs in debug intrinsics. If
  // we didn't do this, ValueAsMetadata(use-before-def) operands would be
  // replaced by empty metadata. This would signal later cleanup passes to
  // remove the debug intrinsics, potentially causing incorrect locations.
  for (const auto *DVI : DbgIntrinsics) {
    if (DbgVariableIntrinsic *NewDVI =
            cast_or_null<DbgVariableIntrinsic>(VMap.lookup(DVI)))
      RemapInstruction(NewDVI, VMap,
                       ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges,
                       TypeMapper, Materializer);
  }

  // Do the same for DbgVariableRecords, touching all the instructions in the
  // cloned range of blocks.
  Function::iterator Begin = cast<BasicBlock>(VMap[StartingBB])->getIterator();
  for (BasicBlock &BB : make_range(Begin, NewFunc->end())) {
    for (Instruction &I : BB) {
      RemapDbgVariableRecordRange(I.getModule(), I.getDbgRecordRange(), VMap,
                                  ModuleLevelChanges ? RF_None
                                                     : RF_NoModuleLevelChanges,
                                  TypeMapper, Materializer);
    }
  }

  // Simplify conditional branches and switches with a constant operand. We try
  // to prune these out when cloning, but if the simplification required
  // looking through PHI nodes, those are only available after forming the full
  // basic block. That may leave some here, and we still want to prune the dead
  // code as early as possible.
  for (BasicBlock &BB : make_range(Begin, NewFunc->end()))
    ConstantFoldTerminator(&BB);

  // Some blocks may have become unreachable as a result. Find and delete them.
  {
    SmallPtrSet<BasicBlock *, 16> ReachableBlocks;
    SmallVector<BasicBlock *, 16> Worklist;
    Worklist.push_back(&*Begin);
    while (!Worklist.empty()) {
      BasicBlock *BB = Worklist.pop_back_val();
      if (ReachableBlocks.insert(BB).second)
        append_range(Worklist, successors(BB));
    }

    SmallVector<BasicBlock *, 16> UnreachableBlocks;
    for (BasicBlock &BB : make_range(Begin, NewFunc->end()))
      if (!ReachableBlocks.contains(&BB))
        UnreachableBlocks.push_back(&BB);
    DeleteDeadBlocks(UnreachableBlocks);
  }

  // Now that the inlined function body has been fully constructed, go through
  // and zap unconditional fall-through branches. This happens all the time when
  // specializing code: code specialization turns conditional branches into
  // uncond branches, and this code folds them.
  Function::iterator I = Begin;
  while (I != NewFunc->end()) {
    BranchInst *BI = dyn_cast<BranchInst>(I->getTerminator());
    if (!BI || BI->isConditional()) {
      ++I;
      continue;
    }

    BasicBlock *Dest = BI->getSuccessor(0);
    if (!Dest->getSinglePredecessor()) {
      ++I;
      continue;
    }

    // We shouldn't be able to get single-entry PHI nodes here, as instsimplify
    // above should have zapped all of them.
    assert(!isa<PHINode>(Dest->begin()));

    // We know all single-entry PHI nodes in the inlined function have been
    // removed, so we just need to splice the blocks.
    BI->eraseFromParent();

    // Make all PHI nodes that referred to Dest now refer to I as their source.
    Dest->replaceAllUsesWith(&*I);

    // Move all the instructions in the succ to the pred.
    I->splice(I->end(), Dest);

    // Remove the dest block.
    Dest->eraseFromParent();

    // Do not increment I; iteratively merge all things this block branches to.
  }

  // Make a final pass over the basic blocks from the old function to gather
  // any return instructions which survived folding. We have to do this here
  // because we can iteratively remove and merge returns above.
  for (Function::iterator I = cast<BasicBlock>(VMap[StartingBB])->getIterator(),
                          E = NewFunc->end();
       I != E; ++I)
    if (ReturnInst *RI = dyn_cast<ReturnInst>(I->getTerminator()))
      Returns.push_back(RI);
}

/// This works exactly like CloneFunctionInto,
/// except that it does some simple constant prop and DCE on the fly. The
/// effect of this is to copy significantly less code in cases where (for
/// example) a function call with constant arguments is inlined, and those
/// constant arguments cause a significant amount of code in the callee to be
/// dead. Since this doesn't produce an exact copy of the input, it can't be
/// used for things like CloneFunction or CloneModule.
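/// This simply forwards to CloneAndPruneIntoFromInst, starting at the first
/// instruction of OldFunc's entry block.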
void llvm::CloneAndPruneFunctionInto(
    Function *NewFunc, const Function *OldFunc, ValueToValueMapTy &VMap,
    bool ModuleLevelChanges, SmallVectorImpl<ReturnInst *> &Returns,
    const char *NameSuffix, ClonedCodeInfo *CodeInfo) {
  CloneAndPruneIntoFromInst(NewFunc, OldFunc, &OldFunc->front().front(), VMap,
                            ModuleLevelChanges, Returns, NameSuffix, CodeInfo);
}

/// Remaps instructions in \p Blocks using the mapping in \p VMap.
void llvm::remapInstructionsInBlocks(ArrayRef<BasicBlock *> Blocks,
                                     ValueToValueMapTy &VMap) {
  // Rewrite the code to refer to itself.
  for (auto *BB : Blocks) {
    for (auto &Inst : *BB) {
      RemapDbgVariableRecordRange(
          Inst.getModule(), Inst.getDbgRecordRange(), VMap,
          RF_NoModuleLevelChanges | RF_IgnoreMissingLocals);
      RemapInstruction(&Inst, VMap,
                       RF_NoModuleLevelChanges | RF_IgnoreMissingLocals);
    }
  }
}

/// Clones a loop \p OrigLoop. Returns the new loop and the blocks in \p
/// Blocks.
///
/// Updates LoopInfo and DominatorTree assuming the loop is dominated by block
/// \p LoopDomBB. Inserts the new blocks before the block specified in \p
/// Before.
Loop *llvm::cloneLoopWithPreheader(BasicBlock *Before, BasicBlock *LoopDomBB,
                                   Loop *OrigLoop, ValueToValueMapTy &VMap,
                                   const Twine &NameSuffix, LoopInfo *LI,
                                   DominatorTree *DT,
                                   SmallVectorImpl<BasicBlock *> &Blocks) {
  Function *F = OrigLoop->getHeader()->getParent();
  Loop *ParentLoop = OrigLoop->getParentLoop();
  DenseMap<Loop *, Loop *> LMap;

  Loop *NewLoop = LI->AllocateLoop();
  LMap[OrigLoop] = NewLoop;
  if (ParentLoop)
    ParentLoop->addChildLoop(NewLoop);
  else
    LI->addTopLevelLoop(NewLoop);

  BasicBlock *OrigPH = OrigLoop->getLoopPreheader();
  assert(OrigPH && "No preheader");
  BasicBlock *NewPH = CloneBasicBlock(OrigPH, VMap, NameSuffix, F);
  // To rename the loop PHIs.
  VMap[OrigPH] = NewPH;
  Blocks.push_back(NewPH);

  // Update LoopInfo.
  if (ParentLoop)
    ParentLoop->addBasicBlockToLoop(NewPH, *LI);

  // Update DominatorTree.
  DT->addNewBlock(NewPH, LoopDomBB);

  for (Loop *CurLoop : OrigLoop->getLoopsInPreorder()) {
    Loop *&NewLoop = LMap[CurLoop];
    if (!NewLoop) {
      NewLoop = LI->AllocateLoop();

      // Establish the parent/child relationship.
      Loop *OrigParent = CurLoop->getParentLoop();
      assert(OrigParent && "Could not find the original parent loop");
      Loop *NewParentLoop = LMap[OrigParent];
      assert(NewParentLoop && "Could not find the new parent loop");

      NewParentLoop->addChildLoop(NewLoop);
    }
  }

  for (BasicBlock *BB : OrigLoop->getBlocks()) {
    Loop *CurLoop = LI->getLoopFor(BB);
    Loop *&NewLoop = LMap[CurLoop];
    assert(NewLoop && "Expecting new loop to be allocated");

    BasicBlock *NewBB = CloneBasicBlock(BB, VMap, NameSuffix, F);
    VMap[BB] = NewBB;

    // Update LoopInfo.
    NewLoop->addBasicBlockToLoop(NewBB, *LI);

    // Add DominatorTree node. After seeing all blocks, update to correct
    // IDom.
    DT->addNewBlock(NewBB, NewPH);

    Blocks.push_back(NewBB);
  }

  for (BasicBlock *BB : OrigLoop->getBlocks()) {
    // Update loop headers.
    Loop *CurLoop = LI->getLoopFor(BB);
    if (BB == CurLoop->getHeader())
      LMap[CurLoop]->moveToHeader(cast<BasicBlock>(VMap[BB]));

    // Update DominatorTree.
    BasicBlock *IDomBB = DT->getNode(BB)->getIDom()->getBlock();
    DT->changeImmediateDominator(cast<BasicBlock>(VMap[BB]),
                                 cast<BasicBlock>(VMap[IDomBB]));
  }

  // Move them physically from the end of the block list.
  F->splice(Before->getIterator(), F, NewPH->getIterator());
  F->splice(Before->getIterator(), F, NewLoop->getHeader()->getIterator(),
            F->end());

  return NewLoop;
}

/// Duplicate non-Phi instructions from the beginning of block \p BB up to the
/// \p StopAt instruction into a split block between BB and its predecessor.
BasicBlock *llvm::DuplicateInstructionsInSplitBetween(
    BasicBlock *BB, BasicBlock *PredBB, Instruction *StopAt,
    ValueToValueMapTy &ValueMapping, DomTreeUpdater &DTU) {

  assert(count(successors(PredBB), BB) == 1 &&
         "There must be a single edge between PredBB and BB!");
  // We are going to have to map operands from the original block BB to the new
  // copy of the block 'NewBB'. If there are PHI nodes in BB, evaluate them to
  // account for entry from PredBB.
  BasicBlock::iterator BI = BB->begin();
  for (; PHINode *PN = dyn_cast<PHINode>(BI); ++BI)
    ValueMapping[PN] = PN->getIncomingValueForBlock(PredBB);

  BasicBlock *NewBB = SplitEdge(PredBB, BB);
  NewBB->setName(PredBB->getName() + ".split");
  Instruction *NewTerm = NewBB->getTerminator();

  // FIXME: SplitEdge does not yet take a DTU, so we include the split edge
  // in the update set here.
  DTU.applyUpdates({{DominatorTree::Delete, PredBB, BB},
                    {DominatorTree::Insert, PredBB, NewBB},
                    {DominatorTree::Insert, NewBB, BB}});

  // Clone the non-phi instructions of BB into NewBB, keeping track of the
  // mapping and using it to remap operands in the cloned instructions.
  // Stop once we see the terminator too. This covers the case where BB's
  // terminator gets replaced and StopAt == BB's terminator.
  for (; StopAt != &*BI && BB->getTerminator() != &*BI; ++BI) {
    Instruction *New = BI->clone();
    New->setName(BI->getName());
    New->insertBefore(NewTerm);
    New->cloneDebugInfoFrom(&*BI);
    ValueMapping[&*BI] = New;

    // Remap operands to patch up intra-block references.
    for (unsigned i = 0, e = New->getNumOperands(); i != e; ++i)
      if (Instruction *Inst = dyn_cast<Instruction>(New->getOperand(i))) {
        auto I = ValueMapping.find(Inst);
        if (I != ValueMapping.end())
          New->setOperand(i, I->second);
      }

    // Remap debug variable operands.
    remapDebugVariable(ValueMapping, New);
  }

  return NewBB;
}

void llvm::cloneNoAliasScopes(ArrayRef<MDNode *> NoAliasDeclScopes,
                              DenseMap<MDNode *, MDNode *> &ClonedScopes,
                              StringRef Ext, LLVMContext &Context) {
  MDBuilder MDB(Context);

  for (auto *ScopeList : NoAliasDeclScopes) {
    for (const auto &MDOperand : ScopeList->operands()) {
      if (MDNode *MD = dyn_cast<MDNode>(MDOperand)) {
        AliasScopeNode SNANode(MD);

        std::string Name;
        auto ScopeName = SNANode.getName();
        if (!ScopeName.empty())
          Name = (Twine(ScopeName) + ":" + Ext).str();
        else
          Name = std::string(Ext);

        MDNode *NewScope = MDB.createAnonymousAliasScope(
            const_cast<MDNode *>(SNANode.getDomain()), Name);
        ClonedScopes.insert(std::make_pair(MD, NewScope));
      }
    }
  }
}

void llvm::adaptNoAliasScopes(Instruction *I,
                              const DenseMap<MDNode *, MDNode *> &ClonedScopes,
                              LLVMContext &Context) {
  auto CloneScopeList = [&](const MDNode *ScopeList) -> MDNode * {
    bool NeedsReplacement = false;
    SmallVector<Metadata *, 8> NewScopeList;
    for (const auto &MDOp : ScopeList->operands()) {
      if (MDNode *MD = dyn_cast<MDNode>(MDOp)) {
        if (auto *NewMD = ClonedScopes.lookup(MD)) {
          NewScopeList.push_back(NewMD);
          NeedsReplacement = true;
          continue;
        }
        NewScopeList.push_back(MD);
      }
    }
    if (NeedsReplacement)
      return MDNode::get(Context, NewScopeList);
    return nullptr;
  };

  if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(I))
    if (auto *NewScopeList = CloneScopeList(Decl->getScopeList()))
      Decl->setScopeList(NewScopeList);

  auto replaceWhenNeeded = [&](unsigned MD_ID) {
    if (const MDNode *CSNoAlias = I->getMetadata(MD_ID))
      if (auto *NewScopeList = CloneScopeList(CSNoAlias))
        I->setMetadata(MD_ID, NewScopeList);
  };
  replaceWhenNeeded(LLVMContext::MD_noalias);
  replaceWhenNeeded(LLVMContext::MD_alias_scope);
}

void llvm::cloneAndAdaptNoAliasScopes(ArrayRef<MDNode *> NoAliasDeclScopes,
                                      ArrayRef<BasicBlock *> NewBlocks,
                                      LLVMContext &Context, StringRef Ext) {
  if (NoAliasDeclScopes.empty())
    return;

  DenseMap<MDNode *, MDNode *> ClonedScopes;
  LLVM_DEBUG(dbgs() << "cloneAndAdaptNoAliasScopes: cloning "
                    << NoAliasDeclScopes.size() << " node(s)\n");

  cloneNoAliasScopes(NoAliasDeclScopes, ClonedScopes, Ext, Context);
  // Identify instructions using metadata that needs adaptation.
  for (BasicBlock *NewBlock : NewBlocks)
    for (Instruction &I : *NewBlock)
      adaptNoAliasScopes(&I, ClonedScopes, Context);
}

void llvm::cloneAndAdaptNoAliasScopes(ArrayRef<MDNode *> NoAliasDeclScopes,
                                      Instruction *IStart, Instruction *IEnd,
                                      LLVMContext &Context, StringRef Ext) {
  if (NoAliasDeclScopes.empty())
    return;

  DenseMap<MDNode *, MDNode *> ClonedScopes;
  LLVM_DEBUG(dbgs() << "cloneAndAdaptNoAliasScopes: cloning "
                    << NoAliasDeclScopes.size() << " node(s)\n");

  cloneNoAliasScopes(NoAliasDeclScopes, ClonedScopes, Ext, Context);
  // Identify instructions using metadata that needs adaptation.
  assert(IStart->getParent() == IEnd->getParent() && "different basic block ?");
  auto ItStart = IStart->getIterator();
  auto ItEnd = IEnd->getIterator();
  ++ItEnd; // IEnd is included; increment ItEnd to get the end of the range.
  for (auto &I : llvm::make_range(ItStart, ItEnd))
    adaptNoAliasScopes(&I, ClonedScopes, Context);
}

void llvm::identifyNoAliasScopesToClone(
    ArrayRef<BasicBlock *> BBs, SmallVectorImpl<MDNode *> &NoAliasDeclScopes) {
  for (BasicBlock *BB : BBs)
    for (Instruction &I : *BB)
      if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
        NoAliasDeclScopes.push_back(Decl->getScopeList());
}

void llvm::identifyNoAliasScopesToClone(
    BasicBlock::iterator Start, BasicBlock::iterator End,
    SmallVectorImpl<MDNode *> &NoAliasDeclScopes) {
  for (Instruction &I : make_range(Start, End))
    if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
      NoAliasDeclScopes.push_back(Decl->getScopeList());
}