//===- CloneFunction.cpp - Clone a function into another function ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the CloneFunctionInto interface, which is used as the
// low-level function cloner. This is used by the CloneFunction and function
// inliner to do the dirty work of copying the body of a function around.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/AttributeMask.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <map>
#include <optional>
using namespace llvm;

#define DEBUG_TYPE "clone-function"

/// See comments in Cloning.h.
BasicBlock *llvm::CloneBasicBlock(const BasicBlock *BB, ValueToValueMapTy &VMap,
                                  const Twine &NameSuffix, Function *F,
                                  ClonedCodeInfo *CodeInfo,
                                  DebugInfoFinder *DIFinder) {
  BasicBlock *NewBB = BasicBlock::Create(BB->getContext(), "", F);
  NewBB->IsNewDbgInfoFormat = BB->IsNewDbgInfoFormat;
  if (BB->hasName())
    NewBB->setName(BB->getName() + NameSuffix);

  bool hasCalls = false, hasDynamicAllocas = false, hasMemProfMetadata = false;
  Module *TheModule = F ? F->getParent() : nullptr;

  // Loop over all instructions, and copy them over.
  for (const Instruction &I : *BB) {
    if (DIFinder && TheModule)
      DIFinder->processInstruction(*TheModule, I);

    Instruction *NewInst = I.clone();
    if (I.hasName())
      NewInst->setName(I.getName() + NameSuffix);

    NewInst->insertBefore(*NewBB, NewBB->end());
    NewInst->cloneDebugInfoFrom(&I);

    VMap[&I] = NewInst; // Add instruction map to value.

    if (isa<CallInst>(I) && !I.isDebugOrPseudoInst()) {
      hasCalls = true;
      hasMemProfMetadata |= I.hasMetadata(LLVMContext::MD_memprof);
      hasMemProfMetadata |= I.hasMetadata(LLVMContext::MD_callsite);
    }
    if (const AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
      if (!AI->isStaticAlloca()) {
        hasDynamicAllocas = true;
      }
    }
  }

  if (CodeInfo) {
    CodeInfo->ContainsCalls |= hasCalls;
    CodeInfo->ContainsMemProfMetadata |= hasMemProfMetadata;
    CodeInfo->ContainsDynamicAllocas |= hasDynamicAllocas;
  }
  return NewBB;
}

void llvm::CloneFunctionAttributesInto(Function *NewFunc,
                                       const Function *OldFunc,
                                       ValueToValueMapTy &VMap,
                                       bool ModuleLevelChanges,
                                       ValueMapTypeRemapper *TypeMapper,
                                       ValueMaterializer *Materializer) {
  // Copy all attributes other than those stored in Function's AttributeList
  // which holds e.g. parameters and return value attributes.
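  // copyAttributesFrom() also copies OldFunc's AttributeList, so save NewFunc's
  // list here and restore it immediately afterwards; the parameter and return
  // attributes are instead rebuilt from OldFunc's list further below, once the
  // argument mapping in VMap is known.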
  AttributeList NewAttrs = NewFunc->getAttributes();
  NewFunc->copyAttributesFrom(OldFunc);
  NewFunc->setAttributes(NewAttrs);

  const RemapFlags FuncGlobalRefFlags =
      ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges;

  // Fix up the personality function that got copied over.
  if (OldFunc->hasPersonalityFn())
    NewFunc->setPersonalityFn(MapValue(OldFunc->getPersonalityFn(), VMap,
                                       FuncGlobalRefFlags, TypeMapper,
                                       Materializer));

  if (OldFunc->hasPrefixData()) {
    NewFunc->setPrefixData(MapValue(OldFunc->getPrefixData(), VMap,
                                    FuncGlobalRefFlags, TypeMapper,
                                    Materializer));
  }

  if (OldFunc->hasPrologueData()) {
    NewFunc->setPrologueData(MapValue(OldFunc->getPrologueData(), VMap,
                                      FuncGlobalRefFlags, TypeMapper,
                                      Materializer));
  }

  SmallVector<AttributeSet, 4> NewArgAttrs(NewFunc->arg_size());
  AttributeList OldAttrs = OldFunc->getAttributes();

  // Clone any argument attributes that are present in the VMap.
  for (const Argument &OldArg : OldFunc->args()) {
    if (Argument *NewArg = dyn_cast<Argument>(VMap[&OldArg])) {
      // Remap the parameter indices.
      NewArgAttrs[NewArg->getArgNo()] =
          OldAttrs.getParamAttrs(OldArg.getArgNo());
    }
  }

  NewFunc->setAttributes(
      AttributeList::get(NewFunc->getContext(), OldAttrs.getFnAttrs(),
                         OldAttrs.getRetAttrs(), NewArgAttrs));
}

// Clone OldFunc into NewFunc, transforming the old arguments into references to
// VMap values.
void llvm::CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
                             ValueToValueMapTy &VMap,
                             CloneFunctionChangeType Changes,
                             SmallVectorImpl<ReturnInst *> &Returns,
                             const char *NameSuffix, ClonedCodeInfo *CodeInfo,
                             ValueMapTypeRemapper *TypeMapper,
                             ValueMaterializer *Materializer) {
  NewFunc->setIsNewDbgInfoFormat(OldFunc->IsNewDbgInfoFormat);
  assert(NameSuffix && "NameSuffix cannot be null!");

#ifndef NDEBUG
  for (const Argument &I : OldFunc->args())
    assert(VMap.count(&I) && "No mapping from source argument specified!");
#endif

  bool ModuleLevelChanges = Changes > CloneFunctionChangeType::LocalChangesOnly;

  CloneFunctionAttributesInto(NewFunc, OldFunc, VMap, ModuleLevelChanges,
                              TypeMapper, Materializer);

  // Everything else beyond this point deals with function instructions,
  // so if we are dealing with a function declaration, we're done.
  if (OldFunc->isDeclaration())
    return;

  // When we remap instructions within the same module, we want to avoid
  // duplicating inlined DISubprograms, so record all subprograms we find as we
  // duplicate instructions and then freeze them in the MD map. We also record
  // information about dbg.value and dbg.declare to avoid duplicating the
  // types.
  std::optional<DebugInfoFinder> DIFinder;

  // Track the subprogram attachment that needs to be cloned to fine-tune the
  // mapping within the same module.
  DISubprogram *SPClonedWithinModule = nullptr;
  if (Changes < CloneFunctionChangeType::DifferentModule) {
    assert((NewFunc->getParent() == nullptr ||
            NewFunc->getParent() == OldFunc->getParent()) &&
           "Expected NewFunc to have the same parent, or no parent");

    // Need to find subprograms, types, and compile units.
    DIFinder.emplace();

    SPClonedWithinModule = OldFunc->getSubprogram();
    if (SPClonedWithinModule)
      DIFinder->processSubprogram(SPClonedWithinModule);
  } else {
    assert((NewFunc->getParent() == nullptr ||
            NewFunc->getParent() != OldFunc->getParent()) &&
           "Expected NewFunc to have different parents, or no parent");

    if (Changes == CloneFunctionChangeType::DifferentModule) {
      assert(NewFunc->getParent() &&
             "Need parent of new function to maintain debug info invariants");

      // Need to find all the compile units.
      DIFinder.emplace();
    }
  }

  // Loop over all of the basic blocks in the function, cloning them as
  // appropriate. Note that we save BE this way in order to handle cloning of
  // recursive functions into themselves.
  for (const BasicBlock &BB : *OldFunc) {

    // Create a new basic block and copy instructions into it!
    BasicBlock *CBB = CloneBasicBlock(&BB, VMap, NameSuffix, NewFunc, CodeInfo,
                                      DIFinder ? &*DIFinder : nullptr);

    // Add basic block mapping.
    VMap[&BB] = CBB;

    // It is only legal to clone a function if a block address within that
    // function is never referenced outside of the function. Given that, we
    // want to map block addresses from the old function to block addresses in
    // the clone. (This is different from the generic ValueMapper
    // implementation, which generates an invalid blockaddress when
    // cloning a function.)
    if (BB.hasAddressTaken()) {
      Constant *OldBBAddr = BlockAddress::get(const_cast<Function *>(OldFunc),
                                              const_cast<BasicBlock *>(&BB));
      VMap[OldBBAddr] = BlockAddress::get(NewFunc, CBB);
    }

    // Note return instructions for the caller.
    if (ReturnInst *RI = dyn_cast<ReturnInst>(CBB->getTerminator()))
      Returns.push_back(RI);
  }

  if (Changes < CloneFunctionChangeType::DifferentModule &&
      DIFinder->subprogram_count() > 0) {
    // Turn on module-level changes, since we need to clone (some of) the
    // debug info metadata.
    //
    // FIXME: Metadata effectively owned by a function should be made
    // local, and only that local metadata should be cloned.
    ModuleLevelChanges = true;

    auto mapToSelfIfNew = [&VMap](MDNode *N) {
      // Avoid clobbering an existing mapping.
      (void)VMap.MD().try_emplace(N, N);
    };

    // Avoid cloning types, compile units, and (other) subprograms.
    SmallPtrSet<const DISubprogram *, 16> MappedToSelfSPs;
    for (DISubprogram *ISP : DIFinder->subprograms()) {
      if (ISP != SPClonedWithinModule) {
        mapToSelfIfNew(ISP);
        MappedToSelfSPs.insert(ISP);
      }
    }

    // If a subprogram isn't going to be cloned, skip its lexical blocks as
    // well.
    for (DIScope *S : DIFinder->scopes()) {
      auto *LScope = dyn_cast<DILocalScope>(S);
      if (LScope && MappedToSelfSPs.count(LScope->getSubprogram()))
        mapToSelfIfNew(S);
    }

    for (DICompileUnit *CU : DIFinder->compile_units())
      mapToSelfIfNew(CU);

    for (DIType *Type : DIFinder->types())
      mapToSelfIfNew(Type);
  } else {
    assert(!SPClonedWithinModule &&
           "Subprogram should be in DIFinder->subprogram_count()...");
  }

  const auto RemapFlag = ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges;
  // Duplicate the metadata that is attached to the cloned function.
  // Subprograms/CUs/types that were already mapped to themselves won't be
  // duplicated.
  SmallVector<std::pair<unsigned, MDNode *>, 1> MDs;
  OldFunc->getAllMetadata(MDs);
  for (auto MD : MDs) {
    NewFunc->addMetadata(MD.first, *MapMetadata(MD.second, VMap, RemapFlag,
                                                TypeMapper, Materializer));
  }

  // Loop over all of the instructions in the new function, fixing up operand
  // references as we go. This uses VMap to do all the hard work.
  for (Function::iterator
           BB = cast<BasicBlock>(VMap[&OldFunc->front()])->getIterator(),
           BE = NewFunc->end();
       BB != BE; ++BB)
    // Loop over all instructions, fixing each one as we find it, and any
    // attached debug-info records.
    for (Instruction &II : *BB) {
      RemapInstruction(&II, VMap, RemapFlag, TypeMapper, Materializer);
      RemapDbgRecordRange(II.getModule(), II.getDbgRecordRange(), VMap,
                          RemapFlag, TypeMapper, Materializer);
    }

  // Only update !llvm.dbg.cu for DifferentModule (not CloneModule). In the
  // same module, the compile unit will already be listed (or not). When
  // cloning a module, CloneModule() will handle creating the named metadata.
  if (Changes != CloneFunctionChangeType::DifferentModule)
    return;

  // Update !llvm.dbg.cu with compile units added to the new module if this
  // function is being cloned in isolation.
  //
  // FIXME: This is making global / module-level changes, which doesn't seem
  // like the right encapsulation. Consider dropping the requirement to update
  // !llvm.dbg.cu (either obsoleting the node, or restricting it to
  // non-discardable compile units) instead of discovering compile units by
  // visiting the metadata attached to global values, which would allow this
  // code to be deleted. Alternatively, perhaps give responsibility for this
  // update to CloneFunctionInto's callers.
  auto *NewModule = NewFunc->getParent();
  auto *NMD = NewModule->getOrInsertNamedMetadata("llvm.dbg.cu");
  // Avoid multiple insertions of the same DICompileUnit to NMD.
  SmallPtrSet<const void *, 8> Visited;
  for (auto *Operand : NMD->operands())
    Visited.insert(Operand);
  for (auto *Unit : DIFinder->compile_units()) {
    MDNode *MappedUnit =
        MapMetadata(Unit, VMap, RF_None, TypeMapper, Materializer);
    if (Visited.insert(MappedUnit).second)
      NMD->addOperand(MappedUnit);
  }
}

/// Return a copy of the specified function and add it to that function's
/// module. Also, any references specified in the VMap are changed to refer to
/// their mapped value instead of the original one. If any of the arguments to
/// the function are in the VMap, the arguments are deleted from the resultant
/// function. The VMap is updated to include mappings from all of the
/// instructions and basic blocks in the function from their old to new values.
///
Function *llvm::CloneFunction(Function *F, ValueToValueMapTy &VMap,
                              ClonedCodeInfo *CodeInfo) {
  std::vector<Type *> ArgTypes;

  // The user might be deleting arguments to the function by specifying them in
  // the VMap. If so, we need to not add the arguments to the arg ty vector
  //
  for (const Argument &I : F->args())
    if (VMap.count(&I) == 0) // Haven't mapped the argument to anything yet?
      ArgTypes.push_back(I.getType());

  // Create a new function type...
  FunctionType *FTy =
      FunctionType::get(F->getFunctionType()->getReturnType(), ArgTypes,
                        F->getFunctionType()->isVarArg());

  // Create the new function...
  Function *NewF = Function::Create(FTy, F->getLinkage(), F->getAddressSpace(),
                                    F->getName(), F->getParent());
  NewF->setIsNewDbgInfoFormat(F->IsNewDbgInfoFormat);

  // Loop over the arguments, copying the names of the mapped arguments over...
  Function::arg_iterator DestI = NewF->arg_begin();
  for (const Argument &I : F->args())
    if (VMap.count(&I) == 0) {     // Is this argument preserved?
      DestI->setName(I.getName()); // Copy the name over...
      VMap[&I] = &*DestI++;        // Add mapping to VMap
    }

  SmallVector<ReturnInst *, 8> Returns; // Ignore returns cloned.
  CloneFunctionInto(NewF, F, VMap, CloneFunctionChangeType::LocalChangesOnly,
                    Returns, "", CodeInfo);

  return NewF;
}

namespace {
/// This is a private class used to implement CloneAndPruneFunctionInto.
struct PruningFunctionCloner {
  Function *NewFunc;
  const Function *OldFunc;
  ValueToValueMapTy &VMap;
  bool ModuleLevelChanges;
  const char *NameSuffix;
  ClonedCodeInfo *CodeInfo;
  bool HostFuncIsStrictFP;

  Instruction *cloneInstruction(BasicBlock::const_iterator II);

public:
  PruningFunctionCloner(Function *newFunc, const Function *oldFunc,
                        ValueToValueMapTy &valueMap, bool moduleLevelChanges,
                        const char *nameSuffix, ClonedCodeInfo *codeInfo)
      : NewFunc(newFunc), OldFunc(oldFunc), VMap(valueMap),
        ModuleLevelChanges(moduleLevelChanges), NameSuffix(nameSuffix),
        CodeInfo(codeInfo) {
    HostFuncIsStrictFP =
        newFunc->getAttributes().hasFnAttr(Attribute::StrictFP);
  }

  /// The specified block is found to be reachable; clone it and
  /// anything that it can reach.
  void CloneBlock(const BasicBlock *BB, BasicBlock::const_iterator StartingInst,
                  std::vector<const BasicBlock *> &ToClone);
};
} // namespace

Instruction *
PruningFunctionCloner::cloneInstruction(BasicBlock::const_iterator II) {
  const Instruction &OldInst = *II;
  Instruction *NewInst = nullptr;
  if (HostFuncIsStrictFP) {
    Intrinsic::ID CIID = getConstrainedIntrinsicID(OldInst);
    if (CIID != Intrinsic::not_intrinsic) {
      // Instead of cloning the instruction, a call to a constrained intrinsic
      // should be created.
      // Assume the first arguments of constrained intrinsics are the same as
      // the operands of the original instruction.

      // Determine overloaded types of the intrinsic.
      SmallVector<Type *, 2> TParams;
      SmallVector<Intrinsic::IITDescriptor, 8> Descriptor;
      getIntrinsicInfoTableEntries(CIID, Descriptor);
      for (unsigned I = 0, E = Descriptor.size(); I != E; ++I) {
        Intrinsic::IITDescriptor Operand = Descriptor[I];
        switch (Operand.Kind) {
        case Intrinsic::IITDescriptor::Argument:
          if (Operand.getArgumentKind() !=
              Intrinsic::IITDescriptor::AK_MatchType) {
            if (I == 0)
              TParams.push_back(OldInst.getType());
            else
              TParams.push_back(OldInst.getOperand(I - 1)->getType());
          }
          break;
        case Intrinsic::IITDescriptor::SameVecWidthArgument:
          ++I;
          break;
        default:
          break;
        }
      }

      // Create intrinsic call.
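      // The original instruction's operands become the leading arguments of
      // the intrinsic; for a call, the final operand is the callee and is
      // dropped. FCmp additionally passes its predicate as a metadata string.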
      LLVMContext &Ctx = NewFunc->getContext();
      Function *IFn = Intrinsic::getOrInsertDeclaration(NewFunc->getParent(),
                                                        CIID, TParams);
      SmallVector<Value *, 4> Args;
      unsigned NumOperands = OldInst.getNumOperands();
      if (isa<CallInst>(OldInst))
        --NumOperands;
      for (unsigned I = 0; I < NumOperands; ++I) {
        Value *Op = OldInst.getOperand(I);
        Args.push_back(Op);
      }
      if (const auto *CmpI = dyn_cast<FCmpInst>(&OldInst)) {
        FCmpInst::Predicate Pred = CmpI->getPredicate();
        StringRef PredName = FCmpInst::getPredicateName(Pred);
        Args.push_back(MetadataAsValue::get(Ctx, MDString::get(Ctx, PredName)));
      }

      // The last arguments of a constrained intrinsic are metadata that
      // represent rounding mode (absent in some intrinsics) and exception
      // behavior. The inlined function uses default settings.
      if (Intrinsic::hasConstrainedFPRoundingModeOperand(CIID))
        Args.push_back(
            MetadataAsValue::get(Ctx, MDString::get(Ctx, "round.tonearest")));
      Args.push_back(
          MetadataAsValue::get(Ctx, MDString::get(Ctx, "fpexcept.ignore")));

      NewInst = CallInst::Create(IFn, Args, OldInst.getName() + ".strict");
    }
  }
  if (!NewInst)
    NewInst = II->clone();
  return NewInst;
}

/// The specified block is found to be reachable; clone it and
/// anything that it can reach.
void PruningFunctionCloner::CloneBlock(
    const BasicBlock *BB, BasicBlock::const_iterator StartingInst,
    std::vector<const BasicBlock *> &ToClone) {
  WeakTrackingVH &BBEntry = VMap[BB];

  // Have we already cloned this block?
  if (BBEntry)
    return;

  // Nope, clone it now.
  BasicBlock *NewBB;
  Twine NewName(BB->hasName() ? Twine(BB->getName()) + NameSuffix : "");
  BBEntry = NewBB = BasicBlock::Create(BB->getContext(), NewName, NewFunc);
  NewBB->IsNewDbgInfoFormat = BB->IsNewDbgInfoFormat;

  // It is only legal to clone a function if a block address within that
  // function is never referenced outside of the function. Given that, we
  // want to map block addresses from the old function to block addresses in
  // the clone. (This is different from the generic ValueMapper
  // implementation, which generates an invalid blockaddress when
  // cloning a function.)
  //
  // Note that we don't need to fix the mapping for unreachable blocks;
  // the default mapping there is safe.
  if (BB->hasAddressTaken()) {
    Constant *OldBBAddr = BlockAddress::get(const_cast<Function *>(OldFunc),
                                            const_cast<BasicBlock *>(BB));
    VMap[OldBBAddr] = BlockAddress::get(NewFunc, NewBB);
  }

  bool hasCalls = false, hasDynamicAllocas = false, hasStaticAllocas = false;
  bool hasMemProfMetadata = false;

  // Keep a cursor pointing at the last place we cloned debug-info records from.
  BasicBlock::const_iterator DbgCursor = StartingInst;
  auto CloneDbgRecordsToHere =
      [NewBB, &DbgCursor](Instruction *NewInst, BasicBlock::const_iterator II) {
        if (!NewBB->IsNewDbgInfoFormat)
          return;

        // Clone debug-info records onto this instruction. Iterate through any
        // source-instructions we've cloned and then subsequently optimised
        // away, so that their debug-info doesn't go missing.
        for (; DbgCursor != II; ++DbgCursor)
          NewInst->cloneDebugInfoFrom(&*DbgCursor, std::nullopt, false);
        NewInst->cloneDebugInfoFrom(&*II);
        DbgCursor = std::next(II);
      };

  // Loop over all instructions, and copy them over, DCE'ing as we go. This
  // loop doesn't include the terminator.
  for (BasicBlock::const_iterator II = StartingInst, IE = --BB->end(); II != IE;
       ++II) {

    // Don't clone fake_use as it may suppress many optimizations
    // due to inlining, especially SROA.
    if (auto *IntrInst = dyn_cast<IntrinsicInst>(II))
      if (IntrInst->getIntrinsicID() == Intrinsic::fake_use)
        continue;

    Instruction *NewInst = cloneInstruction(II);
    NewInst->insertInto(NewBB, NewBB->end());

    if (HostFuncIsStrictFP) {
      // All function calls in the inlined function must get the 'strictfp'
      // attribute to prevent undesirable optimizations.
      if (auto *Call = dyn_cast<CallInst>(NewInst))
        Call->addFnAttr(Attribute::StrictFP);
    }

    // Eagerly remap operands to the newly cloned instruction, except for PHI
    // nodes for which we defer processing until we update the CFG. Also defer
    // debug intrinsic processing because they may contain use-before-defs.
    if (!isa<PHINode>(NewInst) && !isa<DbgVariableIntrinsic>(NewInst)) {
      RemapInstruction(NewInst, VMap,
                       ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges);

      // Eagerly constant fold the newly cloned instruction. If successful, add
      // a mapping to the new value. Non-constant operands may be incomplete at
      // this stage, thus instruction simplification is performed after
      // processing phi-nodes.
      if (Value *V = ConstantFoldInstruction(
              NewInst, BB->getDataLayout())) {
        if (isInstructionTriviallyDead(NewInst)) {
          VMap[&*II] = V;
          NewInst->eraseFromParent();
          continue;
        }
      }
    }

    if (II->hasName())
      NewInst->setName(II->getName() + NameSuffix);
    VMap[&*II] = NewInst; // Add instruction map to value.
    if (isa<CallInst>(II) && !II->isDebugOrPseudoInst()) {
      hasCalls = true;
      hasMemProfMetadata |= II->hasMetadata(LLVMContext::MD_memprof);
      hasMemProfMetadata |= II->hasMetadata(LLVMContext::MD_callsite);
    }

    CloneDbgRecordsToHere(NewInst, II);

    if (CodeInfo) {
      CodeInfo->OrigVMap[&*II] = NewInst;
      if (auto *CB = dyn_cast<CallBase>(&*II))
        if (CB->hasOperandBundles())
          CodeInfo->OperandBundleCallSites.push_back(NewInst);
    }

    if (const AllocaInst *AI = dyn_cast<AllocaInst>(II)) {
      if (isa<ConstantInt>(AI->getArraySize()))
        hasStaticAllocas = true;
      else
        hasDynamicAllocas = true;
    }
  }

  // Finally, clone over the terminator.
  const Instruction *OldTI = BB->getTerminator();
  bool TerminatorDone = false;
  if (const BranchInst *BI = dyn_cast<BranchInst>(OldTI)) {
    if (BI->isConditional()) {
      // If the condition was a known constant in the callee...
      ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition());
      // Or is a known constant in the caller...
      if (!Cond) {
        Value *V = VMap.lookup(BI->getCondition());
        Cond = dyn_cast_or_null<ConstantInt>(V);
      }

      // Constant fold to uncond branch!
      if (Cond) {
        BasicBlock *Dest = BI->getSuccessor(!Cond->getZExtValue());
        VMap[OldTI] = BranchInst::Create(Dest, NewBB);
        ToClone.push_back(Dest);
        TerminatorDone = true;
      }
    }
  } else if (const SwitchInst *SI = dyn_cast<SwitchInst>(OldTI)) {
    // If switching on a value known constant in the caller.
    ConstantInt *Cond = dyn_cast<ConstantInt>(SI->getCondition());
    if (!Cond) { // Or known constant after constant prop in the callee...
      Value *V = VMap.lookup(SI->getCondition());
      Cond = dyn_cast_or_null<ConstantInt>(V);
    }
    if (Cond) { // Constant fold to uncond branch!
      SwitchInst::ConstCaseHandle Case = *SI->findCaseValue(Cond);
      BasicBlock *Dest = const_cast<BasicBlock *>(Case.getCaseSuccessor());
      VMap[OldTI] = BranchInst::Create(Dest, NewBB);
      ToClone.push_back(Dest);
      TerminatorDone = true;
    }
  }

  if (!TerminatorDone) {
    Instruction *NewInst = OldTI->clone();
    if (OldTI->hasName())
      NewInst->setName(OldTI->getName() + NameSuffix);
    NewInst->insertInto(NewBB, NewBB->end());

    CloneDbgRecordsToHere(NewInst, OldTI->getIterator());

    VMap[OldTI] = NewInst; // Add instruction map to value.

    if (CodeInfo) {
      CodeInfo->OrigVMap[OldTI] = NewInst;
      if (auto *CB = dyn_cast<CallBase>(OldTI))
        if (CB->hasOperandBundles())
          CodeInfo->OperandBundleCallSites.push_back(NewInst);
    }

    // Recursively clone any reachable successor blocks.
    append_range(ToClone, successors(BB->getTerminator()));
  } else {
    // If we didn't create a new terminator, clone DbgVariableRecords from the
    // old terminator onto the new terminator.
    Instruction *NewInst = NewBB->getTerminator();
    assert(NewInst);

    CloneDbgRecordsToHere(NewInst, OldTI->getIterator());
  }

  if (CodeInfo) {
    CodeInfo->ContainsCalls |= hasCalls;
    CodeInfo->ContainsMemProfMetadata |= hasMemProfMetadata;
    CodeInfo->ContainsDynamicAllocas |= hasDynamicAllocas;
    CodeInfo->ContainsDynamicAllocas |=
        hasStaticAllocas && BB != &BB->getParent()->front();
  }
}

/// This works like CloneAndPruneFunctionInto, except that it does not clone the
/// entire function. Instead it starts at an instruction provided by the caller
/// and copies (and prunes) only the code reachable from that instruction.
void llvm::CloneAndPruneIntoFromInst(Function *NewFunc, const Function *OldFunc,
                                     const Instruction *StartingInst,
                                     ValueToValueMapTy &VMap,
                                     bool ModuleLevelChanges,
                                     SmallVectorImpl<ReturnInst *> &Returns,
                                     const char *NameSuffix,
                                     ClonedCodeInfo *CodeInfo) {
  assert(NameSuffix && "NameSuffix cannot be null!");

  ValueMapTypeRemapper *TypeMapper = nullptr;
  ValueMaterializer *Materializer = nullptr;

#ifndef NDEBUG
  // If the cloning starts at the beginning of the function, verify that
  // the function arguments are mapped.
  if (!StartingInst)
    for (const Argument &II : OldFunc->args())
      assert(VMap.count(&II) && "No mapping from source argument specified!");
#endif

  PruningFunctionCloner PFC(NewFunc, OldFunc, VMap, ModuleLevelChanges,
                            NameSuffix, CodeInfo);
  const BasicBlock *StartingBB;
  if (StartingInst)
    StartingBB = StartingInst->getParent();
  else {
    StartingBB = &OldFunc->getEntryBlock();
    StartingInst = &StartingBB->front();
  }

  // Collect debug intrinsics for remapping later.
  SmallVector<const DbgVariableIntrinsic *, 8> DbgIntrinsics;
  for (const auto &BB : *OldFunc) {
    for (const auto &I : BB) {
      if (const auto *DVI = dyn_cast<DbgVariableIntrinsic>(&I))
        DbgIntrinsics.push_back(DVI);
    }
  }

  // Clone the entry block, and anything recursively reachable from it.
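  // CloneBlock clones one block and pushes any newly reachable successors onto
  // CloneWorklist; draining the worklist therefore clones exactly the blocks
  // reachable from StartingInst.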
  std::vector<const BasicBlock *> CloneWorklist;
  PFC.CloneBlock(StartingBB, StartingInst->getIterator(), CloneWorklist);
  while (!CloneWorklist.empty()) {
    const BasicBlock *BB = CloneWorklist.back();
    CloneWorklist.pop_back();
    PFC.CloneBlock(BB, BB->begin(), CloneWorklist);
  }

  // Loop over all of the basic blocks in the old function. If the block was
  // reachable, we have cloned it and the old block is now in the value map:
  // insert it into the new function in the right order. If not, ignore it.
  //
  // Defer PHI resolution until the rest of the function is resolved.
  SmallVector<const PHINode *, 16> PHIToResolve;
  for (const BasicBlock &BI : *OldFunc) {
    Value *V = VMap.lookup(&BI);
    BasicBlock *NewBB = cast_or_null<BasicBlock>(V);
    if (!NewBB)
      continue; // Dead block.

    // Move the new block to preserve the order in the original function.
    NewBB->moveBefore(NewFunc->end());

    // Handle PHI nodes specially, as we have to remove references to dead
    // blocks.
    for (const PHINode &PN : BI.phis()) {
      // PHI nodes may have been remapped to non-PHI nodes by the caller or
      // during the cloning process.
      if (isa<PHINode>(VMap[&PN]))
        PHIToResolve.push_back(&PN);
      else
        break;
    }

    // Finally, remap the terminator instructions, as those can't be remapped
    // until all BBs are mapped.
    RemapInstruction(NewBB->getTerminator(), VMap,
                     ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges,
                     TypeMapper, Materializer);
  }

  // Defer PHI resolution until the rest of the function is resolved; PHI
  // resolution requires the CFG to be up-to-date.
  for (unsigned phino = 0, e = PHIToResolve.size(); phino != e;) {
    const PHINode *OPN = PHIToResolve[phino];
    unsigned NumPreds = OPN->getNumIncomingValues();
    const BasicBlock *OldBB = OPN->getParent();
    BasicBlock *NewBB = cast<BasicBlock>(VMap[OldBB]);

    // Map operands for blocks that are live and remove operands for blocks
    // that are dead.
    for (; phino != PHIToResolve.size() &&
           PHIToResolve[phino]->getParent() == OldBB;
         ++phino) {
      OPN = PHIToResolve[phino];
      PHINode *PN = cast<PHINode>(VMap[OPN]);
      for (unsigned pred = 0, e = NumPreds; pred != e; ++pred) {
        Value *V = VMap.lookup(PN->getIncomingBlock(pred));
        if (BasicBlock *MappedBlock = cast_or_null<BasicBlock>(V)) {
          Value *InVal =
              MapValue(PN->getIncomingValue(pred), VMap,
                       ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges);
          assert(InVal && "Unknown input value?");
          PN->setIncomingValue(pred, InVal);
          PN->setIncomingBlock(pred, MappedBlock);
        } else {
          PN->removeIncomingValue(pred, false);
          --pred; // Revisit the next entry.
          --e;
        }
      }
    }

    // The loop above has removed PHI entries for those blocks that are dead
    // and has updated others. However, if a block is live (i.e. copied over)
    // but its terminator has been changed to not go to this block, then our
    // phi nodes will have invalid entries. Update the PHI nodes in this
    // case.
    PHINode *PN = cast<PHINode>(NewBB->begin());
    NumPreds = pred_size(NewBB);
    if (NumPreds != PN->getNumIncomingValues()) {
      assert(NumPreds < PN->getNumIncomingValues());
      // Count how many times each predecessor comes to this block.
      std::map<BasicBlock *, unsigned> PredCount;
      for (BasicBlock *Pred : predecessors(NewBB))
        --PredCount[Pred];

      // Figure out how many entries to remove from each PHI.
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
        ++PredCount[PN->getIncomingBlock(i)];

      // At this point, the excess predecessor entries are positive in the
      // map. Loop over all of the PHIs and remove excess predecessor
      // entries.
      BasicBlock::iterator I = NewBB->begin();
      for (; (PN = dyn_cast<PHINode>(I)); ++I) {
        for (const auto &PCI : PredCount) {
          BasicBlock *Pred = PCI.first;
          for (unsigned NumToRemove = PCI.second; NumToRemove; --NumToRemove)
            PN->removeIncomingValue(Pred, false);
        }
      }
    }

    // If the loops above have made these phi nodes have 0 or 1 operand,
    // replace them with poison or the input value. We must do this for
    // correctness, because 0-operand phis are not valid.
    PN = cast<PHINode>(NewBB->begin());
    if (PN->getNumIncomingValues() == 0) {
      BasicBlock::iterator I = NewBB->begin();
      BasicBlock::const_iterator OldI = OldBB->begin();
      while ((PN = dyn_cast<PHINode>(I++))) {
        Value *NV = PoisonValue::get(PN->getType());
        PN->replaceAllUsesWith(NV);
        assert(VMap[&*OldI] == PN && "VMap mismatch");
        VMap[&*OldI] = NV;
        PN->eraseFromParent();
        ++OldI;
      }
    }
  }

  // Drop all incompatible return attributes that cannot be applied to NewFunc
  // during cloning, so as to allow instruction simplification to reason on the
  // old state of the function. The original attributes are restored later.
  AttributeList Attrs = NewFunc->getAttributes();
  AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible(
      OldFunc->getReturnType(), Attrs.getRetAttrs());
  NewFunc->removeRetAttrs(IncompatibleAttrs);

  // As phi-nodes have now been remapped, allow incremental simplification of
  // newly-cloned instructions.
  const DataLayout &DL = NewFunc->getDataLayout();
  for (const auto &BB : *OldFunc) {
    for (const auto &I : BB) {
      auto *NewI = dyn_cast_or_null<Instruction>(VMap.lookup(&I));
      if (!NewI)
        continue;

      if (Value *V = simplifyInstruction(NewI, DL)) {
        NewI->replaceAllUsesWith(V);

        if (isInstructionTriviallyDead(NewI)) {
          NewI->eraseFromParent();
        } else {
          // Did not erase it? Then restore the VMap entry for the new
          // instruction, which was previously dropped by `ValueIsRAUWd`.
          VMap[&I] = NewI;
        }
      }
    }
  }

  // Restore attributes.
  NewFunc->setAttributes(Attrs);

  // Remap debug intrinsic operands now that all values have been mapped.
  // Doing this now (late) preserves use-before-defs in debug intrinsics. If
  // we didn't do this, ValueAsMetadata(use-before-def) operands would be
  // replaced by empty metadata. This would signal later cleanup passes to
  // remove the debug intrinsics, potentially causing incorrect locations.
  for (const auto *DVI : DbgIntrinsics) {
    if (DbgVariableIntrinsic *NewDVI =
            cast_or_null<DbgVariableIntrinsic>(VMap.lookup(DVI)))
      RemapInstruction(NewDVI, VMap,
                       ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges,
                       TypeMapper, Materializer);
  }

  // Do the same for DbgVariableRecords, touching all the instructions in the
  // cloned range of blocks.
  Function::iterator Begin = cast<BasicBlock>(VMap[StartingBB])->getIterator();
  for (BasicBlock &BB : make_range(Begin, NewFunc->end())) {
    for (Instruction &I : BB) {
      RemapDbgRecordRange(I.getModule(), I.getDbgRecordRange(), VMap,
                          ModuleLevelChanges ? RF_None
                                             : RF_NoModuleLevelChanges,
                          TypeMapper, Materializer);
    }
  }

  // Simplify conditional branches and switches with a constant operand. We try
  // to prune these out when cloning, but if the simplification required
  // looking through PHI nodes, those are only available after forming the full
  // basic block. That may leave some here, and we still want to prune the dead
  // code as early as possible.
  for (BasicBlock &BB : make_range(Begin, NewFunc->end()))
    ConstantFoldTerminator(&BB);

  // Some blocks may have become unreachable as a result. Find and delete them.
  {
    SmallPtrSet<BasicBlock *, 16> ReachableBlocks;
    SmallVector<BasicBlock *, 16> Worklist;
    Worklist.push_back(&*Begin);
    while (!Worklist.empty()) {
      BasicBlock *BB = Worklist.pop_back_val();
      if (ReachableBlocks.insert(BB).second)
        append_range(Worklist, successors(BB));
    }

    SmallVector<BasicBlock *, 16> UnreachableBlocks;
    for (BasicBlock &BB : make_range(Begin, NewFunc->end()))
      if (!ReachableBlocks.contains(&BB))
        UnreachableBlocks.push_back(&BB);
    DeleteDeadBlocks(UnreachableBlocks);
  }

  // Now that the inlined function body has been fully constructed, go through
  // and zap unconditional fall-through branches. This happens all the time when
  // specializing code: code specialization turns conditional branches into
  // uncond branches, and this code folds them.
  Function::iterator I = Begin;
  while (I != NewFunc->end()) {
    BranchInst *BI = dyn_cast<BranchInst>(I->getTerminator());
    if (!BI || BI->isConditional()) {
      ++I;
      continue;
    }

    BasicBlock *Dest = BI->getSuccessor(0);
    if (!Dest->getSinglePredecessor()) {
      ++I;
      continue;
    }

    // We shouldn't be able to get single-entry PHI nodes here, as instsimplify
    // above should have zapped all of them.
    assert(!isa<PHINode>(Dest->begin()));

    // We know all single-entry PHI nodes in the inlined function have been
    // removed, so we just need to splice the blocks.
    BI->eraseFromParent();

    // Make all PHI nodes that referred to Dest now refer to I as their source.
    Dest->replaceAllUsesWith(&*I);

    // Move all the instructions in the succ to the pred.
    I->splice(I->end(), Dest);

    // Remove the dest block.
    Dest->eraseFromParent();

    // Do not increment I, iteratively merge all things this block branches to.
  }

  // Make a final pass over the basic blocks from the old function to gather
  // any return instructions which survived folding. We have to do this here
  // because we can iteratively remove and merge returns above.
  for (Function::iterator I = cast<BasicBlock>(VMap[StartingBB])->getIterator(),
                          E = NewFunc->end();
       I != E; ++I)
    if (ReturnInst *RI = dyn_cast<ReturnInst>(I->getTerminator()))
      Returns.push_back(RI);
}

/// This works exactly like CloneFunctionInto,
/// except that it does some simple constant prop and DCE on the fly. The
/// effect of this is to copy significantly less code in cases where (for
/// example) a function call with constant arguments is inlined, and those
/// constant arguments cause a significant amount of code in the callee to be
/// dead. Since this doesn't produce an exact copy of the input, it can't be
/// used for things like CloneFunction or CloneModule.
void llvm::CloneAndPruneFunctionInto(
    Function *NewFunc, const Function *OldFunc, ValueToValueMapTy &VMap,
    bool ModuleLevelChanges, SmallVectorImpl<ReturnInst *> &Returns,
    const char *NameSuffix, ClonedCodeInfo *CodeInfo) {
  CloneAndPruneIntoFromInst(NewFunc, OldFunc, &OldFunc->front().front(), VMap,
                            ModuleLevelChanges, Returns, NameSuffix, CodeInfo);
}

/// Remaps instructions in \p Blocks using the mapping in \p VMap.
void llvm::remapInstructionsInBlocks(ArrayRef<BasicBlock *> Blocks,
                                     ValueToValueMapTy &VMap) {
  // Rewrite the code to refer to itself.
  for (auto *BB : Blocks) {
    for (auto &Inst : *BB) {
      RemapDbgRecordRange(Inst.getModule(), Inst.getDbgRecordRange(), VMap,
                          RF_NoModuleLevelChanges | RF_IgnoreMissingLocals);
      RemapInstruction(&Inst, VMap,
                       RF_NoModuleLevelChanges | RF_IgnoreMissingLocals);
    }
  }
}

/// Clones a loop \p OrigLoop. Returns the loop and the blocks in \p
/// Blocks.
///
/// Updates LoopInfo and DominatorTree assuming the loop is dominated by block
/// \p LoopDomBB. Insert the new blocks before block specified in \p Before.
Loop *llvm::cloneLoopWithPreheader(BasicBlock *Before, BasicBlock *LoopDomBB,
                                   Loop *OrigLoop, ValueToValueMapTy &VMap,
                                   const Twine &NameSuffix, LoopInfo *LI,
                                   DominatorTree *DT,
                                   SmallVectorImpl<BasicBlock *> &Blocks) {
  Function *F = OrigLoop->getHeader()->getParent();
  Loop *ParentLoop = OrigLoop->getParentLoop();
  DenseMap<Loop *, Loop *> LMap;

  Loop *NewLoop = LI->AllocateLoop();
  LMap[OrigLoop] = NewLoop;
  if (ParentLoop)
    ParentLoop->addChildLoop(NewLoop);
  else
    LI->addTopLevelLoop(NewLoop);

  BasicBlock *OrigPH = OrigLoop->getLoopPreheader();
  assert(OrigPH && "No preheader");
  BasicBlock *NewPH = CloneBasicBlock(OrigPH, VMap, NameSuffix, F);
  // To rename the loop PHIs.
  VMap[OrigPH] = NewPH;
  Blocks.push_back(NewPH);

  // Update LoopInfo.
  if (ParentLoop)
    ParentLoop->addBasicBlockToLoop(NewPH, *LI);

  // Update DominatorTree.
  DT->addNewBlock(NewPH, LoopDomBB);

  for (Loop *CurLoop : OrigLoop->getLoopsInPreorder()) {
    Loop *&NewLoop = LMap[CurLoop];
    if (!NewLoop) {
      NewLoop = LI->AllocateLoop();

      // Establish the parent/child relationship.
      Loop *OrigParent = CurLoop->getParentLoop();
      assert(OrigParent && "Could not find the original parent loop");
      Loop *NewParentLoop = LMap[OrigParent];
      assert(NewParentLoop && "Could not find the new parent loop");

      NewParentLoop->addChildLoop(NewLoop);
    }
  }

  for (BasicBlock *BB : OrigLoop->getBlocks()) {
    Loop *CurLoop = LI->getLoopFor(BB);
    Loop *&NewLoop = LMap[CurLoop];
    assert(NewLoop && "Expecting new loop to be allocated");

    BasicBlock *NewBB = CloneBasicBlock(BB, VMap, NameSuffix, F);
    VMap[BB] = NewBB;

    // Update LoopInfo.
    NewLoop->addBasicBlockToLoop(NewBB, *LI);

    // Add DominatorTree node. After seeing all blocks, update to correct
    // IDom.
    DT->addNewBlock(NewBB, NewPH);

    Blocks.push_back(NewBB);
  }

  for (BasicBlock *BB : OrigLoop->getBlocks()) {
    // Update loop headers.
    Loop *CurLoop = LI->getLoopFor(BB);
    if (BB == CurLoop->getHeader())
      LMap[CurLoop]->moveToHeader(cast<BasicBlock>(VMap[BB]));

    // Update DominatorTree.
    BasicBlock *IDomBB = DT->getNode(BB)->getIDom()->getBlock();
    DT->changeImmediateDominator(cast<BasicBlock>(VMap[BB]),
                                 cast<BasicBlock>(VMap[IDomBB]));
  }

  // Move them physically from the end of the block list.
  F->splice(Before->getIterator(), F, NewPH->getIterator());
  F->splice(Before->getIterator(), F, NewLoop->getHeader()->getIterator(),
            F->end());

  return NewLoop;
}

/// Duplicate non-Phi instructions from the beginning of a block up to the
/// StopAt instruction into a split block between BB and its predecessor.
BasicBlock *llvm::DuplicateInstructionsInSplitBetween(
    BasicBlock *BB, BasicBlock *PredBB, Instruction *StopAt,
    ValueToValueMapTy &ValueMapping, DomTreeUpdater &DTU) {

  assert(count(successors(PredBB), BB) == 1 &&
         "There must be a single edge between PredBB and BB!");
  // We are going to have to map operands from the original BB block to the new
  // copy of the block 'NewBB'. If there are PHI nodes in BB, evaluate them to
  // account for entry from PredBB.
  BasicBlock::iterator BI = BB->begin();
  for (; PHINode *PN = dyn_cast<PHINode>(BI); ++BI)
    ValueMapping[PN] = PN->getIncomingValueForBlock(PredBB);

  BasicBlock *NewBB = SplitEdge(PredBB, BB);
  NewBB->setName(PredBB->getName() + ".split");
  Instruction *NewTerm = NewBB->getTerminator();

  // FIXME: SplitEdge does not yet take a DTU, so we include the split edge
  // in the update set here.
  DTU.applyUpdates({{DominatorTree::Delete, PredBB, BB},
                    {DominatorTree::Insert, PredBB, NewBB},
                    {DominatorTree::Insert, NewBB, BB}});

  // Clone the non-phi instructions of BB into NewBB, keeping track of the
  // mapping and using it to remap operands in the cloned instructions.
  // Stop once we see the terminator too. This covers the case where BB's
  // terminator gets replaced and StopAt == BB's terminator.
  for (; StopAt != &*BI && BB->getTerminator() != &*BI; ++BI) {
    Instruction *New = BI->clone();
    New->setName(BI->getName());
    New->insertBefore(NewTerm);
    New->cloneDebugInfoFrom(&*BI);
    ValueMapping[&*BI] = New;

    // Remap operands to patch up intra-block references.
    for (unsigned i = 0, e = New->getNumOperands(); i != e; ++i)
      if (Instruction *Inst = dyn_cast<Instruction>(New->getOperand(i))) {
        auto I = ValueMapping.find(Inst);
        if (I != ValueMapping.end())
          New->setOperand(i, I->second);
      }

    // Remap debug variable operands.
    remapDebugVariable(ValueMapping, New);
  }

  return NewBB;
}

void llvm::cloneNoAliasScopes(ArrayRef<MDNode *> NoAliasDeclScopes,
                              DenseMap<MDNode *, MDNode *> &ClonedScopes,
                              StringRef Ext, LLVMContext &Context) {
  MDBuilder MDB(Context);

  for (auto *ScopeList : NoAliasDeclScopes) {
    for (const auto &MDOperand : ScopeList->operands()) {
      if (MDNode *MD = dyn_cast<MDNode>(MDOperand)) {
        AliasScopeNode SNANode(MD);

        std::string Name;
        auto ScopeName = SNANode.getName();
        if (!ScopeName.empty())
          Name = (Twine(ScopeName) + ":" + Ext).str();
        else
          Name = std::string(Ext);

        MDNode *NewScope = MDB.createAnonymousAliasScope(
            const_cast<MDNode *>(SNANode.getDomain()), Name);
        ClonedScopes.insert(std::make_pair(MD, NewScope));
      }
    }
  }
}

void llvm::adaptNoAliasScopes(Instruction *I,
                              const DenseMap<MDNode *, MDNode *> &ClonedScopes,
                              LLVMContext &Context) {
  auto CloneScopeList = [&](const MDNode *ScopeList) -> MDNode * {
    bool NeedsReplacement = false;
    SmallVector<Metadata *, 8> NewScopeList;
    for (const auto &MDOp : ScopeList->operands()) {
      if (MDNode *MD = dyn_cast<MDNode>(MDOp)) {
        if (auto *NewMD = ClonedScopes.lookup(MD)) {
          NewScopeList.push_back(NewMD);
          NeedsReplacement = true;
          continue;
        }
        NewScopeList.push_back(MD);
      }
    }
    if (NeedsReplacement)
      return MDNode::get(Context, NewScopeList);
    return nullptr;
  };

  if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(I))
    if (auto *NewScopeList = CloneScopeList(Decl->getScopeList()))
      Decl->setScopeList(NewScopeList);

  auto replaceWhenNeeded = [&](unsigned MD_ID) {
    if (const MDNode *CSNoAlias = I->getMetadata(MD_ID))
      if (auto *NewScopeList = CloneScopeList(CSNoAlias))
        I->setMetadata(MD_ID, NewScopeList);
  };
  replaceWhenNeeded(LLVMContext::MD_noalias);
  replaceWhenNeeded(LLVMContext::MD_alias_scope);
}

void llvm::cloneAndAdaptNoAliasScopes(ArrayRef<MDNode *> NoAliasDeclScopes,
                                      ArrayRef<BasicBlock *> NewBlocks,
                                      LLVMContext &Context, StringRef Ext) {
  if (NoAliasDeclScopes.empty())
    return;

  DenseMap<MDNode *, MDNode *> ClonedScopes;
  LLVM_DEBUG(dbgs() << "cloneAndAdaptNoAliasScopes: cloning "
                    << NoAliasDeclScopes.size() << " node(s)\n");

  cloneNoAliasScopes(NoAliasDeclScopes, ClonedScopes, Ext, Context);
  // Identify instructions using metadata that needs adaptation
  for (BasicBlock *NewBlock : NewBlocks)
    for (Instruction &I : *NewBlock)
      adaptNoAliasScopes(&I, ClonedScopes, Context);
}

void llvm::cloneAndAdaptNoAliasScopes(ArrayRef<MDNode *> NoAliasDeclScopes,
                                      Instruction *IStart, Instruction *IEnd,
                                      LLVMContext &Context, StringRef Ext) {
  if (NoAliasDeclScopes.empty())
    return;

  DenseMap<MDNode *, MDNode *> ClonedScopes;
  LLVM_DEBUG(dbgs() << "cloneAndAdaptNoAliasScopes: cloning "
                    << NoAliasDeclScopes.size() << " node(s)\n");

  cloneNoAliasScopes(NoAliasDeclScopes, ClonedScopes, Ext, Context);
  // Identify instructions using metadata that needs adaptation
  assert(IStart->getParent() == IEnd->getParent() && "different basic block ?");
  auto ItStart = IStart->getIterator();
  auto ItEnd = IEnd->getIterator();
  ++ItEnd; // IEnd is included, increment ItEnd to get the end of the range
  for (auto &I : llvm::make_range(ItStart, ItEnd))
    adaptNoAliasScopes(&I, ClonedScopes, Context);
}

void llvm::identifyNoAliasScopesToClone(
    ArrayRef<BasicBlock *> BBs, SmallVectorImpl<MDNode *> &NoAliasDeclScopes) {
  for (BasicBlock *BB : BBs)
    for (Instruction &I : *BB)
      if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
        NoAliasDeclScopes.push_back(Decl->getScopeList());
}

void llvm::identifyNoAliasScopesToClone(
    BasicBlock::iterator Start, BasicBlock::iterator End,
    SmallVectorImpl<MDNode *> &NoAliasDeclScopes) {
  for (Instruction &I : make_range(Start, End))
    if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
      NoAliasDeclScopes.push_back(Decl->getScopeList());
}