//===- CloneFunction.cpp - Clone a function into another function ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the CloneFunctionInto interface, which is used as the
// low-level function cloner. This is used by the CloneFunction and function
// inliner to do the dirty work of copying the body of a function around.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/AttributeMask.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <map>
#include <optional>
using namespace llvm;

#define DEBUG_TYPE "clone-function"

/// See comments in Cloning.h.
BasicBlock *llvm::CloneBasicBlock(const BasicBlock *BB, ValueToValueMapTy &VMap,
                                  const Twine &NameSuffix, Function *F,
                                  ClonedCodeInfo *CodeInfo) {
  BasicBlock *NewBB = BasicBlock::Create(BB->getContext(), "", F);
  NewBB->IsNewDbgInfoFormat = BB->IsNewDbgInfoFormat;
  if (BB->hasName())
    NewBB->setName(BB->getName() + NameSuffix);

  bool hasCalls = false, hasDynamicAllocas = false, hasMemProfMetadata = false;

  // Loop over all instructions, and copy them over.
  for (const Instruction &I : *BB) {
    Instruction *NewInst = I.clone();
    if (I.hasName())
      NewInst->setName(I.getName() + NameSuffix);

    NewInst->insertBefore(*NewBB, NewBB->end());
    NewInst->cloneDebugInfoFrom(&I);

    VMap[&I] = NewInst; // Add instruction map to value.

    if (isa<CallInst>(I) && !I.isDebugOrPseudoInst()) {
      hasCalls = true;
      hasMemProfMetadata |= I.hasMetadata(LLVMContext::MD_memprof);
      hasMemProfMetadata |= I.hasMetadata(LLVMContext::MD_callsite);
    }
    if (const AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
      if (!AI->isStaticAlloca()) {
        hasDynamicAllocas = true;
      }
    }
  }

  if (CodeInfo) {
    CodeInfo->ContainsCalls |= hasCalls;
    CodeInfo->ContainsMemProfMetadata |= hasMemProfMetadata;
    CodeInfo->ContainsDynamicAllocas |= hasDynamicAllocas;
  }
  return NewBB;
}

void llvm::CloneFunctionAttributesInto(Function *NewFunc,
                                       const Function *OldFunc,
                                       ValueToValueMapTy &VMap,
                                       bool ModuleLevelChanges,
                                       ValueMapTypeRemapper *TypeMapper,
                                       ValueMaterializer *Materializer) {
  // Copy all attributes other than those stored in Function's AttributeList
  // which holds e.g. parameters and return value attributes.
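  // copyAttributesFrom() would also overwrite the AttributeList wholesale,
  // which is why the current list is saved here and reinstated just below;
  // the argument attributes are remapped individually later in this function.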
  AttributeList NewAttrs = NewFunc->getAttributes();
  NewFunc->copyAttributesFrom(OldFunc);
  NewFunc->setAttributes(NewAttrs);

  const RemapFlags FuncGlobalRefFlags =
      ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges;

  // Fix up the personality function that got copied over.
  if (OldFunc->hasPersonalityFn())
    NewFunc->setPersonalityFn(MapValue(OldFunc->getPersonalityFn(), VMap,
                                       FuncGlobalRefFlags, TypeMapper,
                                       Materializer));

  if (OldFunc->hasPrefixData()) {
    NewFunc->setPrefixData(MapValue(OldFunc->getPrefixData(), VMap,
                                    FuncGlobalRefFlags, TypeMapper,
                                    Materializer));
  }

  if (OldFunc->hasPrologueData()) {
    NewFunc->setPrologueData(MapValue(OldFunc->getPrologueData(), VMap,
                                      FuncGlobalRefFlags, TypeMapper,
                                      Materializer));
  }

  SmallVector<AttributeSet, 4> NewArgAttrs(NewFunc->arg_size());
  AttributeList OldAttrs = OldFunc->getAttributes();

  // Clone any argument attributes that are present in the VMap.
  for (const Argument &OldArg : OldFunc->args()) {
    if (Argument *NewArg = dyn_cast<Argument>(VMap[&OldArg])) {
      // Remap the parameter indices.
      NewArgAttrs[NewArg->getArgNo()] =
          OldAttrs.getParamAttrs(OldArg.getArgNo());
    }
  }

  NewFunc->setAttributes(
      AttributeList::get(NewFunc->getContext(), OldAttrs.getFnAttrs(),
                         OldAttrs.getRetAttrs(), NewArgAttrs));
}

DISubprogram *llvm::CollectDebugInfoForCloning(const Function &F,
                                               CloneFunctionChangeType Changes,
                                               DebugInfoFinder &DIFinder) {
  DISubprogram *SPClonedWithinModule = nullptr;
  if (Changes < CloneFunctionChangeType::DifferentModule) {
    SPClonedWithinModule = F.getSubprogram();
  }
  if (SPClonedWithinModule)
    DIFinder.processSubprogram(SPClonedWithinModule);

  const Module *M = F.getParent();
  if (Changes != CloneFunctionChangeType::ClonedModule && M) {
    // Inspect instructions to process e.g. DILexicalBlocks of inlined functions
    for (const auto &I : instructions(F))
      DIFinder.processInstruction(*M, I);
  }

  return SPClonedWithinModule;
}

// Clone OldFunc into NewFunc, transforming the old arguments into references to
// VMap values.
void llvm::CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
                             ValueToValueMapTy &VMap,
                             CloneFunctionChangeType Changes,
                             SmallVectorImpl<ReturnInst *> &Returns,
                             const char *NameSuffix, ClonedCodeInfo *CodeInfo,
                             ValueMapTypeRemapper *TypeMapper,
                             ValueMaterializer *Materializer) {
  NewFunc->setIsNewDbgInfoFormat(OldFunc->IsNewDbgInfoFormat);
  assert(NameSuffix && "NameSuffix cannot be null!");

#ifndef NDEBUG
  for (const Argument &I : OldFunc->args())
    assert(VMap.count(&I) && "No mapping from source argument specified!");
#endif

  bool ModuleLevelChanges = Changes > CloneFunctionChangeType::LocalChangesOnly;

  CloneFunctionAttributesInto(NewFunc, OldFunc, VMap, ModuleLevelChanges,
                              TypeMapper, Materializer);

  // Everything else beyond this point deals with function instructions,
  // so if we are dealing with a function declaration, we're done.
  if (OldFunc->isDeclaration())
    return;

  // When we remap instructions within the same module, we want to avoid
  // duplicating inlined DISubprograms, so record all subprograms we find as we
  // duplicate instructions and then freeze them in the MD map. We also record
  // information about dbg.value and dbg.declare to avoid duplicating the
  // types.
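  // The finder is populated by CollectDebugInfoForCloning below; it is
  // consulted both to pre-map debug metadata to itself for same-module clones
  // and, after cloning, to update !llvm.dbg.cu when cloning into a different
  // module.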
  DebugInfoFinder DIFinder;

  // Track the subprogram attachment that needs to be cloned to fine-tune the
  // mapping within the same module.
  if (Changes < CloneFunctionChangeType::DifferentModule) {
    // Need to find subprograms, types, and compile units.

    assert((NewFunc->getParent() == nullptr ||
            NewFunc->getParent() == OldFunc->getParent()) &&
           "Expected NewFunc to have the same parent, or no parent");
  } else {
    // Need to find all the compile units.

    assert((NewFunc->getParent() == nullptr ||
            NewFunc->getParent() != OldFunc->getParent()) &&
           "Expected NewFunc to have different parents, or no parent");

    if (Changes == CloneFunctionChangeType::DifferentModule) {
      assert(NewFunc->getParent() &&
             "Need parent of new function to maintain debug info invariants");
    }
  }

  DISubprogram *SPClonedWithinModule =
      CollectDebugInfoForCloning(*OldFunc, Changes, DIFinder);

  if (Changes < CloneFunctionChangeType::DifferentModule &&
      DIFinder.subprogram_count() > 0) {
    // Turn on module-level changes, since we need to clone (some of) the
    // debug info metadata.
    //
    // FIXME: Metadata effectively owned by a function should be made
    // local, and only that local metadata should be cloned.
    ModuleLevelChanges = true;

    auto mapToSelfIfNew = [&VMap](MDNode *N) {
      // Avoid clobbering an existing mapping.
      (void)VMap.MD().try_emplace(N, N);
    };

    // Avoid cloning types, compile units, and (other) subprograms.
    SmallPtrSet<const DISubprogram *, 16> MappedToSelfSPs;
    for (DISubprogram *ISP : DIFinder.subprograms()) {
      if (ISP != SPClonedWithinModule) {
        mapToSelfIfNew(ISP);
        MappedToSelfSPs.insert(ISP);
      }
    }

    // If a subprogram isn't going to be cloned, skip its lexical blocks as
    // well.
    for (DIScope *S : DIFinder.scopes()) {
      auto *LScope = dyn_cast<DILocalScope>(S);
      if (LScope && MappedToSelfSPs.count(LScope->getSubprogram()))
        mapToSelfIfNew(S);
    }

    for (DICompileUnit *CU : DIFinder.compile_units())
      mapToSelfIfNew(CU);

    for (DIType *Type : DIFinder.types())
      mapToSelfIfNew(Type);
  } else {
    assert(!SPClonedWithinModule &&
           "Subprogram should be in DIFinder->subprogram_count()...");
  }

  const auto RemapFlag = ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges;
  // Duplicate the metadata that is attached to the cloned function.
  // Subprograms/CUs/types that were already mapped to themselves won't be
  // duplicated.
  SmallVector<std::pair<unsigned, MDNode *>, 1> MDs;
  OldFunc->getAllMetadata(MDs);
  for (auto MD : MDs) {
    NewFunc->addMetadata(MD.first, *MapMetadata(MD.second, VMap, RemapFlag,
                                                TypeMapper, Materializer));
  }

  // Loop over all of the basic blocks in the function, cloning them as
  // appropriate. Note that we save BE this way in order to handle cloning of
  // recursive functions into themselves.
  for (const BasicBlock &BB : *OldFunc) {

    // Create a new basic block and copy instructions into it!
    BasicBlock *CBB = CloneBasicBlock(&BB, VMap, NameSuffix, NewFunc, CodeInfo);

    // Add basic block mapping.
    VMap[&BB] = CBB;

    // It is only legal to clone a function if a block address within that
    // function is never referenced outside of the function. Given that, we
    // want to map block addresses from the old function to block addresses in
    // the clone. (This is different from the generic ValueMapper
    // implementation, which generates an invalid blockaddress when
    // cloning a function.)
    if (BB.hasAddressTaken()) {
      Constant *OldBBAddr = BlockAddress::get(const_cast<Function *>(OldFunc),
                                              const_cast<BasicBlock *>(&BB));
      VMap[OldBBAddr] = BlockAddress::get(NewFunc, CBB);
    }

    // Note return instructions for the caller.
    if (ReturnInst *RI = dyn_cast<ReturnInst>(CBB->getTerminator()))
      Returns.push_back(RI);
  }

  // Loop over all of the instructions in the new function, fixing up operand
  // references as we go. This uses VMap to do all the hard work.
  for (Function::iterator
           BB = cast<BasicBlock>(VMap[&OldFunc->front()])->getIterator(),
           BE = NewFunc->end();
       BB != BE; ++BB)
    // Loop over all instructions, fixing each one as we find it, and any
    // attached debug-info records.
    for (Instruction &II : *BB) {
      RemapInstruction(&II, VMap, RemapFlag, TypeMapper, Materializer);
      RemapDbgRecordRange(II.getModule(), II.getDbgRecordRange(), VMap,
                          RemapFlag, TypeMapper, Materializer);
    }

  // Only update !llvm.dbg.cu for DifferentModule (not CloneModule). In the
  // same module, the compile unit will already be listed (or not). When
  // cloning a module, CloneModule() will handle creating the named metadata.
  if (Changes != CloneFunctionChangeType::DifferentModule)
    return;

  // Update !llvm.dbg.cu with compile units added to the new module if this
  // function is being cloned in isolation.
  //
  // FIXME: This is making global / module-level changes, which doesn't seem
  // like the right encapsulation. Consider dropping the requirement to update
  // !llvm.dbg.cu (either obsoleting the node, or restricting it to
  // non-discardable compile units) instead of discovering compile units by
  // visiting the metadata attached to global values, which would allow this
  // code to be deleted. Alternatively, perhaps give responsibility for this
  // update to CloneFunctionInto's callers.
  auto *NewModule = NewFunc->getParent();
  auto *NMD = NewModule->getOrInsertNamedMetadata("llvm.dbg.cu");
  // Avoid multiple insertions of the same DICompileUnit to NMD.
  SmallPtrSet<const void *, 8> Visited;
  for (auto *Operand : NMD->operands())
    Visited.insert(Operand);
  for (auto *Unit : DIFinder.compile_units()) {
    MDNode *MappedUnit =
        MapMetadata(Unit, VMap, RF_None, TypeMapper, Materializer);
    if (Visited.insert(MappedUnit).second)
      NMD->addOperand(MappedUnit);
  }
}

/// Return a copy of the specified function and add it to that function's
/// module. Also, any references specified in the VMap are changed to refer to
/// their mapped value instead of the original one. If any of the arguments to
/// the function are in the VMap, the arguments are deleted from the resultant
/// function. The VMap is updated to include mappings from all of the
/// instructions and basic blocks in the function from their old to new values.
///
Function *llvm::CloneFunction(Function *F, ValueToValueMapTy &VMap,
                              ClonedCodeInfo *CodeInfo) {
  std::vector<Type *> ArgTypes;

  // The user might be deleting arguments to the function by specifying them in
  // the VMap. If so, we should not add those arguments to the argument type
  // vector.
  for (const Argument &I : F->args())
    if (VMap.count(&I) == 0) // Haven't mapped the argument to anything yet?
      ArgTypes.push_back(I.getType());

  // Create a new function type...
  FunctionType *FTy =
      FunctionType::get(F->getFunctionType()->getReturnType(), ArgTypes,
                        F->getFunctionType()->isVarArg());

  // Create the new function...
  Function *NewF = Function::Create(FTy, F->getLinkage(), F->getAddressSpace(),
                                    F->getName(), F->getParent());
  NewF->setIsNewDbgInfoFormat(F->IsNewDbgInfoFormat);

  // Loop over the arguments, copying the names of the mapped arguments over...
  Function::arg_iterator DestI = NewF->arg_begin();
  for (const Argument &I : F->args())
    if (VMap.count(&I) == 0) {     // Is this argument preserved?
      DestI->setName(I.getName()); // Copy the name over...
      VMap[&I] = &*DestI++;        // Add mapping to VMap
    }

  SmallVector<ReturnInst *, 8> Returns; // Ignore returns cloned.
  CloneFunctionInto(NewF, F, VMap, CloneFunctionChangeType::LocalChangesOnly,
                    Returns, "", CodeInfo);

  return NewF;
}

namespace {
/// This is a private class used to implement CloneAndPruneFunctionInto.
struct PruningFunctionCloner {
  Function *NewFunc;
  const Function *OldFunc;
  ValueToValueMapTy &VMap;
  bool ModuleLevelChanges;
  const char *NameSuffix;
  ClonedCodeInfo *CodeInfo;
  bool HostFuncIsStrictFP;

  Instruction *cloneInstruction(BasicBlock::const_iterator II);

public:
  PruningFunctionCloner(Function *newFunc, const Function *oldFunc,
                        ValueToValueMapTy &valueMap, bool moduleLevelChanges,
                        const char *nameSuffix, ClonedCodeInfo *codeInfo)
      : NewFunc(newFunc), OldFunc(oldFunc), VMap(valueMap),
        ModuleLevelChanges(moduleLevelChanges), NameSuffix(nameSuffix),
        CodeInfo(codeInfo) {
    HostFuncIsStrictFP =
        newFunc->getAttributes().hasFnAttr(Attribute::StrictFP);
  }

  /// The specified block is found to be reachable, clone it and
  /// anything that it can reach.
  void CloneBlock(const BasicBlock *BB, BasicBlock::const_iterator StartingInst,
                  std::vector<const BasicBlock *> &ToClone);
};
} // namespace

Instruction *
PruningFunctionCloner::cloneInstruction(BasicBlock::const_iterator II) {
  const Instruction &OldInst = *II;
  Instruction *NewInst = nullptr;
  if (HostFuncIsStrictFP) {
    Intrinsic::ID CIID = getConstrainedIntrinsicID(OldInst);
    if (CIID != Intrinsic::not_intrinsic) {
      // Instead of cloning the instruction, a call to the constrained
      // intrinsic should be created.
      // Assume the first arguments of constrained intrinsics are the same as
      // the operands of the original instruction.

      // Determine overloaded types of the intrinsic.
      SmallVector<Type *, 2> TParams;
      SmallVector<Intrinsic::IITDescriptor, 8> Descriptor;
      getIntrinsicInfoTableEntries(CIID, Descriptor);
      for (unsigned I = 0, E = Descriptor.size(); I != E; ++I) {
        Intrinsic::IITDescriptor Operand = Descriptor[I];
        switch (Operand.Kind) {
        case Intrinsic::IITDescriptor::Argument:
          if (Operand.getArgumentKind() !=
              Intrinsic::IITDescriptor::AK_MatchType) {
            if (I == 0)
              TParams.push_back(OldInst.getType());
            else
              TParams.push_back(OldInst.getOperand(I - 1)->getType());
          }
          break;
        case Intrinsic::IITDescriptor::SameVecWidthArgument:
          ++I;
          break;
        default:
          break;
        }
      }

      // Create intrinsic call.
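      // The call takes the original operands (minus the callee operand for
      // calls), plus the predicate string for fcmp and the trailing
      // rounding-mode / exception-behavior metadata appended below; e.g. a
      // plain fadd in the callee becomes a call to
      // llvm.experimental.constrained.fadd with !"round.tonearest" and
      // !"fpexcept.ignore" appended.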
      LLVMContext &Ctx = NewFunc->getContext();
      Function *IFn = Intrinsic::getOrInsertDeclaration(NewFunc->getParent(),
                                                        CIID, TParams);
      SmallVector<Value *, 4> Args;
      unsigned NumOperands = OldInst.getNumOperands();
      if (isa<CallInst>(OldInst))
        --NumOperands;
      for (unsigned I = 0; I < NumOperands; ++I) {
        Value *Op = OldInst.getOperand(I);
        Args.push_back(Op);
      }
      if (const auto *CmpI = dyn_cast<FCmpInst>(&OldInst)) {
        FCmpInst::Predicate Pred = CmpI->getPredicate();
        StringRef PredName = FCmpInst::getPredicateName(Pred);
        Args.push_back(MetadataAsValue::get(Ctx, MDString::get(Ctx, PredName)));
      }

      // The last arguments of a constrained intrinsic are metadata that
      // represent the rounding mode (absent in some intrinsics) and the
      // exception behavior. The inlined function uses default settings.
      if (Intrinsic::hasConstrainedFPRoundingModeOperand(CIID))
        Args.push_back(
            MetadataAsValue::get(Ctx, MDString::get(Ctx, "round.tonearest")));
      Args.push_back(
          MetadataAsValue::get(Ctx, MDString::get(Ctx, "fpexcept.ignore")));

      NewInst = CallInst::Create(IFn, Args, OldInst.getName() + ".strict");
    }
  }
  if (!NewInst)
    NewInst = II->clone();
  return NewInst;
}

/// The specified block is found to be reachable, clone it and
/// anything that it can reach.
void PruningFunctionCloner::CloneBlock(
    const BasicBlock *BB, BasicBlock::const_iterator StartingInst,
    std::vector<const BasicBlock *> &ToClone) {
  WeakTrackingVH &BBEntry = VMap[BB];

  // Have we already cloned this block?
  if (BBEntry)
    return;

  // Nope, clone it now.
  BasicBlock *NewBB;
  Twine NewName(BB->hasName() ? Twine(BB->getName()) + NameSuffix : "");
  BBEntry = NewBB = BasicBlock::Create(BB->getContext(), NewName, NewFunc);
  NewBB->IsNewDbgInfoFormat = BB->IsNewDbgInfoFormat;

  // It is only legal to clone a function if a block address within that
  // function is never referenced outside of the function. Given that, we
  // want to map block addresses from the old function to block addresses in
  // the clone. (This is different from the generic ValueMapper
  // implementation, which generates an invalid blockaddress when
  // cloning a function.)
  //
  // Note that we don't need to fix the mapping for unreachable blocks;
  // the default mapping there is safe.
  if (BB->hasAddressTaken()) {
    Constant *OldBBAddr = BlockAddress::get(const_cast<Function *>(OldFunc),
                                            const_cast<BasicBlock *>(BB));
    VMap[OldBBAddr] = BlockAddress::get(NewFunc, NewBB);
  }

  bool hasCalls = false, hasDynamicAllocas = false, hasStaticAllocas = false;
  bool hasMemProfMetadata = false;

  // Keep a cursor pointing at the last place we cloned debug-info records from.
  BasicBlock::const_iterator DbgCursor = StartingInst;
  auto CloneDbgRecordsToHere =
      [NewBB, &DbgCursor](Instruction *NewInst, BasicBlock::const_iterator II) {
        if (!NewBB->IsNewDbgInfoFormat)
          return;

        // Clone debug-info records onto this instruction. Iterate through any
        // source-instructions we've cloned and then subsequently optimised
        // away, so that their debug-info doesn't go missing.
        for (; DbgCursor != II; ++DbgCursor)
          NewInst->cloneDebugInfoFrom(&*DbgCursor, std::nullopt, false);
        NewInst->cloneDebugInfoFrom(&*II);
        DbgCursor = std::next(II);
      };

  // Loop over all instructions, and copy them over, DCE'ing as we go. This
  // loop doesn't include the terminator.
  for (BasicBlock::const_iterator II = StartingInst, IE = --BB->end(); II != IE;
       ++II) {

    // Don't clone fake_use as it may suppress many optimizations
    // due to inlining, especially SROA.
    if (auto *IntrInst = dyn_cast<IntrinsicInst>(II))
      if (IntrInst->getIntrinsicID() == Intrinsic::fake_use)
        continue;

    Instruction *NewInst = cloneInstruction(II);
    NewInst->insertInto(NewBB, NewBB->end());

    if (HostFuncIsStrictFP) {
      // All function calls in the inlined function must get 'strictfp'
      // attribute to prevent undesirable optimizations.
      if (auto *Call = dyn_cast<CallInst>(NewInst))
        Call->addFnAttr(Attribute::StrictFP);
    }

    // Eagerly remap operands to the newly cloned instruction, except for PHI
    // nodes for which we defer processing until we update the CFG. Also defer
    // debug intrinsic processing because they may contain use-before-defs.
    if (!isa<PHINode>(NewInst) && !isa<DbgVariableIntrinsic>(NewInst)) {
      RemapInstruction(NewInst, VMap,
                       ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges);

      // Eagerly constant fold the newly cloned instruction. If successful, add
      // a mapping to the new value. Non-constant operands may be incomplete at
      // this stage, thus instruction simplification is performed after
      // processing phi-nodes.
      if (Value *V = ConstantFoldInstruction(NewInst, BB->getDataLayout())) {
        if (isInstructionTriviallyDead(NewInst)) {
          VMap[&*II] = V;
          NewInst->eraseFromParent();
          continue;
        }
      }
    }

    if (II->hasName())
      NewInst->setName(II->getName() + NameSuffix);
    VMap[&*II] = NewInst; // Add instruction map to value.
    if (isa<CallInst>(II) && !II->isDebugOrPseudoInst()) {
      hasCalls = true;
      hasMemProfMetadata |= II->hasMetadata(LLVMContext::MD_memprof);
      hasMemProfMetadata |= II->hasMetadata(LLVMContext::MD_callsite);
    }

    CloneDbgRecordsToHere(NewInst, II);

    if (CodeInfo) {
      CodeInfo->OrigVMap[&*II] = NewInst;
      if (auto *CB = dyn_cast<CallBase>(&*II))
        if (CB->hasOperandBundles())
          CodeInfo->OperandBundleCallSites.push_back(NewInst);
    }

    if (const AllocaInst *AI = dyn_cast<AllocaInst>(II)) {
      if (isa<ConstantInt>(AI->getArraySize()))
        hasStaticAllocas = true;
      else
        hasDynamicAllocas = true;
    }
  }

  // Finally, clone over the terminator.
  const Instruction *OldTI = BB->getTerminator();
  bool TerminatorDone = false;
  if (const BranchInst *BI = dyn_cast<BranchInst>(OldTI)) {
    if (BI->isConditional()) {
      // If the condition was a known constant in the callee...
      ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition());
      // Or is a known constant in the caller...
      if (!Cond) {
        Value *V = VMap.lookup(BI->getCondition());
        Cond = dyn_cast_or_null<ConstantInt>(V);
      }

      // Constant fold to uncond branch!
      if (Cond) {
        BasicBlock *Dest = BI->getSuccessor(!Cond->getZExtValue());
        VMap[OldTI] = BranchInst::Create(Dest, NewBB);
        ToClone.push_back(Dest);
        TerminatorDone = true;
      }
    }
  } else if (const SwitchInst *SI = dyn_cast<SwitchInst>(OldTI)) {
    // If switching on a value known constant in the caller.
    ConstantInt *Cond = dyn_cast<ConstantInt>(SI->getCondition());
    if (!Cond) { // Or known constant after constant prop in the callee...
      Value *V = VMap.lookup(SI->getCondition());
      Cond = dyn_cast_or_null<ConstantInt>(V);
    }
    if (Cond) { // Constant fold to uncond branch!
      SwitchInst::ConstCaseHandle Case = *SI->findCaseValue(Cond);
      BasicBlock *Dest = const_cast<BasicBlock *>(Case.getCaseSuccessor());
      VMap[OldTI] = BranchInst::Create(Dest, NewBB);
      ToClone.push_back(Dest);
      TerminatorDone = true;
    }
  }

  if (!TerminatorDone) {
    Instruction *NewInst = OldTI->clone();
    if (OldTI->hasName())
      NewInst->setName(OldTI->getName() + NameSuffix);
    NewInst->insertInto(NewBB, NewBB->end());

    CloneDbgRecordsToHere(NewInst, OldTI->getIterator());

    VMap[OldTI] = NewInst; // Add instruction map to value.

    if (CodeInfo) {
      CodeInfo->OrigVMap[OldTI] = NewInst;
      if (auto *CB = dyn_cast<CallBase>(OldTI))
        if (CB->hasOperandBundles())
          CodeInfo->OperandBundleCallSites.push_back(NewInst);
    }

    // Recursively clone any reachable successor blocks.
    append_range(ToClone, successors(BB->getTerminator()));
  } else {
    // If we didn't create a new terminator, clone DbgVariableRecords from the
    // old terminator onto the new terminator.
    Instruction *NewInst = NewBB->getTerminator();
    assert(NewInst);

    CloneDbgRecordsToHere(NewInst, OldTI->getIterator());
  }

  if (CodeInfo) {
    CodeInfo->ContainsCalls |= hasCalls;
    CodeInfo->ContainsMemProfMetadata |= hasMemProfMetadata;
    CodeInfo->ContainsDynamicAllocas |= hasDynamicAllocas;
    CodeInfo->ContainsDynamicAllocas |=
        hasStaticAllocas && BB != &BB->getParent()->front();
  }
}

/// This works like CloneAndPruneFunctionInto, except that it does not clone the
/// entire function. Instead it starts at an instruction provided by the caller
/// and copies (and prunes) only the code reachable from that instruction.
void llvm::CloneAndPruneIntoFromInst(Function *NewFunc, const Function *OldFunc,
                                     const Instruction *StartingInst,
                                     ValueToValueMapTy &VMap,
                                     bool ModuleLevelChanges,
                                     SmallVectorImpl<ReturnInst *> &Returns,
                                     const char *NameSuffix,
                                     ClonedCodeInfo *CodeInfo) {
  assert(NameSuffix && "NameSuffix cannot be null!");

  ValueMapTypeRemapper *TypeMapper = nullptr;
  ValueMaterializer *Materializer = nullptr;

#ifndef NDEBUG
  // If the cloning starts at the beginning of the function, verify that
  // the function arguments are mapped.
  if (!StartingInst)
    for (const Argument &II : OldFunc->args())
      assert(VMap.count(&II) && "No mapping from source argument specified!");
#endif

  PruningFunctionCloner PFC(NewFunc, OldFunc, VMap, ModuleLevelChanges,
                            NameSuffix, CodeInfo);
  const BasicBlock *StartingBB;
  if (StartingInst)
    StartingBB = StartingInst->getParent();
  else {
    StartingBB = &OldFunc->getEntryBlock();
    StartingInst = &StartingBB->front();
  }

  // Collect debug intrinsics for remapping later.
  SmallVector<const DbgVariableIntrinsic *, 8> DbgIntrinsics;
  for (const auto &BB : *OldFunc) {
    for (const auto &I : BB) {
      if (const auto *DVI = dyn_cast<DbgVariableIntrinsic>(&I))
        DbgIntrinsics.push_back(DVI);
    }
  }

  // Clone the entry block, and anything recursively reachable from it.
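  // CloneBlock enqueues only the successors that remain reachable once
  // branch/switch conditions known to be constant have been folded; blocks
  // never reached this way get no VMap entry and are ignored below.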
  std::vector<const BasicBlock *> CloneWorklist;
  PFC.CloneBlock(StartingBB, StartingInst->getIterator(), CloneWorklist);
  while (!CloneWorklist.empty()) {
    const BasicBlock *BB = CloneWorklist.back();
    CloneWorklist.pop_back();
    PFC.CloneBlock(BB, BB->begin(), CloneWorklist);
  }

  // Loop over all of the basic blocks in the old function. If the block was
  // reachable, we have cloned it and the old block is now in the value map:
  // insert it into the new function in the right order. If not, ignore it.
  //
  // Defer PHI resolution until rest of function is resolved.
  SmallVector<const PHINode *, 16> PHIToResolve;
  for (const BasicBlock &BI : *OldFunc) {
    Value *V = VMap.lookup(&BI);
    BasicBlock *NewBB = cast_or_null<BasicBlock>(V);
    if (!NewBB)
      continue; // Dead block.

    // Move the new block to preserve the order in the original function.
    NewBB->moveBefore(NewFunc->end());

    // Handle PHI nodes specially, as we have to remove references to dead
    // blocks.
    for (const PHINode &PN : BI.phis()) {
      // PHI nodes may have been remapped to non-PHI nodes by the caller or
      // during the cloning process.
      if (isa<PHINode>(VMap[&PN]))
        PHIToResolve.push_back(&PN);
      else
        break;
    }

    // Finally, remap the terminator instructions, as those can't be remapped
    // until all BBs are mapped.
    RemapInstruction(NewBB->getTerminator(), VMap,
                     ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges,
                     TypeMapper, Materializer);
  }

  // Defer PHI resolution until the rest of the function is resolved; PHI
  // resolution requires the CFG to be up-to-date.
  for (unsigned phino = 0, e = PHIToResolve.size(); phino != e;) {
    const PHINode *OPN = PHIToResolve[phino];
    unsigned NumPreds = OPN->getNumIncomingValues();
    const BasicBlock *OldBB = OPN->getParent();
    BasicBlock *NewBB = cast<BasicBlock>(VMap[OldBB]);

    // Map operands for blocks that are live and remove operands for blocks
    // that are dead.
    for (; phino != PHIToResolve.size() &&
           PHIToResolve[phino]->getParent() == OldBB;
         ++phino) {
      OPN = PHIToResolve[phino];
      PHINode *PN = cast<PHINode>(VMap[OPN]);
      for (unsigned pred = 0, e = NumPreds; pred != e; ++pred) {
        Value *V = VMap.lookup(PN->getIncomingBlock(pred));
        if (BasicBlock *MappedBlock = cast_or_null<BasicBlock>(V)) {
          Value *InVal =
              MapValue(PN->getIncomingValue(pred), VMap,
                       ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges);
          assert(InVal && "Unknown input value?");
          PN->setIncomingValue(pred, InVal);
          PN->setIncomingBlock(pred, MappedBlock);
        } else {
          PN->removeIncomingValue(pred, false);
          --pred; // Revisit the next entry.
          --e;
        }
      }
    }

    // The loop above has removed PHI entries for those blocks that are dead
    // and has updated others. However, if a block is live (i.e. copied over)
    // but its terminator has been changed to not go to this block, then our
    // phi nodes will have invalid entries. Update the PHI nodes in this
    // case.
    PHINode *PN = cast<PHINode>(NewBB->begin());
    NumPreds = pred_size(NewBB);
    if (NumPreds != PN->getNumIncomingValues()) {
      assert(NumPreds < PN->getNumIncomingValues());
      // Count how many times each predecessor comes to this block.
      std::map<BasicBlock *, unsigned> PredCount;
      for (BasicBlock *Pred : predecessors(NewBB))
        --PredCount[Pred];

      // Figure out how many entries to remove from each PHI.
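      // Each real predecessor edge was counted as -1 above; each PHI entry
      // for that block adds +1 below, so a positive final count is exactly
      // the number of surplus PHI entries to drop.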
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
        ++PredCount[PN->getIncomingBlock(i)];

      // At this point, the excess predecessor entries are positive in the
      // map. Loop over all of the PHIs and remove excess predecessor
      // entries.
      BasicBlock::iterator I = NewBB->begin();
      for (; (PN = dyn_cast<PHINode>(I)); ++I) {
        for (const auto &PCI : PredCount) {
          BasicBlock *Pred = PCI.first;
          for (unsigned NumToRemove = PCI.second; NumToRemove; --NumToRemove)
            PN->removeIncomingValue(Pred, false);
        }
      }
    }

    // If the loops above have made these phi nodes have 0 or 1 operand,
    // replace them with poison or the input value. We must do this for
    // correctness, because 0-operand phis are not valid.
    PN = cast<PHINode>(NewBB->begin());
    if (PN->getNumIncomingValues() == 0) {
      BasicBlock::iterator I = NewBB->begin();
      BasicBlock::const_iterator OldI = OldBB->begin();
      while ((PN = dyn_cast<PHINode>(I++))) {
        Value *NV = PoisonValue::get(PN->getType());
        PN->replaceAllUsesWith(NV);
        assert(VMap[&*OldI] == PN && "VMap mismatch");
        VMap[&*OldI] = NV;
        PN->eraseFromParent();
        ++OldI;
      }
    }
  }

  // Drop all incompatible return attributes that cannot be applied to NewFunc
  // during cloning, so as to allow instruction simplification to reason on the
  // old state of the function. The original attributes are restored later.
  AttributeList Attrs = NewFunc->getAttributes();
  AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible(
      OldFunc->getReturnType(), Attrs.getRetAttrs());
  NewFunc->removeRetAttrs(IncompatibleAttrs);

  // As phi-nodes have now been remapped, allow incremental simplification of
  // newly-cloned instructions.
  const DataLayout &DL = NewFunc->getDataLayout();
  for (const auto &BB : *OldFunc) {
    for (const auto &I : BB) {
      auto *NewI = dyn_cast_or_null<Instruction>(VMap.lookup(&I));
      if (!NewI)
        continue;

      if (Value *V = simplifyInstruction(NewI, DL)) {
        NewI->replaceAllUsesWith(V);

        if (isInstructionTriviallyDead(NewI)) {
          NewI->eraseFromParent();
        } else {
          // Did not erase it? Restore the new instruction into VMap previously
          // dropped by `ValueIsRAUWd`.
          VMap[&I] = NewI;
        }
      }
    }
  }

  // Restore attributes.
  NewFunc->setAttributes(Attrs);

  // Remap debug intrinsic operands now that all values have been mapped.
  // Doing this now (late) preserves use-before-defs in debug intrinsics. If
  // we didn't do this, ValueAsMetadata(use-before-def) operands would be
  // replaced by empty metadata. This would signal later cleanup passes to
  // remove the debug intrinsics, potentially causing incorrect locations.
  for (const auto *DVI : DbgIntrinsics) {
    if (DbgVariableIntrinsic *NewDVI =
            cast_or_null<DbgVariableIntrinsic>(VMap.lookup(DVI)))
      RemapInstruction(NewDVI, VMap,
                       ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges,
                       TypeMapper, Materializer);
  }

  // Do the same for DbgVariableRecords, touching all the instructions in the
  // cloned range of blocks.
  Function::iterator Begin = cast<BasicBlock>(VMap[StartingBB])->getIterator();
  for (BasicBlock &BB : make_range(Begin, NewFunc->end())) {
    for (Instruction &I : BB) {
      RemapDbgRecordRange(I.getModule(), I.getDbgRecordRange(), VMap,
                          ModuleLevelChanges ? RF_None
                                             : RF_NoModuleLevelChanges,
                          TypeMapper, Materializer);
    }
  }

  // Simplify conditional branches and switches with a constant operand. We try
  // to prune these out when cloning, but if the simplification required
  // looking through PHI nodes, those are only available after forming the full
  // basic block. That may leave some here, and we still want to prune the dead
  // code as early as possible.
  for (BasicBlock &BB : make_range(Begin, NewFunc->end()))
    ConstantFoldTerminator(&BB);

  // Some blocks may have become unreachable as a result. Find and delete them.
  {
    SmallPtrSet<BasicBlock *, 16> ReachableBlocks;
    SmallVector<BasicBlock *, 16> Worklist;
    Worklist.push_back(&*Begin);
    while (!Worklist.empty()) {
      BasicBlock *BB = Worklist.pop_back_val();
      if (ReachableBlocks.insert(BB).second)
        append_range(Worklist, successors(BB));
    }

    SmallVector<BasicBlock *, 16> UnreachableBlocks;
    for (BasicBlock &BB : make_range(Begin, NewFunc->end()))
      if (!ReachableBlocks.contains(&BB))
        UnreachableBlocks.push_back(&BB);
    DeleteDeadBlocks(UnreachableBlocks);
  }

  // Now that the inlined function body has been fully constructed, go through
  // and zap unconditional fall-through branches. This happens all the time when
  // specializing code: code specialization turns conditional branches into
  // uncond branches, and this code folds them.
  Function::iterator I = Begin;
  while (I != NewFunc->end()) {
    BranchInst *BI = dyn_cast<BranchInst>(I->getTerminator());
    if (!BI || BI->isConditional()) {
      ++I;
      continue;
    }

    BasicBlock *Dest = BI->getSuccessor(0);
    if (!Dest->getSinglePredecessor()) {
      ++I;
      continue;
    }

    // We shouldn't be able to get single-entry PHI nodes here, as instsimplify
    // above should have zapped all of them.
    assert(!isa<PHINode>(Dest->begin()));

    // We know all single-entry PHI nodes in the inlined function have been
    // removed, so we just need to splice the blocks.
    BI->eraseFromParent();

    // Make all PHI nodes that referred to Dest now refer to I as their source.
    Dest->replaceAllUsesWith(&*I);

    // Move all the instructions in the succ to the pred.
    I->splice(I->end(), Dest);

    // Remove the dest block.
    Dest->eraseFromParent();

    // Do not increment I, iteratively merge all things this block branches to.
  }

  // Make a final pass over the basic blocks from the old function to gather
  // any return instructions which survived folding. We have to do this here
  // because we can iteratively remove and merge returns above.
  for (Function::iterator I = cast<BasicBlock>(VMap[StartingBB])->getIterator(),
                          E = NewFunc->end();
       I != E; ++I)
    if (ReturnInst *RI = dyn_cast<ReturnInst>(I->getTerminator()))
      Returns.push_back(RI);
}

/// This works exactly like CloneFunctionInto,
/// except that it does some simple constant prop and DCE on the fly. The
/// effect of this is to copy significantly less code in cases where (for
/// example) a function call with constant arguments is inlined, and those
/// constant arguments cause a significant amount of code in the callee to be
/// dead. Since this doesn't produce an exact copy of the input, it can't be
/// used for things like CloneFunction or CloneModule.
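///
/// A minimal usage sketch (illustrative only; `NewF`, `Callee`, and the way
/// each argument is mapped are hypothetical caller-side details, not defined
/// here):
/// \code
///   ValueToValueMapTy VMap;
///   for (const Argument &A : Callee->args())
///     VMap[&A] = ...; // map each formal argument to a caller-side value
///   SmallVector<ReturnInst *, 8> Returns;
///   CloneAndPruneFunctionInto(NewF, Callee, VMap,
///                             /*ModuleLevelChanges=*/false, Returns, ".i",
///                             /*CodeInfo=*/nullptr);
/// \endcode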
void llvm::CloneAndPruneFunctionInto(
    Function *NewFunc, const Function *OldFunc, ValueToValueMapTy &VMap,
    bool ModuleLevelChanges, SmallVectorImpl<ReturnInst *> &Returns,
    const char *NameSuffix, ClonedCodeInfo *CodeInfo) {
  CloneAndPruneIntoFromInst(NewFunc, OldFunc, &OldFunc->front().front(), VMap,
                            ModuleLevelChanges, Returns, NameSuffix, CodeInfo);
}

/// Remaps instructions in \p Blocks using the mapping in \p VMap.
void llvm::remapInstructionsInBlocks(ArrayRef<BasicBlock *> Blocks,
                                     ValueToValueMapTy &VMap) {
  // Rewrite the code to refer to itself.
  for (auto *BB : Blocks) {
    for (auto &Inst : *BB) {
      RemapDbgRecordRange(Inst.getModule(), Inst.getDbgRecordRange(), VMap,
                          RF_NoModuleLevelChanges | RF_IgnoreMissingLocals);
      RemapInstruction(&Inst, VMap,
                       RF_NoModuleLevelChanges | RF_IgnoreMissingLocals);
    }
  }
}

/// Clones a loop \p OrigLoop. Returns the loop and the blocks in \p
/// Blocks.
///
/// Updates LoopInfo and DominatorTree assuming the loop is dominated by block
/// \p LoopDomBB. Insert the new blocks before block specified in \p Before.
Loop *llvm::cloneLoopWithPreheader(BasicBlock *Before, BasicBlock *LoopDomBB,
                                   Loop *OrigLoop, ValueToValueMapTy &VMap,
                                   const Twine &NameSuffix, LoopInfo *LI,
                                   DominatorTree *DT,
                                   SmallVectorImpl<BasicBlock *> &Blocks) {
  Function *F = OrigLoop->getHeader()->getParent();
  Loop *ParentLoop = OrigLoop->getParentLoop();
  DenseMap<Loop *, Loop *> LMap;

  Loop *NewLoop = LI->AllocateLoop();
  LMap[OrigLoop] = NewLoop;
  if (ParentLoop)
    ParentLoop->addChildLoop(NewLoop);
  else
    LI->addTopLevelLoop(NewLoop);

  BasicBlock *OrigPH = OrigLoop->getLoopPreheader();
  assert(OrigPH && "No preheader");
  BasicBlock *NewPH = CloneBasicBlock(OrigPH, VMap, NameSuffix, F);
  // To rename the loop PHIs.
  VMap[OrigPH] = NewPH;
  Blocks.push_back(NewPH);

  // Update LoopInfo.
  if (ParentLoop)
    ParentLoop->addBasicBlockToLoop(NewPH, *LI);

  // Update DominatorTree.
  DT->addNewBlock(NewPH, LoopDomBB);

  for (Loop *CurLoop : OrigLoop->getLoopsInPreorder()) {
    Loop *&NewLoop = LMap[CurLoop];
    if (!NewLoop) {
      NewLoop = LI->AllocateLoop();

      // Establish the parent/child relationship.
      Loop *OrigParent = CurLoop->getParentLoop();
      assert(OrigParent && "Could not find the original parent loop");
      Loop *NewParentLoop = LMap[OrigParent];
      assert(NewParentLoop && "Could not find the new parent loop");

      NewParentLoop->addChildLoop(NewLoop);
    }
  }

  for (BasicBlock *BB : OrigLoop->getBlocks()) {
    Loop *CurLoop = LI->getLoopFor(BB);
    Loop *&NewLoop = LMap[CurLoop];
    assert(NewLoop && "Expecting new loop to be allocated");

    BasicBlock *NewBB = CloneBasicBlock(BB, VMap, NameSuffix, F);
    VMap[BB] = NewBB;

    // Update LoopInfo.
    NewLoop->addBasicBlockToLoop(NewBB, *LI);

    // Add DominatorTree node. After seeing all blocks, update to correct
    // IDom.
    DT->addNewBlock(NewBB, NewPH);

    Blocks.push_back(NewBB);
  }

  for (BasicBlock *BB : OrigLoop->getBlocks()) {
    // Update loop headers.
    Loop *CurLoop = LI->getLoopFor(BB);
    if (BB == CurLoop->getHeader())
      LMap[CurLoop]->moveToHeader(cast<BasicBlock>(VMap[BB]));

    // Update DominatorTree.
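    // Every cloned block was provisionally parented to NewPH when it was
    // added above; now that all clones exist, repoint each one at the clone
    // of its original immediate dominator.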
    BasicBlock *IDomBB = DT->getNode(BB)->getIDom()->getBlock();
    DT->changeImmediateDominator(cast<BasicBlock>(VMap[BB]),
                                 cast<BasicBlock>(VMap[IDomBB]));
  }

  // Move them physically from the end of the block list.
  F->splice(Before->getIterator(), F, NewPH->getIterator());
  F->splice(Before->getIterator(), F, NewLoop->getHeader()->getIterator(),
            F->end());

  return NewLoop;
}

/// Duplicate non-Phi instructions from the beginning of block up to
/// StopAt instruction into a split block between BB and its predecessor.
BasicBlock *llvm::DuplicateInstructionsInSplitBetween(
    BasicBlock *BB, BasicBlock *PredBB, Instruction *StopAt,
    ValueToValueMapTy &ValueMapping, DomTreeUpdater &DTU) {

  assert(count(successors(PredBB), BB) == 1 &&
         "There must be a single edge between PredBB and BB!");
  // We are going to have to map operands from the original BB block to the new
  // copy of the block 'NewBB'. If there are PHI nodes in BB, evaluate them to
  // account for entry from PredBB.
  BasicBlock::iterator BI = BB->begin();
  for (; PHINode *PN = dyn_cast<PHINode>(BI); ++BI)
    ValueMapping[PN] = PN->getIncomingValueForBlock(PredBB);

  BasicBlock *NewBB = SplitEdge(PredBB, BB);
  NewBB->setName(PredBB->getName() + ".split");
  Instruction *NewTerm = NewBB->getTerminator();

  // FIXME: SplitEdge does not yet take a DTU, so we include the split edge
  // in the update set here.
  DTU.applyUpdates({{DominatorTree::Delete, PredBB, BB},
                    {DominatorTree::Insert, PredBB, NewBB},
                    {DominatorTree::Insert, NewBB, BB}});

  // Clone the non-phi instructions of BB into NewBB, keeping track of the
  // mapping and using it to remap operands in the cloned instructions.
  // Stop once we see the terminator too. This covers the case where BB's
  // terminator gets replaced and StopAt == BB's terminator.
  for (; StopAt != &*BI && BB->getTerminator() != &*BI; ++BI) {
    Instruction *New = BI->clone();
    New->setName(BI->getName());
    New->insertBefore(NewTerm);
    New->cloneDebugInfoFrom(&*BI);
    ValueMapping[&*BI] = New;

    // Remap operands to patch up intra-block references.
    for (unsigned i = 0, e = New->getNumOperands(); i != e; ++i)
      if (Instruction *Inst = dyn_cast<Instruction>(New->getOperand(i))) {
        auto I = ValueMapping.find(Inst);
        if (I != ValueMapping.end())
          New->setOperand(i, I->second);
      }

    // Remap debug variable operands.
    remapDebugVariable(ValueMapping, New);
  }

  return NewBB;
}

void llvm::cloneNoAliasScopes(ArrayRef<MDNode *> NoAliasDeclScopes,
                              DenseMap<MDNode *, MDNode *> &ClonedScopes,
                              StringRef Ext, LLVMContext &Context) {
  MDBuilder MDB(Context);

  for (auto *ScopeList : NoAliasDeclScopes) {
    for (const auto &MDOperand : ScopeList->operands()) {
      if (MDNode *MD = dyn_cast<MDNode>(MDOperand)) {
        AliasScopeNode SNANode(MD);

        std::string Name;
        auto ScopeName = SNANode.getName();
        if (!ScopeName.empty())
          Name = (Twine(ScopeName) + ":" + Ext).str();
        else
          Name = std::string(Ext);

        MDNode *NewScope = MDB.createAnonymousAliasScope(
            const_cast<MDNode *>(SNANode.getDomain()), Name);
        ClonedScopes.insert(std::make_pair(MD, NewScope));
      }
    }
  }
}

void llvm::adaptNoAliasScopes(Instruction *I,
                              const DenseMap<MDNode *, MDNode *> &ClonedScopes,
                              LLVMContext &Context) {
  auto CloneScopeList = [&](const MDNode *ScopeList) -> MDNode * {
    bool NeedsReplacement = false;
    SmallVector<Metadata *, 8> NewScopeList;
    for (const auto &MDOp : ScopeList->operands()) {
      if (MDNode *MD = dyn_cast<MDNode>(MDOp)) {
        if (auto *NewMD = ClonedScopes.lookup(MD)) {
          NewScopeList.push_back(NewMD);
          NeedsReplacement = true;
          continue;
        }
        NewScopeList.push_back(MD);
      }
    }
    if (NeedsReplacement)
      return MDNode::get(Context, NewScopeList);
    return nullptr;
  };

  if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(I))
    if (auto *NewScopeList = CloneScopeList(Decl->getScopeList()))
      Decl->setScopeList(NewScopeList);

  auto replaceWhenNeeded = [&](unsigned MD_ID) {
    if (const MDNode *CSNoAlias = I->getMetadata(MD_ID))
      if (auto *NewScopeList = CloneScopeList(CSNoAlias))
        I->setMetadata(MD_ID, NewScopeList);
  };
  replaceWhenNeeded(LLVMContext::MD_noalias);
  replaceWhenNeeded(LLVMContext::MD_alias_scope);
}

void llvm::cloneAndAdaptNoAliasScopes(ArrayRef<MDNode *> NoAliasDeclScopes,
                                      ArrayRef<BasicBlock *> NewBlocks,
                                      LLVMContext &Context, StringRef Ext) {
  if (NoAliasDeclScopes.empty())
    return;

  DenseMap<MDNode *, MDNode *> ClonedScopes;
  LLVM_DEBUG(dbgs() << "cloneAndAdaptNoAliasScopes: cloning "
                    << NoAliasDeclScopes.size() << " node(s)\n");

  cloneNoAliasScopes(NoAliasDeclScopes, ClonedScopes, Ext, Context);
  // Identify instructions using metadata that needs adaptation.
  for (BasicBlock *NewBlock : NewBlocks)
    for (Instruction &I : *NewBlock)
      adaptNoAliasScopes(&I, ClonedScopes, Context);
}

void llvm::cloneAndAdaptNoAliasScopes(ArrayRef<MDNode *> NoAliasDeclScopes,
                                      Instruction *IStart, Instruction *IEnd,
                                      LLVMContext &Context, StringRef Ext) {
  if (NoAliasDeclScopes.empty())
    return;

  DenseMap<MDNode *, MDNode *> ClonedScopes;
  LLVM_DEBUG(dbgs() << "cloneAndAdaptNoAliasScopes: cloning "
                    << NoAliasDeclScopes.size() << " node(s)\n");

  cloneNoAliasScopes(NoAliasDeclScopes, ClonedScopes, Ext, Context);
  // Identify instructions using metadata that needs adaptation.
  assert(IStart->getParent() == IEnd->getParent() && "different basic block?");
  auto ItStart = IStart->getIterator();
  auto ItEnd = IEnd->getIterator();
  ++ItEnd; // IEnd is included, increment ItEnd to get the end of the range
  for (auto &I : llvm::make_range(ItStart, ItEnd))
    adaptNoAliasScopes(&I, ClonedScopes, Context);
}

void llvm::identifyNoAliasScopesToClone(
    ArrayRef<BasicBlock *> BBs, SmallVectorImpl<MDNode *> &NoAliasDeclScopes) {
  for (BasicBlock *BB : BBs)
    for (Instruction &I : *BB)
      if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
        NoAliasDeclScopes.push_back(Decl->getScopeList());
}

void llvm::identifyNoAliasScopesToClone(
    BasicBlock::iterator Start, BasicBlock::iterator End,
    SmallVectorImpl<MDNode *> &NoAliasDeclScopes) {
  for (Instruction &I : make_range(Start, End))
    if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
      NoAliasDeclScopes.push_back(Decl->getScopeList());
}
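
// Illustrative usage of the no-alias scope helpers above (a sketch; `Ctx`,
// `OrigBlocks`, and `NewBlocks` are hypothetical caller-side names). A
// transform that duplicates code typically collects the scope declarations
// from the original blocks, then clones and rewires them in the copies:
//
//   SmallVector<MDNode *, 8> NoAliasDeclScopes;
//   identifyNoAliasScopesToClone(OrigBlocks, NoAliasDeclScopes);
//   cloneAndAdaptNoAliasScopes(NoAliasDeclScopes, NewBlocks, Ctx, ".cloned");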