//===- CloneFunction.cpp - Clone a function into another function ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the CloneFunctionInto interface, which is used as the
// low-level function cloner. This is used by the CloneFunction and function
// inliner to do the dirty work of copying the body of a function around.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <map>
using namespace llvm;

#define DEBUG_TYPE "clone-function"

/// See comments in Cloning.h.
BasicBlock *llvm::CloneBasicBlock(const BasicBlock *BB, ValueToValueMapTy &VMap,
                                  const Twine &NameSuffix, Function *F,
                                  ClonedCodeInfo *CodeInfo,
                                  DebugInfoFinder *DIFinder) {
  BasicBlock *NewBB = BasicBlock::Create(BB->getContext(), "", F);
  if (BB->hasName())
    NewBB->setName(BB->getName() + NameSuffix);

  bool hasCalls = false, hasDynamicAllocas = false;
  Module *TheModule = F ? F->getParent() : nullptr;

  // Loop over all instructions, and copy them over.
  for (const Instruction &I : *BB) {
    if (DIFinder && TheModule)
      DIFinder->processInstruction(*TheModule, I);

    Instruction *NewInst = I.clone();
    if (I.hasName())
      NewInst->setName(I.getName() + NameSuffix);
    NewBB->getInstList().push_back(NewInst);
    VMap[&I] = NewInst; // Add instruction map to value.

    hasCalls |= (isa<CallInst>(I) && !I.isDebugOrPseudoInst());
    if (const AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
      if (!AI->isStaticAlloca()) {
        hasDynamicAllocas = true;
      }
    }
  }

  if (CodeInfo) {
    CodeInfo->ContainsCalls |= hasCalls;
    CodeInfo->ContainsDynamicAllocas |= hasDynamicAllocas;
  }
  return NewBB;
}

// Clone OldFunc into NewFunc, transforming the old arguments into references to
// VMap values.
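// Note that VMap is expected to contain a mapping for every argument of
// OldFunc before this is called.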
//
void llvm::CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
                             ValueToValueMapTy &VMap,
                             CloneFunctionChangeType Changes,
                             SmallVectorImpl<ReturnInst *> &Returns,
                             const char *NameSuffix, ClonedCodeInfo *CodeInfo,
                             ValueMapTypeRemapper *TypeMapper,
                             ValueMaterializer *Materializer) {
  assert(NameSuffix && "NameSuffix cannot be null!");

#ifndef NDEBUG
  for (const Argument &I : OldFunc->args())
    assert(VMap.count(&I) && "No mapping from source argument specified!");
#endif

  bool ModuleLevelChanges = Changes > CloneFunctionChangeType::LocalChangesOnly;

  // Copy all attributes other than those stored in the AttributeList. We need
  // to remap the parameter indices of the AttributeList.
  AttributeList NewAttrs = NewFunc->getAttributes();
  NewFunc->copyAttributesFrom(OldFunc);
  NewFunc->setAttributes(NewAttrs);

  // Fix up the personality function that got copied over.
  if (OldFunc->hasPersonalityFn())
    NewFunc->setPersonalityFn(
        MapValue(OldFunc->getPersonalityFn(), VMap,
                 ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges,
                 TypeMapper, Materializer));

  SmallVector<AttributeSet, 4> NewArgAttrs(NewFunc->arg_size());
  AttributeList OldAttrs = OldFunc->getAttributes();

  // Clone any argument attributes that are present in the VMap.
  for (const Argument &OldArg : OldFunc->args()) {
    if (Argument *NewArg = dyn_cast<Argument>(VMap[&OldArg])) {
      NewArgAttrs[NewArg->getArgNo()] =
          OldAttrs.getParamAttrs(OldArg.getArgNo());
    }
  }

  NewFunc->setAttributes(
      AttributeList::get(NewFunc->getContext(), OldAttrs.getFnAttrs(),
                         OldAttrs.getRetAttrs(), NewArgAttrs));

  // Everything else beyond this point deals with function instructions,
  // so if we are dealing with a function declaration, we're done.
  if (OldFunc->isDeclaration())
    return;

  // When we remap instructions within the same module, we want to avoid
  // duplicating inlined DISubprograms, so record all subprograms we find as we
  // duplicate instructions and then freeze them in the MD map. We also record
  // information about dbg.value and dbg.declare to avoid duplicating the
  // types.
  Optional<DebugInfoFinder> DIFinder;

  // Track the subprogram attachment that needs to be cloned to fine-tune the
  // mapping within the same module.
  DISubprogram *SPClonedWithinModule = nullptr;
  if (Changes < CloneFunctionChangeType::DifferentModule) {
    assert((NewFunc->getParent() == nullptr ||
            NewFunc->getParent() == OldFunc->getParent()) &&
           "Expected NewFunc to have the same parent, or no parent");

    // Need to find subprograms, types, and compile units.
    DIFinder.emplace();

    SPClonedWithinModule = OldFunc->getSubprogram();
    if (SPClonedWithinModule)
      DIFinder->processSubprogram(SPClonedWithinModule);
  } else {
    assert((NewFunc->getParent() == nullptr ||
            NewFunc->getParent() != OldFunc->getParent()) &&
           "Expected NewFunc to have different parents, or no parent");

    if (Changes == CloneFunctionChangeType::DifferentModule) {
      assert(NewFunc->getParent() &&
             "Need parent of new function to maintain debug info invariants");

      // Need to find all the compile units.
      DIFinder.emplace();
    }
  }

  // Loop over all of the basic blocks in the function, cloning them as
  // appropriate. Note that we save BE this way in order to handle cloning of
  // recursive functions into themselves.
  for (const BasicBlock &BB : *OldFunc) {

    // Create a new basic block and copy instructions into it!
    BasicBlock *CBB = CloneBasicBlock(&BB, VMap, NameSuffix, NewFunc, CodeInfo,
                                      DIFinder ? &*DIFinder : nullptr);

    // Add basic block mapping.
    VMap[&BB] = CBB;

    // It is only legal to clone a function if a block address within that
    // function is never referenced outside of the function. Given that, we
    // want to map block addresses from the old function to block addresses in
    // the clone. (This is different from the generic ValueMapper
    // implementation, which generates an invalid blockaddress when
    // cloning a function.)
    if (BB.hasAddressTaken()) {
      Constant *OldBBAddr = BlockAddress::get(const_cast<Function *>(OldFunc),
                                              const_cast<BasicBlock *>(&BB));
      VMap[OldBBAddr] = BlockAddress::get(NewFunc, CBB);
    }

    // Note return instructions for the caller.
    if (ReturnInst *RI = dyn_cast<ReturnInst>(CBB->getTerminator()))
      Returns.push_back(RI);
  }

  if (Changes < CloneFunctionChangeType::DifferentModule &&
      DIFinder->subprogram_count() > 0) {
    // Turn on module-level changes, since we need to clone (some of) the
    // debug info metadata.
    //
    // FIXME: Metadata effectively owned by a function should be made
    // local, and only that local metadata should be cloned.
    ModuleLevelChanges = true;

    auto mapToSelfIfNew = [&VMap](MDNode *N) {
      // Avoid clobbering an existing mapping.
      (void)VMap.MD().try_emplace(N, N);
    };

    // Avoid cloning types, compile units, and (other) subprograms.
    for (DISubprogram *ISP : DIFinder->subprograms())
      if (ISP != SPClonedWithinModule)
        mapToSelfIfNew(ISP);

    for (DICompileUnit *CU : DIFinder->compile_units())
      mapToSelfIfNew(CU);

    for (DIType *Type : DIFinder->types())
      mapToSelfIfNew(Type);
  } else {
    assert(!SPClonedWithinModule &&
           "Subprogram should be in DIFinder->subprogram_count()...");
  }

  const auto RemapFlag = ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges;
  // Duplicate the metadata that is attached to the cloned function.
  // Subprograms/CUs/types that were already mapped to themselves won't be
  // duplicated.
  SmallVector<std::pair<unsigned, MDNode *>, 1> MDs;
  OldFunc->getAllMetadata(MDs);
  for (auto MD : MDs) {
    NewFunc->addMetadata(MD.first, *MapMetadata(MD.second, VMap, RemapFlag,
                                                TypeMapper, Materializer));
  }

  // Loop over all of the instructions in the new function, fixing up operand
  // references as we go. This uses VMap to do all the hard work.
  for (Function::iterator
           BB = cast<BasicBlock>(VMap[&OldFunc->front()])->getIterator(),
           BE = NewFunc->end();
       BB != BE; ++BB)
    // Loop over all instructions, fixing each one as we find it...
    for (Instruction &II : *BB)
      RemapInstruction(&II, VMap, RemapFlag, TypeMapper, Materializer);

  // Only update !llvm.dbg.cu for DifferentModule (not CloneModule). In the
  // same module, the compile unit will already be listed (or not). When
  // cloning a module, CloneModule() will handle creating the named metadata.
  if (Changes != CloneFunctionChangeType::DifferentModule)
    return;

  // Update !llvm.dbg.cu with compile units added to the new module if this
  // function is being cloned in isolation.
  //
  // FIXME: This is making global / module-level changes, which doesn't seem
  // like the right encapsulation. Consider dropping the requirement to update
  // !llvm.dbg.cu (either obsoleting the node, or restricting it to
  // non-discardable compile units) instead of discovering compile units by
  // visiting the metadata attached to global values, which would allow this
  // code to be deleted. Alternatively, perhaps give responsibility for this
  // update to CloneFunctionInto's callers.
  auto *NewModule = NewFunc->getParent();
  auto *NMD = NewModule->getOrInsertNamedMetadata("llvm.dbg.cu");
  // Avoid multiple insertions of the same DICompileUnit to NMD.
  SmallPtrSet<const void *, 8> Visited;
  for (auto *Operand : NMD->operands())
    Visited.insert(Operand);
  for (auto *Unit : DIFinder->compile_units()) {
    MDNode *MappedUnit =
        MapMetadata(Unit, VMap, RF_None, TypeMapper, Materializer);
    if (Visited.insert(MappedUnit).second)
      NMD->addOperand(MappedUnit);
  }
}

/// Return a copy of the specified function and add it to that function's
/// module. Also, any references specified in the VMap are changed to refer to
/// their mapped value instead of the original one. If any of the arguments to
/// the function are in the VMap, the arguments are deleted from the resultant
/// function. The VMap is updated to include mappings from all of the
/// instructions and basicblocks in the function from their old to new values.
///
Function *llvm::CloneFunction(Function *F, ValueToValueMapTy &VMap,
                              ClonedCodeInfo *CodeInfo) {
  std::vector<Type *> ArgTypes;

  // The user might be deleting arguments to the function by specifying them in
  // the VMap. If so, we need to not add the arguments to the arg ty vector.
  //
  for (const Argument &I : F->args())
    if (VMap.count(&I) == 0) // Haven't mapped the argument to anything yet?
      ArgTypes.push_back(I.getType());

  // Create a new function type...
  FunctionType *FTy =
      FunctionType::get(F->getFunctionType()->getReturnType(), ArgTypes,
                        F->getFunctionType()->isVarArg());

  // Create the new function...
  Function *NewF = Function::Create(FTy, F->getLinkage(), F->getAddressSpace(),
                                    F->getName(), F->getParent());

  // Loop over the arguments, copying the names of the mapped arguments over...
  Function::arg_iterator DestI = NewF->arg_begin();
  for (const Argument &I : F->args())
    if (VMap.count(&I) == 0) {     // Is this argument preserved?
      DestI->setName(I.getName()); // Copy the name over...
      VMap[&I] = &*DestI++;        // Add mapping to VMap
    }

  SmallVector<ReturnInst *, 8> Returns; // Ignore returns cloned.
  CloneFunctionInto(NewF, F, VMap, CloneFunctionChangeType::LocalChangesOnly,
                    Returns, "", CodeInfo);

  return NewF;
}

namespace {
/// This is a private class used to implement CloneAndPruneFunctionInto.
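/// It clones only blocks that remain reachable, folding conditional branches
/// and switches whose conditions become known constants as it goes, so dead
/// code in the source function is never copied.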
struct PruningFunctionCloner {
  Function *NewFunc;
  const Function *OldFunc;
  ValueToValueMapTy &VMap;
  bool ModuleLevelChanges;
  const char *NameSuffix;
  ClonedCodeInfo *CodeInfo;
  bool HostFuncIsStrictFP;

  Instruction *cloneInstruction(BasicBlock::const_iterator II);

public:
  PruningFunctionCloner(Function *newFunc, const Function *oldFunc,
                        ValueToValueMapTy &valueMap, bool moduleLevelChanges,
                        const char *nameSuffix, ClonedCodeInfo *codeInfo)
      : NewFunc(newFunc), OldFunc(oldFunc), VMap(valueMap),
        ModuleLevelChanges(moduleLevelChanges), NameSuffix(nameSuffix),
        CodeInfo(codeInfo) {
    HostFuncIsStrictFP =
        newFunc->getAttributes().hasFnAttr(Attribute::StrictFP);
  }

  /// The specified block is found to be reachable, clone it and
  /// anything that it can reach.
  void CloneBlock(const BasicBlock *BB, BasicBlock::const_iterator StartingInst,
                  std::vector<const BasicBlock *> &ToClone);
};
} // namespace

static bool hasRoundingModeOperand(Intrinsic::ID CIID) {
  switch (CIID) {
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:                                                   \
    return ROUND_MODE == 1;
#define FUNCTION INSTRUCTION
#include "llvm/IR/ConstrainedOps.def"
  default:
    llvm_unreachable("Unexpected constrained intrinsic id");
  }
}

Instruction *
PruningFunctionCloner::cloneInstruction(BasicBlock::const_iterator II) {
  const Instruction &OldInst = *II;
  Instruction *NewInst = nullptr;
  if (HostFuncIsStrictFP) {
    Intrinsic::ID CIID = getConstrainedIntrinsicID(OldInst);
    if (CIID != Intrinsic::not_intrinsic) {
      // Instead of cloning the instruction, a call to a constrained intrinsic
      // should be created.
      // Assume the first arguments of constrained intrinsics are the same as
      // the operands of the original instruction.

      // Determine overloaded types of the intrinsic.
      SmallVector<Type *, 2> TParams;
      SmallVector<Intrinsic::IITDescriptor, 8> Descriptor;
      getIntrinsicInfoTableEntries(CIID, Descriptor);
      for (unsigned I = 0, E = Descriptor.size(); I != E; ++I) {
        Intrinsic::IITDescriptor Operand = Descriptor[I];
        switch (Operand.Kind) {
        case Intrinsic::IITDescriptor::Argument:
          if (Operand.getArgumentKind() !=
              Intrinsic::IITDescriptor::AK_MatchType) {
            if (I == 0)
              TParams.push_back(OldInst.getType());
            else
              TParams.push_back(OldInst.getOperand(I - 1)->getType());
          }
          break;
        case Intrinsic::IITDescriptor::SameVecWidthArgument:
          ++I;
          break;
        default:
          break;
        }
      }

      // Create intrinsic call.
      LLVMContext &Ctx = NewFunc->getContext();
      Function *IFn =
          Intrinsic::getDeclaration(NewFunc->getParent(), CIID, TParams);
      SmallVector<Value *, 4> Args;
      unsigned NumOperands = OldInst.getNumOperands();
      if (isa<CallInst>(OldInst))
        --NumOperands;
      for (unsigned I = 0; I < NumOperands; ++I) {
        Value *Op = OldInst.getOperand(I);
        Args.push_back(Op);
      }
      if (const auto *CmpI = dyn_cast<FCmpInst>(&OldInst)) {
        FCmpInst::Predicate Pred = CmpI->getPredicate();
        StringRef PredName = FCmpInst::getPredicateName(Pred);
        Args.push_back(MetadataAsValue::get(Ctx, MDString::get(Ctx, PredName)));
      }

      // The last arguments of a constrained intrinsic are metadata that
      // represent rounding mode (absent in some intrinsics) and exception
      // behavior. The inlined function uses default settings.
      if (hasRoundingModeOperand(CIID))
        Args.push_back(
            MetadataAsValue::get(Ctx, MDString::get(Ctx, "round.tonearest")));
      Args.push_back(
          MetadataAsValue::get(Ctx, MDString::get(Ctx, "fpexcept.ignore")));

      NewInst = CallInst::Create(IFn, Args, OldInst.getName() + ".strict");
    }
  }
  if (!NewInst)
    NewInst = II->clone();
  return NewInst;
}

/// The specified block is found to be reachable, clone it and
/// anything that it can reach.
void PruningFunctionCloner::CloneBlock(
    const BasicBlock *BB, BasicBlock::const_iterator StartingInst,
    std::vector<const BasicBlock *> &ToClone) {
  WeakTrackingVH &BBEntry = VMap[BB];

  // Have we already cloned this block?
  if (BBEntry)
    return;

  // Nope, clone it now.
  BasicBlock *NewBB;
  BBEntry = NewBB = BasicBlock::Create(BB->getContext());
  if (BB->hasName())
    NewBB->setName(BB->getName() + NameSuffix);

  // It is only legal to clone a function if a block address within that
  // function is never referenced outside of the function. Given that, we
  // want to map block addresses from the old function to block addresses in
  // the clone. (This is different from the generic ValueMapper
  // implementation, which generates an invalid blockaddress when
  // cloning a function.)
  //
  // Note that we don't need to fix the mapping for unreachable blocks;
  // the default mapping there is safe.
  if (BB->hasAddressTaken()) {
    Constant *OldBBAddr = BlockAddress::get(const_cast<Function *>(OldFunc),
                                            const_cast<BasicBlock *>(BB));
    VMap[OldBBAddr] = BlockAddress::get(NewFunc, NewBB);
  }

  bool hasCalls = false, hasDynamicAllocas = false, hasStaticAllocas = false;

  // Loop over all instructions, and copy them over, DCE'ing as we go. This
  // loop doesn't include the terminator.
  for (BasicBlock::const_iterator II = StartingInst, IE = --BB->end(); II != IE;
       ++II) {

    Instruction *NewInst = cloneInstruction(II);

    if (HostFuncIsStrictFP) {
      // All function calls in the inlined function must get the 'strictfp'
      // attribute to prevent undesirable optimizations.
      if (auto *Call = dyn_cast<CallInst>(NewInst))
        Call->addFnAttr(Attribute::StrictFP);
    }

    // Eagerly remap operands to the newly cloned instruction, except for PHI
    // nodes for which we defer processing until we update the CFG.
    if (!isa<PHINode>(NewInst)) {
      RemapInstruction(NewInst, VMap,
                       ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges);

      // If we can simplify this instruction to some other value, simply add
      // a mapping to that value rather than inserting a new instruction into
      // the basic block.
      if (Value *V =
              simplifyInstruction(NewInst, BB->getModule()->getDataLayout())) {
        // On the off-chance that this simplifies to an instruction in the old
        // function, map it back into the new function.
        if (NewFunc != OldFunc)
          if (Value *MappedV = VMap.lookup(V))
            V = MappedV;

        if (!NewInst->mayHaveSideEffects()) {
          VMap[&*II] = V;
          NewInst->deleteValue();
          continue;
        }
      }
    }

    if (II->hasName())
      NewInst->setName(II->getName() + NameSuffix);
    VMap[&*II] = NewInst; // Add instruction map to value.
    NewBB->getInstList().push_back(NewInst);
    hasCalls |= (isa<CallInst>(II) && !II->isDebugOrPseudoInst());

    if (CodeInfo) {
      CodeInfo->OrigVMap[&*II] = NewInst;
      if (auto *CB = dyn_cast<CallBase>(&*II))
        if (CB->hasOperandBundles())
          CodeInfo->OperandBundleCallSites.push_back(NewInst);
    }

    if (const AllocaInst *AI = dyn_cast<AllocaInst>(II)) {
      if (isa<ConstantInt>(AI->getArraySize()))
        hasStaticAllocas = true;
      else
        hasDynamicAllocas = true;
    }
  }

  // Finally, clone over the terminator.
  const Instruction *OldTI = BB->getTerminator();
  bool TerminatorDone = false;
  if (const BranchInst *BI = dyn_cast<BranchInst>(OldTI)) {
    if (BI->isConditional()) {
      // If the condition was a known constant in the callee...
      ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition());
      // Or is a known constant in the caller...
      if (!Cond) {
        Value *V = VMap.lookup(BI->getCondition());
        Cond = dyn_cast_or_null<ConstantInt>(V);
      }

      // Constant fold to uncond branch!
      if (Cond) {
        BasicBlock *Dest = BI->getSuccessor(!Cond->getZExtValue());
        VMap[OldTI] = BranchInst::Create(Dest, NewBB);
        ToClone.push_back(Dest);
        TerminatorDone = true;
      }
    }
  } else if (const SwitchInst *SI = dyn_cast<SwitchInst>(OldTI)) {
    // If switching on a value that is a known constant in the caller.
    ConstantInt *Cond = dyn_cast<ConstantInt>(SI->getCondition());
    if (!Cond) { // Or known constant after constant prop in the callee...
      Value *V = VMap.lookup(SI->getCondition());
      Cond = dyn_cast_or_null<ConstantInt>(V);
    }
    if (Cond) { // Constant fold to uncond branch!
      SwitchInst::ConstCaseHandle Case = *SI->findCaseValue(Cond);
      BasicBlock *Dest = const_cast<BasicBlock *>(Case.getCaseSuccessor());
      VMap[OldTI] = BranchInst::Create(Dest, NewBB);
      ToClone.push_back(Dest);
      TerminatorDone = true;
    }
  }

  if (!TerminatorDone) {
    Instruction *NewInst = OldTI->clone();
    if (OldTI->hasName())
      NewInst->setName(OldTI->getName() + NameSuffix);
    NewBB->getInstList().push_back(NewInst);
    VMap[OldTI] = NewInst; // Add instruction map to value.

    if (CodeInfo) {
      CodeInfo->OrigVMap[OldTI] = NewInst;
      if (auto *CB = dyn_cast<CallBase>(OldTI))
        if (CB->hasOperandBundles())
          CodeInfo->OperandBundleCallSites.push_back(NewInst);
    }

    // Recursively clone any reachable successor blocks.
    append_range(ToClone, successors(BB->getTerminator()));
  }

  if (CodeInfo) {
    CodeInfo->ContainsCalls |= hasCalls;
    CodeInfo->ContainsDynamicAllocas |= hasDynamicAllocas;
    CodeInfo->ContainsDynamicAllocas |=
        hasStaticAllocas && BB != &BB->getParent()->front();
  }
}

/// This works like CloneAndPruneFunctionInto, except that it does not clone the
/// entire function. Instead it starts at an instruction provided by the caller
/// and copies (and prunes) only the code reachable from that instruction.
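/// Blocks that become unreachable once terminators are constant-folded are
/// deleted again before returning, so they do not survive in NewFunc.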
void llvm::CloneAndPruneIntoFromInst(Function *NewFunc, const Function *OldFunc,
                                     const Instruction *StartingInst,
                                     ValueToValueMapTy &VMap,
                                     bool ModuleLevelChanges,
                                     SmallVectorImpl<ReturnInst *> &Returns,
                                     const char *NameSuffix,
                                     ClonedCodeInfo *CodeInfo) {
  assert(NameSuffix && "NameSuffix cannot be null!");

  ValueMapTypeRemapper *TypeMapper = nullptr;
  ValueMaterializer *Materializer = nullptr;

#ifndef NDEBUG
  // If the cloning starts at the beginning of the function, verify that
  // the function arguments are mapped.
  if (!StartingInst)
    for (const Argument &II : OldFunc->args())
      assert(VMap.count(&II) && "No mapping from source argument specified!");
#endif

  PruningFunctionCloner PFC(NewFunc, OldFunc, VMap, ModuleLevelChanges,
                            NameSuffix, CodeInfo);
  const BasicBlock *StartingBB;
  if (StartingInst)
    StartingBB = StartingInst->getParent();
  else {
    StartingBB = &OldFunc->getEntryBlock();
    StartingInst = &StartingBB->front();
  }

  // Clone the entry block, and anything recursively reachable from it.
  std::vector<const BasicBlock *> CloneWorklist;
  PFC.CloneBlock(StartingBB, StartingInst->getIterator(), CloneWorklist);
  while (!CloneWorklist.empty()) {
    const BasicBlock *BB = CloneWorklist.back();
    CloneWorklist.pop_back();
    PFC.CloneBlock(BB, BB->begin(), CloneWorklist);
  }

  // Loop over all of the basic blocks in the old function. If the block was
  // reachable, we have cloned it and the old block is now in the value map:
  // insert it into the new function in the right order. If not, ignore it.
  //
  // Defer PHI resolution until the rest of the function is resolved.
  SmallVector<const PHINode *, 16> PHIToResolve;
  for (const BasicBlock &BI : *OldFunc) {
    Value *V = VMap.lookup(&BI);
    BasicBlock *NewBB = cast_or_null<BasicBlock>(V);
    if (!NewBB)
      continue; // Dead block.

    // Add the new block to the new function.
    NewFunc->getBasicBlockList().push_back(NewBB);

    // Handle PHI nodes specially, as we have to remove references to dead
    // blocks.
    for (const PHINode &PN : BI.phis()) {
      // PHI nodes may have been remapped to non-PHI nodes by the caller or
      // during the cloning process.
      if (isa<PHINode>(VMap[&PN]))
        PHIToResolve.push_back(&PN);
      else
        break;
    }

    // Finally, remap the terminator instructions, as those can't be remapped
    // until all BBs are mapped.
    RemapInstruction(NewBB->getTerminator(), VMap,
                     ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges,
                     TypeMapper, Materializer);
  }

  // Defer PHI resolution until the rest of the function is resolved; PHI
  // resolution requires the CFG to be up-to-date.
  for (unsigned phino = 0, e = PHIToResolve.size(); phino != e;) {
    const PHINode *OPN = PHIToResolve[phino];
    unsigned NumPreds = OPN->getNumIncomingValues();
    const BasicBlock *OldBB = OPN->getParent();
    BasicBlock *NewBB = cast<BasicBlock>(VMap[OldBB]);

    // Map operands for blocks that are live and remove operands for blocks
    // that are dead.
    for (; phino != PHIToResolve.size() &&
           PHIToResolve[phino]->getParent() == OldBB;
         ++phino) {
      OPN = PHIToResolve[phino];
      PHINode *PN = cast<PHINode>(VMap[OPN]);
      for (unsigned pred = 0, e = NumPreds; pred != e; ++pred) {
        Value *V = VMap.lookup(PN->getIncomingBlock(pred));
        if (BasicBlock *MappedBlock = cast_or_null<BasicBlock>(V)) {
          Value *InVal =
              MapValue(PN->getIncomingValue(pred), VMap,
                       ModuleLevelChanges ? RF_None : RF_NoModuleLevelChanges);
          assert(InVal && "Unknown input value?");
          PN->setIncomingValue(pred, InVal);
          PN->setIncomingBlock(pred, MappedBlock);
        } else {
          PN->removeIncomingValue(pred, false);
          --pred; // Revisit the next entry.
          --e;
        }
      }
    }

    // The loop above has removed PHI entries for those blocks that are dead
    // and has updated others. However, if a block is live (i.e. copied over)
    // but its terminator has been changed to not go to this block, then our
    // phi nodes will have invalid entries. Update the PHI nodes in this
    // case.
    PHINode *PN = cast<PHINode>(NewBB->begin());
    NumPreds = pred_size(NewBB);
    if (NumPreds != PN->getNumIncomingValues()) {
      assert(NumPreds < PN->getNumIncomingValues());
      // Count how many times each predecessor comes to this block.
      std::map<BasicBlock *, unsigned> PredCount;
      for (BasicBlock *Pred : predecessors(NewBB))
        --PredCount[Pred];

      // Figure out how many entries to remove from each PHI.
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
        ++PredCount[PN->getIncomingBlock(i)];

      // At this point, the excess predecessor entries are positive in the
      // map. Loop over all of the PHIs and remove excess predecessor
      // entries.
      BasicBlock::iterator I = NewBB->begin();
      for (; (PN = dyn_cast<PHINode>(I)); ++I) {
        for (const auto &PCI : PredCount) {
          BasicBlock *Pred = PCI.first;
          for (unsigned NumToRemove = PCI.second; NumToRemove; --NumToRemove)
            PN->removeIncomingValue(Pred, false);
        }
      }
    }

    // If the loops above have made these phi nodes have 0 or 1 operand,
    // replace them with undef or the input value. We must do this for
    // correctness, because 0-operand phis are not valid.
    PN = cast<PHINode>(NewBB->begin());
    if (PN->getNumIncomingValues() == 0) {
      BasicBlock::iterator I = NewBB->begin();
      BasicBlock::const_iterator OldI = OldBB->begin();
      while ((PN = dyn_cast<PHINode>(I++))) {
        Value *NV = UndefValue::get(PN->getType());
        PN->replaceAllUsesWith(NV);
        assert(VMap[&*OldI] == PN && "VMap mismatch");
        VMap[&*OldI] = NV;
        PN->eraseFromParent();
        ++OldI;
      }
    }
  }

  // Make a second pass over the PHINodes now that all of them have been
  // remapped into the new function, simplifying the PHINode and performing any
  // recursive simplifications exposed. This will transparently update the
  // WeakTrackingVH in the VMap. Notably, we rely on that so that if we coalesce
  // two PHINodes, the iteration over the old PHIs remains valid, and the
  // mapping will just map us to the new node (which may not even be a PHI
  // node).
  const DataLayout &DL = NewFunc->getParent()->getDataLayout();
  SmallSetVector<const Value *, 8> Worklist;
  for (unsigned Idx = 0, Size = PHIToResolve.size(); Idx != Size; ++Idx)
    if (isa<PHINode>(VMap[PHIToResolve[Idx]]))
      Worklist.insert(PHIToResolve[Idx]);

  // Note that we must test the size on each iteration; the worklist can grow.
  for (unsigned Idx = 0; Idx != Worklist.size(); ++Idx) {
    const Value *OrigV = Worklist[Idx];
    auto *I = dyn_cast_or_null<Instruction>(VMap.lookup(OrigV));
    if (!I)
      continue;

    // Skip over non-intrinsic callsites; we don't want to remove any nodes
    // from the CGSCC.
    CallBase *CB = dyn_cast<CallBase>(I);
    if (CB && CB->getCalledFunction() &&
        !CB->getCalledFunction()->isIntrinsic())
      continue;

    // See if this instruction simplifies.
    Value *SimpleV = simplifyInstruction(I, DL);
    if (!SimpleV)
      continue;

    // Stash away all the uses of the old instruction so we can check them for
    // recursive simplifications after a RAUW. This is cheaper than checking
    // all uses of the replacement value on the recursive step in most cases.
    for (const User *U : OrigV->users())
      Worklist.insert(cast<Instruction>(U));

    // Replace the instruction with its simplified value.
    I->replaceAllUsesWith(SimpleV);

    // If the original instruction had no side effects, remove it.
    if (isInstructionTriviallyDead(I))
      I->eraseFromParent();
    else
      VMap[OrigV] = I;
  }

  // Simplify conditional branches and switches with a constant operand. We try
  // to prune these out when cloning, but if the simplification required
  // looking through PHI nodes, those are only available after forming the full
  // basic block. That may leave some here, and we still want to prune the dead
  // code as early as possible.
  Function::iterator Begin = cast<BasicBlock>(VMap[StartingBB])->getIterator();
  for (BasicBlock &BB : make_range(Begin, NewFunc->end()))
    ConstantFoldTerminator(&BB);

  // Some blocks may have become unreachable as a result. Find and delete them.
  {
    SmallPtrSet<BasicBlock *, 16> ReachableBlocks;
    SmallVector<BasicBlock *, 16> Worklist;
    Worklist.push_back(&*Begin);
    while (!Worklist.empty()) {
      BasicBlock *BB = Worklist.pop_back_val();
      if (ReachableBlocks.insert(BB).second)
        append_range(Worklist, successors(BB));
    }

    SmallVector<BasicBlock *, 16> UnreachableBlocks;
    for (BasicBlock &BB : make_range(Begin, NewFunc->end()))
      if (!ReachableBlocks.contains(&BB))
        UnreachableBlocks.push_back(&BB);
    DeleteDeadBlocks(UnreachableBlocks);
  }

  // Now that the inlined function body has been fully constructed, go through
  // and zap unconditional fall-through branches. This happens all the time when
  // specializing code: code specialization turns conditional branches into
  // uncond branches, and this code folds them.
  Function::iterator I = Begin;
  while (I != NewFunc->end()) {
    BranchInst *BI = dyn_cast<BranchInst>(I->getTerminator());
    if (!BI || BI->isConditional()) {
      ++I;
      continue;
    }

    BasicBlock *Dest = BI->getSuccessor(0);
    if (!Dest->getSinglePredecessor()) {
      ++I;
      continue;
    }

    // We shouldn't be able to get single-entry PHI nodes here, as instsimplify
    // above should have zapped all of them.
    assert(!isa<PHINode>(Dest->begin()));

    // We know all single-entry PHI nodes in the inlined function have been
    // removed, so we just need to splice the blocks.
    BI->eraseFromParent();

    // Make all PHI nodes that referred to Dest now refer to I as their source.
    Dest->replaceAllUsesWith(&*I);

    // Move all the instructions in the succ to the pred.
    I->getInstList().splice(I->end(), Dest->getInstList());

    // Remove the dest block.
    Dest->eraseFromParent();

    // Do not increment I, iteratively merge all things this block branches to.
  }

  // Make a final pass over the basic blocks from the old function to gather
  // any return instructions which survived folding. We have to do this here
  // because we can iteratively remove and merge returns above.
  for (Function::iterator I = cast<BasicBlock>(VMap[StartingBB])->getIterator(),
                          E = NewFunc->end();
       I != E; ++I)
    if (ReturnInst *RI = dyn_cast<ReturnInst>(I->getTerminator()))
      Returns.push_back(RI);
}

/// This works exactly like CloneFunctionInto,
/// except that it does some simple constant prop and DCE on the fly. The
/// effect of this is to copy significantly less code in cases where (for
/// example) a function call with constant arguments is inlined, and those
/// constant arguments cause a significant amount of code in the callee to be
/// dead. Since this doesn't produce an exact copy of the input, it can't be
/// used for things like CloneFunction or CloneModule.
void llvm::CloneAndPruneFunctionInto(
    Function *NewFunc, const Function *OldFunc, ValueToValueMapTy &VMap,
    bool ModuleLevelChanges, SmallVectorImpl<ReturnInst *> &Returns,
    const char *NameSuffix, ClonedCodeInfo *CodeInfo) {
  CloneAndPruneIntoFromInst(NewFunc, OldFunc, &OldFunc->front().front(), VMap,
                            ModuleLevelChanges, Returns, NameSuffix, CodeInfo);
}

/// Remaps instructions in \p Blocks using the mapping in \p VMap.
void llvm::remapInstructionsInBlocks(
    const SmallVectorImpl<BasicBlock *> &Blocks, ValueToValueMapTy &VMap) {
  // Rewrite the code to refer to itself.
  for (auto *BB : Blocks)
    for (auto &Inst : *BB)
      RemapInstruction(&Inst, VMap,
                       RF_NoModuleLevelChanges | RF_IgnoreMissingLocals);
}

/// Clones a loop \p OrigLoop. Returns the loop and the blocks in \p
/// Blocks.
///
/// Updates LoopInfo and DominatorTree assuming the loop is dominated by block
/// \p LoopDomBB. Insert the new blocks before the block specified in \p Before.
Loop *llvm::cloneLoopWithPreheader(BasicBlock *Before, BasicBlock *LoopDomBB,
                                   Loop *OrigLoop, ValueToValueMapTy &VMap,
                                   const Twine &NameSuffix, LoopInfo *LI,
                                   DominatorTree *DT,
                                   SmallVectorImpl<BasicBlock *> &Blocks) {
  Function *F = OrigLoop->getHeader()->getParent();
  Loop *ParentLoop = OrigLoop->getParentLoop();
  DenseMap<Loop *, Loop *> LMap;

  Loop *NewLoop = LI->AllocateLoop();
  LMap[OrigLoop] = NewLoop;
  if (ParentLoop)
    ParentLoop->addChildLoop(NewLoop);
  else
    LI->addTopLevelLoop(NewLoop);

  BasicBlock *OrigPH = OrigLoop->getLoopPreheader();
  assert(OrigPH && "No preheader");
  BasicBlock *NewPH = CloneBasicBlock(OrigPH, VMap, NameSuffix, F);
  // To rename the loop PHIs.
  VMap[OrigPH] = NewPH;
  Blocks.push_back(NewPH);

  // Update LoopInfo.
  if (ParentLoop)
    ParentLoop->addBasicBlockToLoop(NewPH, *LI);

  // Update DominatorTree.
  DT->addNewBlock(NewPH, LoopDomBB);

  for (Loop *CurLoop : OrigLoop->getLoopsInPreorder()) {
    Loop *&NewLoop = LMap[CurLoop];
    if (!NewLoop) {
      NewLoop = LI->AllocateLoop();

      // Establish the parent/child relationship.
      Loop *OrigParent = CurLoop->getParentLoop();
      assert(OrigParent && "Could not find the original parent loop");
      Loop *NewParentLoop = LMap[OrigParent];
      assert(NewParentLoop && "Could not find the new parent loop");

      NewParentLoop->addChildLoop(NewLoop);
    }
  }

  for (BasicBlock *BB : OrigLoop->getBlocks()) {
    Loop *CurLoop = LI->getLoopFor(BB);
    Loop *&NewLoop = LMap[CurLoop];
    assert(NewLoop && "Expecting new loop to be allocated");

    BasicBlock *NewBB = CloneBasicBlock(BB, VMap, NameSuffix, F);
    VMap[BB] = NewBB;

    // Update LoopInfo.
    NewLoop->addBasicBlockToLoop(NewBB, *LI);

    // Add DominatorTree node. After seeing all blocks, update to correct
    // IDom.
    DT->addNewBlock(NewBB, NewPH);

    Blocks.push_back(NewBB);
  }

  for (BasicBlock *BB : OrigLoop->getBlocks()) {
    // Update loop headers.
    Loop *CurLoop = LI->getLoopFor(BB);
    if (BB == CurLoop->getHeader())
      LMap[CurLoop]->moveToHeader(cast<BasicBlock>(VMap[BB]));

    // Update DominatorTree.
    BasicBlock *IDomBB = DT->getNode(BB)->getIDom()->getBlock();
    DT->changeImmediateDominator(cast<BasicBlock>(VMap[BB]),
                                 cast<BasicBlock>(VMap[IDomBB]));
  }

  // Move them physically from the end of the block list.
  F->getBasicBlockList().splice(Before->getIterator(), F->getBasicBlockList(),
                                NewPH);
  F->getBasicBlockList().splice(Before->getIterator(), F->getBasicBlockList(),
                                NewLoop->getHeader()->getIterator(), F->end());

  return NewLoop;
}

/// Duplicate non-Phi instructions from the beginning of a block up to
/// the StopAt instruction into a split block between BB and its predecessor.
BasicBlock *llvm::DuplicateInstructionsInSplitBetween(
    BasicBlock *BB, BasicBlock *PredBB, Instruction *StopAt,
    ValueToValueMapTy &ValueMapping, DomTreeUpdater &DTU) {

  assert(count(successors(PredBB), BB) == 1 &&
         "There must be a single edge between PredBB and BB!");
  // We are going to have to map operands from the original block BB to the new
  // copy of the block 'NewBB'. If there are PHI nodes in BB, evaluate them to
  // account for entry from PredBB.
  BasicBlock::iterator BI = BB->begin();
  for (; PHINode *PN = dyn_cast<PHINode>(BI); ++BI)
    ValueMapping[PN] = PN->getIncomingValueForBlock(PredBB);

  BasicBlock *NewBB = SplitEdge(PredBB, BB);
  NewBB->setName(PredBB->getName() + ".split");
  Instruction *NewTerm = NewBB->getTerminator();

  // FIXME: SplitEdge does not yet take a DTU, so we include the split edge
  // in the update set here.
  DTU.applyUpdates({{DominatorTree::Delete, PredBB, BB},
                    {DominatorTree::Insert, PredBB, NewBB},
                    {DominatorTree::Insert, NewBB, BB}});

  // Clone the non-phi instructions of BB into NewBB, keeping track of the
  // mapping and using it to remap operands in the cloned instructions.
  // Stop once we see the terminator too. This covers the case where BB's
  // terminator gets replaced and StopAt == BB's terminator.
  for (; StopAt != &*BI && BB->getTerminator() != &*BI; ++BI) {
    Instruction *New = BI->clone();
    New->setName(BI->getName());
    New->insertBefore(NewTerm);
    ValueMapping[&*BI] = New;

    // Remap operands to patch up intra-block references.
    for (unsigned i = 0, e = New->getNumOperands(); i != e; ++i)
      if (Instruction *Inst = dyn_cast<Instruction>(New->getOperand(i))) {
        auto I = ValueMapping.find(Inst);
        if (I != ValueMapping.end())
          New->setOperand(i, I->second);
      }
  }

  return NewBB;
}

void llvm::cloneNoAliasScopes(ArrayRef<MDNode *> NoAliasDeclScopes,
                              DenseMap<MDNode *, MDNode *> &ClonedScopes,
                              StringRef Ext, LLVMContext &Context) {
  MDBuilder MDB(Context);

  for (auto *ScopeList : NoAliasDeclScopes) {
    for (auto &MDOperand : ScopeList->operands()) {
      if (MDNode *MD = dyn_cast<MDNode>(MDOperand)) {
        AliasScopeNode SNANode(MD);

        std::string Name;
        auto ScopeName = SNANode.getName();
        if (!ScopeName.empty())
          Name = (Twine(ScopeName) + ":" + Ext).str();
        else
          Name = std::string(Ext);

        MDNode *NewScope = MDB.createAnonymousAliasScope(
            const_cast<MDNode *>(SNANode.getDomain()), Name);
        ClonedScopes.insert(std::make_pair(MD, NewScope));
      }
    }
  }
}

void llvm::adaptNoAliasScopes(Instruction *I,
                              const DenseMap<MDNode *, MDNode *> &ClonedScopes,
                              LLVMContext &Context) {
  auto CloneScopeList = [&](const MDNode *ScopeList) -> MDNode * {
    bool NeedsReplacement = false;
    SmallVector<Metadata *, 8> NewScopeList;
    for (auto &MDOp : ScopeList->operands()) {
      if (MDNode *MD = dyn_cast<MDNode>(MDOp)) {
        if (auto *NewMD = ClonedScopes.lookup(MD)) {
          NewScopeList.push_back(NewMD);
          NeedsReplacement = true;
          continue;
        }
        NewScopeList.push_back(MD);
      }
    }
    if (NeedsReplacement)
      return MDNode::get(Context, NewScopeList);
    return nullptr;
  };

  if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(I))
    if (auto *NewScopeList = CloneScopeList(Decl->getScopeList()))
      Decl->setScopeList(NewScopeList);

  auto replaceWhenNeeded = [&](unsigned MD_ID) {
    if (const MDNode *CSNoAlias = I->getMetadata(MD_ID))
      if (auto *NewScopeList = CloneScopeList(CSNoAlias))
        I->setMetadata(MD_ID, NewScopeList);
  };
  replaceWhenNeeded(LLVMContext::MD_noalias);
  replaceWhenNeeded(LLVMContext::MD_alias_scope);
}

void llvm::cloneAndAdaptNoAliasScopes(ArrayRef<MDNode *> NoAliasDeclScopes,
                                      ArrayRef<BasicBlock *> NewBlocks,
                                      LLVMContext &Context, StringRef Ext) {
  if (NoAliasDeclScopes.empty())
    return;

  DenseMap<MDNode *, MDNode *> ClonedScopes;
  LLVM_DEBUG(dbgs() << "cloneAndAdaptNoAliasScopes: cloning "
                    << NoAliasDeclScopes.size() << " node(s)\n");

  cloneNoAliasScopes(NoAliasDeclScopes, ClonedScopes, Ext, Context);
  // Identify instructions using metadata that needs adaptation.
  for (BasicBlock *NewBlock : NewBlocks)
    for (Instruction &I : *NewBlock)
      adaptNoAliasScopes(&I, ClonedScopes, Context);
}

void llvm::cloneAndAdaptNoAliasScopes(ArrayRef<MDNode *> NoAliasDeclScopes,
                                      Instruction *IStart, Instruction *IEnd,
                                      LLVMContext &Context, StringRef Ext) {
  if (NoAliasDeclScopes.empty())
    return;

  DenseMap<MDNode *, MDNode *> ClonedScopes;
  LLVM_DEBUG(dbgs() << "cloneAndAdaptNoAliasScopes: cloning "
                    << NoAliasDeclScopes.size() << " node(s)\n");

  cloneNoAliasScopes(NoAliasDeclScopes, ClonedScopes, Ext, Context);
  // Identify instructions using metadata that needs adaptation.
  assert(IStart->getParent() == IEnd->getParent() && "different basic block?");
  auto ItStart = IStart->getIterator();
  auto ItEnd = IEnd->getIterator();
  ++ItEnd; // IEnd is included, increment ItEnd to get the end of the range.
  for (auto &I : llvm::make_range(ItStart, ItEnd))
    adaptNoAliasScopes(&I, ClonedScopes, Context);
}

void llvm::identifyNoAliasScopesToClone(
    ArrayRef<BasicBlock *> BBs, SmallVectorImpl<MDNode *> &NoAliasDeclScopes) {
  for (BasicBlock *BB : BBs)
    for (Instruction &I : *BB)
      if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
        NoAliasDeclScopes.push_back(Decl->getScopeList());
}

void llvm::identifyNoAliasScopesToClone(
    BasicBlock::iterator Start, BasicBlock::iterator End,
    SmallVectorImpl<MDNode *> &NoAliasDeclScopes) {
  for (Instruction &I : make_range(Start, End))
    if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
      NoAliasDeclScopes.push_back(Decl->getScopeList());
}