//===- Local.cpp - Functions to perform local transformations -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This family of functions performs various local transformations to the
// program.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/EHPersonalities.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalObject.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/MemoryModelRelaxationAnnotations.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/ProfDataUtils.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <map>
#include <optional>
#include <utility>

using namespace llvm;
using namespace llvm::PatternMatch;

extern cl::opt<bool> UseNewDbgInfoFormat;

#define DEBUG_TYPE "local"

STATISTIC(NumRemoved, "Number of unreachable basic blocks removed");
STATISTIC(NumPHICSEs, "Number of PHI's that got CSE'd");

static cl::opt<bool>
    PHICSEDebugHash("phicse-debug-hash",
#ifdef EXPENSIVE_CHECKS
                    cl::init(true),
#else
                    cl::init(false),
#endif
                    cl::Hidden,
                    cl::desc("Perform extra assertion checking to verify that "
                             "PHINode's hash function is well-behaved w.r.t. "
                             "its isEqual predicate"));

static cl::opt<unsigned> PHICSENumPHISmallSize(
    "phicse-num-phi-smallsize", cl::init(32), cl::Hidden,
    cl::desc(
        "When the basic block contains no more than this number of PHI nodes, "
        "perform a (faster!) exhaustive search instead of a set-driven one."));

static cl::opt<unsigned> MaxPhiEntriesIncreaseAfterRemovingEmptyBlock(
    "max-phi-entries-increase-after-removing-empty-block", cl::init(1000),
    cl::Hidden,
    cl::desc("Stop removing an empty block if removing it will introduce more "
             "than this number of phi entries in its successor"));

// Max recursion depth for collectBitParts used when detecting bswap and
// bitreverse idioms.
static const unsigned BitPartRecursionMaxDepth = 48;

//===----------------------------------------------------------------------===//
//  Local constant propagation.
//

/// ConstantFoldTerminator - If a terminator instruction is predicated on a
/// constant value, convert it into an unconditional branch to the constant
/// destination. This is a nontrivial operation because the successors of this
/// basic block must have their PHI nodes updated.
/// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch
/// conditions and indirectbr addresses this might make dead if
/// DeleteDeadConditions is true.
bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions,
                                  const TargetLibraryInfo *TLI,
                                  DomTreeUpdater *DTU) {
  Instruction *T = BB->getTerminator();
  IRBuilder<> Builder(T);

  // Branch - See if we are conditionally jumping on a constant.
  if (auto *BI = dyn_cast<BranchInst>(T)) {
    if (BI->isUnconditional()) return false; // Can't optimize uncond branch.

    BasicBlock *Dest1 = BI->getSuccessor(0);
    BasicBlock *Dest2 = BI->getSuccessor(1);

    if (Dest2 == Dest1) { // Conditional branch to same location?
      // This branch matches something like this:
      //     br bool %cond, label %Dest, label %Dest
      // and changes it into:  br label %Dest

      // Let the basic block know that we are letting go of one copy of it.
      assert(BI->getParent() && "Terminator not inserted in block!");
      Dest1->removePredecessor(BI->getParent());

      // Replace the conditional branch with an unconditional one.
      BranchInst *NewBI = Builder.CreateBr(Dest1);

      // Transfer the metadata to the new branch instruction.
      NewBI->copyMetadata(*BI, {LLVMContext::MD_loop, LLVMContext::MD_dbg,
                                LLVMContext::MD_annotation});

      Value *Cond = BI->getCondition();
      BI->eraseFromParent();
      if (DeleteDeadConditions)
        RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
      return true;
    }

    if (auto *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
      // Are we branching on a constant?
      // YES. Change to an unconditional branch...
      BasicBlock *Destination = Cond->getZExtValue() ? Dest1 : Dest2;
      BasicBlock *OldDest = Cond->getZExtValue() ? Dest2 : Dest1;

      // Let the basic block know that we are letting go of it. Based on this,
      // it will adjust its PHI nodes.
      OldDest->removePredecessor(BB);

      // Replace the conditional branch with an unconditional one.
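      // For example, "br i1 true, label %live, label %dead" becomes
      // "br label %live", after %dead has dropped this block as a predecessor.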
      BranchInst *NewBI = Builder.CreateBr(Destination);

      // Transfer the metadata to the new branch instruction.
      NewBI->copyMetadata(*BI, {LLVMContext::MD_loop, LLVMContext::MD_dbg,
                                LLVMContext::MD_annotation});

      BI->eraseFromParent();
      if (DTU)
        DTU->applyUpdates({{DominatorTree::Delete, BB, OldDest}});
      return true;
    }

    return false;
  }

  if (auto *SI = dyn_cast<SwitchInst>(T)) {
    // If we are switching on a constant, we can convert the switch to an
    // unconditional branch.
    auto *CI = dyn_cast<ConstantInt>(SI->getCondition());
    BasicBlock *DefaultDest = SI->getDefaultDest();
    BasicBlock *TheOnlyDest = DefaultDest;

    // If the default is unreachable, ignore it when searching for TheOnlyDest.
    if (isa<UnreachableInst>(DefaultDest->getFirstNonPHIOrDbg()) &&
        SI->getNumCases() > 0) {
      TheOnlyDest = SI->case_begin()->getCaseSuccessor();
    }

    bool Changed = false;

    // Figure out which case it goes to.
    for (auto It = SI->case_begin(), End = SI->case_end(); It != End;) {
      // Found a case matching the constant operand?
      if (It->getCaseValue() == CI) {
        TheOnlyDest = It->getCaseSuccessor();
        break;
      }

      // Check to see if this branch is going to the same place as the default
      // dest. If so, eliminate it as an explicit compare.
      if (It->getCaseSuccessor() == DefaultDest) {
        MDNode *MD = getValidBranchWeightMDNode(*SI);
        unsigned NCases = SI->getNumCases();
        // Fold the case metadata into the default if there will be any
        // branches left, unless the metadata doesn't match the switch.
        if (NCases > 1 && MD) {
          // Collect branch weights into a vector.
          SmallVector<uint32_t, 8> Weights;
          extractBranchWeights(MD, Weights);

          // Merge the weight of this case into the default weight.
          unsigned Idx = It->getCaseIndex();
          // TODO: Add overflow check.
          Weights[0] += Weights[Idx + 1];
          // Remove the weight for this case.
          std::swap(Weights[Idx + 1], Weights.back());
          Weights.pop_back();
          setBranchWeights(*SI, Weights, hasBranchWeightOrigin(MD));
        }
        // Remove this entry.
        BasicBlock *ParentBB = SI->getParent();
        DefaultDest->removePredecessor(ParentBB);
        It = SI->removeCase(It);
        End = SI->case_end();

        // Removing this case may have made the condition constant. In that
        // case, update CI and restart iteration through the cases.
        if (auto *NewCI = dyn_cast<ConstantInt>(SI->getCondition())) {
          CI = NewCI;
          It = SI->case_begin();
        }

        Changed = true;
        continue;
      }

      // Otherwise, check to see if the switch only branches to one
      // destination. We do this by resetting "TheOnlyDest" to null when we
      // find two non-equal destinations.
      if (It->getCaseSuccessor() != TheOnlyDest)
        TheOnlyDest = nullptr;

      // Increment this iterator as we haven't removed the case.
      ++It;
    }

    if (CI && !TheOnlyDest) {
      // Branching on a constant, but not to any of the cases; go to the
      // default successor.
      TheOnlyDest = SI->getDefaultDest();
    }

    // If we found a single destination that we can fold the switch into, do so
    // now.
    if (TheOnlyDest) {
      // Insert the new branch.
      Builder.CreateBr(TheOnlyDest);
      BasicBlock *BB = SI->getParent();

      SmallSet<BasicBlock *, 8> RemovedSuccessors;

      // Remove entries from PHI nodes which we no longer branch to...
      BasicBlock *SuccToKeep = TheOnlyDest;
      for (BasicBlock *Succ : successors(SI)) {
        if (DTU && Succ != TheOnlyDest)
          RemovedSuccessors.insert(Succ);
        // Is this the destination we are keeping?
        if (Succ == SuccToKeep) {
          SuccToKeep = nullptr; // Don't modify the first branch to TheOnlyDest.
        } else {
          Succ->removePredecessor(BB);
        }
      }

      // Delete the old switch.
      Value *Cond = SI->getCondition();
      SI->eraseFromParent();
      if (DeleteDeadConditions)
        RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
      if (DTU) {
        std::vector<DominatorTree::UpdateType> Updates;
        Updates.reserve(RemovedSuccessors.size());
        for (auto *RemovedSuccessor : RemovedSuccessors)
          Updates.push_back({DominatorTree::Delete, BB, RemovedSuccessor});
        DTU->applyUpdates(Updates);
      }
      return true;
    }

    if (SI->getNumCases() == 1) {
      // Otherwise, we can fold this switch into a conditional branch
      // instruction if it has only one non-default destination.
      auto FirstCase = *SI->case_begin();
      Value *Cond = Builder.CreateICmpEQ(SI->getCondition(),
                                         FirstCase.getCaseValue(), "cond");

      // Insert the new branch.
      BranchInst *NewBr = Builder.CreateCondBr(Cond,
                                               FirstCase.getCaseSuccessor(),
                                               SI->getDefaultDest());
      SmallVector<uint32_t> Weights;
      if (extractBranchWeights(*SI, Weights) && Weights.size() == 2) {
        uint32_t DefWeight = Weights[0];
        uint32_t CaseWeight = Weights[1];
        // The TrueWeight should be the weight for the single case of SI.
        NewBr->setMetadata(LLVMContext::MD_prof,
                           MDBuilder(BB->getContext())
                               .createBranchWeights(CaseWeight, DefWeight));
      }

      // Update make.implicit metadata to the newly-created conditional branch.
      MDNode *MakeImplicitMD = SI->getMetadata(LLVMContext::MD_make_implicit);
      if (MakeImplicitMD)
        NewBr->setMetadata(LLVMContext::MD_make_implicit, MakeImplicitMD);

      // Delete the old switch.
      SI->eraseFromParent();
      return true;
    }
    return Changed;
  }

  if (auto *IBI = dyn_cast<IndirectBrInst>(T)) {
    // indirectbr blockaddress(@F, @BB) -> br label @BB
    if (auto *BA =
            dyn_cast<BlockAddress>(IBI->getAddress()->stripPointerCasts())) {
      BasicBlock *TheOnlyDest = BA->getBasicBlock();
      SmallSet<BasicBlock *, 8> RemovedSuccessors;

      // Insert the new branch.
      Builder.CreateBr(TheOnlyDest);

      BasicBlock *SuccToKeep = TheOnlyDest;
      for (unsigned i = 0, e = IBI->getNumDestinations(); i != e; ++i) {
        BasicBlock *DestBB = IBI->getDestination(i);
        if (DTU && DestBB != TheOnlyDest)
          RemovedSuccessors.insert(DestBB);
        if (IBI->getDestination(i) == SuccToKeep) {
          SuccToKeep = nullptr;
        } else {
          DestBB->removePredecessor(BB);
        }
      }
      Value *Address = IBI->getAddress();
      IBI->eraseFromParent();
      if (DeleteDeadConditions)
        // Delete pointer cast instructions.
        RecursivelyDeleteTriviallyDeadInstructions(Address, TLI);

      // Also zap the blockaddress constant if there are no users remaining,
      // otherwise the destination is still marked as having its address taken.
      if (BA->use_empty())
        BA->destroyConstant();

      // If we didn't find our destination in the IBI successor list, then we
      // have undefined behavior. Replace the unconditional branch with an
      // 'unreachable' instruction.
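      // For example, "indirectbr ptr blockaddress(@f, %a), [label %b]" names
      // %a as its only computed target but does not list it as a possible
      // destination, so executing it is undefined behavior.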
      if (SuccToKeep) {
        BB->getTerminator()->eraseFromParent();
        new UnreachableInst(BB->getContext(), BB);
      }

      if (DTU) {
        std::vector<DominatorTree::UpdateType> Updates;
        Updates.reserve(RemovedSuccessors.size());
        for (auto *RemovedSuccessor : RemovedSuccessors)
          Updates.push_back({DominatorTree::Delete, BB, RemovedSuccessor});
        DTU->applyUpdates(Updates);
      }
      return true;
    }
  }

  return false;
}

//===----------------------------------------------------------------------===//
//  Local dead code elimination.
//

/// isInstructionTriviallyDead - Return true if the result produced by the
/// instruction is not used, and the instruction has no side effects.
///
bool llvm::isInstructionTriviallyDead(Instruction *I,
                                      const TargetLibraryInfo *TLI) {
  if (!I->use_empty())
    return false;
  return wouldInstructionBeTriviallyDead(I, TLI);
}

bool llvm::wouldInstructionBeTriviallyDeadOnUnusedPaths(
    Instruction *I, const TargetLibraryInfo *TLI) {
  // Instructions that are "markers" and have implied meaning on code around
  // them (without explicit uses) are not dead on unused paths.
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
    if (II->getIntrinsicID() == Intrinsic::stacksave ||
        II->getIntrinsicID() == Intrinsic::launder_invariant_group ||
        II->isLifetimeStartOrEnd())
      return false;
  return wouldInstructionBeTriviallyDead(I, TLI);
}

bool llvm::wouldInstructionBeTriviallyDead(const Instruction *I,
                                           const TargetLibraryInfo *TLI) {
  if (I->isTerminator())
    return false;

  // We don't want the landingpad-like instructions removed by anything this
  // general.
  if (I->isEHPad())
    return false;

  // We don't want debug info removed by anything this general.
  if (isa<DbgVariableIntrinsic>(I))
    return false;

  if (const DbgLabelInst *DLI = dyn_cast<DbgLabelInst>(I)) {
    if (DLI->getLabel())
      return false;
    return true;
  }

  if (auto *CB = dyn_cast<CallBase>(I))
    if (isRemovableAlloc(CB, TLI))
      return true;

  if (!I->willReturn()) {
    auto *II = dyn_cast<IntrinsicInst>(I);
    if (!II)
      return false;

    switch (II->getIntrinsicID()) {
    case Intrinsic::experimental_guard: {
      // Guards on true are operationally no-ops. In the future we can
      // consider more sophisticated tradeoffs for guards considering potential
      // for check widening, but for now we keep things simple.
      auto *Cond = dyn_cast<ConstantInt>(II->getArgOperand(0));
      return Cond && Cond->isOne();
    }
    // TODO: These intrinsics are not safe to remove, because this may remove
    // a well-defined trap.
    case Intrinsic::wasm_trunc_signed:
    case Intrinsic::wasm_trunc_unsigned:
    case Intrinsic::ptrauth_auth:
    case Intrinsic::ptrauth_resign:
      return true;
    default:
      return false;
    }
  }

  if (!I->mayHaveSideEffects())
    return true;

  // Special-case intrinsics that "may have side effects" but can be deleted
  // when dead.
  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    // Safe to delete llvm.stacksave and launder.invariant.group if dead.
    if (II->getIntrinsicID() == Intrinsic::stacksave ||
        II->getIntrinsicID() == Intrinsic::launder_invariant_group)
      return true;

    // These intrinsics declare side effects to prevent them from moving, but
    // they are nops without users.
    if (II->getIntrinsicID() == Intrinsic::allow_runtime_check ||
        II->getIntrinsicID() == Intrinsic::allow_ubsan_check)
      return true;

    if (II->isLifetimeStartOrEnd()) {
      auto *Arg = II->getArgOperand(1);
      // Lifetime intrinsics are dead when their pointer operand is undef.
      if (isa<UndefValue>(Arg))
        return true;
      // If the pointer operand is an alloca, global, or argument and the only
      // uses are lifetime intrinsics, then the intrinsics are dead.
      if (isa<AllocaInst>(Arg) || isa<GlobalValue>(Arg) || isa<Argument>(Arg))
        return llvm::all_of(Arg->uses(), [](Use &Use) {
          if (IntrinsicInst *IntrinsicUse =
                  dyn_cast<IntrinsicInst>(Use.getUser()))
            return IntrinsicUse->isLifetimeStartOrEnd();
          return false;
        });
      return false;
    }

    // Assumptions are dead if their condition is trivially true.
    if (II->getIntrinsicID() == Intrinsic::assume &&
        isAssumeWithEmptyBundle(cast<AssumeInst>(*II))) {
      if (ConstantInt *Cond = dyn_cast<ConstantInt>(II->getArgOperand(0)))
        return !Cond->isZero();

      return false;
    }

    if (auto *FPI = dyn_cast<ConstrainedFPIntrinsic>(I)) {
      std::optional<fp::ExceptionBehavior> ExBehavior =
          FPI->getExceptionBehavior();
      return *ExBehavior != fp::ebStrict;
    }
  }

  if (auto *Call = dyn_cast<CallBase>(I)) {
    if (Value *FreedOp = getFreedOperand(Call, TLI))
      if (Constant *C = dyn_cast<Constant>(FreedOp))
        return C->isNullValue() || isa<UndefValue>(C);
    if (isMathLibCallNoop(Call, TLI))
      return true;
  }

  // Non-volatile atomic loads from constants can be removed.
  if (auto *LI = dyn_cast<LoadInst>(I))
    if (auto *GV = dyn_cast<GlobalVariable>(
            LI->getPointerOperand()->stripPointerCasts()))
      if (!LI->isVolatile() && GV->isConstant())
        return true;

  return false;
}

/// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a
/// trivially dead instruction, delete it. If that makes any of its operands
/// trivially dead, delete them too, recursively. Return true if any
/// instructions were deleted.
bool llvm::RecursivelyDeleteTriviallyDeadInstructions(
    Value *V, const TargetLibraryInfo *TLI, MemorySSAUpdater *MSSAU,
    std::function<void(Value *)> AboutToDeleteCallback) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I || !isInstructionTriviallyDead(I, TLI))
    return false;

  SmallVector<WeakTrackingVH, 16> DeadInsts;
  DeadInsts.push_back(I);
  RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU,
                                             AboutToDeleteCallback);

  return true;
}

bool llvm::RecursivelyDeleteTriviallyDeadInstructionsPermissive(
    SmallVectorImpl<WeakTrackingVH> &DeadInsts, const TargetLibraryInfo *TLI,
    MemorySSAUpdater *MSSAU,
    std::function<void(Value *)> AboutToDeleteCallback) {
  unsigned S = 0, E = DeadInsts.size(), Alive = 0;
  for (; S != E; ++S) {
    auto *I = dyn_cast_or_null<Instruction>(DeadInsts[S]);
    if (!I || !isInstructionTriviallyDead(I)) {
      DeadInsts[S] = nullptr;
      ++Alive;
    }
  }
  if (Alive == E)
    return false;
  RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU,
                                             AboutToDeleteCallback);
  return true;
}

void llvm::RecursivelyDeleteTriviallyDeadInstructions(
    SmallVectorImpl<WeakTrackingVH> &DeadInsts, const TargetLibraryInfo *TLI,
    MemorySSAUpdater *MSSAU,
    std::function<void(Value *)> AboutToDeleteCallback) {
  // Process the dead instruction list until empty.
  while (!DeadInsts.empty()) {
    Value *V = DeadInsts.pop_back_val();
    Instruction *I = cast_or_null<Instruction>(V);
    if (!I)
      continue;
    assert(isInstructionTriviallyDead(I, TLI) &&
           "Live instruction found in dead worklist!");
    assert(I->use_empty() && "Instructions with uses are not dead.");

    // Don't lose the debug info while deleting the instructions.
    salvageDebugInfo(*I);

    if (AboutToDeleteCallback)
      AboutToDeleteCallback(I);

    // Null out all of the instruction's operands to see if any operand becomes
    // dead as we go.
    for (Use &OpU : I->operands()) {
      Value *OpV = OpU.get();
      OpU.set(nullptr);

      if (!OpV->use_empty())
        continue;

      // If the operand is an instruction that became dead as we nulled out the
      // operand, and if it is 'trivially' dead, delete it in a future loop
      // iteration.
      if (Instruction *OpI = dyn_cast<Instruction>(OpV))
        if (isInstructionTriviallyDead(OpI, TLI))
          DeadInsts.push_back(OpI);
    }
    if (MSSAU)
      MSSAU->removeMemoryAccess(I);

    I->eraseFromParent();
  }
}

bool llvm::replaceDbgUsesWithUndef(Instruction *I) {
  SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
  SmallVector<DbgVariableRecord *, 1> DPUsers;
  findDbgUsers(DbgUsers, I, &DPUsers);
  for (auto *DII : DbgUsers)
    DII->setKillLocation();
  for (auto *DVR : DPUsers)
    DVR->setKillLocation();
  return !DbgUsers.empty() || !DPUsers.empty();
}

/// areAllUsesEqual - Check whether the uses of a value are all the same.
/// This is similar to Instruction::hasOneUse() except this will also return
/// true when there are no uses or multiple uses that all refer to the same
/// value.
static bool areAllUsesEqual(Instruction *I) {
  Value::user_iterator UI = I->user_begin();
  Value::user_iterator UE = I->user_end();
  if (UI == UE)
    return true;

  User *TheUse = *UI;
  for (++UI; UI != UE; ++UI) {
    if (*UI != TheUse)
      return false;
  }
  return true;
}

/// RecursivelyDeleteDeadPHINode - If the specified value is an effectively
/// dead PHI node, due to being a def-use chain of single-use nodes that
/// either forms a cycle or is terminated by a trivially dead instruction,
/// delete it. If that makes any of its operands trivially dead, delete them
/// too, recursively. Return true if a change was made.
bool llvm::RecursivelyDeleteDeadPHINode(PHINode *PN,
                                        const TargetLibraryInfo *TLI,
                                        llvm::MemorySSAUpdater *MSSAU) {
  SmallPtrSet<Instruction*, 4> Visited;
  for (Instruction *I = PN; areAllUsesEqual(I) && !I->mayHaveSideEffects();
       I = cast<Instruction>(*I->user_begin())) {
    if (I->use_empty())
      return RecursivelyDeleteTriviallyDeadInstructions(I, TLI, MSSAU);

    // If we find an instruction more than once, we're on a cycle that
    // won't prove fruitful.
    if (!Visited.insert(I).second) {
      // Break the cycle and delete the instruction and its operands.
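      // Replacing all uses with poison detaches I from the cycle and leaves it
      // without uses, so the recursive deletion below can remove the whole
      // chain.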
      I->replaceAllUsesWith(PoisonValue::get(I->getType()));
      (void)RecursivelyDeleteTriviallyDeadInstructions(I, TLI, MSSAU);
      return true;
    }
  }
  return false;
}

static bool
simplifyAndDCEInstruction(Instruction *I,
                          SmallSetVector<Instruction *, 16> &WorkList,
                          const DataLayout &DL,
                          const TargetLibraryInfo *TLI) {
  if (isInstructionTriviallyDead(I, TLI)) {
    salvageDebugInfo(*I);

    // Null out all of the instruction's operands to see if any operand becomes
    // dead as we go.
    for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
      Value *OpV = I->getOperand(i);
      I->setOperand(i, nullptr);

      if (!OpV->use_empty() || I == OpV)
        continue;

      // If the operand is an instruction that became dead as we nulled out the
      // operand, and if it is 'trivially' dead, delete it in a future loop
      // iteration.
      if (Instruction *OpI = dyn_cast<Instruction>(OpV))
        if (isInstructionTriviallyDead(OpI, TLI))
          WorkList.insert(OpI);
    }

    I->eraseFromParent();

    return true;
  }

  if (Value *SimpleV = simplifyInstruction(I, DL)) {
    // Add the users to the worklist. CAREFUL: an instruction can use itself,
    // in the case of a phi node.
    for (User *U : I->users()) {
      if (U != I) {
        WorkList.insert(cast<Instruction>(U));
      }
    }

    // Replace the instruction with its simplified value.
    bool Changed = false;
    if (!I->use_empty()) {
      I->replaceAllUsesWith(SimpleV);
      Changed = true;
    }
    if (isInstructionTriviallyDead(I, TLI)) {
      I->eraseFromParent();
      Changed = true;
    }
    return Changed;
  }
  return false;
}

/// SimplifyInstructionsInBlock - Scan the specified basic block and try to
/// simplify any instructions in it and recursively delete dead instructions.
///
/// This returns true if it changed the code. Note that it can delete
/// instructions in other blocks as well as in this block.
bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB,
                                       const TargetLibraryInfo *TLI) {
  bool MadeChange = false;
  const DataLayout &DL = BB->getDataLayout();

#ifndef NDEBUG
  // In debug builds, ensure that the terminator of the block is never replaced
  // or deleted by these simplifications. The idea of simplification is that it
  // cannot introduce new instructions, and there is no way to replace the
  // terminator of a block without introducing a new instruction.
  AssertingVH<Instruction> TerminatorVH(&BB->back());
#endif

  SmallSetVector<Instruction *, 16> WorkList;
  // Iterate over the original function, only adding insts to the worklist
  // if they actually need to be revisited. This avoids having to pre-init
  // the worklist with the entire function's worth of instructions.
  for (BasicBlock::iterator BI = BB->begin(), E = std::prev(BB->end());
       BI != E;) {
    assert(!BI->isTerminator());
    Instruction *I = &*BI;
    ++BI;

    // We're visiting this instruction now, so make sure it's not in the
    // worklist from an earlier visit.
    if (!WorkList.count(I))
      MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
  }

  while (!WorkList.empty()) {
    Instruction *I = WorkList.pop_back_val();
    MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
  }
  return MadeChange;
}

//===----------------------------------------------------------------------===//
//  Control Flow Graph Restructuring.
//

void llvm::MergeBasicBlockIntoOnlyPred(BasicBlock *DestBB,
                                       DomTreeUpdater *DTU) {

  // If BB has single-entry PHI nodes, fold them.
  while (PHINode *PN = dyn_cast<PHINode>(DestBB->begin())) {
    Value *NewVal = PN->getIncomingValue(0);
    // Replace a self-referencing PHI with poison; it must be dead.
    if (NewVal == PN) NewVal = PoisonValue::get(PN->getType());
    PN->replaceAllUsesWith(NewVal);
    PN->eraseFromParent();
  }

  BasicBlock *PredBB = DestBB->getSinglePredecessor();
  assert(PredBB && "Block doesn't have a single predecessor!");

  bool ReplaceEntryBB = PredBB->isEntryBlock();

  // DTU updates: Collect all the edges that enter
  // PredBB. These dominator edges will be redirected to DestBB.
  SmallVector<DominatorTree::UpdateType, 32> Updates;

  if (DTU) {
    // To avoid processing the same predecessor more than once.
    SmallPtrSet<BasicBlock *, 2> SeenPreds;
    Updates.reserve(Updates.size() + 2 * pred_size(PredBB) + 1);
    for (BasicBlock *PredOfPredBB : predecessors(PredBB))
      // This predecessor of PredBB may already have DestBB as a successor.
      if (PredOfPredBB != PredBB)
        if (SeenPreds.insert(PredOfPredBB).second)
          Updates.push_back({DominatorTree::Insert, PredOfPredBB, DestBB});
    SeenPreds.clear();
    for (BasicBlock *PredOfPredBB : predecessors(PredBB))
      if (SeenPreds.insert(PredOfPredBB).second)
        Updates.push_back({DominatorTree::Delete, PredOfPredBB, PredBB});
    Updates.push_back({DominatorTree::Delete, PredBB, DestBB});
  }

  // Zap anything that took the address of DestBB. Not doing this will give
  // the address an invalid value.
  if (DestBB->hasAddressTaken()) {
    BlockAddress *BA = BlockAddress::get(DestBB);
    Constant *Replacement =
        ConstantInt::get(Type::getInt32Ty(BA->getContext()), 1);
    BA->replaceAllUsesWith(ConstantExpr::getIntToPtr(Replacement,
                                                     BA->getType()));
    BA->destroyConstant();
  }

  // Anything that branched to PredBB now branches to DestBB.
  PredBB->replaceAllUsesWith(DestBB);

  // Splice all the instructions from PredBB to DestBB.
  PredBB->getTerminator()->eraseFromParent();
  DestBB->splice(DestBB->begin(), PredBB);
  new UnreachableInst(PredBB->getContext(), PredBB);

  // If the PredBB is the entry block of the function, move DestBB up to
  // become the entry block after we erase PredBB.
  if (ReplaceEntryBB)
    DestBB->moveAfter(PredBB);

  if (DTU) {
    assert(PredBB->size() == 1 &&
           isa<UnreachableInst>(PredBB->getTerminator()) &&
           "The successor list of PredBB isn't empty before "
           "applying corresponding DTU updates.");
    DTU->applyUpdatesPermissive(Updates);
    DTU->deleteBB(PredBB);
    // Recalculation of DomTree is needed when updating a forward DomTree and
    // the Entry BB is replaced.
    if (ReplaceEntryBB && DTU->hasDomTree()) {
      // The entry block was removed and there is no external interface for
      // the dominator tree to be notified of this change. In this corner-case
      // we recalculate the entire tree.
      DTU->recalculate(*(DestBB->getParent()));
    }
  } else {
    PredBB->eraseFromParent(); // Nuke BB if DTU is nullptr.
  }
}

/// Return true if we can choose one of these values to use in place of the
/// other. Note that we will always choose the non-undef value to keep.
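/// For example, the incoming pair (undef, %x) can be merged (keeping %x),
/// while two distinct non-undef values cannot.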
static bool CanMergeValues(Value *First, Value *Second) {
  return First == Second || isa<UndefValue>(First) || isa<UndefValue>(Second);
}

/// Return true if we can fold BB, an almost-empty BB ending in an unconditional
/// branch to Succ, into Succ.
///
/// Assumption: Succ is the single successor for BB.
static bool
CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ,
                                const SmallPtrSetImpl<BasicBlock *> &BBPreds) {
  assert(*succ_begin(BB) == Succ && "Succ is not successor of BB!");

  LLVM_DEBUG(dbgs() << "Looking to fold " << BB->getName() << " into "
                    << Succ->getName() << "\n");
  // Shortcut: if there is only a single predecessor, it must be BB and merging
  // is always safe.
  if (Succ->getSinglePredecessor())
    return true;

  // Look at all the phi nodes in Succ to see if they present a conflict when
  // merging these blocks.
  for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
    PHINode *PN = cast<PHINode>(I);

    // If the incoming value from BB is again a PHINode in
    // BB which has the same incoming value for *PI as PN does, we can
    // merge the phi nodes and then the blocks can still be merged.
    PHINode *BBPN = dyn_cast<PHINode>(PN->getIncomingValueForBlock(BB));
    if (BBPN && BBPN->getParent() == BB) {
      for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
        BasicBlock *IBB = PN->getIncomingBlock(PI);
        if (BBPreds.count(IBB) &&
            !CanMergeValues(BBPN->getIncomingValueForBlock(IBB),
                            PN->getIncomingValue(PI))) {
          LLVM_DEBUG(dbgs()
                     << "Can't fold, phi node " << PN->getName() << " in "
                     << Succ->getName() << " is conflicting with "
                     << BBPN->getName() << " with regard to common predecessor "
                     << IBB->getName() << "\n");
          return false;
        }
      }
    } else {
      Value* Val = PN->getIncomingValueForBlock(BB);
      for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
        // See if the incoming value for the common predecessor is equal to the
        // one for BB, in which case this phi node will not prevent the merging
        // of the block.
        BasicBlock *IBB = PN->getIncomingBlock(PI);
        if (BBPreds.count(IBB) &&
            !CanMergeValues(Val, PN->getIncomingValue(PI))) {
          LLVM_DEBUG(dbgs() << "Can't fold, phi node " << PN->getName()
                            << " in " << Succ->getName()
                            << " is conflicting with regard to common "
                            << "predecessor " << IBB->getName() << "\n");
          return false;
        }
      }
    }
  }

  return true;
}

using PredBlockVector = SmallVector<BasicBlock *, 16>;
using IncomingValueMap = SmallDenseMap<BasicBlock *, Value *, 16>;

/// Determines the value to use as the phi node input for a block.
///
/// Select between \p OldVal and any value that we know flows from \p BB
/// to a particular phi on the basis of which one (if either) is not
/// undef. Update IncomingValues based on the selected value.
///
/// \param OldVal The value we are considering selecting.
/// \param BB The block that the value flows in from.
/// \param IncomingValues A map from block-to-value for other phi inputs
/// that we have examined.
///
/// \returns the selected value.
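///
/// For example, if \p OldVal is undef and \p IncomingValues already maps
/// \p BB to a defined value, that defined value is selected; a non-undef
/// \p OldVal is always kept.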
static Value *selectIncomingValueForBlock(Value *OldVal, BasicBlock *BB,
                                          IncomingValueMap &IncomingValues) {
  if (!isa<UndefValue>(OldVal)) {
    assert((!IncomingValues.count(BB) ||
            IncomingValues.find(BB)->second == OldVal) &&
           "Expected OldVal to match incoming value from BB!");

    IncomingValues.insert(std::make_pair(BB, OldVal));
    return OldVal;
  }

  IncomingValueMap::const_iterator It = IncomingValues.find(BB);
  if (It != IncomingValues.end()) return It->second;

  return OldVal;
}

/// Create a map from block to value for the operands of a
/// given phi.
///
/// Create a map from block to value for each non-undef value flowing
/// into \p PN.
///
/// \param PN The phi we are collecting the map for.
/// \param IncomingValues [out] The map from block to value for this phi.
static void gatherIncomingValuesToPhi(PHINode *PN,
                                      IncomingValueMap &IncomingValues) {
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *BB = PN->getIncomingBlock(i);
    Value *V = PN->getIncomingValue(i);

    if (!isa<UndefValue>(V))
      IncomingValues.insert(std::make_pair(BB, V));
  }
}

/// Replace the incoming undef values to a phi with the values
/// from a block-to-value map.
///
/// \param PN The phi we are replacing the undefs in.
/// \param IncomingValues A map from block to value.
static void replaceUndefValuesInPhi(PHINode *PN,
                                    const IncomingValueMap &IncomingValues) {
  SmallVector<unsigned> TrueUndefOps;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *V = PN->getIncomingValue(i);

    if (!isa<UndefValue>(V)) continue;

    BasicBlock *BB = PN->getIncomingBlock(i);
    IncomingValueMap::const_iterator It = IncomingValues.find(BB);

    // Keep track of undef/poison incoming values. Those must match, so we fix
    // them up below if needed.
    // Note: this is conservatively correct, but we could try harder and group
    // the undef values per incoming basic block.
    if (It == IncomingValues.end()) {
      TrueUndefOps.push_back(i);
      continue;
    }

    // There is a defined value for this incoming block, so map this undef
    // incoming value to the defined value.
    PN->setIncomingValue(i, It->second);
  }

  // If there are both undef and poison values incoming, then convert those
  // values to undef. It is invalid to have different values for the same
  // incoming block.
  unsigned PoisonCount = count_if(TrueUndefOps, [&](unsigned i) {
    return isa<PoisonValue>(PN->getIncomingValue(i));
  });
  if (PoisonCount != 0 && PoisonCount != TrueUndefOps.size()) {
    for (unsigned i : TrueUndefOps)
      PN->setIncomingValue(i, UndefValue::get(PN->getType()));
  }
}

// Return true only when BB and Succ share a single common predecessor.
// This only handles cases when BB can't be merged while its predecessors can
// be redirected.
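// For example, if BB's predecessors are {P0, P1} and Succ's are {P1, BB},
// then P1 is the single common predecessor and CommonPred is set to it;
// finding a second shared predecessor makes this return false.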
static bool
CanRedirectPredsOfEmptyBBToSucc(BasicBlock *BB, BasicBlock *Succ,
                                const SmallPtrSetImpl<BasicBlock *> &BBPreds,
                                BasicBlock *&CommonPred) {

  // There must be phis in BB; otherwise BB would be merged into Succ directly.
  if (BB->phis().empty() || Succ->phis().empty())
    return false;

  // BB must have predecessors that are not shared with Succ and that can be
  // redirected to Succ.
  if (!BB->hasNPredecessorsOrMore(2))
    return false;

  if (any_of(BBPreds, [](const BasicBlock *Pred) {
        return isa<PHINode>(Pred->begin()) &&
               isa<IndirectBrInst>(Pred->getTerminator());
      }))
    return false;

  // Get the single common predecessor of both BB and Succ. Return false
  // when there is more than one common predecessor.
  for (BasicBlock *SuccPred : predecessors(Succ)) {
    if (BBPreds.count(SuccPred)) {
      if (CommonPred)
        return false;
      CommonPred = SuccPred;
    }
  }

  return true;
}

/// Check whether removing \p BB will make the phis in its \p Succ have too
/// many incoming entries. This function does not check whether \p BB is
/// foldable or not.
static bool introduceTooManyPhiEntries(BasicBlock *BB, BasicBlock *Succ) {
  // If BB only has one predecessor, then removing it will not introduce more
  // incoming edges for phis.
  if (BB->hasNPredecessors(1))
    return false;
  unsigned NumPreds = pred_size(BB);
  unsigned NumChangedPhi = 0;
  for (auto &Phi : Succ->phis()) {
    // If the incoming value is a phi and the phi is defined in BB,
    // then removing BB will not increase the total phi entries of the IR.
    if (auto *IncomingPhi = dyn_cast<PHINode>(Phi.getIncomingValueForBlock(BB)))
      if (IncomingPhi->getParent() == BB)
        continue;
    // Otherwise, we need to add entries to the phi.
    NumChangedPhi++;
  }
  // For every phi that needs to be changed, (NumPreds - 1) new entries will be
  // added. If the total increase in phi entries exceeds
  // MaxPhiEntriesIncreaseAfterRemovingEmptyBlock, it will be considered as
  // introducing too many new phi entries.
  return (NumPreds - 1) * NumChangedPhi >
         MaxPhiEntriesIncreaseAfterRemovingEmptyBlock;
}

/// Replace a value flowing from a block to a phi with
/// potentially multiple instances of that value flowing from the
/// block's predecessors to the phi.
///
/// \param BB The block with the value flowing into the phi.
/// \param BBPreds The predecessors of BB.
/// \param PN The phi that we are updating.
/// \param CommonPred The common predecessor of BB and PN's BasicBlock.
static void redirectValuesFromPredecessorsToPhi(BasicBlock *BB,
                                                const PredBlockVector &BBPreds,
                                                PHINode *PN,
                                                BasicBlock *CommonPred) {
  Value *OldVal = PN->removeIncomingValue(BB, false);
  assert(OldVal && "No entry in PHI for Pred BB!");

  IncomingValueMap IncomingValues;

  // We are merging two blocks - BB, and the block containing PN - and
  // as a result we need to redirect edges from the predecessors of BB
  // to go to the block containing PN, and update PN
  // accordingly. Since we allow merging blocks in the case where the
  // predecessor and successor blocks both share some predecessors,
  // and where some of those common predecessors might have undef
  // values flowing into PN, we want to rewrite those values to be
  // consistent with the non-undef values.
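  // For example, if a common predecessor feeds undef into PN via BB but a
  // defined value into PN directly, replaceUndefValuesInPhi below rewrites
  // the undef entry to that defined value.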

  gatherIncomingValuesToPhi(PN, IncomingValues);

  // If this incoming value is one of the PHI nodes in BB, the new entries
  // in the PHI node are the entries from the old PHI.
  if (isa<PHINode>(OldVal) && cast<PHINode>(OldVal)->getParent() == BB) {
    PHINode *OldValPN = cast<PHINode>(OldVal);
    for (unsigned i = 0, e = OldValPN->getNumIncomingValues(); i != e; ++i) {
      // Note that, since we are merging phi nodes and BB and Succ might
      // have common predecessors, we could end up with a phi node with
      // identical incoming branches. This will be cleaned up later (and
      // will trigger asserts if we try to clean it up now, without also
      // simplifying the corresponding conditional branch).
      BasicBlock *PredBB = OldValPN->getIncomingBlock(i);

      if (PredBB == CommonPred)
        continue;

      Value *PredVal = OldValPN->getIncomingValue(i);
      Value *Selected =
          selectIncomingValueForBlock(PredVal, PredBB, IncomingValues);

      // And add a new incoming value for this predecessor for the
      // newly retargeted branch.
      PN->addIncoming(Selected, PredBB);
    }
    if (CommonPred)
      PN->addIncoming(OldValPN->getIncomingValueForBlock(CommonPred), BB);

  } else {
    for (BasicBlock *PredBB : BBPreds) {
      // Update existing incoming values in PN for this
      // predecessor of BB.
      if (PredBB == CommonPred)
        continue;

      Value *Selected =
          selectIncomingValueForBlock(OldVal, PredBB, IncomingValues);

      // And add a new incoming value for this predecessor for the
      // newly retargeted branch.
      PN->addIncoming(Selected, PredBB);
    }
    if (CommonPred)
      PN->addIncoming(OldVal, BB);
  }

  replaceUndefValuesInPhi(PN, IncomingValues);
}

bool llvm::TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB,
                                                   DomTreeUpdater *DTU) {
  assert(BB != &BB->getParent()->getEntryBlock() &&
         "TryToSimplifyUncondBranchFromEmptyBlock called on entry block!");

  // We can't simplify infinite loops.
  BasicBlock *Succ = cast<BranchInst>(BB->getTerminator())->getSuccessor(0);
  if (BB == Succ)
    return false;

  SmallPtrSet<BasicBlock *, 16> BBPreds(pred_begin(BB), pred_end(BB));

  // The single common predecessor of BB and Succ when BB cannot be killed.
  BasicBlock *CommonPred = nullptr;

  bool BBKillable = CanPropagatePredecessorsForPHIs(BB, Succ, BBPreds);

  // Even if we cannot fold BB into Succ, we may be able to redirect the
  // predecessors of BB to Succ.
  bool BBPhisMergeable = BBKillable || CanRedirectPredsOfEmptyBBToSucc(
                                           BB, Succ, BBPreds, CommonPred);

  if ((!BBKillable && !BBPhisMergeable) || introduceTooManyPhiEntries(BB, Succ))
    return false;

  // Check to see if merging these blocks/phis would cause conflicts for any of
  // the phi nodes in BB or Succ. If not, we can safely merge.

  // Check for cases where Succ has multiple predecessors and a PHI node in BB
  // has uses which will not disappear when the PHI nodes are merged. It is
  // possible to handle such cases, but difficult: it requires checking whether
  // BB dominates Succ, which is non-trivial to calculate in the case where
  // Succ has multiple predecessors. Also, it requires checking whether
  // constructing the necessary self-referential PHI node doesn't introduce any
  // conflicts; this isn't too difficult, but the previous code for doing this
  // was incorrect.
  //
  // Note that if this check finds a live use, BB dominates Succ, so BB is
  // something like a loop pre-header (or rarely, a part of an irreducible CFG);
  // folding the branch isn't profitable in that case anyway.
  if (!Succ->getSinglePredecessor()) {
    BasicBlock::iterator BBI = BB->begin();
    while (isa<PHINode>(*BBI)) {
      for (Use &U : BBI->uses()) {
        if (PHINode* PN = dyn_cast<PHINode>(U.getUser())) {
          if (PN->getIncomingBlock(U) != BB)
            return false;
        } else {
          return false;
        }
      }
      ++BBI;
    }
  }

  if (BBPhisMergeable && CommonPred)
    LLVM_DEBUG(dbgs() << "Found Common Predecessor between: " << BB->getName()
                      << " and " << Succ->getName() << " : "
                      << CommonPred->getName() << "\n");

  // If 'BB' and 'BB->Pred' are loop latches, bail out to preserve inner-loop
  // metadata.
  //
  // FIXME: This is a stop-gap solution to preserve inner-loop metadata given
  // current status (that loop metadata is implemented as metadata attached to
  // the branch instruction in the loop latch block). To quote from review
  // comments, "the current representation of loop metadata (using a loop latch
  // terminator attachment) is known to be fundamentally broken. Loop latches
  // are not uniquely associated with loops (both in that a latch can be part of
  // multiple loops and a loop may have multiple latches). Loop headers are. The
  // solution to this problem is also known: Add support for basic block
  // metadata, and attach loop metadata to the loop header."
  //
  // Why bail out:
  // In this case, we expect 'BB' is the latch for the outer loop and 'BB->Pred'
  // is the latch for the inner loop (see reason below), so bail out to preserve
  // inner-loop metadata rather than eliminating 'BB' and attaching its metadata
  // to this inner loop.
  // - The reason we believe 'BB' and 'BB->Pred' have different inner-most
  //   loops: assuming 'BB' and 'BB->Pred' are from the same inner-most loop L,
  //   then 'BB' is the header and latch of 'L' and thereby 'L' must consist of
  //   one self-looping basic block, which is contradictory with the assumption.
  //
  // To illustrate how inner-loop metadata is dropped:
  //
  // CFG Before
  //
  // BB is while.cond.exit, attached with loop metadata md2.
  // BB->Pred is for.body, attached with loop metadata md1.
  //
  //      entry
  //        |
  //        v
  // ---> while.cond   ------------->   while.end
  // |        |
  // |        v
  // |   while.body
  // |        |
  // |        v
  // |    for.body   <---- (md1)
  // |        |  |______|
  // |        v
  // |   while.cond.exit (md2)
  // |        |
  // |________|
  //
  // CFG After
  //
  // while.cond1 is the merge of while.cond.exit and while.cond above.
  // for.body is attached with md2, and md1 is dropped.
  // If LoopSimplify runs later (as a part of a loop pass), it could create
  // dedicated exits for the inner loop (essentially adding `while.cond.exit`
  // back), but it won't see 'md1' nor restore it for the inner loop.
  //
  //      entry
  //        |
  //        v
  // ---> while.cond1  ------------->   while.end
  // |        |
  // |        v
  // |   while.body
  // |        |
  // |        v
  // |    for.body   <---- (md2)
  // |________|  |______|
  if (Instruction *TI = BB->getTerminator())
    if (TI->hasMetadata(LLVMContext::MD_loop))
      for (BasicBlock *Pred : predecessors(BB))
        if (Instruction *PredTI = Pred->getTerminator())
          if (PredTI->hasMetadata(LLVMContext::MD_loop))
            return false;

  if (BBKillable)
    LLVM_DEBUG(dbgs() << "Killing Trivial BB: \n" << *BB);
  else if (BBPhisMergeable)
    LLVM_DEBUG(dbgs() << "Merge Phis in Trivial BB: \n" << *BB);

  SmallVector<DominatorTree::UpdateType, 32> Updates;

  if (DTU) {
    // To avoid processing the same predecessor more than once.
    SmallPtrSet<BasicBlock *, 8> SeenPreds;
    // All predecessors of BB (except the common predecessor) will be moved to
    // Succ.
    Updates.reserve(Updates.size() + 2 * pred_size(BB) + 1);
    SmallPtrSet<BasicBlock *, 16> SuccPreds(pred_begin(Succ), pred_end(Succ));
    for (auto *PredOfBB : predecessors(BB)) {
      // Do not modify those common predecessors of BB and Succ.
      if (!SuccPreds.contains(PredOfBB))
        if (SeenPreds.insert(PredOfBB).second)
          Updates.push_back({DominatorTree::Insert, PredOfBB, Succ});
    }

    SeenPreds.clear();

    for (auto *PredOfBB : predecessors(BB))
      // When BB cannot be killed, do not remove the edge between BB and
      // CommonPred.
      if (SeenPreds.insert(PredOfBB).second && PredOfBB != CommonPred)
        Updates.push_back({DominatorTree::Delete, PredOfBB, BB});

    if (BBKillable)
      Updates.push_back({DominatorTree::Delete, BB, Succ});
  }

  if (isa<PHINode>(Succ->begin())) {
    // If there is more than one pred of succ, and there are PHI nodes in
    // the successor, then we need to add incoming edges for the PHI nodes.
    //
    const PredBlockVector BBPreds(predecessors(BB));

    // Loop over all of the PHI nodes in the successor of BB.
    for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
      PHINode *PN = cast<PHINode>(I);
      redirectValuesFromPredecessorsToPhi(BB, BBPreds, PN, CommonPred);
    }
  }

  if (Succ->getSinglePredecessor()) {
    // BB is the only predecessor of Succ, so Succ will end up with exactly
    // the same predecessors BB had.
    // Copy over any phi, debug or lifetime instruction.
    BB->getTerminator()->eraseFromParent();
    Succ->splice(Succ->getFirstNonPHIIt(), BB);
  } else {
    while (PHINode *PN = dyn_cast<PHINode>(&BB->front())) {
      // We explicitly check for such uses for merging phis.
      assert(PN->use_empty() && "There shouldn't be any uses here!");
      PN->eraseFromParent();
    }
  }

  // If the unconditional branch we replaced contains llvm.loop metadata, we
  // add the metadata to the branch instructions in the predecessors.
  if (Instruction *TI = BB->getTerminator())
    if (MDNode *LoopMD = TI->getMetadata(LLVMContext::MD_loop))
      for (BasicBlock *Pred : predecessors(BB))
        Pred->getTerminator()->setMetadata(LLVMContext::MD_loop, LoopMD);

  if (BBKillable) {
    // Everything that jumped to BB now goes to Succ.
    BB->replaceAllUsesWith(Succ);

    if (!Succ->hasName())
      Succ->takeName(BB);

    // Clear the successor list of BB to match updates applying to DTU later.
    if (BB->getTerminator())
      BB->back().eraseFromParent();

    new UnreachableInst(BB->getContext(), BB);
    assert(succ_empty(BB) && "The successor list of BB isn't empty before "
                             "applying corresponding DTU updates.");
  } else if (BBPhisMergeable) {
    // Everything except CommonPred that jumped to BB now goes to Succ.
    BB->replaceUsesWithIf(Succ, [BBPreds, CommonPred](Use &U) -> bool {
      if (Instruction *UseInst = dyn_cast<Instruction>(U.getUser()))
        return UseInst->getParent() != CommonPred &&
               BBPreds.contains(UseInst->getParent());
      return false;
    });
  }

  if (DTU)
    DTU->applyUpdates(Updates);

  if (BBKillable)
    DeleteDeadBlock(BB, DTU);

  return true;
}

static bool
EliminateDuplicatePHINodesNaiveImpl(BasicBlock *BB,
                                    SmallPtrSetImpl<PHINode *> &ToRemove) {
  // This implementation doesn't currently consider undef operands
  // specially. Theoretically, two phis which are identical except for
  // one having an undef where the other doesn't could be collapsed.

  bool Changed = false;

  // Examine each PHI.
  // Note that the increment of I must *NOT* be in the iteration_expression,
  // since we don't want to immediately advance when we restart from the
  // beginning.
  for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I);) {
    ++I;
    // Is there an identical PHI node in this basic block?
    // Note that we only look in the upper triangle of the pairwise
    // comparisons; we have already checked that the lower-triangle pairs
    // aren't identical.
    for (auto J = I; PHINode *DuplicatePN = dyn_cast<PHINode>(J); ++J) {
      if (ToRemove.contains(DuplicatePN))
        continue;
      if (!DuplicatePN->isIdenticalToWhenDefined(PN))
        continue;
      // A duplicate. Replace this PHI with the base PHI.
      ++NumPHICSEs;
      DuplicatePN->replaceAllUsesWith(PN);
      ToRemove.insert(DuplicatePN);
      Changed = true;

      // The RAUW can change PHIs that we already visited.
      I = BB->begin();
      break; // Start over from the beginning.
    }
  }
  return Changed;
}

static bool
EliminateDuplicatePHINodesSetBasedImpl(BasicBlock *BB,
                                       SmallPtrSetImpl<PHINode *> &ToRemove) {
  // This implementation doesn't currently consider undef operands
  // specially. Theoretically, two phis which are identical except for
  // one having an undef where the other doesn't could be collapsed.

  struct PHIDenseMapInfo {
    static PHINode *getEmptyKey() {
      return DenseMapInfo<PHINode *>::getEmptyKey();
    }

    static PHINode *getTombstoneKey() {
      return DenseMapInfo<PHINode *>::getTombstoneKey();
    }

    static bool isSentinel(PHINode *PN) {
      return PN == getEmptyKey() || PN == getTombstoneKey();
    }

    // WARNING: this logic must be kept in sync with
    //          Instruction::isIdenticalToWhenDefined()!
    static unsigned getHashValueImpl(PHINode *PN) {
      // Compute a hash value on the operands. Instcombine will likely have
      // sorted them, which helps expose duplicates, but we have to check all
      // the operands to be safe in case instcombine hasn't run.
      return static_cast<unsigned>(hash_combine(
          hash_combine_range(PN->value_op_begin(), PN->value_op_end()),
          hash_combine_range(PN->block_begin(), PN->block_end())));
    }

    static unsigned getHashValue(PHINode *PN) {
#ifndef NDEBUG
      // If -phicse-debug-hash was specified, return a constant -- this
      // will force all hashing to collide, so we'll exhaustively search
      // the table for a match, and the assertion in isEqual will fire if
      // there's a bug causing equal keys to hash differently.
      if (PHICSEDebugHash)
        return 0;
#endif
      return getHashValueImpl(PN);
    }

    static bool isEqualImpl(PHINode *LHS, PHINode *RHS) {
      if (isSentinel(LHS) || isSentinel(RHS))
        return LHS == RHS;
      return LHS->isIdenticalTo(RHS);
    }

    static bool isEqual(PHINode *LHS, PHINode *RHS) {
      // These comparisons are nontrivial, so assert that equality implies
      // hash equality (DenseMap demands this as an invariant).
      bool Result = isEqualImpl(LHS, RHS);
      assert(!Result || (isSentinel(LHS) && LHS == RHS) ||
             getHashValueImpl(LHS) == getHashValueImpl(RHS));
      return Result;
    }
  };

  // Set of unique PHINodes.
  DenseSet<PHINode *, PHIDenseMapInfo> PHISet;
  PHISet.reserve(4 * PHICSENumPHISmallSize);

  // Examine each PHI.
  bool Changed = false;
  for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I++);) {
    if (ToRemove.contains(PN))
      continue;
    auto Inserted = PHISet.insert(PN);
    if (!Inserted.second) {
      // A duplicate. Replace this PHI with its duplicate.
      ++NumPHICSEs;
      PN->replaceAllUsesWith(*Inserted.first);
      ToRemove.insert(PN);
      Changed = true;

      // The RAUW can change PHIs that we already visited. Start over from the
      // beginning.
      PHISet.clear();
      I = BB->begin();
    }
  }

  return Changed;
}

bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB,
                                      SmallPtrSetImpl<PHINode *> &ToRemove) {
  if (
#ifndef NDEBUG
      !PHICSEDebugHash &&
#endif
      hasNItemsOrLess(BB->phis(), PHICSENumPHISmallSize))
    return EliminateDuplicatePHINodesNaiveImpl(BB, ToRemove);
  return EliminateDuplicatePHINodesSetBasedImpl(BB, ToRemove);
}

bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB) {
  SmallPtrSet<PHINode *, 8> ToRemove;
  bool Changed = EliminateDuplicatePHINodes(BB, ToRemove);
  for (PHINode *PN : ToRemove)
    PN->eraseFromParent();
  return Changed;
}

Align llvm::tryEnforceAlignment(Value *V, Align PrefAlign,
                                const DataLayout &DL) {
  V = V->stripPointerCasts();

  if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    // TODO: Ideally, this function would not be called if PrefAlign is smaller
    // than the current alignment, as the known bits calculation should have
    // already taken it into account. However, this is not always the case,
    // as computeKnownBits() has a depth limit, while stripPointerCasts()
    // doesn't.
    Align CurrentAlign = AI->getAlign();
    if (PrefAlign <= CurrentAlign)
      return CurrentAlign;

    // If the preferred alignment is greater than the natural stack alignment
    // then don't round up. This avoids dynamic stack realignment.
    MaybeAlign StackAlign = DL.getStackAlignment();
    if (StackAlign && PrefAlign > *StackAlign)
      return CurrentAlign;
    AI->setAlignment(PrefAlign);
    return PrefAlign;
  }

  if (auto *GO = dyn_cast<GlobalObject>(V)) {
    // TODO: as above, this shouldn't be necessary.
    Align CurrentAlign = GO->getPointerAlignment(DL);
    if (PrefAlign <= CurrentAlign)
      return CurrentAlign;

    // If there is a large requested alignment and we can, bump up the alignment
    // of the global. If the memory we set aside for the global may not be the
    // memory used by the final program then it is impossible for us to reliably
    // enforce the preferred alignment.
    if (!GO->canIncreaseAlignment())
      return CurrentAlign;

    if (GO->isThreadLocal()) {
      unsigned MaxTLSAlign = GO->getParent()->getMaxTLSAlignment() / CHAR_BIT;
      if (MaxTLSAlign && PrefAlign > Align(MaxTLSAlign))
        PrefAlign = Align(MaxTLSAlign);
    }

    GO->setAlignment(PrefAlign);
    return PrefAlign;
  }

  return Align(1);
}

Align llvm::getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign,
                                       const DataLayout &DL,
                                       const Instruction *CxtI,
                                       AssumptionCache *AC,
                                       const DominatorTree *DT) {
  assert(V->getType()->isPointerTy() &&
         "getOrEnforceKnownAlignment expects a pointer!");

  KnownBits Known = computeKnownBits(V, DL, 0, AC, CxtI, DT);
  unsigned TrailZ = Known.countMinTrailingZeros();

  // Avoid trouble with ridiculously large TrailZ values, such as
  // those computed from a null pointer.
  // LLVM doesn't support alignments larger than (1 << MaxAlignmentExponent).
  TrailZ = std::min(TrailZ, +Value::MaxAlignmentExponent);

  Align Alignment = Align(1ull << std::min(Known.getBitWidth() - 1, TrailZ));

  if (PrefAlign && *PrefAlign > Alignment)
    Alignment = std::max(Alignment, tryEnforceAlignment(V, *PrefAlign, DL));

  // We don't need to make any adjustment.
  return Alignment;
}

///===---------------------------------------------------------------------===//
///  Dbg Intrinsic utilities
///

/// See if there is a dbg.value intrinsic for DIVar for the PHI node.
static bool PhiHasDebugValue(DILocalVariable *DIVar,
                             DIExpression *DIExpr,
                             PHINode *APN) {
  // Since we can't guarantee that the original dbg.declare intrinsic
  // is removed by LowerDbgDeclare(), we need to make sure that we are
  // not inserting the same dbg.value intrinsic over and over.
  SmallVector<DbgValueInst *, 1> DbgValues;
  SmallVector<DbgVariableRecord *, 1> DbgVariableRecords;
  findDbgValues(DbgValues, APN, &DbgVariableRecords);
  for (auto *DVI : DbgValues) {
    assert(is_contained(DVI->getValues(), APN));
    if ((DVI->getVariable() == DIVar) && (DVI->getExpression() == DIExpr))
      return true;
  }
  for (auto *DVR : DbgVariableRecords) {
    assert(is_contained(DVR->location_ops(), APN));
    if ((DVR->getVariable() == DIVar) && (DVR->getExpression() == DIExpr))
      return true;
  }
  return false;
}

/// Check if the alloc size of \p ValTy is large enough to cover the variable
/// (or fragment of the variable) described by \p DII.
///
/// This is primarily intended as a helper for the different
/// ConvertDebugDeclareToDebugValue functions. The dbg.declare that is converted
/// describes an alloca'd variable, so we need to use the alloc size of the
/// value when doing the comparison. E.g. an i1 value will be identified as
/// covering an n-bit fragment, if the store size of i1 is at least n bits.
static bool valueCoversEntireFragment(Type *ValTy, DbgVariableIntrinsic *DII) {
  const DataLayout &DL = DII->getDataLayout();
  TypeSize ValueSize = DL.getTypeAllocSizeInBits(ValTy);
  if (std::optional<uint64_t> FragmentSize =
          DII->getExpression()->getActiveBits(DII->getVariable()))
    return TypeSize::isKnownGE(ValueSize, TypeSize::getFixed(*FragmentSize));

  // We can't always calculate the size of the DI variable (e.g. if it is a
  // VLA). Try to use the size of the alloca that the dbg intrinsic describes
  // instead.
  if (DII->isAddressOfVariable()) {
    // DII should have exactly 1 location when it is an address.
    assert(DII->getNumVariableLocationOps() == 1 &&
           "address of variable must have exactly 1 location operand.");
    if (auto *AI =
            dyn_cast_or_null<AllocaInst>(DII->getVariableLocationOp(0))) {
      if (std::optional<TypeSize> FragmentSize =
              AI->getAllocationSizeInBits(DL)) {
        return TypeSize::isKnownGE(ValueSize, *FragmentSize);
      }
    }
  }
  // Could not determine size of variable. Conservatively return false.
  return false;
}
// RemoveDIs: duplicate implementation of the above, using DbgVariableRecords,
// the replacement for dbg.values.
static bool valueCoversEntireFragment(Type *ValTy, DbgVariableRecord *DVR) {
  const DataLayout &DL = DVR->getModule()->getDataLayout();
  TypeSize ValueSize = DL.getTypeAllocSizeInBits(ValTy);
  if (std::optional<uint64_t> FragmentSize =
          DVR->getExpression()->getActiveBits(DVR->getVariable()))
    return TypeSize::isKnownGE(ValueSize, TypeSize::getFixed(*FragmentSize));

  // We can't always calculate the size of the DI variable (e.g. if it is a
  // VLA). Try to use the size of the alloca that the dbg intrinsic describes
  // instead.
  if (DVR->isAddressOfVariable()) {
    // DVR should have exactly 1 location when it is an address.
    assert(DVR->getNumVariableLocationOps() == 1 &&
           "address of variable must have exactly 1 location operand.");
    if (auto *AI =
            dyn_cast_or_null<AllocaInst>(DVR->getVariableLocationOp(0))) {
      if (std::optional<TypeSize> FragmentSize =
              AI->getAllocationSizeInBits(DL)) {
        return TypeSize::isKnownGE(ValueSize, *FragmentSize);
      }
    }
  }
  // Could not determine size of variable. Conservatively return false.
  return false;
}

static void insertDbgValueOrDbgVariableRecord(DIBuilder &Builder, Value *DV,
                                              DILocalVariable *DIVar,
                                              DIExpression *DIExpr,
                                              const DebugLoc &NewLoc,
                                              BasicBlock::iterator Instr) {
  if (!UseNewDbgInfoFormat) {
    auto DbgVal = Builder.insertDbgValueIntrinsic(DV, DIVar, DIExpr, NewLoc,
                                                  (Instruction *)nullptr);
    cast<Instruction *>(DbgVal)->insertBefore(Instr);
  } else {
    // RemoveDIs: if we're using the new debug-info format, allocate a
    // DbgVariableRecord directly instead of a dbg.value intrinsic.
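    // The new record is not an instruction, so it is not spliced into the
    // instruction list; it is attached to the block's debug-record list
    // just before *Instr instead.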
    ValueAsMetadata *DVAM = ValueAsMetadata::get(DV);
    DbgVariableRecord *DVRec =
        new DbgVariableRecord(DVAM, DIVar, DIExpr, NewLoc.get());
    Instr->getParent()->insertDbgRecordBefore(DVRec, Instr);
  }
}

static void insertDbgValueOrDbgVariableRecordAfter(
    DIBuilder &Builder, Value *DV, DILocalVariable *DIVar, DIExpression *DIExpr,
    const DebugLoc &NewLoc, BasicBlock::iterator Instr) {
  if (!UseNewDbgInfoFormat) {
    auto DbgVal = Builder.insertDbgValueIntrinsic(DV, DIVar, DIExpr, NewLoc,
                                                  (Instruction *)nullptr);
    cast<Instruction *>(DbgVal)->insertAfter(&*Instr);
  } else {
    // RemoveDIs: if we're using the new debug-info format, allocate a
    // DbgVariableRecord directly instead of a dbg.value intrinsic.
    ValueAsMetadata *DVAM = ValueAsMetadata::get(DV);
    DbgVariableRecord *DVRec =
        new DbgVariableRecord(DVAM, DIVar, DIExpr, NewLoc.get());
    Instr->getParent()->insertDbgRecordAfter(DVRec, &*Instr);
  }
}

/// Inserts a llvm.dbg.value intrinsic before a store to an alloca'd value
/// that has an associated llvm.dbg.declare intrinsic.
void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
                                           StoreInst *SI, DIBuilder &Builder) {
  assert(DII->isAddressOfVariable() || isa<DbgAssignIntrinsic>(DII));
  auto *DIVar = DII->getVariable();
  assert(DIVar && "Missing variable");
  auto *DIExpr = DII->getExpression();
  Value *DV = SI->getValueOperand();

  DebugLoc NewLoc = getDebugValueLoc(DII);

  // If the alloca describes the variable itself, i.e. the expression in the
  // dbg.declare doesn't start with a dereference, we can perform the
  // conversion if the value covers the entire fragment of DII.
  // If the alloca describes the *address* of DIVar, i.e. DIExpr is
  // *just* a DW_OP_deref, we use DV as is for the dbg.value.
  // We conservatively ignore other dereferences, because the following two are
  // not equivalent:
  //  dbg.declare(alloca, ..., !Expr(deref, plus_uconstant, 2))
  //  dbg.value(DV, ..., !Expr(deref, plus_uconstant, 2))
  // The former is adding 2 to the address of the variable, whereas the latter
  // is adding 2 to the value of the variable. As such, we insist on just a
  // deref expression.
  bool CanConvert =
      DIExpr->isDeref() || (!DIExpr->startsWithDeref() &&
                            valueCoversEntireFragment(DV->getType(), DII));
  if (CanConvert) {
    insertDbgValueOrDbgVariableRecord(Builder, DV, DIVar, DIExpr, NewLoc,
                                      SI->getIterator());
    return;
  }

  // FIXME: If storing to a part of the variable described by the dbg.declare,
  // then we want to insert a dbg.value for the corresponding fragment.
  LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: " << *DII
                    << '\n');
  // For now, when there is a store to parts of the variable (but we do not
  // know which part) we insert a dbg.value intrinsic to indicate that we
  // know nothing about the variable's content.
  DV = PoisonValue::get(DV->getType());
  insertDbgValueOrDbgVariableRecord(Builder, DV, DIVar, DIExpr, NewLoc,
                                    SI->getIterator());
}

static DIExpression *dropInitialDeref(const DIExpression *DIExpr) {
  int NumEltDropped = DIExpr->getElements()[0] == dwarf::DW_OP_LLVM_arg ? 3 : 1;
  return DIExpression::get(DIExpr->getContext(),
                           DIExpr->getElements().drop_front(NumEltDropped));
}

void llvm::InsertDebugValueAtStoreLoc(DbgVariableIntrinsic *DII, StoreInst *SI,
                                      DIBuilder &Builder) {
  auto *DIVar = DII->getVariable();
  assert(DIVar && "Missing variable");
  auto *DIExpr = DII->getExpression();
  DIExpr = dropInitialDeref(DIExpr);
  Value *DV = SI->getValueOperand();

  DebugLoc NewLoc = getDebugValueLoc(DII);

  insertDbgValueOrDbgVariableRecord(Builder, DV, DIVar, DIExpr, NewLoc,
                                    SI->getIterator());
}

/// Inserts a llvm.dbg.value intrinsic before a load of an alloca'd value
/// that has an associated llvm.dbg.declare intrinsic.
void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
                                           LoadInst *LI, DIBuilder &Builder) {
  auto *DIVar = DII->getVariable();
  auto *DIExpr = DII->getExpression();
  assert(DIVar && "Missing variable");

  if (!valueCoversEntireFragment(LI->getType(), DII)) {
    // FIXME: If only referring to a part of the variable described by the
    // dbg.declare, then we want to insert a dbg.value for the corresponding
    // fragment.
    LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "
                      << *DII << '\n');
    return;
  }

  DebugLoc NewLoc = getDebugValueLoc(DII);

  // We are now tracking the loaded value instead of the address. In the
  // future if multi-location support is added to the IR, it might be
  // preferable to keep tracking both the loaded value and the original
  // address in case the alloca can not be elided.
  insertDbgValueOrDbgVariableRecordAfter(Builder, LI, DIVar, DIExpr, NewLoc,
                                         LI->getIterator());
}

void llvm::ConvertDebugDeclareToDebugValue(DbgVariableRecord *DVR,
                                           StoreInst *SI, DIBuilder &Builder) {
  assert(DVR->isAddressOfVariable() || DVR->isDbgAssign());
  auto *DIVar = DVR->getVariable();
  assert(DIVar && "Missing variable");
  auto *DIExpr = DVR->getExpression();
  Value *DV = SI->getValueOperand();

  DebugLoc NewLoc = getDebugValueLoc(DVR);

  // If the alloca describes the variable itself, i.e. the expression in the
  // dbg.declare doesn't start with a dereference, we can perform the
  // conversion if the value covers the entire fragment of DVR.
  // If the alloca describes the *address* of DIVar, i.e. DIExpr is
  // *just* a DW_OP_deref, we use DV as is for the dbg.value.
  // We conservatively ignore other dereferences, because the following two are
  // not equivalent:
  //  dbg.declare(alloca, ..., !Expr(deref, plus_uconstant, 2))
  //  dbg.value(DV, ..., !Expr(deref, plus_uconstant, 2))
  // The former is adding 2 to the address of the variable, whereas the latter
  // is adding 2 to the value of the variable. As such, we insist on just a
  // deref expression.
  bool CanConvert =
      DIExpr->isDeref() || (!DIExpr->startsWithDeref() &&
                            valueCoversEntireFragment(DV->getType(), DVR));
  if (CanConvert) {
    insertDbgValueOrDbgVariableRecord(Builder, DV, DIVar, DIExpr, NewLoc,
                                      SI->getIterator());
    return;
  }

  // FIXME: If storing to a part of the variable described by the dbg.declare,
  // then we want to insert a dbg.value for the corresponding fragment.
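  // For example (illustrative): a store of an i32 into the low bytes of an
  // i64 variable lands here, because the stored value does not cover the
  // variable's entire fragment.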
  LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: " << *DVR
                    << '\n');
  assert(UseNewDbgInfoFormat);

  // For now, when there is a store to parts of the variable (but we do not
  // know which part) we insert a dbg.value intrinsic to indicate that we
  // know nothing about the variable's content.
  DV = PoisonValue::get(DV->getType());
  ValueAsMetadata *DVAM = ValueAsMetadata::get(DV);
  DbgVariableRecord *NewDVR =
      new DbgVariableRecord(DVAM, DIVar, DIExpr, NewLoc.get());
  SI->getParent()->insertDbgRecordBefore(NewDVR, SI->getIterator());
}

void llvm::InsertDebugValueAtStoreLoc(DbgVariableRecord *DVR, StoreInst *SI,
                                      DIBuilder &Builder) {
  auto *DIVar = DVR->getVariable();
  assert(DIVar && "Missing variable");
  auto *DIExpr = DVR->getExpression();
  DIExpr = dropInitialDeref(DIExpr);
  Value *DV = SI->getValueOperand();

  DebugLoc NewLoc = getDebugValueLoc(DVR);

  insertDbgValueOrDbgVariableRecord(Builder, DV, DIVar, DIExpr, NewLoc,
                                    SI->getIterator());
}

/// Inserts a llvm.dbg.value intrinsic after a phi that has an associated
/// llvm.dbg.declare intrinsic.
void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
                                           PHINode *APN, DIBuilder &Builder) {
  auto *DIVar = DII->getVariable();
  auto *DIExpr = DII->getExpression();
  assert(DIVar && "Missing variable");

  if (PhiHasDebugValue(DIVar, DIExpr, APN))
    return;

  if (!valueCoversEntireFragment(APN->getType(), DII)) {
    // FIXME: If only referring to a part of the variable described by the
    // dbg.declare, then we want to insert a dbg.value for the corresponding
    // fragment.
    LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "
                      << *DII << '\n');
    return;
  }

  BasicBlock *BB = APN->getParent();
  auto InsertionPt = BB->getFirstInsertionPt();

  DebugLoc NewLoc = getDebugValueLoc(DII);

  // The block may be a catchswitch block, which does not have a valid
  // insertion point.
  // FIXME: Insert dbg.value markers in the successors when appropriate.
  if (InsertionPt != BB->end()) {
    insertDbgValueOrDbgVariableRecord(Builder, APN, DIVar, DIExpr, NewLoc,
                                      InsertionPt);
  }
}

void llvm::ConvertDebugDeclareToDebugValue(DbgVariableRecord *DVR, LoadInst *LI,
                                           DIBuilder &Builder) {
  auto *DIVar = DVR->getVariable();
  auto *DIExpr = DVR->getExpression();
  assert(DIVar && "Missing variable");

  if (!valueCoversEntireFragment(LI->getType(), DVR)) {
    // FIXME: If only referring to a part of the variable described by the
    // dbg.declare, then we want to insert a DbgVariableRecord for the
    // corresponding fragment.
    LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to DbgVariableRecord: "
                      << *DVR << '\n');
    return;
  }

  DebugLoc NewLoc = getDebugValueLoc(DVR);

  // We are now tracking the loaded value instead of the address. In the
  // future if multi-location support is added to the IR, it might be
  // preferable to keep tracking both the loaded value and the original
  // address in case the alloca can not be elided.
  assert(UseNewDbgInfoFormat);

  // Create a DbgVariableRecord directly and insert.
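  // Inserting after the load (rather than before it) means the record
  // describes the loaded value from the first point where it is available.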
  ValueAsMetadata *LIVAM = ValueAsMetadata::get(LI);
  DbgVariableRecord *DV =
      new DbgVariableRecord(LIVAM, DIVar, DIExpr, NewLoc.get());
  LI->getParent()->insertDbgRecordAfter(DV, LI);
}

/// Determine whether this alloca is either a VLA or an array.
static bool isArray(AllocaInst *AI) {
  return AI->isArrayAllocation() ||
         (AI->getAllocatedType() && AI->getAllocatedType()->isArrayTy());
}

/// Determine whether this alloca is a structure.
static bool isStructure(AllocaInst *AI) {
  return AI->getAllocatedType() && AI->getAllocatedType()->isStructTy();
}

void llvm::ConvertDebugDeclareToDebugValue(DbgVariableRecord *DVR, PHINode *APN,
                                           DIBuilder &Builder) {
  auto *DIVar = DVR->getVariable();
  auto *DIExpr = DVR->getExpression();
  assert(DIVar && "Missing variable");

  if (PhiHasDebugValue(DIVar, DIExpr, APN))
    return;

  if (!valueCoversEntireFragment(APN->getType(), DVR)) {
    // FIXME: If only referring to a part of the variable described by the
    // dbg.declare, then we want to insert a DbgVariableRecord for the
    // corresponding fragment.
    LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to DbgVariableRecord: "
                      << *DVR << '\n');
    return;
  }

  BasicBlock *BB = APN->getParent();
  auto InsertionPt = BB->getFirstInsertionPt();

  DebugLoc NewLoc = getDebugValueLoc(DVR);

  // The block may be a catchswitch block, which does not have a valid
  // insertion point.
  // FIXME: Insert DbgVariableRecord markers in the successors when
  // appropriate.
  if (InsertionPt != BB->end()) {
    insertDbgValueOrDbgVariableRecord(Builder, APN, DIVar, DIExpr, NewLoc,
                                      InsertionPt);
  }
}

/// LowerDbgDeclare - Lowers llvm.dbg.declare intrinsics into an appropriate
/// set of llvm.dbg.value intrinsics.
bool llvm::LowerDbgDeclare(Function &F) {
  bool Changed = false;
  DIBuilder DIB(*F.getParent(), /*AllowUnresolved*/ false);
  SmallVector<DbgDeclareInst *, 4> Dbgs;
  SmallVector<DbgVariableRecord *> DVRs;
  for (auto &FI : F) {
    for (Instruction &BI : FI) {
      if (auto *DDI = dyn_cast<DbgDeclareInst>(&BI))
        Dbgs.push_back(DDI);
      for (DbgVariableRecord &DVR : filterDbgVars(BI.getDbgRecordRange())) {
        if (DVR.getType() == DbgVariableRecord::LocationType::Declare)
          DVRs.push_back(&DVR);
      }
    }
  }

  if (Dbgs.empty() && DVRs.empty())
    return Changed;

  auto LowerOne = [&](auto *DDI) {
    AllocaInst *AI =
        dyn_cast_or_null<AllocaInst>(DDI->getVariableLocationOp(0));
    // If this is an alloca for a scalar variable, insert a dbg.value
    // at each load and store to the alloca and erase the dbg.declare.
    // The dbg.values allow tracking a variable even if it is not
    // stored on the stack, while the dbg.declare can only describe
    // the stack slot (and at a lexical-scope granularity). Later
    // passes will attempt to elide the stack slot.
    if (!AI || isArray(AI) || isStructure(AI))
      return;

    // A volatile load/store means that the alloca can't be elided anyway.
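    // For example (illustrative): a single volatile store to the alloca is
    // enough to keep the variable on the stack, so the dbg.declare is left
    // in place by the early return below.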
    if (llvm::any_of(AI->users(), [](User *U) -> bool {
          if (LoadInst *LI = dyn_cast<LoadInst>(U))
            return LI->isVolatile();
          if (StoreInst *SI = dyn_cast<StoreInst>(U))
            return SI->isVolatile();
          return false;
        }))
      return;

    SmallVector<const Value *, 8> WorkList;
    WorkList.push_back(AI);
    while (!WorkList.empty()) {
      const Value *V = WorkList.pop_back_val();
      for (const auto &AIUse : V->uses()) {
        User *U = AIUse.getUser();
        if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
          if (AIUse.getOperandNo() == 1)
            ConvertDebugDeclareToDebugValue(DDI, SI, DIB);
        } else if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
          ConvertDebugDeclareToDebugValue(DDI, LI, DIB);
        } else if (CallInst *CI = dyn_cast<CallInst>(U)) {
          // This is a call by-value or some other instruction that takes a
          // pointer to the variable. Insert a *value* intrinsic that describes
          // the variable by dereferencing the alloca.
          if (!CI->isLifetimeStartOrEnd()) {
            DebugLoc NewLoc = getDebugValueLoc(DDI);
            auto *DerefExpr =
                DIExpression::append(DDI->getExpression(), dwarf::DW_OP_deref);
            insertDbgValueOrDbgVariableRecord(DIB, AI, DDI->getVariable(),
                                              DerefExpr, NewLoc,
                                              CI->getIterator());
          }
        } else if (BitCastInst *BI = dyn_cast<BitCastInst>(U)) {
          if (BI->getType()->isPointerTy())
            WorkList.push_back(BI);
        }
      }
    }
    DDI->eraseFromParent();
    Changed = true;
  };

  for_each(Dbgs, LowerOne);
  for_each(DVRs, LowerOne);

  if (Changed)
    for (BasicBlock &BB : F)
      RemoveRedundantDbgInstrs(&BB);

  return Changed;
}

// RemoveDIs: re-implementation of insertDebugValuesForPHIs, but which pulls the
// debug-info out of the block's DbgVariableRecords rather than dbg.value
// intrinsics.
static void
insertDbgVariableRecordsForPHIs(BasicBlock *BB,
                                SmallVectorImpl<PHINode *> &InsertedPHIs) {
  assert(BB && "No BasicBlock to clone DbgVariableRecord(s) from.");
  if (InsertedPHIs.size() == 0)
    return;

  // Map existing PHI nodes to their DbgVariableRecords.
  DenseMap<Value *, DbgVariableRecord *> DbgValueMap;
  for (auto &I : *BB) {
    for (DbgVariableRecord &DVR : filterDbgVars(I.getDbgRecordRange())) {
      for (Value *V : DVR.location_ops())
        if (auto *Loc = dyn_cast_or_null<PHINode>(V))
          DbgValueMap.insert({Loc, &DVR});
    }
  }
  if (DbgValueMap.size() == 0)
    return;

  // Map a pair of the destination BB and old DbgVariableRecord to the new
  // DbgVariableRecord, so that if a DbgVariableRecord is being rewritten to use
  // more than one of the inserted PHIs in the same destination BB, we can
  // update the same DbgVariableRecord with all the new PHIs instead of creating
  // one copy for each.
  MapVector<std::pair<BasicBlock *, DbgVariableRecord *>, DbgVariableRecord *>
      NewDbgValueMap;
  // Then iterate through the new PHIs and look to see if they use one of the
  // previously mapped PHIs. If so, create a new DbgVariableRecord that will
  // propagate the info through the new PHI. If we use more than one new PHI in
  // a single destination BB with the same old dbg.value, merge the updates so
  // that we get a single new DbgVariableRecord with all the new PHIs.
  for (auto PHI : InsertedPHIs) {
    BasicBlock *Parent = PHI->getParent();
    // Avoid inserting a debug-info record into an EH block.
    if (Parent->getFirstNonPHI()->isEHPad())
      continue;
    for (auto VI : PHI->operand_values()) {
      auto V = DbgValueMap.find(VI);
      if (V != DbgValueMap.end()) {
        DbgVariableRecord *DbgII = cast<DbgVariableRecord>(V->second);
        auto NewDI = NewDbgValueMap.find({Parent, DbgII});
        if (NewDI == NewDbgValueMap.end()) {
          DbgVariableRecord *NewDbgII = DbgII->clone();
          NewDI = NewDbgValueMap.insert({{Parent, DbgII}, NewDbgII}).first;
        }
        DbgVariableRecord *NewDbgII = NewDI->second;
        // If PHI contains VI as an operand more than once, we may have
        // already replaced it in NewDbgII; confirm that it is still present.
        if (is_contained(NewDbgII->location_ops(), VI))
          NewDbgII->replaceVariableLocationOp(VI, PHI);
      }
    }
  }
  // Insert the new DbgVariableRecords into their destination blocks.
  for (auto DI : NewDbgValueMap) {
    BasicBlock *Parent = DI.first.first;
    DbgVariableRecord *NewDbgII = DI.second;
    auto InsertionPt = Parent->getFirstInsertionPt();
    assert(InsertionPt != Parent->end() && "Ill-formed basic block");

    Parent->insertDbgRecordBefore(NewDbgII, InsertionPt);
  }
}

/// Propagate dbg.value intrinsics through the newly inserted PHIs.
void llvm::insertDebugValuesForPHIs(BasicBlock *BB,
                                    SmallVectorImpl<PHINode *> &InsertedPHIs) {
  assert(BB && "No BasicBlock to clone dbg.value(s) from.");
  if (InsertedPHIs.size() == 0)
    return;

  insertDbgVariableRecordsForPHIs(BB, InsertedPHIs);

  // Map existing PHI nodes to their dbg.values.
  ValueToValueMapTy DbgValueMap;
  for (auto &I : *BB) {
    if (auto DbgII = dyn_cast<DbgVariableIntrinsic>(&I)) {
      for (Value *V : DbgII->location_ops())
        if (auto *Loc = dyn_cast_or_null<PHINode>(V))
          DbgValueMap.insert({Loc, DbgII});
    }
  }
  if (DbgValueMap.size() == 0)
    return;

  // Map a pair of the destination BB and old dbg.value to the new dbg.value,
  // so that if a dbg.value is being rewritten to use more than one of the
  // inserted PHIs in the same destination BB, we can update the same dbg.value
  // with all the new PHIs instead of creating one copy for each.
  MapVector<std::pair<BasicBlock *, DbgVariableIntrinsic *>,
            DbgVariableIntrinsic *>
      NewDbgValueMap;
  // Then iterate through the new PHIs and look to see if they use one of the
  // previously mapped PHIs. If so, create a new dbg.value intrinsic that will
  // propagate the info through the new PHI. If we use more than one new PHI in
  // a single destination BB with the same old dbg.value, merge the updates so
  // that we get a single new dbg.value with all the new PHIs.
  for (auto *PHI : InsertedPHIs) {
    BasicBlock *Parent = PHI->getParent();
    // Avoid inserting an intrinsic into an EH block.
    if (Parent->getFirstNonPHI()->isEHPad())
      continue;
    for (auto *VI : PHI->operand_values()) {
      auto V = DbgValueMap.find(VI);
      if (V != DbgValueMap.end()) {
        auto *DbgII = cast<DbgVariableIntrinsic>(V->second);
        auto [NewDI, Inserted] = NewDbgValueMap.try_emplace({Parent, DbgII});
        if (Inserted)
          NewDI->second = cast<DbgVariableIntrinsic>(DbgII->clone());
        DbgVariableIntrinsic *NewDbgII = NewDI->second;
        // If PHI contains VI as an operand more than once, we may have
        // already replaced it in NewDbgII; confirm that it is still present.
        if (is_contained(NewDbgII->location_ops(), VI))
          NewDbgII->replaceVariableLocationOp(VI, PHI);
      }
    }
  }
  // Insert the new dbg.values into their destination blocks.
  for (auto DI : NewDbgValueMap) {
    BasicBlock *Parent = DI.first.first;
    auto *NewDbgII = DI.second;
    auto InsertionPt = Parent->getFirstInsertionPt();
    assert(InsertionPt != Parent->end() && "Ill-formed basic block");
    NewDbgII->insertBefore(&*InsertionPt);
  }
}

bool llvm::replaceDbgDeclare(Value *Address, Value *NewAddress,
                             DIBuilder &Builder, uint8_t DIExprFlags,
                             int Offset) {
  TinyPtrVector<DbgDeclareInst *> DbgDeclares = findDbgDeclares(Address);
  TinyPtrVector<DbgVariableRecord *> DVRDeclares = findDVRDeclares(Address);

  auto ReplaceOne = [&](auto *DII) {
    assert(DII->getVariable() && "Missing variable");
    auto *DIExpr = DII->getExpression();
    DIExpr = DIExpression::prepend(DIExpr, DIExprFlags, Offset);
    DII->setExpression(DIExpr);
    DII->replaceVariableLocationOp(Address, NewAddress);
  };

  for_each(DbgDeclares, ReplaceOne);
  for_each(DVRDeclares, ReplaceOne);

  return !DbgDeclares.empty() || !DVRDeclares.empty();
}

static void updateOneDbgValueForAlloca(const DebugLoc &Loc,
                                       DILocalVariable *DIVar,
                                       DIExpression *DIExpr, Value *NewAddress,
                                       DbgValueInst *DVI,
                                       DbgVariableRecord *DVR,
                                       DIBuilder &Builder, int Offset) {
  assert(DIVar && "Missing variable");

  // This is an alloca-based dbg.value/DbgVariableRecord. The first thing it
  // should do with the alloca pointer is dereference it. Otherwise we don't
  // know how to handle it and give up.
  if (!DIExpr || DIExpr->getNumElements() < 1 ||
      DIExpr->getElement(0) != dwarf::DW_OP_deref)
    return;

  // Insert the offset before the first deref.
  if (Offset)
    DIExpr = DIExpression::prepend(DIExpr, 0, Offset);

  if (DVI) {
    DVI->setExpression(DIExpr);
    DVI->replaceVariableLocationOp(0u, NewAddress);
  } else {
    assert(DVR);
    DVR->setExpression(DIExpr);
    DVR->replaceVariableLocationOp(0u, NewAddress);
  }
}

void llvm::replaceDbgValueForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
                                    DIBuilder &Builder, int Offset) {
  SmallVector<DbgValueInst *, 1> DbgUsers;
  SmallVector<DbgVariableRecord *, 1> DPUsers;
  findDbgValues(DbgUsers, AI, &DPUsers);

  // Attempt to replace dbg.values that use this alloca.
  for (auto *DVI : DbgUsers)
    updateOneDbgValueForAlloca(DVI->getDebugLoc(), DVI->getVariable(),
                               DVI->getExpression(), NewAllocaAddress, DVI,
                               nullptr, Builder, Offset);

  // Replace any DbgVariableRecords that use this alloca.
  for (DbgVariableRecord *DVR : DPUsers)
    updateOneDbgValueForAlloca(DVR->getDebugLoc(), DVR->getVariable(),
                               DVR->getExpression(), NewAllocaAddress, nullptr,
                               DVR, Builder, Offset);
}

/// Salvage debug information for \p I where possible; mark the debug uses
/// undef if salvaging is not possible.
void llvm::salvageDebugInfo(Instruction &I) {
  SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
  SmallVector<DbgVariableRecord *, 1> DPUsers;
  findDbgUsers(DbgUsers, &I, &DPUsers);
  salvageDebugInfoForDbgValues(I, DbgUsers, DPUsers);
}

template <typename T> static void salvageDbgAssignAddress(T *Assign) {
  Instruction *I = dyn_cast<Instruction>(Assign->getAddress());
  // Only instructions can be salvaged at the moment.
  if (!I)
    return;

  assert(!Assign->getAddressExpression()->getFragmentInfo().has_value() &&
         "address-expression shouldn't have fragment info");

  // The address component of a dbg.assign cannot be variadic.
  uint64_t CurrentLocOps = 0;
  SmallVector<Value *, 4> AdditionalValues;
  SmallVector<uint64_t, 16> Ops;
  Value *NewV = salvageDebugInfoImpl(*I, CurrentLocOps, Ops, AdditionalValues);

  // Check if the salvage failed.
  if (!NewV)
    return;

  DIExpression *SalvagedExpr = DIExpression::appendOpsToArg(
      Assign->getAddressExpression(), Ops, 0, /*StackValue=*/false);
  assert(!SalvagedExpr->getFragmentInfo().has_value() &&
         "address-expression shouldn't have fragment info");

  SalvagedExpr = SalvagedExpr->foldConstantMath();

  // Salvage succeeds if no additional values are required.
  if (AdditionalValues.empty()) {
    Assign->setAddress(NewV);
    Assign->setAddressExpression(SalvagedExpr);
  } else {
    Assign->setKillAddress();
  }
}

void llvm::salvageDebugInfoForDbgValues(
    Instruction &I, ArrayRef<DbgVariableIntrinsic *> DbgUsers,
    ArrayRef<DbgVariableRecord *> DPUsers) {
  // These are arbitrarily chosen limits on the maximum number of values and
  // the maximum size of a debug expression we can salvage up to, used for
  // performance reasons.
  const unsigned MaxDebugArgs = 16;
  const unsigned MaxExpressionSize = 128;
  bool Salvaged = false;

  for (auto *DII : DbgUsers) {
    if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(DII)) {
      if (DAI->getAddress() == &I) {
        salvageDbgAssignAddress(DAI);
        Salvaged = true;
      }
      if (DAI->getValue() != &I)
        continue;
    }

    // Do not add DW_OP_stack_value for DbgDeclare, because they are implicitly
    // pointing out the value as a DWARF memory location description.
    bool StackValue = isa<DbgValueInst>(DII);
    auto DIILocation = DII->location_ops();
    assert(
        is_contained(DIILocation, &I) &&
        "DbgVariableIntrinsic must use salvaged instruction as its location");
    SmallVector<Value *, 4> AdditionalValues;
    // `I` may appear more than once in DII's location ops, and each use of `I`
    // must be updated in the DIExpression and potentially have additional
    // values added; thus we call salvageDebugInfoImpl for each `I` instance in
    // DIILocation.
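    // Illustrative example: for a variadic location list such as
    // !DIArgList(i64 %i, i64 %i), both uses of %i are rewritten, with
    // salvage ops appended to the expression once per location index.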
    Value *Op0 = nullptr;
    DIExpression *SalvagedExpr = DII->getExpression();
    auto LocItr = find(DIILocation, &I);
    while (SalvagedExpr && LocItr != DIILocation.end()) {
      SmallVector<uint64_t, 16> Ops;
      unsigned LocNo = std::distance(DIILocation.begin(), LocItr);
      uint64_t CurrentLocOps = SalvagedExpr->getNumLocationOperands();
      Op0 = salvageDebugInfoImpl(I, CurrentLocOps, Ops, AdditionalValues);
      if (!Op0)
        break;
      SalvagedExpr =
          DIExpression::appendOpsToArg(SalvagedExpr, Ops, LocNo, StackValue);
      LocItr = std::find(++LocItr, DIILocation.end(), &I);
    }
    // salvageDebugInfoImpl should fail on examining the first element of
    // DbgUsers, or none of them; if it failed, there is no point in examining
    // the remaining users.
    if (!Op0)
      break;

    SalvagedExpr = SalvagedExpr->foldConstantMath();
    DII->replaceVariableLocationOp(&I, Op0);
    bool IsValidSalvageExpr =
        SalvagedExpr->getNumElements() <= MaxExpressionSize;
    if (AdditionalValues.empty() && IsValidSalvageExpr) {
      DII->setExpression(SalvagedExpr);
    } else if (isa<DbgValueInst>(DII) && IsValidSalvageExpr &&
               DII->getNumVariableLocationOps() + AdditionalValues.size() <=
                   MaxDebugArgs) {
      DII->addVariableLocationOps(AdditionalValues, SalvagedExpr);
    } else {
      // Do not salvage using DIArgList for dbg.declare, as it is not currently
      // supported in those instructions. Also do not salvage if the resulting
      // DIArgList would contain an unreasonably large number of values.
      DII->setKillLocation();
    }
    LLVM_DEBUG(dbgs() << "SALVAGE: " << *DII << '\n');
    Salvaged = true;
  }
  // Duplicate of above block for DbgVariableRecords.
  for (auto *DVR : DPUsers) {
    if (DVR->isDbgAssign()) {
      if (DVR->getAddress() == &I) {
        salvageDbgAssignAddress(DVR);
        Salvaged = true;
      }
      if (DVR->getValue() != &I)
        continue;
    }

    // Do not add DW_OP_stack_value for DbgDeclare and DbgAddr, because they
    // are implicitly pointing out the value as a DWARF memory location
    // description.
    bool StackValue =
        DVR->getType() != DbgVariableRecord::LocationType::Declare;
    auto DVRLocation = DVR->location_ops();
    assert(
        is_contained(DVRLocation, &I) &&
        "DbgVariableIntrinsic must use salvaged instruction as its location");
    SmallVector<Value *, 4> AdditionalValues;
    // 'I' may appear more than once in DVR's location ops, and each use of 'I'
    // must be updated in the DIExpression and potentially have additional
    // values added; thus we call salvageDebugInfoImpl for each 'I' instance in
    // DVRLocation.
    Value *Op0 = nullptr;
    DIExpression *SalvagedExpr = DVR->getExpression();
    auto LocItr = find(DVRLocation, &I);
    while (SalvagedExpr && LocItr != DVRLocation.end()) {
      SmallVector<uint64_t, 16> Ops;
      unsigned LocNo = std::distance(DVRLocation.begin(), LocItr);
      uint64_t CurrentLocOps = SalvagedExpr->getNumLocationOperands();
      Op0 = salvageDebugInfoImpl(I, CurrentLocOps, Ops, AdditionalValues);
      if (!Op0)
        break;
      SalvagedExpr =
          DIExpression::appendOpsToArg(SalvagedExpr, Ops, LocNo, StackValue);
      LocItr = std::find(++LocItr, DVRLocation.end(), &I);
    }
    // salvageDebugInfoImpl should fail on examining the first element of
    // DbgUsers, or none of them; if it failed, there is no point in examining
    // the remaining users.
    if (!Op0)
      break;

    SalvagedExpr = SalvagedExpr->foldConstantMath();
    DVR->replaceVariableLocationOp(&I, Op0);
    bool IsValidSalvageExpr =
        SalvagedExpr->getNumElements() <= MaxExpressionSize;
    if (AdditionalValues.empty() && IsValidSalvageExpr) {
      DVR->setExpression(SalvagedExpr);
    } else if (DVR->getType() != DbgVariableRecord::LocationType::Declare &&
               IsValidSalvageExpr &&
               DVR->getNumVariableLocationOps() + AdditionalValues.size() <=
                   MaxDebugArgs) {
      DVR->addVariableLocationOps(AdditionalValues, SalvagedExpr);
    } else {
      // Do not salvage using DIArgList for dbg.addr/dbg.declare, as it is
      // currently only valid for stack value expressions.
      // Also do not salvage if the resulting DIArgList would contain an
      // unreasonably large number of values.
      DVR->setKillLocation();
    }
    LLVM_DEBUG(dbgs() << "SALVAGE: " << *DVR << '\n');
    Salvaged = true;
  }

  if (Salvaged)
    return;

  for (auto *DII : DbgUsers)
    DII->setKillLocation();

  for (auto *DVR : DPUsers)
    DVR->setKillLocation();
}

Value *getSalvageOpsForGEP(GetElementPtrInst *GEP, const DataLayout &DL,
                           uint64_t CurrentLocOps,
                           SmallVectorImpl<uint64_t> &Opcodes,
                           SmallVectorImpl<Value *> &AdditionalValues) {
  unsigned BitWidth = DL.getIndexSizeInBits(GEP->getPointerAddressSpace());
  // Rewrite a GEP into a DIExpression.
  SmallMapVector<Value *, APInt, 4> VariableOffsets;
  APInt ConstantOffset(BitWidth, 0);
  if (!GEP->collectOffset(DL, BitWidth, VariableOffsets, ConstantOffset))
    return nullptr;
  if (!VariableOffsets.empty() && !CurrentLocOps) {
    Opcodes.insert(Opcodes.begin(), {dwarf::DW_OP_LLVM_arg, 0});
    CurrentLocOps = 1;
  }
  for (const auto &Offset : VariableOffsets) {
    AdditionalValues.push_back(Offset.first);
    assert(Offset.second.isStrictlyPositive() &&
           "Expected strictly positive multiplier for offset.");
    Opcodes.append({dwarf::DW_OP_LLVM_arg, CurrentLocOps++, dwarf::DW_OP_constu,
                    Offset.second.getZExtValue(), dwarf::DW_OP_mul,
                    dwarf::DW_OP_plus});
  }
  DIExpression::appendOffset(Opcodes, ConstantOffset.getSExtValue());
  return GEP->getOperand(0);
}

uint64_t getDwarfOpForBinOp(Instruction::BinaryOps Opcode) {
  switch (Opcode) {
  case Instruction::Add:
    return dwarf::DW_OP_plus;
  case Instruction::Sub:
    return dwarf::DW_OP_minus;
  case Instruction::Mul:
    return dwarf::DW_OP_mul;
  case Instruction::SDiv:
    return dwarf::DW_OP_div;
  case Instruction::SRem:
    return dwarf::DW_OP_mod;
  case Instruction::Or:
    return dwarf::DW_OP_or;
  case Instruction::And:
    return dwarf::DW_OP_and;
  case Instruction::Xor:
    return dwarf::DW_OP_xor;
  case Instruction::Shl:
    return dwarf::DW_OP_shl;
  case Instruction::LShr:
    return dwarf::DW_OP_shr;
  case Instruction::AShr:
    return dwarf::DW_OP_shra;
  default:
    // TODO: Salvage from each kind of binop we know about.
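    // (For example, udiv/urem and all floating-point binops currently fall
    // through to here and are not salvaged.)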
    return 0;
  }
}

static void handleSSAValueOperands(uint64_t CurrentLocOps,
                                   SmallVectorImpl<uint64_t> &Opcodes,
                                   SmallVectorImpl<Value *> &AdditionalValues,
                                   Instruction *I) {
  if (!CurrentLocOps) {
    Opcodes.append({dwarf::DW_OP_LLVM_arg, 0});
    CurrentLocOps = 1;
  }
  Opcodes.append({dwarf::DW_OP_LLVM_arg, CurrentLocOps});
  AdditionalValues.push_back(I->getOperand(1));
}

Value *getSalvageOpsForBinOp(BinaryOperator *BI, uint64_t CurrentLocOps,
                             SmallVectorImpl<uint64_t> &Opcodes,
                             SmallVectorImpl<Value *> &AdditionalValues) {
  // Handle binary operations with constant integer operands as a special case.
  auto *ConstInt = dyn_cast<ConstantInt>(BI->getOperand(1));
  // Values wider than 64 bits cannot be represented within a DIExpression.
  if (ConstInt && ConstInt->getBitWidth() > 64)
    return nullptr;

  Instruction::BinaryOps BinOpcode = BI->getOpcode();
  // Push any Constant Int operand onto the expression stack.
  if (ConstInt) {
    uint64_t Val = ConstInt->getSExtValue();
    // Add or Sub Instructions with a constant operand can potentially be
    // simplified.
    if (BinOpcode == Instruction::Add || BinOpcode == Instruction::Sub) {
      uint64_t Offset = BinOpcode == Instruction::Add ? Val : -int64_t(Val);
      DIExpression::appendOffset(Opcodes, Offset);
      return BI->getOperand(0);
    }
    Opcodes.append({dwarf::DW_OP_constu, Val});
  } else {
    handleSSAValueOperands(CurrentLocOps, Opcodes, AdditionalValues, BI);
  }

  // Add salvaged binary operator to expression stack, if it has a valid
  // representation in a DIExpression.
  uint64_t DwarfBinOp = getDwarfOpForBinOp(BinOpcode);
  if (!DwarfBinOp)
    return nullptr;
  Opcodes.push_back(DwarfBinOp);
  return BI->getOperand(0);
}

uint64_t getDwarfOpForIcmpPred(CmpInst::Predicate Pred) {
  // The signedness of the operation is implicit in the typed stack, signed and
  // unsigned instructions map to the same DWARF opcode.
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return dwarf::DW_OP_eq;
  case CmpInst::ICMP_NE:
    return dwarf::DW_OP_ne;
  case CmpInst::ICMP_UGT:
  case CmpInst::ICMP_SGT:
    return dwarf::DW_OP_gt;
  case CmpInst::ICMP_UGE:
  case CmpInst::ICMP_SGE:
    return dwarf::DW_OP_ge;
  case CmpInst::ICMP_ULT:
  case CmpInst::ICMP_SLT:
    return dwarf::DW_OP_lt;
  case CmpInst::ICMP_ULE:
  case CmpInst::ICMP_SLE:
    return dwarf::DW_OP_le;
  default:
    return 0;
  }
}

Value *getSalvageOpsForIcmpOp(ICmpInst *Icmp, uint64_t CurrentLocOps,
                              SmallVectorImpl<uint64_t> &Opcodes,
                              SmallVectorImpl<Value *> &AdditionalValues) {
  // Handle icmp operations with constant integer operands as a special case.
  auto *ConstInt = dyn_cast<ConstantInt>(Icmp->getOperand(1));
  // Values wider than 64 bits cannot be represented within a DIExpression.
  if (ConstInt && ConstInt->getBitWidth() > 64)
    return nullptr;
  // Push any Constant Int operand onto the expression stack.
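  // For example (illustrative): `icmp ult i64 %x, 8` is salvaged to the
  // expression ops `DW_OP_constu 8, DW_OP_lt` evaluated against %x.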
  if (ConstInt) {
    if (Icmp->isSigned())
      Opcodes.push_back(dwarf::DW_OP_consts);
    else
      Opcodes.push_back(dwarf::DW_OP_constu);
    uint64_t Val = ConstInt->getSExtValue();
    Opcodes.push_back(Val);
  } else {
    handleSSAValueOperands(CurrentLocOps, Opcodes, AdditionalValues, Icmp);
  }

  // Add the salvaged comparison to the expression stack, if it has a valid
  // representation in a DIExpression.
  uint64_t DwarfIcmpOp = getDwarfOpForIcmpPred(Icmp->getPredicate());
  if (!DwarfIcmpOp)
    return nullptr;
  Opcodes.push_back(DwarfIcmpOp);
  return Icmp->getOperand(0);
}

Value *llvm::salvageDebugInfoImpl(Instruction &I, uint64_t CurrentLocOps,
                                  SmallVectorImpl<uint64_t> &Ops,
                                  SmallVectorImpl<Value *> &AdditionalValues) {
  auto &M = *I.getModule();
  auto &DL = M.getDataLayout();

  if (auto *CI = dyn_cast<CastInst>(&I)) {
    Value *FromValue = CI->getOperand(0);
    // No-op casts are irrelevant for debug info.
    if (CI->isNoopCast(DL)) {
      return FromValue;
    }

    Type *Type = CI->getType();
    if (Type->isPointerTy())
      Type = DL.getIntPtrType(Type);
    // Casts other than Trunc, SExt, or ZExt to scalar types cannot be
    // salvaged.
    if (Type->isVectorTy() ||
        !(isa<TruncInst>(&I) || isa<SExtInst>(&I) || isa<ZExtInst>(&I) ||
          isa<IntToPtrInst>(&I) || isa<PtrToIntInst>(&I)))
      return nullptr;

    llvm::Type *FromType = FromValue->getType();
    if (FromType->isPointerTy())
      FromType = DL.getIntPtrType(FromType);

    unsigned FromTypeBitSize = FromType->getScalarSizeInBits();
    unsigned ToTypeBitSize = Type->getScalarSizeInBits();

    auto ExtOps = DIExpression::getExtOps(FromTypeBitSize, ToTypeBitSize,
                                          isa<SExtInst>(&I));
    Ops.append(ExtOps.begin(), ExtOps.end());
    return FromValue;
  }

  if (auto *GEP = dyn_cast<GetElementPtrInst>(&I))
    return getSalvageOpsForGEP(GEP, DL, CurrentLocOps, Ops, AdditionalValues);
  if (auto *BI = dyn_cast<BinaryOperator>(&I))
    return getSalvageOpsForBinOp(BI, CurrentLocOps, Ops, AdditionalValues);
  if (auto *IC = dyn_cast<ICmpInst>(&I))
    return getSalvageOpsForIcmpOp(IC, CurrentLocOps, Ops, AdditionalValues);

  // *Not* to do: we should not attempt to salvage load instructions,
  // because the validity and lifetime of a dbg.value containing
  // DW_OP_deref becomes difficult to analyze. See PR40628 for examples.
  return nullptr;
}

/// A replacement for a dbg.value expression.
using DbgValReplacement = std::optional<DIExpression *>;

/// Point debug users of \p From to \p To using exprs given by \p RewriteExpr,
/// possibly moving/undefing users to prevent use-before-def. Returns true if
/// changes are made.
static bool rewriteDebugUsers(
    Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT,
    function_ref<DbgValReplacement(DbgVariableIntrinsic &DII)> RewriteExpr,
    function_ref<DbgValReplacement(DbgVariableRecord &DVR)> RewriteDVRExpr) {
  // Find debug users of From.
  SmallVector<DbgVariableIntrinsic *, 1> Users;
  SmallVector<DbgVariableRecord *, 1> DPUsers;
  findDbgUsers(Users, &From, &DPUsers);
  if (Users.empty() && DPUsers.empty())
    return false;

  // Prevent use-before-def of To.
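  // A debug user rewritten to reference To, but not dominated by To's
  // definition, would be a use-before-def; such users are moved, salvaged,
  // or killed below.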
  bool Changed = false;

  SmallPtrSet<DbgVariableIntrinsic *, 1> UndefOrSalvage;
  SmallPtrSet<DbgVariableRecord *, 1> UndefOrSalvageDVR;
  if (isa<Instruction>(&To)) {
    bool DomPointAfterFrom = From.getNextNonDebugInstruction() == &DomPoint;

    for (auto *DII : Users) {
      // It's common to see a debug user between From and DomPoint. Move it
      // after DomPoint to preserve the variable update without any reordering.
      if (DomPointAfterFrom && DII->getNextNonDebugInstruction() == &DomPoint) {
        LLVM_DEBUG(dbgs() << "MOVE: " << *DII << '\n');
        DII->moveAfter(&DomPoint);
        Changed = true;

        // Users which otherwise aren't dominated by the replacement value must
        // be salvaged or deleted.
      } else if (!DT.dominates(&DomPoint, DII)) {
        UndefOrSalvage.insert(DII);
      }
    }

    // DbgVariableRecord implementation of the above.
    for (auto *DVR : DPUsers) {
      Instruction *MarkedInstr = DVR->getMarker()->MarkedInstr;
      Instruction *NextNonDebug = MarkedInstr;
      // The next instruction might still be a dbg.declare, skip over it.
      if (isa<DbgVariableIntrinsic>(NextNonDebug))
        NextNonDebug = NextNonDebug->getNextNonDebugInstruction();

      if (DomPointAfterFrom && NextNonDebug == &DomPoint) {
        LLVM_DEBUG(dbgs() << "MOVE: " << *DVR << '\n');
        DVR->removeFromParent();
        // Ensure there's a marker.
        DomPoint.getParent()->insertDbgRecordAfter(DVR, &DomPoint);
        Changed = true;
      } else if (!DT.dominates(&DomPoint, MarkedInstr)) {
        UndefOrSalvageDVR.insert(DVR);
      }
    }
  }

  // Update debug users without use-before-def risk.
  for (auto *DII : Users) {
    if (UndefOrSalvage.count(DII))
      continue;

    DbgValReplacement DVRepl = RewriteExpr(*DII);
    if (!DVRepl)
      continue;

    DII->replaceVariableLocationOp(&From, &To);
    DII->setExpression(*DVRepl);
    LLVM_DEBUG(dbgs() << "REWRITE: " << *DII << '\n');
    Changed = true;
  }
  for (auto *DVR : DPUsers) {
    if (UndefOrSalvageDVR.count(DVR))
      continue;

    DbgValReplacement DVRepl = RewriteDVRExpr(*DVR);
    if (!DVRepl)
      continue;

    DVR->replaceVariableLocationOp(&From, &To);
    DVR->setExpression(*DVRepl);
    LLVM_DEBUG(dbgs() << "REWRITE: " << *DVR << '\n');
    Changed = true;
  }

  if (!UndefOrSalvage.empty() || !UndefOrSalvageDVR.empty()) {
    // Try to salvage the remaining debug users.
    salvageDebugInfo(From);
    Changed = true;
  }

  return Changed;
}

/// Check if a bitcast between a value of type \p FromTy to type \p ToTy would
/// losslessly preserve the bits and semantics of the value. This predicate is
/// symmetric, i.e. swapping \p FromTy and \p ToTy should give the same result.
///
/// Note that Type::canLosslesslyBitCastTo is not suitable here because it
/// allows semantically non-equivalent bitcasts, such as <2 x i64> -> <4 x i32>,
/// and also does not allow lossless pointer <-> integer conversions.
static bool isBitCastSemanticsPreserving(const DataLayout &DL, Type *FromTy,
                                         Type *ToTy) {
  // Trivially compatible types.
  if (FromTy == ToTy)
    return true;

  // Handle compatible pointer <-> integer conversions.
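  // For example (illustrative): with 64-bit integral pointers, ptr <-> i64
  // preserves every bit, whereas ptr <-> i32 does not (size mismatch), and
  // non-integral pointers are never treated as lossless.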
  if (FromTy->isIntOrPtrTy() && ToTy->isIntOrPtrTy()) {
    bool SameSize = DL.getTypeSizeInBits(FromTy) == DL.getTypeSizeInBits(ToTy);
    bool LosslessConversion = !DL.isNonIntegralPointerType(FromTy) &&
                              !DL.isNonIntegralPointerType(ToTy);
    return SameSize && LosslessConversion;
  }

  // TODO: This is not exhaustive.
  return false;
}

bool llvm::replaceAllDbgUsesWith(Instruction &From, Value &To,
                                 Instruction &DomPoint, DominatorTree &DT) {
  // Exit early if From has no debug users.
  if (!From.isUsedByMetadata())
    return false;

  assert(&From != &To && "Can't replace something with itself");

  Type *FromTy = From.getType();
  Type *ToTy = To.getType();

  auto Identity = [&](DbgVariableIntrinsic &DII) -> DbgValReplacement {
    return DII.getExpression();
  };
  auto IdentityDVR = [&](DbgVariableRecord &DVR) -> DbgValReplacement {
    return DVR.getExpression();
  };

  // Handle no-op conversions.
  Module &M = *From.getModule();
  const DataLayout &DL = M.getDataLayout();
  if (isBitCastSemanticsPreserving(DL, FromTy, ToTy))
    return rewriteDebugUsers(From, To, DomPoint, DT, Identity, IdentityDVR);

  // Handle integer-to-integer widening and narrowing.
  // FIXME: Use DW_OP_convert when it's available everywhere.
  if (FromTy->isIntegerTy() && ToTy->isIntegerTy()) {
    uint64_t FromBits = FromTy->getPrimitiveSizeInBits();
    uint64_t ToBits = ToTy->getPrimitiveSizeInBits();
    assert(FromBits != ToBits && "Unexpected no-op conversion");

    // When the width of the result grows, assume that a debugger will only
    // access the low `FromBits` bits when inspecting the source variable.
    if (FromBits < ToBits)
      return rewriteDebugUsers(From, To, DomPoint, DT, Identity, IdentityDVR);

    // The width of the result has shrunk. Use sign/zero extension to describe
    // the source variable's high bits.
    auto SignOrZeroExt = [&](DbgVariableIntrinsic &DII) -> DbgValReplacement {
      DILocalVariable *Var = DII.getVariable();

      // Without knowing signedness, sign/zero extension isn't possible.
      auto Signedness = Var->getSignedness();
      if (!Signedness)
        return std::nullopt;

      bool Signed = *Signedness == DIBasicType::Signedness::Signed;
      return DIExpression::appendExt(DII.getExpression(), ToBits, FromBits,
                                     Signed);
    };
    // RemoveDIs: duplicate implementation working on DbgVariableRecords rather
    // than on dbg.value intrinsics.
    auto SignOrZeroExtDVR = [&](DbgVariableRecord &DVR) -> DbgValReplacement {
      DILocalVariable *Var = DVR.getVariable();

      // Without knowing signedness, sign/zero extension isn't possible.
      auto Signedness = Var->getSignedness();
      if (!Signedness)
        return std::nullopt;

      bool Signed = *Signedness == DIBasicType::Signedness::Signed;
      return DIExpression::appendExt(DVR.getExpression(), ToBits, FromBits,
                                     Signed);
    };
    return rewriteDebugUsers(From, To, DomPoint, DT, SignOrZeroExt,
                             SignOrZeroExtDVR);
  }

  // TODO: Floating-point conversions, vectors.
  return false;
}

bool llvm::handleUnreachableTerminator(
    Instruction *I, SmallVectorImpl<Value *> &PoisonedValues) {
  bool Changed = false;
  // RemoveDIs: erase debug-info on this instruction manually.
  I->dropDbgRecords();
  for (Use &U : I->operands()) {
    Value *Op = U.get();
    if (isa<Instruction>(Op) && !Op->getType()->isTokenTy()) {
      U.set(PoisonValue::get(Op->getType()));
      PoisonedValues.push_back(Op);
      Changed = true;
    }
  }

  return Changed;
}

std::pair<unsigned, unsigned>
llvm::removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB) {
  unsigned NumDeadInst = 0;
  unsigned NumDeadDbgInst = 0;
  // Delete the instructions backwards, which reduces the number of def-use
  // and use-def chains that have to be updated.
  Instruction *EndInst = BB->getTerminator(); // Last not to be deleted.
  SmallVector<Value *> Uses;
  handleUnreachableTerminator(EndInst, Uses);

  while (EndInst != &BB->front()) {
    // Delete the next to last instruction.
    Instruction *Inst = &*--EndInst->getIterator();
    if (!Inst->use_empty() && !Inst->getType()->isTokenTy())
      Inst->replaceAllUsesWith(PoisonValue::get(Inst->getType()));
    if (Inst->isEHPad() || Inst->getType()->isTokenTy()) {
      // EHPads can't have DbgVariableRecords attached to them, but it might be
      // possible for things with token type.
      Inst->dropDbgRecords();
      EndInst = Inst;
      continue;
    }
    if (isa<DbgInfoIntrinsic>(Inst))
      ++NumDeadDbgInst;
    else
      ++NumDeadInst;
    // RemoveDIs: erasing debug-info must be done manually.
    Inst->dropDbgRecords();
    Inst->eraseFromParent();
  }
  return {NumDeadInst, NumDeadDbgInst};
}

unsigned llvm::changeToUnreachable(Instruction *I, bool PreserveLCSSA,
                                   DomTreeUpdater *DTU,
                                   MemorySSAUpdater *MSSAU) {
  BasicBlock *BB = I->getParent();

  if (MSSAU)
    MSSAU->changeToUnreachable(I);

  SmallSet<BasicBlock *, 8> UniqueSuccessors;

  // Loop over all of the successors, removing BB's entry from any PHI
  // nodes.
  for (BasicBlock *Successor : successors(BB)) {
    Successor->removePredecessor(BB, PreserveLCSSA);
    if (DTU)
      UniqueSuccessors.insert(Successor);
  }
  auto *UI = new UnreachableInst(I->getContext(), I->getIterator());
  UI->setDebugLoc(I->getDebugLoc());

  // All instructions after this are dead.
  unsigned NumInstrsRemoved = 0;
  BasicBlock::iterator BBI = I->getIterator(), BBE = BB->end();
  while (BBI != BBE) {
    if (!BBI->use_empty())
      BBI->replaceAllUsesWith(PoisonValue::get(BBI->getType()));
    BBI++->eraseFromParent();
    ++NumInstrsRemoved;
  }
  if (DTU) {
    SmallVector<DominatorTree::UpdateType, 8> Updates;
    Updates.reserve(UniqueSuccessors.size());
    for (BasicBlock *UniqueSuccessor : UniqueSuccessors)
      Updates.push_back({DominatorTree::Delete, BB, UniqueSuccessor});
    DTU->applyUpdates(Updates);
  }
  BB->flushTerminatorDbgRecords();
  return NumInstrsRemoved;
}

CallInst *llvm::createCallMatchingInvoke(InvokeInst *II) {
  SmallVector<Value *, 8> Args(II->args());
  SmallVector<OperandBundleDef, 1> OpBundles;
  II->getOperandBundlesAsDefs(OpBundles);
  CallInst *NewCall = CallInst::Create(II->getFunctionType(),
                                       II->getCalledOperand(), Args, OpBundles);
  NewCall->setCallingConv(II->getCallingConv());
  NewCall->setAttributes(II->getAttributes());
  NewCall->setDebugLoc(II->getDebugLoc());
  NewCall->copyMetadata(*II);

  // If the invoke had profile metadata, try converting it for the CallInst.
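  // A call has a single execution count, so the invoke's branch weights are
  // collapsed into one total weight; if the total does not fit in 32 bits,
  // the metadata is dropped instead.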
  uint64_t TotalWeight;
  if (NewCall->extractProfTotalWeight(TotalWeight)) {
    // Set the total weight if it fits into i32, otherwise reset.
    MDBuilder MDB(NewCall->getContext());
    auto NewWeights = uint32_t(TotalWeight) != TotalWeight
                          ? nullptr
                          : MDB.createBranchWeights({uint32_t(TotalWeight)});
    NewCall->setMetadata(LLVMContext::MD_prof, NewWeights);
  }

  return NewCall;
}

// changeToCall - Convert the specified invoke into a normal call.
CallInst *llvm::changeToCall(InvokeInst *II, DomTreeUpdater *DTU) {
  CallInst *NewCall = createCallMatchingInvoke(II);
  NewCall->takeName(II);
  NewCall->insertBefore(II);
  II->replaceAllUsesWith(NewCall);

  // Follow the call by a branch to the normal destination.
  BasicBlock *NormalDestBB = II->getNormalDest();
  BranchInst::Create(NormalDestBB, II->getIterator());

  // Update PHI nodes in the unwind destination.
  BasicBlock *BB = II->getParent();
  BasicBlock *UnwindDestBB = II->getUnwindDest();
  UnwindDestBB->removePredecessor(BB);
  II->eraseFromParent();
  if (DTU)
    DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDestBB}});
  return NewCall;
}

BasicBlock *llvm::changeToInvokeAndSplitBasicBlock(CallInst *CI,
                                                   BasicBlock *UnwindEdge,
                                                   DomTreeUpdater *DTU) {
  BasicBlock *BB = CI->getParent();

  // Convert this function call into an invoke instruction. First, split the
  // basic block.
  BasicBlock *Split = SplitBlock(BB, CI, DTU, /*LI=*/nullptr, /*MSSAU*/ nullptr,
                                 CI->getName() + ".noexc");

  // Delete the unconditional branch inserted by SplitBlock.
  BB->back().eraseFromParent();

  // Create the new invoke instruction.
  SmallVector<Value *, 8> InvokeArgs(CI->args());
  SmallVector<OperandBundleDef, 1> OpBundles;

  CI->getOperandBundlesAsDefs(OpBundles);

  // Note: we're round tripping operand bundles through memory here, and that
  // can potentially be avoided with a cleverer API design that we do not have
  // as of this time.

  InvokeInst *II =
      InvokeInst::Create(CI->getFunctionType(), CI->getCalledOperand(), Split,
                         UnwindEdge, InvokeArgs, OpBundles, CI->getName(), BB);
  II->setDebugLoc(CI->getDebugLoc());
  II->setCallingConv(CI->getCallingConv());
  II->setAttributes(CI->getAttributes());
  II->setMetadata(LLVMContext::MD_prof, CI->getMetadata(LLVMContext::MD_prof));

  if (DTU)
    DTU->applyUpdates({{DominatorTree::Insert, BB, UnwindEdge}});

  // Make sure that anything using the call now uses the invoke! This also
  // updates the CallGraph if present, because it uses a WeakTrackingVH.
  CI->replaceAllUsesWith(II);

  // Delete the original call.
  Split->front().eraseFromParent();
  return Split;
}

static bool markAliveBlocks(Function &F,
                            SmallPtrSetImpl<BasicBlock *> &Reachable,
                            DomTreeUpdater *DTU = nullptr) {
  SmallVector<BasicBlock *, 128> Worklist;
  BasicBlock *BB = &F.front();
  Worklist.push_back(BB);
  Reachable.insert(BB);
  bool Changed = false;
  do {
    BB = Worklist.pop_back_val();

    // Do a quick scan of the basic block, turning any obviously unreachable
    // instructions into LLVM unreachable insts. The instruction combining pass
    // canonicalizes unreachable insts into stores to null or undef.
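    // For example (illustrative): `store i32 0, ptr null` in an address
    // space where dereferencing null is not defined is such a canonical
    // form; it is rewritten into an unreachable below.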
3050 for (Instruction &I : *BB) { 3051 if (auto *CI = dyn_cast<CallInst>(&I)) { 3052 Value *Callee = CI->getCalledOperand(); 3053 // Handle intrinsic calls. 3054 if (Function *F = dyn_cast<Function>(Callee)) { 3055 auto IntrinsicID = F->getIntrinsicID(); 3056 // Assumptions that are known to be false are equivalent to 3057 // unreachable. Also, if the condition is undefined, then we make the 3058 // choice most beneficial to the optimizer, and choose that to also be 3059 // unreachable. 3060 if (IntrinsicID == Intrinsic::assume) { 3061 if (match(CI->getArgOperand(0), m_CombineOr(m_Zero(), m_Undef()))) { 3062 // Don't insert a call to llvm.trap right before the unreachable. 3063 changeToUnreachable(CI, false, DTU); 3064 Changed = true; 3065 break; 3066 } 3067 } else if (IntrinsicID == Intrinsic::experimental_guard) { 3068 // A call to the guard intrinsic bails out of the current 3069 // compilation unit if the predicate passed to it is false. If the 3070 // predicate is a constant false, then we know the guard will bail 3071 // out of the current compile unconditionally, so all code following 3072 // it is dead. 3073 // 3074 // Note: unlike in llvm.assume, it is not "obviously profitable" for 3075 // guards to treat `undef` as `false` since a guard on `undef` can 3076 // still be useful for widening. 3077 if (match(CI->getArgOperand(0), m_Zero())) 3078 if (!isa<UnreachableInst>(CI->getNextNode())) { 3079 changeToUnreachable(CI->getNextNode(), false, DTU); 3080 Changed = true; 3081 break; 3082 } 3083 } 3084 } else if ((isa<ConstantPointerNull>(Callee) && 3085 !NullPointerIsDefined(CI->getFunction(), 3086 cast<PointerType>(Callee->getType()) 3087 ->getAddressSpace())) || 3088 isa<UndefValue>(Callee)) { 3089 changeToUnreachable(CI, false, DTU); 3090 Changed = true; 3091 break; 3092 } 3093 if (CI->doesNotReturn() && !CI->isMustTailCall()) { 3094 // If we found a call to a no-return function, insert an unreachable 3095 // instruction after it. Make sure there isn't *already* one there 3096 // though. 3097 if (!isa<UnreachableInst>(CI->getNextNonDebugInstruction())) { 3098 // Don't insert a call to llvm.trap right before the unreachable. 3099 changeToUnreachable(CI->getNextNonDebugInstruction(), false, DTU); 3100 Changed = true; 3101 } 3102 break; 3103 } 3104 } else if (auto *SI = dyn_cast<StoreInst>(&I)) { 3105 // Store to undef and store to null are undefined and used to signal 3106 // that they should be changed to unreachable by passes that can't 3107 // modify the CFG. 3108 3109 // Don't touch volatile stores. 3110 if (SI->isVolatile()) continue; 3111 3112 Value *Ptr = SI->getOperand(1); 3113 3114 if (isa<UndefValue>(Ptr) || 3115 (isa<ConstantPointerNull>(Ptr) && 3116 !NullPointerIsDefined(SI->getFunction(), 3117 SI->getPointerAddressSpace()))) { 3118 changeToUnreachable(SI, false, DTU); 3119 Changed = true; 3120 break; 3121 } 3122 } 3123 } 3124 3125 Instruction *Terminator = BB->getTerminator(); 3126 if (auto *II = dyn_cast<InvokeInst>(Terminator)) { 3127 // Turn invokes that call 'nounwind' functions into ordinary calls. 
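      // For example (illustrative IR), when @f is known nounwind:
      //   invoke void @f() to label %normal unwind label %lpad
      // becomes
      //   call void @f()
      //   br label %normal
      // and %lpad loses this block as a predecessor.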
3128 Value *Callee = II->getCalledOperand(); 3129 if ((isa<ConstantPointerNull>(Callee) && 3130 !NullPointerIsDefined(BB->getParent())) || 3131 isa<UndefValue>(Callee)) { 3132 changeToUnreachable(II, false, DTU); 3133 Changed = true; 3134 } else { 3135 if (II->doesNotReturn() && 3136 !isa<UnreachableInst>(II->getNormalDest()->front())) { 3137 // If we found an invoke of a no-return function, 3138 // create a new empty basic block with an `unreachable` terminator, 3139 // and set it as the normal destination for the invoke, 3140 // unless that is already the case. 3141 // Note that the original normal destination could have other uses. 3142 BasicBlock *OrigNormalDest = II->getNormalDest(); 3143 OrigNormalDest->removePredecessor(II->getParent()); 3144 LLVMContext &Ctx = II->getContext(); 3145 BasicBlock *UnreachableNormalDest = BasicBlock::Create( 3146 Ctx, OrigNormalDest->getName() + ".unreachable", 3147 II->getFunction(), OrigNormalDest); 3148 new UnreachableInst(Ctx, UnreachableNormalDest); 3149 II->setNormalDest(UnreachableNormalDest); 3150 if (DTU) 3151 DTU->applyUpdates( 3152 {{DominatorTree::Delete, BB, OrigNormalDest}, 3153 {DominatorTree::Insert, BB, UnreachableNormalDest}}); 3154 Changed = true; 3155 } 3156 if (II->doesNotThrow() && canSimplifyInvokeNoUnwind(&F)) { 3157 if (II->use_empty() && !II->mayHaveSideEffects()) { 3158 // jump to the normal destination branch. 3159 BasicBlock *NormalDestBB = II->getNormalDest(); 3160 BasicBlock *UnwindDestBB = II->getUnwindDest(); 3161 BranchInst::Create(NormalDestBB, II->getIterator()); 3162 UnwindDestBB->removePredecessor(II->getParent()); 3163 II->eraseFromParent(); 3164 if (DTU) 3165 DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDestBB}}); 3166 } else 3167 changeToCall(II, DTU); 3168 Changed = true; 3169 } 3170 } 3171 } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Terminator)) { 3172 // Remove catchpads which cannot be reached. 3173 struct CatchPadDenseMapInfo { 3174 static CatchPadInst *getEmptyKey() { 3175 return DenseMapInfo<CatchPadInst *>::getEmptyKey(); 3176 } 3177 3178 static CatchPadInst *getTombstoneKey() { 3179 return DenseMapInfo<CatchPadInst *>::getTombstoneKey(); 3180 } 3181 3182 static unsigned getHashValue(CatchPadInst *CatchPad) { 3183 return static_cast<unsigned>(hash_combine_range( 3184 CatchPad->value_op_begin(), CatchPad->value_op_end())); 3185 } 3186 3187 static bool isEqual(CatchPadInst *LHS, CatchPadInst *RHS) { 3188 if (LHS == getEmptyKey() || LHS == getTombstoneKey() || 3189 RHS == getEmptyKey() || RHS == getTombstoneKey()) 3190 return LHS == RHS; 3191 return LHS->isIdenticalTo(RHS); 3192 } 3193 }; 3194 3195 SmallDenseMap<BasicBlock *, int, 8> NumPerSuccessorCases; 3196 // Set of unique CatchPads. 
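      // (Illustrative note: the DenseMap below is used purely as a set keyed
      // by CatchPadDenseMapInfo, so handlers whose catchpads are structurally
      // identical collapse to a single entry and the duplicate handlers are
      // removed from the catchswitch.)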
3197       SmallDenseMap<CatchPadInst *, detail::DenseSetEmpty, 4,
3198                     CatchPadDenseMapInfo, detail::DenseSetPair<CatchPadInst *>>
3199           HandlerSet;
3200       detail::DenseSetEmpty Empty;
3201       for (CatchSwitchInst::handler_iterator I = CatchSwitch->handler_begin(),
3202                                              E = CatchSwitch->handler_end();
3203            I != E; ++I) {
3204         BasicBlock *HandlerBB = *I;
3205         if (DTU)
3206           ++NumPerSuccessorCases[HandlerBB];
3207         auto *CatchPad = cast<CatchPadInst>(HandlerBB->getFirstNonPHI());
3208         if (!HandlerSet.insert({CatchPad, Empty}).second) {
3209           if (DTU)
3210             --NumPerSuccessorCases[HandlerBB];
3211           CatchSwitch->removeHandler(I);
3212           --I;
3213           --E;
3214           Changed = true;
3215         }
3216       }
3217       if (DTU) {
3218         std::vector<DominatorTree::UpdateType> Updates;
3219         for (const std::pair<BasicBlock *, int> &I : NumPerSuccessorCases)
3220           if (I.second == 0)
3221             Updates.push_back({DominatorTree::Delete, BB, I.first});
3222         DTU->applyUpdates(Updates);
3223       }
3224     }
3225 
3226     Changed |= ConstantFoldTerminator(BB, true, nullptr, DTU);
3227     for (BasicBlock *Successor : successors(BB))
3228       if (Reachable.insert(Successor).second)
3229         Worklist.push_back(Successor);
3230   } while (!Worklist.empty());
3231   return Changed;
3232 }
3233 
3234 Instruction *llvm::removeUnwindEdge(BasicBlock *BB, DomTreeUpdater *DTU) {
3235   Instruction *TI = BB->getTerminator();
3236 
3237   if (auto *II = dyn_cast<InvokeInst>(TI))
3238     return changeToCall(II, DTU);
3239 
3240   Instruction *NewTI;
3241   BasicBlock *UnwindDest;
3242 
3243   if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
3244     NewTI = CleanupReturnInst::Create(CRI->getCleanupPad(), nullptr, CRI->getIterator());
3245     UnwindDest = CRI->getUnwindDest();
3246   } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(TI)) {
3247     auto *NewCatchSwitch = CatchSwitchInst::Create(
3248         CatchSwitch->getParentPad(), nullptr, CatchSwitch->getNumHandlers(),
3249         CatchSwitch->getName(), CatchSwitch->getIterator());
3250     for (BasicBlock *PadBB : CatchSwitch->handlers())
3251       NewCatchSwitch->addHandler(PadBB);
3252 
3253     NewTI = NewCatchSwitch;
3254     UnwindDest = CatchSwitch->getUnwindDest();
3255   } else {
3256     llvm_unreachable("Could not find unwind successor");
3257   }
3258 
3259   NewTI->takeName(TI);
3260   NewTI->setDebugLoc(TI->getDebugLoc());
3261   UnwindDest->removePredecessor(BB);
3262   TI->replaceAllUsesWith(NewTI);
3263   TI->eraseFromParent();
3264   if (DTU)
3265     DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDest}});
3266   return NewTI;
3267 }
3268 
3269 /// removeUnreachableBlocks - Remove blocks that are not reachable, even
3270 /// if they are in a dead cycle. Return true if a change was made, false
3271 /// otherwise.
3272 bool llvm::removeUnreachableBlocks(Function &F, DomTreeUpdater *DTU,
3273                                    MemorySSAUpdater *MSSAU) {
3274   SmallPtrSet<BasicBlock *, 16> Reachable;
3275   bool Changed = markAliveBlocks(F, Reachable, DTU);
3276 
3277   // If every block is reachable, there are no unreachable blocks to remove.
3278   if (Reachable.size() == F.size())
3279     return Changed;
3280 
3281   assert(Reachable.size() < F.size());
3282 
3283   // Are there any blocks left to actually delete?
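  // (Blocks the DomTreeUpdater has already queued for deletion are skipped
  // below so they are not erased twice.)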
3284   SmallSetVector<BasicBlock *, 8> BlocksToRemove;
3285   for (BasicBlock &BB : F) {
3286     // Skip reachable basic blocks.
3287     if (Reachable.count(&BB))
3288       continue;
3289     // Skip already-deleted blocks.
3290     if (DTU && DTU->isBBPendingDeletion(&BB))
3291       continue;
3292     BlocksToRemove.insert(&BB);
3293   }
3294 
3295   if (BlocksToRemove.empty())
3296     return Changed;
3297 
3298   Changed = true;
3299   NumRemoved += BlocksToRemove.size();
3300 
3301   if (MSSAU)
3302     MSSAU->removeBlocks(BlocksToRemove);
3303 
3304   DeleteDeadBlocks(BlocksToRemove.takeVector(), DTU);
3305 
3306   return Changed;
3307 }
3308 
3309 void llvm::combineMetadata(Instruction *K, const Instruction *J,
3310                            ArrayRef<unsigned> KnownIDs, bool DoesKMove) {
3311   SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
3312   K->dropUnknownNonDebugMetadata(KnownIDs);
3313   K->getAllMetadataOtherThanDebugLoc(Metadata);
3314   for (const auto &MD : Metadata) {
3315     unsigned Kind = MD.first;
3316     MDNode *JMD = J->getMetadata(Kind);
3317     MDNode *KMD = MD.second;
3318 
3319     switch (Kind) {
3320     default:
3321       K->setMetadata(Kind, nullptr); // Remove unknown metadata
3322       break;
3323     case LLVMContext::MD_dbg:
3324       llvm_unreachable("getAllMetadataOtherThanDebugLoc returned a MD_dbg");
3325     case LLVMContext::MD_DIAssignID:
3326       K->mergeDIAssignID(J);
3327       break;
3328     case LLVMContext::MD_tbaa:
3329       K->setMetadata(Kind, MDNode::getMostGenericTBAA(JMD, KMD));
3330       break;
3331     case LLVMContext::MD_alias_scope:
3332       K->setMetadata(Kind, MDNode::getMostGenericAliasScope(JMD, KMD));
3333       break;
3334     case LLVMContext::MD_noalias:
3335     case LLVMContext::MD_mem_parallel_loop_access:
3336       K->setMetadata(Kind, MDNode::intersect(JMD, KMD));
3337       break;
3338     case LLVMContext::MD_access_group:
3339       if (DoesKMove)
3340         K->setMetadata(LLVMContext::MD_access_group,
3341                        intersectAccessGroups(K, J));
3342       break;
3343     case LLVMContext::MD_range:
3344       if (DoesKMove || !K->hasMetadata(LLVMContext::MD_noundef))
3345         K->setMetadata(Kind, MDNode::getMostGenericRange(JMD, KMD));
3346       break;
3347     case LLVMContext::MD_fpmath:
3348       K->setMetadata(Kind, MDNode::getMostGenericFPMath(JMD, KMD));
3349       break;
3350     case LLVMContext::MD_invariant_load:
3351       // If K moves, only keep !invariant.load if it is present in both
3352       // instructions.
3353       if (DoesKMove)
3354         K->setMetadata(Kind, JMD);
3355       break;
3356     case LLVMContext::MD_nonnull:
3357       if (DoesKMove || !K->hasMetadata(LLVMContext::MD_noundef))
3358         K->setMetadata(Kind, JMD);
3359       break;
3360     case LLVMContext::MD_invariant_group:
3361       // Preserve !invariant.group in K.
3362       break;
3363     case LLVMContext::MD_mmra:
3364       // MMRAs are combined separately after this loop; see below.
3365       break;
3366     case LLVMContext::MD_align:
3367       if (DoesKMove || !K->hasMetadata(LLVMContext::MD_noundef))
3368         K->setMetadata(
3369             Kind, MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
3370       break;
3371     case LLVMContext::MD_dereferenceable:
3372     case LLVMContext::MD_dereferenceable_or_null:
3373       if (DoesKMove)
3374         K->setMetadata(Kind,
3375                        MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
3376       break;
3377     case LLVMContext::MD_preserve_access_index:
3378       // Preserve !preserve.access.index in K.
3379       break;
3380     case LLVMContext::MD_noundef:
3381       // If K does move, keep !noundef only if it is present in both instructions.
3382       if (DoesKMove)
3383         K->setMetadata(Kind, JMD);
3384       break;
3385     case LLVMContext::MD_nontemporal:
3386       // Preserve !nontemporal if it is present on both instructions.
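      // (Since this loop iterates over K's metadata, KMD is known to be
      // non-null here, so assigning JMD keeps !nontemporal exactly when J
      // carries it as well.)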
3387       K->setMetadata(Kind, JMD);
3388       break;
3389     case LLVMContext::MD_prof:
3390       if (DoesKMove)
3391         K->setMetadata(Kind, MDNode::getMergedProfMetadata(KMD, JMD, K, J));
3392       break;
3393     }
3394   }
3395   // Set !invariant.group from J if J has it. If both instructions have it,
3396   // we simply pick it from J - even when the two nodes are different.
3397   // Also make sure that K is a load or store - e.g. combining a bitcast with a
3398   // load could produce a bitcast with !invariant.group metadata, which is
3399   // invalid. FIXME: we should try to preserve both !invariant.group nodes when
3400   // they differ, but currently an instruction can only carry one of them.
3401   if (auto *JMD = J->getMetadata(LLVMContext::MD_invariant_group))
3402     if (isa<LoadInst>(K) || isa<StoreInst>(K))
3403       K->setMetadata(LLVMContext::MD_invariant_group, JMD);
3404 
3405   // Merge MMRAs.
3406   // This is handled separately because we also want to handle cases where K
3407   // doesn't have tags but J does.
3408   auto JMMRA = J->getMetadata(LLVMContext::MD_mmra);
3409   auto KMMRA = K->getMetadata(LLVMContext::MD_mmra);
3410   if (JMMRA || KMMRA) {
3411     K->setMetadata(LLVMContext::MD_mmra,
3412                    MMRAMetadata::combine(K->getContext(), JMMRA, KMMRA));
3413   }
3414 }
3415 
3416 void llvm::combineMetadataForCSE(Instruction *K, const Instruction *J,
3417                                  bool KDominatesJ) {
3418   unsigned KnownIDs[] = {LLVMContext::MD_tbaa,
3419                          LLVMContext::MD_alias_scope,
3420                          LLVMContext::MD_noalias,
3421                          LLVMContext::MD_range,
3422                          LLVMContext::MD_fpmath,
3423                          LLVMContext::MD_invariant_load,
3424                          LLVMContext::MD_nonnull,
3425                          LLVMContext::MD_invariant_group,
3426                          LLVMContext::MD_align,
3427                          LLVMContext::MD_dereferenceable,
3428                          LLVMContext::MD_dereferenceable_or_null,
3429                          LLVMContext::MD_access_group,
3430                          LLVMContext::MD_preserve_access_index,
3431                          LLVMContext::MD_prof,
3432                          LLVMContext::MD_nontemporal,
3433                          LLVMContext::MD_noundef,
3434                          LLVMContext::MD_mmra};
3435   combineMetadata(K, J, KnownIDs, KDominatesJ);
3436 }
3437 
3438 void llvm::copyMetadataForLoad(LoadInst &Dest, const LoadInst &Source) {
3439   SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
3440   Source.getAllMetadata(MD);
3441   MDBuilder MDB(Dest.getContext());
3442   Type *NewType = Dest.getType();
3443   const DataLayout &DL = Source.getDataLayout();
3444   for (const auto &MDPair : MD) {
3445     unsigned ID = MDPair.first;
3446     MDNode *N = MDPair.second;
3447     // Note that essentially every kind of metadata should be preserved here!
3448     // This routine is supposed to clone a load instruction changing *only its
3449     // type*. The only metadata it makes sense to drop is metadata which is
3450     // invalidated when the pointer type changes. This should essentially never
3451     // be the case in LLVM, but we explicitly switch over only known metadata
3452     // to be conservatively correct. If you are adding metadata to LLVM which
3453     // pertains to loads, you almost certainly want to add it here.
3454     switch (ID) {
3455     case LLVMContext::MD_dbg:
3456     case LLVMContext::MD_tbaa:
3457     case LLVMContext::MD_prof:
3458     case LLVMContext::MD_fpmath:
3459     case LLVMContext::MD_tbaa_struct:
3460     case LLVMContext::MD_invariant_load:
3461     case LLVMContext::MD_alias_scope:
3462     case LLVMContext::MD_noalias:
3463     case LLVMContext::MD_nontemporal:
3464     case LLVMContext::MD_mem_parallel_loop_access:
3465     case LLVMContext::MD_access_group:
3466     case LLVMContext::MD_noundef:
3467       // All of these directly apply.
3468       Dest.setMetadata(ID, N);
3469       break;
3470 
3471     case LLVMContext::MD_nonnull:
3472       copyNonnullMetadata(Source, N, Dest);
3473       break;
3474 
3475     case LLVMContext::MD_align:
3476     case LLVMContext::MD_dereferenceable:
3477     case LLVMContext::MD_dereferenceable_or_null:
3478       // These only directly apply if the new type is also a pointer.
3479       if (NewType->isPointerTy())
3480         Dest.setMetadata(ID, N);
3481       break;
3482 
3483     case LLVMContext::MD_range:
3484       copyRangeMetadata(DL, Source, N, Dest);
3485       break;
3486     }
3487   }
3488 }
3489 
3490 void llvm::patchReplacementInstruction(Instruction *I, Value *Repl) {
3491   auto *ReplInst = dyn_cast<Instruction>(Repl);
3492   if (!ReplInst)
3493     return;
3494 
3495   // Patch the replacement so that it is not more restrictive than the value
3496   // being replaced.
3497   WithOverflowInst *UnusedWO;
3498   // When replacing the result of an llvm.*.with.overflow intrinsic with an
3499   // overflowing binary operator, nuw/nsw flags may no longer hold.
3500   if (isa<OverflowingBinaryOperator>(ReplInst) &&
3501       match(I, m_ExtractValue<0>(m_WithOverflowInst(UnusedWO))))
3502     ReplInst->dropPoisonGeneratingFlags();
3503   // Note that if 'I' is a load being replaced by some operation, e.g. an
3504   // arithmetic operation, then andIRFlags() would just erase all math flags
3505   // from the original arithmetic operation, which is clearly not wanted and
3506   // not needed.
3507   else if (!isa<LoadInst>(I))
3508     ReplInst->andIRFlags(I);
3509 
3510   // Handle attributes.
3511   if (auto *CB1 = dyn_cast<CallBase>(ReplInst)) {
3512     if (auto *CB2 = dyn_cast<CallBase>(I)) {
3513       bool Success = CB1->tryIntersectAttributes(CB2);
3514       assert(Success && "We should not be trying to sink callbases "
3515                         "with non-intersectable attributes");
3516       // Silence the unused-variable warning in NDEBUG builds.
3517       (void)Success;
3518     }
3519   }
3520 
3521   // FIXME: If both the original and replacement value are part of the
3522   // same control-flow region (meaning that the execution of one
3523   // guarantees the execution of the other), then we can combine the
3524   // noalias scopes here and do better than the general conservative
3525   // answer used in combineMetadata().
3526 
3527   // In general, GVN unifies expressions over different control-flow
3528   // regions, and so we need a conservative combination of the noalias
3529   // scopes.
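  // (Passing /*KDominatesJ=*/false below requests that conservative merge.)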
3530 combineMetadataForCSE(ReplInst, I, false); 3531 } 3532 3533 template <typename RootType, typename ShouldReplaceFn> 3534 static unsigned replaceDominatedUsesWith(Value *From, Value *To, 3535 const RootType &Root, 3536 const ShouldReplaceFn &ShouldReplace) { 3537 assert(From->getType() == To->getType()); 3538 3539 unsigned Count = 0; 3540 for (Use &U : llvm::make_early_inc_range(From->uses())) { 3541 auto *II = dyn_cast<IntrinsicInst>(U.getUser()); 3542 if (II && II->getIntrinsicID() == Intrinsic::fake_use) 3543 continue; 3544 if (!ShouldReplace(Root, U)) 3545 continue; 3546 LLVM_DEBUG(dbgs() << "Replace dominated use of '"; 3547 From->printAsOperand(dbgs()); 3548 dbgs() << "' with " << *To << " in " << *U.getUser() << "\n"); 3549 U.set(To); 3550 ++Count; 3551 } 3552 return Count; 3553 } 3554 3555 unsigned llvm::replaceNonLocalUsesWith(Instruction *From, Value *To) { 3556 assert(From->getType() == To->getType()); 3557 auto *BB = From->getParent(); 3558 unsigned Count = 0; 3559 3560 for (Use &U : llvm::make_early_inc_range(From->uses())) { 3561 auto *I = cast<Instruction>(U.getUser()); 3562 if (I->getParent() == BB) 3563 continue; 3564 U.set(To); 3565 ++Count; 3566 } 3567 return Count; 3568 } 3569 3570 unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To, 3571 DominatorTree &DT, 3572 const BasicBlockEdge &Root) { 3573 auto Dominates = [&DT](const BasicBlockEdge &Root, const Use &U) { 3574 return DT.dominates(Root, U); 3575 }; 3576 return ::replaceDominatedUsesWith(From, To, Root, Dominates); 3577 } 3578 3579 unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To, 3580 DominatorTree &DT, 3581 const BasicBlock *BB) { 3582 auto Dominates = [&DT](const BasicBlock *BB, const Use &U) { 3583 return DT.dominates(BB, U); 3584 }; 3585 return ::replaceDominatedUsesWith(From, To, BB, Dominates); 3586 } 3587 3588 unsigned llvm::replaceDominatedUsesWithIf( 3589 Value *From, Value *To, DominatorTree &DT, const BasicBlockEdge &Root, 3590 function_ref<bool(const Use &U, const Value *To)> ShouldReplace) { 3591 auto DominatesAndShouldReplace = 3592 [&DT, &ShouldReplace, To](const BasicBlockEdge &Root, const Use &U) { 3593 return DT.dominates(Root, U) && ShouldReplace(U, To); 3594 }; 3595 return ::replaceDominatedUsesWith(From, To, Root, DominatesAndShouldReplace); 3596 } 3597 3598 unsigned llvm::replaceDominatedUsesWithIf( 3599 Value *From, Value *To, DominatorTree &DT, const BasicBlock *BB, 3600 function_ref<bool(const Use &U, const Value *To)> ShouldReplace) { 3601 auto DominatesAndShouldReplace = [&DT, &ShouldReplace, 3602 To](const BasicBlock *BB, const Use &U) { 3603 return DT.dominates(BB, U) && ShouldReplace(U, To); 3604 }; 3605 return ::replaceDominatedUsesWith(From, To, BB, DominatesAndShouldReplace); 3606 } 3607 3608 bool llvm::callsGCLeafFunction(const CallBase *Call, 3609 const TargetLibraryInfo &TLI) { 3610 // Check if the function is specifically marked as a gc leaf function. 3611 if (Call->hasFnAttr("gc-leaf-function")) 3612 return true; 3613 if (const Function *F = Call->getCalledFunction()) { 3614 if (F->hasFnAttribute("gc-leaf-function")) 3615 return true; 3616 3617 if (auto IID = F->getIntrinsicID()) { 3618 // Most LLVM intrinsics do not take safepoints. 
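      // (The exceptions below are, or may lower to, calls that are not
      // GC-leaf: statepoints, deoptimize, and the element-wise unordered
      // atomic memory intrinsics.)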
3619       return IID != Intrinsic::experimental_gc_statepoint &&
3620              IID != Intrinsic::experimental_deoptimize &&
3621              IID != Intrinsic::memcpy_element_unordered_atomic &&
3622              IID != Intrinsic::memmove_element_unordered_atomic;
3623     }
3624   }
3625 
3626   // Lib calls can be materialized by some passes, and won't be
3627   // marked as 'gc-leaf-function'. All available lib calls are
3628   // GC-leaf.
3629   LibFunc LF;
3630   if (TLI.getLibFunc(*Call, LF)) {
3631     return TLI.has(LF);
3632   }
3633 
3634   return false;
3635 }
3636 
3637 void llvm::copyNonnullMetadata(const LoadInst &OldLI, MDNode *N,
3638                                LoadInst &NewLI) {
3639   auto *NewTy = NewLI.getType();
3640 
3641   // This only directly applies if the new type is also a pointer.
3642   if (NewTy->isPointerTy()) {
3643     NewLI.setMetadata(LLVMContext::MD_nonnull, N);
3644     return;
3645   }
3646 
3647   // The only other translation we can do is to integral loads with !range
3648   // metadata.
3649   if (!NewTy->isIntegerTy())
3650     return;
3651 
3652   MDBuilder MDB(NewLI.getContext());
3653   const Value *Ptr = OldLI.getPointerOperand();
3654   auto *ITy = cast<IntegerType>(NewTy);
3655   auto *NullInt = ConstantExpr::getPtrToInt(
3656       ConstantPointerNull::get(cast<PointerType>(Ptr->getType())), ITy);
3657   auto *NonNullInt = ConstantExpr::getAdd(NullInt, ConstantInt::get(ITy, 1));
3658   NewLI.setMetadata(LLVMContext::MD_range,
3659                     MDB.createRange(NonNullInt, NullInt));
3660 }
3661 
3662 void llvm::copyRangeMetadata(const DataLayout &DL, const LoadInst &OldLI,
3663                              MDNode *N, LoadInst &NewLI) {
3664   auto *NewTy = NewLI.getType();
3665   // Simply copy the metadata if the type did not change.
3666   if (NewTy == OldLI.getType()) {
3667     NewLI.setMetadata(LLVMContext::MD_range, N);
3668     return;
3669   }
3670 
3671   // Give up unless the load is being converted to a pointer type, where there
3672   // is a single, very valuable mapping we can do reliably.
3673   // FIXME: It would be nice to propagate this in more ways, but the type
3674   // conversions make it hard.
3675   if (!NewTy->isPointerTy())
3676     return;
3677 
3678   unsigned BitWidth = DL.getPointerTypeSizeInBits(NewTy);
3679   if (BitWidth == OldLI.getType()->getScalarSizeInBits() &&
3680       !getConstantRangeFromMetadata(*N).contains(APInt(BitWidth, 0))) {
3681     MDNode *NN = MDNode::get(OldLI.getContext(), {});
3682     NewLI.setMetadata(LLVMContext::MD_nonnull, NN);
3683   }
3684 }
3685 
3686 void llvm::dropDebugUsers(Instruction &I) {
3687   SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
3688   SmallVector<DbgVariableRecord *, 1> DPUsers;
3689   findDbgUsers(DbgUsers, &I, &DPUsers);
3690   for (auto *DII : DbgUsers)
3691     DII->eraseFromParent();
3692   for (auto *DVR : DPUsers)
3693     DVR->eraseFromParent();
3694 }
3695 
3696 void llvm::hoistAllInstructionsInto(BasicBlock *DomBlock, Instruction *InsertPt,
3697                                     BasicBlock *BB) {
3698   // Since we are moving the instructions out of their basic block, we do not
3699   // retain their original debug locations (DILocations) or debug intrinsic
3700   // instructions.
3701   //
3702   // Doing so would degrade the debugging experience and adversely affect the
3703   // accuracy of profiling information.
3704   //
3705   // Currently, when hoisting the instructions, we take the following actions:
3706   // - Remove their debug intrinsic instructions.
3707   // - Set their debug locations to the values from the insertion point.
3708   //
3709   // As per PR39141 (comment #8), the more fundamental reason why the dbg.values
3710   // need to be deleted is that there will not be any instructions with a
3711   // DILocation left in either branch after performing the transformation. We
3712   // can only insert a dbg.value after the two branches are joined again.
3713   //
3714   // See PR38762, PR39243 for more details.
3715   //
3716   // TODO: Extend llvm.dbg.value to take more than one SSA Value (PR39141) to
3717   // encode predicated DIExpressions that yield different results on different
3718   // code paths.
3719 
3720   for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE;) {
3721     Instruction *I = &*II;
3722     I->dropUBImplyingAttrsAndMetadata();
3723     if (I->isUsedByMetadata())
3724       dropDebugUsers(*I);
3725     // RemoveDIs: drop debug-info too, as the following code does.
3726     I->dropDbgRecords();
3727     if (I->isDebugOrPseudoInst()) {
3728       // Remove DbgInfo and pseudo probe intrinsics.
3729       II = I->eraseFromParent();
3730       continue;
3731     }
3732     I->setDebugLoc(InsertPt->getDebugLoc());
3733     ++II;
3734   }
3735   DomBlock->splice(InsertPt->getIterator(), BB, BB->begin(),
3736                    BB->getTerminator()->getIterator());
3737 }
3738 
3739 DIExpression *llvm::getExpressionForConstant(DIBuilder &DIB, const Constant &C,
3740                                              Type &Ty) {
3741   // Create an integer constant expression.
3742   auto createIntegerExpression = [&DIB](const Constant &CV) -> DIExpression * {
3743     const APInt &API = cast<ConstantInt>(&CV)->getValue();
3744     std::optional<int64_t> InitIntOpt = API.trySExtValue();
3745     return InitIntOpt ? DIB.createConstantValueExpression(
3746                             static_cast<uint64_t>(*InitIntOpt))
3747                       : nullptr;
3748   };
3749 
3750   if (isa<ConstantInt>(C))
3751     return createIntegerExpression(C);
3752 
3753   auto *FP = dyn_cast<ConstantFP>(&C);
3754   if (FP && Ty.isFloatingPointTy() && Ty.getScalarSizeInBits() <= 64) {
3755     const APFloat &APF = FP->getValueAPF();
3756     APInt const &API = APF.bitcastToAPInt();
3757     if (auto Temp = API.getZExtValue())
3758       return DIB.createConstantValueExpression(static_cast<uint64_t>(Temp));
3759     return DIB.createConstantValueExpression(*API.getRawData());
3760   }
3761 
3762   if (!Ty.isPointerTy())
3763     return nullptr;
3764 
3765   if (isa<ConstantPointerNull>(C))
3766     return DIB.createConstantValueExpression(0);
3767 
3768   if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(&C))
3769     if (CE->getOpcode() == Instruction::IntToPtr) {
3770       const Value *V = CE->getOperand(0);
3771       if (auto CI = dyn_cast_or_null<ConstantInt>(V))
3772         return createIntegerExpression(*CI);
3773     }
3774   return nullptr;
3775 }
3776 
3777 void llvm::remapDebugVariable(ValueToValueMapTy &Mapping, Instruction *Inst) {
3778   auto RemapDebugOperands = [&Mapping](auto *DV, auto Set) {
3779     for (auto *Op : Set) {
3780       auto I = Mapping.find(Op);
3781       if (I != Mapping.end())
3782         DV->replaceVariableLocationOp(Op, I->second, /*AllowEmpty=*/true);
3783     }
3784   };
3785   auto RemapAssignAddress = [&Mapping](auto *DA) {
3786     auto I = Mapping.find(DA->getAddress());
3787     if (I != Mapping.end())
3788       DA->setAddress(I->second);
3789   };
3790   if (auto DVI = dyn_cast<DbgVariableIntrinsic>(Inst))
3791     RemapDebugOperands(DVI, DVI->location_ops());
3792   if (auto DAI = dyn_cast<DbgAssignIntrinsic>(Inst))
3793     RemapAssignAddress(DAI);
3794   for (DbgVariableRecord &DVR : filterDbgVars(Inst->getDbgRecordRange())) {
3795     RemapDebugOperands(&DVR, DVR.location_ops());
3796     if (DVR.isDbgAssign())
3797       RemapAssignAddress(&DVR);
3798   }
3799 }
3800 
3801 namespace {
3802 
3803 /// A potential constituent of
a bitreverse or bswap expression. See
3804 /// collectBitParts for a fuller explanation.
3805 struct BitPart {
3806   BitPart(Value *P, unsigned BW) : Provider(P) {
3807     Provenance.resize(BW);
3808   }
3809 
3810   /// The Value that this is a bitreverse/bswap of.
3811   Value *Provider;
3812 
3813   /// The "provenance" of each bit. Provenance[A] = B means that bit A
3814   /// in Provider becomes bit B in the result of this expression.
3815   SmallVector<int8_t, 32> Provenance; // int8_t means max size is i128.
3816 
3817   enum { Unset = -1 };
3818 };
3819 
3820 } // end anonymous namespace
3821 
3822 /// Analyze the specified subexpression and see if it is capable of providing
3823 /// pieces of a bswap or bitreverse. The subexpression provides a potential
3824 /// piece of a bswap or bitreverse if it can be proved that each non-zero bit in
3825 /// the output of the expression came from a corresponding bit in some other
3826 /// value. This function is recursive, and the end result is a mapping of
3827 /// bitnumber to bitnumber. It is the caller's responsibility to validate that
3828 /// the bitnumber to bitnumber mapping is correct for a bswap or bitreverse.
3829 ///
3830 /// For example, if the current subexpression is "(shl i32 %X, 24)" then we know
3831 /// that the expression deposits the low byte of %X into the high byte of the
3832 /// result and that all other bits are zero. This expression is accepted and a
3833 /// BitPart is returned with Provider set to %X and Provenance[24-31] set to
3834 /// [0-7].
3835 ///
3836 /// For vector types, all analysis is performed at the per-element level. No
3837 /// cross-element analysis is supported (shuffle/insertion/reduction), and all
3838 /// constant masks must be splatted across all elements.
3839 ///
3840 /// To avoid revisiting values, the BitPart results are memoized into the
3841 /// provided map. To avoid unnecessary copying of BitParts, BitParts are
3842 /// constructed in-place in the \c BPS map. Because of this \c BPS needs to
3843 /// store BitParts objects, not pointers. As we need the concept of a null
3844 /// BitPart (the Value has been analyzed and the analysis failed), we use an
3845 /// optional type instead to provide the same functionality.
3846 ///
3847 /// Because we pass around references into \c BPS, we must use a container that
3848 /// does not invalidate internal references (std::map instead of DenseMap).
3849 static const std::optional<BitPart> &
3850 collectBitParts(Value *V, bool MatchBSwaps, bool MatchBitReversals,
3851                 std::map<Value *, std::optional<BitPart>> &BPS, int Depth,
3852                 bool &FoundRoot) {
3853   auto [I, Inserted] = BPS.try_emplace(V);
3854   if (!Inserted)
3855     return I->second;
3856 
3857   auto &Result = I->second;
3858   auto BitWidth = V->getType()->getScalarSizeInBits();
3859 
3860   // Can't do integer/elements > 128 bits.
3861   if (BitWidth > 128)
3862     return Result;
3863 
3864   // Prevent stack overflow by limiting the recursion depth.
3865   if (Depth == BitPartRecursionMaxDepth) {
3866     LLVM_DEBUG(dbgs() << "collectBitParts max recursion depth reached.\n");
3867     return Result;
3868   }
3869 
3870   if (auto *I = dyn_cast<Instruction>(V)) {
3871     Value *X, *Y;
3872     const APInt *C;
3873 
3874     // If this is an or instruction, it may be an inner node of the bswap.
3875     if (match(V, m_Or(m_Value(X), m_Value(Y)))) {
3876       // Check we have both sources and they are from the same provider.
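      // For example (illustrative), in (or (shl i16 %X, 8), (lshr i16 %X, 8))
      // both operands report %X as their provider, and merging the two
      // provenance vectors below yields the byte-swapped mapping.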
3877 const auto &A = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS, 3878 Depth + 1, FoundRoot); 3879 if (!A || !A->Provider) 3880 return Result; 3881 3882 const auto &B = collectBitParts(Y, MatchBSwaps, MatchBitReversals, BPS, 3883 Depth + 1, FoundRoot); 3884 if (!B || A->Provider != B->Provider) 3885 return Result; 3886 3887 // Try and merge the two together. 3888 Result = BitPart(A->Provider, BitWidth); 3889 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx) { 3890 if (A->Provenance[BitIdx] != BitPart::Unset && 3891 B->Provenance[BitIdx] != BitPart::Unset && 3892 A->Provenance[BitIdx] != B->Provenance[BitIdx]) 3893 return Result = std::nullopt; 3894 3895 if (A->Provenance[BitIdx] == BitPart::Unset) 3896 Result->Provenance[BitIdx] = B->Provenance[BitIdx]; 3897 else 3898 Result->Provenance[BitIdx] = A->Provenance[BitIdx]; 3899 } 3900 3901 return Result; 3902 } 3903 3904 // If this is a logical shift by a constant, recurse then shift the result. 3905 if (match(V, m_LogicalShift(m_Value(X), m_APInt(C)))) { 3906 const APInt &BitShift = *C; 3907 3908 // Ensure the shift amount is defined. 3909 if (BitShift.uge(BitWidth)) 3910 return Result; 3911 3912 // For bswap-only, limit shift amounts to whole bytes, for an early exit. 3913 if (!MatchBitReversals && (BitShift.getZExtValue() % 8) != 0) 3914 return Result; 3915 3916 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS, 3917 Depth + 1, FoundRoot); 3918 if (!Res) 3919 return Result; 3920 Result = Res; 3921 3922 // Perform the "shift" on BitProvenance. 3923 auto &P = Result->Provenance; 3924 if (I->getOpcode() == Instruction::Shl) { 3925 P.erase(std::prev(P.end(), BitShift.getZExtValue()), P.end()); 3926 P.insert(P.begin(), BitShift.getZExtValue(), BitPart::Unset); 3927 } else { 3928 P.erase(P.begin(), std::next(P.begin(), BitShift.getZExtValue())); 3929 P.insert(P.end(), BitShift.getZExtValue(), BitPart::Unset); 3930 } 3931 3932 return Result; 3933 } 3934 3935 // If this is a logical 'and' with a mask that clears bits, recurse then 3936 // unset the appropriate bits. 3937 if (match(V, m_And(m_Value(X), m_APInt(C)))) { 3938 const APInt &AndMask = *C; 3939 3940 // Check that the mask allows a multiple of 8 bits for a bswap, for an 3941 // early exit. 3942 unsigned NumMaskedBits = AndMask.popcount(); 3943 if (!MatchBitReversals && (NumMaskedBits % 8) != 0) 3944 return Result; 3945 3946 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS, 3947 Depth + 1, FoundRoot); 3948 if (!Res) 3949 return Result; 3950 Result = Res; 3951 3952 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx) 3953 // If the AndMask is zero for this bit, clear the bit. 3954 if (AndMask[BitIdx] == 0) 3955 Result->Provenance[BitIdx] = BitPart::Unset; 3956 return Result; 3957 } 3958 3959 // If this is a zext instruction zero extend the result. 3960 if (match(V, m_ZExt(m_Value(X)))) { 3961 const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS, 3962 Depth + 1, FoundRoot); 3963 if (!Res) 3964 return Result; 3965 3966 Result = BitPart(Res->Provider, BitWidth); 3967 auto NarrowBitWidth = X->getType()->getScalarSizeInBits(); 3968 for (unsigned BitIdx = 0; BitIdx < NarrowBitWidth; ++BitIdx) 3969 Result->Provenance[BitIdx] = Res->Provenance[BitIdx]; 3970 for (unsigned BitIdx = NarrowBitWidth; BitIdx < BitWidth; ++BitIdx) 3971 Result->Provenance[BitIdx] = BitPart::Unset; 3972 return Result; 3973 } 3974 3975 // If this is a truncate instruction, extract the lower bits. 
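    // For example (illustrative), `trunc i32 %X to i16` keeps the provenance
    // of bits 0..15 and discards the provenance of the upper bits.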
3976     if (match(V, m_Trunc(m_Value(X)))) {
3977       const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3978                                         Depth + 1, FoundRoot);
3979       if (!Res)
3980         return Result;
3981 
3982       Result = BitPart(Res->Provider, BitWidth);
3983       for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3984         Result->Provenance[BitIdx] = Res->Provenance[BitIdx];
3985       return Result;
3986     }
3987 
3988     // BITREVERSE - most likely due to us previously matching a partial
3989     // bitreverse.
3990     if (match(V, m_BitReverse(m_Value(X)))) {
3991       const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3992                                         Depth + 1, FoundRoot);
3993       if (!Res)
3994         return Result;
3995 
3996       Result = BitPart(Res->Provider, BitWidth);
3997       for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3998         Result->Provenance[(BitWidth - 1) - BitIdx] = Res->Provenance[BitIdx];
3999       return Result;
4000     }
4001 
4002     // BSWAP - most likely due to us previously matching a partial bswap.
4003     if (match(V, m_BSwap(m_Value(X)))) {
4004       const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
4005                                         Depth + 1, FoundRoot);
4006       if (!Res)
4007         return Result;
4008 
4009       unsigned ByteWidth = BitWidth / 8;
4010       Result = BitPart(Res->Provider, BitWidth);
4011       for (unsigned ByteIdx = 0; ByteIdx < ByteWidth; ++ByteIdx) {
4012         unsigned ByteBitOfs = ByteIdx * 8;
4013         for (unsigned BitIdx = 0; BitIdx < 8; ++BitIdx)
4014           Result->Provenance[(BitWidth - 8 - ByteBitOfs) + BitIdx] =
4015               Res->Provenance[ByteBitOfs + BitIdx];
4016       }
4017       return Result;
4018     }
4019 
4020     // Funnel 'double' shifts take 3 operands: 2 inputs and the shift
4021     // amount (modulo the bitwidth).
4022     // fshl(X,Y,Z): (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
4023     // fshr(X,Y,Z): (X << (BW - (Z % BW))) | (Y >> (Z % BW))
4024     if (match(V, m_FShl(m_Value(X), m_Value(Y), m_APInt(C))) ||
4025         match(V, m_FShr(m_Value(X), m_Value(Y), m_APInt(C)))) {
4026       // We can treat fshr as a fshl by flipping the modulo amount.
4027       unsigned ModAmt = C->urem(BitWidth);
4028       if (cast<IntrinsicInst>(I)->getIntrinsicID() == Intrinsic::fshr)
4029         ModAmt = BitWidth - ModAmt;
4030 
4031       // For bswap-only, limit shift amounts to whole bytes, for an early exit.
4032       if (!MatchBitReversals && (ModAmt % 8) != 0)
4033         return Result;
4034 
4035       // Check we have both sources and they are from the same provider.
4036       const auto &LHS = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
4037                                         Depth + 1, FoundRoot);
4038       if (!LHS || !LHS->Provider)
4039         return Result;
4040 
4041       const auto &RHS = collectBitParts(Y, MatchBSwaps, MatchBitReversals, BPS,
4042                                         Depth + 1, FoundRoot);
4043       if (!RHS || LHS->Provider != RHS->Provider)
4044         return Result;
4045 
4046       unsigned StartBitRHS = BitWidth - ModAmt;
4047       Result = BitPart(LHS->Provider, BitWidth);
4048       for (unsigned BitIdx = 0; BitIdx < StartBitRHS; ++BitIdx)
4049         Result->Provenance[BitIdx + ModAmt] = LHS->Provenance[BitIdx];
4050       for (unsigned BitIdx = 0; BitIdx < ModAmt; ++BitIdx)
4051         Result->Provenance[BitIdx] = RHS->Provenance[BitIdx + StartBitRHS];
4052       return Result;
4053     }
4054   }
4055 
4056   // If we've already found a root input value then we're never going to merge
4057   // these back together.
4058   if (FoundRoot)
4059     return Result;
4060 
4061   // Okay, we got to something that isn't a shift, 'or', 'and', etc. This must
4062   // be the root input value to the bswap/bitreverse.
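  // (E.g. the %X in the examples above; as the root, its provenance is the
  // identity mapping Provenance[i] == i, established below.)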
4063 FoundRoot = true; 4064 Result = BitPart(V, BitWidth); 4065 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx) 4066 Result->Provenance[BitIdx] = BitIdx; 4067 return Result; 4068 } 4069 4070 static bool bitTransformIsCorrectForBSwap(unsigned From, unsigned To, 4071 unsigned BitWidth) { 4072 if (From % 8 != To % 8) 4073 return false; 4074 // Convert from bit indices to byte indices and check for a byte reversal. 4075 From >>= 3; 4076 To >>= 3; 4077 BitWidth >>= 3; 4078 return From == BitWidth - To - 1; 4079 } 4080 4081 static bool bitTransformIsCorrectForBitReverse(unsigned From, unsigned To, 4082 unsigned BitWidth) { 4083 return From == BitWidth - To - 1; 4084 } 4085 4086 bool llvm::recognizeBSwapOrBitReverseIdiom( 4087 Instruction *I, bool MatchBSwaps, bool MatchBitReversals, 4088 SmallVectorImpl<Instruction *> &InsertedInsts) { 4089 if (!match(I, m_Or(m_Value(), m_Value())) && 4090 !match(I, m_FShl(m_Value(), m_Value(), m_Value())) && 4091 !match(I, m_FShr(m_Value(), m_Value(), m_Value())) && 4092 !match(I, m_BSwap(m_Value()))) 4093 return false; 4094 if (!MatchBSwaps && !MatchBitReversals) 4095 return false; 4096 Type *ITy = I->getType(); 4097 if (!ITy->isIntOrIntVectorTy() || ITy->getScalarSizeInBits() > 128) 4098 return false; // Can't do integer/elements > 128 bits. 4099 4100 // Try to find all the pieces corresponding to the bswap. 4101 bool FoundRoot = false; 4102 std::map<Value *, std::optional<BitPart>> BPS; 4103 const auto &Res = 4104 collectBitParts(I, MatchBSwaps, MatchBitReversals, BPS, 0, FoundRoot); 4105 if (!Res) 4106 return false; 4107 ArrayRef<int8_t> BitProvenance = Res->Provenance; 4108 assert(all_of(BitProvenance, 4109 [](int8_t I) { return I == BitPart::Unset || 0 <= I; }) && 4110 "Illegal bit provenance index"); 4111 4112 // If the upper bits are zero, then attempt to perform as a truncated op. 4113 Type *DemandedTy = ITy; 4114 if (BitProvenance.back() == BitPart::Unset) { 4115 while (!BitProvenance.empty() && BitProvenance.back() == BitPart::Unset) 4116 BitProvenance = BitProvenance.drop_back(); 4117 if (BitProvenance.empty()) 4118 return false; // TODO - handle null value? 4119 DemandedTy = Type::getIntNTy(I->getContext(), BitProvenance.size()); 4120 if (auto *IVecTy = dyn_cast<VectorType>(ITy)) 4121 DemandedTy = VectorType::get(DemandedTy, IVecTy); 4122 } 4123 4124 // Check BitProvenance hasn't found a source larger than the result type. 4125 unsigned DemandedBW = DemandedTy->getScalarSizeInBits(); 4126 if (DemandedBW > ITy->getScalarSizeInBits()) 4127 return false; 4128 4129 // Now, is the bit permutation correct for a bswap or a bitreverse? We can 4130 // only byteswap values with an even number of bytes. 
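  // For example (illustrative), for a 32-bit bswap byte 0 must map to byte 3
  // and byte 1 to byte 2, with each bit keeping its position within its byte
  // (checked by bitTransformIsCorrectForBSwap), while a bitreverse requires
  // bit i to map to bit (BW - 1 - i).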
4131 APInt DemandedMask = APInt::getAllOnes(DemandedBW); 4132 bool OKForBSwap = MatchBSwaps && (DemandedBW % 16) == 0; 4133 bool OKForBitReverse = MatchBitReversals; 4134 for (unsigned BitIdx = 0; 4135 (BitIdx < DemandedBW) && (OKForBSwap || OKForBitReverse); ++BitIdx) { 4136 if (BitProvenance[BitIdx] == BitPart::Unset) { 4137 DemandedMask.clearBit(BitIdx); 4138 continue; 4139 } 4140 OKForBSwap &= bitTransformIsCorrectForBSwap(BitProvenance[BitIdx], BitIdx, 4141 DemandedBW); 4142 OKForBitReverse &= bitTransformIsCorrectForBitReverse(BitProvenance[BitIdx], 4143 BitIdx, DemandedBW); 4144 } 4145 4146 Intrinsic::ID Intrin; 4147 if (OKForBSwap) 4148 Intrin = Intrinsic::bswap; 4149 else if (OKForBitReverse) 4150 Intrin = Intrinsic::bitreverse; 4151 else 4152 return false; 4153 4154 Function *F = 4155 Intrinsic::getOrInsertDeclaration(I->getModule(), Intrin, DemandedTy); 4156 Value *Provider = Res->Provider; 4157 4158 // We may need to truncate the provider. 4159 if (DemandedTy != Provider->getType()) { 4160 auto *Trunc = 4161 CastInst::CreateIntegerCast(Provider, DemandedTy, false, "trunc", I->getIterator()); 4162 InsertedInsts.push_back(Trunc); 4163 Provider = Trunc; 4164 } 4165 4166 Instruction *Result = CallInst::Create(F, Provider, "rev", I->getIterator()); 4167 InsertedInsts.push_back(Result); 4168 4169 if (!DemandedMask.isAllOnes()) { 4170 auto *Mask = ConstantInt::get(DemandedTy, DemandedMask); 4171 Result = BinaryOperator::Create(Instruction::And, Result, Mask, "mask", I->getIterator()); 4172 InsertedInsts.push_back(Result); 4173 } 4174 4175 // We may need to zeroextend back to the result type. 4176 if (ITy != Result->getType()) { 4177 auto *ExtInst = CastInst::CreateIntegerCast(Result, ITy, false, "zext", I->getIterator()); 4178 InsertedInsts.push_back(ExtInst); 4179 } 4180 4181 return true; 4182 } 4183 4184 // CodeGen has special handling for some string functions that may replace 4185 // them with target-specific intrinsics. Since that'd skip our interceptors 4186 // in ASan/MSan/TSan/DFSan, and thus make us miss some memory accesses, 4187 // we mark affected calls as NoBuiltin, which will disable optimization 4188 // in CodeGen. 4189 void llvm::maybeMarkSanitizerLibraryCallNoBuiltin( 4190 CallInst *CI, const TargetLibraryInfo *TLI) { 4191 Function *F = CI->getCalledFunction(); 4192 LibFunc Func; 4193 if (F && !F->hasLocalLinkage() && F->hasName() && 4194 TLI->getLibFunc(F->getName(), Func) && TLI->hasOptimizedCodeGen(Func) && 4195 !F->doesNotAccessMemory()) 4196 CI->addFnAttr(Attribute::NoBuiltin); 4197 } 4198 4199 bool llvm::canReplaceOperandWithVariable(const Instruction *I, unsigned OpIdx) { 4200 // We can't have a PHI with a metadata type. 4201 if (I->getOperand(OpIdx)->getType()->isMetadataTy()) 4202 return false; 4203 4204 // Early exit. 4205 if (!isa<Constant>(I->getOperand(OpIdx))) 4206 return true; 4207 4208 switch (I->getOpcode()) { 4209 default: 4210 return true; 4211 case Instruction::Call: 4212 case Instruction::Invoke: { 4213 const auto &CB = cast<CallBase>(*I); 4214 4215 // Can't handle inline asm. Skip it. 4216 if (CB.isInlineAsm()) 4217 return false; 4218 4219 // Constant bundle operands may need to retain their constant-ness for 4220 // correctness. 4221 if (CB.isBundleOperand(OpIdx)) 4222 return false; 4223 4224 if (OpIdx < CB.arg_size()) { 4225 // Some variadic intrinsics require constants in the variadic arguments, 4226 // which currently aren't markable as immarg. 
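      // (Illustrative: the variadic "live value" operands of
      // llvm.experimental.stackmap are one such case; they may be replaced
      // with variables, which is why stackmap is allowed below.)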
4227       if (isa<IntrinsicInst>(CB) &&
4228           OpIdx >= CB.getFunctionType()->getNumParams()) {
4229         // This is known to be OK for stackmap.
4230         return CB.getIntrinsicID() == Intrinsic::experimental_stackmap;
4231       }
4232 
4233       // gcroot is a special case, since it requires a constant argument which
4234       // isn't also required to be a simple ConstantInt.
4235       if (CB.getIntrinsicID() == Intrinsic::gcroot)
4236         return false;
4237 
4238       // Some intrinsic operands are required to be immediates.
4239       return !CB.paramHasAttr(OpIdx, Attribute::ImmArg);
4240     }
4241 
4242     // It is never allowed to replace the callee operand of an intrinsic call,
4243     // but it may be possible for a normal call.
4244     return !isa<IntrinsicInst>(CB);
4245   }
4246   case Instruction::ShuffleVector:
4247     // Shufflevector masks are constant.
4248     return OpIdx != 2;
4249   case Instruction::Switch:
4250   case Instruction::ExtractValue:
4251     // All operands apart from the first are constant.
4252     return OpIdx == 0;
4253   case Instruction::InsertValue:
4254     // All operands apart from the first and the second are constant.
4255     return OpIdx < 2;
4256   case Instruction::Alloca:
4257     // Static allocas (constant size in the entry block) are handled by
4258     // prologue/epilogue insertion so they're free anyway. We definitely don't
4259     // want to make them non-constant.
4260     return !cast<AllocaInst>(I)->isStaticAlloca();
4261   case Instruction::GetElementPtr:
4262     if (OpIdx == 0)
4263       return true;
4264     gep_type_iterator It = gep_type_begin(I);
4265     for (auto E = std::next(It, OpIdx); It != E; ++It)
4266       if (It.isStruct())
4267         return false;
4268     return true;
4269   }
4270 }
4271 
4272 Value *llvm::invertCondition(Value *Condition) {
4273   // First: Check if it's a constant.
4274   if (Constant *C = dyn_cast<Constant>(Condition))
4275     return ConstantExpr::getNot(C);
4276 
4277   // Second: If the condition is already inverted, return the original value.
4278   Value *NotCondition;
4279   if (match(Condition, m_Not(m_Value(NotCondition))))
4280     return NotCondition;
4281 
4282   BasicBlock *Parent = nullptr;
4283   Instruction *Inst = dyn_cast<Instruction>(Condition);
4284   if (Inst)
4285     Parent = Inst->getParent();
4286   else if (Argument *Arg = dyn_cast<Argument>(Condition))
4287     Parent = &Arg->getParent()->getEntryBlock();
4288   assert(Parent && "Unsupported condition to invert");
4289 
4290   // Third: Check all the users for an invert.
4291   for (User *U : Condition->users())
4292     if (Instruction *I = dyn_cast<Instruction>(U))
4293       if (I->getParent() == Parent && match(I, m_Not(m_Specific(Condition))))
4294         return I;
4295 
4296   // Last option: Create a new instruction.
4297   auto *Inverted =
4298       BinaryOperator::CreateNot(Condition, Condition->getName() + ".inv");
4299   if (Inst && !isa<PHINode>(Inst))
4300     Inverted->insertAfter(Inst);
4301   else
4302     Inverted->insertBefore(&*Parent->getFirstInsertionPt());
4303   return Inverted;
4304 }
4305 
4306 bool llvm::inferAttributesFromOthers(Function &F) {
4307   // Note: We explicitly check for attributes rather than using cover functions
4308   // because some of the cover functions include the logic being implemented.
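  //
  // For example (illustrative), a function that is `readnone` and not
  // convergent is marked `nosync` below, a `readonly` function gains
  // `nofree`, and a `willreturn` function gains `mustprogress`.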
4309 4310 bool Changed = false; 4311 // readnone + not convergent implies nosync 4312 if (!F.hasFnAttribute(Attribute::NoSync) && 4313 F.doesNotAccessMemory() && !F.isConvergent()) { 4314 F.setNoSync(); 4315 Changed = true; 4316 } 4317 4318 // readonly implies nofree 4319 if (!F.hasFnAttribute(Attribute::NoFree) && F.onlyReadsMemory()) { 4320 F.setDoesNotFreeMemory(); 4321 Changed = true; 4322 } 4323 4324 // willreturn implies mustprogress 4325 if (!F.hasFnAttribute(Attribute::MustProgress) && F.willReturn()) { 4326 F.setMustProgress(); 4327 Changed = true; 4328 } 4329 4330 // TODO: There are a bunch of cases of restrictive memory effects we 4331 // can infer by inspecting arguments of argmemonly-ish functions. 4332 4333 return Changed; 4334 } 4335