1 //===- Local.cpp - Functions to perform local transformations -------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This family of functions perform various local transformations to the 10 // program. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "llvm/Transforms/Utils/Local.h" 15 #include "llvm/ADT/APInt.h" 16 #include "llvm/ADT/DenseMap.h" 17 #include "llvm/ADT/DenseMapInfo.h" 18 #include "llvm/ADT/DenseSet.h" 19 #include "llvm/ADT/Hashing.h" 20 #include "llvm/ADT/None.h" 21 #include "llvm/ADT/Optional.h" 22 #include "llvm/ADT/STLExtras.h" 23 #include "llvm/ADT/SetVector.h" 24 #include "llvm/ADT/SmallPtrSet.h" 25 #include "llvm/ADT/SmallVector.h" 26 #include "llvm/ADT/Statistic.h" 27 #include "llvm/ADT/TinyPtrVector.h" 28 #include "llvm/Analysis/AssumeBundleQueries.h" 29 #include "llvm/Analysis/ConstantFolding.h" 30 #include "llvm/Analysis/DomTreeUpdater.h" 31 #include "llvm/Analysis/EHPersonalities.h" 32 #include "llvm/Analysis/InstructionSimplify.h" 33 #include "llvm/Analysis/LazyValueInfo.h" 34 #include "llvm/Analysis/MemoryBuiltins.h" 35 #include "llvm/Analysis/MemorySSAUpdater.h" 36 #include "llvm/Analysis/TargetLibraryInfo.h" 37 #include "llvm/Analysis/ValueTracking.h" 38 #include "llvm/Analysis/VectorUtils.h" 39 #include "llvm/BinaryFormat/Dwarf.h" 40 #include "llvm/IR/Argument.h" 41 #include "llvm/IR/Attributes.h" 42 #include "llvm/IR/BasicBlock.h" 43 #include "llvm/IR/CFG.h" 44 #include "llvm/IR/Constant.h" 45 #include "llvm/IR/ConstantRange.h" 46 #include "llvm/IR/Constants.h" 47 #include "llvm/IR/DIBuilder.h" 48 #include "llvm/IR/DataLayout.h" 49 #include "llvm/IR/DebugInfoMetadata.h" 50 #include "llvm/IR/DebugLoc.h" 51 #include "llvm/IR/DerivedTypes.h" 52 #include "llvm/IR/Dominators.h" 53 #include "llvm/IR/Function.h" 54 #include "llvm/IR/GetElementPtrTypeIterator.h" 55 #include "llvm/IR/GlobalObject.h" 56 #include "llvm/IR/IRBuilder.h" 57 #include "llvm/IR/InstrTypes.h" 58 #include "llvm/IR/Instruction.h" 59 #include "llvm/IR/Instructions.h" 60 #include "llvm/IR/IntrinsicInst.h" 61 #include "llvm/IR/Intrinsics.h" 62 #include "llvm/IR/LLVMContext.h" 63 #include "llvm/IR/MDBuilder.h" 64 #include "llvm/IR/Metadata.h" 65 #include "llvm/IR/Module.h" 66 #include "llvm/IR/Operator.h" 67 #include "llvm/IR/PatternMatch.h" 68 #include "llvm/IR/Type.h" 69 #include "llvm/IR/Use.h" 70 #include "llvm/IR/User.h" 71 #include "llvm/IR/Value.h" 72 #include "llvm/IR/ValueHandle.h" 73 #include "llvm/Support/Casting.h" 74 #include "llvm/Support/Debug.h" 75 #include "llvm/Support/ErrorHandling.h" 76 #include "llvm/Support/KnownBits.h" 77 #include "llvm/Support/raw_ostream.h" 78 #include "llvm/Transforms/Utils/BasicBlockUtils.h" 79 #include "llvm/Transforms/Utils/ValueMapper.h" 80 #include <algorithm> 81 #include <cassert> 82 #include <climits> 83 #include <cstdint> 84 #include <iterator> 85 #include <map> 86 #include <utility> 87 88 using namespace llvm; 89 using namespace llvm::PatternMatch; 90 91 #define DEBUG_TYPE "local" 92 93 STATISTIC(NumRemoved, "Number of unreachable basic blocks removed"); 94 STATISTIC(NumPHICSEs, "Number of PHI's that got CSE'd"); 95 96 static cl::opt<bool> PHICSEDebugHash( 97 "phicse-debug-hash", 98 #ifdef EXPENSIVE_CHECKS 99 
cl::init(true), 100 #else 101 cl::init(false), 102 #endif 103 cl::Hidden, 104 cl::desc("Perform extra assertion checking to verify that PHINodes's hash " 105 "function is well-behaved w.r.t. its isEqual predicate")); 106 107 static cl::opt<unsigned> PHICSENumPHISmallSize( 108 "phicse-num-phi-smallsize", cl::init(32), cl::Hidden, 109 cl::desc( 110 "When the basic block contains not more than this number of PHI nodes, " 111 "perform a (faster!) exhaustive search instead of set-driven one.")); 112 113 // Max recursion depth for collectBitParts used when detecting bswap and 114 // bitreverse idioms 115 static const unsigned BitPartRecursionMaxDepth = 64; 116 117 //===----------------------------------------------------------------------===// 118 // Local constant propagation. 119 // 120 121 /// ConstantFoldTerminator - If a terminator instruction is predicated on a 122 /// constant value, convert it into an unconditional branch to the constant 123 /// destination. This is a nontrivial operation because the successors of this 124 /// basic block must have their PHI nodes updated. 125 /// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch 126 /// conditions and indirectbr addresses this might make dead if 127 /// DeleteDeadConditions is true. 128 bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions, 129 const TargetLibraryInfo *TLI, 130 DomTreeUpdater *DTU) { 131 Instruction *T = BB->getTerminator(); 132 IRBuilder<> Builder(T); 133 134 // Branch - See if we are conditional jumping on constant 135 if (auto *BI = dyn_cast<BranchInst>(T)) { 136 if (BI->isUnconditional()) return false; // Can't optimize uncond branch 137 BasicBlock *Dest1 = BI->getSuccessor(0); 138 BasicBlock *Dest2 = BI->getSuccessor(1); 139 140 if (auto *Cond = dyn_cast<ConstantInt>(BI->getCondition())) { 141 // Are we branching on constant? 142 // YES. Change to unconditional branch... 143 BasicBlock *Destination = Cond->getZExtValue() ? Dest1 : Dest2; 144 BasicBlock *OldDest = Cond->getZExtValue() ? Dest2 : Dest1; 145 146 // Let the basic block know that we are letting go of it. Based on this, 147 // it will adjust it's PHI nodes. 148 OldDest->removePredecessor(BB); 149 150 // Replace the conditional branch with an unconditional one. 151 Builder.CreateBr(Destination); 152 BI->eraseFromParent(); 153 if (DTU) 154 DTU->applyUpdatesPermissive({{DominatorTree::Delete, BB, OldDest}}); 155 return true; 156 } 157 158 if (Dest2 == Dest1) { // Conditional branch to same location? 159 // This branch matches something like this: 160 // br bool %cond, label %Dest, label %Dest 161 // and changes it into: br label %Dest 162 163 // Let the basic block know that we are letting go of one copy of it. 164 assert(BI->getParent() && "Terminator not inserted in block!"); 165 Dest1->removePredecessor(BI->getParent()); 166 167 // Replace the conditional branch with an unconditional one. 168 Builder.CreateBr(Dest1); 169 Value *Cond = BI->getCondition(); 170 BI->eraseFromParent(); 171 if (DeleteDeadConditions) 172 RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI); 173 return true; 174 } 175 return false; 176 } 177 178 if (auto *SI = dyn_cast<SwitchInst>(T)) { 179 // If we are switching on a constant, we can convert the switch to an 180 // unconditional branch. 181 auto *CI = dyn_cast<ConstantInt>(SI->getCondition()); 182 BasicBlock *DefaultDest = SI->getDefaultDest(); 183 BasicBlock *TheOnlyDest = DefaultDest; 184 185 // If the default is unreachable, ignore it when searching for TheOnlyDest. 
186 if (isa<UnreachableInst>(DefaultDest->getFirstNonPHIOrDbg()) && 187 SI->getNumCases() > 0) { 188 TheOnlyDest = SI->case_begin()->getCaseSuccessor(); 189 } 190 191 bool Changed = false; 192 193 // Figure out which case it goes to. 194 for (auto i = SI->case_begin(), e = SI->case_end(); i != e;) { 195 // Found case matching a constant operand? 196 if (i->getCaseValue() == CI) { 197 TheOnlyDest = i->getCaseSuccessor(); 198 break; 199 } 200 201 // Check to see if this branch is going to the same place as the default 202 // dest. If so, eliminate it as an explicit compare. 203 if (i->getCaseSuccessor() == DefaultDest) { 204 MDNode *MD = SI->getMetadata(LLVMContext::MD_prof); 205 unsigned NCases = SI->getNumCases(); 206 // Fold the case metadata into the default if there will be any branches 207 // left, unless the metadata doesn't match the switch. 208 if (NCases > 1 && MD && MD->getNumOperands() == 2 + NCases) { 209 // Collect branch weights into a vector. 210 SmallVector<uint32_t, 8> Weights; 211 for (unsigned MD_i = 1, MD_e = MD->getNumOperands(); MD_i < MD_e; 212 ++MD_i) { 213 auto *CI = mdconst::extract<ConstantInt>(MD->getOperand(MD_i)); 214 Weights.push_back(CI->getValue().getZExtValue()); 215 } 216 // Merge weight of this case to the default weight. 217 unsigned idx = i->getCaseIndex(); 218 Weights[0] += Weights[idx+1]; 219 // Remove weight for this case. 220 std::swap(Weights[idx+1], Weights.back()); 221 Weights.pop_back(); 222 SI->setMetadata(LLVMContext::MD_prof, 223 MDBuilder(BB->getContext()). 224 createBranchWeights(Weights)); 225 } 226 // Remove this entry. 227 BasicBlock *ParentBB = SI->getParent(); 228 DefaultDest->removePredecessor(ParentBB); 229 i = SI->removeCase(i); 230 e = SI->case_end(); 231 Changed = true; 232 if (DTU) 233 DTU->applyUpdatesPermissive( 234 {{DominatorTree::Delete, ParentBB, DefaultDest}}); 235 continue; 236 } 237 238 // Otherwise, check to see if the switch only branches to one destination. 239 // We do this by reseting "TheOnlyDest" to null when we find two non-equal 240 // destinations. 241 if (i->getCaseSuccessor() != TheOnlyDest) 242 TheOnlyDest = nullptr; 243 244 // Increment this iterator as we haven't removed the case. 245 ++i; 246 } 247 248 if (CI && !TheOnlyDest) { 249 // Branching on a constant, but not any of the cases, go to the default 250 // successor. 251 TheOnlyDest = SI->getDefaultDest(); 252 } 253 254 // If we found a single destination that we can fold the switch into, do so 255 // now. 256 if (TheOnlyDest) { 257 // Insert the new branch. 258 Builder.CreateBr(TheOnlyDest); 259 BasicBlock *BB = SI->getParent(); 260 std::vector <DominatorTree::UpdateType> Updates; 261 if (DTU) 262 Updates.reserve(SI->getNumSuccessors() - 1); 263 264 // Remove entries from PHI nodes which we no longer branch to... 265 for (BasicBlock *Succ : successors(SI)) { 266 // Found case matching a constant operand? 267 if (Succ == TheOnlyDest) { 268 TheOnlyDest = nullptr; // Don't modify the first branch to TheOnlyDest 269 } else { 270 Succ->removePredecessor(BB); 271 if (DTU) 272 Updates.push_back({DominatorTree::Delete, BB, Succ}); 273 } 274 } 275 276 // Delete the old switch. 
277 Value *Cond = SI->getCondition(); 278 SI->eraseFromParent(); 279 if (DeleteDeadConditions) 280 RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI); 281 if (DTU) 282 DTU->applyUpdatesPermissive(Updates); 283 return true; 284 } 285 286 if (SI->getNumCases() == 1) { 287 // Otherwise, we can fold this switch into a conditional branch 288 // instruction if it has only one non-default destination. 289 auto FirstCase = *SI->case_begin(); 290 Value *Cond = Builder.CreateICmpEQ(SI->getCondition(), 291 FirstCase.getCaseValue(), "cond"); 292 293 // Insert the new branch. 294 BranchInst *NewBr = Builder.CreateCondBr(Cond, 295 FirstCase.getCaseSuccessor(), 296 SI->getDefaultDest()); 297 MDNode *MD = SI->getMetadata(LLVMContext::MD_prof); 298 if (MD && MD->getNumOperands() == 3) { 299 ConstantInt *SICase = 300 mdconst::dyn_extract<ConstantInt>(MD->getOperand(2)); 301 ConstantInt *SIDef = 302 mdconst::dyn_extract<ConstantInt>(MD->getOperand(1)); 303 assert(SICase && SIDef); 304 // The TrueWeight should be the weight for the single case of SI. 305 NewBr->setMetadata(LLVMContext::MD_prof, 306 MDBuilder(BB->getContext()). 307 createBranchWeights(SICase->getValue().getZExtValue(), 308 SIDef->getValue().getZExtValue())); 309 } 310 311 // Update make.implicit metadata to the newly-created conditional branch. 312 MDNode *MakeImplicitMD = SI->getMetadata(LLVMContext::MD_make_implicit); 313 if (MakeImplicitMD) 314 NewBr->setMetadata(LLVMContext::MD_make_implicit, MakeImplicitMD); 315 316 // Delete the old switch. 317 SI->eraseFromParent(); 318 return true; 319 } 320 return Changed; 321 } 322 323 if (auto *IBI = dyn_cast<IndirectBrInst>(T)) { 324 // indirectbr blockaddress(@F, @BB) -> br label @BB 325 if (auto *BA = 326 dyn_cast<BlockAddress>(IBI->getAddress()->stripPointerCasts())) { 327 BasicBlock *TheOnlyDest = BA->getBasicBlock(); 328 std::vector <DominatorTree::UpdateType> Updates; 329 if (DTU) 330 Updates.reserve(IBI->getNumDestinations() - 1); 331 332 // Insert the new branch. 333 Builder.CreateBr(TheOnlyDest); 334 335 for (unsigned i = 0, e = IBI->getNumDestinations(); i != e; ++i) { 336 if (IBI->getDestination(i) == TheOnlyDest) { 337 TheOnlyDest = nullptr; 338 } else { 339 BasicBlock *ParentBB = IBI->getParent(); 340 BasicBlock *DestBB = IBI->getDestination(i); 341 DestBB->removePredecessor(ParentBB); 342 if (DTU) 343 Updates.push_back({DominatorTree::Delete, ParentBB, DestBB}); 344 } 345 } 346 Value *Address = IBI->getAddress(); 347 IBI->eraseFromParent(); 348 if (DeleteDeadConditions) 349 // Delete pointer cast instructions. 350 RecursivelyDeleteTriviallyDeadInstructions(Address, TLI); 351 352 // Also zap the blockaddress constant if there are no users remaining, 353 // otherwise the destination is still marked as having its address taken. 354 if (BA->use_empty()) 355 BA->destroyConstant(); 356 357 // If we didn't find our destination in the IBI successor list, then we 358 // have undefined behavior. Replace the unconditional branch with an 359 // 'unreachable' instruction. 360 if (TheOnlyDest) { 361 BB->getTerminator()->eraseFromParent(); 362 new UnreachableInst(BB->getContext(), BB); 363 } 364 365 if (DTU) 366 DTU->applyUpdatesPermissive(Updates); 367 return true; 368 } 369 } 370 371 return false; 372 } 373 374 //===----------------------------------------------------------------------===// 375 // Local dead code elimination. 376 // 377 378 /// isInstructionTriviallyDead - Return true if the result produced by the 379 /// instruction is not used, and the instruction has no side effects. 
380 /// 381 bool llvm::isInstructionTriviallyDead(Instruction *I, 382 const TargetLibraryInfo *TLI) { 383 if (!I->use_empty()) 384 return false; 385 return wouldInstructionBeTriviallyDead(I, TLI); 386 } 387 388 bool llvm::wouldInstructionBeTriviallyDead(Instruction *I, 389 const TargetLibraryInfo *TLI) { 390 if (I->isTerminator()) 391 return false; 392 393 // We don't want the landingpad-like instructions removed by anything this 394 // general. 395 if (I->isEHPad()) 396 return false; 397 398 // We don't want debug info removed by anything this general, unless 399 // debug info is empty. 400 if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(I)) { 401 if (DDI->getAddress()) 402 return false; 403 return true; 404 } 405 if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(I)) { 406 if (DVI->getValue()) 407 return false; 408 return true; 409 } 410 if (DbgLabelInst *DLI = dyn_cast<DbgLabelInst>(I)) { 411 if (DLI->getLabel()) 412 return false; 413 return true; 414 } 415 416 if (!I->mayHaveSideEffects()) 417 return true; 418 419 // Special case intrinsics that "may have side effects" but can be deleted 420 // when dead. 421 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { 422 // Safe to delete llvm.stacksave and launder.invariant.group if dead. 423 if (II->getIntrinsicID() == Intrinsic::stacksave || 424 II->getIntrinsicID() == Intrinsic::launder_invariant_group) 425 return true; 426 427 if (II->isLifetimeStartOrEnd()) { 428 auto *Arg = II->getArgOperand(1); 429 // Lifetime intrinsics are dead when their right-hand is undef. 430 if (isa<UndefValue>(Arg)) 431 return true; 432 // If the right-hand is an alloc, global, or argument and the only uses 433 // are lifetime intrinsics then the intrinsics are dead. 434 if (isa<AllocaInst>(Arg) || isa<GlobalValue>(Arg) || isa<Argument>(Arg)) 435 return llvm::all_of(Arg->uses(), [](Use &Use) { 436 if (IntrinsicInst *IntrinsicUse = 437 dyn_cast<IntrinsicInst>(Use.getUser())) 438 return IntrinsicUse->isLifetimeStartOrEnd(); 439 return false; 440 }); 441 return false; 442 } 443 444 // Assumptions are dead if their condition is trivially true. Guards on 445 // true are operationally no-ops. In the future we can consider more 446 // sophisticated tradeoffs for guards considering potential for check 447 // widening, but for now we keep things simple. 448 if ((II->getIntrinsicID() == Intrinsic::assume && 449 isAssumeWithEmptyBundle(*II)) || 450 II->getIntrinsicID() == Intrinsic::experimental_guard) { 451 if (ConstantInt *Cond = dyn_cast<ConstantInt>(II->getArgOperand(0))) 452 return !Cond->isZero(); 453 454 return false; 455 } 456 } 457 458 if (isAllocLikeFn(I, TLI)) 459 return true; 460 461 if (CallInst *CI = isFreeCall(I, TLI)) 462 if (Constant *C = dyn_cast<Constant>(CI->getArgOperand(0))) 463 return C->isNullValue() || isa<UndefValue>(C); 464 465 if (auto *Call = dyn_cast<CallBase>(I)) 466 if (isMathLibCallNoop(Call, TLI)) 467 return true; 468 469 return false; 470 } 471 472 /// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a 473 /// trivially dead instruction, delete it. If that makes any of its operands 474 /// trivially dead, delete them too, recursively. Return true if any 475 /// instructions were deleted. 
476 bool llvm::RecursivelyDeleteTriviallyDeadInstructions( 477 Value *V, const TargetLibraryInfo *TLI, MemorySSAUpdater *MSSAU, 478 std::function<void(Value *)> AboutToDeleteCallback) { 479 Instruction *I = dyn_cast<Instruction>(V); 480 if (!I || !isInstructionTriviallyDead(I, TLI)) 481 return false; 482 483 SmallVector<WeakTrackingVH, 16> DeadInsts; 484 DeadInsts.push_back(I); 485 RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU, 486 AboutToDeleteCallback); 487 488 return true; 489 } 490 491 bool llvm::RecursivelyDeleteTriviallyDeadInstructionsPermissive( 492 SmallVectorImpl<WeakTrackingVH> &DeadInsts, const TargetLibraryInfo *TLI, 493 MemorySSAUpdater *MSSAU, 494 std::function<void(Value *)> AboutToDeleteCallback) { 495 unsigned S = 0, E = DeadInsts.size(), Alive = 0; 496 for (; S != E; ++S) { 497 auto *I = cast<Instruction>(DeadInsts[S]); 498 if (!isInstructionTriviallyDead(I)) { 499 DeadInsts[S] = nullptr; 500 ++Alive; 501 } 502 } 503 if (Alive == E) 504 return false; 505 RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU, 506 AboutToDeleteCallback); 507 return true; 508 } 509 510 void llvm::RecursivelyDeleteTriviallyDeadInstructions( 511 SmallVectorImpl<WeakTrackingVH> &DeadInsts, const TargetLibraryInfo *TLI, 512 MemorySSAUpdater *MSSAU, 513 std::function<void(Value *)> AboutToDeleteCallback) { 514 // Process the dead instruction list until empty. 515 while (!DeadInsts.empty()) { 516 Value *V = DeadInsts.pop_back_val(); 517 Instruction *I = cast_or_null<Instruction>(V); 518 if (!I) 519 continue; 520 assert(isInstructionTriviallyDead(I, TLI) && 521 "Live instruction found in dead worklist!"); 522 assert(I->use_empty() && "Instructions with uses are not dead."); 523 524 // Don't lose the debug info while deleting the instructions. 525 salvageDebugInfo(*I); 526 527 if (AboutToDeleteCallback) 528 AboutToDeleteCallback(I); 529 530 // Null out all of the instruction's operands to see if any operand becomes 531 // dead as we go. 532 for (Use &OpU : I->operands()) { 533 Value *OpV = OpU.get(); 534 OpU.set(nullptr); 535 536 if (!OpV->use_empty()) 537 continue; 538 539 // If the operand is an instruction that became dead as we nulled out the 540 // operand, and if it is 'trivially' dead, delete it in a future loop 541 // iteration. 542 if (Instruction *OpI = dyn_cast<Instruction>(OpV)) 543 if (isInstructionTriviallyDead(OpI, TLI)) 544 DeadInsts.push_back(OpI); 545 } 546 if (MSSAU) 547 MSSAU->removeMemoryAccess(I); 548 549 I->eraseFromParent(); 550 } 551 } 552 553 bool llvm::replaceDbgUsesWithUndef(Instruction *I) { 554 SmallVector<DbgVariableIntrinsic *, 1> DbgUsers; 555 findDbgUsers(DbgUsers, I); 556 for (auto *DII : DbgUsers) { 557 Value *Undef = UndefValue::get(I->getType()); 558 DII->setOperand(0, MetadataAsValue::get(DII->getContext(), 559 ValueAsMetadata::get(Undef))); 560 } 561 return !DbgUsers.empty(); 562 } 563 564 /// areAllUsesEqual - Check whether the uses of a value are all the same. 565 /// This is similar to Instruction::hasOneUse() except this will also return 566 /// true when there are no uses or multiple uses that all refer to the same 567 /// value. 
568 static bool areAllUsesEqual(Instruction *I) { 569 Value::user_iterator UI = I->user_begin(); 570 Value::user_iterator UE = I->user_end(); 571 if (UI == UE) 572 return true; 573 574 User *TheUse = *UI; 575 for (++UI; UI != UE; ++UI) { 576 if (*UI != TheUse) 577 return false; 578 } 579 return true; 580 } 581 582 /// RecursivelyDeleteDeadPHINode - If the specified value is an effectively 583 /// dead PHI node, due to being a def-use chain of single-use nodes that 584 /// either forms a cycle or is terminated by a trivially dead instruction, 585 /// delete it. If that makes any of its operands trivially dead, delete them 586 /// too, recursively. Return true if a change was made. 587 bool llvm::RecursivelyDeleteDeadPHINode(PHINode *PN, 588 const TargetLibraryInfo *TLI, 589 llvm::MemorySSAUpdater *MSSAU) { 590 SmallPtrSet<Instruction*, 4> Visited; 591 for (Instruction *I = PN; areAllUsesEqual(I) && !I->mayHaveSideEffects(); 592 I = cast<Instruction>(*I->user_begin())) { 593 if (I->use_empty()) 594 return RecursivelyDeleteTriviallyDeadInstructions(I, TLI, MSSAU); 595 596 // If we find an instruction more than once, we're on a cycle that 597 // won't prove fruitful. 598 if (!Visited.insert(I).second) { 599 // Break the cycle and delete the instruction and its operands. 600 I->replaceAllUsesWith(UndefValue::get(I->getType())); 601 (void)RecursivelyDeleteTriviallyDeadInstructions(I, TLI, MSSAU); 602 return true; 603 } 604 } 605 return false; 606 } 607 608 static bool 609 simplifyAndDCEInstruction(Instruction *I, 610 SmallSetVector<Instruction *, 16> &WorkList, 611 const DataLayout &DL, 612 const TargetLibraryInfo *TLI) { 613 if (isInstructionTriviallyDead(I, TLI)) { 614 salvageDebugInfo(*I); 615 616 // Null out all of the instruction's operands to see if any operand becomes 617 // dead as we go. 618 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) { 619 Value *OpV = I->getOperand(i); 620 I->setOperand(i, nullptr); 621 622 if (!OpV->use_empty() || I == OpV) 623 continue; 624 625 // If the operand is an instruction that became dead as we nulled out the 626 // operand, and if it is 'trivially' dead, delete it in a future loop 627 // iteration. 628 if (Instruction *OpI = dyn_cast<Instruction>(OpV)) 629 if (isInstructionTriviallyDead(OpI, TLI)) 630 WorkList.insert(OpI); 631 } 632 633 I->eraseFromParent(); 634 635 return true; 636 } 637 638 if (Value *SimpleV = SimplifyInstruction(I, DL)) { 639 // Add the users to the worklist. CAREFUL: an instruction can use itself, 640 // in the case of a phi node. 641 for (User *U : I->users()) { 642 if (U != I) { 643 WorkList.insert(cast<Instruction>(U)); 644 } 645 } 646 647 // Replace the instruction with its simplified value. 648 bool Changed = false; 649 if (!I->use_empty()) { 650 I->replaceAllUsesWith(SimpleV); 651 Changed = true; 652 } 653 if (isInstructionTriviallyDead(I, TLI)) { 654 I->eraseFromParent(); 655 Changed = true; 656 } 657 return Changed; 658 } 659 return false; 660 } 661 662 /// SimplifyInstructionsInBlock - Scan the specified basic block and try to 663 /// simplify any instructions in it and recursively delete dead instructions. 664 /// 665 /// This returns true if it changed the code, note that it can delete 666 /// instructions in other blocks as well in this block. 
667 bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB, 668 const TargetLibraryInfo *TLI) { 669 bool MadeChange = false; 670 const DataLayout &DL = BB->getModule()->getDataLayout(); 671 672 #ifndef NDEBUG 673 // In debug builds, ensure that the terminator of the block is never replaced 674 // or deleted by these simplifications. The idea of simplification is that it 675 // cannot introduce new instructions, and there is no way to replace the 676 // terminator of a block without introducing a new instruction. 677 AssertingVH<Instruction> TerminatorVH(&BB->back()); 678 #endif 679 680 SmallSetVector<Instruction *, 16> WorkList; 681 // Iterate over the original function, only adding insts to the worklist 682 // if they actually need to be revisited. This avoids having to pre-init 683 // the worklist with the entire function's worth of instructions. 684 for (BasicBlock::iterator BI = BB->begin(), E = std::prev(BB->end()); 685 BI != E;) { 686 assert(!BI->isTerminator()); 687 Instruction *I = &*BI; 688 ++BI; 689 690 // We're visiting this instruction now, so make sure it's not in the 691 // worklist from an earlier visit. 692 if (!WorkList.count(I)) 693 MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI); 694 } 695 696 while (!WorkList.empty()) { 697 Instruction *I = WorkList.pop_back_val(); 698 MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI); 699 } 700 return MadeChange; 701 } 702 703 //===----------------------------------------------------------------------===// 704 // Control Flow Graph Restructuring. 705 // 706 707 void llvm::RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred, 708 DomTreeUpdater *DTU) { 709 // This only adjusts blocks with PHI nodes. 710 if (!isa<PHINode>(BB->begin())) 711 return; 712 713 // Remove the entries for Pred from the PHI nodes in BB, but do not simplify 714 // them down. This will leave us with single entry phi nodes and other phis 715 // that can be removed. 716 BB->removePredecessor(Pred, true); 717 718 WeakTrackingVH PhiIt = &BB->front(); 719 while (PHINode *PN = dyn_cast<PHINode>(PhiIt)) { 720 PhiIt = &*++BasicBlock::iterator(cast<Instruction>(PhiIt)); 721 Value *OldPhiIt = PhiIt; 722 723 if (!recursivelySimplifyInstruction(PN)) 724 continue; 725 726 // If recursive simplification ended up deleting the next PHI node we would 727 // iterate to, then our iterator is invalid, restart scanning from the top 728 // of the block. 729 if (PhiIt != OldPhiIt) PhiIt = &BB->front(); 730 } 731 if (DTU) 732 DTU->applyUpdatesPermissive({{DominatorTree::Delete, Pred, BB}}); 733 } 734 735 void llvm::MergeBasicBlockIntoOnlyPred(BasicBlock *DestBB, 736 DomTreeUpdater *DTU) { 737 738 // If BB has single-entry PHI nodes, fold them. 739 while (PHINode *PN = dyn_cast<PHINode>(DestBB->begin())) { 740 Value *NewVal = PN->getIncomingValue(0); 741 // Replace self referencing PHI with undef, it must be dead. 742 if (NewVal == PN) NewVal = UndefValue::get(PN->getType()); 743 PN->replaceAllUsesWith(NewVal); 744 PN->eraseFromParent(); 745 } 746 747 BasicBlock *PredBB = DestBB->getSinglePredecessor(); 748 assert(PredBB && "Block doesn't have a single predecessor!"); 749 750 bool ReplaceEntryBB = false; 751 if (PredBB == &DestBB->getParent()->getEntryBlock()) 752 ReplaceEntryBB = true; 753 754 // DTU updates: Collect all the edges that enter 755 // PredBB. These dominator edges will be redirected to DestBB. 
756 SmallVector<DominatorTree::UpdateType, 32> Updates; 757 758 if (DTU) { 759 Updates.push_back({DominatorTree::Delete, PredBB, DestBB}); 760 for (auto I = pred_begin(PredBB), E = pred_end(PredBB); I != E; ++I) { 761 Updates.push_back({DominatorTree::Delete, *I, PredBB}); 762 // This predecessor of PredBB may already have DestBB as a successor. 763 if (llvm::find(successors(*I), DestBB) == succ_end(*I)) 764 Updates.push_back({DominatorTree::Insert, *I, DestBB}); 765 } 766 } 767 768 // Zap anything that took the address of DestBB. Not doing this will give the 769 // address an invalid value. 770 if (DestBB->hasAddressTaken()) { 771 BlockAddress *BA = BlockAddress::get(DestBB); 772 Constant *Replacement = 773 ConstantInt::get(Type::getInt32Ty(BA->getContext()), 1); 774 BA->replaceAllUsesWith(ConstantExpr::getIntToPtr(Replacement, 775 BA->getType())); 776 BA->destroyConstant(); 777 } 778 779 // Anything that branched to PredBB now branches to DestBB. 780 PredBB->replaceAllUsesWith(DestBB); 781 782 // Splice all the instructions from PredBB to DestBB. 783 PredBB->getTerminator()->eraseFromParent(); 784 DestBB->getInstList().splice(DestBB->begin(), PredBB->getInstList()); 785 new UnreachableInst(PredBB->getContext(), PredBB); 786 787 // If the PredBB is the entry block of the function, move DestBB up to 788 // become the entry block after we erase PredBB. 789 if (ReplaceEntryBB) 790 DestBB->moveAfter(PredBB); 791 792 if (DTU) { 793 assert(PredBB->getInstList().size() == 1 && 794 isa<UnreachableInst>(PredBB->getTerminator()) && 795 "The successor list of PredBB isn't empty before " 796 "applying corresponding DTU updates."); 797 DTU->applyUpdatesPermissive(Updates); 798 DTU->deleteBB(PredBB); 799 // Recalculation of DomTree is needed when updating a forward DomTree and 800 // the Entry BB is replaced. 801 if (ReplaceEntryBB && DTU->hasDomTree()) { 802 // The entry block was removed and there is no external interface for 803 // the dominator tree to be notified of this change. In this corner-case 804 // we recalculate the entire tree. 805 DTU->recalculate(*(DestBB->getParent())); 806 } 807 } 808 809 else { 810 PredBB->eraseFromParent(); // Nuke BB if DTU is nullptr. 811 } 812 } 813 814 /// Return true if we can choose one of these values to use in place of the 815 /// other. Note that we will always choose the non-undef value to keep. 816 static bool CanMergeValues(Value *First, Value *Second) { 817 return First == Second || isa<UndefValue>(First) || isa<UndefValue>(Second); 818 } 819 820 /// Return true if we can fold BB, an almost-empty BB ending in an unconditional 821 /// branch to Succ, into Succ. 822 /// 823 /// Assumption: Succ is the single successor for BB. 
824 static bool CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ) { 825 assert(*succ_begin(BB) == Succ && "Succ is not successor of BB!"); 826 827 LLVM_DEBUG(dbgs() << "Looking to fold " << BB->getName() << " into " 828 << Succ->getName() << "\n"); 829 // Shortcut, if there is only a single predecessor it must be BB and merging 830 // is always safe 831 if (Succ->getSinglePredecessor()) return true; 832 833 // Make a list of the predecessors of BB 834 SmallPtrSet<BasicBlock*, 16> BBPreds(pred_begin(BB), pred_end(BB)); 835 836 // Look at all the phi nodes in Succ, to see if they present a conflict when 837 // merging these blocks 838 for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) { 839 PHINode *PN = cast<PHINode>(I); 840 841 // If the incoming value from BB is again a PHINode in 842 // BB which has the same incoming value for *PI as PN does, we can 843 // merge the phi nodes and then the blocks can still be merged 844 PHINode *BBPN = dyn_cast<PHINode>(PN->getIncomingValueForBlock(BB)); 845 if (BBPN && BBPN->getParent() == BB) { 846 for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) { 847 BasicBlock *IBB = PN->getIncomingBlock(PI); 848 if (BBPreds.count(IBB) && 849 !CanMergeValues(BBPN->getIncomingValueForBlock(IBB), 850 PN->getIncomingValue(PI))) { 851 LLVM_DEBUG(dbgs() 852 << "Can't fold, phi node " << PN->getName() << " in " 853 << Succ->getName() << " is conflicting with " 854 << BBPN->getName() << " with regard to common predecessor " 855 << IBB->getName() << "\n"); 856 return false; 857 } 858 } 859 } else { 860 Value* Val = PN->getIncomingValueForBlock(BB); 861 for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) { 862 // See if the incoming value for the common predecessor is equal to the 863 // one for BB, in which case this phi node will not prevent the merging 864 // of the block. 865 BasicBlock *IBB = PN->getIncomingBlock(PI); 866 if (BBPreds.count(IBB) && 867 !CanMergeValues(Val, PN->getIncomingValue(PI))) { 868 LLVM_DEBUG(dbgs() << "Can't fold, phi node " << PN->getName() 869 << " in " << Succ->getName() 870 << " is conflicting with regard to common " 871 << "predecessor " << IBB->getName() << "\n"); 872 return false; 873 } 874 } 875 } 876 } 877 878 return true; 879 } 880 881 using PredBlockVector = SmallVector<BasicBlock *, 16>; 882 using IncomingValueMap = DenseMap<BasicBlock *, Value *>; 883 884 /// Determines the value to use as the phi node input for a block. 885 /// 886 /// Select between \p OldVal any value that we know flows from \p BB 887 /// to a particular phi on the basis of which one (if either) is not 888 /// undef. Update IncomingValues based on the selected value. 889 /// 890 /// \param OldVal The value we are considering selecting. 891 /// \param BB The block that the value flows in from. 892 /// \param IncomingValues A map from block-to-value for other phi inputs 893 /// that we have examined. 894 /// 895 /// \returns the selected value. 
896 static Value *selectIncomingValueForBlock(Value *OldVal, BasicBlock *BB, 897 IncomingValueMap &IncomingValues) { 898 if (!isa<UndefValue>(OldVal)) { 899 assert((!IncomingValues.count(BB) || 900 IncomingValues.find(BB)->second == OldVal) && 901 "Expected OldVal to match incoming value from BB!"); 902 903 IncomingValues.insert(std::make_pair(BB, OldVal)); 904 return OldVal; 905 } 906 907 IncomingValueMap::const_iterator It = IncomingValues.find(BB); 908 if (It != IncomingValues.end()) return It->second; 909 910 return OldVal; 911 } 912 913 /// Create a map from block to value for the operands of a 914 /// given phi. 915 /// 916 /// Create a map from block to value for each non-undef value flowing 917 /// into \p PN. 918 /// 919 /// \param PN The phi we are collecting the map for. 920 /// \param IncomingValues [out] The map from block to value for this phi. 921 static void gatherIncomingValuesToPhi(PHINode *PN, 922 IncomingValueMap &IncomingValues) { 923 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 924 BasicBlock *BB = PN->getIncomingBlock(i); 925 Value *V = PN->getIncomingValue(i); 926 927 if (!isa<UndefValue>(V)) 928 IncomingValues.insert(std::make_pair(BB, V)); 929 } 930 } 931 932 /// Replace the incoming undef values to a phi with the values 933 /// from a block-to-value map. 934 /// 935 /// \param PN The phi we are replacing the undefs in. 936 /// \param IncomingValues A map from block to value. 937 static void replaceUndefValuesInPhi(PHINode *PN, 938 const IncomingValueMap &IncomingValues) { 939 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 940 Value *V = PN->getIncomingValue(i); 941 942 if (!isa<UndefValue>(V)) continue; 943 944 BasicBlock *BB = PN->getIncomingBlock(i); 945 IncomingValueMap::const_iterator It = IncomingValues.find(BB); 946 if (It == IncomingValues.end()) continue; 947 948 PN->setIncomingValue(i, It->second); 949 } 950 } 951 952 /// Replace a value flowing from a block to a phi with 953 /// potentially multiple instances of that value flowing from the 954 /// block's predecessors to the phi. 955 /// 956 /// \param BB The block with the value flowing into the phi. 957 /// \param BBPreds The predecessors of BB. 958 /// \param PN The phi that we are updating. 959 static void redirectValuesFromPredecessorsToPhi(BasicBlock *BB, 960 const PredBlockVector &BBPreds, 961 PHINode *PN) { 962 Value *OldVal = PN->removeIncomingValue(BB, false); 963 assert(OldVal && "No entry in PHI for Pred BB!"); 964 965 IncomingValueMap IncomingValues; 966 967 // We are merging two blocks - BB, and the block containing PN - and 968 // as a result we need to redirect edges from the predecessors of BB 969 // to go to the block containing PN, and update PN 970 // accordingly. Since we allow merging blocks in the case where the 971 // predecessor and successor blocks both share some predecessors, 972 // and where some of those common predecessors might have undef 973 // values flowing into PN, we want to rewrite those values to be 974 // consistent with the non-undef values. 975 976 gatherIncomingValuesToPhi(PN, IncomingValues); 977 978 // If this incoming value is one of the PHI nodes in BB, the new entries 979 // in the PHI node are the entries from the old PHI. 
980 if (isa<PHINode>(OldVal) && cast<PHINode>(OldVal)->getParent() == BB) { 981 PHINode *OldValPN = cast<PHINode>(OldVal); 982 for (unsigned i = 0, e = OldValPN->getNumIncomingValues(); i != e; ++i) { 983 // Note that, since we are merging phi nodes and BB and Succ might 984 // have common predecessors, we could end up with a phi node with 985 // identical incoming branches. This will be cleaned up later (and 986 // will trigger asserts if we try to clean it up now, without also 987 // simplifying the corresponding conditional branch). 988 BasicBlock *PredBB = OldValPN->getIncomingBlock(i); 989 Value *PredVal = OldValPN->getIncomingValue(i); 990 Value *Selected = selectIncomingValueForBlock(PredVal, PredBB, 991 IncomingValues); 992 993 // And add a new incoming value for this predecessor for the 994 // newly retargeted branch. 995 PN->addIncoming(Selected, PredBB); 996 } 997 } else { 998 for (unsigned i = 0, e = BBPreds.size(); i != e; ++i) { 999 // Update existing incoming values in PN for this 1000 // predecessor of BB. 1001 BasicBlock *PredBB = BBPreds[i]; 1002 Value *Selected = selectIncomingValueForBlock(OldVal, PredBB, 1003 IncomingValues); 1004 1005 // And add a new incoming value for this predecessor for the 1006 // newly retargeted branch. 1007 PN->addIncoming(Selected, PredBB); 1008 } 1009 } 1010 1011 replaceUndefValuesInPhi(PN, IncomingValues); 1012 } 1013 1014 bool llvm::TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB, 1015 DomTreeUpdater *DTU) { 1016 assert(BB != &BB->getParent()->getEntryBlock() && 1017 "TryToSimplifyUncondBranchFromEmptyBlock called on entry block!"); 1018 1019 // We can't eliminate infinite loops. 1020 BasicBlock *Succ = cast<BranchInst>(BB->getTerminator())->getSuccessor(0); 1021 if (BB == Succ) return false; 1022 1023 // Check to see if merging these blocks would cause conflicts for any of the 1024 // phi nodes in BB or Succ. If not, we can safely merge. 1025 if (!CanPropagatePredecessorsForPHIs(BB, Succ)) return false; 1026 1027 // Check for cases where Succ has multiple predecessors and a PHI node in BB 1028 // has uses which will not disappear when the PHI nodes are merged. It is 1029 // possible to handle such cases, but difficult: it requires checking whether 1030 // BB dominates Succ, which is non-trivial to calculate in the case where 1031 // Succ has multiple predecessors. Also, it requires checking whether 1032 // constructing the necessary self-referential PHI node doesn't introduce any 1033 // conflicts; this isn't too difficult, but the previous code for doing this 1034 // was incorrect. 1035 // 1036 // Note that if this check finds a live use, BB dominates Succ, so BB is 1037 // something like a loop pre-header (or rarely, a part of an irreducible CFG); 1038 // folding the branch isn't profitable in that case anyway. 1039 if (!Succ->getSinglePredecessor()) { 1040 BasicBlock::iterator BBI = BB->begin(); 1041 while (isa<PHINode>(*BBI)) { 1042 for (Use &U : BBI->uses()) { 1043 if (PHINode* PN = dyn_cast<PHINode>(U.getUser())) { 1044 if (PN->getIncomingBlock(U) != BB) 1045 return false; 1046 } else { 1047 return false; 1048 } 1049 } 1050 ++BBI; 1051 } 1052 } 1053 1054 // We cannot fold the block if it's a branch to an already present callbr 1055 // successor because that creates duplicate successors. 
1056 for (auto I = pred_begin(BB), E = pred_end(BB); I != E; ++I) { 1057 if (auto *CBI = dyn_cast<CallBrInst>((*I)->getTerminator())) { 1058 if (Succ == CBI->getDefaultDest()) 1059 return false; 1060 for (unsigned i = 0, e = CBI->getNumIndirectDests(); i != e; ++i) 1061 if (Succ == CBI->getIndirectDest(i)) 1062 return false; 1063 } 1064 } 1065 1066 LLVM_DEBUG(dbgs() << "Killing Trivial BB: \n" << *BB); 1067 1068 SmallVector<DominatorTree::UpdateType, 32> Updates; 1069 if (DTU) { 1070 Updates.push_back({DominatorTree::Delete, BB, Succ}); 1071 // All predecessors of BB will be moved to Succ. 1072 for (auto I = pred_begin(BB), E = pred_end(BB); I != E; ++I) { 1073 Updates.push_back({DominatorTree::Delete, *I, BB}); 1074 // This predecessor of BB may already have Succ as a successor. 1075 if (llvm::find(successors(*I), Succ) == succ_end(*I)) 1076 Updates.push_back({DominatorTree::Insert, *I, Succ}); 1077 } 1078 } 1079 1080 if (isa<PHINode>(Succ->begin())) { 1081 // If there is more than one pred of succ, and there are PHI nodes in 1082 // the successor, then we need to add incoming edges for the PHI nodes 1083 // 1084 const PredBlockVector BBPreds(pred_begin(BB), pred_end(BB)); 1085 1086 // Loop over all of the PHI nodes in the successor of BB. 1087 for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) { 1088 PHINode *PN = cast<PHINode>(I); 1089 1090 redirectValuesFromPredecessorsToPhi(BB, BBPreds, PN); 1091 } 1092 } 1093 1094 if (Succ->getSinglePredecessor()) { 1095 // BB is the only predecessor of Succ, so Succ will end up with exactly 1096 // the same predecessors BB had. 1097 1098 // Copy over any phi, debug or lifetime instruction. 1099 BB->getTerminator()->eraseFromParent(); 1100 Succ->getInstList().splice(Succ->getFirstNonPHI()->getIterator(), 1101 BB->getInstList()); 1102 } else { 1103 while (PHINode *PN = dyn_cast<PHINode>(&BB->front())) { 1104 // We explicitly check for such uses in CanPropagatePredecessorsForPHIs. 1105 assert(PN->use_empty() && "There shouldn't be any uses here!"); 1106 PN->eraseFromParent(); 1107 } 1108 } 1109 1110 // If the unconditional branch we replaced contains llvm.loop metadata, we 1111 // add the metadata to the branch instructions in the predecessors. 1112 unsigned LoopMDKind = BB->getContext().getMDKindID("llvm.loop"); 1113 Instruction *TI = BB->getTerminator(); 1114 if (TI) 1115 if (MDNode *LoopMD = TI->getMetadata(LoopMDKind)) 1116 for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) { 1117 BasicBlock *Pred = *PI; 1118 Pred->getTerminator()->setMetadata(LoopMDKind, LoopMD); 1119 } 1120 1121 // Everything that jumped to BB now goes to Succ. 1122 BB->replaceAllUsesWith(Succ); 1123 if (!Succ->hasName()) Succ->takeName(BB); 1124 1125 // Clear the successor list of BB to match updates applying to DTU later. 1126 if (BB->getTerminator()) 1127 BB->getInstList().pop_back(); 1128 new UnreachableInst(BB->getContext(), BB); 1129 assert(succ_empty(BB) && "The successor list of BB isn't empty before " 1130 "applying corresponding DTU updates."); 1131 1132 if (DTU) { 1133 DTU->applyUpdatesPermissive(Updates); 1134 DTU->deleteBB(BB); 1135 } else { 1136 BB->eraseFromParent(); // Delete the old basic block. 1137 } 1138 return true; 1139 } 1140 1141 static bool EliminateDuplicatePHINodesNaiveImpl(BasicBlock *BB) { 1142 // This implementation doesn't currently consider undef operands 1143 // specially. Theoretically, two phis which are identical except for 1144 // one having an undef where the other doesn't could be collapsed. 
1145 1146 bool Changed = false; 1147 1148 // Examine each PHI. 1149 // Note that increment of I must *NOT* be in the iteration_expression, since 1150 // we don't want to immediately advance when we restart from the beginning. 1151 for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I);) { 1152 ++I; 1153 // Is there an identical PHI node in this basic block? 1154 // Note that we only look in the upper square's triangle, 1155 // we already checked that the lower triangle PHI's aren't identical. 1156 for (auto J = I; PHINode *DuplicatePN = dyn_cast<PHINode>(J); ++J) { 1157 if (!DuplicatePN->isIdenticalToWhenDefined(PN)) 1158 continue; 1159 // A duplicate. Replace this PHI with the base PHI. 1160 ++NumPHICSEs; 1161 DuplicatePN->replaceAllUsesWith(PN); 1162 DuplicatePN->eraseFromParent(); 1163 Changed = true; 1164 1165 // The RAUW can change PHIs that we already visited. 1166 I = BB->begin(); 1167 break; // Start over from the beginning. 1168 } 1169 } 1170 return Changed; 1171 } 1172 1173 static bool EliminateDuplicatePHINodesSetBasedImpl(BasicBlock *BB) { 1174 // This implementation doesn't currently consider undef operands 1175 // specially. Theoretically, two phis which are identical except for 1176 // one having an undef where the other doesn't could be collapsed. 1177 1178 struct PHIDenseMapInfo { 1179 static PHINode *getEmptyKey() { 1180 return DenseMapInfo<PHINode *>::getEmptyKey(); 1181 } 1182 1183 static PHINode *getTombstoneKey() { 1184 return DenseMapInfo<PHINode *>::getTombstoneKey(); 1185 } 1186 1187 static bool isSentinel(PHINode *PN) { 1188 return PN == getEmptyKey() || PN == getTombstoneKey(); 1189 } 1190 1191 // WARNING: this logic must be kept in sync with 1192 // Instruction::isIdenticalToWhenDefined()! 1193 static unsigned getHashValueImpl(PHINode *PN) { 1194 // Compute a hash value on the operands. Instcombine will likely have 1195 // sorted them, which helps expose duplicates, but we have to check all 1196 // the operands to be safe in case instcombine hasn't run. 1197 return static_cast<unsigned>(hash_combine( 1198 hash_combine_range(PN->value_op_begin(), PN->value_op_end()), 1199 hash_combine_range(PN->block_begin(), PN->block_end()))); 1200 } 1201 1202 static unsigned getHashValue(PHINode *PN) { 1203 #ifndef NDEBUG 1204 // If -phicse-debug-hash was specified, return a constant -- this 1205 // will force all hashing to collide, so we'll exhaustively search 1206 // the table for a match, and the assertion in isEqual will fire if 1207 // there's a bug causing equal keys to hash differently. 1208 if (PHICSEDebugHash) 1209 return 0; 1210 #endif 1211 return getHashValueImpl(PN); 1212 } 1213 1214 static bool isEqualImpl(PHINode *LHS, PHINode *RHS) { 1215 if (isSentinel(LHS) || isSentinel(RHS)) 1216 return LHS == RHS; 1217 return LHS->isIdenticalTo(RHS); 1218 } 1219 1220 static bool isEqual(PHINode *LHS, PHINode *RHS) { 1221 // These comparisons are nontrivial, so assert that equality implies 1222 // hash equality (DenseMap demands this as an invariant). 1223 bool Result = isEqualImpl(LHS, RHS); 1224 assert(!Result || (isSentinel(LHS) && LHS == RHS) || 1225 getHashValueImpl(LHS) == getHashValueImpl(RHS)); 1226 return Result; 1227 } 1228 }; 1229 1230 // Set of unique PHINodes. 1231 DenseSet<PHINode *, PHIDenseMapInfo> PHISet; 1232 PHISet.reserve(4 * PHICSENumPHISmallSize); 1233 1234 // Examine each PHI. 
1235 bool Changed = false; 1236 for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I++);) { 1237 auto Inserted = PHISet.insert(PN); 1238 if (!Inserted.second) { 1239 // A duplicate. Replace this PHI with its duplicate. 1240 ++NumPHICSEs; 1241 PN->replaceAllUsesWith(*Inserted.first); 1242 PN->eraseFromParent(); 1243 Changed = true; 1244 1245 // The RAUW can change PHIs that we already visited. Start over from the 1246 // beginning. 1247 PHISet.clear(); 1248 I = BB->begin(); 1249 } 1250 } 1251 1252 return Changed; 1253 } 1254 1255 bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB) { 1256 if ( 1257 #ifndef NDEBUG 1258 !PHICSEDebugHash && 1259 #endif 1260 hasNItemsOrLess(BB->phis(), PHICSENumPHISmallSize)) 1261 return EliminateDuplicatePHINodesNaiveImpl(BB); 1262 return EliminateDuplicatePHINodesSetBasedImpl(BB); 1263 } 1264 1265 /// enforceKnownAlignment - If the specified pointer points to an object that 1266 /// we control, modify the object's alignment to PrefAlign. This isn't 1267 /// often possible though. If alignment is important, a more reliable approach 1268 /// is to simply align all global variables and allocation instructions to 1269 /// their preferred alignment from the beginning. 1270 static Align enforceKnownAlignment(Value *V, Align Alignment, Align PrefAlign, 1271 const DataLayout &DL) { 1272 assert(PrefAlign > Alignment); 1273 1274 V = V->stripPointerCasts(); 1275 1276 if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) { 1277 // TODO: ideally, computeKnownBits ought to have used 1278 // AllocaInst::getAlignment() in its computation already, making 1279 // the below max redundant. But, as it turns out, 1280 // stripPointerCasts recurses through infinite layers of bitcasts, 1281 // while computeKnownBits is not allowed to traverse more than 6 1282 // levels. 1283 Alignment = std::max(AI->getAlign(), Alignment); 1284 if (PrefAlign <= Alignment) 1285 return Alignment; 1286 1287 // If the preferred alignment is greater than the natural stack alignment 1288 // then don't round up. This avoids dynamic stack realignment. 1289 if (DL.exceedsNaturalStackAlignment(PrefAlign)) 1290 return Alignment; 1291 AI->setAlignment(PrefAlign); 1292 return PrefAlign; 1293 } 1294 1295 if (auto *GO = dyn_cast<GlobalObject>(V)) { 1296 // TODO: as above, this shouldn't be necessary. 1297 Alignment = max(GO->getAlign(), Alignment); 1298 if (PrefAlign <= Alignment) 1299 return Alignment; 1300 1301 // If there is a large requested alignment and we can, bump up the alignment 1302 // of the global. If the memory we set aside for the global may not be the 1303 // memory used by the final program then it is impossible for us to reliably 1304 // enforce the preferred alignment. 1305 if (!GO->canIncreaseAlignment()) 1306 return Alignment; 1307 1308 GO->setAlignment(PrefAlign); 1309 return PrefAlign; 1310 } 1311 1312 return Alignment; 1313 } 1314 1315 Align llvm::getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign, 1316 const DataLayout &DL, 1317 const Instruction *CxtI, 1318 AssumptionCache *AC, 1319 const DominatorTree *DT) { 1320 assert(V->getType()->isPointerTy() && 1321 "getOrEnforceKnownAlignment expects a pointer!"); 1322 1323 KnownBits Known = computeKnownBits(V, DL, 0, AC, CxtI, DT); 1324 unsigned TrailZ = Known.countMinTrailingZeros(); 1325 1326 // Avoid trouble with ridiculously large TrailZ values, such as 1327 // those computed from a null pointer. 1328 // LLVM doesn't support alignments larger than (1 << MaxAlignmentExponent). 
1329 TrailZ = std::min(TrailZ, +Value::MaxAlignmentExponent); 1330 1331 Align Alignment = Align(1ull << std::min(Known.getBitWidth() - 1, TrailZ)); 1332 1333 if (PrefAlign && *PrefAlign > Alignment) 1334 Alignment = enforceKnownAlignment(V, Alignment, *PrefAlign, DL); 1335 1336 // We don't need to make any adjustment. 1337 return Alignment; 1338 } 1339 1340 ///===---------------------------------------------------------------------===// 1341 /// Dbg Intrinsic utilities 1342 /// 1343 1344 /// See if there is a dbg.value intrinsic for DIVar for the PHI node. 1345 static bool PhiHasDebugValue(DILocalVariable *DIVar, 1346 DIExpression *DIExpr, 1347 PHINode *APN) { 1348 // Since we can't guarantee that the original dbg.declare instrinsic 1349 // is removed by LowerDbgDeclare(), we need to make sure that we are 1350 // not inserting the same dbg.value intrinsic over and over. 1351 SmallVector<DbgValueInst *, 1> DbgValues; 1352 findDbgValues(DbgValues, APN); 1353 for (auto *DVI : DbgValues) { 1354 assert(DVI->getValue() == APN); 1355 if ((DVI->getVariable() == DIVar) && (DVI->getExpression() == DIExpr)) 1356 return true; 1357 } 1358 return false; 1359 } 1360 1361 /// Check if the alloc size of \p ValTy is large enough to cover the variable 1362 /// (or fragment of the variable) described by \p DII. 1363 /// 1364 /// This is primarily intended as a helper for the different 1365 /// ConvertDebugDeclareToDebugValue functions. The dbg.declare/dbg.addr that is 1366 /// converted describes an alloca'd variable, so we need to use the 1367 /// alloc size of the value when doing the comparison. E.g. an i1 value will be 1368 /// identified as covering an n-bit fragment, if the store size of i1 is at 1369 /// least n bits. 1370 static bool valueCoversEntireFragment(Type *ValTy, DbgVariableIntrinsic *DII) { 1371 const DataLayout &DL = DII->getModule()->getDataLayout(); 1372 uint64_t ValueSize = DL.getTypeAllocSizeInBits(ValTy); 1373 if (auto FragmentSize = DII->getFragmentSizeInBits()) 1374 return ValueSize >= *FragmentSize; 1375 // We can't always calculate the size of the DI variable (e.g. if it is a 1376 // VLA). Try to use the size of the alloca that the dbg intrinsic describes 1377 // intead. 1378 if (DII->isAddressOfVariable()) 1379 if (auto *AI = dyn_cast_or_null<AllocaInst>(DII->getVariableLocation())) 1380 if (auto FragmentSize = AI->getAllocationSizeInBits(DL)) 1381 return ValueSize >= *FragmentSize; 1382 // Could not determine size of variable. Conservatively return false. 1383 return false; 1384 } 1385 1386 /// Produce a DebugLoc to use for each dbg.declare/inst pair that are promoted 1387 /// to a dbg.value. Because no machine insts can come from debug intrinsics, 1388 /// only the scope and inlinedAt is significant. Zero line numbers are used in 1389 /// case this DebugLoc leaks into any adjacent instructions. 1390 static DebugLoc getDebugValueLoc(DbgVariableIntrinsic *DII, Instruction *Src) { 1391 // Original dbg.declare must have a location. 1392 DebugLoc DeclareLoc = DII->getDebugLoc(); 1393 MDNode *Scope = DeclareLoc.getScope(); 1394 DILocation *InlinedAt = DeclareLoc.getInlinedAt(); 1395 // Produce an unknown location with the correct scope / inlinedAt fields. 1396 return DebugLoc::get(0, 0, Scope, InlinedAt); 1397 } 1398 1399 /// Inserts a llvm.dbg.value intrinsic before a store to an alloca'd value 1400 /// that has an associated llvm.dbg.declare or llvm.dbg.addr intrinsic. 
1401 void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII, 1402 StoreInst *SI, DIBuilder &Builder) { 1403 assert(DII->isAddressOfVariable()); 1404 auto *DIVar = DII->getVariable(); 1405 assert(DIVar && "Missing variable"); 1406 auto *DIExpr = DII->getExpression(); 1407 Value *DV = SI->getValueOperand(); 1408 1409 DebugLoc NewLoc = getDebugValueLoc(DII, SI); 1410 1411 if (!valueCoversEntireFragment(DV->getType(), DII)) { 1412 // FIXME: If storing to a part of the variable described by the dbg.declare, 1413 // then we want to insert a dbg.value for the corresponding fragment. 1414 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: " 1415 << *DII << '\n'); 1416 // For now, when there is a store to parts of the variable (but we do not 1417 // know which part) we insert an dbg.value instrinsic to indicate that we 1418 // know nothing about the variable's content. 1419 DV = UndefValue::get(DV->getType()); 1420 Builder.insertDbgValueIntrinsic(DV, DIVar, DIExpr, NewLoc, SI); 1421 return; 1422 } 1423 1424 Builder.insertDbgValueIntrinsic(DV, DIVar, DIExpr, NewLoc, SI); 1425 } 1426 1427 /// Inserts a llvm.dbg.value intrinsic before a load of an alloca'd value 1428 /// that has an associated llvm.dbg.declare or llvm.dbg.addr intrinsic. 1429 void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII, 1430 LoadInst *LI, DIBuilder &Builder) { 1431 auto *DIVar = DII->getVariable(); 1432 auto *DIExpr = DII->getExpression(); 1433 assert(DIVar && "Missing variable"); 1434 1435 if (!valueCoversEntireFragment(LI->getType(), DII)) { 1436 // FIXME: If only referring to a part of the variable described by the 1437 // dbg.declare, then we want to insert a dbg.value for the corresponding 1438 // fragment. 1439 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: " 1440 << *DII << '\n'); 1441 return; 1442 } 1443 1444 DebugLoc NewLoc = getDebugValueLoc(DII, nullptr); 1445 1446 // We are now tracking the loaded value instead of the address. In the 1447 // future if multi-location support is added to the IR, it might be 1448 // preferable to keep tracking both the loaded value and the original 1449 // address in case the alloca can not be elided. 1450 Instruction *DbgValue = Builder.insertDbgValueIntrinsic( 1451 LI, DIVar, DIExpr, NewLoc, (Instruction *)nullptr); 1452 DbgValue->insertAfter(LI); 1453 } 1454 1455 /// Inserts a llvm.dbg.value intrinsic after a phi that has an associated 1456 /// llvm.dbg.declare or llvm.dbg.addr intrinsic. 1457 void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII, 1458 PHINode *APN, DIBuilder &Builder) { 1459 auto *DIVar = DII->getVariable(); 1460 auto *DIExpr = DII->getExpression(); 1461 assert(DIVar && "Missing variable"); 1462 1463 if (PhiHasDebugValue(DIVar, DIExpr, APN)) 1464 return; 1465 1466 if (!valueCoversEntireFragment(APN->getType(), DII)) { 1467 // FIXME: If only referring to a part of the variable described by the 1468 // dbg.declare, then we want to insert a dbg.value for the corresponding 1469 // fragment. 1470 LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: " 1471 << *DII << '\n'); 1472 return; 1473 } 1474 1475 BasicBlock *BB = APN->getParent(); 1476 auto InsertionPt = BB->getFirstInsertionPt(); 1477 1478 DebugLoc NewLoc = getDebugValueLoc(DII, nullptr); 1479 1480 // The block may be a catchswitch block, which does not have a valid 1481 // insertion point. 1482 // FIXME: Insert dbg.value markers in the successors when appropriate. 
1483 if (InsertionPt != BB->end()) 1484 Builder.insertDbgValueIntrinsic(APN, DIVar, DIExpr, NewLoc, &*InsertionPt); 1485 } 1486 1487 /// Determine whether this alloca is either a VLA or an array. 1488 static bool isArray(AllocaInst *AI) { 1489 return AI->isArrayAllocation() || 1490 (AI->getAllocatedType() && AI->getAllocatedType()->isArrayTy()); 1491 } 1492 1493 /// Determine whether this alloca is a structure. 1494 static bool isStructure(AllocaInst *AI) { 1495 return AI->getAllocatedType() && AI->getAllocatedType()->isStructTy(); 1496 } 1497 1498 /// LowerDbgDeclare - Lowers llvm.dbg.declare intrinsics into appropriate set 1499 /// of llvm.dbg.value intrinsics. 1500 bool llvm::LowerDbgDeclare(Function &F) { 1501 bool Changed = false; 1502 DIBuilder DIB(*F.getParent(), /*AllowUnresolved*/ false); 1503 SmallVector<DbgDeclareInst *, 4> Dbgs; 1504 for (auto &FI : F) 1505 for (Instruction &BI : FI) 1506 if (auto DDI = dyn_cast<DbgDeclareInst>(&BI)) 1507 Dbgs.push_back(DDI); 1508 1509 if (Dbgs.empty()) 1510 return Changed; 1511 1512 for (auto &I : Dbgs) { 1513 DbgDeclareInst *DDI = I; 1514 AllocaInst *AI = dyn_cast_or_null<AllocaInst>(DDI->getAddress()); 1515 // If this is an alloca for a scalar variable, insert a dbg.value 1516 // at each load and store to the alloca and erase the dbg.declare. 1517 // The dbg.values allow tracking a variable even if it is not 1518 // stored on the stack, while the dbg.declare can only describe 1519 // the stack slot (and at a lexical-scope granularity). Later 1520 // passes will attempt to elide the stack slot. 1521 if (!AI || isArray(AI) || isStructure(AI)) 1522 continue; 1523 1524 // A volatile load/store means that the alloca can't be elided anyway. 1525 if (llvm::any_of(AI->users(), [](User *U) -> bool { 1526 if (LoadInst *LI = dyn_cast<LoadInst>(U)) 1527 return LI->isVolatile(); 1528 if (StoreInst *SI = dyn_cast<StoreInst>(U)) 1529 return SI->isVolatile(); 1530 return false; 1531 })) 1532 continue; 1533 1534 SmallVector<const Value *, 8> WorkList; 1535 WorkList.push_back(AI); 1536 while (!WorkList.empty()) { 1537 const Value *V = WorkList.pop_back_val(); 1538 for (auto &AIUse : V->uses()) { 1539 User *U = AIUse.getUser(); 1540 if (StoreInst *SI = dyn_cast<StoreInst>(U)) { 1541 if (AIUse.getOperandNo() == 1) 1542 ConvertDebugDeclareToDebugValue(DDI, SI, DIB); 1543 } else if (LoadInst *LI = dyn_cast<LoadInst>(U)) { 1544 ConvertDebugDeclareToDebugValue(DDI, LI, DIB); 1545 } else if (CallInst *CI = dyn_cast<CallInst>(U)) { 1546 // This is a call by-value or some other instruction that takes a 1547 // pointer to the variable. Insert a *value* intrinsic that describes 1548 // the variable by dereferencing the alloca. 1549 if (!CI->isLifetimeStartOrEnd()) { 1550 DebugLoc NewLoc = getDebugValueLoc(DDI, nullptr); 1551 auto *DerefExpr = 1552 DIExpression::append(DDI->getExpression(), dwarf::DW_OP_deref); 1553 DIB.insertDbgValueIntrinsic(AI, DDI->getVariable(), DerefExpr, 1554 NewLoc, CI); 1555 } 1556 } else if (BitCastInst *BI = dyn_cast<BitCastInst>(U)) { 1557 if (BI->getType()->isPointerTy()) 1558 WorkList.push_back(BI); 1559 } 1560 } 1561 } 1562 DDI->eraseFromParent(); 1563 Changed = true; 1564 } 1565 1566 if (Changed) 1567 for (BasicBlock &BB : F) 1568 RemoveRedundantDbgInstrs(&BB); 1569 1570 return Changed; 1571 } 1572 1573 /// Propagate dbg.value intrinsics through the newly inserted PHIs. 
1574 void llvm::insertDebugValuesForPHIs(BasicBlock *BB, 1575 SmallVectorImpl<PHINode *> &InsertedPHIs) { 1576 assert(BB && "No BasicBlock to clone dbg.value(s) from."); 1577 if (InsertedPHIs.size() == 0) 1578 return; 1579 1580 // Map existing PHI nodes to their dbg.values. 1581 ValueToValueMapTy DbgValueMap; 1582 for (auto &I : *BB) { 1583 if (auto DbgII = dyn_cast<DbgVariableIntrinsic>(&I)) { 1584 if (auto *Loc = dyn_cast_or_null<PHINode>(DbgII->getVariableLocation())) 1585 DbgValueMap.insert({Loc, DbgII}); 1586 } 1587 } 1588 if (DbgValueMap.size() == 0) 1589 return; 1590 1591 // Then iterate through the new PHIs and look to see if they use one of the 1592 // previously mapped PHIs. If so, insert a new dbg.value intrinsic that will 1593 // propagate the info through the new PHI. 1594 LLVMContext &C = BB->getContext(); 1595 for (auto PHI : InsertedPHIs) { 1596 BasicBlock *Parent = PHI->getParent(); 1597 // Avoid inserting an intrinsic into an EH block. 1598 if (Parent->getFirstNonPHI()->isEHPad()) 1599 continue; 1600 auto PhiMAV = MetadataAsValue::get(C, ValueAsMetadata::get(PHI)); 1601 for (auto VI : PHI->operand_values()) { 1602 auto V = DbgValueMap.find(VI); 1603 if (V != DbgValueMap.end()) { 1604 auto *DbgII = cast<DbgVariableIntrinsic>(V->second); 1605 Instruction *NewDbgII = DbgII->clone(); 1606 NewDbgII->setOperand(0, PhiMAV); 1607 auto InsertionPt = Parent->getFirstInsertionPt(); 1608 assert(InsertionPt != Parent->end() && "Ill-formed basic block"); 1609 NewDbgII->insertBefore(&*InsertionPt); 1610 } 1611 } 1612 } 1613 } 1614 1615 /// Finds all intrinsics declaring local variables as living in the memory that 1616 /// 'V' points to. This may include a mix of dbg.declare and 1617 /// dbg.addr intrinsics. 1618 TinyPtrVector<DbgVariableIntrinsic *> llvm::FindDbgAddrUses(Value *V) { 1619 // This function is hot. Check whether the value has any metadata to avoid a 1620 // DenseMap lookup. 1621 if (!V->isUsedByMetadata()) 1622 return {}; 1623 auto *L = LocalAsMetadata::getIfExists(V); 1624 if (!L) 1625 return {}; 1626 auto *MDV = MetadataAsValue::getIfExists(V->getContext(), L); 1627 if (!MDV) 1628 return {}; 1629 1630 TinyPtrVector<DbgVariableIntrinsic *> Declares; 1631 for (User *U : MDV->users()) { 1632 if (auto *DII = dyn_cast<DbgVariableIntrinsic>(U)) 1633 if (DII->isAddressOfVariable()) 1634 Declares.push_back(DII); 1635 } 1636 1637 return Declares; 1638 } 1639 1640 TinyPtrVector<DbgDeclareInst *> llvm::FindDbgDeclareUses(Value *V) { 1641 TinyPtrVector<DbgDeclareInst *> DDIs; 1642 for (DbgVariableIntrinsic *DVI : FindDbgAddrUses(V)) 1643 if (auto *DDI = dyn_cast<DbgDeclareInst>(DVI)) 1644 DDIs.push_back(DDI); 1645 return DDIs; 1646 } 1647 1648 void llvm::findDbgValues(SmallVectorImpl<DbgValueInst *> &DbgValues, Value *V) { 1649 // This function is hot. Check whether the value has any metadata to avoid a 1650 // DenseMap lookup. 1651 if (!V->isUsedByMetadata()) 1652 return; 1653 if (auto *L = LocalAsMetadata::getIfExists(V)) 1654 if (auto *MDV = MetadataAsValue::getIfExists(V->getContext(), L)) 1655 for (User *U : MDV->users()) 1656 if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(U)) 1657 DbgValues.push_back(DVI); 1658 } 1659 1660 void llvm::findDbgUsers(SmallVectorImpl<DbgVariableIntrinsic *> &DbgUsers, 1661 Value *V) { 1662 // This function is hot. Check whether the value has any metadata to avoid a 1663 // DenseMap lookup. 
  if (!V->isUsedByMetadata())
    return;
  if (auto *L = LocalAsMetadata::getIfExists(V))
    if (auto *MDV = MetadataAsValue::getIfExists(V->getContext(), L))
      for (User *U : MDV->users())
        if (DbgVariableIntrinsic *DII = dyn_cast<DbgVariableIntrinsic>(U))
          DbgUsers.push_back(DII);
}

bool llvm::replaceDbgDeclare(Value *Address, Value *NewAddress,
                             DIBuilder &Builder, uint8_t DIExprFlags,
                             int Offset) {
  auto DbgAddrs = FindDbgAddrUses(Address);
  for (DbgVariableIntrinsic *DII : DbgAddrs) {
    DebugLoc Loc = DII->getDebugLoc();
    auto *DIVar = DII->getVariable();
    auto *DIExpr = DII->getExpression();
    assert(DIVar && "Missing variable");
    DIExpr = DIExpression::prepend(DIExpr, DIExprFlags, Offset);
    // Insert llvm.dbg.declare immediately before DII, and remove old
    // llvm.dbg.declare.
    Builder.insertDeclare(NewAddress, DIVar, DIExpr, Loc, DII);
    DII->eraseFromParent();
  }
  return !DbgAddrs.empty();
}

static void replaceOneDbgValueForAlloca(DbgValueInst *DVI, Value *NewAddress,
                                        DIBuilder &Builder, int Offset) {
  DebugLoc Loc = DVI->getDebugLoc();
  auto *DIVar = DVI->getVariable();
  auto *DIExpr = DVI->getExpression();
  assert(DIVar && "Missing variable");

  // This is an alloca-based llvm.dbg.value. The first thing it should do with
  // the alloca pointer is dereference it. Otherwise we don't know how to
  // handle it and give up.
  if (!DIExpr || DIExpr->getNumElements() < 1 ||
      DIExpr->getElement(0) != dwarf::DW_OP_deref)
    return;

  // Insert the offset before the first deref.
  // We could just change the offset argument of dbg.value, but it's
  // unsigned...
  if (Offset)
    DIExpr = DIExpression::prepend(DIExpr, 0, Offset);

  Builder.insertDbgValueIntrinsic(NewAddress, DIVar, DIExpr, Loc, DVI);
  DVI->eraseFromParent();
}

void llvm::replaceDbgValueForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
                                    DIBuilder &Builder, int Offset) {
  if (auto *L = LocalAsMetadata::getIfExists(AI))
    if (auto *MDV = MetadataAsValue::getIfExists(AI->getContext(), L))
      for (auto UI = MDV->use_begin(), UE = MDV->use_end(); UI != UE;) {
        Use &U = *UI++;
        if (auto *DVI = dyn_cast<DbgValueInst>(U.getUser()))
          replaceOneDbgValueForAlloca(DVI, NewAllocaAddress, Builder, Offset);
      }
}

/// Wrap \p V in a ValueAsMetadata instance.
static MetadataAsValue *wrapValueInMetadata(LLVMContext &C, Value *V) {
  return MetadataAsValue::get(C, ValueAsMetadata::get(V));
}

/// Salvage debug information for \p I where possible; if salvaging fails,
/// point the debug users of \p I at undef instead.
void llvm::salvageDebugInfo(Instruction &I) {
  SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
  findDbgUsers(DbgUsers, &I);
  salvageDebugInfoForDbgValues(I, DbgUsers);
}

void llvm::salvageDebugInfoForDbgValues(
    Instruction &I, ArrayRef<DbgVariableIntrinsic *> DbgUsers) {
  auto &Ctx = I.getContext();
  bool Salvaged = false;
  auto wrapMD = [&](Value *V) { return wrapValueInMetadata(Ctx, V); };

  for (auto *DII : DbgUsers) {
    // Do not add DW_OP_stack_value for DbgDeclare and DbgAddr, because they
    // are implicitly pointing out the value as a DWARF memory location
    // description.
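    // A dbg.value, by contrast, describes the value itself, so a salvaged
    // expression that computes the value must end in DW_OP_stack_value to
    // remain a well-formed DWARF location description.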
    bool StackValue = isa<DbgValueInst>(DII);

    DIExpression *DIExpr =
        salvageDebugInfoImpl(I, DII->getExpression(), StackValue);

    // salvageDebugInfoImpl either fails on the first element of DbgUsers or
    // on none of them: whether the rewrite succeeds depends only on I, not
    // on the individual user's expression.
    if (!DIExpr)
      break;

    DII->setOperand(0, wrapMD(I.getOperand(0)));
    DII->setOperand(2, MetadataAsValue::get(Ctx, DIExpr));
    LLVM_DEBUG(dbgs() << "SALVAGE: " << *DII << '\n');
    Salvaged = true;
  }

  if (Salvaged)
    return;

  for (auto *DII : DbgUsers) {
    Value *Undef = UndefValue::get(I.getType());
    DII->setOperand(0, MetadataAsValue::get(DII->getContext(),
                                            ValueAsMetadata::get(Undef)));
  }
}

DIExpression *llvm::salvageDebugInfoImpl(Instruction &I,
                                         DIExpression *SrcDIExpr,
                                         bool WithStackValue) {
  auto &M = *I.getModule();
  auto &DL = M.getDataLayout();

  // Apply a vector of opcodes to the source DIExpression.
  auto doSalvage = [&](SmallVectorImpl<uint64_t> &Ops) -> DIExpression * {
    DIExpression *DIExpr = SrcDIExpr;
    if (!Ops.empty()) {
      DIExpr = DIExpression::prependOpcodes(DIExpr, Ops, WithStackValue);
    }
    return DIExpr;
  };

  // Apply the given offset to the source DIExpression.
  auto applyOffset = [&](uint64_t Offset) -> DIExpression * {
    SmallVector<uint64_t, 8> Ops;
    DIExpression::appendOffset(Ops, Offset);
    return doSalvage(Ops);
  };

  // Initializer-list helper for applying operators to the source DIExpression.
  auto applyOps = [&](ArrayRef<uint64_t> Opcodes) -> DIExpression * {
    SmallVector<uint64_t, 8> Ops(Opcodes.begin(), Opcodes.end());
    return doSalvage(Ops);
  };

  if (auto *CI = dyn_cast<CastInst>(&I)) {
    // No-op casts are irrelevant for debug info.
    if (CI->isNoopCast(DL))
      return SrcDIExpr;

    Type *Type = CI->getType();
    // Casts other than Trunc, SExt, or ZExt to scalar types cannot be
    // salvaged.
    if (Type->isVectorTy() ||
        !(isa<TruncInst>(&I) || isa<SExtInst>(&I) || isa<ZExtInst>(&I)))
      return nullptr;

    Value *FromValue = CI->getOperand(0);
    unsigned FromTypeBitSize = FromValue->getType()->getScalarSizeInBits();
    unsigned ToTypeBitSize = Type->getScalarSizeInBits();

    return applyOps(DIExpression::getExtOps(FromTypeBitSize, ToTypeBitSize,
                                            isa<SExtInst>(&I)));
  }

  if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
    unsigned BitWidth =
        M.getDataLayout().getIndexSizeInBits(GEP->getPointerAddressSpace());
    // Rewrite a constant GEP into a DIExpression.
    APInt Offset(BitWidth, 0);
    if (GEP->accumulateConstantOffset(M.getDataLayout(), Offset)) {
      return applyOffset(Offset.getSExtValue());
    } else {
      return nullptr;
    }
  } else if (auto *BI = dyn_cast<BinaryOperator>(&I)) {
    // Rewrite binary operations with constant integer operands.
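    // For example, `%a = add i32 %x, 4` with a dbg.value user of %a can be
    // salvaged by pointing the dbg.value at %x and prepending
    // `DW_OP_plus_uconst 4` (plus DW_OP_stack_value for plain dbg.values) to
    // its expression, which is what applyOffset above produces.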
1833 auto *ConstInt = dyn_cast<ConstantInt>(I.getOperand(1)); 1834 if (!ConstInt || ConstInt->getBitWidth() > 64) 1835 return nullptr; 1836 1837 uint64_t Val = ConstInt->getSExtValue(); 1838 switch (BI->getOpcode()) { 1839 case Instruction::Add: 1840 return applyOffset(Val); 1841 case Instruction::Sub: 1842 return applyOffset(-int64_t(Val)); 1843 case Instruction::Mul: 1844 return applyOps({dwarf::DW_OP_constu, Val, dwarf::DW_OP_mul}); 1845 case Instruction::SDiv: 1846 return applyOps({dwarf::DW_OP_constu, Val, dwarf::DW_OP_div}); 1847 case Instruction::SRem: 1848 return applyOps({dwarf::DW_OP_constu, Val, dwarf::DW_OP_mod}); 1849 case Instruction::Or: 1850 return applyOps({dwarf::DW_OP_constu, Val, dwarf::DW_OP_or}); 1851 case Instruction::And: 1852 return applyOps({dwarf::DW_OP_constu, Val, dwarf::DW_OP_and}); 1853 case Instruction::Xor: 1854 return applyOps({dwarf::DW_OP_constu, Val, dwarf::DW_OP_xor}); 1855 case Instruction::Shl: 1856 return applyOps({dwarf::DW_OP_constu, Val, dwarf::DW_OP_shl}); 1857 case Instruction::LShr: 1858 return applyOps({dwarf::DW_OP_constu, Val, dwarf::DW_OP_shr}); 1859 case Instruction::AShr: 1860 return applyOps({dwarf::DW_OP_constu, Val, dwarf::DW_OP_shra}); 1861 default: 1862 // TODO: Salvage constants from each kind of binop we know about. 1863 return nullptr; 1864 } 1865 // *Not* to do: we should not attempt to salvage load instructions, 1866 // because the validity and lifetime of a dbg.value containing 1867 // DW_OP_deref becomes difficult to analyze. See PR40628 for examples. 1868 } 1869 return nullptr; 1870 } 1871 1872 /// A replacement for a dbg.value expression. 1873 using DbgValReplacement = Optional<DIExpression *>; 1874 1875 /// Point debug users of \p From to \p To using exprs given by \p RewriteExpr, 1876 /// possibly moving/undefing users to prevent use-before-def. Returns true if 1877 /// changes are made. 1878 static bool rewriteDebugUsers( 1879 Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT, 1880 function_ref<DbgValReplacement(DbgVariableIntrinsic &DII)> RewriteExpr) { 1881 // Find debug users of From. 1882 SmallVector<DbgVariableIntrinsic *, 1> Users; 1883 findDbgUsers(Users, &From); 1884 if (Users.empty()) 1885 return false; 1886 1887 // Prevent use-before-def of To. 1888 bool Changed = false; 1889 SmallPtrSet<DbgVariableIntrinsic *, 1> UndefOrSalvage; 1890 if (isa<Instruction>(&To)) { 1891 bool DomPointAfterFrom = From.getNextNonDebugInstruction() == &DomPoint; 1892 1893 for (auto *DII : Users) { 1894 // It's common to see a debug user between From and DomPoint. Move it 1895 // after DomPoint to preserve the variable update without any reordering. 1896 if (DomPointAfterFrom && DII->getNextNonDebugInstruction() == &DomPoint) { 1897 LLVM_DEBUG(dbgs() << "MOVE: " << *DII << '\n'); 1898 DII->moveAfter(&DomPoint); 1899 Changed = true; 1900 1901 // Users which otherwise aren't dominated by the replacement value must 1902 // be salvaged or deleted. 1903 } else if (!DT.dominates(&DomPoint, DII)) { 1904 UndefOrSalvage.insert(DII); 1905 } 1906 } 1907 } 1908 1909 // Update debug users without use-before-def risk. 
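  // Each remaining user is repointed at To, with its DIExpression rewritten
  // by RewriteExpr; a None replacement leaves that particular user unchanged.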
  for (auto *DII : Users) {
    if (UndefOrSalvage.count(DII))
      continue;

    LLVMContext &Ctx = DII->getContext();
    DbgValReplacement DVR = RewriteExpr(*DII);
    if (!DVR)
      continue;

    DII->setOperand(0, wrapValueInMetadata(Ctx, &To));
    DII->setOperand(2, MetadataAsValue::get(Ctx, *DVR));
    LLVM_DEBUG(dbgs() << "REWRITE: " << *DII << '\n');
    Changed = true;
  }

  if (!UndefOrSalvage.empty()) {
    // Try to salvage the remaining debug users.
    salvageDebugInfo(From);
    Changed = true;
  }

  return Changed;
}

/// Check if a bitcast from a value of type \p FromTy to type \p ToTy would
/// losslessly preserve the bits and semantics of the value. This predicate is
/// symmetric, i.e., swapping \p FromTy and \p ToTy should give the same
/// result.
///
/// Note that Type::canLosslesslyBitCastTo is not suitable here because it
/// allows semantically inequivalent bitcasts, such as <2 x i64> -> <4 x i32>,
/// and also does not allow lossless pointer <-> integer conversions.
static bool isBitCastSemanticsPreserving(const DataLayout &DL, Type *FromTy,
                                         Type *ToTy) {
  // Trivially compatible types.
  if (FromTy == ToTy)
    return true;

  // Handle compatible pointer <-> integer conversions.
  if (FromTy->isIntOrPtrTy() && ToTy->isIntOrPtrTy()) {
    bool SameSize = DL.getTypeSizeInBits(FromTy) == DL.getTypeSizeInBits(ToTy);
    bool LosslessConversion = !DL.isNonIntegralPointerType(FromTy) &&
                              !DL.isNonIntegralPointerType(ToTy);
    return SameSize && LosslessConversion;
  }

  // TODO: This is not exhaustive.
  return false;
}

bool llvm::replaceAllDbgUsesWith(Instruction &From, Value &To,
                                 Instruction &DomPoint, DominatorTree &DT) {
  // Exit early if From has no debug users.
  if (!From.isUsedByMetadata())
    return false;

  assert(&From != &To && "Can't replace something with itself");

  Type *FromTy = From.getType();
  Type *ToTy = To.getType();

  auto Identity = [&](DbgVariableIntrinsic &DII) -> DbgValReplacement {
    return DII.getExpression();
  };

  // Handle no-op conversions.
  Module &M = *From.getModule();
  const DataLayout &DL = M.getDataLayout();
  if (isBitCastSemanticsPreserving(DL, FromTy, ToTy))
    return rewriteDebugUsers(From, To, DomPoint, DT, Identity);

  // Handle integer-to-integer widening and narrowing.
  // FIXME: Use DW_OP_convert when it's available everywhere.
  if (FromTy->isIntegerTy() && ToTy->isIntegerTy()) {
    uint64_t FromBits = FromTy->getPrimitiveSizeInBits();
    uint64_t ToBits = ToTy->getPrimitiveSizeInBits();
    assert(FromBits != ToBits && "Unexpected no-op conversion");

    // When the width of the result grows, assume that a debugger will only
    // access the low `FromBits` bits when inspecting the source variable.
    if (FromBits < ToBits)
      return rewriteDebugUsers(From, To, DomPoint, DT, Identity);

    // The width of the result has shrunk. Use sign/zero extension to describe
    // the source variable's high bits.
    auto SignOrZeroExt = [&](DbgVariableIntrinsic &DII) -> DbgValReplacement {
      DILocalVariable *Var = DII.getVariable();

      // Without knowing signedness, sign/zero extension isn't possible.
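      // (Signedness comes from the variable's base type; when it is absent,
      // SignOrZeroExt returns None and this user is left unchanged.)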
1998 auto Signedness = Var->getSignedness(); 1999 if (!Signedness) 2000 return None; 2001 2002 bool Signed = *Signedness == DIBasicType::Signedness::Signed; 2003 return DIExpression::appendExt(DII.getExpression(), ToBits, FromBits, 2004 Signed); 2005 }; 2006 return rewriteDebugUsers(From, To, DomPoint, DT, SignOrZeroExt); 2007 } 2008 2009 // TODO: Floating-point conversions, vectors. 2010 return false; 2011 } 2012 2013 std::pair<unsigned, unsigned> 2014 llvm::removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB) { 2015 unsigned NumDeadInst = 0; 2016 unsigned NumDeadDbgInst = 0; 2017 // Delete the instructions backwards, as it has a reduced likelihood of 2018 // having to update as many def-use and use-def chains. 2019 Instruction *EndInst = BB->getTerminator(); // Last not to be deleted. 2020 while (EndInst != &BB->front()) { 2021 // Delete the next to last instruction. 2022 Instruction *Inst = &*--EndInst->getIterator(); 2023 if (!Inst->use_empty() && !Inst->getType()->isTokenTy()) 2024 Inst->replaceAllUsesWith(UndefValue::get(Inst->getType())); 2025 if (Inst->isEHPad() || Inst->getType()->isTokenTy()) { 2026 EndInst = Inst; 2027 continue; 2028 } 2029 if (isa<DbgInfoIntrinsic>(Inst)) 2030 ++NumDeadDbgInst; 2031 else 2032 ++NumDeadInst; 2033 Inst->eraseFromParent(); 2034 } 2035 return {NumDeadInst, NumDeadDbgInst}; 2036 } 2037 2038 unsigned llvm::changeToUnreachable(Instruction *I, bool UseLLVMTrap, 2039 bool PreserveLCSSA, DomTreeUpdater *DTU, 2040 MemorySSAUpdater *MSSAU) { 2041 BasicBlock *BB = I->getParent(); 2042 std::vector <DominatorTree::UpdateType> Updates; 2043 2044 if (MSSAU) 2045 MSSAU->changeToUnreachable(I); 2046 2047 // Loop over all of the successors, removing BB's entry from any PHI 2048 // nodes. 2049 if (DTU) 2050 Updates.reserve(BB->getTerminator()->getNumSuccessors()); 2051 for (BasicBlock *Successor : successors(BB)) { 2052 Successor->removePredecessor(BB, PreserveLCSSA); 2053 if (DTU) 2054 Updates.push_back({DominatorTree::Delete, BB, Successor}); 2055 } 2056 // Insert a call to llvm.trap right before this. This turns the undefined 2057 // behavior into a hard fail instead of falling through into random code. 2058 if (UseLLVMTrap) { 2059 Function *TrapFn = 2060 Intrinsic::getDeclaration(BB->getParent()->getParent(), Intrinsic::trap); 2061 CallInst *CallTrap = CallInst::Create(TrapFn, "", I); 2062 CallTrap->setDebugLoc(I->getDebugLoc()); 2063 } 2064 auto *UI = new UnreachableInst(I->getContext(), I); 2065 UI->setDebugLoc(I->getDebugLoc()); 2066 2067 // All instructions after this are dead. 2068 unsigned NumInstrsRemoved = 0; 2069 BasicBlock::iterator BBI = I->getIterator(), BBE = BB->end(); 2070 while (BBI != BBE) { 2071 if (!BBI->use_empty()) 2072 BBI->replaceAllUsesWith(UndefValue::get(BBI->getType())); 2073 BB->getInstList().erase(BBI++); 2074 ++NumInstrsRemoved; 2075 } 2076 if (DTU) 2077 DTU->applyUpdatesPermissive(Updates); 2078 return NumInstrsRemoved; 2079 } 2080 2081 CallInst *llvm::createCallMatchingInvoke(InvokeInst *II) { 2082 SmallVector<Value *, 8> Args(II->arg_begin(), II->arg_end()); 2083 SmallVector<OperandBundleDef, 1> OpBundles; 2084 II->getOperandBundlesAsDefs(OpBundles); 2085 CallInst *NewCall = CallInst::Create(II->getFunctionType(), 2086 II->getCalledOperand(), Args, OpBundles); 2087 NewCall->setCallingConv(II->getCallingConv()); 2088 NewCall->setAttributes(II->getAttributes()); 2089 NewCall->setDebugLoc(II->getDebugLoc()); 2090 NewCall->copyMetadata(*II); 2091 2092 // If the invoke had profile metadata, try converting them for CallInst. 
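  // Branch weights on an invoke are per-successor, but a call has no
  // successors, so only the total weight can be carried over, and only if it
  // fits in 32 bits (see below).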
2093 uint64_t TotalWeight; 2094 if (NewCall->extractProfTotalWeight(TotalWeight)) { 2095 // Set the total weight if it fits into i32, otherwise reset. 2096 MDBuilder MDB(NewCall->getContext()); 2097 auto NewWeights = uint32_t(TotalWeight) != TotalWeight 2098 ? nullptr 2099 : MDB.createBranchWeights({uint32_t(TotalWeight)}); 2100 NewCall->setMetadata(LLVMContext::MD_prof, NewWeights); 2101 } 2102 2103 return NewCall; 2104 } 2105 2106 /// changeToCall - Convert the specified invoke into a normal call. 2107 void llvm::changeToCall(InvokeInst *II, DomTreeUpdater *DTU) { 2108 CallInst *NewCall = createCallMatchingInvoke(II); 2109 NewCall->takeName(II); 2110 NewCall->insertBefore(II); 2111 II->replaceAllUsesWith(NewCall); 2112 2113 // Follow the call by a branch to the normal destination. 2114 BasicBlock *NormalDestBB = II->getNormalDest(); 2115 BranchInst::Create(NormalDestBB, II); 2116 2117 // Update PHI nodes in the unwind destination 2118 BasicBlock *BB = II->getParent(); 2119 BasicBlock *UnwindDestBB = II->getUnwindDest(); 2120 UnwindDestBB->removePredecessor(BB); 2121 II->eraseFromParent(); 2122 if (DTU) 2123 DTU->applyUpdatesPermissive({{DominatorTree::Delete, BB, UnwindDestBB}}); 2124 } 2125 2126 BasicBlock *llvm::changeToInvokeAndSplitBasicBlock(CallInst *CI, 2127 BasicBlock *UnwindEdge) { 2128 BasicBlock *BB = CI->getParent(); 2129 2130 // Convert this function call into an invoke instruction. First, split the 2131 // basic block. 2132 BasicBlock *Split = 2133 BB->splitBasicBlock(CI->getIterator(), CI->getName() + ".noexc"); 2134 2135 // Delete the unconditional branch inserted by splitBasicBlock 2136 BB->getInstList().pop_back(); 2137 2138 // Create the new invoke instruction. 2139 SmallVector<Value *, 8> InvokeArgs(CI->arg_begin(), CI->arg_end()); 2140 SmallVector<OperandBundleDef, 1> OpBundles; 2141 2142 CI->getOperandBundlesAsDefs(OpBundles); 2143 2144 // Note: we're round tripping operand bundles through memory here, and that 2145 // can potentially be avoided with a cleverer API design that we do not have 2146 // as of this time. 2147 2148 InvokeInst *II = 2149 InvokeInst::Create(CI->getFunctionType(), CI->getCalledOperand(), Split, 2150 UnwindEdge, InvokeArgs, OpBundles, CI->getName(), BB); 2151 II->setDebugLoc(CI->getDebugLoc()); 2152 II->setCallingConv(CI->getCallingConv()); 2153 II->setAttributes(CI->getAttributes()); 2154 2155 // Make sure that anything using the call now uses the invoke! This also 2156 // updates the CallGraph if present, because it uses a WeakTrackingVH. 2157 CI->replaceAllUsesWith(II); 2158 2159 // Delete the original call 2160 Split->getInstList().pop_front(); 2161 return Split; 2162 } 2163 2164 static bool markAliveBlocks(Function &F, 2165 SmallPtrSetImpl<BasicBlock *> &Reachable, 2166 DomTreeUpdater *DTU = nullptr) { 2167 SmallVector<BasicBlock*, 128> Worklist; 2168 BasicBlock *BB = &F.front(); 2169 Worklist.push_back(BB); 2170 Reachable.insert(BB); 2171 bool Changed = false; 2172 do { 2173 BB = Worklist.pop_back_val(); 2174 2175 // Do a quick scan of the basic block, turning any obviously unreachable 2176 // instructions into LLVM unreachable insts. The instruction combining pass 2177 // canonicalizes unreachable insts into stores to null or undef. 2178 for (Instruction &I : *BB) { 2179 if (auto *CI = dyn_cast<CallInst>(&I)) { 2180 Value *Callee = CI->getCalledOperand(); 2181 // Handle intrinsic calls. 
2182 if (Function *F = dyn_cast<Function>(Callee)) { 2183 auto IntrinsicID = F->getIntrinsicID(); 2184 // Assumptions that are known to be false are equivalent to 2185 // unreachable. Also, if the condition is undefined, then we make the 2186 // choice most beneficial to the optimizer, and choose that to also be 2187 // unreachable. 2188 if (IntrinsicID == Intrinsic::assume) { 2189 if (match(CI->getArgOperand(0), m_CombineOr(m_Zero(), m_Undef()))) { 2190 // Don't insert a call to llvm.trap right before the unreachable. 2191 changeToUnreachable(CI, false, false, DTU); 2192 Changed = true; 2193 break; 2194 } 2195 } else if (IntrinsicID == Intrinsic::experimental_guard) { 2196 // A call to the guard intrinsic bails out of the current 2197 // compilation unit if the predicate passed to it is false. If the 2198 // predicate is a constant false, then we know the guard will bail 2199 // out of the current compile unconditionally, so all code following 2200 // it is dead. 2201 // 2202 // Note: unlike in llvm.assume, it is not "obviously profitable" for 2203 // guards to treat `undef` as `false` since a guard on `undef` can 2204 // still be useful for widening. 2205 if (match(CI->getArgOperand(0), m_Zero())) 2206 if (!isa<UnreachableInst>(CI->getNextNode())) { 2207 changeToUnreachable(CI->getNextNode(), /*UseLLVMTrap=*/false, 2208 false, DTU); 2209 Changed = true; 2210 break; 2211 } 2212 } 2213 } else if ((isa<ConstantPointerNull>(Callee) && 2214 !NullPointerIsDefined(CI->getFunction())) || 2215 isa<UndefValue>(Callee)) { 2216 changeToUnreachable(CI, /*UseLLVMTrap=*/false, false, DTU); 2217 Changed = true; 2218 break; 2219 } 2220 if (CI->doesNotReturn() && !CI->isMustTailCall()) { 2221 // If we found a call to a no-return function, insert an unreachable 2222 // instruction after it. Make sure there isn't *already* one there 2223 // though. 2224 if (!isa<UnreachableInst>(CI->getNextNode())) { 2225 // Don't insert a call to llvm.trap right before the unreachable. 2226 changeToUnreachable(CI->getNextNode(), false, false, DTU); 2227 Changed = true; 2228 } 2229 break; 2230 } 2231 } else if (auto *SI = dyn_cast<StoreInst>(&I)) { 2232 // Store to undef and store to null are undefined and used to signal 2233 // that they should be changed to unreachable by passes that can't 2234 // modify the CFG. 2235 2236 // Don't touch volatile stores. 2237 if (SI->isVolatile()) continue; 2238 2239 Value *Ptr = SI->getOperand(1); 2240 2241 if (isa<UndefValue>(Ptr) || 2242 (isa<ConstantPointerNull>(Ptr) && 2243 !NullPointerIsDefined(SI->getFunction(), 2244 SI->getPointerAddressSpace()))) { 2245 changeToUnreachable(SI, true, false, DTU); 2246 Changed = true; 2247 break; 2248 } 2249 } 2250 } 2251 2252 Instruction *Terminator = BB->getTerminator(); 2253 if (auto *II = dyn_cast<InvokeInst>(Terminator)) { 2254 // Turn invokes that call 'nounwind' functions into ordinary calls. 2255 Value *Callee = II->getCalledOperand(); 2256 if ((isa<ConstantPointerNull>(Callee) && 2257 !NullPointerIsDefined(BB->getParent())) || 2258 isa<UndefValue>(Callee)) { 2259 changeToUnreachable(II, true, false, DTU); 2260 Changed = true; 2261 } else if (II->doesNotThrow() && canSimplifyInvokeNoUnwind(&F)) { 2262 if (II->use_empty() && II->onlyReadsMemory()) { 2263 // jump to the normal destination branch. 
2264 BasicBlock *NormalDestBB = II->getNormalDest(); 2265 BasicBlock *UnwindDestBB = II->getUnwindDest(); 2266 BranchInst::Create(NormalDestBB, II); 2267 UnwindDestBB->removePredecessor(II->getParent()); 2268 II->eraseFromParent(); 2269 if (DTU) 2270 DTU->applyUpdatesPermissive( 2271 {{DominatorTree::Delete, BB, UnwindDestBB}}); 2272 } else 2273 changeToCall(II, DTU); 2274 Changed = true; 2275 } 2276 } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Terminator)) { 2277 // Remove catchpads which cannot be reached. 2278 struct CatchPadDenseMapInfo { 2279 static CatchPadInst *getEmptyKey() { 2280 return DenseMapInfo<CatchPadInst *>::getEmptyKey(); 2281 } 2282 2283 static CatchPadInst *getTombstoneKey() { 2284 return DenseMapInfo<CatchPadInst *>::getTombstoneKey(); 2285 } 2286 2287 static unsigned getHashValue(CatchPadInst *CatchPad) { 2288 return static_cast<unsigned>(hash_combine_range( 2289 CatchPad->value_op_begin(), CatchPad->value_op_end())); 2290 } 2291 2292 static bool isEqual(CatchPadInst *LHS, CatchPadInst *RHS) { 2293 if (LHS == getEmptyKey() || LHS == getTombstoneKey() || 2294 RHS == getEmptyKey() || RHS == getTombstoneKey()) 2295 return LHS == RHS; 2296 return LHS->isIdenticalTo(RHS); 2297 } 2298 }; 2299 2300 // Set of unique CatchPads. 2301 SmallDenseMap<CatchPadInst *, detail::DenseSetEmpty, 4, 2302 CatchPadDenseMapInfo, detail::DenseSetPair<CatchPadInst *>> 2303 HandlerSet; 2304 detail::DenseSetEmpty Empty; 2305 for (CatchSwitchInst::handler_iterator I = CatchSwitch->handler_begin(), 2306 E = CatchSwitch->handler_end(); 2307 I != E; ++I) { 2308 BasicBlock *HandlerBB = *I; 2309 auto *CatchPad = cast<CatchPadInst>(HandlerBB->getFirstNonPHI()); 2310 if (!HandlerSet.insert({CatchPad, Empty}).second) { 2311 CatchSwitch->removeHandler(I); 2312 --I; 2313 --E; 2314 Changed = true; 2315 } 2316 } 2317 } 2318 2319 Changed |= ConstantFoldTerminator(BB, true, nullptr, DTU); 2320 for (BasicBlock *Successor : successors(BB)) 2321 if (Reachable.insert(Successor).second) 2322 Worklist.push_back(Successor); 2323 } while (!Worklist.empty()); 2324 return Changed; 2325 } 2326 2327 void llvm::removeUnwindEdge(BasicBlock *BB, DomTreeUpdater *DTU) { 2328 Instruction *TI = BB->getTerminator(); 2329 2330 if (auto *II = dyn_cast<InvokeInst>(TI)) { 2331 changeToCall(II, DTU); 2332 return; 2333 } 2334 2335 Instruction *NewTI; 2336 BasicBlock *UnwindDest; 2337 2338 if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) { 2339 NewTI = CleanupReturnInst::Create(CRI->getCleanupPad(), nullptr, CRI); 2340 UnwindDest = CRI->getUnwindDest(); 2341 } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(TI)) { 2342 auto *NewCatchSwitch = CatchSwitchInst::Create( 2343 CatchSwitch->getParentPad(), nullptr, CatchSwitch->getNumHandlers(), 2344 CatchSwitch->getName(), CatchSwitch); 2345 for (BasicBlock *PadBB : CatchSwitch->handlers()) 2346 NewCatchSwitch->addHandler(PadBB); 2347 2348 NewTI = NewCatchSwitch; 2349 UnwindDest = CatchSwitch->getUnwindDest(); 2350 } else { 2351 llvm_unreachable("Could not find unwind successor"); 2352 } 2353 2354 NewTI->takeName(TI); 2355 NewTI->setDebugLoc(TI->getDebugLoc()); 2356 UnwindDest->removePredecessor(BB); 2357 TI->replaceAllUsesWith(NewTI); 2358 TI->eraseFromParent(); 2359 if (DTU) 2360 DTU->applyUpdatesPermissive({{DominatorTree::Delete, BB, UnwindDest}}); 2361 } 2362 2363 /// removeUnreachableBlocks - Remove blocks that are not reachable, even 2364 /// if they are in a dead cycle. Return true if a change was made, false 2365 /// otherwise. 
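/// Blocks in a dead cycle keep each other alive through their predecessor
/// lists, so they cannot be found by looking for blocks without predecessors;
/// instead, reachability is computed forward from the entry block (see
/// markAliveBlocks) and every block not reached is deleted.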
2366 bool llvm::removeUnreachableBlocks(Function &F, DomTreeUpdater *DTU, 2367 MemorySSAUpdater *MSSAU) { 2368 SmallPtrSet<BasicBlock *, 16> Reachable; 2369 bool Changed = markAliveBlocks(F, Reachable, DTU); 2370 2371 // If there are unreachable blocks in the CFG... 2372 if (Reachable.size() == F.size()) 2373 return Changed; 2374 2375 assert(Reachable.size() < F.size()); 2376 NumRemoved += F.size() - Reachable.size(); 2377 2378 SmallSetVector<BasicBlock *, 8> DeadBlockSet; 2379 for (BasicBlock &BB : F) { 2380 // Skip reachable basic blocks 2381 if (Reachable.count(&BB)) 2382 continue; 2383 DeadBlockSet.insert(&BB); 2384 } 2385 2386 if (MSSAU) 2387 MSSAU->removeBlocks(DeadBlockSet); 2388 2389 // Loop over all of the basic blocks that are not reachable, dropping all of 2390 // their internal references. Update DTU if available. 2391 std::vector<DominatorTree::UpdateType> Updates; 2392 for (auto *BB : DeadBlockSet) { 2393 for (BasicBlock *Successor : successors(BB)) { 2394 if (!DeadBlockSet.count(Successor)) 2395 Successor->removePredecessor(BB); 2396 if (DTU) 2397 Updates.push_back({DominatorTree::Delete, BB, Successor}); 2398 } 2399 BB->dropAllReferences(); 2400 if (DTU) { 2401 Instruction *TI = BB->getTerminator(); 2402 assert(TI && "Basic block should have a terminator"); 2403 // Terminators like invoke can have users. We have to replace their users, 2404 // before removing them. 2405 if (!TI->use_empty()) 2406 TI->replaceAllUsesWith(UndefValue::get(TI->getType())); 2407 TI->eraseFromParent(); 2408 new UnreachableInst(BB->getContext(), BB); 2409 assert(succ_empty(BB) && "The successor list of BB isn't empty before " 2410 "applying corresponding DTU updates."); 2411 } 2412 } 2413 2414 if (DTU) { 2415 DTU->applyUpdatesPermissive(Updates); 2416 bool Deleted = false; 2417 for (auto *BB : DeadBlockSet) { 2418 if (DTU->isBBPendingDeletion(BB)) 2419 --NumRemoved; 2420 else 2421 Deleted = true; 2422 DTU->deleteBB(BB); 2423 } 2424 if (!Deleted) 2425 return false; 2426 } else { 2427 for (auto *BB : DeadBlockSet) 2428 BB->eraseFromParent(); 2429 } 2430 2431 return true; 2432 } 2433 2434 void llvm::combineMetadata(Instruction *K, const Instruction *J, 2435 ArrayRef<unsigned> KnownIDs, bool DoesKMove) { 2436 SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata; 2437 K->dropUnknownNonDebugMetadata(KnownIDs); 2438 K->getAllMetadataOtherThanDebugLoc(Metadata); 2439 for (const auto &MD : Metadata) { 2440 unsigned Kind = MD.first; 2441 MDNode *JMD = J->getMetadata(Kind); 2442 MDNode *KMD = MD.second; 2443 2444 switch (Kind) { 2445 default: 2446 K->setMetadata(Kind, nullptr); // Remove unknown metadata 2447 break; 2448 case LLVMContext::MD_dbg: 2449 llvm_unreachable("getAllMetadataOtherThanDebugLoc returned a MD_dbg"); 2450 case LLVMContext::MD_tbaa: 2451 K->setMetadata(Kind, MDNode::getMostGenericTBAA(JMD, KMD)); 2452 break; 2453 case LLVMContext::MD_alias_scope: 2454 K->setMetadata(Kind, MDNode::getMostGenericAliasScope(JMD, KMD)); 2455 break; 2456 case LLVMContext::MD_noalias: 2457 case LLVMContext::MD_mem_parallel_loop_access: 2458 K->setMetadata(Kind, MDNode::intersect(JMD, KMD)); 2459 break; 2460 case LLVMContext::MD_access_group: 2461 K->setMetadata(LLVMContext::MD_access_group, 2462 intersectAccessGroups(K, J)); 2463 break; 2464 case LLVMContext::MD_range: 2465 2466 // If K does move, use most generic range. Otherwise keep the range of 2467 // K. 2468 if (DoesKMove) 2469 // FIXME: If K does move, we should drop the range info and nonnull. 
        //        Currently this function is used with DoesKMove in passes
        //        doing hoisting/sinking and the current behavior of using
        //        the most generic range is correct in those cases.
        K->setMetadata(Kind, MDNode::getMostGenericRange(JMD, KMD));
      break;
    case LLVMContext::MD_fpmath:
      K->setMetadata(Kind, MDNode::getMostGenericFPMath(JMD, KMD));
      break;
    case LLVMContext::MD_invariant_load:
      // Only set the !invariant.load if it is present in both instructions.
      K->setMetadata(Kind, JMD);
      break;
    case LLVMContext::MD_nonnull:
      // If K does move, keep nonnull only if it is present in both
      // instructions.
      if (DoesKMove)
        K->setMetadata(Kind, JMD);
      break;
    case LLVMContext::MD_invariant_group:
      // Preserve !invariant.group in K.
      break;
    case LLVMContext::MD_align:
      K->setMetadata(
          Kind, MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
      break;
    case LLVMContext::MD_dereferenceable:
    case LLVMContext::MD_dereferenceable_or_null:
      K->setMetadata(
          Kind, MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
      break;
    case LLVMContext::MD_preserve_access_index:
      // Preserve !preserve.access.index in K.
      break;
    }
  }
  // Set !invariant.group from J if J has it. If both instructions have it
  // then we will just pick it from J - even when they are different.
  // Also make sure that K is a load or store - e.g. combining a bitcast with
  // a load could otherwise produce a bitcast carrying invariant.group
  // metadata, which is invalid.
  // FIXME: we should try to preserve both invariant.group md if they are
  // different, but right now instruction can only have one invariant.group.
  if (auto *JMD = J->getMetadata(LLVMContext::MD_invariant_group))
    if (isa<LoadInst>(K) || isa<StoreInst>(K))
      K->setMetadata(LLVMContext::MD_invariant_group, JMD);
}

void llvm::combineMetadataForCSE(Instruction *K, const Instruction *J,
                                 bool KDominatesJ) {
  unsigned KnownIDs[] = {
      LLVMContext::MD_tbaa,            LLVMContext::MD_alias_scope,
      LLVMContext::MD_noalias,         LLVMContext::MD_range,
      LLVMContext::MD_invariant_load,  LLVMContext::MD_nonnull,
      LLVMContext::MD_invariant_group, LLVMContext::MD_align,
      LLVMContext::MD_dereferenceable,
      LLVMContext::MD_dereferenceable_or_null,
      LLVMContext::MD_access_group,    LLVMContext::MD_preserve_access_index};
  combineMetadata(K, J, KnownIDs, KDominatesJ);
}

void llvm::copyMetadataForLoad(LoadInst &Dest, const LoadInst &Source) {
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  Source.getAllMetadata(MD);
  MDBuilder MDB(Dest.getContext());
  Type *NewType = Dest.getType();
  const DataLayout &DL = Source.getModule()->getDataLayout();
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a load instruction changing *only its
    // type*. The only metadata it makes sense to drop is metadata which is
    // invalidated when the pointer type changes. This should essentially
    // never be the case in LLVM, but we explicitly switch over only known
    // metadata to be conservatively correct. If you are adding metadata to
    // LLVM which pertains to loads, you almost certainly want to add it here.
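    // For example, !nonnull cannot be kept as-is when a pointer load is
    // rewritten to an integer type; the MD_nonnull case below lets
    // copyNonnullMetadata translate it into an equivalent !range instead.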
2544 switch (ID) { 2545 case LLVMContext::MD_dbg: 2546 case LLVMContext::MD_tbaa: 2547 case LLVMContext::MD_prof: 2548 case LLVMContext::MD_fpmath: 2549 case LLVMContext::MD_tbaa_struct: 2550 case LLVMContext::MD_invariant_load: 2551 case LLVMContext::MD_alias_scope: 2552 case LLVMContext::MD_noalias: 2553 case LLVMContext::MD_nontemporal: 2554 case LLVMContext::MD_mem_parallel_loop_access: 2555 case LLVMContext::MD_access_group: 2556 // All of these directly apply. 2557 Dest.setMetadata(ID, N); 2558 break; 2559 2560 case LLVMContext::MD_nonnull: 2561 copyNonnullMetadata(Source, N, Dest); 2562 break; 2563 2564 case LLVMContext::MD_align: 2565 case LLVMContext::MD_dereferenceable: 2566 case LLVMContext::MD_dereferenceable_or_null: 2567 // These only directly apply if the new type is also a pointer. 2568 if (NewType->isPointerTy()) 2569 Dest.setMetadata(ID, N); 2570 break; 2571 2572 case LLVMContext::MD_range: 2573 copyRangeMetadata(DL, Source, N, Dest); 2574 break; 2575 } 2576 } 2577 } 2578 2579 void llvm::patchReplacementInstruction(Instruction *I, Value *Repl) { 2580 auto *ReplInst = dyn_cast<Instruction>(Repl); 2581 if (!ReplInst) 2582 return; 2583 2584 // Patch the replacement so that it is not more restrictive than the value 2585 // being replaced. 2586 // Note that if 'I' is a load being replaced by some operation, 2587 // for example, by an arithmetic operation, then andIRFlags() 2588 // would just erase all math flags from the original arithmetic 2589 // operation, which is clearly not wanted and not needed. 2590 if (!isa<LoadInst>(I)) 2591 ReplInst->andIRFlags(I); 2592 2593 // FIXME: If both the original and replacement value are part of the 2594 // same control-flow region (meaning that the execution of one 2595 // guarantees the execution of the other), then we can combine the 2596 // noalias scopes here and do better than the general conservative 2597 // answer used in combineMetadata(). 2598 2599 // In general, GVN unifies expressions over different control-flow 2600 // regions, and so we need a conservative combination of the noalias 2601 // scopes. 
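  // DoesKMove is false below because the replacement instruction stays where
  // it already executes; since it is not being hoisted or sunk, its own
  // !nonnull and !range metadata remain valid and need not be dropped or
  // widened.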
2602 static const unsigned KnownIDs[] = { 2603 LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope, 2604 LLVMContext::MD_noalias, LLVMContext::MD_range, 2605 LLVMContext::MD_fpmath, LLVMContext::MD_invariant_load, 2606 LLVMContext::MD_invariant_group, LLVMContext::MD_nonnull, 2607 LLVMContext::MD_access_group, LLVMContext::MD_preserve_access_index}; 2608 combineMetadata(ReplInst, I, KnownIDs, false); 2609 } 2610 2611 template <typename RootType, typename DominatesFn> 2612 static unsigned replaceDominatedUsesWith(Value *From, Value *To, 2613 const RootType &Root, 2614 const DominatesFn &Dominates) { 2615 assert(From->getType() == To->getType()); 2616 2617 unsigned Count = 0; 2618 for (Value::use_iterator UI = From->use_begin(), UE = From->use_end(); 2619 UI != UE;) { 2620 Use &U = *UI++; 2621 if (!Dominates(Root, U)) 2622 continue; 2623 U.set(To); 2624 LLVM_DEBUG(dbgs() << "Replace dominated use of '" << From->getName() 2625 << "' as " << *To << " in " << *U << "\n"); 2626 ++Count; 2627 } 2628 return Count; 2629 } 2630 2631 unsigned llvm::replaceNonLocalUsesWith(Instruction *From, Value *To) { 2632 assert(From->getType() == To->getType()); 2633 auto *BB = From->getParent(); 2634 unsigned Count = 0; 2635 2636 for (Value::use_iterator UI = From->use_begin(), UE = From->use_end(); 2637 UI != UE;) { 2638 Use &U = *UI++; 2639 auto *I = cast<Instruction>(U.getUser()); 2640 if (I->getParent() == BB) 2641 continue; 2642 U.set(To); 2643 ++Count; 2644 } 2645 return Count; 2646 } 2647 2648 unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To, 2649 DominatorTree &DT, 2650 const BasicBlockEdge &Root) { 2651 auto Dominates = [&DT](const BasicBlockEdge &Root, const Use &U) { 2652 return DT.dominates(Root, U); 2653 }; 2654 return ::replaceDominatedUsesWith(From, To, Root, Dominates); 2655 } 2656 2657 unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To, 2658 DominatorTree &DT, 2659 const BasicBlock *BB) { 2660 auto ProperlyDominates = [&DT](const BasicBlock *BB, const Use &U) { 2661 auto *I = cast<Instruction>(U.getUser())->getParent(); 2662 return DT.properlyDominates(BB, I); 2663 }; 2664 return ::replaceDominatedUsesWith(From, To, BB, ProperlyDominates); 2665 } 2666 2667 bool llvm::callsGCLeafFunction(const CallBase *Call, 2668 const TargetLibraryInfo &TLI) { 2669 // Check if the function is specifically marked as a gc leaf function. 2670 if (Call->hasFnAttr("gc-leaf-function")) 2671 return true; 2672 if (const Function *F = Call->getCalledFunction()) { 2673 if (F->hasFnAttribute("gc-leaf-function")) 2674 return true; 2675 2676 if (auto IID = F->getIntrinsicID()) 2677 // Most LLVM intrinsics do not take safepoints. 2678 return IID != Intrinsic::experimental_gc_statepoint && 2679 IID != Intrinsic::experimental_deoptimize; 2680 } 2681 2682 // Lib calls can be materialized by some passes, and won't be 2683 // marked as 'gc-leaf-function.' All available Libcalls are 2684 // GC-leaf. 2685 LibFunc LF; 2686 if (TLI.getLibFunc(*Call, LF)) { 2687 return TLI.has(LF); 2688 } 2689 2690 return false; 2691 } 2692 2693 void llvm::copyNonnullMetadata(const LoadInst &OldLI, MDNode *N, 2694 LoadInst &NewLI) { 2695 auto *NewTy = NewLI.getType(); 2696 2697 // This only directly applies if the new type is also a pointer. 2698 if (NewTy->isPointerTy()) { 2699 NewLI.setMetadata(LLVMContext::MD_nonnull, N); 2700 return; 2701 } 2702 2703 // The only other translation we can do is to integral loads with !range 2704 // metadata. 
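  // For example, an i64 load of a pointer known to be nonnull can carry
  // !range !{i64 1, i64 0}, the wrapped range [1, 0) covering every value
  // except zero, which is what the code below constructs.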
  if (!NewTy->isIntegerTy())
    return;

  MDBuilder MDB(NewLI.getContext());
  const Value *Ptr = OldLI.getPointerOperand();
  auto *ITy = cast<IntegerType>(NewTy);
  auto *NullInt = ConstantExpr::getPtrToInt(
      ConstantPointerNull::get(cast<PointerType>(Ptr->getType())), ITy);
  auto *NonNullInt = ConstantExpr::getAdd(NullInt, ConstantInt::get(ITy, 1));
  NewLI.setMetadata(LLVMContext::MD_range,
                    MDB.createRange(NonNullInt, NullInt));
}

void llvm::copyRangeMetadata(const DataLayout &DL, const LoadInst &OldLI,
                             MDNode *N, LoadInst &NewLI) {
  auto *NewTy = NewLI.getType();

  // Give up unless it is converted to a pointer where there is a single very
  // valuable mapping we can do reliably.
  // FIXME: It would be nice to propagate this in more ways, but the type
  // conversions make it hard.
  if (!NewTy->isPointerTy())
    return;

  unsigned BitWidth = DL.getPointerTypeSizeInBits(NewTy);
  if (!getConstantRangeFromMetadata(*N).contains(APInt(BitWidth, 0))) {
    MDNode *NN = MDNode::get(OldLI.getContext(), None);
    NewLI.setMetadata(LLVMContext::MD_nonnull, NN);
  }
}

void llvm::dropDebugUsers(Instruction &I) {
  SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
  findDbgUsers(DbgUsers, &I);
  for (auto *DII : DbgUsers)
    DII->eraseFromParent();
}

void llvm::hoistAllInstructionsInto(BasicBlock *DomBlock, Instruction *InsertPt,
                                    BasicBlock *BB) {
  // Since we are moving the instructions out of their basic block, we do not
  // retain their original debug locations (DILocations) or debug intrinsic
  // instructions.
  //
  // Doing so would degrade the debugging experience and adversely affect the
  // accuracy of profiling information.
  //
  // Currently, when hoisting the instructions, we take the following actions:
  // - Remove their debug intrinsic instructions.
  // - Set their debug locations to the values from the insertion point.
  //
  // As per PR39141 (comment #8), the more fundamental reason why the
  // dbg.values need to be deleted is that there will not be any instructions
  // with a DILocation in either branch left after performing the
  // transformation. We can only insert a dbg.value after the two branches are
  // joined again.
  //
  // See PR38762, PR39243 for more details.
  //
  // TODO: Extend llvm.dbg.value to take more than one SSA Value (PR39141) to
  // encode predicated DIExpressions that yield different results on different
  // code paths.
  for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE;) {
    Instruction *I = &*II;
    I->dropUnknownNonDebugMetadata();
    if (I->isUsedByMetadata())
      dropDebugUsers(*I);
    if (isa<DbgInfoIntrinsic>(I)) {
      // Remove DbgInfo intrinsics.
      II = I->eraseFromParent();
      continue;
    }
    I->setDebugLoc(InsertPt->getDebugLoc());
    ++II;
  }
  DomBlock->getInstList().splice(InsertPt->getIterator(), BB->getInstList(),
                                 BB->begin(),
                                 BB->getTerminator()->getIterator());
}

namespace {

/// A potential constituent of a bitreverse or bswap expression. See
/// collectBitParts for a fuller explanation.
struct BitPart {
  BitPart(Value *P, unsigned BW) : Provider(P) {
    Provenance.resize(BW);
  }

  /// The Value that this is a bitreverse/bswap of.
  Value *Provider;

  /// The "provenance" of each bit. Provenance[A] = B means that bit A
  /// in Provider becomes bit B in the result of this expression.
  SmallVector<int8_t, 32> Provenance; // int8_t means max size is i128.

  enum { Unset = -1 };
};

} // end anonymous namespace

/// Analyze the specified subexpression and see if it is capable of providing
/// pieces of a bswap or bitreverse. The subexpression provides a potential
/// piece of a bswap or bitreverse if it can be proven that each non-zero bit
/// in the output of the expression came from a corresponding bit in some
/// other value. This function is recursive, and the end result is a mapping
/// of bitnumber to bitnumber. It is the caller's responsibility to validate
/// that the bitnumber to bitnumber mapping is correct for a bswap or
/// bitreverse.
///
/// For example, if the current subexpression is "(shl i32 %X, 24)" then we
/// know that the expression deposits the low byte of %X into the high byte
/// of the result and that all other bits are zero. This expression is
/// accepted and a BitPart is returned with Provider set to %X and
/// Provenance[24-31] set to [0-7].
///
/// To avoid revisiting values, the BitPart results are memoized into the
/// provided map. To avoid unnecessary copying of BitParts, BitParts are
/// constructed in-place in the \c BPS map. Because of this, \c BPS needs to
/// store BitParts objects, not pointers. As we need the concept of a nullptr
/// BitPart (the Value has been analyzed and the analysis failed), we use an
/// Optional type instead to provide the same functionality.
///
/// Because we pass around references into \c BPS, we must use a container
/// that does not invalidate internal references (std::map instead of
/// DenseMap).
static const Optional<BitPart> &
collectBitParts(Value *V, bool MatchBSwaps, bool MatchBitReversals,
                std::map<Value *, Optional<BitPart>> &BPS, int Depth) {
  auto I = BPS.find(V);
  if (I != BPS.end())
    return I->second;

  auto &Result = BPS[V] = None;
  auto BitWidth = cast<IntegerType>(V->getType())->getBitWidth();

  // Prevent stack overflow by limiting the recursion depth.
  if (Depth == BitPartRecursionMaxDepth) {
    LLVM_DEBUG(dbgs() << "collectBitParts max recursion depth reached.\n");
    return Result;
  }

  if (Instruction *I = dyn_cast<Instruction>(V)) {
    // If this is an or instruction, it may be an inner node of the bswap.
    if (I->getOpcode() == Instruction::Or) {
      const auto &A = collectBitParts(I->getOperand(0), MatchBSwaps,
                                      MatchBitReversals, BPS, Depth + 1);
      const auto &B = collectBitParts(I->getOperand(1), MatchBSwaps,
                                      MatchBitReversals, BPS, Depth + 1);
      if (!A || !B)
        return Result;

      // Try and merge the two together.
      if (!A->Provider || A->Provider != B->Provider)
        return Result;

      Result = BitPart(A->Provider, BitWidth);
      for (unsigned i = 0; i < A->Provenance.size(); ++i) {
        if (A->Provenance[i] != BitPart::Unset &&
            B->Provenance[i] != BitPart::Unset &&
            A->Provenance[i] != B->Provenance[i])
          return Result = None;

        if (A->Provenance[i] == BitPart::Unset)
          Result->Provenance[i] = B->Provenance[i];
        else
          Result->Provenance[i] = A->Provenance[i];
      }

      return Result;
    }

    // If this is a logical shift by a constant, recurse then shift the result.
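    // For example, for `shl i32 %x, 8` the provenance computed for %x is
    // moved up by eight positions and the vacated low bits become Unset,
    // since they are known to be zero.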
2875 if (I->isLogicalShift() && isa<ConstantInt>(I->getOperand(1))) { 2876 unsigned BitShift = 2877 cast<ConstantInt>(I->getOperand(1))->getLimitedValue(~0U); 2878 // Ensure the shift amount is defined. 2879 if (BitShift > BitWidth) 2880 return Result; 2881 2882 const auto &Res = collectBitParts(I->getOperand(0), MatchBSwaps, 2883 MatchBitReversals, BPS, Depth + 1); 2884 if (!Res) 2885 return Result; 2886 Result = Res; 2887 2888 // Perform the "shift" on BitProvenance. 2889 auto &P = Result->Provenance; 2890 if (I->getOpcode() == Instruction::Shl) { 2891 P.erase(std::prev(P.end(), BitShift), P.end()); 2892 P.insert(P.begin(), BitShift, BitPart::Unset); 2893 } else { 2894 P.erase(P.begin(), std::next(P.begin(), BitShift)); 2895 P.insert(P.end(), BitShift, BitPart::Unset); 2896 } 2897 2898 return Result; 2899 } 2900 2901 // If this is a logical 'and' with a mask that clears bits, recurse then 2902 // unset the appropriate bits. 2903 if (I->getOpcode() == Instruction::And && 2904 isa<ConstantInt>(I->getOperand(1))) { 2905 APInt Bit(I->getType()->getPrimitiveSizeInBits(), 1); 2906 const APInt &AndMask = cast<ConstantInt>(I->getOperand(1))->getValue(); 2907 2908 // Check that the mask allows a multiple of 8 bits for a bswap, for an 2909 // early exit. 2910 unsigned NumMaskedBits = AndMask.countPopulation(); 2911 if (!MatchBitReversals && NumMaskedBits % 8 != 0) 2912 return Result; 2913 2914 const auto &Res = collectBitParts(I->getOperand(0), MatchBSwaps, 2915 MatchBitReversals, BPS, Depth + 1); 2916 if (!Res) 2917 return Result; 2918 Result = Res; 2919 2920 for (unsigned i = 0; i < BitWidth; ++i, Bit <<= 1) 2921 // If the AndMask is zero for this bit, clear the bit. 2922 if ((AndMask & Bit) == 0) 2923 Result->Provenance[i] = BitPart::Unset; 2924 return Result; 2925 } 2926 2927 // If this is a zext instruction zero extend the result. 2928 if (I->getOpcode() == Instruction::ZExt) { 2929 const auto &Res = collectBitParts(I->getOperand(0), MatchBSwaps, 2930 MatchBitReversals, BPS, Depth + 1); 2931 if (!Res) 2932 return Result; 2933 2934 Result = BitPart(Res->Provider, BitWidth); 2935 auto NarrowBitWidth = 2936 cast<IntegerType>(cast<ZExtInst>(I)->getSrcTy())->getBitWidth(); 2937 for (unsigned i = 0; i < NarrowBitWidth; ++i) 2938 Result->Provenance[i] = Res->Provenance[i]; 2939 for (unsigned i = NarrowBitWidth; i < BitWidth; ++i) 2940 Result->Provenance[i] = BitPart::Unset; 2941 return Result; 2942 } 2943 } 2944 2945 // Okay, we got to something that isn't a shift, 'or' or 'and'. This must be 2946 // the input value to the bswap/bitreverse. 2947 Result = BitPart(V, BitWidth); 2948 for (unsigned i = 0; i < BitWidth; ++i) 2949 Result->Provenance[i] = i; 2950 return Result; 2951 } 2952 2953 static bool bitTransformIsCorrectForBSwap(unsigned From, unsigned To, 2954 unsigned BitWidth) { 2955 if (From % 8 != To % 8) 2956 return false; 2957 // Convert from bit indices to byte indices and check for a byte reversal. 
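  // For example, with BitWidth == 32 a byte swap must map byte 0 to byte 3
  // and byte 1 to byte 2; in byte units the check is
  // From == BitWidth / 8 - To - 1, which the shifts below compute.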
2958 From >>= 3; 2959 To >>= 3; 2960 BitWidth >>= 3; 2961 return From == BitWidth - To - 1; 2962 } 2963 2964 static bool bitTransformIsCorrectForBitReverse(unsigned From, unsigned To, 2965 unsigned BitWidth) { 2966 return From == BitWidth - To - 1; 2967 } 2968 2969 bool llvm::recognizeBSwapOrBitReverseIdiom( 2970 Instruction *I, bool MatchBSwaps, bool MatchBitReversals, 2971 SmallVectorImpl<Instruction *> &InsertedInsts) { 2972 if (Operator::getOpcode(I) != Instruction::Or) 2973 return false; 2974 if (!MatchBSwaps && !MatchBitReversals) 2975 return false; 2976 IntegerType *ITy = dyn_cast<IntegerType>(I->getType()); 2977 if (!ITy || ITy->getBitWidth() > 128) 2978 return false; // Can't do vectors or integers > 128 bits. 2979 unsigned BW = ITy->getBitWidth(); 2980 2981 unsigned DemandedBW = BW; 2982 IntegerType *DemandedTy = ITy; 2983 if (I->hasOneUse()) { 2984 if (TruncInst *Trunc = dyn_cast<TruncInst>(I->user_back())) { 2985 DemandedTy = cast<IntegerType>(Trunc->getType()); 2986 DemandedBW = DemandedTy->getBitWidth(); 2987 } 2988 } 2989 2990 // Try to find all the pieces corresponding to the bswap. 2991 std::map<Value *, Optional<BitPart>> BPS; 2992 auto Res = collectBitParts(I, MatchBSwaps, MatchBitReversals, BPS, 0); 2993 if (!Res) 2994 return false; 2995 auto &BitProvenance = Res->Provenance; 2996 2997 // Now, is the bit permutation correct for a bswap or a bitreverse? We can 2998 // only byteswap values with an even number of bytes. 2999 bool OKForBSwap = DemandedBW % 16 == 0, OKForBitReverse = true; 3000 for (unsigned i = 0; i < DemandedBW; ++i) { 3001 OKForBSwap &= 3002 bitTransformIsCorrectForBSwap(BitProvenance[i], i, DemandedBW); 3003 OKForBitReverse &= 3004 bitTransformIsCorrectForBitReverse(BitProvenance[i], i, DemandedBW); 3005 } 3006 3007 Intrinsic::ID Intrin; 3008 if (OKForBSwap && MatchBSwaps) 3009 Intrin = Intrinsic::bswap; 3010 else if (OKForBitReverse && MatchBitReversals) 3011 Intrin = Intrinsic::bitreverse; 3012 else 3013 return false; 3014 3015 if (ITy != DemandedTy) { 3016 Function *F = Intrinsic::getDeclaration(I->getModule(), Intrin, DemandedTy); 3017 Value *Provider = Res->Provider; 3018 IntegerType *ProviderTy = cast<IntegerType>(Provider->getType()); 3019 // We may need to truncate the provider. 3020 if (DemandedTy != ProviderTy) { 3021 auto *Trunc = CastInst::Create(Instruction::Trunc, Provider, DemandedTy, 3022 "trunc", I); 3023 InsertedInsts.push_back(Trunc); 3024 Provider = Trunc; 3025 } 3026 auto *CI = CallInst::Create(F, Provider, "rev", I); 3027 InsertedInsts.push_back(CI); 3028 auto *ExtInst = CastInst::Create(Instruction::ZExt, CI, ITy, "zext", I); 3029 InsertedInsts.push_back(ExtInst); 3030 return true; 3031 } 3032 3033 Function *F = Intrinsic::getDeclaration(I->getModule(), Intrin, ITy); 3034 InsertedInsts.push_back(CallInst::Create(F, Res->Provider, "rev", I)); 3035 return true; 3036 } 3037 3038 // CodeGen has special handling for some string functions that may replace 3039 // them with target-specific intrinsics. Since that'd skip our interceptors 3040 // in ASan/MSan/TSan/DFSan, and thus make us miss some memory accesses, 3041 // we mark affected calls as NoBuiltin, which will disable optimization 3042 // in CodeGen. 
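// For example, a memcmp call that CodeGen could expand into target-specific
// code stays a plain library call once marked nobuiltin, so the sanitizer
// runtime's interceptor still observes it.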
3043 void llvm::maybeMarkSanitizerLibraryCallNoBuiltin( 3044 CallInst *CI, const TargetLibraryInfo *TLI) { 3045 Function *F = CI->getCalledFunction(); 3046 LibFunc Func; 3047 if (F && !F->hasLocalLinkage() && F->hasName() && 3048 TLI->getLibFunc(F->getName(), Func) && TLI->hasOptimizedCodeGen(Func) && 3049 !F->doesNotAccessMemory()) 3050 CI->addAttribute(AttributeList::FunctionIndex, Attribute::NoBuiltin); 3051 } 3052 3053 bool llvm::canReplaceOperandWithVariable(const Instruction *I, unsigned OpIdx) { 3054 // We can't have a PHI with a metadata type. 3055 if (I->getOperand(OpIdx)->getType()->isMetadataTy()) 3056 return false; 3057 3058 // Early exit. 3059 if (!isa<Constant>(I->getOperand(OpIdx))) 3060 return true; 3061 3062 switch (I->getOpcode()) { 3063 default: 3064 return true; 3065 case Instruction::Call: 3066 case Instruction::Invoke: { 3067 const auto &CB = cast<CallBase>(*I); 3068 3069 // Can't handle inline asm. Skip it. 3070 if (CB.isInlineAsm()) 3071 return false; 3072 3073 // Constant bundle operands may need to retain their constant-ness for 3074 // correctness. 3075 if (CB.isBundleOperand(OpIdx)) 3076 return false; 3077 3078 if (OpIdx < CB.getNumArgOperands()) { 3079 // Some variadic intrinsics require constants in the variadic arguments, 3080 // which currently aren't markable as immarg. 3081 if (isa<IntrinsicInst>(CB) && 3082 OpIdx >= CB.getFunctionType()->getNumParams()) { 3083 // This is known to be OK for stackmap. 3084 return CB.getIntrinsicID() == Intrinsic::experimental_stackmap; 3085 } 3086 3087 // gcroot is a special case, since it requires a constant argument which 3088 // isn't also required to be a simple ConstantInt. 3089 if (CB.getIntrinsicID() == Intrinsic::gcroot) 3090 return false; 3091 3092 // Some intrinsic operands are required to be immediates. 3093 return !CB.paramHasAttr(OpIdx, Attribute::ImmArg); 3094 } 3095 3096 // It is never allowed to replace the call argument to an intrinsic, but it 3097 // may be possible for a call. 3098 return !isa<IntrinsicInst>(CB); 3099 } 3100 case Instruction::ShuffleVector: 3101 // Shufflevector masks are constant. 3102 return OpIdx != 2; 3103 case Instruction::Switch: 3104 case Instruction::ExtractValue: 3105 // All operands apart from the first are constant. 3106 return OpIdx == 0; 3107 case Instruction::InsertValue: 3108 // All operands apart from the first and the second are constant. 3109 return OpIdx < 2; 3110 case Instruction::Alloca: 3111 // Static allocas (constant size in the entry block) are handled by 3112 // prologue/epilogue insertion so they're free anyway. We definitely don't 3113 // want to make them non-constant. 
3114 return !cast<AllocaInst>(I)->isStaticAlloca(); 3115 case Instruction::GetElementPtr: 3116 if (OpIdx == 0) 3117 return true; 3118 gep_type_iterator It = gep_type_begin(I); 3119 for (auto E = std::next(It, OpIdx); It != E; ++It) 3120 if (It.isStruct()) 3121 return false; 3122 return true; 3123 } 3124 } 3125 3126 Value *llvm::invertCondition(Value *Condition) { 3127 // First: Check if it's a constant 3128 if (Constant *C = dyn_cast<Constant>(Condition)) 3129 return ConstantExpr::getNot(C); 3130 3131 // Second: If the condition is already inverted, return the original value 3132 Value *NotCondition; 3133 if (match(Condition, m_Not(m_Value(NotCondition)))) 3134 return NotCondition; 3135 3136 BasicBlock *Parent = nullptr; 3137 Instruction *Inst = dyn_cast<Instruction>(Condition); 3138 if (Inst) 3139 Parent = Inst->getParent(); 3140 else if (Argument *Arg = dyn_cast<Argument>(Condition)) 3141 Parent = &Arg->getParent()->getEntryBlock(); 3142 assert(Parent && "Unsupported condition to invert"); 3143 3144 // Third: Check all the users for an invert 3145 for (User *U : Condition->users()) 3146 if (Instruction *I = dyn_cast<Instruction>(U)) 3147 if (I->getParent() == Parent && match(I, m_Not(m_Specific(Condition)))) 3148 return I; 3149 3150 // Last option: Create a new instruction 3151 auto *Inverted = 3152 BinaryOperator::CreateNot(Condition, Condition->getName() + ".inv"); 3153 if (Inst && !isa<PHINode>(Inst)) 3154 Inverted->insertAfter(Inst); 3155 else 3156 Inverted->insertBefore(&*Parent->getFirstInsertionPt()); 3157 return Inverted; 3158 } 3159