//===- Local.cpp - Functions to perform local transformations -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This family of functions performs various local transformations to the
// program.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalObject.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstdint>
#include <iterator>
#include <map>
#include <utility>

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "local"

STATISTIC(NumRemoved, "Number of unreachable basic blocks removed");

// Max recursion depth for collectBitParts used when detecting bswap and
// bitreverse idioms.
static const unsigned BitPartRecursionMaxDepth = 64;

//===----------------------------------------------------------------------===//
// Local constant propagation.
//

/// ConstantFoldTerminator - If a terminator instruction is predicated on a
/// constant value, convert it into an unconditional branch to the constant
/// destination. This is a nontrivial operation because the successors of this
/// basic block must have their PHI nodes updated.
/// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch
/// conditions and indirectbr addresses this might make dead if
/// DeleteDeadConditions is true.
bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions,
                                  const TargetLibraryInfo *TLI,
                                  DomTreeUpdater *DTU) {
  Instruction *T = BB->getTerminator();
  IRBuilder<> Builder(T);

  // Branch - See if we are conditionally jumping on a constant.
  if (auto *BI = dyn_cast<BranchInst>(T)) {
    if (BI->isUnconditional()) return false;  // Can't optimize uncond branch
    BasicBlock *Dest1 = BI->getSuccessor(0);
    BasicBlock *Dest2 = BI->getSuccessor(1);

    if (auto *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
      // Are we branching on constant?
      // YES. Change to unconditional branch...
      BasicBlock *Destination = Cond->getZExtValue() ? Dest1 : Dest2;
      BasicBlock *OldDest = Cond->getZExtValue() ? Dest2 : Dest1;

      // Let the basic block know that we are letting go of it. Based on this,
      // it will adjust its PHI nodes.
      OldDest->removePredecessor(BB);

      // Replace the conditional branch with an unconditional one.
      Builder.CreateBr(Destination);
      BI->eraseFromParent();
      if (DTU)
        DTU->applyUpdatesPermissive({{DominatorTree::Delete, BB, OldDest}});
      return true;
    }

    if (Dest2 == Dest1) {       // Conditional branch to same location?
      // This branch matches something like this:
      //     br bool %cond, label %Dest, label %Dest
      // and changes it into:  br label %Dest

      // Let the basic block know that we are letting go of one copy of it.
      assert(BI->getParent() && "Terminator not inserted in block!");
      Dest1->removePredecessor(BI->getParent());

      // Replace the conditional branch with an unconditional one.
      Builder.CreateBr(Dest1);
      Value *Cond = BI->getCondition();
      BI->eraseFromParent();
      if (DeleteDeadConditions)
        RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
      return true;
    }
    return false;
  }

  if (auto *SI = dyn_cast<SwitchInst>(T)) {
    // If we are switching on a constant, we can convert the switch to an
    // unconditional branch.
    auto *CI = dyn_cast<ConstantInt>(SI->getCondition());
    BasicBlock *DefaultDest = SI->getDefaultDest();
    BasicBlock *TheOnlyDest = DefaultDest;

    // If the default is unreachable, ignore it when searching for TheOnlyDest.
    if (isa<UnreachableInst>(DefaultDest->getFirstNonPHIOrDbg()) &&
        SI->getNumCases() > 0) {
      TheOnlyDest = SI->case_begin()->getCaseSuccessor();
    }

    // Figure out which case it goes to.
    for (auto i = SI->case_begin(), e = SI->case_end(); i != e;) {
      // Found case matching a constant operand?
      if (i->getCaseValue() == CI) {
        TheOnlyDest = i->getCaseSuccessor();
        break;
      }

      // Check to see if this branch is going to the same place as the default
      // dest. If so, eliminate it as an explicit compare.
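      // Illustrative sketch (hypothetical IR, not from the LLVM test suite):
      //   switch i32 %x, label %default [ i32 0, label %default
      //                                   i32 1, label %other ]
      // The "i32 0" case is redundant because it jumps to the default
      // destination anyway; the code below removes it and, if branch-weight
      // metadata is present, folds that case's weight into the default weight.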
      if (i->getCaseSuccessor() == DefaultDest) {
        MDNode *MD = SI->getMetadata(LLVMContext::MD_prof);
        unsigned NCases = SI->getNumCases();
        // Fold the case metadata into the default if there will be any branches
        // left, unless the metadata doesn't match the switch.
        if (NCases > 1 && MD && MD->getNumOperands() == 2 + NCases) {
          // Collect branch weights into a vector.
          SmallVector<uint32_t, 8> Weights;
          for (unsigned MD_i = 1, MD_e = MD->getNumOperands(); MD_i < MD_e;
               ++MD_i) {
            auto *CI = mdconst::extract<ConstantInt>(MD->getOperand(MD_i));
            Weights.push_back(CI->getValue().getZExtValue());
          }
          // Merge weight of this case to the default weight.
          unsigned idx = i->getCaseIndex();
          Weights[0] += Weights[idx+1];
          // Remove weight for this case.
          std::swap(Weights[idx+1], Weights.back());
          Weights.pop_back();
          SI->setMetadata(LLVMContext::MD_prof,
                          MDBuilder(BB->getContext()).
                          createBranchWeights(Weights));
        }
        // Remove this entry.
        BasicBlock *ParentBB = SI->getParent();
        DefaultDest->removePredecessor(ParentBB);
        i = SI->removeCase(i);
        e = SI->case_end();
        if (DTU)
          DTU->applyUpdatesPermissive(
              {{DominatorTree::Delete, ParentBB, DefaultDest}});
        continue;
      }

      // Otherwise, check to see if the switch only branches to one destination.
      // We do this by resetting "TheOnlyDest" to null when we find two non-equal
      // destinations.
      if (i->getCaseSuccessor() != TheOnlyDest)
        TheOnlyDest = nullptr;

      // Increment this iterator as we haven't removed the case.
      ++i;
    }

    if (CI && !TheOnlyDest) {
      // Branching on a constant, but not any of the cases, go to the default
      // successor.
      TheOnlyDest = SI->getDefaultDest();
    }

    // If we found a single destination that we can fold the switch into, do so
    // now.
    if (TheOnlyDest) {
      // Insert the new branch.
      Builder.CreateBr(TheOnlyDest);
      BasicBlock *BB = SI->getParent();
      std::vector <DominatorTree::UpdateType> Updates;
      if (DTU)
        Updates.reserve(SI->getNumSuccessors() - 1);

      // Remove entries from PHI nodes which we no longer branch to...
      for (BasicBlock *Succ : successors(SI)) {
        // Found case matching a constant operand?
        if (Succ == TheOnlyDest) {
          TheOnlyDest = nullptr; // Don't modify the first branch to TheOnlyDest
        } else {
          Succ->removePredecessor(BB);
          if (DTU)
            Updates.push_back({DominatorTree::Delete, BB, Succ});
        }
      }

      // Delete the old switch.
      Value *Cond = SI->getCondition();
      SI->eraseFromParent();
      if (DeleteDeadConditions)
        RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
      if (DTU)
        DTU->applyUpdatesPermissive(Updates);
      return true;
    }

    if (SI->getNumCases() == 1) {
      // Otherwise, we can fold this switch into a conditional branch
      // instruction if it has only one non-default destination.
      auto FirstCase = *SI->case_begin();
      Value *Cond = Builder.CreateICmpEQ(SI->getCondition(),
                                         FirstCase.getCaseValue(), "cond");

      // Insert the new branch.
      BranchInst *NewBr = Builder.CreateCondBr(Cond,
                                               FirstCase.getCaseSuccessor(),
                                               SI->getDefaultDest());
      MDNode *MD = SI->getMetadata(LLVMContext::MD_prof);
      if (MD && MD->getNumOperands() == 3) {
        ConstantInt *SICase =
            mdconst::dyn_extract<ConstantInt>(MD->getOperand(2));
        ConstantInt *SIDef =
            mdconst::dyn_extract<ConstantInt>(MD->getOperand(1));
        assert(SICase && SIDef);
        // The TrueWeight should be the weight for the single case of SI.
        NewBr->setMetadata(LLVMContext::MD_prof,
                           MDBuilder(BB->getContext()).
                           createBranchWeights(SICase->getValue().getZExtValue(),
                                               SIDef->getValue().getZExtValue()));
      }

      // Update make.implicit metadata to the newly-created conditional branch.
      MDNode *MakeImplicitMD = SI->getMetadata(LLVMContext::MD_make_implicit);
      if (MakeImplicitMD)
        NewBr->setMetadata(LLVMContext::MD_make_implicit, MakeImplicitMD);

      // Delete the old switch.
      SI->eraseFromParent();
      return true;
    }
    return false;
  }

  if (auto *IBI = dyn_cast<IndirectBrInst>(T)) {
    // indirectbr blockaddress(@F, @BB) -> br label @BB
    if (auto *BA =
            dyn_cast<BlockAddress>(IBI->getAddress()->stripPointerCasts())) {
      BasicBlock *TheOnlyDest = BA->getBasicBlock();
      std::vector <DominatorTree::UpdateType> Updates;
      if (DTU)
        Updates.reserve(IBI->getNumDestinations() - 1);

      // Insert the new branch.
      Builder.CreateBr(TheOnlyDest);

      for (unsigned i = 0, e = IBI->getNumDestinations(); i != e; ++i) {
        if (IBI->getDestination(i) == TheOnlyDest) {
          TheOnlyDest = nullptr;
        } else {
          BasicBlock *ParentBB = IBI->getParent();
          BasicBlock *DestBB = IBI->getDestination(i);
          DestBB->removePredecessor(ParentBB);
          if (DTU)
            Updates.push_back({DominatorTree::Delete, ParentBB, DestBB});
        }
      }
      Value *Address = IBI->getAddress();
      IBI->eraseFromParent();
      if (DeleteDeadConditions)
        // Delete pointer cast instructions.
        RecursivelyDeleteTriviallyDeadInstructions(Address, TLI);

      // Also zap the blockaddress constant if there are no users remaining,
      // otherwise the destination is still marked as having its address taken.
      if (BA->use_empty())
        BA->destroyConstant();

      // If we didn't find our destination in the IBI successor list, then we
      // have undefined behavior. Replace the unconditional branch with an
      // 'unreachable' instruction.
      if (TheOnlyDest) {
        BB->getTerminator()->eraseFromParent();
        new UnreachableInst(BB->getContext(), BB);
      }

      if (DTU)
        DTU->applyUpdatesPermissive(Updates);
      return true;
    }
  }

  return false;
}

//===----------------------------------------------------------------------===//
// Local dead code elimination.
//

/// isInstructionTriviallyDead - Return true if the result produced by the
/// instruction is not used, and the instruction has no side effects.
///
bool llvm::isInstructionTriviallyDead(Instruction *I,
                                      const TargetLibraryInfo *TLI) {
  if (!I->use_empty())
    return false;
  return wouldInstructionBeTriviallyDead(I, TLI);
}

bool llvm::wouldInstructionBeTriviallyDead(Instruction *I,
                                           const TargetLibraryInfo *TLI) {
  if (I->isTerminator())
    return false;

  // We don't want the landingpad-like instructions removed by anything this
  // general.
  if (I->isEHPad())
    return false;

  // We don't want debug info removed by anything this general, unless
  // debug info is empty.
  if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(I)) {
    if (DDI->getAddress())
      return false;
    return true;
  }
  if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(I)) {
    if (DVI->getValue())
      return false;
    return true;
  }
  if (DbgLabelInst *DLI = dyn_cast<DbgLabelInst>(I)) {
    if (DLI->getLabel())
      return false;
    return true;
  }

  if (!I->mayHaveSideEffects())
    return true;

  // Special case intrinsics that "may have side effects" but can be deleted
  // when dead.
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    // Safe to delete llvm.stacksave and launder.invariant.group if dead.
    if (II->getIntrinsicID() == Intrinsic::stacksave ||
        II->getIntrinsicID() == Intrinsic::launder_invariant_group)
      return true;

    // Lifetime intrinsics are dead when their right-hand operand is undef.
    if (II->isLifetimeStartOrEnd())
      return isa<UndefValue>(II->getArgOperand(1));

    // Assumptions are dead if their condition is trivially true. Guards on
    // true are operationally no-ops. In the future we can consider more
    // sophisticated tradeoffs for guards considering potential for check
    // widening, but for now we keep things simple.
    if (II->getIntrinsicID() == Intrinsic::assume ||
        II->getIntrinsicID() == Intrinsic::experimental_guard) {
      if (ConstantInt *Cond = dyn_cast<ConstantInt>(II->getArgOperand(0)))
        return !Cond->isZero();

      return false;
    }
  }

  if (isAllocLikeFn(I, TLI))
    return true;

  if (CallInst *CI = isFreeCall(I, TLI))
    if (Constant *C = dyn_cast<Constant>(CI->getArgOperand(0)))
      return C->isNullValue() || isa<UndefValue>(C);

  if (auto *Call = dyn_cast<CallBase>(I))
    if (isMathLibCallNoop(Call, TLI))
      return true;

  return false;
}

/// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a
/// trivially dead instruction, delete it. If that makes any of its operands
/// trivially dead, delete them too, recursively. Return true if any
/// instructions were deleted.
bool llvm::RecursivelyDeleteTriviallyDeadInstructions(
    Value *V, const TargetLibraryInfo *TLI, MemorySSAUpdater *MSSAU) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I || !isInstructionTriviallyDead(I, TLI))
    return false;

  SmallVector<WeakTrackingVH, 16> DeadInsts;
  DeadInsts.push_back(I);
  RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU);

  return true;
}

bool llvm::RecursivelyDeleteTriviallyDeadInstructionsPermissive(
    SmallVectorImpl<WeakTrackingVH> &DeadInsts, const TargetLibraryInfo *TLI,
    MemorySSAUpdater *MSSAU) {
  unsigned S = 0, E = DeadInsts.size(), Alive = 0;
  for (; S != E; ++S) {
    auto *I = cast<Instruction>(DeadInsts[S]);
    if (!isInstructionTriviallyDead(I)) {
      DeadInsts[S] = nullptr;
      ++Alive;
    }
  }
  if (Alive == E)
    return false;
  RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU);
  return true;
}

void llvm::RecursivelyDeleteTriviallyDeadInstructions(
    SmallVectorImpl<WeakTrackingVH> &DeadInsts, const TargetLibraryInfo *TLI,
    MemorySSAUpdater *MSSAU) {
  // Process the dead instruction list until empty.
  while (!DeadInsts.empty()) {
    Value *V = DeadInsts.pop_back_val();
    Instruction *I = cast_or_null<Instruction>(V);
    if (!I)
      continue;
    assert(isInstructionTriviallyDead(I, TLI) &&
           "Live instruction found in dead worklist!");
    assert(I->use_empty() && "Instructions with uses are not dead.");

    // Don't lose the debug info while deleting the instructions.
    salvageDebugInfo(*I);

    // Null out all of the instruction's operands to see if any operand becomes
    // dead as we go.
    for (Use &OpU : I->operands()) {
      Value *OpV = OpU.get();
      OpU.set(nullptr);

      if (!OpV->use_empty())
        continue;

      // If the operand is an instruction that became dead as we nulled out the
      // operand, and if it is 'trivially' dead, delete it in a future loop
      // iteration.
      if (Instruction *OpI = dyn_cast<Instruction>(OpV))
        if (isInstructionTriviallyDead(OpI, TLI))
          DeadInsts.push_back(OpI);
    }
    if (MSSAU)
      MSSAU->removeMemoryAccess(I);

    I->eraseFromParent();
  }
}

void llvm::setDbgVariableUndef(DbgVariableIntrinsic *DVI) {
  Value *DbgValue = DVI->getVariableLocation(false);
  Value *Undef = UndefValue::get(DbgValue ? DbgValue->getType()
                                          : Type::getInt1Ty(DVI->getContext()));
  DVI->setOperand(
      0, MetadataAsValue::get(DVI->getContext(), ValueAsMetadata::get(Undef)));
}

bool llvm::replaceDbgUsesWithUndef(Instruction *I) {
  SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
  findDbgUsers(DbgUsers, I);
  for (auto *DII : DbgUsers)
    setDbgVariableUndef(DII);
  return !DbgUsers.empty();
}

/// areAllUsesEqual - Check whether the uses of a value are all the same.
/// This is similar to Instruction::hasOneUse() except this will also return
/// true when there are no uses or multiple uses that all refer to the same
/// value.
static bool areAllUsesEqual(Instruction *I) {
  Value::user_iterator UI = I->user_begin();
  Value::user_iterator UE = I->user_end();
  if (UI == UE)
    return true;

  User *TheUse = *UI;
  for (++UI; UI != UE; ++UI) {
    if (*UI != TheUse)
      return false;
  }
  return true;
}

/// RecursivelyDeleteDeadPHINode - If the specified value is an effectively
/// dead PHI node, due to being a def-use chain of single-use nodes that
/// either forms a cycle or is terminated by a trivially dead instruction,
/// delete it. If that makes any of its operands trivially dead, delete them
/// too, recursively. Return true if a change was made.
bool llvm::RecursivelyDeleteDeadPHINode(PHINode *PN,
                                        const TargetLibraryInfo *TLI,
                                        llvm::MemorySSAUpdater *MSSAU) {
  SmallPtrSet<Instruction*, 4> Visited;
  for (Instruction *I = PN; areAllUsesEqual(I) && !I->mayHaveSideEffects();
       I = cast<Instruction>(*I->user_begin())) {
    if (I->use_empty())
      return RecursivelyDeleteTriviallyDeadInstructions(I, TLI, MSSAU);

    // If we find an instruction more than once, we're on a cycle that
    // won't prove fruitful.
    if (!Visited.insert(I).second) {
      // Break the cycle and delete the instruction and its operands.
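      // Illustrative sketch (hypothetical IR): a two-node cycle such as
      //   %a = phi i32 [ %b, %bb1 ]
      //   %b = phi i32 [ %a, %bb2 ]
      // with no other users is effectively dead; breaking one link of the
      // cycle with undef below makes the rest trivially dead and lets the
      // recursive deletion clean up the whole chain.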
      I->replaceAllUsesWith(UndefValue::get(I->getType()));
      (void)RecursivelyDeleteTriviallyDeadInstructions(I, TLI, MSSAU);
      return true;
    }
  }
  return false;
}

static bool
simplifyAndDCEInstruction(Instruction *I,
                          SmallSetVector<Instruction *, 16> &WorkList,
                          const DataLayout &DL,
                          const TargetLibraryInfo *TLI) {
  if (isInstructionTriviallyDead(I, TLI)) {
    salvageDebugInfo(*I);

    // Null out all of the instruction's operands to see if any operand becomes
    // dead as we go.
    for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
      Value *OpV = I->getOperand(i);
      I->setOperand(i, nullptr);

      if (!OpV->use_empty() || I == OpV)
        continue;

      // If the operand is an instruction that became dead as we nulled out the
      // operand, and if it is 'trivially' dead, delete it in a future loop
      // iteration.
      if (Instruction *OpI = dyn_cast<Instruction>(OpV))
        if (isInstructionTriviallyDead(OpI, TLI))
          WorkList.insert(OpI);
    }

    I->eraseFromParent();

    return true;
  }

  if (Value *SimpleV = SimplifyInstruction(I, DL)) {
    // Add the users to the worklist. CAREFUL: an instruction can use itself,
    // in the case of a phi node.
    for (User *U : I->users()) {
      if (U != I) {
        WorkList.insert(cast<Instruction>(U));
      }
    }

    // Replace the instruction with its simplified value.
    bool Changed = false;
    if (!I->use_empty()) {
      I->replaceAllUsesWith(SimpleV);
      Changed = true;
    }
    if (isInstructionTriviallyDead(I, TLI)) {
      I->eraseFromParent();
      Changed = true;
    }
    return Changed;
  }
  return false;
}

/// SimplifyInstructionsInBlock - Scan the specified basic block and try to
/// simplify any instructions in it and recursively delete dead instructions.
///
/// This returns true if it changed the code; note that it can delete
/// instructions in other blocks as well as in this block.
bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB,
                                       const TargetLibraryInfo *TLI) {
  bool MadeChange = false;
  const DataLayout &DL = BB->getModule()->getDataLayout();

#ifndef NDEBUG
  // In debug builds, ensure that the terminator of the block is never replaced
  // or deleted by these simplifications. The idea of simplification is that it
  // cannot introduce new instructions, and there is no way to replace the
  // terminator of a block without introducing a new instruction.
  AssertingVH<Instruction> TerminatorVH(&BB->back());
#endif

  SmallSetVector<Instruction *, 16> WorkList;
  // Iterate over the original function, only adding insts to the worklist
  // if they actually need to be revisited. This avoids having to pre-init
  // the worklist with the entire function's worth of instructions.
  for (BasicBlock::iterator BI = BB->begin(), E = std::prev(BB->end());
       BI != E;) {
    assert(!BI->isTerminator());
    Instruction *I = &*BI;
    ++BI;

    // We're visiting this instruction now, so make sure it's not in the
    // worklist from an earlier visit.
    if (!WorkList.count(I))
      MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
  }

  while (!WorkList.empty()) {
    Instruction *I = WorkList.pop_back_val();
    MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
  }
  return MadeChange;
}

//===----------------------------------------------------------------------===//
// Control Flow Graph Restructuring.
//

void llvm::RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred,
                                        DomTreeUpdater *DTU) {
  // This only adjusts blocks with PHI nodes.
  if (!isa<PHINode>(BB->begin()))
    return;

  // Remove the entries for Pred from the PHI nodes in BB, but do not simplify
  // them down. This will leave us with single entry phi nodes and other phis
  // that can be removed.
  BB->removePredecessor(Pred, true);

  WeakTrackingVH PhiIt = &BB->front();
  while (PHINode *PN = dyn_cast<PHINode>(PhiIt)) {
    PhiIt = &*++BasicBlock::iterator(cast<Instruction>(PhiIt));
    Value *OldPhiIt = PhiIt;

    if (!recursivelySimplifyInstruction(PN))
      continue;

    // If recursive simplification ended up deleting the next PHI node we would
    // iterate to, then our iterator is invalid, restart scanning from the top
    // of the block.
    if (PhiIt != OldPhiIt) PhiIt = &BB->front();
  }
  if (DTU)
    DTU->applyUpdatesPermissive({{DominatorTree::Delete, Pred, BB}});
}

void llvm::MergeBasicBlockIntoOnlyPred(BasicBlock *DestBB,
                                       DomTreeUpdater *DTU) {

  // If BB has single-entry PHI nodes, fold them.
  while (PHINode *PN = dyn_cast<PHINode>(DestBB->begin())) {
    Value *NewVal = PN->getIncomingValue(0);
    // Replace self referencing PHI with undef, it must be dead.
    if (NewVal == PN) NewVal = UndefValue::get(PN->getType());
    PN->replaceAllUsesWith(NewVal);
    PN->eraseFromParent();
  }

  BasicBlock *PredBB = DestBB->getSinglePredecessor();
  assert(PredBB && "Block doesn't have a single predecessor!");

  bool ReplaceEntryBB = false;
  if (PredBB == &DestBB->getParent()->getEntryBlock())
    ReplaceEntryBB = true;

  // DTU updates: Collect all the edges that enter
  // PredBB. These dominator edges will be redirected to DestBB.
  SmallVector<DominatorTree::UpdateType, 32> Updates;

  if (DTU) {
    Updates.push_back({DominatorTree::Delete, PredBB, DestBB});
    for (auto I = pred_begin(PredBB), E = pred_end(PredBB); I != E; ++I) {
      Updates.push_back({DominatorTree::Delete, *I, PredBB});
      // This predecessor of PredBB may already have DestBB as a successor.
      if (llvm::find(successors(*I), DestBB) == succ_end(*I))
        Updates.push_back({DominatorTree::Insert, *I, DestBB});
    }
  }

  // Zap anything that took the address of DestBB. Not doing this will give the
  // address an invalid value.
  if (DestBB->hasAddressTaken()) {
    BlockAddress *BA = BlockAddress::get(DestBB);
    Constant *Replacement =
        ConstantInt::get(Type::getInt32Ty(BA->getContext()), 1);
    BA->replaceAllUsesWith(ConstantExpr::getIntToPtr(Replacement,
                                                     BA->getType()));
    BA->destroyConstant();
  }

  // Anything that branched to PredBB now branches to DestBB.
  PredBB->replaceAllUsesWith(DestBB);

  // Splice all the instructions from PredBB to DestBB.
  PredBB->getTerminator()->eraseFromParent();
  DestBB->getInstList().splice(DestBB->begin(), PredBB->getInstList());
  new UnreachableInst(PredBB->getContext(), PredBB);

  // If the PredBB is the entry block of the function, move DestBB up to
  // become the entry block after we erase PredBB.
  if (ReplaceEntryBB)
    DestBB->moveAfter(PredBB);

  if (DTU) {
    assert(PredBB->getInstList().size() == 1 &&
           isa<UnreachableInst>(PredBB->getTerminator()) &&
           "The successor list of PredBB isn't empty before "
           "applying corresponding DTU updates.");
    DTU->applyUpdatesPermissive(Updates);
    DTU->deleteBB(PredBB);
    // Recalculation of DomTree is needed when updating a forward DomTree and
    // the Entry BB is replaced.
    if (ReplaceEntryBB && DTU->hasDomTree()) {
      // The entry block was removed and there is no external interface for
      // the dominator tree to be notified of this change. In this corner-case
      // we recalculate the entire tree.
      DTU->recalculate(*(DestBB->getParent()));
    }
  } else {
    PredBB->eraseFromParent(); // Nuke BB if DTU is nullptr.
  }
}

/// Return true if we can choose one of these values to use in place of the
/// other. Note that we will always choose the non-undef value to keep.
static bool CanMergeValues(Value *First, Value *Second) {
  return First == Second || isa<UndefValue>(First) || isa<UndefValue>(Second);
}

/// Return true if we can fold BB, an almost-empty BB ending in an unconditional
/// branch to Succ, into Succ.
///
/// Assumption: Succ is the single successor for BB.
static bool CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ) {
  assert(*succ_begin(BB) == Succ && "Succ is not successor of BB!");

  LLVM_DEBUG(dbgs() << "Looking to fold " << BB->getName() << " into "
                    << Succ->getName() << "\n");
  // Shortcut, if there is only a single predecessor it must be BB and merging
  // is always safe
  if (Succ->getSinglePredecessor()) return true;

  // Make a list of the predecessors of BB
  SmallPtrSet<BasicBlock*, 16> BBPreds(pred_begin(BB), pred_end(BB));

  // Look at all the phi nodes in Succ, to see if they present a conflict when
  // merging these blocks
  for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
    PHINode *PN = cast<PHINode>(I);

    // If the incoming value from BB is again a PHINode in
    // BB which has the same incoming value for *PI as PN does, we can
    // merge the phi nodes and then the blocks can still be merged
    PHINode *BBPN = dyn_cast<PHINode>(PN->getIncomingValueForBlock(BB));
    if (BBPN && BBPN->getParent() == BB) {
      for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
        BasicBlock *IBB = PN->getIncomingBlock(PI);
        if (BBPreds.count(IBB) &&
            !CanMergeValues(BBPN->getIncomingValueForBlock(IBB),
                            PN->getIncomingValue(PI))) {
          LLVM_DEBUG(dbgs()
                     << "Can't fold, phi node " << PN->getName() << " in "
                     << Succ->getName() << " is conflicting with "
                     << BBPN->getName() << " with regard to common predecessor "
                     << IBB->getName() << "\n");
          return false;
        }
      }
    } else {
      Value* Val = PN->getIncomingValueForBlock(BB);
      for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
        // See if the incoming value for the common predecessor is equal to the
        // one for BB, in which case this phi node will not prevent the merging
        // of the block.
        BasicBlock *IBB = PN->getIncomingBlock(PI);
        if (BBPreds.count(IBB) &&
            !CanMergeValues(Val, PN->getIncomingValue(PI))) {
          LLVM_DEBUG(dbgs() << "Can't fold, phi node " << PN->getName()
                            << " in " << Succ->getName()
                            << " is conflicting with regard to common "
                            << "predecessor " << IBB->getName() << "\n");
          return false;
        }
      }
    }
  }

  return true;
}

using PredBlockVector = SmallVector<BasicBlock *, 16>;
using IncomingValueMap = DenseMap<BasicBlock *, Value *>;

/// Determines the value to use as the phi node input for a block.
///
/// Select between \p OldVal and any value that we know flows from \p BB
/// to a particular phi on the basis of which one (if either) is not
/// undef. Update IncomingValues based on the selected value.
///
/// \param OldVal The value we are considering selecting.
/// \param BB The block that the value flows in from.
/// \param IncomingValues A map from block-to-value for other phi inputs
/// that we have examined.
///
/// \returns the selected value.
static Value *selectIncomingValueForBlock(Value *OldVal, BasicBlock *BB,
                                          IncomingValueMap &IncomingValues) {
  if (!isa<UndefValue>(OldVal)) {
    assert((!IncomingValues.count(BB) ||
            IncomingValues.find(BB)->second == OldVal) &&
           "Expected OldVal to match incoming value from BB!");

    IncomingValues.insert(std::make_pair(BB, OldVal));
    return OldVal;
  }

  IncomingValueMap::const_iterator It = IncomingValues.find(BB);
  if (It != IncomingValues.end()) return It->second;

  return OldVal;
}

/// Create a map from block to value for the operands of a
/// given phi.
///
/// Create a map from block to value for each non-undef value flowing
/// into \p PN.
///
/// \param PN The phi we are collecting the map for.
/// \param IncomingValues [out] The map from block to value for this phi.
static void gatherIncomingValuesToPhi(PHINode *PN,
                                      IncomingValueMap &IncomingValues) {
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *BB = PN->getIncomingBlock(i);
    Value *V = PN->getIncomingValue(i);

    if (!isa<UndefValue>(V))
      IncomingValues.insert(std::make_pair(BB, V));
  }
}

/// Replace the incoming undef values to a phi with the values
/// from a block-to-value map.
///
/// \param PN The phi we are replacing the undefs in.
/// \param IncomingValues A map from block to value.
static void replaceUndefValuesInPhi(PHINode *PN,
                                    const IncomingValueMap &IncomingValues) {
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *V = PN->getIncomingValue(i);

    if (!isa<UndefValue>(V)) continue;

    BasicBlock *BB = PN->getIncomingBlock(i);
    IncomingValueMap::const_iterator It = IncomingValues.find(BB);
    if (It == IncomingValues.end()) continue;

    PN->setIncomingValue(i, It->second);
  }
}

/// Replace a value flowing from a block to a phi with
/// potentially multiple instances of that value flowing from the
/// block's predecessors to the phi.
///
/// \param BB The block with the value flowing into the phi.
/// \param BBPreds The predecessors of BB.
/// \param PN The phi that we are updating.
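///
/// Illustrative sketch (hypothetical blocks, not from a test): if %bb has
/// predecessors %p1 and %p2 and currently feeds value %v into \p PN, then
/// after \p BB is folded away the single incoming entry (%v, %bb) is replaced
/// by the entries (%v, %p1) and (%v, %p2), with undef entries reconciled
/// against known values afterwards.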
static void redirectValuesFromPredecessorsToPhi(BasicBlock *BB,
                                                const PredBlockVector &BBPreds,
                                                PHINode *PN) {
  Value *OldVal = PN->removeIncomingValue(BB, false);
  assert(OldVal && "No entry in PHI for Pred BB!");

  IncomingValueMap IncomingValues;

  // We are merging two blocks - BB, and the block containing PN - and
  // as a result we need to redirect edges from the predecessors of BB
  // to go to the block containing PN, and update PN
  // accordingly. Since we allow merging blocks in the case where the
  // predecessor and successor blocks both share some predecessors,
  // and where some of those common predecessors might have undef
  // values flowing into PN, we want to rewrite those values to be
  // consistent with the non-undef values.

  gatherIncomingValuesToPhi(PN, IncomingValues);

  // If this incoming value is one of the PHI nodes in BB, the new entries
  // in the PHI node are the entries from the old PHI.
  if (isa<PHINode>(OldVal) && cast<PHINode>(OldVal)->getParent() == BB) {
    PHINode *OldValPN = cast<PHINode>(OldVal);
    for (unsigned i = 0, e = OldValPN->getNumIncomingValues(); i != e; ++i) {
      // Note that, since we are merging phi nodes and BB and Succ might
      // have common predecessors, we could end up with a phi node with
      // identical incoming branches. This will be cleaned up later (and
      // will trigger asserts if we try to clean it up now, without also
      // simplifying the corresponding conditional branch).
      BasicBlock *PredBB = OldValPN->getIncomingBlock(i);
      Value *PredVal = OldValPN->getIncomingValue(i);
      Value *Selected = selectIncomingValueForBlock(PredVal, PredBB,
                                                    IncomingValues);

      // And add a new incoming value for this predecessor for the
      // newly retargeted branch.
      PN->addIncoming(Selected, PredBB);
    }
  } else {
    for (unsigned i = 0, e = BBPreds.size(); i != e; ++i) {
      // Update existing incoming values in PN for this
      // predecessor of BB.
      BasicBlock *PredBB = BBPreds[i];
      Value *Selected = selectIncomingValueForBlock(OldVal, PredBB,
                                                    IncomingValues);

      // And add a new incoming value for this predecessor for the
      // newly retargeted branch.
      PN->addIncoming(Selected, PredBB);
    }
  }

  replaceUndefValuesInPhi(PN, IncomingValues);
}

bool llvm::TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB,
                                                   DomTreeUpdater *DTU) {
  assert(BB != &BB->getParent()->getEntryBlock() &&
         "TryToSimplifyUncondBranchFromEmptyBlock called on entry block!");

  // We can't eliminate infinite loops.
  BasicBlock *Succ = cast<BranchInst>(BB->getTerminator())->getSuccessor(0);
  if (BB == Succ) return false;

  // Check to see if merging these blocks would cause conflicts for any of the
  // phi nodes in BB or Succ. If not, we can safely merge.
  if (!CanPropagatePredecessorsForPHIs(BB, Succ)) return false;

  // Check for cases where Succ has multiple predecessors and a PHI node in BB
  // has uses which will not disappear when the PHI nodes are merged. It is
  // possible to handle such cases, but difficult: it requires checking whether
  // BB dominates Succ, which is non-trivial to calculate in the case where
  // Succ has multiple predecessors. Also, it requires checking whether
  // constructing the necessary self-referential PHI node doesn't introduce any
  // conflicts; this isn't too difficult, but the previous code for doing this
  // was incorrect.
  //
  // Note that if this check finds a live use, BB dominates Succ, so BB is
  // something like a loop pre-header (or rarely, a part of an irreducible CFG);
  // folding the branch isn't profitable in that case anyway.
  if (!Succ->getSinglePredecessor()) {
    BasicBlock::iterator BBI = BB->begin();
    while (isa<PHINode>(*BBI)) {
      for (Use &U : BBI->uses()) {
        if (PHINode* PN = dyn_cast<PHINode>(U.getUser())) {
          if (PN->getIncomingBlock(U) != BB)
            return false;
        } else {
          return false;
        }
      }
      ++BBI;
    }
  }

  // We cannot fold the block if it's a branch to an already present callbr
  // successor because that creates duplicate successors.
  for (auto I = pred_begin(BB), E = pred_end(BB); I != E; ++I) {
    if (auto *CBI = dyn_cast<CallBrInst>((*I)->getTerminator())) {
      if (Succ == CBI->getDefaultDest())
        return false;
      for (unsigned i = 0, e = CBI->getNumIndirectDests(); i != e; ++i)
        if (Succ == CBI->getIndirectDest(i))
          return false;
    }
  }

  LLVM_DEBUG(dbgs() << "Killing Trivial BB: \n" << *BB);

  SmallVector<DominatorTree::UpdateType, 32> Updates;
  if (DTU) {
    Updates.push_back({DominatorTree::Delete, BB, Succ});
    // All predecessors of BB will be moved to Succ.
    for (auto I = pred_begin(BB), E = pred_end(BB); I != E; ++I) {
      Updates.push_back({DominatorTree::Delete, *I, BB});
      // This predecessor of BB may already have Succ as a successor.
      if (llvm::find(successors(*I), Succ) == succ_end(*I))
        Updates.push_back({DominatorTree::Insert, *I, Succ});
    }
  }

  if (isa<PHINode>(Succ->begin())) {
    // If there is more than one pred of succ, and there are PHI nodes in
    // the successor, then we need to add incoming edges for the PHI nodes.
    const PredBlockVector BBPreds(pred_begin(BB), pred_end(BB));

    // Loop over all of the PHI nodes in the successor of BB.
    for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
      PHINode *PN = cast<PHINode>(I);

      redirectValuesFromPredecessorsToPhi(BB, BBPreds, PN);
    }
  }

  if (Succ->getSinglePredecessor()) {
    // BB is the only predecessor of Succ, so Succ will end up with exactly
    // the same predecessors BB had.

    // Copy over any phi, debug or lifetime instruction.
    BB->getTerminator()->eraseFromParent();
    Succ->getInstList().splice(Succ->getFirstNonPHI()->getIterator(),
                               BB->getInstList());
  } else {
    while (PHINode *PN = dyn_cast<PHINode>(&BB->front())) {
      // We explicitly check for such uses in CanPropagatePredecessorsForPHIs.
      assert(PN->use_empty() && "There shouldn't be any uses here!");
      PN->eraseFromParent();
    }
    // If Succ has multiple predecessors, each debug intrinsic in BB may or may
    // not be valid when we reach Succ, so the debug variable should be set to
    // undef since its value is unknown.
    Instruction *DbgInsertPoint = Succ->getFirstNonPHI();
    while (DbgInfoIntrinsic *DI = dyn_cast<DbgInfoIntrinsic>(&BB->front())) {
      if (auto DVI = dyn_cast<DbgVariableIntrinsic>(DI)) {
        if (!isa<DbgDeclareInst>(DVI))
          setDbgVariableUndef(DVI);
        DVI->moveBefore(DbgInsertPoint);
      } else {
        break;
      }
    }
  }

  // If the unconditional branch we replaced contains llvm.loop metadata, we
  // add the metadata to the branch instructions in the predecessors.
  unsigned LoopMDKind = BB->getContext().getMDKindID("llvm.loop");
  Instruction *TI = BB->getTerminator();
  if (TI)
    if (MDNode *LoopMD = TI->getMetadata(LoopMDKind))
      for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
        BasicBlock *Pred = *PI;
        Pred->getTerminator()->setMetadata(LoopMDKind, LoopMD);
      }

  // Everything that jumped to BB now goes to Succ.
  BB->replaceAllUsesWith(Succ);
  if (!Succ->hasName()) Succ->takeName(BB);

  // Clear the successor list of BB to match updates applying to DTU later.
  if (BB->getTerminator())
    BB->getInstList().pop_back();
  new UnreachableInst(BB->getContext(), BB);
  assert(succ_empty(BB) && "The successor list of BB isn't empty before "
                           "applying corresponding DTU updates.");

  if (DTU) {
    DTU->applyUpdatesPermissive(Updates);
    DTU->deleteBB(BB);
  } else {
    BB->eraseFromParent(); // Delete the old basic block.
  }
  return true;
}

bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB) {
  // This implementation doesn't currently consider undef operands
  // specially. Theoretically, two phis which are identical except for
  // one having an undef where the other doesn't could be collapsed.

  struct PHIDenseMapInfo {
    static PHINode *getEmptyKey() {
      return DenseMapInfo<PHINode *>::getEmptyKey();
    }

    static PHINode *getTombstoneKey() {
      return DenseMapInfo<PHINode *>::getTombstoneKey();
    }

    static unsigned getHashValue(PHINode *PN) {
      // Compute a hash value on the operands. Instcombine will likely have
      // sorted them, which helps expose duplicates, but we have to check all
      // the operands to be safe in case instcombine hasn't run.
      return static_cast<unsigned>(hash_combine(
          hash_combine_range(PN->value_op_begin(), PN->value_op_end()),
          hash_combine_range(PN->block_begin(), PN->block_end())));
    }

    static bool isEqual(PHINode *LHS, PHINode *RHS) {
      if (LHS == getEmptyKey() || LHS == getTombstoneKey() ||
          RHS == getEmptyKey() || RHS == getTombstoneKey())
        return LHS == RHS;
      return LHS->isIdenticalTo(RHS);
    }
  };

  // Set of unique PHINodes.
  DenseSet<PHINode *, PHIDenseMapInfo> PHISet;

  // Examine each PHI.
  bool Changed = false;
  for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I++);) {
    auto Inserted = PHISet.insert(PN);
    if (!Inserted.second) {
      // A duplicate. Replace this PHI with its duplicate.
      PN->replaceAllUsesWith(*Inserted.first);
      PN->eraseFromParent();
      Changed = true;

      // The RAUW can change PHIs that we already visited. Start over from the
      // beginning.
      PHISet.clear();
      I = BB->begin();
    }
  }

  return Changed;
}

/// enforceKnownAlignment - If the specified pointer points to an object that
/// we control, modify the object's alignment to PrefAlign. This isn't
/// often possible though. If alignment is important, a more reliable approach
/// is to simply align all global variables and allocation instructions to
/// their preferred alignment from the beginning.
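///
/// For illustration (hypothetical values): given an alloca with a known
/// alignment of 4 and a PrefAlign of 16, this raises the alloca's alignment
/// to 16 and returns 16, unless 16 exceeds the natural stack alignment, in
/// which case the existing known alignment is returned instead.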
static unsigned enforceKnownAlignment(Value *V, unsigned Alignment,
                                      unsigned PrefAlign,
                                      const DataLayout &DL) {
  assert(PrefAlign > Alignment);

  V = V->stripPointerCasts();

  if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    // TODO: ideally, computeKnownBits ought to have used
    // AllocaInst::getAlignment() in its computation already, making
    // the below max redundant. But, as it turns out,
    // stripPointerCasts recurses through infinite layers of bitcasts,
    // while computeKnownBits is not allowed to traverse more than 6
    // levels.
    Alignment = std::max(AI->getAlignment(), Alignment);
    if (PrefAlign <= Alignment)
      return Alignment;

    // If the preferred alignment is greater than the natural stack alignment
    // then don't round up. This avoids dynamic stack realignment.
    if (DL.exceedsNaturalStackAlignment(Align(PrefAlign)))
      return Alignment;
    AI->setAlignment(MaybeAlign(PrefAlign));
    return PrefAlign;
  }

  if (auto *GO = dyn_cast<GlobalObject>(V)) {
    // TODO: as above, this shouldn't be necessary.
    Alignment = std::max(GO->getAlignment(), Alignment);
    if (PrefAlign <= Alignment)
      return Alignment;

    // If there is a large requested alignment and we can, bump up the alignment
    // of the global. If the memory we set aside for the global may not be the
    // memory used by the final program then it is impossible for us to reliably
    // enforce the preferred alignment.
    if (!GO->canIncreaseAlignment())
      return Alignment;

    GO->setAlignment(MaybeAlign(PrefAlign));
    return PrefAlign;
  }

  return Alignment;
}

unsigned llvm::getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign,
                                          const DataLayout &DL,
                                          const Instruction *CxtI,
                                          AssumptionCache *AC,
                                          const DominatorTree *DT) {
  assert(V->getType()->isPointerTy() &&
         "getOrEnforceKnownAlignment expects a pointer!");

  KnownBits Known = computeKnownBits(V, DL, 0, AC, CxtI, DT);
  unsigned TrailZ = Known.countMinTrailingZeros();

  // Avoid trouble with ridiculously large TrailZ values, such as
  // those computed from a null pointer.
  TrailZ = std::min(TrailZ, unsigned(sizeof(unsigned) * CHAR_BIT - 1));

  unsigned Align = 1u << std::min(Known.getBitWidth() - 1, TrailZ);

  // LLVM doesn't support alignments larger than this currently.
  Align = std::min(Align, +Value::MaximumAlignment);

  if (PrefAlign > Align)
    Align = enforceKnownAlignment(V, Align, PrefAlign, DL);

  // We don't need to make any adjustment.
  return Align;
}

///===---------------------------------------------------------------------===//
///  Dbg Intrinsic utilities
///

/// See if there is a dbg.value intrinsic for DIVar before I.
static bool LdStHasDebugValue(DILocalVariable *DIVar, DIExpression *DIExpr,
                              Instruction *I) {
  // Since we can't guarantee that the original dbg.declare intrinsic
  // is removed by LowerDbgDeclare(), we need to make sure that we are
  // not inserting the same dbg.value intrinsic over and over.
  BasicBlock::InstListType::iterator PrevI(I);
  if (PrevI != I->getParent()->getInstList().begin()) {
    --PrevI;
    if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(PrevI))
      if (DVI->getValue() == I->getOperand(0) &&
          DVI->getVariable() == DIVar &&
          DVI->getExpression() == DIExpr)
        return true;
  }
  return false;
}

/// See if there is a dbg.value intrinsic for DIVar for the PHI node.
static bool PhiHasDebugValue(DILocalVariable *DIVar,
                             DIExpression *DIExpr,
                             PHINode *APN) {
  // Since we can't guarantee that the original dbg.declare intrinsic
  // is removed by LowerDbgDeclare(), we need to make sure that we are
  // not inserting the same dbg.value intrinsic over and over.
  SmallVector<DbgValueInst *, 1> DbgValues;
  findDbgValues(DbgValues, APN);
  for (auto *DVI : DbgValues) {
    assert(DVI->getValue() == APN);
    if ((DVI->getVariable() == DIVar) && (DVI->getExpression() == DIExpr))
      return true;
  }
  return false;
}

/// Check if the alloc size of \p ValTy is large enough to cover the variable
/// (or fragment of the variable) described by \p DII.
///
/// This is primarily intended as a helper for the different
/// ConvertDebugDeclareToDebugValue functions. The dbg.declare/dbg.addr that is
/// converted describes an alloca'd variable, so we need to use the
/// alloc size of the value when doing the comparison. E.g. an i1 value will be
/// identified as covering an n-bit fragment, if the store size of i1 is at
/// least n bits.
static bool valueCoversEntireFragment(Type *ValTy, DbgVariableIntrinsic *DII) {
  const DataLayout &DL = DII->getModule()->getDataLayout();
  uint64_t ValueSize = DL.getTypeAllocSizeInBits(ValTy);
  if (auto FragmentSize = DII->getFragmentSizeInBits())
    return ValueSize >= *FragmentSize;
  // We can't always calculate the size of the DI variable (e.g. if it is a
  // VLA). Try to use the size of the alloca that the dbg intrinsic describes
  // instead.
  if (DII->isAddressOfVariable())
    if (auto *AI = dyn_cast_or_null<AllocaInst>(DII->getVariableLocation()))
      if (auto FragmentSize = AI->getAllocationSizeInBits(DL))
        return ValueSize >= *FragmentSize;
  // Could not determine size of variable. Conservatively return false.
  return false;
}

/// Produce a DebugLoc to use for each dbg.declare/inst pair that are promoted
/// to a dbg.value. Because no machine insts can come from debug intrinsics,
/// only the scope and inlinedAt are significant. Zero line numbers are used in
/// case this DebugLoc leaks into any adjacent instructions.
static DebugLoc getDebugValueLoc(DbgVariableIntrinsic *DII, Instruction *Src) {
  // Original dbg.declare must have a location.
  DebugLoc DeclareLoc = DII->getDebugLoc();
  MDNode *Scope = DeclareLoc.getScope();
  DILocation *InlinedAt = DeclareLoc.getInlinedAt();
  // Produce an unknown location with the correct scope / inlinedAt fields.
  return DebugLoc::get(0, 0, Scope, InlinedAt);
}

/// Inserts a llvm.dbg.value intrinsic before a store to an alloca'd value
/// that has an associated llvm.dbg.declare or llvm.dbg.addr intrinsic.
void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
                                           StoreInst *SI, DIBuilder &Builder) {
  assert(DII->isAddressOfVariable());
  auto *DIVar = DII->getVariable();
  assert(DIVar && "Missing variable");
  auto *DIExpr = DII->getExpression();
  Value *DV = SI->getValueOperand();

  DebugLoc NewLoc = getDebugValueLoc(DII, SI);

  if (!valueCoversEntireFragment(DV->getType(), DII)) {
    // FIXME: If storing to a part of the variable described by the dbg.declare,
    // then we want to insert a dbg.value for the corresponding fragment.
    LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "
                      << *DII << '\n');
    // For now, when there is a store to parts of the variable (but we do not
    // know which part) we insert a dbg.value intrinsic to indicate that we
    // know nothing about the variable's content.
    DV = UndefValue::get(DV->getType());
    if (!LdStHasDebugValue(DIVar, DIExpr, SI))
      Builder.insertDbgValueIntrinsic(DV, DIVar, DIExpr, NewLoc, SI);
    return;
  }

  if (!LdStHasDebugValue(DIVar, DIExpr, SI))
    Builder.insertDbgValueIntrinsic(DV, DIVar, DIExpr, NewLoc, SI);
}

/// Inserts a llvm.dbg.value intrinsic before a load of an alloca'd value
/// that has an associated llvm.dbg.declare or llvm.dbg.addr intrinsic.
void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
                                           LoadInst *LI, DIBuilder &Builder) {
  auto *DIVar = DII->getVariable();
  auto *DIExpr = DII->getExpression();
  assert(DIVar && "Missing variable");

  if (LdStHasDebugValue(DIVar, DIExpr, LI))
    return;

  if (!valueCoversEntireFragment(LI->getType(), DII)) {
    // FIXME: If only referring to a part of the variable described by the
    // dbg.declare, then we want to insert a dbg.value for the corresponding
    // fragment.
    LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "
                      << *DII << '\n');
    return;
  }

  DebugLoc NewLoc = getDebugValueLoc(DII, nullptr);

  // We are now tracking the loaded value instead of the address. In the
  // future if multi-location support is added to the IR, it might be
  // preferable to keep tracking both the loaded value and the original
  // address in case the alloca can not be elided.
  Instruction *DbgValue = Builder.insertDbgValueIntrinsic(
      LI, DIVar, DIExpr, NewLoc, (Instruction *)nullptr);
  DbgValue->insertAfter(LI);
}

/// Inserts a llvm.dbg.value intrinsic after a phi that has an associated
/// llvm.dbg.declare or llvm.dbg.addr intrinsic.
void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
                                           PHINode *APN, DIBuilder &Builder) {
  auto *DIVar = DII->getVariable();
  auto *DIExpr = DII->getExpression();
  assert(DIVar && "Missing variable");

  if (PhiHasDebugValue(DIVar, DIExpr, APN))
    return;

  if (!valueCoversEntireFragment(APN->getType(), DII)) {
    // FIXME: If only referring to a part of the variable described by the
    // dbg.declare, then we want to insert a dbg.value for the corresponding
    // fragment.
    LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "
                      << *DII << '\n');
    return;
  }

  BasicBlock *BB = APN->getParent();
  auto InsertionPt = BB->getFirstInsertionPt();

  DebugLoc NewLoc = getDebugValueLoc(DII, nullptr);

  // The block may be a catchswitch block, which does not have a valid
  // insertion point.
  // FIXME: Insert dbg.value markers in the successors when appropriate.
  if (InsertionPt != BB->end())
    Builder.insertDbgValueIntrinsic(APN, DIVar, DIExpr, NewLoc, &*InsertionPt);
}

/// Determine whether this alloca is either a VLA or an array.
static bool isArray(AllocaInst *AI) {
  return AI->isArrayAllocation() ||
         (AI->getAllocatedType() && AI->getAllocatedType()->isArrayTy());
}

/// Determine whether this alloca is a structure.
static bool isStructure(AllocaInst *AI) {
  return AI->getAllocatedType() && AI->getAllocatedType()->isStructTy();
}

/// LowerDbgDeclare - Lowers llvm.dbg.declare intrinsics into appropriate set
/// of llvm.dbg.value intrinsics.
bool llvm::LowerDbgDeclare(Function &F) {
  DIBuilder DIB(*F.getParent(), /*AllowUnresolved*/ false);
  SmallVector<DbgDeclareInst *, 4> Dbgs;
  for (auto &FI : F)
    for (Instruction &BI : FI)
      if (auto DDI = dyn_cast<DbgDeclareInst>(&BI))
        Dbgs.push_back(DDI);

  if (Dbgs.empty())
    return false;

  for (auto &I : Dbgs) {
    DbgDeclareInst *DDI = I;
    AllocaInst *AI = dyn_cast_or_null<AllocaInst>(DDI->getAddress());
    // If this is an alloca for a scalar variable, insert a dbg.value
    // at each load and store to the alloca and erase the dbg.declare.
    // The dbg.values allow tracking a variable even if it is not
    // stored on the stack, while the dbg.declare can only describe
    // the stack slot (and at a lexical-scope granularity). Later
    // passes will attempt to elide the stack slot.
    if (!AI || isArray(AI) || isStructure(AI))
      continue;

    // A volatile load/store means that the alloca can't be elided anyway.
    if (llvm::any_of(AI->users(), [](User *U) -> bool {
          if (LoadInst *LI = dyn_cast<LoadInst>(U))
            return LI->isVolatile();
          if (StoreInst *SI = dyn_cast<StoreInst>(U))
            return SI->isVolatile();
          return false;
        }))
      continue;

    SmallVector<const Value *, 8> WorkList;
    WorkList.push_back(AI);
    while (!WorkList.empty()) {
      const Value *V = WorkList.pop_back_val();
      for (auto &AIUse : V->uses()) {
        User *U = AIUse.getUser();
        if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
          if (AIUse.getOperandNo() == 1)
            ConvertDebugDeclareToDebugValue(DDI, SI, DIB);
        } else if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
          ConvertDebugDeclareToDebugValue(DDI, LI, DIB);
        } else if (CallInst *CI = dyn_cast<CallInst>(U)) {
          // This is a call by-value or some other instruction that takes a
          // pointer to the variable. Insert a *value* intrinsic that describes
          // the variable by dereferencing the alloca.
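          // Rough sketch of the result (hypothetical IR, names invented):
          //   call void @use(i32* %x.addr)
          // gains, immediately before it, something like
          //   call void @llvm.dbg.value(metadata i32* %x.addr, metadata !var,
          //                             metadata !DIExpression(DW_OP_deref))
          // so the variable is still described while the pointer escapes.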
          if (!CI->isLifetimeStartOrEnd()) {
            DebugLoc NewLoc = getDebugValueLoc(DDI, nullptr);
            auto *DerefExpr =
                DIExpression::append(DDI->getExpression(), dwarf::DW_OP_deref);
            DIB.insertDbgValueIntrinsic(AI, DDI->getVariable(), DerefExpr,
                                        NewLoc, CI);
          }
        } else if (BitCastInst *BI = dyn_cast<BitCastInst>(U)) {
          if (BI->getType()->isPointerTy())
            WorkList.push_back(BI);
        }
      }
    }
    DDI->eraseFromParent();
  }
  return true;
}

/// Propagate dbg.value intrinsics through the newly inserted PHIs.
void llvm::insertDebugValuesForPHIs(BasicBlock *BB,
                                    SmallVectorImpl<PHINode *> &InsertedPHIs) {
  assert(BB && "No BasicBlock to clone dbg.value(s) from.");
  if (InsertedPHIs.size() == 0)
    return;

  // Map existing PHI nodes to their dbg.values.
  ValueToValueMapTy DbgValueMap;
  for (auto &I : *BB) {
    if (auto DbgII = dyn_cast<DbgVariableIntrinsic>(&I)) {
      if (auto *Loc = dyn_cast_or_null<PHINode>(DbgII->getVariableLocation()))
        DbgValueMap.insert({Loc, DbgII});
    }
  }
  if (DbgValueMap.size() == 0)
    return;

  // Then iterate through the new PHIs and look to see if they use one of the
  // previously mapped PHIs. If so, insert a new dbg.value intrinsic that will
  // propagate the info through the new PHI.
  LLVMContext &C = BB->getContext();
  for (auto PHI : InsertedPHIs) {
    BasicBlock *Parent = PHI->getParent();
    // Avoid inserting an intrinsic into an EH block.
    if (Parent->getFirstNonPHI()->isEHPad())
      continue;
    auto PhiMAV = MetadataAsValue::get(C, ValueAsMetadata::get(PHI));
    for (auto VI : PHI->operand_values()) {
      auto V = DbgValueMap.find(VI);
      if (V != DbgValueMap.end()) {
        auto *DbgII = cast<DbgVariableIntrinsic>(V->second);
        Instruction *NewDbgII = DbgII->clone();
        NewDbgII->setOperand(0, PhiMAV);
        auto InsertionPt = Parent->getFirstInsertionPt();
        assert(InsertionPt != Parent->end() && "Ill-formed basic block");
        NewDbgII->insertBefore(&*InsertionPt);
      }
    }
  }
}

/// Finds all intrinsics declaring local variables as living in the memory that
/// 'V' points to. This may include a mix of dbg.declare and
/// dbg.addr intrinsics.
TinyPtrVector<DbgVariableIntrinsic *> llvm::FindDbgAddrUses(Value *V) {
  // This function is hot. Check whether the value has any metadata to avoid a
  // DenseMap lookup.
  if (!V->isUsedByMetadata())
    return {};
  auto *L = LocalAsMetadata::getIfExists(V);
  if (!L)
    return {};
  auto *MDV = MetadataAsValue::getIfExists(V->getContext(), L);
  if (!MDV)
    return {};

  TinyPtrVector<DbgVariableIntrinsic *> Declares;
  for (User *U : MDV->users()) {
    if (auto *DII = dyn_cast<DbgVariableIntrinsic>(U))
      if (DII->isAddressOfVariable())
        Declares.push_back(DII);
  }

  return Declares;
}

void llvm::findDbgValues(SmallVectorImpl<DbgValueInst *> &DbgValues, Value *V) {
  // This function is hot. Check whether the value has any metadata to avoid a
  // DenseMap lookup.
1565 if (!V->isUsedByMetadata()) 1566 return; 1567 if (auto *L = LocalAsMetadata::getIfExists(V)) 1568 if (auto *MDV = MetadataAsValue::getIfExists(V->getContext(), L)) 1569 for (User *U : MDV->users()) 1570 if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(U)) 1571 DbgValues.push_back(DVI); 1572 } 1573 1574 void llvm::findDbgUsers(SmallVectorImpl<DbgVariableIntrinsic *> &DbgUsers, 1575 Value *V) { 1576 // This function is hot. Check whether the value has any metadata to avoid a 1577 // DenseMap lookup. 1578 if (!V->isUsedByMetadata()) 1579 return; 1580 if (auto *L = LocalAsMetadata::getIfExists(V)) 1581 if (auto *MDV = MetadataAsValue::getIfExists(V->getContext(), L)) 1582 for (User *U : MDV->users()) 1583 if (DbgVariableIntrinsic *DII = dyn_cast<DbgVariableIntrinsic>(U)) 1584 DbgUsers.push_back(DII); 1585 } 1586 1587 bool llvm::replaceDbgDeclare(Value *Address, Value *NewAddress, 1588 Instruction *InsertBefore, DIBuilder &Builder, 1589 uint8_t DIExprFlags, int Offset) { 1590 auto DbgAddrs = FindDbgAddrUses(Address); 1591 for (DbgVariableIntrinsic *DII : DbgAddrs) { 1592 DebugLoc Loc = DII->getDebugLoc(); 1593 auto *DIVar = DII->getVariable(); 1594 auto *DIExpr = DII->getExpression(); 1595 assert(DIVar && "Missing variable"); 1596 DIExpr = DIExpression::prepend(DIExpr, DIExprFlags, Offset); 1597 // Insert llvm.dbg.declare immediately before InsertBefore, and remove old 1598 // llvm.dbg.declare. 1599 Builder.insertDeclare(NewAddress, DIVar, DIExpr, Loc, InsertBefore); 1600 if (DII == InsertBefore) 1601 InsertBefore = InsertBefore->getNextNode(); 1602 DII->eraseFromParent(); 1603 } 1604 return !DbgAddrs.empty(); 1605 } 1606 1607 bool llvm::replaceDbgDeclareForAlloca(AllocaInst *AI, Value *NewAllocaAddress, 1608 DIBuilder &Builder, uint8_t DIExprFlags, 1609 int Offset) { 1610 return replaceDbgDeclare(AI, NewAllocaAddress, AI->getNextNode(), Builder, 1611 DIExprFlags, Offset); 1612 } 1613 1614 static void replaceOneDbgValueForAlloca(DbgValueInst *DVI, Value *NewAddress, 1615 DIBuilder &Builder, int Offset) { 1616 DebugLoc Loc = DVI->getDebugLoc(); 1617 auto *DIVar = DVI->getVariable(); 1618 auto *DIExpr = DVI->getExpression(); 1619 assert(DIVar && "Missing variable"); 1620 1621 // This is an alloca-based llvm.dbg.value. The first thing it should do with 1622 // the alloca pointer is dereference it. Otherwise we don't know how to handle 1623 // it and give up. 1624 if (!DIExpr || DIExpr->getNumElements() < 1 || 1625 DIExpr->getElement(0) != dwarf::DW_OP_deref) 1626 return; 1627 1628 // Insert the offset before the first deref. 1629 // We could just change the offset argument of dbg.value, but it's unsigned... 1630 if (Offset) 1631 DIExpr = DIExpression::prepend(DIExpr, 0, Offset); 1632 1633 Builder.insertDbgValueIntrinsic(NewAddress, DIVar, DIExpr, Loc, DVI); 1634 DVI->eraseFromParent(); 1635 } 1636 1637 void llvm::replaceDbgValueForAlloca(AllocaInst *AI, Value *NewAllocaAddress, 1638 DIBuilder &Builder, int Offset) { 1639 if (auto *L = LocalAsMetadata::getIfExists(AI)) 1640 if (auto *MDV = MetadataAsValue::getIfExists(AI->getContext(), L)) 1641 for (auto UI = MDV->use_begin(), UE = MDV->use_end(); UI != UE;) { 1642 Use &U = *UI++; 1643 if (auto *DVI = dyn_cast<DbgValueInst>(U.getUser())) 1644 replaceOneDbgValueForAlloca(DVI, NewAllocaAddress, Builder, Offset); 1645 } 1646 } 1647 1648 /// Wrap \p V in a ValueAsMetadata instance. 
1649 static MetadataAsValue *wrapValueInMetadata(LLVMContext &C, Value *V) {
1650 return MetadataAsValue::get(C, ValueAsMetadata::get(V));
1651 }
1652
1653 bool llvm::salvageDebugInfo(Instruction &I) {
1654 SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
1655 findDbgUsers(DbgUsers, &I);
1656 if (DbgUsers.empty())
1657 return false;
1658
1659 return salvageDebugInfoForDbgValues(I, DbgUsers);
1660 }
1661
1662 void llvm::salvageDebugInfoOrMarkUndef(Instruction &I) {
1663 if (!salvageDebugInfo(I))
1664 replaceDbgUsesWithUndef(&I);
1665 }
1666
1667 bool llvm::salvageDebugInfoForDbgValues(
1668 Instruction &I, ArrayRef<DbgVariableIntrinsic *> DbgUsers) {
1669 auto &Ctx = I.getContext();
1670 auto wrapMD = [&](Value *V) { return wrapValueInMetadata(Ctx, V); };
1671
1672 for (auto *DII : DbgUsers) {
1673 // Do not add DW_OP_stack_value for DbgDeclare and DbgAddr, because they
1674 // are implicitly pointing out the value as a DWARF memory location
1675 // description.
1676 bool StackValue = isa<DbgValueInst>(DII);
1677
1678 DIExpression *DIExpr =
1679 salvageDebugInfoImpl(I, DII->getExpression(), StackValue);
1680
1681 // salvageDebugInfoImpl should fail either when examining the first element
1682 // of DbgUsers, or not at all, since whether it succeeds depends only on I.
1683 if (!DIExpr)
1684 return false;
1685
1686 DII->setOperand(0, wrapMD(I.getOperand(0)));
1687 DII->setOperand(2, MetadataAsValue::get(Ctx, DIExpr));
1688 LLVM_DEBUG(dbgs() << "SALVAGE: " << *DII << '\n');
1689 }
1690
1691 return true;
1692 }
1693
1694 DIExpression *llvm::salvageDebugInfoImpl(Instruction &I,
1695 DIExpression *SrcDIExpr,
1696 bool WithStackValue) {
1697 auto &M = *I.getModule();
1698 auto &DL = M.getDataLayout();
1699
1700 // Apply a vector of opcodes to the source DIExpression.
1701 auto doSalvage = [&](SmallVectorImpl<uint64_t> &Ops) -> DIExpression * {
1702 DIExpression *DIExpr = SrcDIExpr;
1703 if (!Ops.empty()) {
1704 DIExpr = DIExpression::prependOpcodes(DIExpr, Ops, WithStackValue);
1705 }
1706 return DIExpr;
1707 };
1708
1709 // Apply the given offset to the source DIExpression.
1710 auto applyOffset = [&](uint64_t Offset) -> DIExpression * {
1711 SmallVector<uint64_t, 8> Ops;
1712 DIExpression::appendOffset(Ops, Offset);
1713 return doSalvage(Ops);
1714 };
1715
1716 // Initializer-list helper for applying operators to the source DIExpression.
1717 auto applyOps = [&](ArrayRef<uint64_t> Opcodes) -> DIExpression * {
1718 SmallVector<uint64_t, 8> Ops(Opcodes.begin(), Opcodes.end());
1719 return doSalvage(Ops);
1720 };
1721
1722 if (auto *CI = dyn_cast<CastInst>(&I)) {
1723 // No-op casts and zexts are irrelevant for debug info.
1724 if (CI->isNoopCast(DL) || isa<ZExtInst>(&I))
1725 return SrcDIExpr;
1726
1727 Type *Type = CI->getType();
1728 // Casts other than Trunc or SExt to scalar types cannot be salvaged.
1729 if (Type->isVectorTy() || (!isa<TruncInst>(&I) && !isa<SExtInst>(&I)))
1730 return nullptr;
1731
1732 Value *FromValue = CI->getOperand(0);
1733 unsigned FromTypeBitSize = FromValue->getType()->getScalarSizeInBits();
1734 unsigned ToTypeBitSize = Type->getScalarSizeInBits();
1735
1736 return applyOps(DIExpression::getExtOps(FromTypeBitSize, ToTypeBitSize,
1737 isa<SExtInst>(&I)));
1738 }
1739
1740 if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
1741 unsigned BitWidth =
1742 M.getDataLayout().getIndexSizeInBits(GEP->getPointerAddressSpace());
1743 // Rewrite a constant GEP into a DIExpression.
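    // Illustrative sketch (types and names are hypothetical): for
    //   %p = getelementptr inbounds {i32, i32}, {i32, i32}* %s, i32 0, i32 1
    // the accumulated constant offset is 4 bytes (assuming the usual i32
    // layout), so the salvaged expression gets that byte offset prepended
    // (appendOffset emits DW_OP_plus_uconst 4 for a positive offset).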
1744 APInt Offset(BitWidth, 0); 1745 if (GEP->accumulateConstantOffset(M.getDataLayout(), Offset)) { 1746 return applyOffset(Offset.getSExtValue()); 1747 } else { 1748 return nullptr; 1749 } 1750 } else if (auto *BI = dyn_cast<BinaryOperator>(&I)) { 1751 // Rewrite binary operations with constant integer operands. 1752 auto *ConstInt = dyn_cast<ConstantInt>(I.getOperand(1)); 1753 if (!ConstInt || ConstInt->getBitWidth() > 64) 1754 return nullptr; 1755 1756 uint64_t Val = ConstInt->getSExtValue(); 1757 switch (BI->getOpcode()) { 1758 case Instruction::Add: 1759 return applyOffset(Val); 1760 case Instruction::Sub: 1761 return applyOffset(-int64_t(Val)); 1762 case Instruction::Mul: 1763 return applyOps({dwarf::DW_OP_constu, Val, dwarf::DW_OP_mul}); 1764 case Instruction::SDiv: 1765 return applyOps({dwarf::DW_OP_constu, Val, dwarf::DW_OP_div}); 1766 case Instruction::SRem: 1767 return applyOps({dwarf::DW_OP_constu, Val, dwarf::DW_OP_mod}); 1768 case Instruction::Or: 1769 return applyOps({dwarf::DW_OP_constu, Val, dwarf::DW_OP_or}); 1770 case Instruction::And: 1771 return applyOps({dwarf::DW_OP_constu, Val, dwarf::DW_OP_and}); 1772 case Instruction::Xor: 1773 return applyOps({dwarf::DW_OP_constu, Val, dwarf::DW_OP_xor}); 1774 case Instruction::Shl: 1775 return applyOps({dwarf::DW_OP_constu, Val, dwarf::DW_OP_shl}); 1776 case Instruction::LShr: 1777 return applyOps({dwarf::DW_OP_constu, Val, dwarf::DW_OP_shr}); 1778 case Instruction::AShr: 1779 return applyOps({dwarf::DW_OP_constu, Val, dwarf::DW_OP_shra}); 1780 default: 1781 // TODO: Salvage constants from each kind of binop we know about. 1782 return nullptr; 1783 } 1784 // *Not* to do: we should not attempt to salvage load instructions, 1785 // because the validity and lifetime of a dbg.value containing 1786 // DW_OP_deref becomes difficult to analyze. See PR40628 for examples. 1787 } 1788 return nullptr; 1789 } 1790 1791 /// A replacement for a dbg.value expression. 1792 using DbgValReplacement = Optional<DIExpression *>; 1793 1794 /// Point debug users of \p From to \p To using exprs given by \p RewriteExpr, 1795 /// possibly moving/undefing users to prevent use-before-def. Returns true if 1796 /// changes are made. 1797 static bool rewriteDebugUsers( 1798 Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT, 1799 function_ref<DbgValReplacement(DbgVariableIntrinsic &DII)> RewriteExpr) { 1800 // Find debug users of From. 1801 SmallVector<DbgVariableIntrinsic *, 1> Users; 1802 findDbgUsers(Users, &From); 1803 if (Users.empty()) 1804 return false; 1805 1806 // Prevent use-before-def of To. 1807 bool Changed = false; 1808 SmallPtrSet<DbgVariableIntrinsic *, 1> UndefOrSalvage; 1809 if (isa<Instruction>(&To)) { 1810 bool DomPointAfterFrom = From.getNextNonDebugInstruction() == &DomPoint; 1811 1812 for (auto *DII : Users) { 1813 // It's common to see a debug user between From and DomPoint. Move it 1814 // after DomPoint to preserve the variable update without any reordering. 1815 if (DomPointAfterFrom && DII->getNextNonDebugInstruction() == &DomPoint) { 1816 LLVM_DEBUG(dbgs() << "MOVE: " << *DII << '\n'); 1817 DII->moveAfter(&DomPoint); 1818 Changed = true; 1819 1820 // Users which otherwise aren't dominated by the replacement value must 1821 // be salvaged or deleted. 1822 } else if (!DT.dominates(&DomPoint, DII)) { 1823 UndefOrSalvage.insert(DII); 1824 } 1825 } 1826 } 1827 1828 // Update debug users without use-before-def risk. 
1829 for (auto *DII : Users) {
1830 if (UndefOrSalvage.count(DII))
1831 continue;
1832
1833 LLVMContext &Ctx = DII->getContext();
1834 DbgValReplacement DVR = RewriteExpr(*DII);
1835 if (!DVR)
1836 continue;
1837
1838 DII->setOperand(0, wrapValueInMetadata(Ctx, &To));
1839 DII->setOperand(2, MetadataAsValue::get(Ctx, *DVR));
1840 LLVM_DEBUG(dbgs() << "REWRITE: " << *DII << '\n');
1841 Changed = true;
1842 }
1843
1844 if (!UndefOrSalvage.empty()) {
1845 // Try to salvage the remaining debug users.
1846 salvageDebugInfoOrMarkUndef(From);
1847 Changed = true;
1848 }
1849
1850 return Changed;
1851 }
1852
1853 /// Check if a bitcast from a value of type \p FromTy to type \p ToTy would
1854 /// losslessly preserve the bits and semantics of the value. This predicate is
1855 /// symmetric, i.e. swapping \p FromTy and \p ToTy should give the same result.
1856 ///
1857 /// Note that Type::canLosslesslyBitCastTo is not suitable here because it
1858 /// allows semantically inequivalent bitcasts, such as <2 x i64> -> <4 x i32>,
1859 /// and also does not allow lossless pointer <-> integer conversions.
1860 static bool isBitCastSemanticsPreserving(const DataLayout &DL, Type *FromTy,
1861 Type *ToTy) {
1862 // Trivially compatible types.
1863 if (FromTy == ToTy)
1864 return true;
1865
1866 // Handle compatible pointer <-> integer conversions.
1867 if (FromTy->isIntOrPtrTy() && ToTy->isIntOrPtrTy()) {
1868 bool SameSize = DL.getTypeSizeInBits(FromTy) == DL.getTypeSizeInBits(ToTy);
1869 bool LosslessConversion = !DL.isNonIntegralPointerType(FromTy) &&
1870 !DL.isNonIntegralPointerType(ToTy);
1871 return SameSize && LosslessConversion;
1872 }
1873
1874 // TODO: This is not exhaustive.
1875 return false;
1876 }
1877
1878 bool llvm::replaceAllDbgUsesWith(Instruction &From, Value &To,
1879 Instruction &DomPoint, DominatorTree &DT) {
1880 // Exit early if From has no debug users.
1881 if (!From.isUsedByMetadata())
1882 return false;
1883
1884 assert(&From != &To && "Can't replace something with itself");
1885
1886 Type *FromTy = From.getType();
1887 Type *ToTy = To.getType();
1888
1889 auto Identity = [&](DbgVariableIntrinsic &DII) -> DbgValReplacement {
1890 return DII.getExpression();
1891 };
1892
1893 // Handle no-op conversions.
1894 Module &M = *From.getModule();
1895 const DataLayout &DL = M.getDataLayout();
1896 if (isBitCastSemanticsPreserving(DL, FromTy, ToTy))
1897 return rewriteDebugUsers(From, To, DomPoint, DT, Identity);
1898
1899 // Handle integer-to-integer widening and narrowing.
1900 // FIXME: Use DW_OP_convert when it's available everywhere.
1901 if (FromTy->isIntegerTy() && ToTy->isIntegerTy()) {
1902 uint64_t FromBits = FromTy->getPrimitiveSizeInBits();
1903 uint64_t ToBits = ToTy->getPrimitiveSizeInBits();
1904 assert(FromBits != ToBits && "Unexpected no-op conversion");
1905
1906 // When the width of the result grows, assume that a debugger will only
1907 // access the low `FromBits` bits when inspecting the source variable.
1908 if (FromBits < ToBits)
1909 return rewriteDebugUsers(From, To, DomPoint, DT, Identity);
1910
1911 // The width of the result has shrunk. Use sign/zero extension to describe
1912 // the source variable's high bits.
1913 auto SignOrZeroExt = [&](DbgVariableIntrinsic &DII) -> DbgValReplacement {
1914 DILocalVariable *Var = DII.getVariable();
1915
1916 // Without knowing signedness, sign/zero extension isn't possible.
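        // (Illustrative, assuming a signed i32 variable whose value is now
        // computed as an i16: the rewritten expression sign-extends the 16-bit
        // value back to 32 bits so the debugger still sees a full-width value.)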
1917 auto Signedness = Var->getSignedness(); 1918 if (!Signedness) 1919 return None; 1920 1921 bool Signed = *Signedness == DIBasicType::Signedness::Signed; 1922 return DIExpression::appendExt(DII.getExpression(), ToBits, FromBits, 1923 Signed); 1924 }; 1925 return rewriteDebugUsers(From, To, DomPoint, DT, SignOrZeroExt); 1926 } 1927 1928 // TODO: Floating-point conversions, vectors. 1929 return false; 1930 } 1931 1932 unsigned llvm::removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB) { 1933 unsigned NumDeadInst = 0; 1934 // Delete the instructions backwards, as it has a reduced likelihood of 1935 // having to update as many def-use and use-def chains. 1936 Instruction *EndInst = BB->getTerminator(); // Last not to be deleted. 1937 while (EndInst != &BB->front()) { 1938 // Delete the next to last instruction. 1939 Instruction *Inst = &*--EndInst->getIterator(); 1940 if (!Inst->use_empty() && !Inst->getType()->isTokenTy()) 1941 Inst->replaceAllUsesWith(UndefValue::get(Inst->getType())); 1942 if (Inst->isEHPad() || Inst->getType()->isTokenTy()) { 1943 EndInst = Inst; 1944 continue; 1945 } 1946 if (!isa<DbgInfoIntrinsic>(Inst)) 1947 ++NumDeadInst; 1948 Inst->eraseFromParent(); 1949 } 1950 return NumDeadInst; 1951 } 1952 1953 unsigned llvm::changeToUnreachable(Instruction *I, bool UseLLVMTrap, 1954 bool PreserveLCSSA, DomTreeUpdater *DTU, 1955 MemorySSAUpdater *MSSAU) { 1956 BasicBlock *BB = I->getParent(); 1957 std::vector <DominatorTree::UpdateType> Updates; 1958 1959 if (MSSAU) 1960 MSSAU->changeToUnreachable(I); 1961 1962 // Loop over all of the successors, removing BB's entry from any PHI 1963 // nodes. 1964 if (DTU) 1965 Updates.reserve(BB->getTerminator()->getNumSuccessors()); 1966 for (BasicBlock *Successor : successors(BB)) { 1967 Successor->removePredecessor(BB, PreserveLCSSA); 1968 if (DTU) 1969 Updates.push_back({DominatorTree::Delete, BB, Successor}); 1970 } 1971 // Insert a call to llvm.trap right before this. This turns the undefined 1972 // behavior into a hard fail instead of falling through into random code. 1973 if (UseLLVMTrap) { 1974 Function *TrapFn = 1975 Intrinsic::getDeclaration(BB->getParent()->getParent(), Intrinsic::trap); 1976 CallInst *CallTrap = CallInst::Create(TrapFn, "", I); 1977 CallTrap->setDebugLoc(I->getDebugLoc()); 1978 } 1979 auto *UI = new UnreachableInst(I->getContext(), I); 1980 UI->setDebugLoc(I->getDebugLoc()); 1981 1982 // All instructions after this are dead. 1983 unsigned NumInstrsRemoved = 0; 1984 BasicBlock::iterator BBI = I->getIterator(), BBE = BB->end(); 1985 while (BBI != BBE) { 1986 if (!BBI->use_empty()) 1987 BBI->replaceAllUsesWith(UndefValue::get(BBI->getType())); 1988 BB->getInstList().erase(BBI++); 1989 ++NumInstrsRemoved; 1990 } 1991 if (DTU) 1992 DTU->applyUpdatesPermissive(Updates); 1993 return NumInstrsRemoved; 1994 } 1995 1996 CallInst *llvm::createCallMatchingInvoke(InvokeInst *II) { 1997 SmallVector<Value *, 8> Args(II->arg_begin(), II->arg_end()); 1998 SmallVector<OperandBundleDef, 1> OpBundles; 1999 II->getOperandBundlesAsDefs(OpBundles); 2000 CallInst *NewCall = CallInst::Create(II->getFunctionType(), 2001 II->getCalledValue(), Args, OpBundles); 2002 NewCall->setCallingConv(II->getCallingConv()); 2003 NewCall->setAttributes(II->getAttributes()); 2004 NewCall->setDebugLoc(II->getDebugLoc()); 2005 NewCall->copyMetadata(*II); 2006 return NewCall; 2007 } 2008 2009 /// changeToCall - Convert the specified invoke into a normal call. 
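/// For example (illustrative IR):
///   invoke void @f() to label %normal unwind label %lpad
/// becomes
///   call void @f()
///   br label %normal
/// and %lpad loses this block as a predecessor (its PHI nodes are updated).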
2010 void llvm::changeToCall(InvokeInst *II, DomTreeUpdater *DTU) { 2011 CallInst *NewCall = createCallMatchingInvoke(II); 2012 NewCall->takeName(II); 2013 NewCall->insertBefore(II); 2014 II->replaceAllUsesWith(NewCall); 2015 2016 // Follow the call by a branch to the normal destination. 2017 BasicBlock *NormalDestBB = II->getNormalDest(); 2018 BranchInst::Create(NormalDestBB, II); 2019 2020 // Update PHI nodes in the unwind destination 2021 BasicBlock *BB = II->getParent(); 2022 BasicBlock *UnwindDestBB = II->getUnwindDest(); 2023 UnwindDestBB->removePredecessor(BB); 2024 II->eraseFromParent(); 2025 if (DTU) 2026 DTU->applyUpdatesPermissive({{DominatorTree::Delete, BB, UnwindDestBB}}); 2027 } 2028 2029 BasicBlock *llvm::changeToInvokeAndSplitBasicBlock(CallInst *CI, 2030 BasicBlock *UnwindEdge) { 2031 BasicBlock *BB = CI->getParent(); 2032 2033 // Convert this function call into an invoke instruction. First, split the 2034 // basic block. 2035 BasicBlock *Split = 2036 BB->splitBasicBlock(CI->getIterator(), CI->getName() + ".noexc"); 2037 2038 // Delete the unconditional branch inserted by splitBasicBlock 2039 BB->getInstList().pop_back(); 2040 2041 // Create the new invoke instruction. 2042 SmallVector<Value *, 8> InvokeArgs(CI->arg_begin(), CI->arg_end()); 2043 SmallVector<OperandBundleDef, 1> OpBundles; 2044 2045 CI->getOperandBundlesAsDefs(OpBundles); 2046 2047 // Note: we're round tripping operand bundles through memory here, and that 2048 // can potentially be avoided with a cleverer API design that we do not have 2049 // as of this time. 2050 2051 InvokeInst *II = 2052 InvokeInst::Create(CI->getFunctionType(), CI->getCalledValue(), Split, 2053 UnwindEdge, InvokeArgs, OpBundles, CI->getName(), BB); 2054 II->setDebugLoc(CI->getDebugLoc()); 2055 II->setCallingConv(CI->getCallingConv()); 2056 II->setAttributes(CI->getAttributes()); 2057 2058 // Make sure that anything using the call now uses the invoke! This also 2059 // updates the CallGraph if present, because it uses a WeakTrackingVH. 2060 CI->replaceAllUsesWith(II); 2061 2062 // Delete the original call 2063 Split->getInstList().pop_front(); 2064 return Split; 2065 } 2066 2067 static bool markAliveBlocks(Function &F, 2068 SmallPtrSetImpl<BasicBlock *> &Reachable, 2069 DomTreeUpdater *DTU = nullptr) { 2070 SmallVector<BasicBlock*, 128> Worklist; 2071 BasicBlock *BB = &F.front(); 2072 Worklist.push_back(BB); 2073 Reachable.insert(BB); 2074 bool Changed = false; 2075 do { 2076 BB = Worklist.pop_back_val(); 2077 2078 // Do a quick scan of the basic block, turning any obviously unreachable 2079 // instructions into LLVM unreachable insts. The instruction combining pass 2080 // canonicalizes unreachable insts into stores to null or undef. 2081 for (Instruction &I : *BB) { 2082 if (auto *CI = dyn_cast<CallInst>(&I)) { 2083 Value *Callee = CI->getCalledValue(); 2084 // Handle intrinsic calls. 2085 if (Function *F = dyn_cast<Function>(Callee)) { 2086 auto IntrinsicID = F->getIntrinsicID(); 2087 // Assumptions that are known to be false are equivalent to 2088 // unreachable. Also, if the condition is undefined, then we make the 2089 // choice most beneficial to the optimizer, and choose that to also be 2090 // unreachable. 2091 if (IntrinsicID == Intrinsic::assume) { 2092 if (match(CI->getArgOperand(0), m_CombineOr(m_Zero(), m_Undef()))) { 2093 // Don't insert a call to llvm.trap right before the unreachable. 
2094 changeToUnreachable(CI, false, false, DTU); 2095 Changed = true; 2096 break; 2097 } 2098 } else if (IntrinsicID == Intrinsic::experimental_guard) { 2099 // A call to the guard intrinsic bails out of the current 2100 // compilation unit if the predicate passed to it is false. If the 2101 // predicate is a constant false, then we know the guard will bail 2102 // out of the current compile unconditionally, so all code following 2103 // it is dead. 2104 // 2105 // Note: unlike in llvm.assume, it is not "obviously profitable" for 2106 // guards to treat `undef` as `false` since a guard on `undef` can 2107 // still be useful for widening. 2108 if (match(CI->getArgOperand(0), m_Zero())) 2109 if (!isa<UnreachableInst>(CI->getNextNode())) { 2110 changeToUnreachable(CI->getNextNode(), /*UseLLVMTrap=*/false, 2111 false, DTU); 2112 Changed = true; 2113 break; 2114 } 2115 } 2116 } else if ((isa<ConstantPointerNull>(Callee) && 2117 !NullPointerIsDefined(CI->getFunction())) || 2118 isa<UndefValue>(Callee)) { 2119 changeToUnreachable(CI, /*UseLLVMTrap=*/false, false, DTU); 2120 Changed = true; 2121 break; 2122 } 2123 if (CI->doesNotReturn() && !CI->isMustTailCall()) { 2124 // If we found a call to a no-return function, insert an unreachable 2125 // instruction after it. Make sure there isn't *already* one there 2126 // though. 2127 if (!isa<UnreachableInst>(CI->getNextNode())) { 2128 // Don't insert a call to llvm.trap right before the unreachable. 2129 changeToUnreachable(CI->getNextNode(), false, false, DTU); 2130 Changed = true; 2131 } 2132 break; 2133 } 2134 } else if (auto *SI = dyn_cast<StoreInst>(&I)) { 2135 // Store to undef and store to null are undefined and used to signal 2136 // that they should be changed to unreachable by passes that can't 2137 // modify the CFG. 2138 2139 // Don't touch volatile stores. 2140 if (SI->isVolatile()) continue; 2141 2142 Value *Ptr = SI->getOperand(1); 2143 2144 if (isa<UndefValue>(Ptr) || 2145 (isa<ConstantPointerNull>(Ptr) && 2146 !NullPointerIsDefined(SI->getFunction(), 2147 SI->getPointerAddressSpace()))) { 2148 changeToUnreachable(SI, true, false, DTU); 2149 Changed = true; 2150 break; 2151 } 2152 } 2153 } 2154 2155 Instruction *Terminator = BB->getTerminator(); 2156 if (auto *II = dyn_cast<InvokeInst>(Terminator)) { 2157 // Turn invokes that call 'nounwind' functions into ordinary calls. 2158 Value *Callee = II->getCalledValue(); 2159 if ((isa<ConstantPointerNull>(Callee) && 2160 !NullPointerIsDefined(BB->getParent())) || 2161 isa<UndefValue>(Callee)) { 2162 changeToUnreachable(II, true, false, DTU); 2163 Changed = true; 2164 } else if (II->doesNotThrow() && canSimplifyInvokeNoUnwind(&F)) { 2165 if (II->use_empty() && II->onlyReadsMemory()) { 2166 // jump to the normal destination branch. 2167 BasicBlock *NormalDestBB = II->getNormalDest(); 2168 BasicBlock *UnwindDestBB = II->getUnwindDest(); 2169 BranchInst::Create(NormalDestBB, II); 2170 UnwindDestBB->removePredecessor(II->getParent()); 2171 II->eraseFromParent(); 2172 if (DTU) 2173 DTU->applyUpdatesPermissive( 2174 {{DominatorTree::Delete, BB, UnwindDestBB}}); 2175 } else 2176 changeToCall(II, DTU); 2177 Changed = true; 2178 } 2179 } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Terminator)) { 2180 // Remove catchpads which cannot be reached. 
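      // Handlers whose catchpads are structurally identical to one seen
      // earlier on the same catchswitch are unreachable as well; the hash set
      // below is used to detect and drop such duplicates.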
2181 struct CatchPadDenseMapInfo { 2182 static CatchPadInst *getEmptyKey() { 2183 return DenseMapInfo<CatchPadInst *>::getEmptyKey(); 2184 } 2185 2186 static CatchPadInst *getTombstoneKey() { 2187 return DenseMapInfo<CatchPadInst *>::getTombstoneKey(); 2188 } 2189 2190 static unsigned getHashValue(CatchPadInst *CatchPad) { 2191 return static_cast<unsigned>(hash_combine_range( 2192 CatchPad->value_op_begin(), CatchPad->value_op_end())); 2193 } 2194 2195 static bool isEqual(CatchPadInst *LHS, CatchPadInst *RHS) { 2196 if (LHS == getEmptyKey() || LHS == getTombstoneKey() || 2197 RHS == getEmptyKey() || RHS == getTombstoneKey()) 2198 return LHS == RHS; 2199 return LHS->isIdenticalTo(RHS); 2200 } 2201 }; 2202 2203 // Set of unique CatchPads. 2204 SmallDenseMap<CatchPadInst *, detail::DenseSetEmpty, 4, 2205 CatchPadDenseMapInfo, detail::DenseSetPair<CatchPadInst *>> 2206 HandlerSet; 2207 detail::DenseSetEmpty Empty; 2208 for (CatchSwitchInst::handler_iterator I = CatchSwitch->handler_begin(), 2209 E = CatchSwitch->handler_end(); 2210 I != E; ++I) { 2211 BasicBlock *HandlerBB = *I; 2212 auto *CatchPad = cast<CatchPadInst>(HandlerBB->getFirstNonPHI()); 2213 if (!HandlerSet.insert({CatchPad, Empty}).second) { 2214 CatchSwitch->removeHandler(I); 2215 --I; 2216 --E; 2217 Changed = true; 2218 } 2219 } 2220 } 2221 2222 Changed |= ConstantFoldTerminator(BB, true, nullptr, DTU); 2223 for (BasicBlock *Successor : successors(BB)) 2224 if (Reachable.insert(Successor).second) 2225 Worklist.push_back(Successor); 2226 } while (!Worklist.empty()); 2227 return Changed; 2228 } 2229 2230 void llvm::removeUnwindEdge(BasicBlock *BB, DomTreeUpdater *DTU) { 2231 Instruction *TI = BB->getTerminator(); 2232 2233 if (auto *II = dyn_cast<InvokeInst>(TI)) { 2234 changeToCall(II, DTU); 2235 return; 2236 } 2237 2238 Instruction *NewTI; 2239 BasicBlock *UnwindDest; 2240 2241 if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) { 2242 NewTI = CleanupReturnInst::Create(CRI->getCleanupPad(), nullptr, CRI); 2243 UnwindDest = CRI->getUnwindDest(); 2244 } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(TI)) { 2245 auto *NewCatchSwitch = CatchSwitchInst::Create( 2246 CatchSwitch->getParentPad(), nullptr, CatchSwitch->getNumHandlers(), 2247 CatchSwitch->getName(), CatchSwitch); 2248 for (BasicBlock *PadBB : CatchSwitch->handlers()) 2249 NewCatchSwitch->addHandler(PadBB); 2250 2251 NewTI = NewCatchSwitch; 2252 UnwindDest = CatchSwitch->getUnwindDest(); 2253 } else { 2254 llvm_unreachable("Could not find unwind successor"); 2255 } 2256 2257 NewTI->takeName(TI); 2258 NewTI->setDebugLoc(TI->getDebugLoc()); 2259 UnwindDest->removePredecessor(BB); 2260 TI->replaceAllUsesWith(NewTI); 2261 TI->eraseFromParent(); 2262 if (DTU) 2263 DTU->applyUpdatesPermissive({{DominatorTree::Delete, BB, UnwindDest}}); 2264 } 2265 2266 /// removeUnreachableBlocks - Remove blocks that are not reachable, even 2267 /// if they are in a dead cycle. Return true if a change was made, false 2268 /// otherwise. 2269 bool llvm::removeUnreachableBlocks(Function &F, DomTreeUpdater *DTU, 2270 MemorySSAUpdater *MSSAU) { 2271 SmallPtrSet<BasicBlock *, 16> Reachable; 2272 bool Changed = markAliveBlocks(F, Reachable, DTU); 2273 2274 // If there are unreachable blocks in the CFG... 
2275 if (Reachable.size() == F.size()) 2276 return Changed; 2277 2278 assert(Reachable.size() < F.size()); 2279 NumRemoved += F.size() - Reachable.size(); 2280 2281 SmallSetVector<BasicBlock *, 8> DeadBlockSet; 2282 for (BasicBlock &BB : F) { 2283 // Skip reachable basic blocks 2284 if (Reachable.find(&BB) != Reachable.end()) 2285 continue; 2286 DeadBlockSet.insert(&BB); 2287 } 2288 2289 if (MSSAU) 2290 MSSAU->removeBlocks(DeadBlockSet); 2291 2292 // Loop over all of the basic blocks that are not reachable, dropping all of 2293 // their internal references. Update DTU if available. 2294 std::vector<DominatorTree::UpdateType> Updates; 2295 for (auto *BB : DeadBlockSet) { 2296 for (BasicBlock *Successor : successors(BB)) { 2297 if (!DeadBlockSet.count(Successor)) 2298 Successor->removePredecessor(BB); 2299 if (DTU) 2300 Updates.push_back({DominatorTree::Delete, BB, Successor}); 2301 } 2302 BB->dropAllReferences(); 2303 if (DTU) { 2304 Instruction *TI = BB->getTerminator(); 2305 assert(TI && "Basic block should have a terminator"); 2306 // Terminators like invoke can have users. We have to replace their users, 2307 // before removing them. 2308 if (!TI->use_empty()) 2309 TI->replaceAllUsesWith(UndefValue::get(TI->getType())); 2310 TI->eraseFromParent(); 2311 new UnreachableInst(BB->getContext(), BB); 2312 assert(succ_empty(BB) && "The successor list of BB isn't empty before " 2313 "applying corresponding DTU updates."); 2314 } 2315 } 2316 2317 if (DTU) { 2318 DTU->applyUpdatesPermissive(Updates); 2319 bool Deleted = false; 2320 for (auto *BB : DeadBlockSet) { 2321 if (DTU->isBBPendingDeletion(BB)) 2322 --NumRemoved; 2323 else 2324 Deleted = true; 2325 DTU->deleteBB(BB); 2326 } 2327 if (!Deleted) 2328 return false; 2329 } else { 2330 for (auto *BB : DeadBlockSet) 2331 BB->eraseFromParent(); 2332 } 2333 2334 return true; 2335 } 2336 2337 void llvm::combineMetadata(Instruction *K, const Instruction *J, 2338 ArrayRef<unsigned> KnownIDs, bool DoesKMove) { 2339 SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata; 2340 K->dropUnknownNonDebugMetadata(KnownIDs); 2341 K->getAllMetadataOtherThanDebugLoc(Metadata); 2342 for (const auto &MD : Metadata) { 2343 unsigned Kind = MD.first; 2344 MDNode *JMD = J->getMetadata(Kind); 2345 MDNode *KMD = MD.second; 2346 2347 switch (Kind) { 2348 default: 2349 K->setMetadata(Kind, nullptr); // Remove unknown metadata 2350 break; 2351 case LLVMContext::MD_dbg: 2352 llvm_unreachable("getAllMetadataOtherThanDebugLoc returned a MD_dbg"); 2353 case LLVMContext::MD_tbaa: 2354 K->setMetadata(Kind, MDNode::getMostGenericTBAA(JMD, KMD)); 2355 break; 2356 case LLVMContext::MD_alias_scope: 2357 K->setMetadata(Kind, MDNode::getMostGenericAliasScope(JMD, KMD)); 2358 break; 2359 case LLVMContext::MD_noalias: 2360 case LLVMContext::MD_mem_parallel_loop_access: 2361 K->setMetadata(Kind, MDNode::intersect(JMD, KMD)); 2362 break; 2363 case LLVMContext::MD_access_group: 2364 K->setMetadata(LLVMContext::MD_access_group, 2365 intersectAccessGroups(K, J)); 2366 break; 2367 case LLVMContext::MD_range: 2368 2369 // If K does move, use most generic range. Otherwise keep the range of 2370 // K. 2371 if (DoesKMove) 2372 // FIXME: If K does move, we should drop the range info and nonnull. 2373 // Currently this function is used with DoesKMove in passes 2374 // doing hoisting/sinking and the current behavior of using the 2375 // most generic range is correct in those cases. 
2376 K->setMetadata(Kind, MDNode::getMostGenericRange(JMD, KMD));
2377 break;
2378 case LLVMContext::MD_fpmath:
2379 K->setMetadata(Kind, MDNode::getMostGenericFPMath(JMD, KMD));
2380 break;
2381 case LLVMContext::MD_invariant_load:
2382 // Only set the !invariant.load if it is present in both instructions.
2383 K->setMetadata(Kind, JMD);
2384 break;
2385 case LLVMContext::MD_nonnull:
2386 // If K does move, keep !nonnull only if it is present in both instructions.
2387 if (DoesKMove)
2388 K->setMetadata(Kind, JMD);
2389 break;
2390 case LLVMContext::MD_invariant_group:
2391 // Preserve !invariant.group in K.
2392 break;
2393 case LLVMContext::MD_align:
2394 K->setMetadata(Kind,
2395 MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
2396 break;
2397 case LLVMContext::MD_dereferenceable:
2398 case LLVMContext::MD_dereferenceable_or_null:
2399 K->setMetadata(Kind,
2400 MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
2401 break;
2402 case LLVMContext::MD_preserve_access_index:
2403 // Preserve !preserve.access.index in K.
2404 break;
2405 }
2406 }
2407 // Set !invariant.group from J if J has it. If both instructions have it
2408 // then we will just pick it from J - even when they are different.
2409 // Also make sure that K is a load or store - e.g. combining a bitcast with a
2410 // load could produce a bitcast with invariant.group metadata, which is invalid.
2411 // FIXME: we should try to preserve both invariant.group md if they are
2412 // different, but right now an instruction can only have one invariant.group.
2413 if (auto *JMD = J->getMetadata(LLVMContext::MD_invariant_group))
2414 if (isa<LoadInst>(K) || isa<StoreInst>(K))
2415 K->setMetadata(LLVMContext::MD_invariant_group, JMD);
2416 }
2417
2418 void llvm::combineMetadataForCSE(Instruction *K, const Instruction *J,
2419 bool KDominatesJ) {
2420 unsigned KnownIDs[] = {
2421 LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
2422 LLVMContext::MD_noalias, LLVMContext::MD_range,
2423 LLVMContext::MD_invariant_load, LLVMContext::MD_nonnull,
2424 LLVMContext::MD_invariant_group, LLVMContext::MD_align,
2425 LLVMContext::MD_dereferenceable,
2426 LLVMContext::MD_dereferenceable_or_null,
2427 LLVMContext::MD_access_group, LLVMContext::MD_preserve_access_index};
2428 combineMetadata(K, J, KnownIDs, KDominatesJ);
2429 }
2430
2431 void llvm::copyMetadataForLoad(LoadInst &Dest, const LoadInst &Source) {
2432 SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
2433 Source.getAllMetadata(MD);
2434 MDBuilder MDB(Dest.getContext());
2435 Type *NewType = Dest.getType();
2436 const DataLayout &DL = Source.getModule()->getDataLayout();
2437 for (const auto &MDPair : MD) {
2438 unsigned ID = MDPair.first;
2439 MDNode *N = MDPair.second;
2440 // Note, essentially every kind of metadata should be preserved here! This
2441 // routine is supposed to clone a load instruction changing *only its type*.
2442 // The only metadata it makes sense to drop is metadata which is invalidated
2443 // when the pointer type changes. This should essentially never be the case
2444 // in LLVM, but we explicitly switch over only known metadata to be
2445 // conservatively correct. If you are adding metadata to LLVM which pertains
2446 // to loads, you almost certainly want to add it here.
2447 switch (ID) { 2448 case LLVMContext::MD_dbg: 2449 case LLVMContext::MD_tbaa: 2450 case LLVMContext::MD_prof: 2451 case LLVMContext::MD_fpmath: 2452 case LLVMContext::MD_tbaa_struct: 2453 case LLVMContext::MD_invariant_load: 2454 case LLVMContext::MD_alias_scope: 2455 case LLVMContext::MD_noalias: 2456 case LLVMContext::MD_nontemporal: 2457 case LLVMContext::MD_mem_parallel_loop_access: 2458 case LLVMContext::MD_access_group: 2459 // All of these directly apply. 2460 Dest.setMetadata(ID, N); 2461 break; 2462 2463 case LLVMContext::MD_nonnull: 2464 copyNonnullMetadata(Source, N, Dest); 2465 break; 2466 2467 case LLVMContext::MD_align: 2468 case LLVMContext::MD_dereferenceable: 2469 case LLVMContext::MD_dereferenceable_or_null: 2470 // These only directly apply if the new type is also a pointer. 2471 if (NewType->isPointerTy()) 2472 Dest.setMetadata(ID, N); 2473 break; 2474 2475 case LLVMContext::MD_range: 2476 copyRangeMetadata(DL, Source, N, Dest); 2477 break; 2478 } 2479 } 2480 } 2481 2482 void llvm::patchReplacementInstruction(Instruction *I, Value *Repl) { 2483 auto *ReplInst = dyn_cast<Instruction>(Repl); 2484 if (!ReplInst) 2485 return; 2486 2487 // Patch the replacement so that it is not more restrictive than the value 2488 // being replaced. 2489 // Note that if 'I' is a load being replaced by some operation, 2490 // for example, by an arithmetic operation, then andIRFlags() 2491 // would just erase all math flags from the original arithmetic 2492 // operation, which is clearly not wanted and not needed. 2493 if (!isa<LoadInst>(I)) 2494 ReplInst->andIRFlags(I); 2495 2496 // FIXME: If both the original and replacement value are part of the 2497 // same control-flow region (meaning that the execution of one 2498 // guarantees the execution of the other), then we can combine the 2499 // noalias scopes here and do better than the general conservative 2500 // answer used in combineMetadata(). 2501 2502 // In general, GVN unifies expressions over different control-flow 2503 // regions, and so we need a conservative combination of the noalias 2504 // scopes. 
2505 static const unsigned KnownIDs[] = { 2506 LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope, 2507 LLVMContext::MD_noalias, LLVMContext::MD_range, 2508 LLVMContext::MD_fpmath, LLVMContext::MD_invariant_load, 2509 LLVMContext::MD_invariant_group, LLVMContext::MD_nonnull, 2510 LLVMContext::MD_access_group, LLVMContext::MD_preserve_access_index}; 2511 combineMetadata(ReplInst, I, KnownIDs, false); 2512 } 2513 2514 template <typename RootType, typename DominatesFn> 2515 static unsigned replaceDominatedUsesWith(Value *From, Value *To, 2516 const RootType &Root, 2517 const DominatesFn &Dominates) { 2518 assert(From->getType() == To->getType()); 2519 2520 unsigned Count = 0; 2521 for (Value::use_iterator UI = From->use_begin(), UE = From->use_end(); 2522 UI != UE;) { 2523 Use &U = *UI++; 2524 if (!Dominates(Root, U)) 2525 continue; 2526 U.set(To); 2527 LLVM_DEBUG(dbgs() << "Replace dominated use of '" << From->getName() 2528 << "' as " << *To << " in " << *U << "\n"); 2529 ++Count; 2530 } 2531 return Count; 2532 } 2533 2534 unsigned llvm::replaceNonLocalUsesWith(Instruction *From, Value *To) { 2535 assert(From->getType() == To->getType()); 2536 auto *BB = From->getParent(); 2537 unsigned Count = 0; 2538 2539 for (Value::use_iterator UI = From->use_begin(), UE = From->use_end(); 2540 UI != UE;) { 2541 Use &U = *UI++; 2542 auto *I = cast<Instruction>(U.getUser()); 2543 if (I->getParent() == BB) 2544 continue; 2545 U.set(To); 2546 ++Count; 2547 } 2548 return Count; 2549 } 2550 2551 unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To, 2552 DominatorTree &DT, 2553 const BasicBlockEdge &Root) { 2554 auto Dominates = [&DT](const BasicBlockEdge &Root, const Use &U) { 2555 return DT.dominates(Root, U); 2556 }; 2557 return ::replaceDominatedUsesWith(From, To, Root, Dominates); 2558 } 2559 2560 unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To, 2561 DominatorTree &DT, 2562 const BasicBlock *BB) { 2563 auto ProperlyDominates = [&DT](const BasicBlock *BB, const Use &U) { 2564 auto *I = cast<Instruction>(U.getUser())->getParent(); 2565 return DT.properlyDominates(BB, I); 2566 }; 2567 return ::replaceDominatedUsesWith(From, To, BB, ProperlyDominates); 2568 } 2569 2570 bool llvm::callsGCLeafFunction(const CallBase *Call, 2571 const TargetLibraryInfo &TLI) { 2572 // Check if the function is specifically marked as a gc leaf function. 2573 if (Call->hasFnAttr("gc-leaf-function")) 2574 return true; 2575 if (const Function *F = Call->getCalledFunction()) { 2576 if (F->hasFnAttribute("gc-leaf-function")) 2577 return true; 2578 2579 if (auto IID = F->getIntrinsicID()) 2580 // Most LLVM intrinsics do not take safepoints. 2581 return IID != Intrinsic::experimental_gc_statepoint && 2582 IID != Intrinsic::experimental_deoptimize; 2583 } 2584 2585 // Lib calls can be materialized by some passes, and won't be 2586 // marked as 'gc-leaf-function.' All available Libcalls are 2587 // GC-leaf. 2588 LibFunc LF; 2589 if (TLI.getLibFunc(ImmutableCallSite(Call), LF)) { 2590 return TLI.has(LF); 2591 } 2592 2593 return false; 2594 } 2595 2596 void llvm::copyNonnullMetadata(const LoadInst &OldLI, MDNode *N, 2597 LoadInst &NewLI) { 2598 auto *NewTy = NewLI.getType(); 2599 2600 // This only directly applies if the new type is also a pointer. 2601 if (NewTy->isPointerTy()) { 2602 NewLI.setMetadata(LLVMContext::MD_nonnull, N); 2603 return; 2604 } 2605 2606 // The only other translation we can do is to integral loads with !range 2607 // metadata. 
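  // Illustrative example: when a load of a nonnull i8* is rewritten as an i64
  // load, the code below attaches !range !{i64 1, i64 0}, a wrapping range
  // meaning "any value except 0" (assuming null is address 0 in this address
  // space).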
2608 if (!NewTy->isIntegerTy())
2609 return;
2610
2611 MDBuilder MDB(NewLI.getContext());
2612 const Value *Ptr = OldLI.getPointerOperand();
2613 auto *ITy = cast<IntegerType>(NewTy);
2614 auto *NullInt = ConstantExpr::getPtrToInt(
2615 ConstantPointerNull::get(cast<PointerType>(Ptr->getType())), ITy);
2616 auto *NonNullInt = ConstantExpr::getAdd(NullInt, ConstantInt::get(ITy, 1));
2617 NewLI.setMetadata(LLVMContext::MD_range,
2618 MDB.createRange(NonNullInt, NullInt));
2619 }
2620
2621 void llvm::copyRangeMetadata(const DataLayout &DL, const LoadInst &OldLI,
2622 MDNode *N, LoadInst &NewLI) {
2623 auto *NewTy = NewLI.getType();
2624
2625 // Give up unless the new type is a pointer, where there is a single very
2626 // valuable mapping we can do reliably.
2627 // FIXME: It would be nice to propagate this in more ways, but the type
2628 // conversions make it hard.
2629 if (!NewTy->isPointerTy())
2630 return;
2631
2632 unsigned BitWidth = DL.getPointerTypeSizeInBits(NewTy);
2633 if (!getConstantRangeFromMetadata(*N).contains(APInt(BitWidth, 0))) {
2634 MDNode *NN = MDNode::get(OldLI.getContext(), None);
2635 NewLI.setMetadata(LLVMContext::MD_nonnull, NN);
2636 }
2637 }
2638
2639 void llvm::dropDebugUsers(Instruction &I) {
2640 SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
2641 findDbgUsers(DbgUsers, &I);
2642 for (auto *DII : DbgUsers)
2643 DII->eraseFromParent();
2644 }
2645
2646 void llvm::hoistAllInstructionsInto(BasicBlock *DomBlock, Instruction *InsertPt,
2647 BasicBlock *BB) {
2648 // Since we are moving the instructions out of their basic block, we do not
2649 // retain their original debug locations (DILocations) and debug intrinsic
2650 // instructions.
2651 //
2652 // Doing so would degrade the debugging experience and adversely affect the
2653 // accuracy of profiling information.
2654 //
2655 // Currently, when hoisting the instructions, we take the following actions:
2656 // - Remove their debug intrinsic instructions.
2657 // - Set their debug locations to the values from the insertion point.
2658 //
2659 // As per PR39141 (comment #8), the more fundamental reason why the dbg.values
2660 // need to be deleted is that there will not be any instructions with a
2661 // DILocation in either branch left after performing the transformation. We
2662 // can only insert a dbg.value after the two branches are joined again.
2663 //
2664 // See PR38762, PR39243 for more details.
2665 //
2666 // TODO: Extend llvm.dbg.value to take more than one SSA Value (PR39141) to
2667 // encode predicated DIExpressions that yield different results on different
2668 // code paths.
2669 for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE;) {
2670 Instruction *I = &*II;
2671 I->dropUnknownNonDebugMetadata();
2672 if (I->isUsedByMetadata())
2673 dropDebugUsers(*I);
2674 if (isa<DbgInfoIntrinsic>(I)) {
2675 // Remove DbgInfo Intrinsics.
2676 II = I->eraseFromParent();
2677 continue;
2678 }
2679 I->setDebugLoc(InsertPt->getDebugLoc());
2680 ++II;
2681 }
2682 DomBlock->getInstList().splice(InsertPt->getIterator(), BB->getInstList(),
2683 BB->begin(),
2684 BB->getTerminator()->getIterator());
2685 }
2686
2687 namespace {
2688
2689 /// A potential constituent of a bitreverse or bswap expression. See
2690 /// collectBitParts for a fuller explanation.
2691 struct BitPart {
2692 BitPart(Value *P, unsigned BW) : Provider(P) {
2693 Provenance.resize(BW);
2694 }
2695
2696 /// The Value that this is a bitreverse/bswap of.
2697 Value *Provider;
2698
2699 /// The "provenance" of each bit.
Provenance[A] = B means that bit A
2700 /// in Provider becomes bit B in the result of this expression.
2701 SmallVector<int8_t, 32> Provenance; // int8_t means max size is i128.
2702
2703 enum { Unset = -1 };
2704 };
2705
2706 } // end anonymous namespace
2707
2708 /// Analyze the specified subexpression and see if it is capable of providing
2709 /// pieces of a bswap or bitreverse. The subexpression provides a potential
2710 /// piece of a bswap or bitreverse if it can be proven that each non-zero bit in
2711 /// the output of the expression came from a corresponding bit in some other
2712 /// value. This function is recursive, and the end result is a mapping of
2713 /// bitnumber to bitnumber. It is the caller's responsibility to validate that
2714 /// the bitnumber to bitnumber mapping is correct for a bswap or bitreverse.
2715 ///
2716 /// For example, if the current subexpression is "(shl i32 %X, 24)" then we know
2717 /// that the expression deposits the low byte of %X into the high byte of the
2718 /// result and that all other bits are zero. This expression is accepted and a
2719 /// BitPart is returned with Provider set to %X and Provenance[24-31] set to
2720 /// [0-7].
2721 ///
2722 /// To avoid revisiting values, the BitPart results are memoized into the
2723 /// provided map. To avoid unnecessary copying of BitParts, BitParts are
2724 /// constructed in-place in the \c BPS map. Because of this \c BPS needs to
2725 /// store BitParts objects, not pointers. As we need the concept of a nullptr
2726 /// BitParts (Value has been analyzed and the analysis failed), we use an
2727 /// Optional type instead to provide the same functionality.
2728 ///
2729 /// Because we pass around references into \c BPS, we must use a container that
2730 /// does not invalidate internal references (std::map instead of DenseMap).
2731 static const Optional<BitPart> &
2732 collectBitParts(Value *V, bool MatchBSwaps, bool MatchBitReversals,
2733 std::map<Value *, Optional<BitPart>> &BPS, int Depth) {
2734 auto I = BPS.find(V);
2735 if (I != BPS.end())
2736 return I->second;
2737
2738 auto &Result = BPS[V] = None;
2739 auto BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
2740
2741 // Prevent stack overflow by limiting the recursion depth.
2742 if (Depth == BitPartRecursionMaxDepth) {
2743 LLVM_DEBUG(dbgs() << "collectBitParts max recursion depth reached.\n");
2744 return Result;
2745 }
2746
2747 if (Instruction *I = dyn_cast<Instruction>(V)) {
2748 // If this is an or instruction, it may be an inner node of the bswap.
2749 if (I->getOpcode() == Instruction::Or) {
2750 auto &A = collectBitParts(I->getOperand(0), MatchBSwaps,
2751 MatchBitReversals, BPS, Depth + 1);
2752 auto &B = collectBitParts(I->getOperand(1), MatchBSwaps,
2753 MatchBitReversals, BPS, Depth + 1);
2754 if (!A || !B)
2755 return Result;
2756
2757 // Try and merge the two together.
2758 if (!A->Provider || A->Provider != B->Provider)
2759 return Result;
2760
2761 Result = BitPart(A->Provider, BitWidth);
2762 for (unsigned i = 0; i < A->Provenance.size(); ++i) {
2763 if (A->Provenance[i] != BitPart::Unset &&
2764 B->Provenance[i] != BitPart::Unset &&
2765 A->Provenance[i] != B->Provenance[i])
2766 return Result = None;
2767
2768 if (A->Provenance[i] == BitPart::Unset)
2769 Result->Provenance[i] = B->Provenance[i];
2770 else
2771 Result->Provenance[i] = A->Provenance[i];
2772 }
2773
2774 return Result;
2775 }
2776
2777 // If this is a logical shift by a constant, recurse then shift the result.
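    // Illustrative example: for 'shl i32 %x, 8', result bit i (i >= 8) takes
    // the provenance of operand bit i-8 and the low 8 result bits get no
    // provenance (Unset); a logical shift right moves provenance the other way.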
2778 if (I->isLogicalShift() && isa<ConstantInt>(I->getOperand(1))) { 2779 unsigned BitShift = 2780 cast<ConstantInt>(I->getOperand(1))->getLimitedValue(~0U); 2781 // Ensure the shift amount is defined. 2782 if (BitShift > BitWidth) 2783 return Result; 2784 2785 auto &Res = collectBitParts(I->getOperand(0), MatchBSwaps, 2786 MatchBitReversals, BPS, Depth + 1); 2787 if (!Res) 2788 return Result; 2789 Result = Res; 2790 2791 // Perform the "shift" on BitProvenance. 2792 auto &P = Result->Provenance; 2793 if (I->getOpcode() == Instruction::Shl) { 2794 P.erase(std::prev(P.end(), BitShift), P.end()); 2795 P.insert(P.begin(), BitShift, BitPart::Unset); 2796 } else { 2797 P.erase(P.begin(), std::next(P.begin(), BitShift)); 2798 P.insert(P.end(), BitShift, BitPart::Unset); 2799 } 2800 2801 return Result; 2802 } 2803 2804 // If this is a logical 'and' with a mask that clears bits, recurse then 2805 // unset the appropriate bits. 2806 if (I->getOpcode() == Instruction::And && 2807 isa<ConstantInt>(I->getOperand(1))) { 2808 APInt Bit(I->getType()->getPrimitiveSizeInBits(), 1); 2809 const APInt &AndMask = cast<ConstantInt>(I->getOperand(1))->getValue(); 2810 2811 // Check that the mask allows a multiple of 8 bits for a bswap, for an 2812 // early exit. 2813 unsigned NumMaskedBits = AndMask.countPopulation(); 2814 if (!MatchBitReversals && NumMaskedBits % 8 != 0) 2815 return Result; 2816 2817 auto &Res = collectBitParts(I->getOperand(0), MatchBSwaps, 2818 MatchBitReversals, BPS, Depth + 1); 2819 if (!Res) 2820 return Result; 2821 Result = Res; 2822 2823 for (unsigned i = 0; i < BitWidth; ++i, Bit <<= 1) 2824 // If the AndMask is zero for this bit, clear the bit. 2825 if ((AndMask & Bit) == 0) 2826 Result->Provenance[i] = BitPart::Unset; 2827 return Result; 2828 } 2829 2830 // If this is a zext instruction zero extend the result. 2831 if (I->getOpcode() == Instruction::ZExt) { 2832 auto &Res = collectBitParts(I->getOperand(0), MatchBSwaps, 2833 MatchBitReversals, BPS, Depth + 1); 2834 if (!Res) 2835 return Result; 2836 2837 Result = BitPart(Res->Provider, BitWidth); 2838 auto NarrowBitWidth = 2839 cast<IntegerType>(cast<ZExtInst>(I)->getSrcTy())->getBitWidth(); 2840 for (unsigned i = 0; i < NarrowBitWidth; ++i) 2841 Result->Provenance[i] = Res->Provenance[i]; 2842 for (unsigned i = NarrowBitWidth; i < BitWidth; ++i) 2843 Result->Provenance[i] = BitPart::Unset; 2844 return Result; 2845 } 2846 } 2847 2848 // Okay, we got to something that isn't a shift, 'or' or 'and'. This must be 2849 // the input value to the bswap/bitreverse. 2850 Result = BitPart(V, BitWidth); 2851 for (unsigned i = 0; i < BitWidth; ++i) 2852 Result->Provenance[i] = i; 2853 return Result; 2854 } 2855 2856 static bool bitTransformIsCorrectForBSwap(unsigned From, unsigned To, 2857 unsigned BitWidth) { 2858 if (From % 8 != To % 8) 2859 return false; 2860 // Convert from bit indices to byte indices and check for a byte reversal. 
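  // E.g. for a 32-bit bswap (4 bytes), a bit sourced from byte 0 must land in
  // byte 3 of the result, byte 1 in byte 2, and so on, while keeping its
  // position within the byte.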
2861 From >>= 3; 2862 To >>= 3; 2863 BitWidth >>= 3; 2864 return From == BitWidth - To - 1; 2865 } 2866 2867 static bool bitTransformIsCorrectForBitReverse(unsigned From, unsigned To, 2868 unsigned BitWidth) { 2869 return From == BitWidth - To - 1; 2870 } 2871 2872 bool llvm::recognizeBSwapOrBitReverseIdiom( 2873 Instruction *I, bool MatchBSwaps, bool MatchBitReversals, 2874 SmallVectorImpl<Instruction *> &InsertedInsts) { 2875 if (Operator::getOpcode(I) != Instruction::Or) 2876 return false; 2877 if (!MatchBSwaps && !MatchBitReversals) 2878 return false; 2879 IntegerType *ITy = dyn_cast<IntegerType>(I->getType()); 2880 if (!ITy || ITy->getBitWidth() > 128) 2881 return false; // Can't do vectors or integers > 128 bits. 2882 unsigned BW = ITy->getBitWidth(); 2883 2884 unsigned DemandedBW = BW; 2885 IntegerType *DemandedTy = ITy; 2886 if (I->hasOneUse()) { 2887 if (TruncInst *Trunc = dyn_cast<TruncInst>(I->user_back())) { 2888 DemandedTy = cast<IntegerType>(Trunc->getType()); 2889 DemandedBW = DemandedTy->getBitWidth(); 2890 } 2891 } 2892 2893 // Try to find all the pieces corresponding to the bswap. 2894 std::map<Value *, Optional<BitPart>> BPS; 2895 auto Res = collectBitParts(I, MatchBSwaps, MatchBitReversals, BPS, 0); 2896 if (!Res) 2897 return false; 2898 auto &BitProvenance = Res->Provenance; 2899 2900 // Now, is the bit permutation correct for a bswap or a bitreverse? We can 2901 // only byteswap values with an even number of bytes. 2902 bool OKForBSwap = DemandedBW % 16 == 0, OKForBitReverse = true; 2903 for (unsigned i = 0; i < DemandedBW; ++i) { 2904 OKForBSwap &= 2905 bitTransformIsCorrectForBSwap(BitProvenance[i], i, DemandedBW); 2906 OKForBitReverse &= 2907 bitTransformIsCorrectForBitReverse(BitProvenance[i], i, DemandedBW); 2908 } 2909 2910 Intrinsic::ID Intrin; 2911 if (OKForBSwap && MatchBSwaps) 2912 Intrin = Intrinsic::bswap; 2913 else if (OKForBitReverse && MatchBitReversals) 2914 Intrin = Intrinsic::bitreverse; 2915 else 2916 return false; 2917 2918 if (ITy != DemandedTy) { 2919 Function *F = Intrinsic::getDeclaration(I->getModule(), Intrin, DemandedTy); 2920 Value *Provider = Res->Provider; 2921 IntegerType *ProviderTy = cast<IntegerType>(Provider->getType()); 2922 // We may need to truncate the provider. 2923 if (DemandedTy != ProviderTy) { 2924 auto *Trunc = CastInst::Create(Instruction::Trunc, Provider, DemandedTy, 2925 "trunc", I); 2926 InsertedInsts.push_back(Trunc); 2927 Provider = Trunc; 2928 } 2929 auto *CI = CallInst::Create(F, Provider, "rev", I); 2930 InsertedInsts.push_back(CI); 2931 auto *ExtInst = CastInst::Create(Instruction::ZExt, CI, ITy, "zext", I); 2932 InsertedInsts.push_back(ExtInst); 2933 return true; 2934 } 2935 2936 Function *F = Intrinsic::getDeclaration(I->getModule(), Intrin, ITy); 2937 InsertedInsts.push_back(CallInst::Create(F, Res->Provider, "rev", I)); 2938 return true; 2939 } 2940 2941 // CodeGen has special handling for some string functions that may replace 2942 // them with target-specific intrinsics. Since that'd skip our interceptors 2943 // in ASan/MSan/TSan/DFSan, and thus make us miss some memory accesses, 2944 // we mark affected calls as NoBuiltin, which will disable optimization 2945 // in CodeGen. 
2946 void llvm::maybeMarkSanitizerLibraryCallNoBuiltin(
2947 CallInst *CI, const TargetLibraryInfo *TLI) {
2948 Function *F = CI->getCalledFunction();
2949 LibFunc Func;
2950 if (F && !F->hasLocalLinkage() && F->hasName() &&
2951 TLI->getLibFunc(F->getName(), Func) && TLI->hasOptimizedCodeGen(Func) &&
2952 !F->doesNotAccessMemory())
2953 CI->addAttribute(AttributeList::FunctionIndex, Attribute::NoBuiltin);
2954 }
2955
2956 bool llvm::canReplaceOperandWithVariable(const Instruction *I, unsigned OpIdx) {
2957 // We can't have a PHI with a metadata type.
2958 if (I->getOperand(OpIdx)->getType()->isMetadataTy())
2959 return false;
2960
2961 // Early exit.
2962 if (!isa<Constant>(I->getOperand(OpIdx)))
2963 return true;
2964
2965 switch (I->getOpcode()) {
2966 default:
2967 return true;
2968 case Instruction::Call:
2969 case Instruction::Invoke:
2970 // Can't handle inline asm. Skip it.
2971 if (isa<InlineAsm>(ImmutableCallSite(I).getCalledValue()))
2972 return false;
2973 // Many arithmetic intrinsics have no issue taking a variable; however,
2974 // it's hard to distinguish these from specials such as
2975 // @llvm.frameaddress that require a constant.
2976 if (isa<IntrinsicInst>(I))
2977 return false;
2978
2979 // Constant bundle operands may need to retain their constant-ness for
2980 // correctness.
2981 if (ImmutableCallSite(I).isBundleOperand(OpIdx))
2982 return false;
2983 return true;
2984 case Instruction::ShuffleVector:
2985 // Shufflevector masks are constant.
2986 return OpIdx != 2;
2987 case Instruction::Switch:
2988 case Instruction::ExtractValue:
2989 // All operands apart from the first are constant.
2990 return OpIdx == 0;
2991 case Instruction::InsertValue:
2992 // All operands apart from the first and the second are constant.
2993 return OpIdx < 2;
2994 case Instruction::Alloca:
2995 // Static allocas (constant size in the entry block) are handled by
2996 // prologue/epilogue insertion so they're free anyway. We definitely don't
2997 // want to make them non-constant.
2998 return !cast<AllocaInst>(I)->isStaticAlloca();
2999 case Instruction::GetElementPtr:
3000 if (OpIdx == 0)
3001 return true;
3002 gep_type_iterator It = gep_type_begin(I);
3003 for (auto E = std::next(It, OpIdx); It != E; ++It)
3004 if (It.isStruct())
3005 return false;
3006 return true;
3007 }
3008 }
3009
3010 using AllocaForValueMapTy = DenseMap<Value *, AllocaInst *>;
3011 AllocaInst *llvm::findAllocaForValue(Value *V,
3012 AllocaForValueMapTy &AllocaForValue) {
3013 if (AllocaInst *AI = dyn_cast<AllocaInst>(V))
3014 return AI;
3015 // See if we've already calculated (or started to calculate) alloca for a
3016 // given value.
3017 AllocaForValueMapTy::iterator I = AllocaForValue.find(V);
3018 if (I != AllocaForValue.end())
3019 return I->second;
3020 // Store 0 while we're calculating alloca for value V to avoid
3021 // infinite recursion if the value references itself.
3022 AllocaForValue[V] = nullptr;
3023 AllocaInst *Res = nullptr;
3024 if (CastInst *CI = dyn_cast<CastInst>(V))
3025 Res = findAllocaForValue(CI->getOperand(0), AllocaForValue);
3026 else if (PHINode *PN = dyn_cast<PHINode>(V)) {
3027 for (Value *IncValue : PN->incoming_values()) {
3028 // Allow self-referencing phi-nodes.
3029 if (IncValue == PN)
3030 continue;
3031 AllocaInst *IncValueAI = findAllocaForValue(IncValue, AllocaForValue);
3032 // AI for incoming values should exist and should all be equal.
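      // Illustrative example: a phi merging two bitcasts of the same alloca
      // resolves to that alloca; if the incoming values trace back to
      // different allocas (or to something unrecognized), we give up below.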
3033 if (IncValueAI == nullptr || (Res != nullptr && IncValueAI != Res)) 3034 return nullptr; 3035 Res = IncValueAI; 3036 } 3037 } else if (GetElementPtrInst *EP = dyn_cast<GetElementPtrInst>(V)) { 3038 Res = findAllocaForValue(EP->getPointerOperand(), AllocaForValue); 3039 } else { 3040 LLVM_DEBUG(dbgs() << "Alloca search cancelled on unknown instruction: " 3041 << *V << "\n"); 3042 } 3043 if (Res) 3044 AllocaForValue[V] = Res; 3045 return Res; 3046 } 3047