//===- EarlyCSE.cpp - Simple and fast CSE pass ----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass performs a simple dominator tree walk that eliminates trivially
// redundant instructions.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/EarlyCSE.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopedHashTable.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/GuardUtils.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/DebugCounter.h"
#include "llvm/Support/RecyclingAllocator.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
#include "llvm/Transforms/Utils/GuardUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cassert>
#include <deque>
#include <memory>
#include <utility>

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "early-cse"

STATISTIC(NumSimplify, "Number of instructions simplified or DCE'd");
STATISTIC(NumCSE, "Number of instructions CSE'd");
STATISTIC(NumCSECVP, "Number of compare instructions CVP'd");
STATISTIC(NumCSELoad, "Number of load instructions CSE'd");
STATISTIC(NumCSECall, "Number of call instructions CSE'd");
STATISTIC(NumDSE, "Number of trivial dead stores removed");

DEBUG_COUNTER(CSECounter, "early-cse",
              "Controls which instructions are removed");

static cl::opt<unsigned> EarlyCSEMssaOptCap(
    "earlycse-mssa-optimization-cap", cl::init(500), cl::Hidden,
    cl::desc("Enable imprecision in EarlyCSE in pathological cases, in exchange "
             "for faster compile. Caps the MemorySSA clobbering calls."));

static cl::opt<bool> EarlyCSEDebugHash(
    "earlycse-debug-hash", cl::init(false), cl::Hidden,
    cl::desc("Perform extra assertion checking to verify that SimpleValue's hash "
             "function is well-behaved w.r.t. its isEqual predicate"));

//===----------------------------------------------------------------------===//
// SimpleValue
//===----------------------------------------------------------------------===//

namespace {

/// Struct representing the available values in the scoped hash table.
struct SimpleValue {
  Instruction *Inst;

  SimpleValue(Instruction *I) : Inst(I) {
    assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
  }

  bool isSentinel() const {
    return Inst == DenseMapInfo<Instruction *>::getEmptyKey() ||
           Inst == DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static bool canHandle(Instruction *Inst) {
    // This can only handle non-void readnone functions.
    if (CallInst *CI = dyn_cast<CallInst>(Inst))
      return CI->doesNotAccessMemory() && !CI->getType()->isVoidTy();
    return isa<CastInst>(Inst) || isa<UnaryOperator>(Inst) ||
           isa<BinaryOperator>(Inst) || isa<GetElementPtrInst>(Inst) ||
           isa<CmpInst>(Inst) || isa<SelectInst>(Inst) ||
           isa<ExtractElementInst>(Inst) || isa<InsertElementInst>(Inst) ||
           isa<ShuffleVectorInst>(Inst) || isa<ExtractValueInst>(Inst) ||
           isa<InsertValueInst>(Inst) || isa<FreezeInst>(Inst);
  }
};

} // end anonymous namespace

namespace llvm {

template <> struct DenseMapInfo<SimpleValue> {
  static inline SimpleValue getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }

  static inline SimpleValue getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static unsigned getHashValue(SimpleValue Val);
  static bool isEqual(SimpleValue LHS, SimpleValue RHS);
};

} // end namespace llvm

/// Match a 'select' including an optional 'not' of the condition.
static bool matchSelectWithOptionalNotCond(Value *V, Value *&Cond, Value *&A,
                                           Value *&B,
                                           SelectPatternFlavor &Flavor) {
  // Return false if V is not even a select.
  if (!match(V, m_Select(m_Value(Cond), m_Value(A), m_Value(B))))
    return false;

  // Look through a 'not' of the condition operand by swapping A/B.
  Value *CondNot;
  if (match(Cond, m_Not(m_Value(CondNot)))) {
    Cond = CondNot;
    std::swap(A, B);
  }

  // Match canonical forms of abs/nabs/min/max. We are not using ValueTracking's
  // more powerful matchSelectPattern() because it may rely on instruction flags
  // such as "nsw". That would be incompatible with the current hashing
  // mechanism that may remove flags to increase the likelihood of CSE.

  // These are the canonical forms of abs(X) and nabs(X) created by instcombine:
  //   %N = sub i32 0, %X
  //   %C = icmp slt i32 %X, 0
  //   %ABS = select i1 %C, i32 %N, i32 %X
  //
  //   %N = sub i32 0, %X
  //   %C = icmp slt i32 %X, 0
  //   %NABS = select i1 %C, i32 %X, i32 %N
  Flavor = SPF_UNKNOWN;
  CmpInst::Predicate Pred;
  if (match(Cond, m_ICmp(Pred, m_Specific(B), m_ZeroInt())) &&
      Pred == ICmpInst::ICMP_SLT && match(A, m_Neg(m_Specific(B)))) {
    // ABS: B < 0 ? -B : B
    Flavor = SPF_ABS;
    return true;
  }
  if (match(Cond, m_ICmp(Pred, m_Specific(A), m_ZeroInt())) &&
      Pred == ICmpInst::ICMP_SLT && match(B, m_Neg(m_Specific(A)))) {
    // NABS: A < 0 ? A : -A
    Flavor = SPF_NABS;
    return true;
  }

  if (!match(Cond, m_ICmp(Pred, m_Specific(A), m_Specific(B)))) {
    // Check for commuted variants of min/max by swapping predicate.
    // If we do not match the standard or commuted patterns, this is not a
    // recognized form of min/max, but it is still a select, so return true.
    if (!match(Cond, m_ICmp(Pred, m_Specific(B), m_Specific(A))))
      return true;
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  // Check for inverted variants of min/max by swapping operands.
  bool Inversed = false;
  switch (Pred) {
  case CmpInst::ICMP_ULE:
  case CmpInst::ICMP_UGE:
  case CmpInst::ICMP_SLE:
  case CmpInst::ICMP_SGE:
    Pred = CmpInst::getInversePredicate(Pred);
    std::swap(A, B);
    Inversed = true;
    break;
  default:
    break;
  }

  switch (Pred) {
  case CmpInst::ICMP_UGT: Flavor = Inversed ? SPF_UMIN : SPF_UMAX; break;
  case CmpInst::ICMP_ULT: Flavor = Inversed ? SPF_UMAX : SPF_UMIN; break;
  case CmpInst::ICMP_SGT: Flavor = Inversed ? SPF_SMIN : SPF_SMAX; break;
  case CmpInst::ICMP_SLT: Flavor = Inversed ? SPF_SMAX : SPF_SMIN; break;
  default: break;
  }

  return true;
}
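
// For example (illustrative pseudo-IR), the matcher above recognizes each of
// these as SPF_SMAX over (%X, %Y), despite the commuted compare, the inverted
// predicate with swapped select arms, and the 'not' of the condition:
//   %S1 = select (icmp sgt %X, %Y), %X, %Y
//   %S2 = select (icmp slt %Y, %X), %X, %Y
//   %S3 = select (icmp sle %X, %Y), %Y, %X
//   %S4 = select (xor (icmp sgt %X, %Y), true), %Y, %X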

static unsigned getHashValueImpl(SimpleValue Val) {
  Instruction *Inst = Val.Inst;
  // Hash in all of the operands as pointers.
  if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst)) {
    Value *LHS = BinOp->getOperand(0);
    Value *RHS = BinOp->getOperand(1);
    if (BinOp->isCommutative() && BinOp->getOperand(0) > BinOp->getOperand(1))
      std::swap(LHS, RHS);

    return hash_combine(BinOp->getOpcode(), LHS, RHS);
  }

  if (CmpInst *CI = dyn_cast<CmpInst>(Inst)) {
    // Compares can be commuted by swapping the comparands and
    // updating the predicate. Choose the form that has the
    // comparands in sorted order, or in the case of a tie, the
    // one with the lower predicate.
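    // For example (illustrative), 'icmp ugt %a, %b' and 'icmp ult %b, %a'
    // denote the same comparison, so both are normalized to a single form
    // below before hashing.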
    Value *LHS = CI->getOperand(0);
    Value *RHS = CI->getOperand(1);
    CmpInst::Predicate Pred = CI->getPredicate();
    CmpInst::Predicate SwappedPred = CI->getSwappedPredicate();
    if (std::tie(LHS, Pred) > std::tie(RHS, SwappedPred)) {
      std::swap(LHS, RHS);
      Pred = SwappedPred;
    }
    return hash_combine(Inst->getOpcode(), Pred, LHS, RHS);
  }

  // Hash general selects to allow matching commuted true/false operands.
  SelectPatternFlavor SPF;
  Value *Cond, *A, *B;
  if (matchSelectWithOptionalNotCond(Inst, Cond, A, B, SPF)) {
    // Hash min/max/abs (cmp + select) to allow for commuted operands.
    // Min/max may also have a non-canonical compare predicate (e.g., the
    // compare for smin may use 'sgt' rather than 'slt'), and non-canonical
    // operands in the compare.
    // TODO: We should also detect FP min/max.
    if (SPF == SPF_SMIN || SPF == SPF_SMAX ||
        SPF == SPF_UMIN || SPF == SPF_UMAX) {
      if (A > B)
        std::swap(A, B);
      return hash_combine(Inst->getOpcode(), SPF, A, B);
    }
    if (SPF == SPF_ABS || SPF == SPF_NABS) {
      // ABS/NABS always puts the input in A and its negation in B.
      return hash_combine(Inst->getOpcode(), SPF, A, B);
    }

    // Hash general selects to allow matching commuted true/false operands.

    // If we do not have a compare as the condition, just hash in the
    // condition.
    CmpInst::Predicate Pred;
    Value *X, *Y;
    if (!match(Cond, m_Cmp(Pred, m_Value(X), m_Value(Y))))
      return hash_combine(Inst->getOpcode(), Cond, A, B);

    // Similar to cmp normalization (above) - canonicalize the predicate value:
    // select (icmp Pred, X, Y), A, B --> select (icmp InvPred, X, Y), B, A
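    // For example (illustrative), under this rule
    //   select (icmp sgt X, Y), A, B
    // and
    //   select (icmp sle X, Y), B, A
    // hash identically.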
    if (CmpInst::getInversePredicate(Pred) < Pred) {
      Pred = CmpInst::getInversePredicate(Pred);
      std::swap(A, B);
    }
    return hash_combine(Inst->getOpcode(), Pred, X, Y, A, B);
  }

  if (CastInst *CI = dyn_cast<CastInst>(Inst))
    return hash_combine(CI->getOpcode(), CI->getType(), CI->getOperand(0));

  if (FreezeInst *FI = dyn_cast<FreezeInst>(Inst))
    return hash_combine(FI->getOpcode(), FI->getOperand(0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(Inst))
    return hash_combine(EVI->getOpcode(), EVI->getOperand(0),
                        hash_combine_range(EVI->idx_begin(), EVI->idx_end()));

  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(Inst))
    return hash_combine(IVI->getOpcode(), IVI->getOperand(0),
                        IVI->getOperand(1),
                        hash_combine_range(IVI->idx_begin(), IVI->idx_end()));

  assert((isa<CallInst>(Inst) || isa<GetElementPtrInst>(Inst) ||
          isa<ExtractElementInst>(Inst) || isa<InsertElementInst>(Inst) ||
          isa<ShuffleVectorInst>(Inst) || isa<UnaryOperator>(Inst) ||
          isa<FreezeInst>(Inst)) &&
         "Invalid/unknown instruction");

  // Handle intrinsics with commutative operands.
  // TODO: Extend this to handle intrinsics with >2 operands where the first
  // two operands are commutative.
  auto *II = dyn_cast<IntrinsicInst>(Inst);
  if (II && II->isCommutative() && II->getNumArgOperands() == 2) {
    Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
    if (LHS > RHS)
      std::swap(LHS, RHS);
    return hash_combine(II->getOpcode(), LHS, RHS);
  }

  // Mix in the opcode.
  return hash_combine(
      Inst->getOpcode(),
      hash_combine_range(Inst->value_op_begin(), Inst->value_op_end()));
}

unsigned DenseMapInfo<SimpleValue>::getHashValue(SimpleValue Val) {
#ifndef NDEBUG
  // If -earlycse-debug-hash was specified, return a constant -- this
  // will force all hashing to collide, so we'll exhaustively search
  // the table for a match, and the assertion in isEqual will fire if
  // there's a bug causing equal keys to hash differently.
  if (EarlyCSEDebugHash)
    return 0;
#endif
  return getHashValueImpl(Val);
}

static bool isEqualImpl(SimpleValue LHS, SimpleValue RHS) {
  Instruction *LHSI = LHS.Inst, *RHSI = RHS.Inst;

  if (LHS.isSentinel() || RHS.isSentinel())
    return LHSI == RHSI;

  if (LHSI->getOpcode() != RHSI->getOpcode())
    return false;
  if (LHSI->isIdenticalToWhenDefined(RHSI))
    return true;

  // If we're not strictly identical, we still might be a commutable instruction.
  if (BinaryOperator *LHSBinOp = dyn_cast<BinaryOperator>(LHSI)) {
    if (!LHSBinOp->isCommutative())
      return false;

    assert(isa<BinaryOperator>(RHSI) &&
           "same opcode, but different instruction type?");
    BinaryOperator *RHSBinOp = cast<BinaryOperator>(RHSI);

    // Commuted equality
    return LHSBinOp->getOperand(0) == RHSBinOp->getOperand(1) &&
           LHSBinOp->getOperand(1) == RHSBinOp->getOperand(0);
  }
  if (CmpInst *LHSCmp = dyn_cast<CmpInst>(LHSI)) {
    assert(isa<CmpInst>(RHSI) &&
           "same opcode, but different instruction type?");
    CmpInst *RHSCmp = cast<CmpInst>(RHSI);
    // Commuted equality
    return LHSCmp->getOperand(0) == RHSCmp->getOperand(1) &&
           LHSCmp->getOperand(1) == RHSCmp->getOperand(0) &&
           LHSCmp->getSwappedPredicate() == RHSCmp->getPredicate();
  }

  // TODO: Extend this for >2 args by matching the trailing N-2 args.
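  // For example (illustrative), given a commutative intrinsic such as
  // llvm.smax, 'call @llvm.smax(%a, %b)' and 'call @llvm.smax(%b, %a)'
  // compare equal under the rule below.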
  auto *LII = dyn_cast<IntrinsicInst>(LHSI);
  auto *RII = dyn_cast<IntrinsicInst>(RHSI);
  if (LII && RII && LII->getIntrinsicID() == RII->getIntrinsicID() &&
      LII->isCommutative() && LII->getNumArgOperands() == 2) {
    return LII->getArgOperand(0) == RII->getArgOperand(1) &&
           LII->getArgOperand(1) == RII->getArgOperand(0);
  }

  // Min/max/abs can occur with commuted operands, non-canonical predicates,
  // and/or non-canonical operands.
  // Selects can be non-trivially equivalent via inverted conditions and swaps.
  SelectPatternFlavor LSPF, RSPF;
  Value *CondL, *CondR, *LHSA, *RHSA, *LHSB, *RHSB;
  if (matchSelectWithOptionalNotCond(LHSI, CondL, LHSA, LHSB, LSPF) &&
      matchSelectWithOptionalNotCond(RHSI, CondR, RHSA, RHSB, RSPF)) {
    if (LSPF == RSPF) {
      // TODO: We should also detect FP min/max.
      if (LSPF == SPF_SMIN || LSPF == SPF_SMAX ||
          LSPF == SPF_UMIN || LSPF == SPF_UMAX)
        return ((LHSA == RHSA && LHSB == RHSB) ||
                (LHSA == RHSB && LHSB == RHSA));

      if (LSPF == SPF_ABS || LSPF == SPF_NABS) {
        // Abs results are placed in a defined order by matchSelectPattern.
        return LHSA == RHSA && LHSB == RHSB;
      }

      // select Cond, A, B <--> select not(Cond), B, A
      if (CondL == CondR && LHSA == RHSA && LHSB == RHSB)
        return true;
    }

    // If the true/false operands are swapped and the conditions are compares
    // with inverted predicates, the selects are equal:
    // select (icmp Pred, X, Y), A, B <--> select (icmp InvPred, X, Y), B, A
    //
    // This also handles patterns with a double-negation in the sense of not +
    // inverse, because we looked through a 'not' in the matching function and
    // swapped A/B:
    // select (cmp Pred, X, Y), A, B <--> select (not (cmp InvPred, X, Y)), B, A
    //
    // This intentionally does NOT handle patterns with a double-negation in
    // the sense of not + not, because doing so could result in values
    // comparing as equal that hash differently in the min/max/abs cases like:
    // select (cmp slt, X, Y), X, Y <--> select (not (not (cmp slt, X, Y))), X, Y
    //   ^ hashes as min                  ^ would not hash as min
    // In the context of the EarlyCSE pass, however, such cases never reach
    // this code, as we simplify the double-negation before hashing the second
    // select (and so still succeed at CSEing them).
    if (LHSA == RHSB && LHSB == RHSA) {
      CmpInst::Predicate PredL, PredR;
      Value *X, *Y;
      if (match(CondL, m_Cmp(PredL, m_Value(X), m_Value(Y))) &&
          match(CondR, m_Cmp(PredR, m_Specific(X), m_Specific(Y))) &&
          CmpInst::getInversePredicate(PredL) == PredR)
        return true;
    }
  }

  return false;
}

bool DenseMapInfo<SimpleValue>::isEqual(SimpleValue LHS, SimpleValue RHS) {
  // These comparisons are nontrivial, so assert that equality implies
  // hash equality (DenseMap demands this as an invariant).
  bool Result = isEqualImpl(LHS, RHS);
  assert(!Result || (LHS.isSentinel() && LHS.Inst == RHS.Inst) ||
         getHashValueImpl(LHS) == getHashValueImpl(RHS));
  return Result;
}

//===----------------------------------------------------------------------===//
// CallValue
//===----------------------------------------------------------------------===//

namespace {

/// Struct representing the available call values in the scoped hash
/// table.
struct CallValue {
  Instruction *Inst;

  CallValue(Instruction *I) : Inst(I) {
    assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
  }

  bool isSentinel() const {
    return Inst == DenseMapInfo<Instruction *>::getEmptyKey() ||
           Inst == DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static bool canHandle(Instruction *Inst) {
    // Don't value number anything that returns void.
    if (Inst->getType()->isVoidTy())
      return false;

    CallInst *CI = dyn_cast<CallInst>(Inst);
    if (!CI || !CI->onlyReadsMemory())
      return false;
    return true;
  }
};

} // end anonymous namespace
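
// For example (illustrative IR), two identical read-only calls with no
// intervening write can be CSE'd through the call table built below:
//   %l1 = call i64 @strlen(i8* %s)   ; readonly
//   %l2 = call i64 @strlen(i8* %s)   ; CSE'd to %l1 if the generation matches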

namespace llvm {

template <> struct DenseMapInfo<CallValue> {
  static inline CallValue getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }

  static inline CallValue getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static unsigned getHashValue(CallValue Val);
  static bool isEqual(CallValue LHS, CallValue RHS);
};

} // end namespace llvm

unsigned DenseMapInfo<CallValue>::getHashValue(CallValue Val) {
  Instruction *Inst = Val.Inst;

  // gc.relocate is a 'special' call: its second and third operands are
  // not real values, but indices into the statepoint's argument list.
  // Get the values they point to.
  if (const GCRelocateInst *GCR = dyn_cast<GCRelocateInst>(Inst))
    return hash_combine(GCR->getOpcode(), GCR->getOperand(0),
                        GCR->getBasePtr(), GCR->getDerivedPtr());

  // Hash all of the operands as pointers and mix in the opcode.
  return hash_combine(
      Inst->getOpcode(),
      hash_combine_range(Inst->value_op_begin(), Inst->value_op_end()));
}

bool DenseMapInfo<CallValue>::isEqual(CallValue LHS, CallValue RHS) {
  Instruction *LHSI = LHS.Inst, *RHSI = RHS.Inst;
  if (LHS.isSentinel() || RHS.isSentinel())
    return LHSI == RHSI;

  // See comment above in `getHashValue()`.
  if (const GCRelocateInst *GCR1 = dyn_cast<GCRelocateInst>(LHSI))
    if (const GCRelocateInst *GCR2 = dyn_cast<GCRelocateInst>(RHSI))
      return GCR1->getOperand(0) == GCR2->getOperand(0) &&
             GCR1->getBasePtr() == GCR2->getBasePtr() &&
             GCR1->getDerivedPtr() == GCR2->getDerivedPtr();

  return LHSI->isIdenticalTo(RHSI);
}

//===----------------------------------------------------------------------===//
// EarlyCSE implementation
//===----------------------------------------------------------------------===//

namespace {

/// A simple and fast domtree-based CSE pass.
///
/// This pass does a simple depth-first walk over the dominator tree,
/// eliminating trivially redundant instructions and using instsimplify to
/// canonicalize things as it goes. It is intended to be fast and catch obvious
/// cases so that instcombine and other passes are more effective. It is
/// expected that a later pass of GVN will catch the interesting/hard cases.
class EarlyCSE {
public:
  const TargetLibraryInfo &TLI;
  const TargetTransformInfo &TTI;
  DominatorTree &DT;
  AssumptionCache &AC;
  const SimplifyQuery SQ;
  MemorySSA *MSSA;
  std::unique_ptr<MemorySSAUpdater> MSSAUpdater;

  using AllocatorTy =
      RecyclingAllocator<BumpPtrAllocator,
                         ScopedHashTableVal<SimpleValue, Value *>>;
  using ScopedHTType =
      ScopedHashTable<SimpleValue, Value *, DenseMapInfo<SimpleValue>,
                      AllocatorTy>;

  /// A scoped hash table of the current values of all of our simple
  /// scalar expressions.
  ///
  /// As we walk down the domtree, we look to see if instructions are in this:
  /// if so, we replace them with what we find, otherwise we insert them so
  /// that dominated values can succeed in their lookup.
  ScopedHTType AvailableValues;

  /// A scoped hash table of the current values of previously encountered
  /// memory locations.
  ///
  /// This allows us to get efficient access to dominating loads or stores when
  /// we have a fully redundant load. In addition to the most recent load, we
  /// keep track of a generation count of the read, which is compared against
  /// the current generation count. The current generation count is incremented
  /// after every possibly writing memory operation, which ensures that we only
  /// CSE loads with other loads that have no intervening store. Ordering
  /// events (such as fences or atomic instructions) increment the generation
  /// count as well; essentially, we model these as writes to all possible
  /// locations. Note that atomic and/or volatile loads and stores can be
  /// present in the table; it is the responsibility of the consumer to inspect
  /// the atomicity/volatility if needed.
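  ///
  /// For example (illustrative IR):
  ///   %v1 = load i32, i32* %p
  ///   store i32 0, i32* %q    ; possibly-writing op: generation is bumped
  ///   %v2 = load i32, i32* %p ; older-generation entry; not CSE'd with %v1
  ///                           ; (unless MemorySSA proves %q can't clobber %p)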
  struct LoadValue {
    Instruction *DefInst = nullptr;
    unsigned Generation = 0;
    int MatchingId = -1;
    bool IsAtomic = false;

    LoadValue() = default;
    LoadValue(Instruction *Inst, unsigned Generation, unsigned MatchingId,
              bool IsAtomic)
        : DefInst(Inst), Generation(Generation), MatchingId(MatchingId),
          IsAtomic(IsAtomic) {}
  };

  using LoadMapAllocator =
      RecyclingAllocator<BumpPtrAllocator,
                         ScopedHashTableVal<Value *, LoadValue>>;
  using LoadHTType =
      ScopedHashTable<Value *, LoadValue, DenseMapInfo<Value *>,
                      LoadMapAllocator>;

  LoadHTType AvailableLoads;

  // A scoped hash table mapping memory locations (represented as typed
  // addresses) to generation numbers at which that memory location became
  // (henceforth indefinitely) invariant.
  using InvariantMapAllocator =
      RecyclingAllocator<BumpPtrAllocator,
                         ScopedHashTableVal<MemoryLocation, unsigned>>;
  using InvariantHTType =
      ScopedHashTable<MemoryLocation, unsigned, DenseMapInfo<MemoryLocation>,
                      InvariantMapAllocator>;
  InvariantHTType AvailableInvariants;

  /// A scoped hash table of the current values of read-only call
  /// values.
  ///
  /// It uses the same generation count as loads.
  using CallHTType =
      ScopedHashTable<CallValue, std::pair<Instruction *, unsigned>>;
  CallHTType AvailableCalls;

  /// This is the current generation of the memory value.
  unsigned CurrentGeneration = 0;

  /// Set up the EarlyCSE runner for a particular function.
  EarlyCSE(const DataLayout &DL, const TargetLibraryInfo &TLI,
           const TargetTransformInfo &TTI, DominatorTree &DT,
           AssumptionCache &AC, MemorySSA *MSSA)
      : TLI(TLI), TTI(TTI), DT(DT), AC(AC), SQ(DL, &TLI, &DT, &AC), MSSA(MSSA),
        MSSAUpdater(std::make_unique<MemorySSAUpdater>(MSSA)) {}

  bool run();

private:
  unsigned ClobberCounter = 0;
  // Almost a POD, but needs to call the constructors for the scoped hash
  // tables so that a new scope gets pushed on. These are RAII so that the
  // scope gets popped when the NodeScope is destroyed.
  class NodeScope {
  public:
    NodeScope(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads,
              InvariantHTType &AvailableInvariants, CallHTType &AvailableCalls)
        : Scope(AvailableValues), LoadScope(AvailableLoads),
          InvariantScope(AvailableInvariants), CallScope(AvailableCalls) {}
    NodeScope(const NodeScope &) = delete;
    NodeScope &operator=(const NodeScope &) = delete;

  private:
    ScopedHTType::ScopeTy Scope;
    LoadHTType::ScopeTy LoadScope;
    InvariantHTType::ScopeTy InvariantScope;
    CallHTType::ScopeTy CallScope;
  };

  // Contains all the needed information to create a stack for doing a depth
  // first traversal of the tree. This includes scopes for values, loads, and
  // calls as well as the generation. There is a child iterator so that the
  // children do not need to be stored separately.
  class StackNode {
  public:
    StackNode(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads,
              InvariantHTType &AvailableInvariants, CallHTType &AvailableCalls,
              unsigned cg, DomTreeNode *n, DomTreeNode::const_iterator child,
              DomTreeNode::const_iterator end)
        : CurrentGeneration(cg), ChildGeneration(cg), Node(n), ChildIter(child),
          EndIter(end),
          Scopes(AvailableValues, AvailableLoads, AvailableInvariants,
                 AvailableCalls) {}
    StackNode(const StackNode &) = delete;
    StackNode &operator=(const StackNode &) = delete;

    // Accessors.
    unsigned currentGeneration() { return CurrentGeneration; }
    unsigned childGeneration() { return ChildGeneration; }
    void childGeneration(unsigned generation) { ChildGeneration = generation; }
    DomTreeNode *node() { return Node; }
    DomTreeNode::const_iterator childIter() { return ChildIter; }

    DomTreeNode *nextChild() {
      DomTreeNode *child = *ChildIter;
      ++ChildIter;
      return child;
    }

    DomTreeNode::const_iterator end() { return EndIter; }
    bool isProcessed() { return Processed; }
    void process() { Processed = true; }

  private:
    unsigned CurrentGeneration;
    unsigned ChildGeneration;
    DomTreeNode *Node;
    DomTreeNode::const_iterator ChildIter;
    DomTreeNode::const_iterator EndIter;
    NodeScope Scopes;
    bool Processed = false;
  };

  /// Wrapper class to handle memory instructions, including loads,
  /// stores and intrinsic loads and stores defined by the target.
  class ParseMemoryInst {
  public:
    ParseMemoryInst(Instruction *Inst, const TargetTransformInfo &TTI)
        : Inst(Inst) {
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
        if (TTI.getTgtMemIntrinsic(II, Info))
          IsTargetMemInst = true;
    }

    bool isLoad() const {
      if (IsTargetMemInst) return Info.ReadMem;
      return isa<LoadInst>(Inst);
    }

    bool isStore() const {
      if (IsTargetMemInst) return Info.WriteMem;
      return isa<StoreInst>(Inst);
    }

    bool isAtomic() const {
      if (IsTargetMemInst)
        return Info.Ordering != AtomicOrdering::NotAtomic;
      return Inst->isAtomic();
    }

    bool isUnordered() const {
      if (IsTargetMemInst)
        return Info.isUnordered();

      if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
        return LI->isUnordered();
      } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
        return SI->isUnordered();
      }
      // Conservative answer
      return !Inst->isAtomic();
    }

    bool isVolatile() const {
      if (IsTargetMemInst)
        return Info.IsVolatile;

      if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
        return LI->isVolatile();
      } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
        return SI->isVolatile();
      }
      // Conservative answer
      return true;
    }

    bool isInvariantLoad() const {
      if (auto *LI = dyn_cast<LoadInst>(Inst))
        return LI->hasMetadata(LLVMContext::MD_invariant_load);
      return false;
    }

    bool isMatchingMemLoc(const ParseMemoryInst &Inst) const {
      return (getPointerOperand() == Inst.getPointerOperand() &&
              getMatchingId() == Inst.getMatchingId());
    }

    bool isValid() const { return getPointerOperand() != nullptr; }

    // For regular (non-intrinsic) loads/stores, this is set to -1. For
    // intrinsic loads/stores, the id is retrieved from the corresponding
    // field in the MemIntrinsicInfo structure. That field contains
    // non-negative values only.
    int getMatchingId() const {
      if (IsTargetMemInst) return Info.MatchingId;
      return -1;
    }

    Value *getPointerOperand() const {
      if (IsTargetMemInst) return Info.PtrVal;
      return getLoadStorePointerOperand(Inst);
    }

    bool mayReadFromMemory() const {
      if (IsTargetMemInst) return Info.ReadMem;
      return Inst->mayReadFromMemory();
    }

    bool mayWriteToMemory() const {
      if (IsTargetMemInst) return Info.WriteMem;
      return Inst->mayWriteToMemory();
    }

  private:
    bool IsTargetMemInst = false;
    MemIntrinsicInfo Info;
    Instruction *Inst;
  };

  bool processNode(DomTreeNode *Node);

  bool handleBranchCondition(Instruction *CondInst, const BranchInst *BI,
                             const BasicBlock *BB, const BasicBlock *Pred);

  Value *getOrCreateResult(Value *Inst, Type *ExpectedType) const {
    if (auto *LI = dyn_cast<LoadInst>(Inst))
      return LI;
    if (auto *SI = dyn_cast<StoreInst>(Inst))
      return SI->getValueOperand();
    assert(isa<IntrinsicInst>(Inst) && "Instruction not supported");
    return TTI.getOrCreateResultFromMemIntrinsic(cast<IntrinsicInst>(Inst),
                                                 ExpectedType);
  }
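
  // For example (illustrative IR): when forwarding a store to a later load,
  //   store i32 %v, i32* %p
  //   %x = load i32, i32* %p
  // getOrCreateResult on the store yields %v, which then replaces %x.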

  /// Return true if the instruction is known to only operate on memory
  /// provably invariant in the given "generation".
  bool isOperatingOnInvariantMemAt(Instruction *I, unsigned GenAt);

  bool isSameMemGeneration(unsigned EarlierGeneration, unsigned LaterGeneration,
                           Instruction *EarlierInst, Instruction *LaterInst);

  void removeMSSA(Instruction &Inst) {
    if (!MSSA)
      return;
    if (VerifyMemorySSA)
      MSSA->verifyMemorySSA();
    // Removing a store here can leave MemorySSA in an unoptimized state by
    // creating MemoryPhis that have identical arguments and by creating
    // MemoryUses whose defining access is not an actual clobber. The phi case
    // is handled by MemorySSA when passing OptimizePhis = true to
    // removeMemoryAccess. The non-optimized MemoryUse case is lazily updated
    // by MemorySSA's getClobberingMemoryAccess.
    MSSAUpdater->removeMemoryAccess(&Inst, true);
  }
};

} // end anonymous namespace

/// Determine if the memory referenced by LaterInst is from the same heap
/// version as EarlierInst.
/// This is currently called in two scenarios:
///
///   load p
///   ...
///   load p
///
/// and
///
///   x = load p
///   ...
///   store x, p
///
/// in both cases we want to verify that there are no possible writes to the
/// memory referenced by p between the earlier and later instruction.
bool EarlyCSE::isSameMemGeneration(unsigned EarlierGeneration,
                                   unsigned LaterGeneration,
                                   Instruction *EarlierInst,
                                   Instruction *LaterInst) {
  // Check the simple memory generation tracking first.
  if (EarlierGeneration == LaterGeneration)
    return true;

  if (!MSSA)
    return false;

  // If MemorySSA has determined that one of EarlierInst or LaterInst does not
  // read/write memory, then we can safely return true here.
  // FIXME: We could be more aggressive when checking doesNotAccessMemory(),
  // onlyReadsMemory(), mayReadFromMemory(), and mayWriteToMemory() in this pass
  // by also checking the MemorySSA MemoryAccess on the instruction. Initial
  // experiments suggest this isn't worthwhile, at least for C/C++ code compiled
  // with the default optimization pipeline.
  auto *EarlierMA = MSSA->getMemoryAccess(EarlierInst);
  if (!EarlierMA)
    return true;
  auto *LaterMA = MSSA->getMemoryAccess(LaterInst);
  if (!LaterMA)
    return true;

  // Since we know LaterDef dominates LaterInst and EarlierInst dominates
  // LaterInst, if LaterDef dominates EarlierInst then it can't occur between
  // EarlierInst and LaterInst and neither can any other write that potentially
  // clobbers LaterInst.
  MemoryAccess *LaterDef;
  if (ClobberCounter < EarlyCSEMssaOptCap) {
    LaterDef = MSSA->getWalker()->getClobberingMemoryAccess(LaterInst);
    ClobberCounter++;
  } else
    LaterDef = LaterMA->getDefiningAccess();

  return MSSA->dominates(LaterDef, EarlierMA);
}

bool EarlyCSE::isOperatingOnInvariantMemAt(Instruction *I, unsigned GenAt) {
  // A location loaded from with an invariant_load is assumed to *never* change
  // within the visible scope of the compilation.
  if (auto *LI = dyn_cast<LoadInst>(I))
    if (LI->hasMetadata(LLVMContext::MD_invariant_load))
      return true;

  auto MemLocOpt = MemoryLocation::getOrNone(I);
  if (!MemLocOpt)
    // "target" intrinsic forms of loads aren't currently known to
    // MemoryLocation::get. TODO
    return false;
  MemoryLocation MemLoc = *MemLocOpt;
  if (!AvailableInvariants.count(MemLoc))
    return false;

  // Is the generation at which this became invariant older than the
  // current one?
  return AvailableInvariants.lookup(MemLoc) <= GenAt;
}

bool EarlyCSE::handleBranchCondition(Instruction *CondInst,
                                     const BranchInst *BI, const BasicBlock *BB,
                                     const BasicBlock *Pred) {
  assert(BI->isConditional() && "Should be a conditional branch!");
  assert(BI->getCondition() == CondInst && "Wrong condition?");
  assert(BI->getSuccessor(0) == BB || BI->getSuccessor(1) == BB);
  auto *TorF = (BI->getSuccessor(0) == BB)
                   ? ConstantInt::getTrue(BB->getContext())
                   : ConstantInt::getFalse(BB->getContext());
  auto MatchBinOp = [](Instruction *I, unsigned Opcode) {
    if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(I))
      return BOp->getOpcode() == Opcode;
    return false;
  };
  // If the condition is an AND operation, we can propagate its operands into
  // the true branch. If it is an OR operation, we can propagate them into the
  // false branch.
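  // For example (illustrative IR), given
  //   %a = icmp ult i32 %x, 10
  //   %b = icmp ult i32 %y, 10
  //   %c = and i1 %a, %b
  //   br i1 %c, label %taken, label %untaken
  // the worklist below records %c, %a, and %b as 'true' inside %taken.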
  unsigned PropagateOpcode =
      (BI->getSuccessor(0) == BB) ? Instruction::And : Instruction::Or;

  bool MadeChanges = false;
  SmallVector<Instruction *, 4> WorkList;
  SmallPtrSet<Instruction *, 4> Visited;
  WorkList.push_back(CondInst);
  while (!WorkList.empty()) {
    Instruction *Curr = WorkList.pop_back_val();

    AvailableValues.insert(Curr, TorF);
    LLVM_DEBUG(dbgs() << "EarlyCSE CVP: Add conditional value for '"
                      << Curr->getName() << "' as " << *TorF << " in "
                      << BB->getName() << "\n");
    if (!DebugCounter::shouldExecute(CSECounter)) {
      LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
    } else {
      // Replace all dominated uses with the known value.
      if (unsigned Count = replaceDominatedUsesWith(Curr, TorF, DT,
                                                    BasicBlockEdge(Pred, BB))) {
        NumCSECVP += Count;
        MadeChanges = true;
      }
    }

    if (MatchBinOp(Curr, PropagateOpcode))
      for (auto &Op : cast<BinaryOperator>(Curr)->operands())
        if (Instruction *OPI = dyn_cast<Instruction>(Op))
          if (SimpleValue::canHandle(OPI) && Visited.insert(OPI).second)
            WorkList.push_back(OPI);
  }

  return MadeChanges;
}

bool EarlyCSE::processNode(DomTreeNode *Node) {
  bool Changed = false;
  BasicBlock *BB = Node->getBlock();

  // If this block has a single predecessor, then the predecessor is the parent
  // of the domtree node and all of the live out memory values are still current
  // in this block. If this block has multiple predecessors, then they could
  // have invalidated the live-out memory values of our parent value. For now,
  // just be conservative and invalidate memory if this block has multiple
  // predecessors.
  if (!BB->getSinglePredecessor())
    ++CurrentGeneration;

  // If this node has a single predecessor which ends in a conditional branch,
  // we can infer the value of the branch condition given that we took this
  // path. We need the single predecessor to ensure there's not another path
  // which reaches this block where the condition might hold a different
  // value. Since we're adding this to the scoped hash table (like any other
  // def), it will have been popped if we encounter a future merge block.
  if (BasicBlock *Pred = BB->getSinglePredecessor()) {
    auto *BI = dyn_cast<BranchInst>(Pred->getTerminator());
    if (BI && BI->isConditional()) {
      auto *CondInst = dyn_cast<Instruction>(BI->getCondition());
      if (CondInst && SimpleValue::canHandle(CondInst))
        Changed |= handleBranchCondition(CondInst, BI, BB, Pred);
    }
  }

  /// LastStore - Keep track of the last non-volatile store that we saw... for
  /// as long as there is no instruction that reads memory. If we see a store
  /// to the same location, we delete the dead store. This zaps trivial dead
  /// stores which can occur in bitfield code among other things.
  Instruction *LastStore = nullptr;

  // See if any instructions in the block can be eliminated. If so, do it. If
  // not, add them to AvailableValues.
  for (Instruction &Inst : make_early_inc_range(BB->getInstList())) {
    // Dead instructions should just be removed.
    if (isInstructionTriviallyDead(&Inst, &TLI)) {
      LLVM_DEBUG(dbgs() << "EarlyCSE DCE: " << Inst << '\n');
      if (!DebugCounter::shouldExecute(CSECounter)) {
        LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
        continue;
      }

      salvageKnowledge(&Inst, &AC);
      salvageDebugInfo(Inst);
      removeMSSA(Inst);
      Inst.eraseFromParent();
      Changed = true;
      ++NumSimplify;
      continue;
    }

    // Skip assume intrinsics, they don't really have side effects (although
    // they're marked as such to ensure preservation of control dependencies),
    // and this pass will not bother with their removal. However, we should
    // mark the condition as true for all dominated blocks.
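    // For example (illustrative IR), after
    //   %c = icmp eq i32 %x, 0
    //   call void @llvm.assume(i1 %c)
    // later instructions identical to %c fold to 'true' in dominated code.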
    if (match(&Inst, m_Intrinsic<Intrinsic::assume>())) {
      auto *CondI =
          dyn_cast<Instruction>(cast<CallInst>(Inst).getArgOperand(0));
      if (CondI && SimpleValue::canHandle(CondI)) {
        LLVM_DEBUG(dbgs() << "EarlyCSE considering assumption: " << Inst
                          << '\n');
        AvailableValues.insert(CondI, ConstantInt::getTrue(BB->getContext()));
      } else
        LLVM_DEBUG(dbgs() << "EarlyCSE skipping assumption: " << Inst << '\n');
      continue;
    }

    // Skip sideeffect intrinsics, for the same reason as assume intrinsics.
    if (match(&Inst, m_Intrinsic<Intrinsic::sideeffect>())) {
      LLVM_DEBUG(dbgs() << "EarlyCSE skipping sideeffect: " << Inst << '\n');
      continue;
    }

    // We can skip all invariant.start intrinsics since they only read memory,
    // and we can forward values across them. For invariant starts without
    // invariant ends, we can use the fact that the invariantness never ends to
    // start a scope in the current generation which is true for all future
    // generations. Also, we don't need to consume the last store since the
    // semantics of invariant.start allow us to perform DSE of the last
    // store, if there was a store following invariant.start. Consider:
    //
    //   store 30, i8* p
    //   invariant.start(p)
    //   store 40, i8* p
    //
    // We can DSE the store to 30, since the store 40 to invariant location p
    // causes undefined behaviour.
    if (match(&Inst, m_Intrinsic<Intrinsic::invariant_start>())) {
      // If there are any uses, the scope might end.
      if (!Inst.use_empty())
        continue;
      MemoryLocation MemLoc =
          MemoryLocation::getForArgument(&cast<CallInst>(Inst), 1, TLI);
      // Don't start a scope if we already have a better one pushed.
      if (!AvailableInvariants.count(MemLoc))
        AvailableInvariants.insert(MemLoc, CurrentGeneration);
      continue;
    }

    if (isGuard(&Inst)) {
      if (auto *CondI =
              dyn_cast<Instruction>(cast<CallInst>(Inst).getArgOperand(0))) {
        if (SimpleValue::canHandle(CondI)) {
          // Do we already know the actual value of this condition?
          if (auto *KnownCond = AvailableValues.lookup(CondI)) {
            // Is the condition known to be true?
            if (isa<ConstantInt>(KnownCond) &&
                cast<ConstantInt>(KnownCond)->isOne()) {
              LLVM_DEBUG(dbgs()
                         << "EarlyCSE removing guard: " << Inst << '\n');
              salvageKnowledge(&Inst, &AC);
              removeMSSA(Inst);
              Inst.eraseFromParent();
              Changed = true;
              continue;
            } else
              // Use the known value if it wasn't true.
              cast<CallInst>(Inst).setArgOperand(0, KnownCond);
          }
          // The condition we're guarding on here is true for all dominated
          // locations.
          AvailableValues.insert(CondI, ConstantInt::getTrue(BB->getContext()));
        }
      }

      // Guard intrinsics read all memory, but don't write any memory.
      // Accordingly, don't update the generation but consume the last store
      // (to avoid an incorrect DSE).
      LastStore = nullptr;
      continue;
    }

    // If the instruction can be simplified (e.g. X+0 = X) then replace it with
    // its simpler value.
    if (Value *V = SimplifyInstruction(&Inst, SQ)) {
      LLVM_DEBUG(dbgs() << "EarlyCSE Simplify: " << Inst << " to: " << *V
                        << '\n');
      if (!DebugCounter::shouldExecute(CSECounter)) {
        LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
      } else {
        bool Killed = false;
        if (!Inst.use_empty()) {
          Inst.replaceAllUsesWith(V);
          Changed = true;
        }
        if (isInstructionTriviallyDead(&Inst, &TLI)) {
          salvageKnowledge(&Inst, &AC);
          removeMSSA(Inst);
          Inst.eraseFromParent();
          Changed = true;
          Killed = true;
        }
        if (Changed)
          ++NumSimplify;
        if (Killed)
          continue;
      }
    }

    // If this is a simple instruction that we can value number, process it.
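    // For example (illustrative IR), the second add below is redundant and is
    // replaced by the first (the commutative hash makes the two collide):
    //   %a1 = add i32 %x, %y
    //   %a2 = add i32 %y, %x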
    if (SimpleValue::canHandle(&Inst)) {
      // See if the instruction has an available value. If so, use it.
      if (Value *V = AvailableValues.lookup(&Inst)) {
        LLVM_DEBUG(dbgs() << "EarlyCSE CSE: " << Inst << " to: " << *V
                          << '\n');
        if (!DebugCounter::shouldExecute(CSECounter)) {
          LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
          continue;
        }
        if (auto *I = dyn_cast<Instruction>(V))
          I->andIRFlags(&Inst);
        Inst.replaceAllUsesWith(V);
        salvageKnowledge(&Inst, &AC);
        removeMSSA(Inst);
        Inst.eraseFromParent();
        Changed = true;
        ++NumCSE;
        continue;
      }

      // Otherwise, just remember that this value is available.
      AvailableValues.insert(&Inst, &Inst);
      continue;
    }

    ParseMemoryInst MemInst(&Inst, TTI);
    // If this is a non-volatile load, process it.
    if (MemInst.isValid() && MemInst.isLoad()) {
      // (Conservatively) we can't peek past the ordering implied by this
      // operation, but we can add this load to our set of available values.
      if (MemInst.isVolatile() || !MemInst.isUnordered()) {
        LastStore = nullptr;
        ++CurrentGeneration;
      }

      if (MemInst.isInvariantLoad()) {
        // If we pass an invariant load, we know that memory location is
        // indefinitely constant from the moment of first dereferenceability.
        // We conservatively treat the invariant_load as that moment. If we
        // pass an invariant load after already establishing a scope, don't
        // restart it since we want to preserve the earliest point seen.
        auto MemLoc = MemoryLocation::get(&Inst);
        if (!AvailableInvariants.count(MemLoc))
          AvailableInvariants.insert(MemLoc, CurrentGeneration);
      }

      // If we have an available version of this load, and if it is the right
      // generation or the load is known to be from an invariant location,
      // replace this instruction.
      //
      // If either the dominating load or the current load are invariant, then
      // we can assume the current load loads the same value as the dominating
      // load.
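      // For example (illustrative IR), with no intervening clobber of %p:
      //   %v1 = load i32, i32* %p
      //   ...
      //   %v2 = load i32, i32* %p   ; CSE'd to %v1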
      LoadValue InVal = AvailableLoads.lookup(MemInst.getPointerOperand());
      if (InVal.DefInst != nullptr &&
          InVal.MatchingId == MemInst.getMatchingId() &&
          // We don't yet handle removing loads with ordering of any kind.
          !MemInst.isVolatile() && MemInst.isUnordered() &&
          // We can't replace an atomic load with one which isn't also atomic.
          InVal.IsAtomic >= MemInst.isAtomic() &&
          (isOperatingOnInvariantMemAt(&Inst, InVal.Generation) ||
           isSameMemGeneration(InVal.Generation, CurrentGeneration,
                               InVal.DefInst, &Inst))) {
        Value *Op = getOrCreateResult(InVal.DefInst, Inst.getType());
        if (Op != nullptr) {
          LLVM_DEBUG(dbgs() << "EarlyCSE CSE LOAD: " << Inst
                            << " to: " << *InVal.DefInst << '\n');
          if (!DebugCounter::shouldExecute(CSECounter)) {
            LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
            continue;
          }
          if (!Inst.use_empty())
            Inst.replaceAllUsesWith(Op);
          salvageKnowledge(&Inst, &AC);
          removeMSSA(Inst);
          Inst.eraseFromParent();
          Changed = true;
          ++NumCSELoad;
          continue;
        }
      }

      // Otherwise, remember that we have this instruction.
      AvailableLoads.insert(MemInst.getPointerOperand(),
                            LoadValue(&Inst, CurrentGeneration,
                                      MemInst.getMatchingId(),
                                      MemInst.isAtomic()));
      LastStore = nullptr;
      continue;
    }

    // If this instruction may read from memory or throw (and potentially read
    // from memory in the exception handler), forget LastStore. Load/store
    // intrinsics will indicate both a read and a write to memory. The target
    // may override this (e.g. so that a store intrinsic does not read from
    // memory, and thus will be treated the same as a regular store for
    // commoning purposes).
    if ((Inst.mayReadFromMemory() || Inst.mayThrow()) &&
        !(MemInst.isValid() && !MemInst.mayReadFromMemory()))
      LastStore = nullptr;

    // If this is a read-only call, process it.
    if (CallValue::canHandle(&Inst)) {
      // If we have an available version of this call, and if it is the right
      // generation, replace this instruction.
      std::pair<Instruction *, unsigned> InVal = AvailableCalls.lookup(&Inst);
      if (InVal.first != nullptr &&
          isSameMemGeneration(InVal.second, CurrentGeneration, InVal.first,
                              &Inst)) {
        LLVM_DEBUG(dbgs() << "EarlyCSE CSE CALL: " << Inst
                          << " to: " << *InVal.first << '\n');
        if (!DebugCounter::shouldExecute(CSECounter)) {
          LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
          continue;
        }
        if (!Inst.use_empty())
          Inst.replaceAllUsesWith(InVal.first);
        salvageKnowledge(&Inst, &AC);
        removeMSSA(Inst);
        Inst.eraseFromParent();
        Changed = true;
        ++NumCSECall;
        continue;
      }

      // Otherwise, remember that we have this instruction.
      AvailableCalls.insert(&Inst, std::make_pair(&Inst, CurrentGeneration));
      continue;
    }

    // A release fence requires that all stores complete before it, but does
    // not prevent the reordering of following loads 'before' the fence. As a
    // result, we don't need to consider it as writing to memory and don't need
    // to advance the generation. We do need to prevent DSE across the fence,
    // but that's handled above.
    if (auto *FI = dyn_cast<FenceInst>(&Inst))
      if (FI->getOrdering() == AtomicOrdering::Release) {
        assert(Inst.mayReadFromMemory() && "relied on to prevent DSE above");
        continue;
      }

    // Write-back DSE - If we write back the same value we just loaded from
    // the same location and haven't passed any intervening writes or ordering
    // operations, we can remove the write. The primary benefit is in allowing
    // the available load table to remain valid and value forward past where
    // the store originally was.
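    // For example (illustrative IR):
    //   %v = load i32, i32* %p
    //   ...                     ; no intervening clobber of %p
    //   store i32 %v, i32* %p   ; removable: it stores back the loaded value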
    if (MemInst.isValid() && MemInst.isStore()) {
      LoadValue InVal = AvailableLoads.lookup(MemInst.getPointerOperand());
      if (InVal.DefInst &&
          InVal.DefInst == getOrCreateResult(&Inst, InVal.DefInst->getType()) &&
          InVal.MatchingId == MemInst.getMatchingId() &&
          // We don't yet handle removing stores with ordering of any kind.
          !MemInst.isVolatile() && MemInst.isUnordered() &&
          (isOperatingOnInvariantMemAt(&Inst, InVal.Generation) ||
           isSameMemGeneration(InVal.Generation, CurrentGeneration,
                               InVal.DefInst, &Inst))) {
        // It is okay to have a LastStore to a different pointer here if
        // MemorySSA tells us that the load and store are from the same memory
        // generation. In that case, LastStore should keep its present value
        // since we're removing the current store.
        assert((!LastStore ||
                ParseMemoryInst(LastStore, TTI).getPointerOperand() ==
                    MemInst.getPointerOperand() ||
                MSSA) &&
               "can't have an intervening store if not using MemorySSA!");
        LLVM_DEBUG(dbgs() << "EarlyCSE DSE (writeback): " << Inst << '\n');
        if (!DebugCounter::shouldExecute(CSECounter)) {
          LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
          continue;
        }
        salvageKnowledge(&Inst, &AC);
        removeMSSA(Inst);
        Inst.eraseFromParent();
        Changed = true;
        ++NumDSE;
        // We can avoid incrementing the generation count since we were able
        // to eliminate this store.
        continue;
      }
    }

    // Okay, this isn't something we can CSE at all. Check to see if it is
    // something that could modify memory. If so, our available memory values
    // cannot be used so bump the generation count.
    if (Inst.mayWriteToMemory()) {
      ++CurrentGeneration;

      if (MemInst.isValid() && MemInst.isStore()) {
        // We do a trivial form of DSE if there are two stores to the same
        // location with no intervening loads. Delete the earlier store.
        // At the moment, we don't remove ordered stores, but do remove
        // unordered atomic stores. There's no special requirement (for
        // unordered atomics) about removing atomic stores only in favor of
        // other atomic stores since we were going to execute the non-atomic
        // one anyway and the atomic one might never have become visible.
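        // For example (illustrative IR), the first store below is dead and is
        // deleted when the second one is reached:
        //   store i32 1, i32* %p
        //   store i32 2, i32* %p   ; kills the store above (no reads of
        //                          ;  memory in between)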
        if (LastStore) {
          ParseMemoryInst LastStoreMemInst(LastStore, TTI);
          assert(LastStoreMemInst.isUnordered() &&
                 !LastStoreMemInst.isVolatile() &&
                 "Violated invariant");
          if (LastStoreMemInst.isMatchingMemLoc(MemInst)) {
            LLVM_DEBUG(dbgs() << "EarlyCSE DEAD STORE: " << *LastStore
                              << " due to: " << Inst << '\n');
            if (!DebugCounter::shouldExecute(CSECounter)) {
              LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
            } else {
              salvageKnowledge(&Inst, &AC);
              removeMSSA(*LastStore);
              LastStore->eraseFromParent();
              Changed = true;
              ++NumDSE;
              LastStore = nullptr;
            }
          }
          // fallthrough - we can exploit information about this store
        }

        // Okay, we just invalidated anything we knew about loaded values. Try
        // to salvage *something* by remembering that the stored value is a live
        // version of the pointer. It is safe to forward from volatile stores
        // to non-volatile loads, so we don't have to check for volatility of
        // the store.
        AvailableLoads.insert(MemInst.getPointerOperand(),
                              LoadValue(&Inst, CurrentGeneration,
                                        MemInst.getMatchingId(),
                                        MemInst.isAtomic()));

        // Remember that this was the last unordered store we saw for DSE. We
        // don't yet handle DSE on ordered or volatile stores since we don't
        // have a good way to model the ordering requirement for following
        // passes once the store is removed. We could insert a fence, but
        // since fences are slightly stronger than stores in their ordering,
        // it's not clear this is a profitable transform. Another option would
        // be to merge the ordering with that of the post dominating store.
        if (MemInst.isUnordered() && !MemInst.isVolatile())
          LastStore = &Inst;
        else
          LastStore = nullptr;
      }
    }
  }

  return Changed;
}

bool EarlyCSE::run() {
  // Note, deque is being used here because there are significant performance
  // gains over vector when the container becomes very large due to the
  // specific access patterns. For more information see the mailing list
  // discussion on this:
  // http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20120116/135228.html
  std::deque<StackNode *> nodesToProcess;

  bool Changed = false;

  // Process the root node.
  nodesToProcess.push_back(new StackNode(
      AvailableValues, AvailableLoads, AvailableInvariants, AvailableCalls,
      CurrentGeneration, DT.getRootNode(),
      DT.getRootNode()->begin(), DT.getRootNode()->end()));

  assert(!CurrentGeneration && "Create a new EarlyCSE instance to rerun it.");

  // Process the stack.
  while (!nodesToProcess.empty()) {
    // Grab the first item off the stack. Set the current generation, remove
    // the node from the stack, and process it.
    StackNode *NodeToProcess = nodesToProcess.back();

    // Initialize class members.
    CurrentGeneration = NodeToProcess->currentGeneration();

    // Check if the node needs to be processed.
    if (!NodeToProcess->isProcessed()) {
      // Process the node.
      Changed |= processNode(NodeToProcess->node());
      NodeToProcess->childGeneration(CurrentGeneration);
      NodeToProcess->process();
    } else if (NodeToProcess->childIter() != NodeToProcess->end()) {
      // Push the next child onto the stack.
      DomTreeNode *child = NodeToProcess->nextChild();
      nodesToProcess.push_back(
          new StackNode(AvailableValues, AvailableLoads, AvailableInvariants,
                        AvailableCalls, NodeToProcess->childGeneration(),
                        child, child->begin(), child->end()));
    } else {
      // It has been processed, and there are no more children to process,
      // so delete it and pop it off the stack.
      delete NodeToProcess;
      nodesToProcess.pop_back();
    }
  } // while (!nodes...)

  return Changed;
}

PreservedAnalyses EarlyCSEPass::run(Function &F,
                                    FunctionAnalysisManager &AM) {
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto *MSSA =
      UseMemorySSA ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA() : nullptr;

  EarlyCSE CSE(F.getParent()->getDataLayout(), TLI, TTI, DT, AC, MSSA);

  if (!CSE.run())
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<GlobalsAA>();
  if (UseMemorySSA)
    PA.preserve<MemorySSAAnalysis>();
  return PA;
}
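
// Illustrative usage note: with the new pass manager, this pass can be run
// directly as, e.g., 'opt -passes=early-cse' (or 'early-cse<memssa>' for the
// MemorySSA-backed variant); the legacy wrappers below register the same
// functionality as 'early-cse' and 'early-cse-memssa'.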

namespace {

/// A simple and fast domtree-based CSE pass.
///
/// This pass does a simple depth-first walk over the dominator tree,
/// eliminating trivially redundant instructions and using instsimplify to
/// canonicalize things as it goes. It is intended to be fast and catch obvious
/// cases so that instcombine and other passes are more effective. It is
/// expected that a later pass of GVN will catch the interesting/hard cases.
template<bool UseMemorySSA>
class EarlyCSELegacyCommonPass : public FunctionPass {
public:
  static char ID;

  EarlyCSELegacyCommonPass() : FunctionPass(ID) {
    if (UseMemorySSA)
      initializeEarlyCSEMemSSALegacyPassPass(*PassRegistry::getPassRegistry());
    else
      initializeEarlyCSELegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
    auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
    auto *MSSA =
        UseMemorySSA ? &getAnalysis<MemorySSAWrapperPass>().getMSSA() : nullptr;

    EarlyCSE CSE(F.getParent()->getDataLayout(), TLI, TTI, DT, AC, MSSA);

    return CSE.run();
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    if (UseMemorySSA) {
      AU.addRequired<AAResultsWrapperPass>();
      AU.addRequired<MemorySSAWrapperPass>();
      AU.addPreserved<MemorySSAWrapperPass>();
    }
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<AAResultsWrapperPass>();
    AU.setPreservesCFG();
  }
};

} // end anonymous namespace

using EarlyCSELegacyPass = EarlyCSELegacyCommonPass</*UseMemorySSA=*/false>;

template<>
char EarlyCSELegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(EarlyCSELegacyPass, "early-cse", "Early CSE", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(EarlyCSELegacyPass, "early-cse", "Early CSE", false, false)

using EarlyCSEMemSSALegacyPass =
    EarlyCSELegacyCommonPass</*UseMemorySSA=*/true>;

template<>
char EarlyCSEMemSSALegacyPass::ID = 0;

FunctionPass *llvm::createEarlyCSEPass(bool UseMemorySSA) {
  if (UseMemorySSA)
    return new EarlyCSEMemSSALegacyPass();
  else
    return new EarlyCSELegacyPass();
}

INITIALIZE_PASS_BEGIN(EarlyCSEMemSSALegacyPass, "early-cse-memssa",
                      "Early CSE w/ MemorySSA", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_END(EarlyCSEMemSSALegacyPass, "early-cse-memssa",
                    "Early CSE w/ MemorySSA", false, false)