//===- EarlyCSE.cpp - Simple and fast CSE pass ----------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs a simple dominator tree walk that eliminates trivially
// redundant instructions.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/ScopedHashTable.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionTracker.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/RecyclingAllocator.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
#include <vector>
using namespace llvm;

#define DEBUG_TYPE "early-cse"

STATISTIC(NumSimplify, "Number of instructions simplified or DCE'd");
STATISTIC(NumCSE, "Number of instructions CSE'd");
STATISTIC(NumCSELoad, "Number of load instructions CSE'd");
STATISTIC(NumCSECall, "Number of call instructions CSE'd");
STATISTIC(NumDSE, "Number of trivial dead stores removed");

static unsigned getHash(const void *V) {
  return DenseMapInfo<const void*>::getHashValue(V);
}

//===----------------------------------------------------------------------===//
// SimpleValue
//===----------------------------------------------------------------------===//

namespace {
/// SimpleValue - Instances of this struct represent available values in the
/// scoped hash table.
struct SimpleValue {
  Instruction *Inst;

  SimpleValue(Instruction *I) : Inst(I) {
    assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
  }

  bool isSentinel() const {
    return Inst == DenseMapInfo<Instruction*>::getEmptyKey() ||
           Inst == DenseMapInfo<Instruction*>::getTombstoneKey();
  }

  static bool canHandle(Instruction *Inst) {
    // This can only handle non-void readnone functions.
    if (CallInst *CI = dyn_cast<CallInst>(Inst))
      return CI->doesNotAccessMemory() && !CI->getType()->isVoidTy();
    return isa<CastInst>(Inst) || isa<BinaryOperator>(Inst) ||
           isa<GetElementPtrInst>(Inst) || isa<CmpInst>(Inst) ||
           isa<SelectInst>(Inst) || isa<ExtractElementInst>(Inst) ||
           isa<InsertElementInst>(Inst) || isa<ShuffleVectorInst>(Inst) ||
           isa<ExtractValueInst>(Inst) || isa<InsertValueInst>(Inst);
  }
};
}

namespace llvm {
template<> struct DenseMapInfo<SimpleValue> {
  static inline SimpleValue getEmptyKey() {
    return DenseMapInfo<Instruction*>::getEmptyKey();
  }
  static inline SimpleValue getTombstoneKey() {
    return DenseMapInfo<Instruction*>::getTombstoneKey();
  }
  static unsigned getHashValue(SimpleValue Val);
  static bool isEqual(SimpleValue LHS, SimpleValue RHS);
};
}

unsigned DenseMapInfo<SimpleValue>::getHashValue(SimpleValue Val) {
  Instruction *Inst = Val.Inst;
  // Hash in all of the operands as pointers.
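  // For commutative operations we canonicalize the operand order (here by
  // pointer value) before hashing, so commuted forms such as "a + b" and
  // "b + a" hash identically; isEqual() below accepts the commuted operand
  // order as equal.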
  if (BinaryOperator* BinOp = dyn_cast<BinaryOperator>(Inst)) {
    Value *LHS = BinOp->getOperand(0);
    Value *RHS = BinOp->getOperand(1);
    if (BinOp->isCommutative() && BinOp->getOperand(0) > BinOp->getOperand(1))
      std::swap(LHS, RHS);

    if (isa<OverflowingBinaryOperator>(BinOp)) {
      // Hash the overflow behavior
      unsigned Overflow =
        BinOp->hasNoSignedWrap() * OverflowingBinaryOperator::NoSignedWrap |
        BinOp->hasNoUnsignedWrap() * OverflowingBinaryOperator::NoUnsignedWrap;
      return hash_combine(BinOp->getOpcode(), Overflow, LHS, RHS);
    }

    return hash_combine(BinOp->getOpcode(), LHS, RHS);
  }

  if (CmpInst *CI = dyn_cast<CmpInst>(Inst)) {
    Value *LHS = CI->getOperand(0);
    Value *RHS = CI->getOperand(1);
    CmpInst::Predicate Pred = CI->getPredicate();
    if (Inst->getOperand(0) > Inst->getOperand(1)) {
      std::swap(LHS, RHS);
      Pred = CI->getSwappedPredicate();
    }
    return hash_combine(Inst->getOpcode(), Pred, LHS, RHS);
  }

  if (CastInst *CI = dyn_cast<CastInst>(Inst))
    return hash_combine(CI->getOpcode(), CI->getType(), CI->getOperand(0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(Inst))
    return hash_combine(EVI->getOpcode(), EVI->getOperand(0),
                        hash_combine_range(EVI->idx_begin(), EVI->idx_end()));

  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(Inst))
    return hash_combine(IVI->getOpcode(), IVI->getOperand(0),
                        IVI->getOperand(1),
                        hash_combine_range(IVI->idx_begin(), IVI->idx_end()));

  assert((isa<CallInst>(Inst) || isa<BinaryOperator>(Inst) ||
          isa<GetElementPtrInst>(Inst) || isa<SelectInst>(Inst) ||
          isa<ExtractElementInst>(Inst) || isa<InsertElementInst>(Inst) ||
          isa<ShuffleVectorInst>(Inst)) && "Invalid/unknown instruction");

  // Mix in the opcode.
  return hash_combine(Inst->getOpcode(),
                      hash_combine_range(Inst->value_op_begin(),
                                         Inst->value_op_end()));
}

bool DenseMapInfo<SimpleValue>::isEqual(SimpleValue LHS, SimpleValue RHS) {
  Instruction *LHSI = LHS.Inst, *RHSI = RHS.Inst;

  if (LHS.isSentinel() || RHS.isSentinel())
    return LHSI == RHSI;

  if (LHSI->getOpcode() != RHSI->getOpcode()) return false;
  if (LHSI->isIdenticalTo(RHSI)) return true;

  // If we're not strictly identical, we still might be a commutable instruction
  if (BinaryOperator *LHSBinOp = dyn_cast<BinaryOperator>(LHSI)) {
    if (!LHSBinOp->isCommutative())
      return false;

    assert(isa<BinaryOperator>(RHSI)
           && "same opcode, but different instruction type?");
    BinaryOperator *RHSBinOp = cast<BinaryOperator>(RHSI);

    // Check overflow attributes
    if (isa<OverflowingBinaryOperator>(LHSBinOp)) {
      assert(isa<OverflowingBinaryOperator>(RHSBinOp)
             && "same opcode, but different operator type?");
      if (LHSBinOp->hasNoUnsignedWrap() != RHSBinOp->hasNoUnsignedWrap() ||
          LHSBinOp->hasNoSignedWrap() != RHSBinOp->hasNoSignedWrap())
        return false;
    }

    // Commuted equality
    return LHSBinOp->getOperand(0) == RHSBinOp->getOperand(1) &&
           LHSBinOp->getOperand(1) == RHSBinOp->getOperand(0);
  }
  if (CmpInst *LHSCmp = dyn_cast<CmpInst>(LHSI)) {
    assert(isa<CmpInst>(RHSI)
           && "same opcode, but different instruction type?");
    CmpInst *RHSCmp = cast<CmpInst>(RHSI);
    // Commuted equality
    return LHSCmp->getOperand(0) == RHSCmp->getOperand(1) &&
           LHSCmp->getOperand(1) == RHSCmp->getOperand(0) &&
           LHSCmp->getSwappedPredicate() == RHSCmp->getPredicate();
  }

  return false;
}

//===----------------------------------------------------------------------===//
// CallValue
//===----------------------------------------------------------------------===//

namespace {
/// CallValue - Instances of this struct represent available call values in
/// the scoped hash table.
struct CallValue {
  Instruction *Inst;

  CallValue(Instruction *I) : Inst(I) {
    assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
  }

  bool isSentinel() const {
    return Inst == DenseMapInfo<Instruction*>::getEmptyKey() ||
           Inst == DenseMapInfo<Instruction*>::getTombstoneKey();
  }

  static bool canHandle(Instruction *Inst) {
    // Don't value number anything that returns void.
    if (Inst->getType()->isVoidTy())
      return false;

    CallInst *CI = dyn_cast<CallInst>(Inst);
    if (!CI || !CI->onlyReadsMemory())
      return false;
    return true;
  }
};
}

namespace llvm {
template<> struct DenseMapInfo<CallValue> {
  static inline CallValue getEmptyKey() {
    return DenseMapInfo<Instruction*>::getEmptyKey();
  }
  static inline CallValue getTombstoneKey() {
    return DenseMapInfo<Instruction*>::getTombstoneKey();
  }
  static unsigned getHashValue(CallValue Val);
  static bool isEqual(CallValue LHS, CallValue RHS);
};
}
unsigned DenseMapInfo<CallValue>::getHashValue(CallValue Val) {
  Instruction *Inst = Val.Inst;
  // Hash in all of the operands as pointers.
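  // Unlike the SimpleValue hash above, calls are hashed with a simple
  // shift/xor mix of the operand pointers; operand order is significant here,
  // and isEqual() falls back to Instruction::isIdenticalTo().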
  unsigned Res = 0;
  for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i) {
    assert(!Inst->getOperand(i)->getType()->isMetadataTy() &&
           "Cannot value number calls with metadata operands");
    Res ^= getHash(Inst->getOperand(i)) << (i & 0xF);
  }

  // Mix in the opcode.
  return (Res << 1) ^ Inst->getOpcode();
}

bool DenseMapInfo<CallValue>::isEqual(CallValue LHS, CallValue RHS) {
  Instruction *LHSI = LHS.Inst, *RHSI = RHS.Inst;
  if (LHS.isSentinel() || RHS.isSentinel())
    return LHSI == RHSI;
  return LHSI->isIdenticalTo(RHSI);
}


//===----------------------------------------------------------------------===//
// EarlyCSE pass.
//===----------------------------------------------------------------------===//

namespace {

/// EarlyCSE - This pass does a simple depth-first walk over the dominator
/// tree, eliminating trivially redundant instructions and using instsimplify
/// to canonicalize things as it goes. It is intended to be fast and catch
/// obvious cases so that instcombine and other passes are more effective. It
/// is expected that a later pass of GVN will catch the interesting/hard
/// cases.
class EarlyCSE : public FunctionPass {
public:
  const DataLayout *DL;
  const TargetLibraryInfo *TLI;
  DominatorTree *DT;
  AssumptionTracker *AT;
  typedef RecyclingAllocator<BumpPtrAllocator,
                      ScopedHashTableVal<SimpleValue, Value*> > AllocatorTy;
  typedef ScopedHashTable<SimpleValue, Value*, DenseMapInfo<SimpleValue>,
                          AllocatorTy> ScopedHTType;

  /// AvailableValues - This scoped hash table contains the current values of
  /// all of our simple scalar expressions. As we walk down the domtree, we
  /// look to see if instructions are in this: if so, we replace them with what
  /// we find, otherwise we insert them so that dominated values can succeed in
  /// their lookup.
  ScopedHTType *AvailableValues;

  /// AvailableLoads - This scoped hash table contains the current values
  /// of loads. This allows us to get efficient access to dominating loads when
  /// we have a fully redundant load. In addition to the most recent load, we
  /// keep track of a generation count of the read, which is compared against
  /// the current generation count. The current generation count is
  /// incremented after every possibly writing memory operation, which ensures
  /// that we only CSE loads with other loads that have no intervening store.
  typedef RecyclingAllocator<BumpPtrAllocator,
    ScopedHashTableVal<Value*, std::pair<Value*, unsigned> > > LoadMapAllocator;
  typedef ScopedHashTable<Value*, std::pair<Value*, unsigned>,
                          DenseMapInfo<Value*>, LoadMapAllocator> LoadHTType;
  LoadHTType *AvailableLoads;

  /// AvailableCalls - This scoped hash table contains the current values
  /// of read-only call values. It uses the same generation count as loads.
  typedef ScopedHashTable<CallValue, std::pair<Value*, unsigned> > CallHTType;
  CallHTType *AvailableCalls;

  /// CurrentGeneration - This is the current generation of the memory value.
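  /// It is bumped every time we see an instruction that may write to memory,
  /// so a load or read-only call recorded in AvailableLoads/AvailableCalls is
  /// only reused while its recorded generation still matches
  /// CurrentGeneration, i.e. while no potentially clobbering write has been
  /// seen in between.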
  unsigned CurrentGeneration;

  static char ID;
  explicit EarlyCSE() : FunctionPass(ID) {
    initializeEarlyCSEPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

private:

  // NodeScope - almost a POD, but needs to call the constructors for the
  // scoped hash tables so that a new scope gets pushed on. These are RAII so
  // that the scope gets popped when the NodeScope is destroyed.
  class NodeScope {
  public:
    NodeScope(ScopedHTType *availableValues,
              LoadHTType *availableLoads,
              CallHTType *availableCalls) :
      Scope(*availableValues),
      LoadScope(*availableLoads),
      CallScope(*availableCalls) {}

  private:
    NodeScope(const NodeScope&) LLVM_DELETED_FUNCTION;
    void operator=(const NodeScope&) LLVM_DELETED_FUNCTION;

    ScopedHTType::ScopeTy Scope;
    LoadHTType::ScopeTy LoadScope;
    CallHTType::ScopeTy CallScope;
  };

  // StackNode - contains all the needed information to create a stack for
  // doing a depth first traversal of the tree. This includes scopes for
  // values, loads, and calls as well as the generation. There is a child
  // iterator so that the children do not need to be stored separately.
  class StackNode {
  public:
    StackNode(ScopedHTType *availableValues,
              LoadHTType *availableLoads,
              CallHTType *availableCalls,
              unsigned cg, DomTreeNode *n,
              DomTreeNode::iterator child, DomTreeNode::iterator end) :
      CurrentGeneration(cg), ChildGeneration(cg), Node(n),
      ChildIter(child), EndIter(end),
      Scopes(availableValues, availableLoads, availableCalls),
      Processed(false) {}

    // Accessors.
    unsigned currentGeneration() { return CurrentGeneration; }
    unsigned childGeneration() { return ChildGeneration; }
    void childGeneration(unsigned generation) { ChildGeneration = generation; }
    DomTreeNode *node() { return Node; }
    DomTreeNode::iterator childIter() { return ChildIter; }
    DomTreeNode *nextChild() {
      DomTreeNode *child = *ChildIter;
      ++ChildIter;
      return child;
    }
    DomTreeNode::iterator end() { return EndIter; }
    bool isProcessed() { return Processed; }
    void process() { Processed = true; }

  private:
    StackNode(const StackNode&) LLVM_DELETED_FUNCTION;
    void operator=(const StackNode&) LLVM_DELETED_FUNCTION;

    // Members.
    unsigned CurrentGeneration;
    unsigned ChildGeneration;
    DomTreeNode *Node;
    DomTreeNode::iterator ChildIter;
    DomTreeNode::iterator EndIter;
    NodeScope Scopes;
    bool Processed;
  };

  bool processNode(DomTreeNode *Node);

  // This transformation requires dominator tree info.
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetLibraryInfo>();
    AU.setPreservesCFG();
  }
};
}

char EarlyCSE::ID = 0;

// createEarlyCSEPass - The public interface to this file.
FunctionPass *llvm::createEarlyCSEPass() {
  return new EarlyCSE();
}

INITIALIZE_PASS_BEGIN(EarlyCSE, "early-cse", "Early CSE", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_PASS_END(EarlyCSE, "early-cse", "Early CSE", false, false)

bool EarlyCSE::processNode(DomTreeNode *Node) {
  BasicBlock *BB = Node->getBlock();

  // If this block has a single predecessor, then the predecessor is the parent
  // of the domtree node and all of the live-out memory values are still
  // current in this block. If this block has multiple predecessors, then they
  // could have invalidated the live-out memory values of our parent block.
  // For now, just be conservative and invalidate memory if this block has
  // multiple predecessors.
  if (!BB->getSinglePredecessor())
    ++CurrentGeneration;

  /// LastStore - Keep track of the last non-volatile store that we saw... for
  /// as long as there is no instruction that reads memory. If we see a store
  /// to the same location, we delete the dead store. This zaps trivial dead
  /// stores which can occur in bitfield code among other things.
  StoreInst *LastStore = nullptr;

  bool Changed = false;

  // See if any instructions in the block can be eliminated. If so, do it. If
  // not, add them to AvailableValues.
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ) {
    Instruction *Inst = I++;

    // Dead instructions should just be removed.
    if (isInstructionTriviallyDead(Inst, TLI)) {
      DEBUG(dbgs() << "EarlyCSE DCE: " << *Inst << '\n');
      Inst->eraseFromParent();
      Changed = true;
      ++NumSimplify;
      continue;
    }

    // If the instruction can be simplified (e.g. X+0 = X) then replace it with
    // its simpler value.
    if (Value *V = SimplifyInstruction(Inst, DL, TLI, DT, AT)) {
      DEBUG(dbgs() << "EarlyCSE Simplify: " << *Inst << " to: " << *V << '\n');
      Inst->replaceAllUsesWith(V);
      Inst->eraseFromParent();
      Changed = true;
      ++NumSimplify;
      continue;
    }

    // If this is a simple instruction that we can value number, process it.
    if (SimpleValue::canHandle(Inst)) {
      // See if the instruction has an available value. If so, use it.
      if (Value *V = AvailableValues->lookup(Inst)) {
        DEBUG(dbgs() << "EarlyCSE CSE: " << *Inst << " to: " << *V << '\n');
        Inst->replaceAllUsesWith(V);
        Inst->eraseFromParent();
        Changed = true;
        ++NumCSE;
        continue;
      }

      // Otherwise, just remember that this value is available.
      AvailableValues->insert(Inst, Inst);
      continue;
    }

    // If this is a non-volatile load, process it.
    if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
      // Ignore volatile loads.
      if (!LI->isSimple()) {
        LastStore = nullptr;
        continue;
      }

      // If we have an available version of this load, and if it is the right
      // generation, replace this instruction.
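      // Note that AvailableLoads is keyed on the pointer operand itself, so
      // only loads of the identical pointer SSA value (with no intervening
      // generation bump) are treated as redundant here.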
      std::pair<Value*, unsigned> InVal =
        AvailableLoads->lookup(Inst->getOperand(0));
      if (InVal.first != nullptr && InVal.second == CurrentGeneration) {
        DEBUG(dbgs() << "EarlyCSE CSE LOAD: " << *Inst << " to: "
                     << *InVal.first << '\n');
        if (!Inst->use_empty()) Inst->replaceAllUsesWith(InVal.first);
        Inst->eraseFromParent();
        Changed = true;
        ++NumCSELoad;
        continue;
      }

      // Otherwise, remember that we have this instruction.
      AvailableLoads->insert(Inst->getOperand(0),
                         std::pair<Value*, unsigned>(Inst, CurrentGeneration));
      LastStore = nullptr;
      continue;
    }

    // If this instruction may read from memory, forget LastStore.
    if (Inst->mayReadFromMemory())
      LastStore = nullptr;

    // If this is a read-only call, process it.
    if (CallValue::canHandle(Inst)) {
      // If we have an available version of this call, and if it is the right
      // generation, replace this instruction.
      std::pair<Value*, unsigned> InVal = AvailableCalls->lookup(Inst);
      if (InVal.first != nullptr && InVal.second == CurrentGeneration) {
        DEBUG(dbgs() << "EarlyCSE CSE CALL: " << *Inst << " to: "
                     << *InVal.first << '\n');
        if (!Inst->use_empty()) Inst->replaceAllUsesWith(InVal.first);
        Inst->eraseFromParent();
        Changed = true;
        ++NumCSECall;
        continue;
      }

      // Otherwise, remember that we have this instruction.
      AvailableCalls->insert(Inst,
                         std::pair<Value*, unsigned>(Inst, CurrentGeneration));
      continue;
    }

    // Okay, this isn't something we can CSE at all. Check to see if it is
    // something that could modify memory. If so, our available memory values
    // cannot be used so bump the generation count.
    if (Inst->mayWriteToMemory()) {
      ++CurrentGeneration;

      if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
        // We do a trivial form of DSE if there are two stores to the same
        // location with no intervening loads. Delete the earlier store.
        if (LastStore &&
            LastStore->getPointerOperand() == SI->getPointerOperand()) {
          DEBUG(dbgs() << "EarlyCSE DEAD STORE: " << *LastStore << " due to: "
                       << *Inst << '\n');
          LastStore->eraseFromParent();
          Changed = true;
          ++NumDSE;
          LastStore = nullptr;
          continue;
        }

        // Okay, we just invalidated anything we knew about loaded values. Try
        // to salvage *something* by remembering that the stored value is a live
        // version of the pointer. It is safe to forward from volatile stores
        // to non-volatile loads, so we don't have to check for volatility of
        // the store.
        AvailableLoads->insert(SI->getPointerOperand(),
          std::pair<Value*, unsigned>(SI->getValueOperand(), CurrentGeneration));

        // Remember that this was the last store we saw for DSE.
        if (SI->isSimple())
          LastStore = SI;
      }
    }
  }

  return Changed;
}


bool EarlyCSE::runOnFunction(Function &F) {
  if (skipOptnoneFunction(F))
    return false;

  std::vector<StackNode *> nodesToProcess;

  DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
  DL = DLP ? &DLP->getDataLayout() : nullptr;
  TLI = &getAnalysis<TargetLibraryInfo>();
  DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  AT = &getAnalysis<AssumptionTracker>();

  // Tables that the pass uses when walking the domtree.
  ScopedHTType AVTable;
  AvailableValues = &AVTable;
  LoadHTType LoadTable;
  AvailableLoads = &LoadTable;
  CallHTType CallTable;
  AvailableCalls = &CallTable;

  CurrentGeneration = 0;
  bool Changed = false;

  // Process the root node.
  nodesToProcess.push_back(
      new StackNode(AvailableValues, AvailableLoads, AvailableCalls,
                    CurrentGeneration, DT->getRootNode(),
                    DT->getRootNode()->begin(),
                    DT->getRootNode()->end()));

  // Save the current generation.
  unsigned LiveOutGeneration = CurrentGeneration;

  // Process the stack.
  while (!nodesToProcess.empty()) {
    // Grab the first item off the stack. Set the current generation, remove
    // the node from the stack, and process it.
    StackNode *NodeToProcess = nodesToProcess.back();

    // Initialize class members.
    CurrentGeneration = NodeToProcess->currentGeneration();

    // Check if the node needs to be processed.
    if (!NodeToProcess->isProcessed()) {
      // Process the node.
      Changed |= processNode(NodeToProcess->node());
      NodeToProcess->childGeneration(CurrentGeneration);
      NodeToProcess->process();
    } else if (NodeToProcess->childIter() != NodeToProcess->end()) {
      // Push the next child onto the stack.
      DomTreeNode *child = NodeToProcess->nextChild();
      nodesToProcess.push_back(
          new StackNode(AvailableValues,
                        AvailableLoads,
                        AvailableCalls,
                        NodeToProcess->childGeneration(), child,
                        child->begin(), child->end()));
    } else {
      // It has been processed, and there are no more children to process,
      // so delete it and pop it off the stack.
      delete NodeToProcess;
      nodesToProcess.pop_back();
    }
  } // while (!nodes...)

  // Reset the current generation.
  CurrentGeneration = LiveOutGeneration;

  return Changed;
}