//===-- MachineCSE.cpp - Machine Common Subexpression Elimination Pass ----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs global common subexpression elimination on machine
// instructions using a scoped hash table based value numbering scheme. It
// must be run while the machine function is still in SSA form.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "machine-cse"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/ScopedHashTable.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/RecyclingAllocator.h"
using namespace llvm;

STATISTIC(NumCoalesces, "Number of copies coalesced");
STATISTIC(NumCSEs,      "Number of common subexpression eliminated");
STATISTIC(NumPhysCSEs,
          "Number of physreg referencing common subexpr eliminated");
STATISTIC(NumCrossBBCSEs,
          "Number of cross-MBB physreg referencing CS eliminated");
STATISTIC(NumCommutes,  "Number of copies coalesced after commuting");

namespace {
  /// MachineCSE - Performs global CSE on SSA-form machine code.  Value
  /// numbers are kept in a hash table whose scopes mirror the dominator
  /// tree: a block's expressions are visible to (and only to) the blocks
  /// it dominates.
  class MachineCSE : public MachineFunctionPass {
    const TargetInstrInfo *TII;
    const TargetRegisterInfo *TRI;
    AliasAnalysis *AA;
    MachineDominatorTree *DT;
    MachineRegisterInfo *MRI;
  public:
    static char ID; // Pass identification
    MachineCSE() : MachineFunctionPass(ID), LookAheadLimit(5), CurrVN(0) {
      initializeMachineCSEPass(*PassRegistry::getPassRegistry());
    }

    virtual bool runOnMachineFunction(MachineFunction &MF);

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      MachineFunctionPass::getAnalysisUsage(AU);
      AU.addRequired<AliasAnalysis>();
      AU.addPreservedID(MachineLoopInfoID);
      AU.addRequired<MachineDominatorTree>();
      AU.addPreserved<MachineDominatorTree>();
    }

    virtual void releaseMemory() {
      ScopeMap.clear();
      Exps.clear();
      AllocatableRegs.clear();
      ReservedRegs.clear();
    }

  private:
    // Number of instructions to scan when checking whether a physreg def is
    // trivially dead, or whether physreg uses/defs reach from a candidate CSE.
    const unsigned LookAheadLimit;
    typedef RecyclingAllocator<BumpPtrAllocator,
        ScopedHashTableVal<MachineInstr*, unsigned> > AllocatorTy;
    typedef ScopedHashTable<MachineInstr*, unsigned,
        MachineInstrExpressionTrait, AllocatorTy> ScopedHTType;
    typedef ScopedHTType::ScopeTy ScopeType;
    // One open hash-table scope per basic block currently on the dominator
    // tree walk; owned by EnterScope/ExitScope.
    DenseMap<MachineBasicBlock*, ScopeType*> ScopeMap;
    ScopedHTType VNT;               // Instruction -> value number.
    SmallVector<MachineInstr*, 64> Exps; // Value number -> defining instr.
    unsigned CurrVN;                // Next value number to assign.
    BitVector AllocatableRegs;
    BitVector ReservedRegs;

    bool PerformTrivialCoalescing(MachineInstr *MI, MachineBasicBlock *MBB);
    bool isPhysDefTriviallyDead(unsigned Reg,
                                MachineBasicBlock::const_iterator I,
                                MachineBasicBlock::const_iterator E) const;
    bool hasLivePhysRegDefUses(const MachineInstr *MI,
                               const MachineBasicBlock *MBB,
                               SmallSet<unsigned,8> &PhysRefs,
                               SmallVector<unsigned,2> &PhysDefs) const;
    bool PhysRegDefsReach(MachineInstr *CSMI, MachineInstr *MI,
                          SmallSet<unsigned,8> &PhysRefs,
                          SmallVector<unsigned,2> &PhysDefs,
                          bool &NonLocal) const;
    bool isCSECandidate(MachineInstr *MI);
    bool isProfitableToCSE(unsigned CSReg, unsigned Reg,
                           MachineInstr *CSMI, MachineInstr *MI);
    void EnterScope(MachineBasicBlock *MBB);
    void ExitScope(MachineBasicBlock *MBB);
    bool ProcessBlock(MachineBasicBlock *MBB);
    void ExitScopeIfDone(MachineDomTreeNode *Node,
                 DenseMap<MachineDomTreeNode*, unsigned> &OpenChildren);
    bool PerformCSE(MachineDomTreeNode *Node);
  };
} // end anonymous namespace

char MachineCSE::ID = 0;
char &llvm::MachineCSEID = MachineCSE::ID;
INITIALIZE_PASS_BEGIN(MachineCSE, "machine-cse",
                "Machine Common Subexpression Elimination", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(MachineCSE, "machine-cse",
                "Machine Common Subexpression Elimination", false, false)

/// PerformTrivialCoalescing - For each virtual register use in MI whose sole
/// non-debug definition is a same-block, full-register COPY from another
/// virtual register, rewrite the use to read the copy's source and erase the
/// copy.  Returns true if any operand was rewritten.  This is done so that
/// instructions that differ only by such copies hash to the same value
/// number.
bool MachineCSE::PerformTrivialCoalescing(MachineInstr *MI,
                                          MachineBasicBlock *MBB) {
  bool Changed = false;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    if (!MRI->hasOneNonDBGUse(Reg))
      // Only coalesce single use copies. This ensure the copy will be
      // deleted.
      continue;
    MachineInstr *DefMI = MRI->getVRegDef(Reg);
    if (DefMI->getParent() != MBB)
      continue;
    if (!DefMI->isCopy())
      continue;
    unsigned SrcReg = DefMI->getOperand(1).getReg();
    if (!TargetRegisterInfo::isVirtualRegister(SrcReg))
      continue;
    // Only handle full-register copies; subregister copies would change the
    // value read.
    if (DefMI->getOperand(0).getSubReg() || DefMI->getOperand(1).getSubReg())
      continue;
    // The source must be constrainable to Reg's register class for the
    // rewrite to be legal.
    if (!MRI->constrainRegClass(SrcReg, MRI->getRegClass(Reg)))
      continue;
    DEBUG(dbgs() << "Coalescing: " << *DefMI);
    DEBUG(dbgs() << "*** to: " << *MI);
    MO.setReg(SrcReg);
    // SrcReg's live range is being extended; old kill flags may now be wrong.
    MRI->clearKillFlags(SrcReg);
    DefMI->eraseFromParent();
    ++NumCoalesces;
    Changed = true;
  }

  return Changed;
}

/// isPhysDefTriviallyDead - Scan forward from I (up to LookAheadLimit
/// non-debug instructions, or the end of the block) looking for a use of
/// physical register Reg.  Returns true if a def of Reg (or an overlapping
/// register, or a clobbering regmask) or the block end is reached before any
/// use — i.e. the def at hand is dead even though it may not be marked so.
bool
MachineCSE::isPhysDefTriviallyDead(unsigned Reg,
                                   MachineBasicBlock::const_iterator I,
                                   MachineBasicBlock::const_iterator E) const {
  unsigned LookAheadLeft = LookAheadLimit;
  while (LookAheadLeft) {
    // Skip over dbg_value's.
    while (I != E && I->isDebugValue())
      ++I;

    if (I == E)
      // Reached end of block, register is obviously dead.
      return true;

    bool SeenDef = false;
    for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = I->getOperand(i);
      // A regmask that clobbers Reg kills the value just like an explicit
      // def would.
      if (MO.isRegMask() && MO.clobbersPhysReg(Reg))
        SeenDef = true;
      if (!MO.isReg() || !MO.getReg())
        continue;
      if (!TRI->regsOverlap(MO.getReg(), Reg))
        continue;
      if (MO.isUse())
        // Found a use!
        return false;
      SeenDef = true;
    }
    if (SeenDef)
      // See a def of Reg (or an alias) before encountering any use, it's
      // trivially dead.
      return true;

    --LookAheadLeft;
    ++I;
  }
  // Ran out of lookahead without a verdict; conservatively assume live.
  return false;
}

/// hasLivePhysRegDefUses - Return true if the specified instruction read/write
/// physical registers (except for dead defs of physical registers). It also
/// returns the physical register def by reference if it's the only one and the
/// instruction does not use a physical register.
bool MachineCSE::hasLivePhysRegDefUses(const MachineInstr *MI,
                                       const MachineBasicBlock *MBB,
                                       SmallSet<unsigned,8> &PhysRefs,
                                       SmallVector<unsigned,2> &PhysDefs) const{
  MachineBasicBlock::const_iterator I = MI; I = llvm::next(I);
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (!Reg)
      continue;
    if (TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    // If the def is dead, it's ok. But the def may not marked "dead". That's
    // common since this pass is run before livevariables. We can scan
    // forward a few instructions and check if it is obviously dead.
    if (MO.isDef() &&
        (MO.isDead() || isPhysDefTriviallyDead(Reg, I, MBB->end())))
      continue;
    // Record the register and everything it overlaps with.
    for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
      // Reading constant physregs is ok.
      if (!MRI->isConstantPhysReg(*AI, *MBB->getParent()))
        PhysRefs.insert(*AI);
    }
    if (MO.isDef())
      PhysDefs.push_back(Reg);
  }

  return !PhysRefs.empty();
}

/// PhysRegDefsReach - Return true if the physical register references (defs
/// and uses) of CSMI still hold the same values at MI, i.e. no instruction
/// between CSMI and MI (within the lookahead limit) clobbers any register in
/// PhysRefs.  CSMI may be in MI's block or in its sole predecessor; in the
/// latter case NonLocal is set so the caller can update live-in lists.
bool MachineCSE::PhysRegDefsReach(MachineInstr *CSMI, MachineInstr *MI,
                                  SmallSet<unsigned,8> &PhysRefs,
                                  SmallVector<unsigned,2> &PhysDefs,
                                  bool &NonLocal) const {
  // For now conservatively returns false if the common subexpression is
  // not in the same basic block as the given instruction. The only exception
  // is if the common subexpression is in the sole predecessor block.
  const MachineBasicBlock *MBB = MI->getParent();
  const MachineBasicBlock *CSMBB = CSMI->getParent();

  bool CrossMBB = false;
  if (CSMBB != MBB) {
    if (MBB->pred_size() != 1 || *MBB->pred_begin() != CSMBB)
      return false;

    for (unsigned i = 0, e = PhysDefs.size(); i != e; ++i) {
      if (AllocatableRegs.test(PhysDefs[i]) || ReservedRegs.test(PhysDefs[i]))
        // Avoid extending live range of physical registers if they are
        // allocatable or reserved.
        return false;
    }
    CrossMBB = true;
  }
  // Walk from just after CSMI toward MI, possibly hopping from the end of
  // CSMI's block to the start of MI's block (the CrossMBB case).
  MachineBasicBlock::const_iterator I = CSMI; I = llvm::next(I);
  MachineBasicBlock::const_iterator E = MI;
  MachineBasicBlock::const_iterator EE = CSMBB->end();
  unsigned LookAheadLeft = LookAheadLimit;
  while (LookAheadLeft) {
    // Skip over dbg_value's.
    while (I != E && I != EE && I->isDebugValue())
      ++I;

    if (I == EE) {
      assert(CrossMBB && "Reaching end-of-MBB without finding MI?");
      (void)CrossMBB;
      CrossMBB = false;
      NonLocal = true;
      I = MBB->begin();
      EE = MBB->end();
      continue;
    }

    if (I == E)
      // Reached MI with no intervening clobber.
      return true;

    for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = I->getOperand(i);
      // RegMasks go on instructions like calls that clobber lots of physregs.
      // Don't attempt to CSE across such an instruction.
      if (MO.isRegMask())
        return false;
      if (!MO.isReg() || !MO.isDef())
        continue;
      unsigned MOReg = MO.getReg();
      if (TargetRegisterInfo::isVirtualRegister(MOReg))
        continue;
      if (PhysRefs.count(MOReg))
        return false;
    }

    --LookAheadLeft;
    ++I;
  }

  // Lookahead exhausted; conservatively assume the physregs were clobbered.
  return false;
}

/// isCSECandidate - Return true if MI is an instruction this pass may value
/// number and eliminate: not position-sensitive (labels, PHIs, inline asm,
/// terminators...), no unmodeled side effects, no stores, and any load must
/// be invariant.
bool MachineCSE::isCSECandidate(MachineInstr *MI) {
  if (MI->isLabel() || MI->isPHI() || MI->isImplicitDef() ||
      MI->isKill() || MI->isInlineAsm() || MI->isDebugValue())
    return false;

  // Ignore copies.
  if (MI->isCopyLike())
    return false;

  // Ignore stuff that we obviously can't move.
  if (MI->mayStore() || MI->isCall() || MI->isTerminator() ||
      MI->hasUnmodeledSideEffects())
    return false;

  if (MI->mayLoad()) {
    // Okay, this instruction does a load. As a refinement, we allow the target
    // to decide whether the loaded value is actually a constant. If so, we can
    // actually use it as a load.
    if (!MI->isInvariantLoad(AA))
      // FIXME: we should be able to hoist loads with no other side effects if
      // there are no other instructions which can change memory in this loop.
      // This is a trivial form of alias analysis.
      return false;
  }
  return true;
}

/// isProfitableToCSE - Return true if it's profitable to eliminate MI with a
/// common expression that defines Reg.
326 bool MachineCSE::isProfitableToCSE(unsigned CSReg, unsigned Reg, 327 MachineInstr *CSMI, MachineInstr *MI) { 328 // FIXME: Heuristics that works around the lack the live range splitting. 329 330 // If CSReg is used at all uses of Reg, CSE should not increase register 331 // pressure of CSReg. 332 bool MayIncreasePressure = true; 333 if (TargetRegisterInfo::isVirtualRegister(CSReg) && 334 TargetRegisterInfo::isVirtualRegister(Reg)) { 335 MayIncreasePressure = false; 336 SmallPtrSet<MachineInstr*, 8> CSUses; 337 for (MachineRegisterInfo::use_nodbg_iterator I =MRI->use_nodbg_begin(CSReg), 338 E = MRI->use_nodbg_end(); I != E; ++I) { 339 MachineInstr *Use = &*I; 340 CSUses.insert(Use); 341 } 342 for (MachineRegisterInfo::use_nodbg_iterator I = MRI->use_nodbg_begin(Reg), 343 E = MRI->use_nodbg_end(); I != E; ++I) { 344 MachineInstr *Use = &*I; 345 if (!CSUses.count(Use)) { 346 MayIncreasePressure = true; 347 break; 348 } 349 } 350 } 351 if (!MayIncreasePressure) return true; 352 353 // Heuristics #1: Don't CSE "cheap" computation if the def is not local or in 354 // an immediate predecessor. We don't want to increase register pressure and 355 // end up causing other computation to be spilled. 356 if (MI->isAsCheapAsAMove()) { 357 MachineBasicBlock *CSBB = CSMI->getParent(); 358 MachineBasicBlock *BB = MI->getParent(); 359 if (CSBB != BB && !CSBB->isSuccessor(BB)) 360 return false; 361 } 362 363 // Heuristics #2: If the expression doesn't not use a vr and the only use 364 // of the redundant computation are copies, do not cse. 
365 bool HasVRegUse = false; 366 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { 367 const MachineOperand &MO = MI->getOperand(i); 368 if (MO.isReg() && MO.isUse() && 369 TargetRegisterInfo::isVirtualRegister(MO.getReg())) { 370 HasVRegUse = true; 371 break; 372 } 373 } 374 if (!HasVRegUse) { 375 bool HasNonCopyUse = false; 376 for (MachineRegisterInfo::use_nodbg_iterator I = MRI->use_nodbg_begin(Reg), 377 E = MRI->use_nodbg_end(); I != E; ++I) { 378 MachineInstr *Use = &*I; 379 // Ignore copies. 380 if (!Use->isCopyLike()) { 381 HasNonCopyUse = true; 382 break; 383 } 384 } 385 if (!HasNonCopyUse) 386 return false; 387 } 388 389 // Heuristics #3: If the common subexpression is used by PHIs, do not reuse 390 // it unless the defined value is already used in the BB of the new use. 391 bool HasPHI = false; 392 SmallPtrSet<MachineBasicBlock*, 4> CSBBs; 393 for (MachineRegisterInfo::use_nodbg_iterator I = MRI->use_nodbg_begin(CSReg), 394 E = MRI->use_nodbg_end(); I != E; ++I) { 395 MachineInstr *Use = &*I; 396 HasPHI |= Use->isPHI(); 397 CSBBs.insert(Use->getParent()); 398 } 399 400 if (!HasPHI) 401 return true; 402 return CSBBs.count(MI->getParent()); 403 } 404 405 void MachineCSE::EnterScope(MachineBasicBlock *MBB) { 406 DEBUG(dbgs() << "Entering: " << MBB->getName() << '\n'); 407 ScopeType *Scope = new ScopeType(VNT); 408 ScopeMap[MBB] = Scope; 409 } 410 411 void MachineCSE::ExitScope(MachineBasicBlock *MBB) { 412 DEBUG(dbgs() << "Exiting: " << MBB->getName() << '\n'); 413 DenseMap<MachineBasicBlock*, ScopeType*>::iterator SI = ScopeMap.find(MBB); 414 assert(SI != ScopeMap.end()); 415 ScopeMap.erase(SI); 416 delete SI->second; 417 } 418 419 bool MachineCSE::ProcessBlock(MachineBasicBlock *MBB) { 420 bool Changed = false; 421 422 SmallVector<std::pair<unsigned, unsigned>, 8> CSEPairs; 423 SmallVector<unsigned, 2> ImplicitDefsToUpdate; 424 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E; ) { 425 MachineInstr *MI = &*I; 426 ++I; 
427 428 if (!isCSECandidate(MI)) 429 continue; 430 431 bool FoundCSE = VNT.count(MI); 432 if (!FoundCSE) { 433 // Look for trivial copy coalescing opportunities. 434 if (PerformTrivialCoalescing(MI, MBB)) { 435 Changed = true; 436 437 // After coalescing MI itself may become a copy. 438 if (MI->isCopyLike()) 439 continue; 440 FoundCSE = VNT.count(MI); 441 } 442 } 443 444 // Commute commutable instructions. 445 bool Commuted = false; 446 if (!FoundCSE && MI->isCommutable()) { 447 MachineInstr *NewMI = TII->commuteInstruction(MI); 448 if (NewMI) { 449 Commuted = true; 450 FoundCSE = VNT.count(NewMI); 451 if (NewMI != MI) { 452 // New instruction. It doesn't need to be kept. 453 NewMI->eraseFromParent(); 454 Changed = true; 455 } else if (!FoundCSE) 456 // MI was changed but it didn't help, commute it back! 457 (void)TII->commuteInstruction(MI); 458 } 459 } 460 461 // If the instruction defines physical registers and the values *may* be 462 // used, then it's not safe to replace it with a common subexpression. 463 // It's also not safe if the instruction uses physical registers. 464 bool CrossMBBPhysDef = false; 465 SmallSet<unsigned, 8> PhysRefs; 466 SmallVector<unsigned, 2> PhysDefs; 467 if (FoundCSE && hasLivePhysRegDefUses(MI, MBB, PhysRefs, PhysDefs)) { 468 FoundCSE = false; 469 470 // ... Unless the CS is local or is in the sole predecessor block 471 // and it also defines the physical register which is not clobbered 472 // in between and the physical register uses were not clobbered. 473 unsigned CSVN = VNT.lookup(MI); 474 MachineInstr *CSMI = Exps[CSVN]; 475 if (PhysRegDefsReach(CSMI, MI, PhysRefs, PhysDefs, CrossMBBPhysDef)) 476 FoundCSE = true; 477 } 478 479 if (!FoundCSE) { 480 VNT.insert(MI, CurrVN++); 481 Exps.push_back(MI); 482 continue; 483 } 484 485 // Found a common subexpression, eliminate it. 
486 unsigned CSVN = VNT.lookup(MI); 487 MachineInstr *CSMI = Exps[CSVN]; 488 DEBUG(dbgs() << "Examining: " << *MI); 489 DEBUG(dbgs() << "*** Found a common subexpression: " << *CSMI); 490 491 // Check if it's profitable to perform this CSE. 492 bool DoCSE = true; 493 unsigned NumDefs = MI->getDesc().getNumDefs() + 494 MI->getDesc().getNumImplicitDefs(); 495 496 for (unsigned i = 0, e = MI->getNumOperands(); NumDefs && i != e; ++i) { 497 MachineOperand &MO = MI->getOperand(i); 498 if (!MO.isReg() || !MO.isDef()) 499 continue; 500 unsigned OldReg = MO.getReg(); 501 unsigned NewReg = CSMI->getOperand(i).getReg(); 502 503 // Go through implicit defs of CSMI and MI, if a def is not dead at MI, 504 // we should make sure it is not dead at CSMI. 505 if (MO.isImplicit() && !MO.isDead() && CSMI->getOperand(i).isDead()) 506 ImplicitDefsToUpdate.push_back(i); 507 if (OldReg == NewReg) { 508 --NumDefs; 509 continue; 510 } 511 512 assert(TargetRegisterInfo::isVirtualRegister(OldReg) && 513 TargetRegisterInfo::isVirtualRegister(NewReg) && 514 "Do not CSE physical register defs!"); 515 516 if (!isProfitableToCSE(NewReg, OldReg, CSMI, MI)) { 517 DEBUG(dbgs() << "*** Not profitable, avoid CSE!\n"); 518 DoCSE = false; 519 break; 520 } 521 522 // Don't perform CSE if the result of the old instruction cannot exist 523 // within the register class of the new instruction. 524 const TargetRegisterClass *OldRC = MRI->getRegClass(OldReg); 525 if (!MRI->constrainRegClass(NewReg, OldRC)) { 526 DEBUG(dbgs() << "*** Not the same register class, avoid CSE!\n"); 527 DoCSE = false; 528 break; 529 } 530 531 CSEPairs.push_back(std::make_pair(OldReg, NewReg)); 532 --NumDefs; 533 } 534 535 // Actually perform the elimination. 
536 if (DoCSE) { 537 for (unsigned i = 0, e = CSEPairs.size(); i != e; ++i) { 538 MRI->replaceRegWith(CSEPairs[i].first, CSEPairs[i].second); 539 MRI->clearKillFlags(CSEPairs[i].second); 540 } 541 542 // Go through implicit defs of CSMI and MI, if a def is not dead at MI, 543 // we should make sure it is not dead at CSMI. 544 for (unsigned i = 0, e = ImplicitDefsToUpdate.size(); i != e; ++i) 545 CSMI->getOperand(ImplicitDefsToUpdate[i]).setIsDead(false); 546 547 if (CrossMBBPhysDef) { 548 // Add physical register defs now coming in from a predecessor to MBB 549 // livein list. 550 while (!PhysDefs.empty()) { 551 unsigned LiveIn = PhysDefs.pop_back_val(); 552 if (!MBB->isLiveIn(LiveIn)) 553 MBB->addLiveIn(LiveIn); 554 } 555 ++NumCrossBBCSEs; 556 } 557 558 MI->eraseFromParent(); 559 ++NumCSEs; 560 if (!PhysRefs.empty()) 561 ++NumPhysCSEs; 562 if (Commuted) 563 ++NumCommutes; 564 Changed = true; 565 } else { 566 VNT.insert(MI, CurrVN++); 567 Exps.push_back(MI); 568 } 569 CSEPairs.clear(); 570 ImplicitDefsToUpdate.clear(); 571 } 572 573 return Changed; 574 } 575 576 /// ExitScopeIfDone - Destroy scope for the MBB that corresponds to the given 577 /// dominator tree node if its a leaf or all of its children are done. Walk 578 /// up the dominator tree to destroy ancestors which are now done. 579 void 580 MachineCSE::ExitScopeIfDone(MachineDomTreeNode *Node, 581 DenseMap<MachineDomTreeNode*, unsigned> &OpenChildren) { 582 if (OpenChildren[Node]) 583 return; 584 585 // Pop scope. 586 ExitScope(Node->getBlock()); 587 588 // Now traverse upwards to pop ancestors whose offsprings are all done. 
589 while (MachineDomTreeNode *Parent = Node->getIDom()) { 590 unsigned Left = --OpenChildren[Parent]; 591 if (Left != 0) 592 break; 593 ExitScope(Parent->getBlock()); 594 Node = Parent; 595 } 596 } 597 598 bool MachineCSE::PerformCSE(MachineDomTreeNode *Node) { 599 SmallVector<MachineDomTreeNode*, 32> Scopes; 600 SmallVector<MachineDomTreeNode*, 8> WorkList; 601 DenseMap<MachineDomTreeNode*, unsigned> OpenChildren; 602 603 CurrVN = 0; 604 605 // Perform a DFS walk to determine the order of visit. 606 WorkList.push_back(Node); 607 do { 608 Node = WorkList.pop_back_val(); 609 Scopes.push_back(Node); 610 const std::vector<MachineDomTreeNode*> &Children = Node->getChildren(); 611 unsigned NumChildren = Children.size(); 612 OpenChildren[Node] = NumChildren; 613 for (unsigned i = 0; i != NumChildren; ++i) { 614 MachineDomTreeNode *Child = Children[i]; 615 WorkList.push_back(Child); 616 } 617 } while (!WorkList.empty()); 618 619 // Now perform CSE. 620 bool Changed = false; 621 for (unsigned i = 0, e = Scopes.size(); i != e; ++i) { 622 MachineDomTreeNode *Node = Scopes[i]; 623 MachineBasicBlock *MBB = Node->getBlock(); 624 EnterScope(MBB); 625 Changed |= ProcessBlock(MBB); 626 // If it's a leaf node, it's done. Traverse upwards to pop ancestors. 627 ExitScopeIfDone(Node, OpenChildren); 628 } 629 630 return Changed; 631 } 632 633 bool MachineCSE::runOnMachineFunction(MachineFunction &MF) { 634 TII = MF.getTarget().getInstrInfo(); 635 TRI = MF.getTarget().getRegisterInfo(); 636 MRI = &MF.getRegInfo(); 637 AA = &getAnalysis<AliasAnalysis>(); 638 DT = &getAnalysis<MachineDominatorTree>(); 639 AllocatableRegs = TRI->getAllocatableSet(MF); 640 ReservedRegs = TRI->getReservedRegs(MF); 641 return PerformCSE(DT->getRootNode()); 642 } 643