//===-- LiveRangeEdit.cpp - Basic tools for editing a register live range -===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The LiveRangeEdit class represents changes done to a virtual register when it
// is spilled or split.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "regalloc"
#include "llvm/CodeGen/LiveRangeEdit.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"

using namespace llvm;

STATISTIC(NumDCEDeleted,     "Number of instructions deleted by DCE");
STATISTIC(NumDCEFoldedLoads, "Number of single use loads folded after DCE");
STATISTIC(NumFracRanges,     "Number of live ranges fractured by DCE");

void LiveRangeEdit::Delegate::anchor() { }

LiveInterval &LiveRangeEdit::createFrom(unsigned OldReg) {
  unsigned VReg = MRI.createVirtualRegister(MRI.getRegClass(OldReg));
  if (VRM) {
    VRM->setIsSplitFromReg(VReg, VRM->getOriginal(OldReg));
  }
  LiveInterval &LI = LIS.getOrCreateInterval(VReg);
  return LI;
}

bool LiveRangeEdit::checkRematerializable(VNInfo *VNI,
                                          const MachineInstr *DefMI,
                                          AliasAnalysis *aa) {
  assert(DefMI && "Missing instruction");
  ScannedRemattable = true;
  if (!TII.isTriviallyReMaterializable(DefMI, aa))
    return false;
  Remattable.insert(VNI);
  return true;
}

void LiveRangeEdit::scanRemattable(AliasAnalysis *aa) {
  for (LiveInterval::vni_iterator I = getParent().vni_begin(),
       E = getParent().vni_end(); I != E; ++I) {
    VNInfo *VNI = *I;
    if (VNI->isUnused())
      continue;
    MachineInstr *DefMI = LIS.getInstructionFromIndex(VNI->def);
    if (!DefMI)
      continue;
    checkRematerializable(VNI, DefMI, aa);
  }
  ScannedRemattable = true;
}

bool LiveRangeEdit::anyRematerializable(AliasAnalysis *aa) {
  if (!ScannedRemattable)
    scanRemattable(aa);
  return !Remattable.empty();
}

/// allUsesAvailableAt - Return true if all registers used by OrigMI at
/// OrigIdx are also available with the same value at UseIdx.
bool LiveRangeEdit::allUsesAvailableAt(const MachineInstr *OrigMI,
                                       SlotIndex OrigIdx,
                                       SlotIndex UseIdx) const {
  OrigIdx = OrigIdx.getRegSlot(true);
  UseIdx = UseIdx.getRegSlot(true);
  for (unsigned i = 0, e = OrigMI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = OrigMI->getOperand(i);
    if (!MO.isReg() || !MO.getReg() || !MO.readsReg())
      continue;

    // We can't remat physreg uses, unless it is a constant.
    if (TargetRegisterInfo::isPhysicalRegister(MO.getReg())) {
      if (MRI.isConstantPhysReg(MO.getReg(), *OrigMI->getParent()->getParent()))
        continue;
      return false;
    }

    LiveInterval &li = LIS.getInterval(MO.getReg());
    const VNInfo *OVNI = li.getVNInfoAt(OrigIdx);
    if (!OVNI)
      continue;

    // Don't allow rematerialization immediately after the original def.
    // It would be incorrect if OrigMI redefines the register.
    // See PR14098.
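    // For example (hypothetical operands): if OrigMI were a tied two-address
    // instruction such as
    //   %vreg1<def,tied0> = ADD32ri %vreg1<tied1>, 4
    // the register already holds the redefined value immediately after
    // OrigIdx, so a rematerialized copy inserted there would read the wrong
    // value.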
    if (SlotIndex::isSameInstr(OrigIdx, UseIdx))
      return false;

    if (OVNI != li.getVNInfoAt(UseIdx))
      return false;
  }
  return true;
}

bool LiveRangeEdit::canRematerializeAt(Remat &RM,
                                       SlotIndex UseIdx,
                                       bool cheapAsAMove) {
  assert(ScannedRemattable && "Call anyRematerializable first");

  // Use scanRemattable info.
  if (!Remattable.count(RM.ParentVNI))
    return false;

  // No defining instruction provided.
  SlotIndex DefIdx;
  if (RM.OrigMI)
    DefIdx = LIS.getInstructionIndex(RM.OrigMI);
  else {
    DefIdx = RM.ParentVNI->def;
    RM.OrigMI = LIS.getInstructionFromIndex(DefIdx);
    assert(RM.OrigMI && "No defining instruction for remattable value");
  }

  // If only cheap remats were requested, bail out early.
  if (cheapAsAMove && !RM.OrigMI->isAsCheapAsAMove())
    return false;

  // Verify that all used registers are available with the same values.
  if (!allUsesAvailableAt(RM.OrigMI, DefIdx, UseIdx))
    return false;

  return true;
}

SlotIndex LiveRangeEdit::rematerializeAt(MachineBasicBlock &MBB,
                                         MachineBasicBlock::iterator MI,
                                         unsigned DestReg,
                                         const Remat &RM,
                                         const TargetRegisterInfo &tri,
                                         bool Late) {
  assert(RM.OrigMI && "Invalid remat");
  TII.reMaterialize(MBB, MI, DestReg, 0, RM.OrigMI, tri);
  Rematted.insert(RM.ParentVNI);
  return LIS.getSlotIndexes()->insertMachineInstrInMaps(--MI, Late)
           .getRegSlot();
}

void LiveRangeEdit::eraseVirtReg(unsigned Reg) {
  if (TheDelegate && TheDelegate->LRE_CanEraseVirtReg(Reg))
    LIS.removeInterval(Reg);
}

bool LiveRangeEdit::foldAsLoad(LiveInterval *LI,
                               SmallVectorImpl<MachineInstr*> &Dead) {
  MachineInstr *DefMI = 0, *UseMI = 0;

  // Check that there is a single def and a single use.
  for (MachineRegisterInfo::reg_nodbg_iterator I = MRI.reg_nodbg_begin(LI->reg),
       E = MRI.reg_nodbg_end(); I != E; ++I) {
    MachineOperand &MO = I.getOperand();
    MachineInstr *MI = MO.getParent();
    if (MO.isDef()) {
      if (DefMI && DefMI != MI)
        return false;
      if (!MI->canFoldAsLoad())
        return false;
      DefMI = MI;
    } else if (!MO.isUndef()) {
      if (UseMI && UseMI != MI)
        return false;
      // FIXME: Targets don't know how to fold subreg uses.
      if (MO.getSubReg())
        return false;
      UseMI = MI;
    }
  }
  if (!DefMI || !UseMI)
    return false;

  // Since we're moving the DefMI load, make sure we're not extending any live
  // ranges.
  if (!allUsesAvailableAt(DefMI,
                          LIS.getInstructionIndex(DefMI),
                          LIS.getInstructionIndex(UseMI)))
    return false;

  // We also need to make sure it is safe to move the load.
  // Assume there are stores between DefMI and UseMI.
  bool SawStore = true;
  if (!DefMI->isSafeToMove(&TII, 0, SawStore))
    return false;

  DEBUG(dbgs() << "Try to fold single def: " << *DefMI
               << " into single use: " << *UseMI);

  SmallVector<unsigned, 8> Ops;
  if (UseMI->readsWritesVirtualRegister(LI->reg, &Ops).second)
    return false;

  MachineInstr *FoldMI = TII.foldMemoryOperand(UseMI, Ops, DefMI);
  if (!FoldMI)
    return false;
  DEBUG(dbgs() << " folded: " << *FoldMI);
  LIS.ReplaceMachineInstrInMaps(UseMI, FoldMI);
  UseMI->eraseFromParent();
  DefMI->addRegisterDead(LI->reg, 0);
  Dead.push_back(DefMI);
  ++NumDCEFoldedLoads;
  return true;
}

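// An illustrative (hypothetical, x86-flavored) before/after for foldAsLoad:
//   %vreg0 = MOV32rm <fi#3>                ; the single def, a foldable load
//   %vreg1 = ADD32rr %vreg2, %vreg0<kill>  ; the single use
// becomes
//   %vreg1 = ADD32rm %vreg2, <fi#3>
// and the now-dead MOV32rm is pushed onto Dead for later DCE.
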
/// Find all live intervals that need to shrink, then remove the instruction.
void LiveRangeEdit::eliminateDeadDef(MachineInstr *MI, ToShrinkSet &ToShrink) {
  assert(MI->allDefsAreDead() && "Def isn't really dead");
  SlotIndex Idx = LIS.getInstructionIndex(MI).getRegSlot();

  // Never delete a bundled instruction.
  if (MI->isBundled()) {
    return;
  }
  // Never delete inline asm.
  if (MI->isInlineAsm()) {
    DEBUG(dbgs() << "Won't delete: " << Idx << '\t' << *MI);
    return;
  }

  // Use the same criteria as DeadMachineInstructionElim.
  bool SawStore = false;
  if (!MI->isSafeToMove(&TII, 0, SawStore)) {
    DEBUG(dbgs() << "Can't delete: " << Idx << '\t' << *MI);
    return;
  }

  DEBUG(dbgs() << "Deleting dead def " << Idx << '\t' << *MI);

  // Collect virtual registers to be erased after MI is gone.
  SmallVector<unsigned, 8> RegsToErase;
  bool ReadsPhysRegs = false;

  // Check for live intervals that may shrink
  for (MachineInstr::mop_iterator MOI = MI->operands_begin(),
       MOE = MI->operands_end(); MOI != MOE; ++MOI) {
    if (!MOI->isReg())
      continue;
    unsigned Reg = MOI->getReg();
    if (!TargetRegisterInfo::isVirtualRegister(Reg)) {
      // Check if MI reads any unreserved physregs.
      if (Reg && MOI->readsReg() && !MRI.isReserved(Reg))
        ReadsPhysRegs = true;
      else if (MOI->isDef()) {
        for (MCRegUnitIterator Units(Reg, MRI.getTargetRegisterInfo());
             Units.isValid(); ++Units) {
          if (LiveInterval *LI = LIS.getCachedRegUnit(*Units)) {
            if (VNInfo *VNI = LI->getVNInfoAt(Idx))
              LI->removeValNo(VNI);
          }
        }
      }
      continue;
    }
    LiveInterval &LI = LIS.getInterval(Reg);

    // Shrink read registers, unless it is likely to be expensive and
    // unlikely to change anything. We typically don't want to shrink the
    // PIC base register that has lots of uses everywhere.
    // Always shrink COPY uses that probably come from live range splitting.
    if (MI->readsVirtualRegister(Reg) &&
        (MI->isCopy() || MOI->isDef() || MRI.hasOneNonDBGUse(Reg) ||
         LI.killedAt(Idx)))
      ToShrink.insert(&LI);

    // Remove defined value.
    if (MOI->isDef()) {
      if (VNInfo *VNI = LI.getVNInfoAt(Idx)) {
        if (TheDelegate)
          TheDelegate->LRE_WillShrinkVirtReg(LI.reg);
        LI.removeValNo(VNI);
        if (LI.empty())
          RegsToErase.push_back(Reg);
      }
    }
  }

  // Currently, we don't support DCE of physreg live ranges. If MI reads
  // any unreserved physregs, don't erase the instruction, but turn it into
  // a KILL instead. This way, the physreg live ranges don't end up
  // dangling.
  // FIXME: It would be better to have something like shrinkToUses() for
  // physregs. That could potentially enable more DCE and it would free up
  // the physreg. It would not happen often, though.
  if (ReadsPhysRegs) {
    MI->setDesc(TII.get(TargetOpcode::KILL));
    // Remove all operands that aren't physregs.
    for (unsigned i = MI->getNumOperands(); i; --i) {
      const MachineOperand &MO = MI->getOperand(i-1);
      if (MO.isReg() && TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
        continue;
      MI->RemoveOperand(i-1);
    }
    DEBUG(dbgs() << "Converted physregs to:\t" << *MI);
  } else {
    if (TheDelegate)
      TheDelegate->LRE_WillEraseInstruction(MI);
    LIS.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++NumDCEDeleted;
  }

  // Erase any virtregs that are now empty and unused. There may be <undef>
  // uses around. Keep the empty live range in that case.
  for (unsigned i = 0, e = RegsToErase.size(); i != e; ++i) {
    unsigned Reg = RegsToErase[i];
    if (LIS.hasInterval(Reg) && MRI.reg_nodbg_empty(Reg)) {
      ToShrink.remove(&LIS.getInterval(Reg));
      eraseVirtReg(Reg);
    }
  }
}

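/// eliminateDeadDefs - Iterate to a fixed point: erase the instructions in
/// Dead, shrink the live intervals whose uses they removed, and split any
/// interval that separates into multiple connected components. No new
/// intervals are created for registers listed in RegsBeingSpilled.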
void LiveRangeEdit::eliminateDeadDefs(SmallVectorImpl<MachineInstr*> &Dead,
                                      ArrayRef<unsigned> RegsBeingSpilled) {
  ToShrinkSet ToShrink;

  for (;;) {
    // Erase all dead defs.
    while (!Dead.empty())
      eliminateDeadDef(Dead.pop_back_val(), ToShrink);

    if (ToShrink.empty())
      break;

    // Shrink just one live interval. Then delete new dead defs.
    LiveInterval *LI = ToShrink.back();
    ToShrink.pop_back();
    if (foldAsLoad(LI, Dead))
      continue;
    if (TheDelegate)
      TheDelegate->LRE_WillShrinkVirtReg(LI->reg);
    if (!LIS.shrinkToUses(LI, &Dead))
      continue;

    // Don't create new intervals for a register being spilled.
    // The new intervals would have to be spilled anyway, so it's not worth it.
    // Also, they currently aren't spilled, so creating them and not spilling
    // them results in incorrect code.
    bool BeingSpilled = false;
    for (unsigned i = 0, e = RegsBeingSpilled.size(); i != e; ++i) {
      if (LI->reg == RegsBeingSpilled[i]) {
        BeingSpilled = true;
        break;
      }
    }

    if (BeingSpilled) continue;

    // LI may have been separated, create new intervals.
    LI->RenumberValues();
    ConnectedVNInfoEqClasses ConEQ(LIS);
    unsigned NumComp = ConEQ.Classify(LI);
    if (NumComp <= 1)
      continue;
    ++NumFracRanges;
    bool IsOriginal = VRM && VRM->getOriginal(LI->reg) == LI->reg;
    DEBUG(dbgs() << NumComp << " components: " << *LI << '\n');
    SmallVector<LiveInterval*, 8> Dups(1, LI);
    for (unsigned i = 1; i != NumComp; ++i) {
      Dups.push_back(&createFrom(LI->reg));
      // If LI is an original interval that hasn't been split yet, make the new
      // intervals their own originals instead of referring to LI. The original
      // interval must contain all the split products, and LI doesn't.
      if (IsOriginal)
        VRM->setIsSplitFromReg(Dups.back()->reg, 0);
      if (TheDelegate)
        TheDelegate->LRE_DidCloneVirtReg(Dups.back()->reg, LI->reg);
    }
    ConEQ.Distribute(&Dups[0], MRI);
    DEBUG({
      for (unsigned i = 0; i != NumComp; ++i)
        dbgs() << '\t' << *Dups[i] << '\n';
    });
  }
}

// Keep track of new virtual registers created via
// MachineRegisterInfo::createVirtualRegister.
void
LiveRangeEdit::MRI_NoteNewVirtualRegister(unsigned VReg)
{
  if (VRM)
    VRM->grow();

  NewRegs.push_back(VReg);
}

void
LiveRangeEdit::calculateRegClassAndHint(MachineFunction &MF,
                                        const MachineLoopInfo &Loops,
                                        const MachineBlockFrequencyInfo &MBFI) {
  VirtRegAuxInfo VRAI(MF, LIS, Loops, MBFI);
  for (unsigned I = 0, Size = size(); I < Size; ++I) {
    LiveInterval &LI = LIS.getInterval(get(I));
    if (MRI.recomputeRegClass(LI.reg, MF.getTarget()))
      DEBUG(dbgs() << "Inflated " << PrintReg(LI.reg) << " to "
                   << MRI.getRegClass(LI.reg)->getName() << '\n');
    VRAI.CalculateWeightAndHint(LI);
  }
}
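
// A rough usage sketch (hypothetical client code, placeholder names only;
// not part of this file): given a LiveRangeEdit `Edit` constructed for the
// interval being spilled or split, a spiller typically drives it like so:
//
//   if (Edit.anyRematerializable(AA)) {
//     LiveRangeEdit::Remat RM(ParentVNI);
//     if (Edit.canRematerializeAt(RM, UseIdx, /*cheapAsAMove=*/true))
//       Edit.rematerializeAt(MBB, InsertPt, NewVReg, RM, TRI);
//   }
//   Edit.eliminateDeadDefs(DeadInstrs, RegsBeingSpilled);
//   Edit.calculateRegClassAndHint(MF, Loops, MBFI);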