//===-------- InlineSpiller.cpp - Insert spills and restores inline -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The inline spiller modifies the machine function directly instead of
// inserting spills and restores in VirtRegMap.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "spiller"
#include "Spiller.h"
#include "VirtRegMap.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

namespace {
class InlineSpiller : public Spiller {
  MachineFunction &mf_;
  LiveIntervals &lis_;
  VirtRegMap &vrm_;
  MachineFrameInfo &mfi_;
  MachineRegisterInfo &mri_;
  const TargetInstrInfo &tii_;
  const TargetRegisterInfo &tri_;
  const BitVector reserved_;

  // Variables that are valid during spill(), but used by multiple methods.
  LiveInterval *li_;
  std::vector<LiveInterval*> *newIntervals_;
  const TargetRegisterClass *rc_;
  int stackSlot_;
  const SmallVectorImpl<LiveInterval*> *spillIs_;

  // Values of the current interval that can potentially remat.
  SmallPtrSet<VNInfo*, 8> reMattable_;

  // Values in reMattable_ that failed to remat at some point.
  SmallPtrSet<VNInfo*, 8> usedValues_;

  ~InlineSpiller() {}

public:
  InlineSpiller(MachineFunction *mf, LiveIntervals *lis, VirtRegMap *vrm)
    : mf_(*mf), lis_(*lis), vrm_(*vrm),
      mfi_(*mf->getFrameInfo()),
      mri_(mf->getRegInfo()),
      tii_(*mf->getTarget().getInstrInfo()),
      tri_(*mf->getTarget().getRegisterInfo()),
      reserved_(tri_.getReservedRegs(mf_)) {}

  void spill(LiveInterval *li,
             std::vector<LiveInterval*> &newIntervals,
             SmallVectorImpl<LiveInterval*> &spillIs,
             SlotIndex *earliestIndex);

private:
  bool allUsesAvailableAt(const MachineInstr *OrigMI, SlotIndex OrigIdx,
                          SlotIndex UseIdx);
  bool reMaterializeFor(MachineBasicBlock::iterator MI);
  void reMaterializeAll();

  bool foldMemoryOperand(MachineBasicBlock::iterator MI,
                         const SmallVectorImpl<unsigned> &Ops);
  void insertReload(LiveInterval &NewLI, MachineBasicBlock::iterator MI);
  void insertSpill(LiveInterval &NewLI, MachineBasicBlock::iterator MI);
};
}

namespace llvm {
Spiller *createInlineSpiller(MachineFunction *mf,
                             LiveIntervals *lis,
                             const MachineLoopInfo *mli,
                             VirtRegMap *vrm) {
  return new InlineSpiller(mf, lis, vrm);
}
}

/// allUsesAvailableAt - Return true if all registers used by OrigMI at
/// OrigIdx are also available with the same value at UseIdx.
bool InlineSpiller::allUsesAvailableAt(const MachineInstr *OrigMI,
                                       SlotIndex OrigIdx,
                                       SlotIndex UseIdx) {
  OrigIdx = OrigIdx.getUseIndex();
  UseIdx = UseIdx.getUseIndex();
  for (unsigned i = 0, e = OrigMI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = OrigMI->getOperand(i);
    if (!MO.isReg() || !MO.getReg() || MO.getReg() == li_->reg)
      continue;
    // Reserved registers are OK.
    if (MO.isUndef() || !lis_.hasInterval(MO.getReg()))
      continue;
    // We don't want to move any defs.
    if (MO.isDef())
      return false;
    // We cannot depend on virtual registers in spillIs_. They will be spilled.
    for (unsigned si = 0, se = spillIs_->size(); si != se; ++si)
      if ((*spillIs_)[si]->reg == MO.getReg())
        return false;

    LiveInterval &LI = lis_.getInterval(MO.getReg());
    const VNInfo *OVNI = LI.getVNInfoAt(OrigIdx);
    if (!OVNI)
      continue;
    if (OVNI != LI.getVNInfoAt(UseIdx))
      return false;
  }
  return true;
}

/// reMaterializeFor - Attempt to rematerialize li_->reg before MI instead of
/// reloading it.
bool InlineSpiller::reMaterializeFor(MachineBasicBlock::iterator MI) {
  SlotIndex UseIdx = lis_.getInstructionIndex(MI).getUseIndex();
  VNInfo *OrigVNI = li_->getVNInfoAt(UseIdx);
  if (!OrigVNI) {
    DEBUG(dbgs() << "\tadding <undef> flags: ");
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.isUse() && MO.getReg() == li_->reg)
        MO.setIsUndef();
    }
    DEBUG(dbgs() << UseIdx << '\t' << *MI);
    return true;
  }
  if (!reMattable_.count(OrigVNI)) {
    DEBUG(dbgs() << "\tusing non-remat valno " << OrigVNI->id << ": "
                 << UseIdx << '\t' << *MI);
    return false;
  }
  MachineInstr *OrigMI = lis_.getInstructionFromIndex(OrigVNI->def);
  if (!allUsesAvailableAt(OrigMI, OrigVNI->def, UseIdx)) {
    usedValues_.insert(OrigVNI);
    DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << *MI);
    return false;
  }

  // If the instruction also writes li_->reg, it had better not require the
  // same register for uses and defs.
  bool Reads, Writes;
  SmallVector<unsigned, 8> Ops;
  tie(Reads, Writes) = MI->readsWritesVirtualRegister(li_->reg, &Ops);
  if (Writes) {
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(Ops[i]);
      if (MO.isUse() ? MI->isRegTiedToDefOperand(Ops[i]) : MO.getSubReg()) {
        usedValues_.insert(OrigVNI);
        DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << *MI);
        return false;
      }
    }
  }

  // Allocate a new register for the remat.
  unsigned NewVReg = mri_.createVirtualRegister(rc_);
  vrm_.grow();
  LiveInterval &NewLI = lis_.getOrCreateInterval(NewVReg);
  NewLI.markNotSpillable();
  newIntervals_->push_back(&NewLI);

  // Finally we can rematerialize OrigMI before MI.
  MachineBasicBlock &MBB = *MI->getParent();
  tii_.reMaterialize(MBB, MI, NewLI.reg, 0, OrigMI, tri_);
  MachineBasicBlock::iterator RematMI = MI;
  SlotIndex DefIdx = lis_.InsertMachineInstrInMaps(--RematMI).getDefIndex();
  DEBUG(dbgs() << "\tremat: " << DefIdx << '\t' << *RematMI);

  // Replace operands
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(Ops[i]);
    if (MO.isReg() && MO.isUse() && MO.getReg() == li_->reg) {
      MO.setReg(NewVReg);
      MO.setIsKill();
    }
  }
  DEBUG(dbgs() << "\t " << UseIdx << '\t' << *MI);

  VNInfo *DefVNI = NewLI.getNextValue(DefIdx, 0, true,
                                      lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(DefIdx, UseIdx.getDefIndex(), DefVNI));
  DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
  return true;
}

/// reMaterializeAll - Try to rematerialize as many uses of li_ as possible,
/// and trim the live ranges after.
void InlineSpiller::reMaterializeAll() {
  // Do a quick scan of the interval values to find if any are remattable.
  reMattable_.clear();
  usedValues_.clear();
  for (LiveInterval::const_vni_iterator I = li_->vni_begin(),
       E = li_->vni_end(); I != E; ++I) {
    VNInfo *VNI = *I;
    if (VNI->isUnused() || !VNI->isDefAccurate())
      continue;
    MachineInstr *DefMI = lis_.getInstructionFromIndex(VNI->def);
    if (!DefMI || !tii_.isTriviallyReMaterializable(DefMI))
      continue;
    reMattable_.insert(VNI);
  }

  // Often, no defs are remattable.
  if (reMattable_.empty())
    return;

  // Try to remat before all uses of li_->reg.
  bool anyRemat = false;
  for (MachineRegisterInfo::use_nodbg_iterator
       RI = mri_.use_nodbg_begin(li_->reg);
       MachineInstr *MI = RI.skipInstruction();)
    anyRemat |= reMaterializeFor(MI);

  if (!anyRemat)
    return;

  // Remove any values that were completely rematted.
  bool anyRemoved = false;
  for (SmallPtrSet<VNInfo*, 8>::iterator I = reMattable_.begin(),
       E = reMattable_.end(); I != E; ++I) {
    VNInfo *VNI = *I;
    if (VNI->hasPHIKill() || usedValues_.count(VNI))
      continue;
    MachineInstr *DefMI = lis_.getInstructionFromIndex(VNI->def);
    DEBUG(dbgs() << "\tremoving dead def: " << VNI->def << '\t' << *DefMI);
    lis_.RemoveMachineInstrFromMaps(DefMI);
    vrm_.RemoveMachineInstrFromMaps(DefMI);
    DefMI->eraseFromParent();
    li_->removeValNo(VNI);
    anyRemoved = true;
  }

  if (!anyRemoved)
    return;

  // Removing values may leave debug uses where li_ is no longer live.
  for (MachineRegisterInfo::use_iterator RI = mri_.use_begin(li_->reg);
       MachineInstr *MI = RI.skipInstruction();) {
    if (!MI->isDebugValue())
      continue;
    // Try to preserve the debug value if li_ is live immediately after it.
    MachineBasicBlock::iterator NextMI = MI;
    ++NextMI;
    if (NextMI != MI->getParent()->end() && !lis_.isNotInMIMap(NextMI)) {
      SlotIndex NearIdx = lis_.getInstructionIndex(NextMI);
      if (li_->liveAt(NearIdx))
        continue;
    }
    DEBUG(dbgs() << "Removing debug info due to remat:" << "\t" << *MI);
    MI->eraseFromParent();
  }
}

/// foldMemoryOperand - Try folding stack slot references in Ops into MI.
/// Return true on success, and MI will be erased.
bool InlineSpiller::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                      const SmallVectorImpl<unsigned> &Ops) {
  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
  // operands.
  SmallVector<unsigned, 8> FoldOps;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned Idx = Ops[i];
    MachineOperand &MO = MI->getOperand(Idx);
    if (MO.isImplicit())
      continue;
    // FIXME: Teach targets to deal with subregs.
    if (MO.getSubReg())
      return false;
    // Tied use operands should not be passed to foldMemoryOperand.
    if (!MI->isRegTiedToDefOperand(Idx))
      FoldOps.push_back(Idx);
  }

  MachineInstr *FoldMI = tii_.foldMemoryOperand(MI, FoldOps, stackSlot_);
  if (!FoldMI)
    return false;
  lis_.ReplaceMachineInstrInMaps(MI, FoldMI);
  vrm_.addSpillSlotUse(stackSlot_, FoldMI);
  MI->eraseFromParent();
  DEBUG(dbgs() << "\tfolded: " << *FoldMI);
  return true;
}

/// insertReload - Insert a reload of NewLI.reg before MI.
void InlineSpiller::insertReload(LiveInterval &NewLI,
                                 MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();
  SlotIndex Idx = lis_.getInstructionIndex(MI).getDefIndex();
  tii_.loadRegFromStackSlot(MBB, MI, NewLI.reg, stackSlot_, rc_, &tri_);
  --MI; // Point to load instruction.
  SlotIndex LoadIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
  vrm_.addSpillSlotUse(stackSlot_, MI);
  DEBUG(dbgs() << "\treload: " << LoadIdx << '\t' << *MI);
  VNInfo *LoadVNI = NewLI.getNextValue(LoadIdx, 0, true,
                                       lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(LoadIdx, Idx, LoadVNI));
}

/// insertSpill - Insert a spill of NewLI.reg after MI.
void InlineSpiller::insertSpill(LiveInterval &NewLI,
                                MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();
  SlotIndex Idx = lis_.getInstructionIndex(MI).getDefIndex();
  tii_.storeRegToStackSlot(MBB, ++MI, NewLI.reg, true, stackSlot_, rc_, &tri_);
  --MI; // Point to store instruction.
  SlotIndex StoreIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
  vrm_.addSpillSlotUse(stackSlot_, MI);
  DEBUG(dbgs() << "\tspilled: " << StoreIdx << '\t' << *MI);
  VNInfo *StoreVNI = NewLI.getNextValue(Idx, 0, true,
                                        lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(Idx, StoreIdx, StoreVNI));
}

void InlineSpiller::spill(LiveInterval *li,
                          std::vector<LiveInterval*> &newIntervals,
                          SmallVectorImpl<LiveInterval*> &spillIs,
                          SlotIndex *earliestIndex) {
  DEBUG(dbgs() << "Inline spilling " << *li << "\n");
  assert(li->isSpillable() && "Attempting to spill already spilled value.");
  assert(!li->isStackSlot() && "Trying to spill a stack slot.");

  li_ = li;
  newIntervals_ = &newIntervals;
  rc_ = mri_.getRegClass(li->reg);
  spillIs_ = &spillIs;

  reMaterializeAll();

  // Remat may handle everything.
  if (li_->empty())
    return;

  stackSlot_ = vrm_.assignVirt2StackSlot(li->reg);

  // Iterate over instructions using register.
  for (MachineRegisterInfo::reg_iterator RI = mri_.reg_begin(li->reg);
       MachineInstr *MI = RI.skipInstruction();) {

    // Debug values are not allowed to affect codegen.
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      uint64_t Offset = MI->getOperand(1).getImm();
      const MDNode *MDPtr = MI->getOperand(2).getMetadata();
      DebugLoc DL = MI->getDebugLoc();
      if (MachineInstr *NewDV = tii_.emitFrameIndexDebugValue(mf_, stackSlot_,
                                                              Offset, MDPtr,
                                                              DL)) {
        DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
        MachineBasicBlock *MBB = MI->getParent();
        MBB->insert(MBB->erase(MI), NewDV);
      } else {
        DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
        MI->eraseFromParent();
      }
      continue;
    }

    // Analyze instruction.
    bool Reads, Writes;
    SmallVector<unsigned, 8> Ops;
    tie(Reads, Writes) = MI->readsWritesVirtualRegister(li->reg, &Ops);

    // Attempt to fold memory ops.
    if (foldMemoryOperand(MI, Ops))
      continue;

    // Allocate interval around instruction.
    // FIXME: Infer regclass from instruction alone.
    unsigned NewVReg = mri_.createVirtualRegister(rc_);
    vrm_.grow();
    LiveInterval &NewLI = lis_.getOrCreateInterval(NewVReg);
    NewLI.markNotSpillable();

    if (Reads)
      insertReload(NewLI, MI);

    // Rewrite instruction operands.
    bool hasLiveDef = false;
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(Ops[i]);
      MO.setReg(NewVReg);
      if (MO.isUse()) {
        if (!MI->isRegTiedToDefOperand(Ops[i]))
          MO.setIsKill();
      } else {
        if (!MO.isDead())
          hasLiveDef = true;
      }
    }

    // FIXME: Use a second vreg if instruction has no tied ops.
    if (Writes && hasLiveDef)
      insertSpill(NewLI, MI);

    DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
    newIntervals.push_back(&NewLI);
  }
}
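
//===----------------------------------------------------------------------===//
// Usage sketch (illustrative only). A register allocator would create one
// spiller up front and call spill() for each live interval it decides to
// spill. Only createInlineSpiller() and Spiller::spill() from this file are
// assumed; the surrounding variable names (MF, LIS, LoopInfo, VRM, CurLI) are
// hypothetical placeholders for the allocator's own state.
//
//   Spiller *S = createInlineSpiller(MF, LIS, LoopInfo, VRM);
//   std::vector<LiveInterval*> NewIntervals;   // filled in by spill()
//   SmallVector<LiveInterval*, 8> SpillIs;     // other intervals being spilled
//   S->spill(CurLI, NewIntervals, SpillIs, 0);
//   // NewIntervals now holds the short, not-spillable intervals created
//   // around each reload, remat, or spill store; the allocator re-queues
//   // them for register assignment.
//===----------------------------------------------------------------------===//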