//===-------- InlineSpiller.cpp - Insert spills and restores inline -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The inline spiller modifies the machine function directly instead of
// inserting spills and restores in VirtRegMap.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "spiller"
#include "Spiller.h"
#include "SplitKit.h"
#include "VirtRegMap.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

namespace {
class InlineSpiller : public Spiller {
  MachineFunctionPass &pass_;
  MachineFunction &mf_;
  LiveIntervals &lis_;
  MachineLoopInfo &loops_;
  VirtRegMap &vrm_;
  MachineFrameInfo &mfi_;
  MachineRegisterInfo &mri_;
  const TargetInstrInfo &tii_;
  const TargetRegisterInfo &tri_;
  const BitVector reserved_;

  SplitAnalysis splitAnalysis_;

  // Variables that are valid during spill(), but used by multiple methods.
  LiveInterval *li_;
  std::vector<LiveInterval*> *newIntervals_;
  const TargetRegisterClass *rc_;
  int stackSlot_;
  const SmallVectorImpl<LiveInterval*> *spillIs_;

  // Values of the current interval that can potentially remat.
  SmallPtrSet<VNInfo*, 8> reMattable_;

  // Values in reMattable_ that failed to remat at some point.
  SmallPtrSet<VNInfo*, 8> usedValues_;

  ~InlineSpiller() {}

public:
  InlineSpiller(MachineFunctionPass &pass,
                MachineFunction &mf,
                VirtRegMap &vrm)
    : pass_(pass),
      mf_(mf),
      lis_(pass.getAnalysis<LiveIntervals>()),
      loops_(pass.getAnalysis<MachineLoopInfo>()),
      vrm_(vrm),
      mfi_(*mf.getFrameInfo()),
      mri_(mf.getRegInfo()),
      tii_(*mf.getTarget().getInstrInfo()),
      tri_(*mf.getTarget().getRegisterInfo()),
      reserved_(tri_.getReservedRegs(mf_)),
      splitAnalysis_(mf, lis_, loops_) {}

  void spill(LiveInterval *li,
             std::vector<LiveInterval*> &newIntervals,
             SmallVectorImpl<LiveInterval*> &spillIs,
             SlotIndex *earliestIndex);

private:
  bool split();

  bool allUsesAvailableAt(const MachineInstr *OrigMI, SlotIndex OrigIdx,
                          SlotIndex UseIdx);
  bool reMaterializeFor(MachineBasicBlock::iterator MI);
  void reMaterializeAll();

  bool coalesceStackAccess(MachineInstr *MI);
  bool foldMemoryOperand(MachineBasicBlock::iterator MI,
                         const SmallVectorImpl<unsigned> &Ops);
  void insertReload(LiveInterval &NewLI, MachineBasicBlock::iterator MI);
  void insertSpill(LiveInterval &NewLI, MachineBasicBlock::iterator MI);
};
}

namespace llvm {
Spiller *createInlineSpiller(MachineFunctionPass &pass,
                             MachineFunction &mf,
                             VirtRegMap &vrm) {
  return new InlineSpiller(pass, mf, vrm);
}
}

/// split - try splitting the current interval into pieces that may allocate
/// separately. Return true if successful.
bool InlineSpiller::split() {
  // FIXME: Add intra-MBB splitting.
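  // Splitting is only attempted for intervals that span multiple basic
  // blocks; single-block intervals fall through to the normal spill path.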
  if (lis_.intervalIsInOneMBB(*li_))
    return false;

  splitAnalysis_.analyze(li_);

  if (const MachineLoop *loop = splitAnalysis_.getBestSplitLoop()) {
    SplitEditor(splitAnalysis_, lis_, vrm_, *newIntervals_)
      .splitAroundLoop(loop);
    return true;
  }
  return false;
}

/// allUsesAvailableAt - Return true if all registers used by OrigMI at
/// OrigIdx are also available with the same value at UseIdx.
bool InlineSpiller::allUsesAvailableAt(const MachineInstr *OrigMI,
                                       SlotIndex OrigIdx,
                                       SlotIndex UseIdx) {
  OrigIdx = OrigIdx.getUseIndex();
  UseIdx = UseIdx.getUseIndex();
  for (unsigned i = 0, e = OrigMI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = OrigMI->getOperand(i);
    if (!MO.isReg() || !MO.getReg() || MO.getReg() == li_->reg)
      continue;
    // Reserved registers are OK.
    if (MO.isUndef() || !lis_.hasInterval(MO.getReg()))
      continue;
    // We don't want to move any defs.
    if (MO.isDef())
      return false;
    // We cannot depend on virtual registers in spillIs_. They will be spilled.
    for (unsigned si = 0, se = spillIs_->size(); si != se; ++si)
      if ((*spillIs_)[si]->reg == MO.getReg())
        return false;

    LiveInterval &LI = lis_.getInterval(MO.getReg());
    const VNInfo *OVNI = LI.getVNInfoAt(OrigIdx);
    if (!OVNI)
      continue;
    if (OVNI != LI.getVNInfoAt(UseIdx))
      return false;
  }
  return true;
}

/// reMaterializeFor - Attempt to rematerialize li_->reg before MI instead of
/// reloading it.
bool InlineSpiller::reMaterializeFor(MachineBasicBlock::iterator MI) {
  SlotIndex UseIdx = lis_.getInstructionIndex(MI).getUseIndex();
  VNInfo *OrigVNI = li_->getVNInfoAt(UseIdx);
  if (!OrigVNI) {
    DEBUG(dbgs() << "\tadding <undef> flags: ");
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.isUse() && MO.getReg() == li_->reg)
        MO.setIsUndef();
    }
    DEBUG(dbgs() << UseIdx << '\t' << *MI);
    return true;
  }
  if (!reMattable_.count(OrigVNI)) {
    DEBUG(dbgs() << "\tusing non-remat valno " << OrigVNI->id << ": "
                 << UseIdx << '\t' << *MI);
    return false;
  }
  MachineInstr *OrigMI = lis_.getInstructionFromIndex(OrigVNI->def);
  if (!allUsesAvailableAt(OrigMI, OrigVNI->def, UseIdx)) {
    usedValues_.insert(OrigVNI);
    DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << *MI);
    return false;
  }

  // If the instruction also writes li_->reg, it had better not require the
  // same register for uses and defs.
  bool Reads, Writes;
  SmallVector<unsigned, 8> Ops;
  tie(Reads, Writes) = MI->readsWritesVirtualRegister(li_->reg, &Ops);
  if (Writes) {
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(Ops[i]);
      if (MO.isUse() ? MI->isRegTiedToDefOperand(Ops[i]) : MO.getSubReg()) {
        usedValues_.insert(OrigVNI);
        DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << *MI);
        return false;
      }
    }
  }

  // Allocate a new register for the remat.
  unsigned NewVReg = mri_.createVirtualRegister(rc_);
  vrm_.grow();
  LiveInterval &NewLI = lis_.getOrCreateInterval(NewVReg);
  NewLI.markNotSpillable();
  newIntervals_->push_back(&NewLI);

  // Finally we can rematerialize OrigMI before MI.
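  // reMaterialize clones the defining instruction in front of MI with the
  // new virtual register as its destination; the clone is then indexed so
  // the new live range below can be computed from its def slot.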
  MachineBasicBlock &MBB = *MI->getParent();
  tii_.reMaterialize(MBB, MI, NewLI.reg, 0, OrigMI, tri_);
  MachineBasicBlock::iterator RematMI = MI;
  SlotIndex DefIdx = lis_.InsertMachineInstrInMaps(--RematMI).getDefIndex();
  DEBUG(dbgs() << "\tremat: " << DefIdx << '\t' << *RematMI);

  // Replace operands
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(Ops[i]);
    if (MO.isReg() && MO.isUse() && MO.getReg() == li_->reg) {
      MO.setReg(NewVReg);
      MO.setIsKill();
    }
  }
  DEBUG(dbgs() << "\t " << UseIdx << '\t' << *MI);

  VNInfo *DefVNI = NewLI.getNextValue(DefIdx, 0, true,
                                      lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(DefIdx, UseIdx.getDefIndex(), DefVNI));
  DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
  return true;
}

/// reMaterializeAll - Try to rematerialize as many uses of li_ as possible,
/// and trim the live ranges after.
void InlineSpiller::reMaterializeAll() {
  // Do a quick scan of the interval values to find if any are remattable.
  reMattable_.clear();
  usedValues_.clear();
  for (LiveInterval::const_vni_iterator I = li_->vni_begin(),
       E = li_->vni_end(); I != E; ++I) {
    VNInfo *VNI = *I;
    if (VNI->isUnused() || !VNI->isDefAccurate())
      continue;
    MachineInstr *DefMI = lis_.getInstructionFromIndex(VNI->def);
    if (!DefMI || !tii_.isTriviallyReMaterializable(DefMI))
      continue;
    reMattable_.insert(VNI);
  }

  // Often, no defs are remattable.
  if (reMattable_.empty())
    return;

  // Try to remat before all uses of li_->reg.
  bool anyRemat = false;
  for (MachineRegisterInfo::use_nodbg_iterator
       RI = mri_.use_nodbg_begin(li_->reg);
       MachineInstr *MI = RI.skipInstruction();)
    anyRemat |= reMaterializeFor(MI);

  if (!anyRemat)
    return;

  // Remove any values that were completely rematted.
  bool anyRemoved = false;
  for (SmallPtrSet<VNInfo*, 8>::iterator I = reMattable_.begin(),
       E = reMattable_.end(); I != E; ++I) {
    VNInfo *VNI = *I;
    if (VNI->hasPHIKill() || usedValues_.count(VNI))
      continue;
    MachineInstr *DefMI = lis_.getInstructionFromIndex(VNI->def);
    DEBUG(dbgs() << "\tremoving dead def: " << VNI->def << '\t' << *DefMI);
    lis_.RemoveMachineInstrFromMaps(DefMI);
    vrm_.RemoveMachineInstrFromMaps(DefMI);
    DefMI->eraseFromParent();
    li_->removeValNo(VNI);
    anyRemoved = true;
  }

  if (!anyRemoved)
    return;

  // Removing values may cause debug uses where li_ is not live.
  for (MachineRegisterInfo::use_iterator RI = mri_.use_begin(li_->reg);
       MachineInstr *MI = RI.skipInstruction();) {
    if (!MI->isDebugValue())
      continue;
    // Try to preserve the debug value if li_ is live immediately after it.
    MachineBasicBlock::iterator NextMI = MI;
    ++NextMI;
    if (NextMI != MI->getParent()->end() && !lis_.isNotInMIMap(NextMI)) {
      SlotIndex NearIdx = lis_.getInstructionIndex(NextMI);
      if (li_->liveAt(NearIdx))
        continue;
    }
    DEBUG(dbgs() << "Removing debug info due to remat:" << "\t" << *MI);
    MI->eraseFromParent();
  }
}

/// If MI is a load or store of stackSlot_, it can be removed.
bool InlineSpiller::coalesceStackAccess(MachineInstr *MI) {
  int FI = 0;
  unsigned reg;
  if (!(reg = tii_.isLoadFromStackSlot(MI, FI)) &&
      !(reg = tii_.isStoreToStackSlot(MI, FI)))
    return false;

  // We have a stack access. Is it the right register and slot?
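  // Only an access that uses the current register *and* the current slot is
  // redundant; any other stack traffic must be left alone.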
  if (reg != li_->reg || FI != stackSlot_)
    return false;

  DEBUG(dbgs() << "Coalescing stack access: " << *MI);
  lis_.RemoveMachineInstrFromMaps(MI);
  MI->eraseFromParent();
  return true;
}

/// foldMemoryOperand - Try folding stack slot references in Ops into MI.
/// Return true on success, and MI will be erased.
bool InlineSpiller::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                      const SmallVectorImpl<unsigned> &Ops) {
  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
  // operands.
  SmallVector<unsigned, 8> FoldOps;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned Idx = Ops[i];
    MachineOperand &MO = MI->getOperand(Idx);
    if (MO.isImplicit())
      continue;
    // FIXME: Teach targets to deal with subregs.
    if (MO.getSubReg())
      return false;
    // Tied use operands should not be passed to foldMemoryOperand.
    if (!MI->isRegTiedToDefOperand(Idx))
      FoldOps.push_back(Idx);
  }

  MachineInstr *FoldMI = tii_.foldMemoryOperand(MI, FoldOps, stackSlot_);
  if (!FoldMI)
    return false;
  lis_.ReplaceMachineInstrInMaps(MI, FoldMI);
  vrm_.addSpillSlotUse(stackSlot_, FoldMI);
  MI->eraseFromParent();
  DEBUG(dbgs() << "\tfolded: " << *FoldMI);
  return true;
}

/// insertReload - Insert a reload of NewLI.reg before MI.
void InlineSpiller::insertReload(LiveInterval &NewLI,
                                 MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();
  SlotIndex Idx = lis_.getInstructionIndex(MI).getDefIndex();
  tii_.loadRegFromStackSlot(MBB, MI, NewLI.reg, stackSlot_, rc_, &tri_);
  --MI; // Point to load instruction.
  SlotIndex LoadIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
  vrm_.addSpillSlotUse(stackSlot_, MI);
  DEBUG(dbgs() << "\treload: " << LoadIdx << '\t' << *MI);
  VNInfo *LoadVNI = NewLI.getNextValue(LoadIdx, 0, true,
                                       lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(LoadIdx, Idx, LoadVNI));
}

/// insertSpill - Insert a spill of NewLI.reg after MI.
void InlineSpiller::insertSpill(LiveInterval &NewLI,
                                MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();
  SlotIndex Idx = lis_.getInstructionIndex(MI).getDefIndex();
  tii_.storeRegToStackSlot(MBB, ++MI, NewLI.reg, true, stackSlot_, rc_, &tri_);
  --MI; // Point to store instruction.
  SlotIndex StoreIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
  vrm_.addSpillSlotUse(stackSlot_, MI);
  DEBUG(dbgs() << "\tspilled: " << StoreIdx << '\t' << *MI);
  VNInfo *StoreVNI = NewLI.getNextValue(Idx, 0, true,
                                        lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(Idx, StoreIdx, StoreVNI));
}

void InlineSpiller::spill(LiveInterval *li,
                          std::vector<LiveInterval*> &newIntervals,
                          SmallVectorImpl<LiveInterval*> &spillIs,
                          SlotIndex *earliestIndex) {
  DEBUG(dbgs() << "Inline spilling " << *li << "\n");
  assert(li->isSpillable() && "Attempting to spill already spilled value.");
  assert(!li->isStackSlot() && "Trying to spill a stack slot.");

  li_ = li;
  newIntervals_ = &newIntervals;
  rc_ = mri_.getRegClass(li->reg);
  spillIs_ = &spillIs;

  if (split())
    return;

  reMaterializeAll();

  // Remat may handle everything.
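  // If rematerialization rewrote every use and erased every def, the
  // original interval is now empty and no stack slot traffic is needed.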
  if (li_->empty())
    return;

  stackSlot_ = vrm_.getStackSlot(li->reg);
  if (stackSlot_ == VirtRegMap::NO_STACK_SLOT)
    stackSlot_ = vrm_.assignVirt2StackSlot(li->reg);

  // Iterate over instructions using register.
  for (MachineRegisterInfo::reg_iterator RI = mri_.reg_begin(li->reg);
       MachineInstr *MI = RI.skipInstruction();) {

    // Debug values are not allowed to affect codegen.
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      uint64_t Offset = MI->getOperand(1).getImm();
      const MDNode *MDPtr = MI->getOperand(2).getMetadata();
      DebugLoc DL = MI->getDebugLoc();
      if (MachineInstr *NewDV = tii_.emitFrameIndexDebugValue(mf_, stackSlot_,
                                                              Offset, MDPtr,
                                                              DL)) {
        DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
        MachineBasicBlock *MBB = MI->getParent();
        MBB->insert(MBB->erase(MI), NewDV);
      } else {
        DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
        MI->eraseFromParent();
      }
      continue;
    }

    // Stack slot accesses may coalesce away.
    if (coalesceStackAccess(MI))
      continue;

    // Analyze instruction.
    bool Reads, Writes;
    SmallVector<unsigned, 8> Ops;
    tie(Reads, Writes) = MI->readsWritesVirtualRegister(li->reg, &Ops);

    // Attempt to fold memory ops.
    if (foldMemoryOperand(MI, Ops))
      continue;

    // Allocate interval around instruction.
    // FIXME: Infer regclass from instruction alone.
    unsigned NewVReg = mri_.createVirtualRegister(rc_);
    vrm_.grow();
    LiveInterval &NewLI = lis_.getOrCreateInterval(NewVReg);
    NewLI.markNotSpillable();

    if (Reads)
      insertReload(NewLI, MI);

    // Rewrite instruction operands.
    bool hasLiveDef = false;
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(Ops[i]);
      MO.setReg(NewVReg);
      if (MO.isUse()) {
        if (!MI->isRegTiedToDefOperand(Ops[i]))
          MO.setIsKill();
      } else {
        if (!MO.isDead())
          hasLiveDef = true;
      }
    }

    // FIXME: Use a second vreg if instruction has no tied ops.
    if (Writes && hasLiveDef)
      insertSpill(NewLI, MI);

    DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
    newIntervals.push_back(&NewLI);
  }
}
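
// Usage sketch (hypothetical caller, for illustration only): a register
// allocator pass owns a Spiller and hands it each interval that cannot be
// assigned a physical register. The variable names below are assumed, not
// part of this file:
//
//   Spiller *spiller = createInlineSpiller(*this, MF, *VRM);
//   std::vector<LiveInterval*> added;
//   spiller->spill(&LI, added, SpillIs, 0);
//   // 'added' now holds the short, unspillable intervals created around
//   // each use; the allocator queues them for assignment next.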