//===-------- InlineSpiller.cpp - Insert spills and restores inline -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The inline spiller modifies the machine function directly instead of
// inserting spills and restores in VirtRegMap.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "spiller"
#include "Spiller.h"
#include "SplitKit.h"
#include "VirtRegMap.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

namespace {
class InlineSpiller : public Spiller {
  MachineFunctionPass &pass_;
  MachineFunction &mf_;
  LiveIntervals &lis_;
  MachineLoopInfo &loops_;
  VirtRegMap &vrm_;
  MachineFrameInfo &mfi_;
  MachineRegisterInfo &mri_;
  const TargetInstrInfo &tii_;
  const TargetRegisterInfo &tri_;
  const BitVector reserved_;

  SplitAnalysis splitAnalysis_;

  // Variables that are valid during spill(), but used by multiple methods.
  LiveInterval *li_;
  std::vector<LiveInterval*> *newIntervals_;
  const TargetRegisterClass *rc_;
  int stackSlot_;
  const SmallVectorImpl<LiveInterval*> *spillIs_;

  // Values of the current interval that can potentially remat.
  SmallPtrSet<VNInfo*, 8> reMattable_;

  // Values in reMattable_ that failed to remat at some point.
  SmallPtrSet<VNInfo*, 8> usedValues_;

  ~InlineSpiller() {}

public:
  InlineSpiller(MachineFunctionPass &pass,
                MachineFunction &mf,
                VirtRegMap &vrm)
    : pass_(pass),
      mf_(mf),
      lis_(pass.getAnalysis<LiveIntervals>()),
      loops_(pass.getAnalysis<MachineLoopInfo>()),
      vrm_(vrm),
      mfi_(*mf.getFrameInfo()),
      mri_(mf.getRegInfo()),
      tii_(*mf.getTarget().getInstrInfo()),
      tri_(*mf.getTarget().getRegisterInfo()),
      reserved_(tri_.getReservedRegs(mf_)),
      splitAnalysis_(mf, lis_, loops_) {}

  void spill(LiveInterval *li,
             std::vector<LiveInterval*> &newIntervals,
             SmallVectorImpl<LiveInterval*> &spillIs,
             SlotIndex *earliestIndex);

private:
  bool split();

  bool allUsesAvailableAt(const MachineInstr *OrigMI, SlotIndex OrigIdx,
                          SlotIndex UseIdx);
  bool reMaterializeFor(MachineBasicBlock::iterator MI);
  void reMaterializeAll();

  bool coalesceStackAccess(MachineInstr *MI);
  bool foldMemoryOperand(MachineBasicBlock::iterator MI,
                         const SmallVectorImpl<unsigned> &Ops);
  void insertReload(LiveInterval &NewLI, MachineBasicBlock::iterator MI);
  void insertSpill(LiveInterval &NewLI, MachineBasicBlock::iterator MI);
};
}

namespace llvm {
Spiller *createInlineSpiller(MachineFunctionPass &pass,
                             MachineFunction &mf,
                             VirtRegMap &vrm) {
  return new InlineSpiller(pass, mf, vrm);
}
}

/// split - Try splitting the current interval into pieces that may be
/// allocated separately. Return true if successful.
bool InlineSpiller::split() {
  // FIXME: Add intra-MBB splitting.
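  // Splitting is only implemented around loops for now, so an interval that
  // is confined to a single basic block has nothing to split around; let the
  // normal spill path handle it.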
  if (lis_.intervalIsInOneMBB(*li_))
    return false;

  splitAnalysis_.analyze(li_);

  if (const MachineLoop *loop = splitAnalysis_.getBestSplitLoop()) {
    // We can split, but li_ may be left intact with fewer uses.
    if (SplitEditor(splitAnalysis_, lis_, vrm_, *newIntervals_)
          .splitAroundLoop(loop))
      return true;
  }
  return false;
}

/// allUsesAvailableAt - Return true if all registers used by OrigMI at
/// OrigIdx are also available with the same value at UseIdx.
bool InlineSpiller::allUsesAvailableAt(const MachineInstr *OrigMI,
                                       SlotIndex OrigIdx,
                                       SlotIndex UseIdx) {
  OrigIdx = OrigIdx.getUseIndex();
  UseIdx = UseIdx.getUseIndex();
  for (unsigned i = 0, e = OrigMI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = OrigMI->getOperand(i);
    if (!MO.isReg() || !MO.getReg() || MO.getReg() == li_->reg)
      continue;
    // Reserved registers are OK.
    if (MO.isUndef() || !lis_.hasInterval(MO.getReg()))
      continue;
    // We don't want to move any defs.
    if (MO.isDef())
      return false;
    // We cannot depend on virtual registers in spillIs_. They will be spilled.
    for (unsigned si = 0, se = spillIs_->size(); si != se; ++si)
      if ((*spillIs_)[si]->reg == MO.getReg())
        return false;

    LiveInterval &LI = lis_.getInterval(MO.getReg());
    const VNInfo *OVNI = LI.getVNInfoAt(OrigIdx);
    if (!OVNI)
      continue;
    if (OVNI != LI.getVNInfoAt(UseIdx))
      return false;
  }
  return true;
}

/// reMaterializeFor - Attempt to rematerialize li_->reg before MI instead of
/// reloading it.
bool InlineSpiller::reMaterializeFor(MachineBasicBlock::iterator MI) {
  SlotIndex UseIdx = lis_.getInstructionIndex(MI).getUseIndex();
  VNInfo *OrigVNI = li_->getVNInfoAt(UseIdx);
  if (!OrigVNI) {
    DEBUG(dbgs() << "\tadding <undef> flags: ");
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.isUse() && MO.getReg() == li_->reg)
        MO.setIsUndef();
    }
    DEBUG(dbgs() << UseIdx << '\t' << *MI);
    return true;
  }
  if (!reMattable_.count(OrigVNI)) {
    DEBUG(dbgs() << "\tusing non-remat valno " << OrigVNI->id << ": "
                 << UseIdx << '\t' << *MI);
    return false;
  }
  MachineInstr *OrigMI = lis_.getInstructionFromIndex(OrigVNI->def);
  if (!allUsesAvailableAt(OrigMI, OrigVNI->def, UseIdx)) {
    usedValues_.insert(OrigVNI);
    DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << *MI);
    return false;
  }

  // If the instruction also writes li_->reg, it had better not require the
  // same register for uses and defs.
  bool Reads, Writes;
  SmallVector<unsigned, 8> Ops;
  tie(Reads, Writes) = MI->readsWritesVirtualRegister(li_->reg, &Ops);
  if (Writes) {
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(Ops[i]);
      if (MO.isUse() ? MI->isRegTiedToDefOperand(Ops[i]) : MO.getSubReg()) {
        usedValues_.insert(OrigVNI);
        DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << *MI);
        return false;
      }
    }
  }

  // Allocate a new register for the remat.
  unsigned NewVReg = mri_.createVirtualRegister(rc_);
  vrm_.grow();
  LiveInterval &NewLI = lis_.getOrCreateInterval(NewVReg);
  NewLI.markNotSpillable();
  newIntervals_->push_back(&NewLI);

  // Finally we can rematerialize OrigMI before MI.
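  // reMaterialize() inserts the cloned def immediately before MI, so backing
  // the iterator up by one instruction lands on the new remat instruction,
  // which then gets its own slot index.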
  MachineBasicBlock &MBB = *MI->getParent();
  tii_.reMaterialize(MBB, MI, NewLI.reg, 0, OrigMI, tri_);
  MachineBasicBlock::iterator RematMI = MI;
  SlotIndex DefIdx = lis_.InsertMachineInstrInMaps(--RematMI).getDefIndex();
  DEBUG(dbgs() << "\tremat: " << DefIdx << '\t' << *RematMI);

  // Replace the use operands with the new register.
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(Ops[i]);
    if (MO.isReg() && MO.isUse() && MO.getReg() == li_->reg) {
      MO.setReg(NewVReg);
      MO.setIsKill();
    }
  }
  DEBUG(dbgs() << "\t " << UseIdx << '\t' << *MI);

  VNInfo *DefVNI = NewLI.getNextValue(DefIdx, 0, true,
                                      lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(DefIdx, UseIdx.getDefIndex(), DefVNI));
  DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
  return true;
}

/// reMaterializeAll - Try to rematerialize as many uses of li_ as possible,
/// and trim the live ranges after.
void InlineSpiller::reMaterializeAll() {
  // Do a quick scan of the interval values to see if any are remattable.
  reMattable_.clear();
  usedValues_.clear();
  for (LiveInterval::const_vni_iterator I = li_->vni_begin(),
       E = li_->vni_end(); I != E; ++I) {
    VNInfo *VNI = *I;
    if (VNI->isUnused() || !VNI->isDefAccurate())
      continue;
    MachineInstr *DefMI = lis_.getInstructionFromIndex(VNI->def);
    if (!DefMI || !tii_.isTriviallyReMaterializable(DefMI))
      continue;
    reMattable_.insert(VNI);
  }

  // Often, no defs are remattable.
  if (reMattable_.empty())
    return;

  // Try to remat before all uses of li_->reg.
  bool anyRemat = false;
  for (MachineRegisterInfo::use_nodbg_iterator
         RI = mri_.use_nodbg_begin(li_->reg);
       MachineInstr *MI = RI.skipInstruction();)
    anyRemat |= reMaterializeFor(MI);

  if (!anyRemat)
    return;

  // Remove any values that were completely rematted.
  bool anyRemoved = false;
  for (SmallPtrSet<VNInfo*, 8>::iterator I = reMattable_.begin(),
       E = reMattable_.end(); I != E; ++I) {
    VNInfo *VNI = *I;
    if (VNI->hasPHIKill() || usedValues_.count(VNI))
      continue;
    MachineInstr *DefMI = lis_.getInstructionFromIndex(VNI->def);
    DEBUG(dbgs() << "\tremoving dead def: " << VNI->def << '\t' << *DefMI);
    lis_.RemoveMachineInstrFromMaps(DefMI);
    vrm_.RemoveMachineInstrFromMaps(DefMI);
    DefMI->eraseFromParent();
    VNI->setIsDefAccurate(false);
    anyRemoved = true;
  }

  if (!anyRemoved)
    return;

  // Removing defs may leave behind debug uses of li_ at points where it is
  // no longer live.
  for (MachineRegisterInfo::use_iterator RI = mri_.use_begin(li_->reg);
       MachineInstr *MI = RI.skipInstruction();) {
    if (!MI->isDebugValue())
      continue;
    // Try to preserve the debug value if li_ is live immediately after it.
    MachineBasicBlock::iterator NextMI = MI;
    ++NextMI;
    if (NextMI != MI->getParent()->end() && !lis_.isNotInMIMap(NextMI)) {
      VNInfo *VNI = li_->getVNInfoAt(lis_.getInstructionIndex(NextMI));
      if (VNI && (VNI->hasPHIKill() || usedValues_.count(VNI)))
        continue;
    }
    DEBUG(dbgs() << "Removing debug info due to remat:" << "\t" << *MI);
    MI->eraseFromParent();
  }
}

/// coalesceStackAccess - If MI is a load or store of stackSlot_, it can be
/// removed.
bool InlineSpiller::coalesceStackAccess(MachineInstr *MI) {
  int FI = 0;
  unsigned reg;
  if (!(reg = tii_.isLoadFromStackSlot(MI, FI)) &&
      !(reg = tii_.isStoreToStackSlot(MI, FI)))
    return false;

  // We have a stack access. Is it the right register and slot?
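  // Only a load or store moving li_->reg between the register and its own
  // spill slot is redundant now; accesses to other registers or other frame
  // indexes must be left alone.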
  if (reg != li_->reg || FI != stackSlot_)
    return false;

  DEBUG(dbgs() << "Coalescing stack access: " << *MI);
  lis_.RemoveMachineInstrFromMaps(MI);
  MI->eraseFromParent();
  return true;
}

/// foldMemoryOperand - Try folding stack slot references in Ops into MI.
/// Return true on success, and MI will be erased.
bool InlineSpiller::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                      const SmallVectorImpl<unsigned> &Ops) {
  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
  // operands.
  SmallVector<unsigned, 8> FoldOps;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned Idx = Ops[i];
    MachineOperand &MO = MI->getOperand(Idx);
    if (MO.isImplicit())
      continue;
    // FIXME: Teach targets to deal with subregs.
    if (MO.getSubReg())
      return false;
    // Tied use operands should not be passed to foldMemoryOperand.
    if (!MI->isRegTiedToDefOperand(Idx))
      FoldOps.push_back(Idx);
  }

  MachineInstr *FoldMI = tii_.foldMemoryOperand(MI, FoldOps, stackSlot_);
  if (!FoldMI)
    return false;
  lis_.ReplaceMachineInstrInMaps(MI, FoldMI);
  vrm_.addSpillSlotUse(stackSlot_, FoldMI);
  MI->eraseFromParent();
  DEBUG(dbgs() << "\tfolded: " << *FoldMI);
  return true;
}

/// insertReload - Insert a reload of NewLI.reg before MI.
void InlineSpiller::insertReload(LiveInterval &NewLI,
                                 MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();
  SlotIndex Idx = lis_.getInstructionIndex(MI).getDefIndex();
  tii_.loadRegFromStackSlot(MBB, MI, NewLI.reg, stackSlot_, rc_, &tri_);
  --MI; // Point to load instruction.
  SlotIndex LoadIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
  vrm_.addSpillSlotUse(stackSlot_, MI);
  DEBUG(dbgs() << "\treload: " << LoadIdx << '\t' << *MI);
  VNInfo *LoadVNI = NewLI.getNextValue(LoadIdx, 0, true,
                                       lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(LoadIdx, Idx, LoadVNI));
}

/// insertSpill - Insert a spill of NewLI.reg after MI.
void InlineSpiller::insertSpill(LiveInterval &NewLI,
                                MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();
  SlotIndex Idx = lis_.getInstructionIndex(MI).getDefIndex();
  tii_.storeRegToStackSlot(MBB, ++MI, NewLI.reg, true, stackSlot_, rc_, &tri_);
  --MI; // Point to store instruction.
  SlotIndex StoreIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
  vrm_.addSpillSlotUse(stackSlot_, MI);
  DEBUG(dbgs() << "\tspilled: " << StoreIdx << '\t' << *MI);
  VNInfo *StoreVNI = NewLI.getNextValue(Idx, 0, true,
                                        lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(Idx, StoreIdx, StoreVNI));
}

void InlineSpiller::spill(LiveInterval *li,
                          std::vector<LiveInterval*> &newIntervals,
                          SmallVectorImpl<LiveInterval*> &spillIs,
                          SlotIndex *earliestIndex) {
  DEBUG(dbgs() << "Inline spilling " << *li << "\n");
  assert(li->isSpillable() && "Attempting to spill already spilled value.");
  assert(!li->isStackSlot() && "Trying to spill a stack slot.");

  li_ = li;
  newIntervals_ = &newIntervals;
  rc_ = mri_.getRegClass(li->reg);
  spillIs_ = &spillIs;

  if (split())
    return;

  reMaterializeAll();

  // Remat may handle everything.
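  // If rematerialization covered every use, li_ can be left with no live
  // ranges at all, in which case no stack slot or reload/spill code is
  // needed.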
  if (li_->empty())
    return;

  stackSlot_ = vrm_.getStackSlot(li->reg);
  if (stackSlot_ == VirtRegMap::NO_STACK_SLOT)
    stackSlot_ = vrm_.assignVirt2StackSlot(li->reg);

  // Iterate over instructions using the register.
  for (MachineRegisterInfo::reg_iterator RI = mri_.reg_begin(li->reg);
       MachineInstr *MI = RI.skipInstruction();) {

    // Debug values are not allowed to affect codegen.
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      uint64_t Offset = MI->getOperand(1).getImm();
      const MDNode *MDPtr = MI->getOperand(2).getMetadata();
      DebugLoc DL = MI->getDebugLoc();
      if (MachineInstr *NewDV = tii_.emitFrameIndexDebugValue(mf_, stackSlot_,
                                                              Offset, MDPtr,
                                                              DL)) {
        DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
        MachineBasicBlock *MBB = MI->getParent();
        MBB->insert(MBB->erase(MI), NewDV);
      } else {
        DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
        MI->eraseFromParent();
      }
      continue;
    }

    // Stack slot accesses may coalesce away.
    if (coalesceStackAccess(MI))
      continue;

    // Analyze instruction.
    bool Reads, Writes;
    SmallVector<unsigned, 8> Ops;
    tie(Reads, Writes) = MI->readsWritesVirtualRegister(li->reg, &Ops);

    // Attempt to fold memory ops.
    if (foldMemoryOperand(MI, Ops))
      continue;

    // Allocate an interval around the instruction.
    // FIXME: Infer regclass from instruction alone.
    unsigned NewVReg = mri_.createVirtualRegister(rc_);
    vrm_.grow();
    LiveInterval &NewLI = lis_.getOrCreateInterval(NewVReg);
    NewLI.markNotSpillable();

    if (Reads)
      insertReload(NewLI, MI);

    // Rewrite instruction operands.
    bool hasLiveDef = false;
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(Ops[i]);
      MO.setReg(NewVReg);
      if (MO.isUse()) {
        if (!MI->isRegTiedToDefOperand(Ops[i]))
          MO.setIsKill();
      } else {
        if (!MO.isDead())
          hasLiveDef = true;
      }
    }

    // FIXME: Use a second vreg if instruction has no tied ops.
    if (Writes && hasLiveDef)
      insertSpill(NewLI, MI);

    DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
    newIntervals.push_back(&NewLI);
  }
}