//===-------- InlineSpiller.cpp - Insert spills and restores inline -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The inline spiller modifies the machine function directly instead of
// inserting spills and restores in VirtRegMap.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "spiller"
#include "Spiller.h"
#include "SplitKit.h"
#include "VirtRegMap.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

namespace {
class InlineSpiller : public Spiller {
  MachineFunctionPass &pass_;
  MachineFunction &mf_;
  LiveIntervals &lis_;
  MachineLoopInfo &loops_;
  VirtRegMap &vrm_;
  MachineFrameInfo &mfi_;
  MachineRegisterInfo &mri_;
  const TargetInstrInfo &tii_;
  const TargetRegisterInfo &tri_;
  const BitVector reserved_;

  SplitAnalysis splitAnalysis_;

  // Variables that are valid during spill(), but used by multiple methods.
  LiveInterval *li_;
  std::vector<LiveInterval*> *newIntervals_;
  const TargetRegisterClass *rc_;
  int stackSlot_;
  const SmallVectorImpl<LiveInterval*> *spillIs_;

  // Values of the current interval that can potentially remat.
  SmallPtrSet<VNInfo*, 8> reMattable_;

  // Values in reMattable_ that failed to remat at some point.
  SmallPtrSet<VNInfo*, 8> usedValues_;

  ~InlineSpiller() {}

public:
  InlineSpiller(MachineFunctionPass &pass,
                MachineFunction &mf,
                VirtRegMap &vrm)
    : pass_(pass),
      mf_(mf),
      lis_(pass.getAnalysis<LiveIntervals>()),
      loops_(pass.getAnalysis<MachineLoopInfo>()),
      vrm_(vrm),
      mfi_(*mf.getFrameInfo()),
      mri_(mf.getRegInfo()),
      tii_(*mf.getTarget().getInstrInfo()),
      tri_(*mf.getTarget().getRegisterInfo()),
      reserved_(tri_.getReservedRegs(mf_)),
      splitAnalysis_(mf, lis_, loops_) {}

  void spill(LiveInterval *li,
             std::vector<LiveInterval*> &newIntervals,
             SmallVectorImpl<LiveInterval*> &spillIs,
             SlotIndex *earliestIndex);

private:
  bool split();

  bool allUsesAvailableAt(const MachineInstr *OrigMI, SlotIndex OrigIdx,
                          SlotIndex UseIdx);
  bool reMaterializeFor(MachineBasicBlock::iterator MI);
  void reMaterializeAll();

  bool coalesceStackAccess(MachineInstr *MI);
  bool foldMemoryOperand(MachineBasicBlock::iterator MI,
                         const SmallVectorImpl<unsigned> &Ops);
  void insertReload(LiveInterval &NewLI, MachineBasicBlock::iterator MI);
  void insertSpill(LiveInterval &NewLI, MachineBasicBlock::iterator MI);
};
}

namespace llvm {
Spiller *createInlineSpiller(MachineFunctionPass &pass,
                             MachineFunction &mf,
                             VirtRegMap &vrm) {
  return new InlineSpiller(pass, mf, vrm);
}
}

/// split - Try splitting the current interval into pieces that may allocate
/// separately. Return true if successful.
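/// Splitting strategies are tried in decreasing scope: around the best loop,
/// then into single-block intervals, and finally inside one basic block.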
bool InlineSpiller::split() {
  splitAnalysis_.analyze(li_);

  if (const MachineLoop *loop = splitAnalysis_.getBestSplitLoop()) {
    // We can split, but li_ may be left intact with fewer uses.
    if (SplitEditor(splitAnalysis_, lis_, vrm_, *newIntervals_)
          .splitAroundLoop(loop))
      return true;
  }

  // Try splitting into single block intervals.
  SplitAnalysis::BlockPtrSet blocks;
  if (splitAnalysis_.getMultiUseBlocks(blocks)) {
    if (SplitEditor(splitAnalysis_, lis_, vrm_, *newIntervals_)
          .splitSingleBlocks(blocks))
      return true;
  }

  // Try splitting inside a basic block.
  if (const MachineBasicBlock *MBB = splitAnalysis_.getBlockForInsideSplit()) {
    if (SplitEditor(splitAnalysis_, lis_, vrm_, *newIntervals_)
          .splitInsideBlock(MBB))
      return true;
  }

  // We may have been able to split out some uses, but the original interval is
  // intact, and it should still be spilled.
  return false;
}

/// allUsesAvailableAt - Return true if all registers used by OrigMI at
/// OrigIdx are also available with the same value at UseIdx.
bool InlineSpiller::allUsesAvailableAt(const MachineInstr *OrigMI,
                                       SlotIndex OrigIdx,
                                       SlotIndex UseIdx) {
  OrigIdx = OrigIdx.getUseIndex();
  UseIdx = UseIdx.getUseIndex();
  for (unsigned i = 0, e = OrigMI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = OrigMI->getOperand(i);
    if (!MO.isReg() || !MO.getReg() || MO.getReg() == li_->reg)
      continue;
    // Reserved registers are OK.
    if (MO.isUndef() || !lis_.hasInterval(MO.getReg()))
      continue;
    // We don't want to move any defs.
    if (MO.isDef())
      return false;
    // We cannot depend on virtual registers in spillIs_. They will be spilled.
    for (unsigned si = 0, se = spillIs_->size(); si != se; ++si)
      if ((*spillIs_)[si]->reg == MO.getReg())
        return false;

    LiveInterval &LI = lis_.getInterval(MO.getReg());
    const VNInfo *OVNI = LI.getVNInfoAt(OrigIdx);
    if (!OVNI)
      continue;
    if (OVNI != LI.getVNInfoAt(UseIdx))
      return false;
  }
  return true;
}

/// reMaterializeFor - Attempt to rematerialize li_->reg before MI instead of
/// reloading it.
bool InlineSpiller::reMaterializeFor(MachineBasicBlock::iterator MI) {
  SlotIndex UseIdx = lis_.getInstructionIndex(MI).getUseIndex();
  VNInfo *OrigVNI = li_->getVNInfoAt(UseIdx);
  if (!OrigVNI) {
    DEBUG(dbgs() << "\tadding <undef> flags: ");
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.isUse() && MO.getReg() == li_->reg)
        MO.setIsUndef();
    }
    DEBUG(dbgs() << UseIdx << '\t' << *MI);
    return true;
  }
  if (!reMattable_.count(OrigVNI)) {
    DEBUG(dbgs() << "\tusing non-remat valno " << OrigVNI->id << ": "
                 << UseIdx << '\t' << *MI);
    return false;
  }
  MachineInstr *OrigMI = lis_.getInstructionFromIndex(OrigVNI->def);
  if (!allUsesAvailableAt(OrigMI, OrigVNI->def, UseIdx)) {
    usedValues_.insert(OrigVNI);
    DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << *MI);
    return false;
  }

  // If the instruction also writes li_->reg, it had better not require the
  // same register for uses and defs.
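  // Rewriting a tied use to a fresh register would break that constraint, so
  // fall back to a reload in that case.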
  bool Reads, Writes;
  SmallVector<unsigned, 8> Ops;
  tie(Reads, Writes) = MI->readsWritesVirtualRegister(li_->reg, &Ops);
  if (Writes) {
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(Ops[i]);
      if (MO.isUse() ? MI->isRegTiedToDefOperand(Ops[i]) : MO.getSubReg()) {
        usedValues_.insert(OrigVNI);
        DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << *MI);
        return false;
      }
    }
  }

  // Allocate a new register for the remat.
  unsigned NewVReg = mri_.createVirtualRegister(rc_);
  vrm_.grow();
  LiveInterval &NewLI = lis_.getOrCreateInterval(NewVReg);
  NewLI.markNotSpillable();
  newIntervals_->push_back(&NewLI);

  // Finally we can rematerialize OrigMI before MI.
  MachineBasicBlock &MBB = *MI->getParent();
  tii_.reMaterialize(MBB, MI, NewLI.reg, 0, OrigMI, tri_);
  MachineBasicBlock::iterator RematMI = MI;
  SlotIndex DefIdx = lis_.InsertMachineInstrInMaps(--RematMI).getDefIndex();
  DEBUG(dbgs() << "\tremat:  " << DefIdx << '\t' << *RematMI);

  // Replace operands with the new virtual register.
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(Ops[i]);
    if (MO.isReg() && MO.isUse() && MO.getReg() == li_->reg) {
      MO.setReg(NewVReg);
      MO.setIsKill();
    }
  }
  DEBUG(dbgs() << "\t        " << UseIdx << '\t' << *MI);

  VNInfo *DefVNI = NewLI.getNextValue(DefIdx, 0, true,
                                      lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(DefIdx, UseIdx.getDefIndex(), DefVNI));
  DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
  return true;
}

/// reMaterializeAll - Try to rematerialize as many uses of li_ as possible,
/// and trim the live ranges after.
void InlineSpiller::reMaterializeAll() {
  // Do a quick scan of the interval values to find if any are remattable.
  reMattable_.clear();
  usedValues_.clear();
  for (LiveInterval::const_vni_iterator I = li_->vni_begin(),
       E = li_->vni_end(); I != E; ++I) {
    VNInfo *VNI = *I;
    if (VNI->isUnused() || !VNI->isDefAccurate())
      continue;
    MachineInstr *DefMI = lis_.getInstructionFromIndex(VNI->def);
    if (!DefMI || !tii_.isTriviallyReMaterializable(DefMI))
      continue;
    reMattable_.insert(VNI);
  }

  // Often, no defs are remattable.
  if (reMattable_.empty())
    return;

  // Try to remat before all uses of li_->reg.
  bool anyRemat = false;
  for (MachineRegisterInfo::use_nodbg_iterator
       RI = mri_.use_nodbg_begin(li_->reg);
       MachineInstr *MI = RI.skipInstruction();)
    anyRemat |= reMaterializeFor(MI);

  if (!anyRemat)
    return;

  // Remove any values that were completely rematted.
  bool anyRemoved = false;
  for (SmallPtrSet<VNInfo*, 8>::iterator I = reMattable_.begin(),
       E = reMattable_.end(); I != E; ++I) {
    VNInfo *VNI = *I;
    if (VNI->hasPHIKill() || usedValues_.count(VNI))
      continue;
    MachineInstr *DefMI = lis_.getInstructionFromIndex(VNI->def);
    DEBUG(dbgs() << "\tremoving dead def: " << VNI->def << '\t' << *DefMI);
    lis_.RemoveMachineInstrFromMaps(DefMI);
    vrm_.RemoveMachineInstrFromMaps(DefMI);
    DefMI->eraseFromParent();
    VNI->setIsDefAccurate(false);
    anyRemoved = true;
  }

  if (!anyRemoved)
    return;

  // Removing values may cause debug uses where li_ is not live.
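  // Scan the remaining uses and remove any DBG_VALUE referring to a deleted
  // value, keeping it only if li_ is still live immediately after it.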
  for (MachineRegisterInfo::use_iterator RI = mri_.use_begin(li_->reg);
       MachineInstr *MI = RI.skipInstruction();) {
    if (!MI->isDebugValue())
      continue;
    // Try to preserve the debug value if li_ is live immediately after it.
    MachineBasicBlock::iterator NextMI = MI;
    ++NextMI;
    if (NextMI != MI->getParent()->end() && !lis_.isNotInMIMap(NextMI)) {
      VNInfo *VNI = li_->getVNInfoAt(lis_.getInstructionIndex(NextMI));
      if (VNI && (VNI->hasPHIKill() || usedValues_.count(VNI)))
        continue;
    }
    DEBUG(dbgs() << "Removing debug info due to remat:" << "\t" << *MI);
    MI->eraseFromParent();
  }
}

/// coalesceStackAccess - If MI is a load or store of stackSlot_, it can be
/// removed.
bool InlineSpiller::coalesceStackAccess(MachineInstr *MI) {
  int FI = 0;
  unsigned reg;
  if (!(reg = tii_.isLoadFromStackSlot(MI, FI)) &&
      !(reg = tii_.isStoreToStackSlot(MI, FI)))
    return false;

  // We have a stack access. Is it the right register and slot?
  if (reg != li_->reg || FI != stackSlot_)
    return false;

  DEBUG(dbgs() << "Coalescing stack access: " << *MI);
  lis_.RemoveMachineInstrFromMaps(MI);
  MI->eraseFromParent();
  return true;
}

/// foldMemoryOperand - Try folding stack slot references in Ops into MI.
/// Return true on success, and MI will be erased.
bool InlineSpiller::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                      const SmallVectorImpl<unsigned> &Ops) {
  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
  // operands.
  SmallVector<unsigned, 8> FoldOps;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned Idx = Ops[i];
    MachineOperand &MO = MI->getOperand(Idx);
    if (MO.isImplicit())
      continue;
    // FIXME: Teach targets to deal with subregs.
    if (MO.getSubReg())
      return false;
    // Tied use operands should not be passed to foldMemoryOperand.
    if (!MI->isRegTiedToDefOperand(Idx))
      FoldOps.push_back(Idx);
  }

  MachineInstr *FoldMI = tii_.foldMemoryOperand(MI, FoldOps, stackSlot_);
  if (!FoldMI)
    return false;
  lis_.ReplaceMachineInstrInMaps(MI, FoldMI);
  vrm_.addSpillSlotUse(stackSlot_, FoldMI);
  MI->eraseFromParent();
  DEBUG(dbgs() << "\tfolded: " << *FoldMI);
  return true;
}

/// insertReload - Insert a reload of NewLI.reg before MI.
void InlineSpiller::insertReload(LiveInterval &NewLI,
                                 MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();
  SlotIndex Idx = lis_.getInstructionIndex(MI).getDefIndex();
  tii_.loadRegFromStackSlot(MBB, MI, NewLI.reg, stackSlot_, rc_, &tri_);
  --MI; // Point to load instruction.
  SlotIndex LoadIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
  vrm_.addSpillSlotUse(stackSlot_, MI);
  DEBUG(dbgs() << "\treload: " << LoadIdx << '\t' << *MI);
  VNInfo *LoadVNI = NewLI.getNextValue(LoadIdx, 0, true,
                                       lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(LoadIdx, Idx, LoadVNI));
}

/// insertSpill - Insert a spill of NewLI.reg after MI.
void InlineSpiller::insertSpill(LiveInterval &NewLI,
                                MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();
  SlotIndex Idx = lis_.getInstructionIndex(MI).getDefIndex();
  tii_.storeRegToStackSlot(MBB, ++MI, NewLI.reg, true, stackSlot_, rc_, &tri_);
  --MI; // Point to store instruction.
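  // Index the new store and record that it uses the spill slot.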
  SlotIndex StoreIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
  vrm_.addSpillSlotUse(stackSlot_, MI);
  DEBUG(dbgs() << "\tspilled: " << StoreIdx << '\t' << *MI);
  VNInfo *StoreVNI = NewLI.getNextValue(Idx, 0, true,
                                        lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(Idx, StoreIdx, StoreVNI));
}

void InlineSpiller::spill(LiveInterval *li,
                          std::vector<LiveInterval*> &newIntervals,
                          SmallVectorImpl<LiveInterval*> &spillIs,
                          SlotIndex *earliestIndex) {
  DEBUG(dbgs() << "Inline spilling " << *li << "\n");
  assert(li->isSpillable() && "Attempting to spill already spilled value.");
  assert(!li->isStackSlot() && "Trying to spill a stack slot.");

  li_ = li;
  newIntervals_ = &newIntervals;
  rc_ = mri_.getRegClass(li->reg);
  spillIs_ = &spillIs;

  if (split())
    return;

  reMaterializeAll();

  // Remat may handle everything.
  if (li_->empty())
    return;

  stackSlot_ = vrm_.getStackSlot(li->reg);
  if (stackSlot_ == VirtRegMap::NO_STACK_SLOT)
    stackSlot_ = vrm_.assignVirt2StackSlot(li->reg);

  // Iterate over instructions using the register.
  for (MachineRegisterInfo::reg_iterator RI = mri_.reg_begin(li->reg);
       MachineInstr *MI = RI.skipInstruction();) {

    // Debug values are not allowed to affect codegen.
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      uint64_t Offset = MI->getOperand(1).getImm();
      const MDNode *MDPtr = MI->getOperand(2).getMetadata();
      DebugLoc DL = MI->getDebugLoc();
      if (MachineInstr *NewDV = tii_.emitFrameIndexDebugValue(mf_, stackSlot_,
                                                              Offset, MDPtr, DL)) {
        DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
        MachineBasicBlock *MBB = MI->getParent();
        MBB->insert(MBB->erase(MI), NewDV);
      } else {
        DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
        MI->eraseFromParent();
      }
      continue;
    }

    // Stack slot accesses may coalesce away.
    if (coalesceStackAccess(MI))
      continue;

    // Analyze instruction.
    bool Reads, Writes;
    SmallVector<unsigned, 8> Ops;
    tie(Reads, Writes) = MI->readsWritesVirtualRegister(li->reg, &Ops);

    // Attempt to fold memory ops.
    if (foldMemoryOperand(MI, Ops))
      continue;

    // Allocate interval around instruction.
    // FIXME: Infer regclass from instruction alone.
    unsigned NewVReg = mri_.createVirtualRegister(rc_);
    vrm_.grow();
    LiveInterval &NewLI = lis_.getOrCreateInterval(NewVReg);
    NewLI.markNotSpillable();

    if (Reads)
      insertReload(NewLI, MI);

    // Rewrite instruction operands.
    bool hasLiveDef = false;
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(Ops[i]);
      MO.setReg(NewVReg);
      if (MO.isUse()) {
        if (!MI->isRegTiedToDefOperand(Ops[i]))
          MO.setIsKill();
      } else {
        if (!MO.isDead())
          hasLiveDef = true;
      }
    }

    // FIXME: Use a second vreg if instruction has no tied ops.
    if (Writes && hasLiveDef)
      insertSpill(NewLI, MI);

    DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
    newIntervals.push_back(&NewLI);
  }
}