//===-------- InlineSpiller.cpp - Insert spills and restores inline -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The inline spiller modifies the machine function directly instead of
// inserting spills and restores in VirtRegMap.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "spiller"
#include "Spiller.h"
#include "SplitKit.h"
#include "VirtRegMap.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

namespace {
class InlineSpiller : public Spiller {
  // Context objects, all owned by the caller / pass manager.
  MachineFunctionPass &pass_;
  MachineFunction &mf_;
  LiveIntervals &lis_;
  MachineLoopInfo &loops_;
  VirtRegMap &vrm_;
  MachineFrameInfo &mfi_;
  MachineRegisterInfo &mri_;
  const TargetInstrInfo &tii_;
  const TargetRegisterInfo &tri_;
  // Registers reserved by the target; computed once at construction.
  // NOTE(review): not referenced in the visible code — presumably kept for
  // future use or used elsewhere; verify before removing.
  const BitVector reserved_;

  // Helper for the live-range splitting attempted before spilling.
  SplitAnalysis splitAnalysis_;

  // Variables that are valid during spill(), but used by multiple methods.
  LiveInterval *li_;                              // interval being spilled
  std::vector<LiveInterval*> *newIntervals_;      // out-list of created intervals
  const TargetRegisterClass *rc_;                 // register class of li_->reg
  int stackSlot_;                                 // assigned spill slot
  const SmallVectorImpl<LiveInterval*> *spillIs_; // other intervals being spilled

  // Values of the current interval that can potentially remat.
  SmallPtrSet<VNInfo*, 8> reMattable_;

  // Values in reMattable_ that failed to remat at some point.
  SmallPtrSet<VNInfo*, 8> usedValues_;

  // Private destructor: clients destroy through the Spiller interface
  // (presumably a virtual destructor declared in Spiller.h — confirm).
  ~InlineSpiller() {}

public:
  InlineSpiller(MachineFunctionPass &pass,
                MachineFunction &mf,
                VirtRegMap &vrm)
    : pass_(pass),
      mf_(mf),
      lis_(pass.getAnalysis<LiveIntervals>()),
      loops_(pass.getAnalysis<MachineLoopInfo>()),
      vrm_(vrm),
      mfi_(*mf.getFrameInfo()),
      mri_(mf.getRegInfo()),
      tii_(*mf.getTarget().getInstrInfo()),
      tri_(*mf.getTarget().getRegisterInfo()),
      reserved_(tri_.getReservedRegs(mf_)),
      splitAnalysis_(mf, lis_, loops_) {}

  void spill(LiveInterval *li,
             std::vector<LiveInterval*> &newIntervals,
             SmallVectorImpl<LiveInterval*> &spillIs,
             SlotIndex *earliestIndex);

private:
  bool split();

  bool allUsesAvailableAt(const MachineInstr *OrigMI, SlotIndex OrigIdx,
                          SlotIndex UseIdx);
  bool reMaterializeFor(MachineBasicBlock::iterator MI);
  void reMaterializeAll();

  bool coalesceStackAccess(MachineInstr *MI);
  bool foldMemoryOperand(MachineBasicBlock::iterator MI,
                         const SmallVectorImpl<unsigned> &Ops);
  void insertReload(LiveInterval &NewLI, MachineBasicBlock::iterator MI);
  void insertSpill(LiveInterval &NewLI, MachineBasicBlock::iterator MI);
};
}

namespace llvm {
/// createInlineSpiller - Factory for the inline spiller. The caller owns the
/// returned object (raw new; destruction goes through the Spiller interface
/// since ~InlineSpiller is private).
Spiller *createInlineSpiller(MachineFunctionPass &pass,
                             MachineFunction &mf,
                             VirtRegMap &vrm) {
  return new InlineSpiller(pass, mf, vrm);
}
}

/// split - try splitting the current interval into pieces that may allocate
/// separately. Return true if successful.
bool InlineSpiller::split() {
  // FIXME: Add intra-MBB splitting.
  if (lis_.intervalIsInOneMBB(*li_))
    return false;

  splitAnalysis_.analyze(li_);

  // First preference: split around the best loop found by the analysis.
  if (const MachineLoop *loop = splitAnalysis_.getBestSplitLoop()) {
    // We can split, but li_ may be left intact with fewer uses.
    if (SplitEditor(splitAnalysis_, lis_, vrm_, *newIntervals_)
          .splitAroundLoop(loop))
      return true;
  }

  // Try splitting into single block intervals.
  SplitAnalysis::BlockPtrSet blocks;
  if (splitAnalysis_.getMultiUseBlocks(blocks)) {
    if (SplitEditor(splitAnalysis_, lis_, vrm_, *newIntervals_)
          .splitSingleBlocks(blocks))
      return true;
  }

  return false;
}

/// allUsesAvailableAt - Return true if all registers used by OrigMI at
/// OrigIdx are also available with the same value at UseIdx.
/// This is the legality check for rematerializing OrigMI at UseIdx: every
/// register OrigMI reads must carry the same VNInfo at both points.
bool InlineSpiller::allUsesAvailableAt(const MachineInstr *OrigMI,
                                       SlotIndex OrigIdx,
                                       SlotIndex UseIdx) {
  // Normalize both indexes to their use slots before comparing values.
  OrigIdx = OrigIdx.getUseIndex();
  UseIdx = UseIdx.getUseIndex();
  for (unsigned i = 0, e = OrigMI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = OrigMI->getOperand(i);
    // Skip non-register operands and the register being spilled itself.
    if (!MO.isReg() || !MO.getReg() || MO.getReg() == li_->reg)
      continue;
    // Reserved registers are OK.
    if (MO.isUndef() || !lis_.hasInterval(MO.getReg()))
      continue;
    // We don't want to move any defs.
    if (MO.isDef())
      return false;
    // We cannot depend on virtual registers in spillIs_. They will be spilled.
    for (unsigned si = 0, se = spillIs_->size(); si != se; ++si)
      if ((*spillIs_)[si]->reg == MO.getReg())
        return false;

    // The operand's value must be identical at the original def point and at
    // the proposed remat point.
    LiveInterval &LI = lis_.getInterval(MO.getReg());
    const VNInfo *OVNI = LI.getVNInfoAt(OrigIdx);
    if (!OVNI)
      continue;
    if (OVNI != LI.getVNInfoAt(UseIdx))
      return false;
  }
  return true;
}

/// reMaterializeFor - Attempt to rematerialize li_->reg before MI instead of
/// reloading it.
bool InlineSpiller::reMaterializeFor(MachineBasicBlock::iterator MI) {
  SlotIndex UseIdx = lis_.getInstructionIndex(MI).getUseIndex();
  VNInfo *OrigVNI = li_->getVNInfoAt(UseIdx);
  // No live value here: the use reads an undefined value. Mark the operands
  // <undef> instead of reloading, and report success (no reload needed).
  if (!OrigVNI) {
    DEBUG(dbgs() << "\tadding <undef> flags: ");
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.isUse() && MO.getReg() == li_->reg)
        MO.setIsUndef();
    }
    DEBUG(dbgs() << UseIdx << '\t' << *MI);
    return true;
  }
  // Value was not classified as rematerializable by reMaterializeAll().
  if (!reMattable_.count(OrigVNI)) {
    DEBUG(dbgs() << "\tusing non-remat valno " << OrigVNI->id << ": "
                 << UseIdx << '\t' << *MI);
    return false;
  }
  // The defining instruction's inputs must carry the same values at the remat
  // point; otherwise record the value as used (it will need a real reload).
  MachineInstr *OrigMI = lis_.getInstructionFromIndex(OrigVNI->def);
  if (!allUsesAvailableAt(OrigMI, OrigVNI->def, UseIdx)) {
    usedValues_.insert(OrigVNI);
    DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << *MI);
    return false;
  }

  // If the instruction also writes li_->reg, it had better not require the same
  // register for uses and defs.
  bool Reads, Writes;
  SmallVector<unsigned, 8> Ops;
  tie(Reads, Writes) = MI->readsWritesVirtualRegister(li_->reg, &Ops);
  if (Writes) {
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(Ops[i]);
      // Reject tied uses and subreg defs; both would force the remat value
      // and the new def into the same register.
      if (MO.isUse() ? MI->isRegTiedToDefOperand(Ops[i]) : MO.getSubReg()) {
        usedValues_.insert(OrigVNI);
        DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << *MI);
        return false;
      }
    }
  }

  // Allocate a new register for the remat.
  unsigned NewVReg = mri_.createVirtualRegister(rc_);
  vrm_.grow();
  LiveInterval &NewLI = lis_.getOrCreateInterval(NewVReg);
  NewLI.markNotSpillable();
  newIntervals_->push_back(&NewLI);

  // Finally we can rematerialize OrigMI before MI.
  MachineBasicBlock &MBB = *MI->getParent();
  tii_.reMaterialize(MBB, MI, NewLI.reg, 0, OrigMI, tri_);
  // reMaterialize inserted the clone immediately before MI; step back to it
  // and register it with the slot index maps.
  MachineBasicBlock::iterator RematMI = MI;
  SlotIndex DefIdx = lis_.InsertMachineInstrInMaps(--RematMI).getDefIndex();
  DEBUG(dbgs() << "\tremat: " << DefIdx << '\t' << *RematMI);

  // Replace operands of MI that read li_->reg with the remat register.
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(Ops[i]);
    if (MO.isReg() && MO.isUse() && MO.getReg() == li_->reg) {
      MO.setReg(NewVReg);
      MO.setIsKill();
    }
  }
  DEBUG(dbgs() << "\t " << UseIdx << '\t' << *MI);

  // The new interval is live from the remat def up to the (rewritten) use.
  VNInfo *DefVNI = NewLI.getNextValue(DefIdx, 0, true,
                                      lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(DefIdx, UseIdx.getDefIndex(), DefVNI));
  DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
  return true;
}

/// reMaterializeAll - Try to rematerialize as many uses of li_ as possible,
/// and trim the live ranges after.
void InlineSpiller::reMaterializeAll() {
  // Do a quick scan of the interval values to find if any are remattable.
  reMattable_.clear();
  usedValues_.clear();
  for (LiveInterval::const_vni_iterator I = li_->vni_begin(),
       E = li_->vni_end(); I != E; ++I) {
    VNInfo *VNI = *I;
    if (VNI->isUnused() || !VNI->isDefAccurate())
      continue;
    MachineInstr *DefMI = lis_.getInstructionFromIndex(VNI->def);
    if (!DefMI || !tii_.isTriviallyReMaterializable(DefMI))
      continue;
    reMattable_.insert(VNI);
  }

  // Often, no defs are remattable.
  if (reMattable_.empty())
    return;

  // Try to remat before all uses of li_->reg.
  // skipInstruction() advances past all operands of the current instruction,
  // so it is safe against the operand rewriting done by reMaterializeFor.
  bool anyRemat = false;
  for (MachineRegisterInfo::use_nodbg_iterator
       RI = mri_.use_nodbg_begin(li_->reg);
       MachineInstr *MI = RI.skipInstruction();)
    anyRemat |= reMaterializeFor(MI);

  if (!anyRemat)
    return;

  // Remove any values that were completely rematted.
  bool anyRemoved = false;
  for (SmallPtrSet<VNInfo*, 8>::iterator I = reMattable_.begin(),
       E = reMattable_.end(); I != E; ++I) {
    VNInfo *VNI = *I;
    // Values with PHI kills or remaining uses must keep their def.
    if (VNI->hasPHIKill() || usedValues_.count(VNI))
      continue;
    MachineInstr *DefMI = lis_.getInstructionFromIndex(VNI->def);
    DEBUG(dbgs() << "\tremoving dead def: " << VNI->def << '\t' << *DefMI);
    lis_.RemoveMachineInstrFromMaps(DefMI);
    vrm_.RemoveMachineInstrFromMaps(DefMI);
    DefMI->eraseFromParent();
    VNI->setIsDefAccurate(false);
    anyRemoved = true;
  }

  if (!anyRemoved)
    return;

  // Removing values may cause debug uses where li_ is not live.
  for (MachineRegisterInfo::use_iterator RI = mri_.use_begin(li_->reg);
       MachineInstr *MI = RI.skipInstruction();) {
    if (!MI->isDebugValue())
      continue;
    // Try to preserve the debug value if li_ is live immediately after it.
    MachineBasicBlock::iterator NextMI = MI;
    ++NextMI;
    if (NextMI != MI->getParent()->end() && !lis_.isNotInMIMap(NextMI)) {
      VNInfo *VNI = li_->getVNInfoAt(lis_.getInstructionIndex(NextMI));
      if (VNI && (VNI->hasPHIKill() || usedValues_.count(VNI)))
        continue;
    }
    DEBUG(dbgs() << "Removing debug info due to remat:" << "\t" << *MI);
    MI->eraseFromParent();
  }
}

/// If MI is a load or store of stackSlot_, it can be removed.
/// coalesceStackAccess - A reload followed later by a spill of the same slot
/// would be a no-op; erase such redundant stack accesses. Returns true if MI
/// was erased.
bool InlineSpiller::coalesceStackAccess(MachineInstr *MI) {
  int FI = 0;
  unsigned reg;
  if (!(reg = tii_.isLoadFromStackSlot(MI, FI)) &&
      !(reg = tii_.isStoreToStackSlot(MI, FI)))
    return false;

  // We have a stack access. Is it the right register and slot?
  if (reg != li_->reg || FI != stackSlot_)
    return false;

  DEBUG(dbgs() << "Coalescing stack access: " << *MI);
  lis_.RemoveMachineInstrFromMaps(MI);
  MI->eraseFromParent();
  return true;
}

/// foldMemoryOperand - Try folding stack slot references in Ops into MI.
326 /// Return true on success, and MI will be erased. 327 bool InlineSpiller::foldMemoryOperand(MachineBasicBlock::iterator MI, 328 const SmallVectorImpl<unsigned> &Ops) { 329 // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied 330 // operands. 331 SmallVector<unsigned, 8> FoldOps; 332 for (unsigned i = 0, e = Ops.size(); i != e; ++i) { 333 unsigned Idx = Ops[i]; 334 MachineOperand &MO = MI->getOperand(Idx); 335 if (MO.isImplicit()) 336 continue; 337 // FIXME: Teach targets to deal with subregs. 338 if (MO.getSubReg()) 339 return false; 340 // Tied use operands should not be passed to foldMemoryOperand. 341 if (!MI->isRegTiedToDefOperand(Idx)) 342 FoldOps.push_back(Idx); 343 } 344 345 MachineInstr *FoldMI = tii_.foldMemoryOperand(MI, FoldOps, stackSlot_); 346 if (!FoldMI) 347 return false; 348 lis_.ReplaceMachineInstrInMaps(MI, FoldMI); 349 vrm_.addSpillSlotUse(stackSlot_, FoldMI); 350 MI->eraseFromParent(); 351 DEBUG(dbgs() << "\tfolded: " << *FoldMI); 352 return true; 353 } 354 355 /// insertReload - Insert a reload of NewLI.reg before MI. 356 void InlineSpiller::insertReload(LiveInterval &NewLI, 357 MachineBasicBlock::iterator MI) { 358 MachineBasicBlock &MBB = *MI->getParent(); 359 SlotIndex Idx = lis_.getInstructionIndex(MI).getDefIndex(); 360 tii_.loadRegFromStackSlot(MBB, MI, NewLI.reg, stackSlot_, rc_, &tri_); 361 --MI; // Point to load instruction. 362 SlotIndex LoadIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex(); 363 vrm_.addSpillSlotUse(stackSlot_, MI); 364 DEBUG(dbgs() << "\treload: " << LoadIdx << '\t' << *MI); 365 VNInfo *LoadVNI = NewLI.getNextValue(LoadIdx, 0, true, 366 lis_.getVNInfoAllocator()); 367 NewLI.addRange(LiveRange(LoadIdx, Idx, LoadVNI)); 368 } 369 370 /// insertSpill - Insert a spill of NewLI.reg after MI. 
void InlineSpiller::insertSpill(LiveInterval &NewLI,
                                MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();
  SlotIndex Idx = lis_.getInstructionIndex(MI).getDefIndex();
  // Insert the store after MI (++MI), then back up so MI points at the store.
  tii_.storeRegToStackSlot(MBB, ++MI, NewLI.reg, true, stackSlot_, rc_, &tri_);
  --MI; // Point to store instruction.
  SlotIndex StoreIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
  vrm_.addSpillSlotUse(stackSlot_, MI);
  DEBUG(dbgs() << "\tspilled: " << StoreIdx << '\t' << *MI);
  // NewLI is live from the def at MI until the store consumes it.
  VNInfo *StoreVNI = NewLI.getNextValue(Idx, 0, true,
                                        lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(Idx, StoreIdx, StoreVNI));
}

/// spill - Spill li to a stack slot, inserting reloads/spills around each use
/// and collecting the short new intervals in newIntervals. First tries live
/// range splitting and rematerialization, which may make the full spill
/// unnecessary.
/// NOTE(review): earliestIndex is not referenced in this visible body —
/// presumably reserved for the Spiller interface; confirm against Spiller.h.
void InlineSpiller::spill(LiveInterval *li,
                          std::vector<LiveInterval*> &newIntervals,
                          SmallVectorImpl<LiveInterval*> &spillIs,
                          SlotIndex *earliestIndex) {
  DEBUG(dbgs() << "Inline spilling " << *li << "\n");
  assert(li->isSpillable() && "Attempting to spill already spilled value.");
  assert(!li->isStackSlot() && "Trying to spill a stack slot.");

  // Stash per-spill state for the helper methods.
  li_ = li;
  newIntervals_ = &newIntervals;
  rc_ = mri_.getRegClass(li->reg);
  spillIs_ = &spillIs;

  // Prefer splitting the live range over spilling it.
  if (split())
    return;

  reMaterializeAll();

  // Remat may handle everything.
  if (li_->empty())
    return;

  // Reuse an already-assigned stack slot, or assign a fresh one.
  stackSlot_ = vrm_.getStackSlot(li->reg);
  if (stackSlot_ == VirtRegMap::NO_STACK_SLOT)
    stackSlot_ = vrm_.assignVirt2StackSlot(li->reg);

  // Iterate over instructions using register.
  // skipInstruction() advances past the current instruction before it is
  // rewritten or erased below, keeping the iteration safe.
  for (MachineRegisterInfo::reg_iterator RI = mri_.reg_begin(li->reg);
       MachineInstr *MI = RI.skipInstruction();) {

    // Debug values are not allowed to affect codegen.
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      // DBG_VALUE operand layout: 0 = reg, 1 = offset imm, 2 = metadata.
      uint64_t Offset = MI->getOperand(1).getImm();
      const MDNode *MDPtr = MI->getOperand(2).getMetadata();
      DebugLoc DL = MI->getDebugLoc();
      if (MachineInstr *NewDV = tii_.emitFrameIndexDebugValue(mf_, stackSlot_,
                                                              Offset, MDPtr, DL)) {
        DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
        MachineBasicBlock *MBB = MI->getParent();
        MBB->insert(MBB->erase(MI), NewDV);
      } else {
        // Target cannot describe a frame-index DBG_VALUE; drop the debug info.
        DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
        MI->eraseFromParent();
      }
      continue;
    }

    // Stack slot accesses may coalesce away.
    if (coalesceStackAccess(MI))
      continue;

    // Analyze instruction.
    bool Reads, Writes;
    SmallVector<unsigned, 8> Ops;
    tie(Reads, Writes) = MI->readsWritesVirtualRegister(li->reg, &Ops);

    // Attempt to fold memory ops.
    if (foldMemoryOperand(MI, Ops))
      continue;

    // Allocate interval around instruction.
    // FIXME: Infer regclass from instruction alone.
    unsigned NewVReg = mri_.createVirtualRegister(rc_);
    vrm_.grow();
    LiveInterval &NewLI = lis_.getOrCreateInterval(NewVReg);
    NewLI.markNotSpillable();

    if (Reads)
      insertReload(NewLI, MI);

    // Rewrite instruction operands.
    bool hasLiveDef = false;
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(Ops[i]);
      MO.setReg(NewVReg);
      if (MO.isUse()) {
        // Untied uses kill the short new interval at this instruction.
        if (!MI->isRegTiedToDefOperand(Ops[i]))
          MO.setIsKill();
      } else {
        if (!MO.isDead())
          hasLiveDef = true;
      }
    }

    // FIXME: Use a second vreg if instruction has no tied ops.
    if (Writes && hasLiveDef)
      insertSpill(NewLI, MI);

    DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
    newIntervals.push_back(&NewLI);
  }
}