//===-------- InlineSpiller.cpp - Insert spills and restores inline -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The inline spiller modifies the machine function directly instead of
// inserting spills and restores in VirtRegMap.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "spiller"
#include "Spiller.h"
#include "VirtRegMap.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

namespace {
class InlineSpiller : public Spiller {
  MachineFunction &mf_;
  LiveIntervals &lis_;
  VirtRegMap &vrm_;
  MachineFrameInfo &mfi_;
  MachineRegisterInfo &mri_;
  const TargetInstrInfo &tii_;
  const TargetRegisterInfo &tri_;
  const BitVector reserved_;

  // Variables that are valid during spill(), but used by multiple methods.
  LiveInterval *li_;
  const TargetRegisterClass *rc_;
  int stackSlot_;
  const SmallVectorImpl<LiveInterval*> *spillIs_;

  ~InlineSpiller() {}

public:
  InlineSpiller(MachineFunction *mf, LiveIntervals *lis, VirtRegMap *vrm)
    : mf_(*mf), lis_(*lis), vrm_(*vrm),
      mfi_(*mf->getFrameInfo()),
      mri_(mf->getRegInfo()),
      tii_(*mf->getTarget().getInstrInfo()),
      tri_(*mf->getTarget().getRegisterInfo()),
      reserved_(tri_.getReservedRegs(mf_)) {}

  void spill(LiveInterval *li,
             std::vector<LiveInterval*> &newIntervals,
             SmallVectorImpl<LiveInterval*> &spillIs,
             SlotIndex *earliestIndex);
  bool reMaterialize(LiveInterval &NewLI, MachineBasicBlock::iterator MI);
  bool foldMemoryOperand(MachineBasicBlock::iterator MI,
                         const SmallVectorImpl<unsigned> &Ops);
  void insertReload(LiveInterval &NewLI, MachineBasicBlock::iterator MI);
  void insertSpill(LiveInterval &NewLI, MachineBasicBlock::iterator MI);
};
}

namespace llvm {
Spiller *createInlineSpiller(MachineFunction *mf,
                             LiveIntervals *lis,
                             const MachineLoopInfo *mli,
                             VirtRegMap *vrm) {
  return new InlineSpiller(mf, lis, vrm);
}
}

/// reMaterialize - Attempt to rematerialize li_->reg before MI instead of
/// reloading it.
bool InlineSpiller::reMaterialize(LiveInterval &NewLI,
                                  MachineBasicBlock::iterator MI) {
  SlotIndex UseIdx = lis_.getInstructionIndex(MI).getUseIndex();
  LiveRange *LR = li_->getLiveRangeContaining(UseIdx);
  if (!LR) {
    DEBUG(dbgs() << "\tundef at " << UseIdx << ", adding <undef> flags.\n");
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.isUse() && MO.getReg() == li_->reg)
        MO.setIsUndef();
    }
    return true;
  }

  // Find the instruction that defined this value of li_->reg.
  if (!LR->valno->isDefAccurate())
    return false;
  SlotIndex OrigDefIdx = LR->valno->def;
  MachineInstr *OrigDefMI = lis_.getInstructionFromIndex(OrigDefIdx);
  if (!OrigDefMI)
    return false;

  // FIXME: Provide AliasAnalysis argument.
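  // Only instructions the target declares trivially rematerializable may be
  // recomputed at the use point: they have no side effects, and their value
  // does not depend on anything that may have changed since the original def
  // (modulo the register dependencies checked below).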
  if (!tii_.isTriviallyReMaterializable(OrigDefMI))
    return false;

  // A rematerializable instruction may be using other virtual registers.
  // Make sure they are available at the new location.
  for (unsigned i = 0, e = OrigDefMI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = OrigDefMI->getOperand(i);
    if (!MO.isReg() || !MO.getReg() || MO.getReg() == li_->reg)
      continue;
    // Reserved physregs are OK. Others are not (probably from coalescing).
    if (TargetRegisterInfo::isPhysicalRegister(MO.getReg())) {
      if (reserved_.test(MO.getReg()))
        continue;
      else
        return false;
    }
    // We don't want to move any virtual defs.
    if (MO.isDef())
      return false;
    // We have a use of a virtual register other than li_->reg.
    if (MO.isUndef())
      continue;
    // We cannot depend on virtual registers in spillIs_. They will be spilled.
    for (unsigned si = 0, se = spillIs_->size(); si != se; ++si)
      if ((*spillIs_)[si]->reg == MO.getReg())
        return false;

    // Is the register available here with the same value as at OrigDefMI?
    LiveInterval &ULI = lis_.getInterval(MO.getReg());
    LiveRange *HereLR = ULI.getLiveRangeContaining(UseIdx);
    if (!HereLR)
      return false;
    LiveRange *DefLR = ULI.getLiveRangeContaining(OrigDefIdx.getUseIndex());
    if (!DefLR || DefLR->valno != HereLR->valno)
      return false;
  }

  // Finally we can rematerialize OrigDefMI before MI.
  MachineBasicBlock &MBB = *MI->getParent();
  tii_.reMaterialize(MBB, MI, NewLI.reg, 0, OrigDefMI, tri_);
  SlotIndex DefIdx = lis_.InsertMachineInstrInMaps(--MI).getDefIndex();
  DEBUG(dbgs() << "\tremat: " << DefIdx << '\t' << *MI);
  VNInfo *DefVNI = NewLI.getNextValue(DefIdx, 0, true,
                                      lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(DefIdx, UseIdx.getDefIndex(), DefVNI));
  return true;
}

/// foldMemoryOperand - Try folding stack slot references in Ops into MI.
/// Return true on success, and MI will be erased.
bool InlineSpiller::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                      const SmallVectorImpl<unsigned> &Ops) {
  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
  // operands.
  SmallVector<unsigned, 8> FoldOps;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned Idx = Ops[i];
    MachineOperand &MO = MI->getOperand(Idx);
    if (MO.isImplicit())
      continue;
    // FIXME: Teach targets to deal with subregs.
    if (MO.getSubReg())
      return false;
    // Tied use operands should not be passed to foldMemoryOperand.
    if (!MI->isRegTiedToDefOperand(Idx))
      FoldOps.push_back(Idx);
  }

  MachineInstr *FoldMI = tii_.foldMemoryOperand(mf_, MI, FoldOps, stackSlot_);
  if (!FoldMI)
    return false;
  MachineBasicBlock &MBB = *MI->getParent();
  lis_.ReplaceMachineInstrInMaps(MI, FoldMI);
  vrm_.addSpillSlotUse(stackSlot_, FoldMI);
  MBB.insert(MBB.erase(MI), FoldMI);
  DEBUG(dbgs() << "\tfolded: " << *FoldMI);
  return true;
}

/// insertReload - Insert a reload of NewLI.reg before MI.
void InlineSpiller::insertReload(LiveInterval &NewLI,
                                 MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();
  SlotIndex Idx = lis_.getInstructionIndex(MI).getDefIndex();
  tii_.loadRegFromStackSlot(MBB, MI, NewLI.reg, stackSlot_, rc_, &tri_);
  --MI; // Point to load instruction.
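  // MI now points at the new load. Give it a slot index, and below, a live
  // range covering the loaded value from the load up to the original
  // instruction at Idx.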
  SlotIndex LoadIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
  vrm_.addSpillSlotUse(stackSlot_, MI);
  DEBUG(dbgs() << "\treload: " << LoadIdx << '\t' << *MI);
  VNInfo *LoadVNI = NewLI.getNextValue(LoadIdx, 0, true,
                                       lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(LoadIdx, Idx, LoadVNI));
}

/// insertSpill - Insert a spill of NewLI.reg after MI.
void InlineSpiller::insertSpill(LiveInterval &NewLI,
                                MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();
  SlotIndex Idx = lis_.getInstructionIndex(MI).getDefIndex();
  tii_.storeRegToStackSlot(MBB, ++MI, NewLI.reg, true, stackSlot_, rc_, &tri_);
  --MI; // Point to store instruction.
  SlotIndex StoreIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
  vrm_.addSpillSlotUse(stackSlot_, MI);
  DEBUG(dbgs() << "\tspilled: " << StoreIdx << '\t' << *MI);
  VNInfo *StoreVNI = NewLI.getNextValue(Idx, 0, true,
                                        lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(Idx, StoreIdx, StoreVNI));
}

void InlineSpiller::spill(LiveInterval *li,
                          std::vector<LiveInterval*> &newIntervals,
                          SmallVectorImpl<LiveInterval*> &spillIs,
                          SlotIndex *earliestIndex) {
  DEBUG(dbgs() << "Inline spilling " << *li << "\n");
  assert(li->isSpillable() && "Attempting to spill already spilled value.");
  assert(!li->isStackSlot() && "Trying to spill a stack slot.");

  li_ = li;
  rc_ = mri_.getRegClass(li->reg);
  stackSlot_ = vrm_.assignVirt2StackSlot(li->reg);
  spillIs_ = &spillIs;

  // Iterate over instructions using register.
  for (MachineRegisterInfo::reg_iterator RI = mri_.reg_begin(li->reg);
       MachineInstr *MI = RI.skipInstruction();) {

    // Analyze instruction.
    bool Reads, Writes;
    SmallVector<unsigned, 8> Ops;
    tie(Reads, Writes) = MI->readsWritesVirtualRegister(li->reg, &Ops);

    // Allocate interval around instruction.
    // FIXME: Infer regclass from instruction alone.
    unsigned NewVReg = mri_.createVirtualRegister(rc_);
    vrm_.grow();
    LiveInterval &NewLI = lis_.getOrCreateInterval(NewVReg);
    NewLI.markNotSpillable();

    // Attempt remat instead of reload.
    bool NeedsReload = Reads && !reMaterialize(NewLI, MI);

    // Attempt to fold memory ops.
    if (NewLI.empty() && foldMemoryOperand(MI, Ops))
      continue;

    if (NeedsReload)
      insertReload(NewLI, MI);

    // Rewrite instruction operands.
    bool hasLiveDef = false;
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(Ops[i]);
      MO.setReg(NewVReg);
      if (MO.isUse()) {
        if (!MI->isRegTiedToDefOperand(Ops[i]))
          MO.setIsKill();
      } else {
        if (!MO.isDead())
          hasLiveDef = true;
      }
    }

    // FIXME: Use a second vreg if instruction has no tied ops.
    if (Writes && hasLiveDef)
      insertSpill(NewLI, MI);

    DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
    newIntervals.push_back(&NewLI);
  }
}
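
// Usage sketch (illustrative only; variable names are assumptions): a
// register allocator that has decided to spill `li` drives this spiller
// roughly like so, given the analyses `mf`, `lis`, `mli`, and `vrm` it owns:
//
//   Spiller *spiller = createInlineSpiller(mf, lis, mli, vrm);
//   std::vector<LiveInterval*> added;
//   SmallVector<LiveInterval*, 4> spillIs; // other intervals being spilled
//   spiller->spill(li, added, spillIs, 0);
//   // `added` now holds the short intervals created around each use of
//   // li->reg. They are marked not-spillable, so they must receive real
//   // registers on the next allocation round.
//   delete spiller;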