//===- PrologEpilogInserter.cpp - Insert Prolog/Epilog code in function ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass is responsible for finalizing the function's frame layout, saving
// callee saved registers, and for emitting prolog & epilog code for the
// function.
//
// This pass must be run after register allocation. After this pass is
// executed, it is illegal to construct MO_FrameIndex operands.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <functional>
#include <limits>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "prologepilog"

using MBBVector = SmallVector<MachineBasicBlock *, 4>;

STATISTIC(NumLeafFuncWithSpills, "Number of leaf functions with CSRs");
STATISTIC(NumFuncSeen, "Number of functions seen in PEI");

namespace {

class PEI : public MachineFunctionPass {
public:
  static char ID;

  PEI() : MachineFunctionPass(ID) {
    initializePEIPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  /// runOnMachineFunction - Insert prolog/epilog code and replace abstract
  /// frame indexes with appropriate references.
  bool runOnMachineFunction(MachineFunction &MF) override;

private:
  RegScavenger *RS;

  // MinCSFrameIndex, MaxCSFrameIndex - Keeps the range of callee saved
  // stack frame indexes.
  unsigned MinCSFrameIndex = std::numeric_limits<unsigned>::max();
  unsigned MaxCSFrameIndex = 0;

  // Save and Restore blocks of the current function. Typically there is a
  // single save block, unless Windows EH funclets are involved.
  MBBVector SaveBlocks;
  MBBVector RestoreBlocks;

  // Flag to control whether to use the register scavenger to resolve
  // frame index materialization registers. Set according to
  // TRI->requiresFrameIndexScavenging() for the current function.
  bool FrameIndexVirtualScavenging;

  // Flag to control whether the scavenger should be passed even though
  // FrameIndexVirtualScavenging is used.
  bool FrameIndexEliminationScavenging;

  // Emit remarks.
  MachineOptimizationRemarkEmitter *ORE = nullptr;

  void calculateCallFrameInfo(MachineFunction &MF);
  void calculateSaveRestoreBlocks(MachineFunction &MF);
  void spillCalleeSavedRegs(MachineFunction &MF);

  void calculateFrameObjectOffsets(MachineFunction &MF);
  void replaceFrameIndices(MachineFunction &MF);
  void replaceFrameIndices(MachineBasicBlock *BB, MachineFunction &MF,
                           int &SPAdj);
  // Frame indices in debug values are encoded in a target independent
  // way with simply the frame index and offset rather than any
  // target-specific addressing mode.
  bool replaceFrameIndexDebugInstr(MachineFunction &MF, MachineInstr &MI,
                                   unsigned OpIdx, int SPAdj = 0);
  // Does the same as replaceFrameIndices, but using the backward MIR walk and
  // backward register scavenger walk. Does not yet support call sequence
  // processing.
  void replaceFrameIndicesBackward(MachineBasicBlock *BB, MachineFunction &MF,
                                   int &SPAdj);

  void insertPrologEpilogCode(MachineFunction &MF);
  void insertZeroCallUsedRegs(MachineFunction &MF);
};

} // end anonymous namespace

char PEI::ID = 0;

char &llvm::PrologEpilogCodeInserterID = PEI::ID;

INITIALIZE_PASS_BEGIN(PEI, DEBUG_TYPE, "Prologue/Epilogue Insertion", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachineOptimizationRemarkEmitterPass)
INITIALIZE_PASS_END(PEI, DEBUG_TYPE,
                    "Prologue/Epilogue Insertion & Frame Finalization", false,
                    false)

MachineFunctionPass *llvm::createPrologEpilogInserterPass() {
  return new PEI();
}

STATISTIC(NumBytesStackSpace,
          "Number of bytes used for stack in all functions");

void PEI::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addPreserved<MachineLoopInfo>();
  AU.addPreserved<MachineDominatorTree>();
  AU.addRequired<MachineOptimizationRemarkEmitterPass>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

/// StackObjSet - A set of stack object indexes
using StackObjSet = SmallSetVector<int, 8>;

using SavedDbgValuesMap =
    SmallDenseMap<MachineBasicBlock *, SmallVector<MachineInstr *, 4>, 4>;

/// Stash DBG_VALUEs that describe parameters and which are placed at the start
/// of the block. Later on, after the prologue code has been emitted, the
/// stashed DBG_VALUEs will be reinserted at the start of the block.
static void stashEntryDbgValues(MachineBasicBlock &MBB,
                                SavedDbgValuesMap &EntryDbgValues) {
  SmallVector<const MachineInstr *, 4> FrameIndexValues;

  for (auto &MI : MBB) {
    if (!MI.isDebugInstr())
      break;
    if (!MI.isDebugValue() || !MI.getDebugVariable()->isParameter())
      continue;
    if (any_of(MI.debug_operands(),
               [](const MachineOperand &MO) { return MO.isFI(); })) {
      // We can only emit valid locations for frame indices after the frame
      // setup, so do not stash them away.
      FrameIndexValues.push_back(&MI);
      continue;
    }
    const DILocalVariable *Var = MI.getDebugVariable();
    const DIExpression *Expr = MI.getDebugExpression();
    auto Overlaps = [Var, Expr](const MachineInstr *DV) {
      return Var == DV->getDebugVariable() &&
             Expr->fragmentsOverlap(DV->getDebugExpression());
    };
    // See if the debug value overlaps with any preceding debug value that will
    // not be stashed. If that is the case, then we can't stash this value, as
    // we would then reorder the values at reinsertion.
    if (llvm::none_of(FrameIndexValues, Overlaps))
      EntryDbgValues[&MBB].push_back(&MI);
  }

  // Remove stashed debug values from the block.
  if (EntryDbgValues.count(&MBB))
    for (auto *MI : EntryDbgValues[&MBB])
      MI->removeFromParent();
}

/// runOnMachineFunction - Insert prolog/epilog code and replace abstract
/// frame indexes with appropriate references.
bool PEI::runOnMachineFunction(MachineFunction &MF) {
  NumFuncSeen++;
  const Function &F = MF.getFunction();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();

  RS = TRI->requiresRegisterScavenging(MF) ? new RegScavenger() : nullptr;
  FrameIndexVirtualScavenging = TRI->requiresFrameIndexScavenging(MF);
  ORE = &getAnalysis<MachineOptimizationRemarkEmitterPass>().getORE();

  // Calculate the MaxCallFrameSize and AdjustsStack variables for the
  // function's frame information. Also eliminates call frame pseudo
  // instructions.
  calculateCallFrameInfo(MF);

  // Determine placement of CSR spill/restore code and prolog/epilog code:
  // place all spills in the entry block, all restores in return blocks.
  calculateSaveRestoreBlocks(MF);

  // Stash away DBG_VALUEs that should not be moved by insertion of prolog
  // code.
  SavedDbgValuesMap EntryDbgValues;
  for (MachineBasicBlock *SaveBlock : SaveBlocks)
    stashEntryDbgValues(*SaveBlock, EntryDbgValues);

  // Handle CSR spilling and restoring, for targets that need it.
  if (MF.getTarget().usesPhysRegsForValues())
    spillCalleeSavedRegs(MF);

  // Allow the target machine to make final modifications to the function
  // before the frame layout is finalized.
  TFI->processFunctionBeforeFrameFinalized(MF, RS);

  // Calculate actual frame offsets for all abstract stack objects...
  calculateFrameObjectOffsets(MF);

  // Add prolog and epilog code to the function. This function is required
  // to align the stack frame as necessary for any stack variables or
  // called functions. Because of this, calculateCalleeSavedRegisters()
  // must be called before this function in order to set the AdjustsStack
  // and MaxCallFrameSize variables.
  if (!F.hasFnAttribute(Attribute::Naked))
    insertPrologEpilogCode(MF);

  // Reinsert stashed debug values at the start of the entry blocks.
  for (auto &I : EntryDbgValues)
    I.first->insert(I.first->begin(), I.second.begin(), I.second.end());

  // Allow the target machine to make final modifications to the function
  // before the frame layout is finalized.
  TFI->processFunctionBeforeFrameIndicesReplaced(MF, RS);

  // Replace all MO_FrameIndex operands with physical register references
  // and actual offsets.
  //
  replaceFrameIndices(MF);

  // If register scavenging is needed, as we've enabled doing it as a
  // post-pass, scavenge the virtual registers that frame index elimination
  // inserted.
  if (TRI->requiresRegisterScavenging(MF) && FrameIndexVirtualScavenging)
    scavengeFrameVirtualRegs(MF, *RS);

  // Warn on stack size when we exceed the given limit.
  MachineFrameInfo &MFI = MF.getFrameInfo();
  uint64_t StackSize = MFI.getStackSize();

  unsigned Threshold = UINT_MAX;
  if (MF.getFunction().hasFnAttribute("warn-stack-size")) {
    bool Failed = MF.getFunction()
                      .getFnAttribute("warn-stack-size")
                      .getValueAsString()
                      .getAsInteger(10, Threshold);
    // Verifier should have caught this.
    assert(!Failed && "Invalid warn-stack-size fn attr value");
    (void)Failed;
  }
  uint64_t UnsafeStackSize = MFI.getUnsafeStackSize();
  if (MF.getFunction().hasFnAttribute(Attribute::SafeStack))
    StackSize += UnsafeStackSize;

  if (StackSize > Threshold) {
    DiagnosticInfoStackSize DiagStackSize(F, StackSize, Threshold, DS_Warning);
    F.getContext().diagnose(DiagStackSize);
    int64_t SpillSize = 0;
    for (int Idx = MFI.getObjectIndexBegin(), End = MFI.getObjectIndexEnd();
         Idx != End; ++Idx) {
      if (MFI.isSpillSlotObjectIndex(Idx))
        SpillSize += MFI.getObjectSize(Idx);
    }

    float SpillPct =
        static_cast<float>(SpillSize) / static_cast<float>(StackSize);
    float VarPct = 1.0f - SpillPct;
    int64_t VariableSize = StackSize - SpillSize;
    dbgs() << formatv("{0}/{1} ({3:P}) spills, {2}/{1} ({4:P}) variables",
                      SpillSize, StackSize, VariableSize, SpillPct, VarPct);
    if (UnsafeStackSize != 0) {
      float UnsafePct =
          static_cast<float>(UnsafeStackSize) / static_cast<float>(StackSize);
      dbgs() << formatv(", {0}/{2} ({1:P}) unsafe stack", UnsafeStackSize,
                        UnsafePct, StackSize);
    }
    dbgs() << "\n";
  }

  ORE->emit([&]() {
    return MachineOptimizationRemarkAnalysis(DEBUG_TYPE, "StackSize",
                                             MF.getFunction().getSubprogram(),
                                             &MF.front())
           << ore::NV("NumStackBytes", StackSize) << " stack bytes in function";
  });

  delete RS;
  SaveBlocks.clear();
  RestoreBlocks.clear();
  MFI.setSavePoint(nullptr);
  MFI.setRestorePoint(nullptr);
  return true;
}

/// Calculate the MaxCallFrameSize and AdjustsStack
/// variables for the function's frame information and eliminate call frame
/// pseudo instructions.
void PEI::calculateCallFrameInfo(MachineFunction &MF) {
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  unsigned MaxCallFrameSize = 0;
  bool AdjustsStack = MFI.adjustsStack();

  // Get the function call frame set-up and tear-down instruction opcode
  unsigned FrameSetupOpcode = TII.getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = TII.getCallFrameDestroyOpcode();

  // Early exit for targets which have no call frame setup/destroy pseudo
  // instructions.
  if (FrameSetupOpcode == ~0u && FrameDestroyOpcode == ~0u)
    return;

  std::vector<MachineBasicBlock::iterator> FrameSDOps;
  for (MachineBasicBlock &BB : MF)
    for (MachineBasicBlock::iterator I = BB.begin(); I != BB.end(); ++I)
      if (TII.isFrameInstr(*I)) {
        unsigned Size = TII.getFrameSize(*I);
        if (Size > MaxCallFrameSize) MaxCallFrameSize = Size;
        AdjustsStack = true;
        FrameSDOps.push_back(I);
      } else if (I->isInlineAsm()) {
        // Some inline asm's need a stack frame, as indicated by operand 1.
        unsigned ExtraInfo = I->getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
        if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
          AdjustsStack = true;
      }

  assert(!MFI.isMaxCallFrameSizeComputed() ||
         (MFI.getMaxCallFrameSize() == MaxCallFrameSize &&
          MFI.adjustsStack() == AdjustsStack));
  MFI.setAdjustsStack(AdjustsStack);
  MFI.setMaxCallFrameSize(MaxCallFrameSize);

  for (MachineBasicBlock::iterator I : FrameSDOps) {
    // If call frames are not being included as part of the stack frame, and
    // the target doesn't indicate otherwise, remove the call frame pseudos
    // here. The sub/add sp instruction pairs are still inserted, but we don't
    // need to track the SP adjustment for frame index elimination.
    if (TFI->canSimplifyCallFramePseudos(MF))
      TFI->eliminateCallFramePseudoInstr(MF, *I->getParent(), I);
  }
}

/// Compute the sets of entry and return blocks for saving and restoring
/// callee-saved registers, and placing prolog and epilog code.
void PEI::calculateSaveRestoreBlocks(MachineFunction &MF) {
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // Even when we do not change any CSR, we still want to insert the
  // prologue and epilogue of the function.
  // So set the save points for those.

  // Use the points found by shrink-wrapping, if any.
  if (MFI.getSavePoint()) {
    SaveBlocks.push_back(MFI.getSavePoint());
    assert(MFI.getRestorePoint() && "Both restore and save must be set");
    MachineBasicBlock *RestoreBlock = MFI.getRestorePoint();
    // If RestoreBlock does not have any successor and is not a return block
    // then the end point is unreachable and we do not need to insert any
    // epilogue.
    if (!RestoreBlock->succ_empty() || RestoreBlock->isReturnBlock())
      RestoreBlocks.push_back(RestoreBlock);
    return;
  }

  // Save refs to entry and return blocks.
  SaveBlocks.push_back(&MF.front());
  for (MachineBasicBlock &MBB : MF) {
    if (MBB.isEHFuncletEntry())
      SaveBlocks.push_back(&MBB);
    if (MBB.isReturnBlock())
      RestoreBlocks.push_back(&MBB);
  }
}

static void assignCalleeSavedSpillSlots(MachineFunction &F,
                                        const BitVector &SavedRegs,
                                        unsigned &MinCSFrameIndex,
                                        unsigned &MaxCSFrameIndex) {
  if (SavedRegs.empty())
    return;

  const TargetRegisterInfo *RegInfo = F.getSubtarget().getRegisterInfo();
  const MCPhysReg *CSRegs = F.getRegInfo().getCalleeSavedRegs();
  BitVector CSMask(SavedRegs.size());

  for (unsigned i = 0; CSRegs[i]; ++i)
    CSMask.set(CSRegs[i]);

  std::vector<CalleeSavedInfo> CSI;
  for (unsigned i = 0; CSRegs[i]; ++i) {
    unsigned Reg = CSRegs[i];
    if (SavedRegs.test(Reg)) {
      bool SavedSuper = false;
      for (const MCPhysReg &SuperReg : RegInfo->superregs(Reg)) {
        // Some backends set all aliases for some registers as saved, such as
        // Mips's $fp, so they appear in SavedRegs but not CSRegs.
        if (SavedRegs.test(SuperReg) && CSMask.test(SuperReg)) {
          SavedSuper = true;
          break;
        }
      }

      if (!SavedSuper)
        CSI.push_back(CalleeSavedInfo(Reg));
    }
  }

  const TargetFrameLowering *TFI = F.getSubtarget().getFrameLowering();
  MachineFrameInfo &MFI = F.getFrameInfo();
  if (!TFI->assignCalleeSavedSpillSlots(F, RegInfo, CSI, MinCSFrameIndex,
                                        MaxCSFrameIndex)) {
    // If target doesn't implement this, use generic code.

    if (CSI.empty())
      return; // Early exit if no callee saved registers are modified!

    unsigned NumFixedSpillSlots;
    const TargetFrameLowering::SpillSlot *FixedSpillSlots =
        TFI->getCalleeSavedSpillSlots(NumFixedSpillSlots);

    // Now that we know which registers need to be saved and restored, allocate
    // stack slots for them.
    for (auto &CS : CSI) {
      // If the target has spilled this register to another register, we don't
      // need to allocate a stack slot.
      if (CS.isSpilledToReg())
        continue;

      unsigned Reg = CS.getReg();
      const TargetRegisterClass *RC = RegInfo->getMinimalPhysRegClass(Reg);

      int FrameIdx;
      if (RegInfo->hasReservedSpillSlot(F, Reg, FrameIdx)) {
        CS.setFrameIdx(FrameIdx);
        continue;
      }

      // Check to see if this physreg must be spilled to a particular stack
      // slot on this target.
      const TargetFrameLowering::SpillSlot *FixedSlot = FixedSpillSlots;
      while (FixedSlot != FixedSpillSlots + NumFixedSpillSlots &&
             FixedSlot->Reg != Reg)
        ++FixedSlot;

      unsigned Size = RegInfo->getSpillSize(*RC);
      if (FixedSlot == FixedSpillSlots + NumFixedSpillSlots) {
        // Nope, just spill it anywhere convenient.
        Align Alignment = RegInfo->getSpillAlign(*RC);
        // We may not be able to satisfy the desired alignment specification of
        // the TargetRegisterClass if the stack alignment is smaller. Use the
        // min.
        Alignment = std::min(Alignment, TFI->getStackAlign());
        FrameIdx = MFI.CreateStackObject(Size, Alignment, true);
        if ((unsigned)FrameIdx < MinCSFrameIndex) MinCSFrameIndex = FrameIdx;
        if ((unsigned)FrameIdx > MaxCSFrameIndex) MaxCSFrameIndex = FrameIdx;
      } else {
        // Spill it to the stack where we must.
        FrameIdx = MFI.CreateFixedSpillStackObject(Size, FixedSlot->Offset);
      }

      CS.setFrameIdx(FrameIdx);
    }
  }

  MFI.setCalleeSavedInfo(CSI);
}

/// Helper function to update the liveness information for the callee-saved
/// registers.
static void updateLiveness(MachineFunction &MF) {
  MachineFrameInfo &MFI = MF.getFrameInfo();
  // Visited will contain all the basic blocks that are in the region
  // where the callee saved registers are alive:
  // - Anything that is not Save or Restore -> LiveThrough.
  // - Save -> LiveIn.
  // - Restore -> LiveOut.
  // The live-out is not attached to the block, so no need to keep
  // Restore in this set.
  SmallPtrSet<MachineBasicBlock *, 8> Visited;
  SmallVector<MachineBasicBlock *, 8> WorkList;
  MachineBasicBlock *Entry = &MF.front();
  MachineBasicBlock *Save = MFI.getSavePoint();

  if (!Save)
    Save = Entry;

  if (Entry != Save) {
    WorkList.push_back(Entry);
    Visited.insert(Entry);
  }
  Visited.insert(Save);

  MachineBasicBlock *Restore = MFI.getRestorePoint();
  if (Restore)
    // By construction Restore cannot be visited, otherwise it
    // means there exists a path to Restore that does not go
    // through Save.
    WorkList.push_back(Restore);

  while (!WorkList.empty()) {
    const MachineBasicBlock *CurBB = WorkList.pop_back_val();
    // By construction, the region that is after the save point is
    // dominated by the Save and post-dominated by the Restore.
    if (CurBB == Save && Save != Restore)
      continue;
    // Enqueue all the successors not already visited.
    // Those are by construction either before Save or after Restore.
    for (MachineBasicBlock *SuccBB : CurBB->successors())
      if (Visited.insert(SuccBB).second)
        WorkList.push_back(SuccBB);
  }

  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();

  MachineRegisterInfo &MRI = MF.getRegInfo();
  for (const CalleeSavedInfo &I : CSI) {
    for (MachineBasicBlock *MBB : Visited) {
      MCPhysReg Reg = I.getReg();
      // Add the callee-saved register as live-in.
      // It's killed at the spill.
      if (!MRI.isReserved(Reg) && !MBB->isLiveIn(Reg))
        MBB->addLiveIn(Reg);
    }
    // If callee-saved register is spilled to another register rather than
    // spilling to stack, the destination register has to be marked as live for
    // each MBB between the prologue and epilogue so that it is not clobbered
    // before it is reloaded in the epilogue. The Visited set contains all
    // blocks outside of the region delimited by prologue/epilogue.
    if (I.isSpilledToReg()) {
      for (MachineBasicBlock &MBB : MF) {
        if (Visited.count(&MBB))
          continue;
        MCPhysReg DstReg = I.getDstReg();
        if (!MBB.isLiveIn(DstReg))
          MBB.addLiveIn(DstReg);
      }
    }
  }
}

/// Insert spill code for the callee-saved registers used in the function.
static void insertCSRSaves(MachineBasicBlock &SaveBlock,
                           ArrayRef<CalleeSavedInfo> CSI) {
  MachineFunction &MF = *SaveBlock.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  MachineBasicBlock::iterator I = SaveBlock.begin();
  if (!TFI->spillCalleeSavedRegisters(SaveBlock, I, CSI, TRI)) {
    for (const CalleeSavedInfo &CS : CSI) {
      // Insert the spill to the stack frame.
      unsigned Reg = CS.getReg();

      if (CS.isSpilledToReg()) {
        BuildMI(SaveBlock, I, DebugLoc(),
                TII.get(TargetOpcode::COPY), CS.getDstReg())
            .addReg(Reg, getKillRegState(true));
      } else {
        const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
        TII.storeRegToStackSlot(SaveBlock, I, Reg, true, CS.getFrameIdx(), RC,
                                TRI);
      }
    }
  }
}

/// Insert restore code for the callee-saved registers used in the function.
static void insertCSRRestores(MachineBasicBlock &RestoreBlock,
                              std::vector<CalleeSavedInfo> &CSI) {
  MachineFunction &MF = *RestoreBlock.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  // Restore all registers immediately before the return and any
  // terminators that precede it.
  MachineBasicBlock::iterator I = RestoreBlock.getFirstTerminator();

  if (!TFI->restoreCalleeSavedRegisters(RestoreBlock, I, CSI, TRI)) {
    for (const CalleeSavedInfo &CI : reverse(CSI)) {
      unsigned Reg = CI.getReg();
      if (CI.isSpilledToReg()) {
        BuildMI(RestoreBlock, I, DebugLoc(), TII.get(TargetOpcode::COPY), Reg)
            .addReg(CI.getDstReg(), getKillRegState(true));
      } else {
        const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
        TII.loadRegFromStackSlot(RestoreBlock, I, Reg, CI.getFrameIdx(), RC,
                                 TRI);
        assert(I != RestoreBlock.begin() &&
               "loadRegFromStackSlot didn't insert any code!");
        // Insert in reverse order. loadRegFromStackSlot can insert
        // multiple instructions.
      }
    }
  }
}

void PEI::spillCalleeSavedRegs(MachineFunction &MF) {
  // We can't list this requirement in getRequiredProperties because some
  // targets (WebAssembly) use virtual registers past this point, and the pass
  // pipeline is set up without giving the passes a chance to look at the
  // TargetMachine.
  // FIXME: Find a way to express this in getRequiredProperties.
  assert(MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::NoVRegs));

  const Function &F = MF.getFunction();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MinCSFrameIndex = std::numeric_limits<unsigned>::max();
  MaxCSFrameIndex = 0;

  // Determine which of the registers in the callee save list should be saved.
  BitVector SavedRegs;
  TFI->determineCalleeSaves(MF, SavedRegs, RS);

  // Assign stack slots for any callee-saved registers that must be spilled.
  assignCalleeSavedSpillSlots(MF, SavedRegs, MinCSFrameIndex, MaxCSFrameIndex);

  // Add the code to save and restore the callee saved registers.
  if (!F.hasFnAttribute(Attribute::Naked)) {
    MFI.setCalleeSavedInfoValid(true);

    std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
    if (!CSI.empty()) {
      if (!MFI.hasCalls())
        NumLeafFuncWithSpills++;

      for (MachineBasicBlock *SaveBlock : SaveBlocks)
        insertCSRSaves(*SaveBlock, CSI);

      // Update the live-in information of all the blocks up to the save point.
      updateLiveness(MF);

      for (MachineBasicBlock *RestoreBlock : RestoreBlocks)
        insertCSRRestores(*RestoreBlock, CSI);
    }
  }
}

/// AdjustStackOffset - Helper function used to adjust the stack frame offset.
static inline void AdjustStackOffset(MachineFrameInfo &MFI, int FrameIdx,
                                     bool StackGrowsDown, int64_t &Offset,
                                     Align &MaxAlign, unsigned Skew) {
  // If the stack grows down, add the object size to find the lowest address.
  if (StackGrowsDown)
    Offset += MFI.getObjectSize(FrameIdx);

  Align Alignment = MFI.getObjectAlign(FrameIdx);

  // If the alignment of this object is greater than that of the stack, then
  // increase the stack alignment to match.
  MaxAlign = std::max(MaxAlign, Alignment);

  // Adjust to alignment boundary.
  Offset = alignTo(Offset, Alignment, Skew);

  if (StackGrowsDown) {
    LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << -Offset
                      << "]\n");
    MFI.setObjectOffset(FrameIdx, -Offset); // Set the computed offset
  } else {
    LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << Offset
                      << "]\n");
    MFI.setObjectOffset(FrameIdx, Offset);
    Offset += MFI.getObjectSize(FrameIdx);
  }
}

/// Compute which bytes of fixed and callee-save stack area are unused and keep
/// track of them in StackBytesFree.
static inline void
computeFreeStackSlots(MachineFrameInfo &MFI, bool StackGrowsDown,
                      unsigned MinCSFrameIndex, unsigned MaxCSFrameIndex,
                      int64_t FixedCSEnd, BitVector &StackBytesFree) {
  // Avoid undefined int64_t -> int conversion below in extreme case.
  if (FixedCSEnd > std::numeric_limits<int>::max())
    return;

  StackBytesFree.resize(FixedCSEnd, true);

  SmallVector<int, 16> AllocatedFrameSlots;
  // Add fixed objects.
  for (int i = MFI.getObjectIndexBegin(); i != 0; ++i)
    // StackSlot scavenging is only implemented for the default stack.
    if (MFI.getStackID(i) == TargetStackID::Default)
      AllocatedFrameSlots.push_back(i);
  // Add callee-save objects if there are any.
  if (MinCSFrameIndex <= MaxCSFrameIndex) {
    for (int i = MinCSFrameIndex; i <= (int)MaxCSFrameIndex; ++i)
      if (MFI.getStackID(i) == TargetStackID::Default)
        AllocatedFrameSlots.push_back(i);
  }

  for (int i : AllocatedFrameSlots) {
    // These are converted from int64_t, but they should always fit in int
    // because of the FixedCSEnd check above.
    int ObjOffset = MFI.getObjectOffset(i);
    int ObjSize = MFI.getObjectSize(i);
    int ObjStart, ObjEnd;
    if (StackGrowsDown) {
      // ObjOffset is negative when StackGrowsDown is true.
      ObjStart = -ObjOffset - ObjSize;
      ObjEnd = -ObjOffset;
    } else {
      ObjStart = ObjOffset;
      ObjEnd = ObjOffset + ObjSize;
    }
    // Ignore fixed holes that are in the previous stack frame.
    if (ObjEnd > 0)
      StackBytesFree.reset(ObjStart, ObjEnd);
  }
}

/// Assign frame object to an unused portion of the stack in the fixed stack
/// object range. Return true if the allocation was successful.
static inline bool scavengeStackSlot(MachineFrameInfo &MFI, int FrameIdx,
                                     bool StackGrowsDown, Align MaxAlign,
                                     BitVector &StackBytesFree) {
  if (MFI.isVariableSizedObjectIndex(FrameIdx))
    return false;

  if (StackBytesFree.none()) {
    // clear it to speed up later scavengeStackSlot calls to
    // StackBytesFree.none()
    StackBytesFree.clear();
    return false;
  }

  Align ObjAlign = MFI.getObjectAlign(FrameIdx);
  if (ObjAlign > MaxAlign)
    return false;

  int64_t ObjSize = MFI.getObjectSize(FrameIdx);
  int FreeStart;
  for (FreeStart = StackBytesFree.find_first(); FreeStart != -1;
       FreeStart = StackBytesFree.find_next(FreeStart)) {

    // Check that free space has suitable alignment.
    unsigned ObjStart = StackGrowsDown ? FreeStart + ObjSize : FreeStart;
    if (alignTo(ObjStart, ObjAlign) != ObjStart)
      continue;

    if (FreeStart + ObjSize > StackBytesFree.size())
      return false;

    bool AllBytesFree = true;
    for (unsigned Byte = 0; Byte < ObjSize; ++Byte)
      if (!StackBytesFree.test(FreeStart + Byte)) {
        AllBytesFree = false;
        break;
      }
    if (AllBytesFree)
      break;
  }

  if (FreeStart == -1)
    return false;

  if (StackGrowsDown) {
    int ObjStart = -(FreeStart + ObjSize);
    LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") scavenged at SP["
                      << ObjStart << "]\n");
    MFI.setObjectOffset(FrameIdx, ObjStart);
  } else {
    LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") scavenged at SP["
                      << FreeStart << "]\n");
    MFI.setObjectOffset(FrameIdx, FreeStart);
  }

  StackBytesFree.reset(FreeStart, FreeStart + ObjSize);
  return true;
}

/// AssignProtectedObjSet - Helper function to assign large stack objects (i.e.,
/// those required to be close to the Stack Protector) to stack offsets.
static void AssignProtectedObjSet(const StackObjSet &UnassignedObjs,
                                  SmallSet<int, 16> &ProtectedObjs,
                                  MachineFrameInfo &MFI, bool StackGrowsDown,
                                  int64_t &Offset, Align &MaxAlign,
                                  unsigned Skew) {

  for (int i : UnassignedObjs) {
    AdjustStackOffset(MFI, i, StackGrowsDown, Offset, MaxAlign, Skew);
    ProtectedObjs.insert(i);
  }
}

/// calculateFrameObjectOffsets - Calculate actual frame offsets for all of the
/// abstract stack objects.
void PEI::calculateFrameObjectOffsets(MachineFunction &MF) {
  const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();

  bool StackGrowsDown =
      TFI.getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;

  // Loop over all of the stack objects, assigning sequential addresses...
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // Start at the beginning of the local area.
  // The Offset is the distance from the stack top in the direction
  // of stack growth -- so it's always nonnegative.
  int LocalAreaOffset = TFI.getOffsetOfLocalArea();
  if (StackGrowsDown)
    LocalAreaOffset = -LocalAreaOffset;
  assert(LocalAreaOffset >= 0 &&
         "Local area offset should be in direction of stack growth");
  int64_t Offset = LocalAreaOffset;

  // Skew to be applied to alignment.
  unsigned Skew = TFI.getStackAlignmentSkew(MF);

#ifdef EXPENSIVE_CHECKS
  for (unsigned i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i)
    if (!MFI.isDeadObjectIndex(i) &&
        MFI.getStackID(i) == TargetStackID::Default)
      assert(MFI.getObjectAlign(i) <= MFI.getMaxAlign() &&
             "MaxAlignment is invalid");
#endif

  // If there are fixed sized objects that are preallocated in the local area,
  // non-fixed objects can't be allocated right at the start of local area.
  // Adjust 'Offset' to point to the end of last fixed sized preallocated
  // object.
  for (int i = MFI.getObjectIndexBegin(); i != 0; ++i) {
    // Only allocate objects on the default stack.
    if (MFI.getStackID(i) != TargetStackID::Default)
      continue;

    int64_t FixedOff;
    if (StackGrowsDown) {
      // The maximum distance from the stack pointer is at the lower address of
      // the object -- which is given by offset. For a down-growing stack
      // the offset is negative, so we negate the offset to get the distance.
      FixedOff = -MFI.getObjectOffset(i);
    } else {
      // The maximum distance from the stack pointer is at the upper
      // address of the object.
      FixedOff = MFI.getObjectOffset(i) + MFI.getObjectSize(i);
    }
    if (FixedOff > Offset) Offset = FixedOff;
  }

  Align MaxAlign = MFI.getMaxAlign();
  // First assign frame offsets to stack objects that are used to spill
  // callee saved registers.
  if (MaxCSFrameIndex >= MinCSFrameIndex) {
    for (unsigned i = 0; i <= MaxCSFrameIndex - MinCSFrameIndex; ++i) {
      unsigned FrameIndex =
          StackGrowsDown ? MinCSFrameIndex + i : MaxCSFrameIndex - i;

      // Only allocate objects on the default stack.
      if (MFI.getStackID(FrameIndex) != TargetStackID::Default)
        continue;

      // TODO: should this just be if (MFI.isDeadObjectIndex(FrameIndex))
      if (!StackGrowsDown && MFI.isDeadObjectIndex(FrameIndex))
        continue;

      AdjustStackOffset(MFI, FrameIndex, StackGrowsDown, Offset, MaxAlign,
                        Skew);
    }
  }

  assert(MaxAlign == MFI.getMaxAlign() &&
         "MFI.getMaxAlign should already account for all callee-saved "
         "registers without a fixed stack slot");

  // FixedCSEnd is the stack offset to the end of the fixed and callee-save
  // stack area.
  int64_t FixedCSEnd = Offset;

  // Make sure the special register scavenging spill slot is closest to the
  // incoming stack pointer if a frame pointer is required and is closer
  // to the incoming rather than the final stack pointer.
  const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
  bool EarlyScavengingSlots =
      TFI.allocateScavengingFrameIndexesNearIncomingSP(MF);
  if (RS && EarlyScavengingSlots) {
    SmallVector<int, 2> SFIs;
    RS->getScavengingFrameIndices(SFIs);
    for (int SFI : SFIs)
      AdjustStackOffset(MFI, SFI, StackGrowsDown, Offset, MaxAlign, Skew);
  }

  // FIXME: Once this is working, then enable flag will change to a target
  // check for whether the frame is large enough to want to use virtual
  // frame index registers. Functions which don't want/need this optimization
  // will continue to use the existing code path.
  if (MFI.getUseLocalStackAllocationBlock()) {
    Align Alignment = MFI.getLocalFrameMaxAlign();

    // Adjust to alignment boundary.
    Offset = alignTo(Offset, Alignment, Skew);

    LLVM_DEBUG(dbgs() << "Local frame base offset: " << Offset << "\n");

    // Resolve offsets for objects in the local block.
    for (unsigned i = 0, e = MFI.getLocalFrameObjectCount(); i != e; ++i) {
      std::pair<int, int64_t> Entry = MFI.getLocalFrameObjectMap(i);
      int64_t FIOffset = (StackGrowsDown ? -Offset : Offset) + Entry.second;
      LLVM_DEBUG(dbgs() << "alloc FI(" << Entry.first << ") at SP[" << FIOffset
                        << "]\n");
      MFI.setObjectOffset(Entry.first, FIOffset);
    }
    // Allocate the local block
    Offset += MFI.getLocalFrameSize();

    MaxAlign = std::max(Alignment, MaxAlign);
  }

  // Retrieve the Exception Handler registration node.
  int EHRegNodeFrameIndex = std::numeric_limits<int>::max();
  if (const WinEHFuncInfo *FuncInfo = MF.getWinEHFuncInfo())
    EHRegNodeFrameIndex = FuncInfo->EHRegNodeFrameIndex;

  // Make sure that the stack protector comes before the local variables on the
  // stack.
  SmallSet<int, 16> ProtectedObjs;
  if (MFI.hasStackProtectorIndex()) {
    int StackProtectorFI = MFI.getStackProtectorIndex();
    StackObjSet LargeArrayObjs;
    StackObjSet SmallArrayObjs;
    StackObjSet AddrOfObjs;

    // If we need a stack protector, we need to make sure that
    // LocalStackSlotPass didn't already allocate a slot for it.
    // If we are told to use the LocalStackAllocationBlock, the stack protector
    // is expected to be already pre-allocated.
    if (MFI.getStackID(StackProtectorFI) != TargetStackID::Default) {
      // If the stack protector isn't on the default stack then it's up to the
      // target to set the stack offset.
      assert(MFI.getObjectOffset(StackProtectorFI) != 0 &&
             "Offset of stack protector on non-default stack expected to be "
             "already set.");
      assert(!MFI.isObjectPreAllocated(MFI.getStackProtectorIndex()) &&
             "Stack protector on non-default stack expected to not be "
             "pre-allocated by LocalStackSlotPass.");
    } else if (!MFI.getUseLocalStackAllocationBlock()) {
      AdjustStackOffset(MFI, StackProtectorFI, StackGrowsDown, Offset,
                        MaxAlign, Skew);
    } else if (!MFI.isObjectPreAllocated(MFI.getStackProtectorIndex())) {
      llvm_unreachable(
          "Stack protector not pre-allocated by LocalStackSlotPass.");
    }

    // Assign large stack objects first.
    for (unsigned i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) {
      if (MFI.isObjectPreAllocated(i) && MFI.getUseLocalStackAllocationBlock())
        continue;
      if (i >= MinCSFrameIndex && i <= MaxCSFrameIndex)
        continue;
      if (RS && RS->isScavengingFrameIndex((int)i))
        continue;
      if (MFI.isDeadObjectIndex(i))
        continue;
      if (StackProtectorFI == (int)i || EHRegNodeFrameIndex == (int)i)
        continue;
      // Only allocate objects on the default stack.
      if (MFI.getStackID(i) != TargetStackID::Default)
        continue;

      switch (MFI.getObjectSSPLayout(i)) {
      case MachineFrameInfo::SSPLK_None:
        continue;
      case MachineFrameInfo::SSPLK_SmallArray:
        SmallArrayObjs.insert(i);
        continue;
      case MachineFrameInfo::SSPLK_AddrOf:
        AddrOfObjs.insert(i);
        continue;
      case MachineFrameInfo::SSPLK_LargeArray:
        LargeArrayObjs.insert(i);
        continue;
      }
      llvm_unreachable("Unexpected SSPLayoutKind.");
    }

    // We expect **all** the protected stack objects to be pre-allocated by
    // LocalStackSlotPass. If it turns out that PEI still has to allocate some
    // of them, we may end up messing up the expected order of the objects.
    if (MFI.getUseLocalStackAllocationBlock() &&
        !(LargeArrayObjs.empty() && SmallArrayObjs.empty() &&
          AddrOfObjs.empty()))
      llvm_unreachable("Found protected stack objects not pre-allocated by "
                       "LocalStackSlotPass.");

    AssignProtectedObjSet(LargeArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
                          Offset, MaxAlign, Skew);
    AssignProtectedObjSet(SmallArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
                          Offset, MaxAlign, Skew);
    AssignProtectedObjSet(AddrOfObjs, ProtectedObjs, MFI, StackGrowsDown,
                          Offset, MaxAlign, Skew);
  }

  SmallVector<int, 8> ObjectsToAllocate;

  // Then prepare to assign frame offsets to stack objects that are not used to
  // spill callee saved registers.
  for (unsigned i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) {
    if (MFI.isObjectPreAllocated(i) && MFI.getUseLocalStackAllocationBlock())
      continue;
    if (i >= MinCSFrameIndex && i <= MaxCSFrameIndex)
      continue;
    if (RS && RS->isScavengingFrameIndex((int)i))
      continue;
    if (MFI.isDeadObjectIndex(i))
      continue;
    if (MFI.getStackProtectorIndex() == (int)i || EHRegNodeFrameIndex == (int)i)
      continue;
    if (ProtectedObjs.count(i))
      continue;
    // Only allocate objects on the default stack.
    if (MFI.getStackID(i) != TargetStackID::Default)
      continue;

    // Add the objects that we need to allocate to our working set.
    ObjectsToAllocate.push_back(i);
  }

  // Allocate the EH registration node first if one is present.
  if (EHRegNodeFrameIndex != std::numeric_limits<int>::max())
    AdjustStackOffset(MFI, EHRegNodeFrameIndex, StackGrowsDown, Offset,
                      MaxAlign, Skew);

  // Give the targets a chance to order the objects the way they like it.
  if (MF.getTarget().getOptLevel() != CodeGenOpt::None &&
      MF.getTarget().Options.StackSymbolOrdering)
    TFI.orderFrameObjects(MF, ObjectsToAllocate);

  // Keep track of which bytes in the fixed and callee-save range are used so
  // we can use the holes when allocating later stack objects. Only do this if
  // stack protector isn't being used and the target requests it and we're
  // optimizing.
  BitVector StackBytesFree;
  if (!ObjectsToAllocate.empty() &&
      MF.getTarget().getOptLevel() != CodeGenOpt::None &&
      MFI.getStackProtectorIndex() < 0 && TFI.enableStackSlotScavenging(MF))
    computeFreeStackSlots(MFI, StackGrowsDown, MinCSFrameIndex,
                          MaxCSFrameIndex, FixedCSEnd, StackBytesFree);

  // Now walk the objects and actually assign base offsets to them.
  for (auto &Object : ObjectsToAllocate)
    if (!scavengeStackSlot(MFI, Object, StackGrowsDown, MaxAlign,
                           StackBytesFree))
      AdjustStackOffset(MFI, Object, StackGrowsDown, Offset, MaxAlign, Skew);

  // Make sure the special register scavenging spill slot is closest to the
  // stack pointer.
  if (RS && !EarlyScavengingSlots) {
    SmallVector<int, 2> SFIs;
    RS->getScavengingFrameIndices(SFIs);
    for (int SFI : SFIs)
      AdjustStackOffset(MFI, SFI, StackGrowsDown, Offset, MaxAlign, Skew);
  }

  if (!TFI.targetHandlesStackFrameRounding()) {
    // If we have reserved argument space for call sites in the function
    // immediately on entry to the current function, count it as part of the
    // overall stack size.
    if (MFI.adjustsStack() && TFI.hasReservedCallFrame(MF))
      Offset += MFI.getMaxCallFrameSize();

    // Round up the size to a multiple of the alignment. If the function has
    // any calls or alloca's, align to the target's StackAlignment value to
    // ensure that the callee's frame or the alloca data is suitably aligned;
    // otherwise, for leaf functions, align to the TransientStackAlignment
    // value.
    Align StackAlign;
    if (MFI.adjustsStack() || MFI.hasVarSizedObjects() ||
        (RegInfo->hasStackRealignment(MF) && MFI.getObjectIndexEnd() != 0))
      StackAlign = TFI.getStackAlign();
    else
      StackAlign = TFI.getTransientStackAlign();

    // If the frame pointer is eliminated, all frame offsets will be relative
    // to SP not FP. Align to MaxAlign so this works.
    StackAlign = std::max(StackAlign, MaxAlign);
    int64_t OffsetBeforeAlignment = Offset;
    Offset = alignTo(Offset, StackAlign, Skew);

    // If we have increased the offset to fulfill the alignment constraints,
    // then the scavenging spill slots may become harder to reach from the
    // stack pointer, float them so they stay close.
    if (StackGrowsDown && OffsetBeforeAlignment != Offset && RS &&
        !EarlyScavengingSlots) {
      SmallVector<int, 2> SFIs;
      RS->getScavengingFrameIndices(SFIs);
      LLVM_DEBUG(if (!SFIs.empty()) llvm::dbgs()
                     << "Adjusting emergency spill slots!\n";);
      int64_t Delta = Offset - OffsetBeforeAlignment;
      for (int SFI : SFIs) {
        LLVM_DEBUG(llvm::dbgs()
                       << "Adjusting offset of emergency spill slot #" << SFI
                       << " from " << MFI.getObjectOffset(SFI););
        MFI.setObjectOffset(SFI, MFI.getObjectOffset(SFI) - Delta);
        LLVM_DEBUG(llvm::dbgs() << " to " << MFI.getObjectOffset(SFI) << "\n";);
      }
    }
  }

  // Update frame info to pretend that this is part of the stack...
  int64_t StackSize = Offset - LocalAreaOffset;
  MFI.setStackSize(StackSize);
  NumBytesStackSpace += StackSize;
}

/// insertPrologEpilogCode - Scan the function for modified callee saved
/// registers, insert spill code for these callee saved registers, then add
/// prolog and epilog code to the function.
void PEI::insertPrologEpilogCode(MachineFunction &MF) {
  const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();

  // Add prologue to the function...
  for (MachineBasicBlock *SaveBlock : SaveBlocks)
    TFI.emitPrologue(MF, *SaveBlock);

  // Add epilogue to restore the callee-save registers in each exiting block.
  for (MachineBasicBlock *RestoreBlock : RestoreBlocks)
    TFI.emitEpilogue(MF, *RestoreBlock);

  // Zero call used registers before restoring callee-saved registers.
  insertZeroCallUsedRegs(MF);

  for (MachineBasicBlock *SaveBlock : SaveBlocks)
    TFI.inlineStackProbe(MF, *SaveBlock);

  // Emit additional code that is required to support segmented stacks, if
  // we've been asked for it. This, when linked with a runtime with support
  // for segmented stacks (libgcc is one), will result in allocating stack
  // space in small chunks instead of one large contiguous block.
  if (MF.shouldSplitStack()) {
    for (MachineBasicBlock *SaveBlock : SaveBlocks)
      TFI.adjustForSegmentedStacks(MF, *SaveBlock);
  }

  // Emit additional code that is required to explicitly handle the stack in
  // HiPE native code (if needed) when loaded in the Erlang/OTP runtime. The
  // approach is rather similar to that of Segmented Stacks, but it uses a
  // different conditional check and another BIF for allocating more stack
  // space.
  if (MF.getFunction().getCallingConv() == CallingConv::HiPE)
    for (MachineBasicBlock *SaveBlock : SaveBlocks)
      TFI.adjustForHiPEPrologue(MF, *SaveBlock);
}

/// insertZeroCallUsedRegs - Zero out call used registers.
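/// The set of registers to zero is selected by the function's
/// "zero-call-used-regs" attribute (typically set via Clang's
/// -fzero-call-used-regs option), and the zeroing is emitted in each return
/// block; callee-saved and otherwise live registers are excluded below.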
void PEI::insertZeroCallUsedRegs(MachineFunction &MF) {
  const Function &F = MF.getFunction();

  if (!F.hasFnAttribute("zero-call-used-regs"))
    return;

  using namespace ZeroCallUsedRegs;

  ZeroCallUsedRegsKind ZeroRegsKind =
      StringSwitch<ZeroCallUsedRegsKind>(
          F.getFnAttribute("zero-call-used-regs").getValueAsString())
          .Case("skip", ZeroCallUsedRegsKind::Skip)
          .Case("used-gpr-arg", ZeroCallUsedRegsKind::UsedGPRArg)
          .Case("used-gpr", ZeroCallUsedRegsKind::UsedGPR)
          .Case("used-arg", ZeroCallUsedRegsKind::UsedArg)
          .Case("used", ZeroCallUsedRegsKind::Used)
          .Case("all-gpr-arg", ZeroCallUsedRegsKind::AllGPRArg)
          .Case("all-gpr", ZeroCallUsedRegsKind::AllGPR)
          .Case("all-arg", ZeroCallUsedRegsKind::AllArg)
          .Case("all", ZeroCallUsedRegsKind::All);

  if (ZeroRegsKind == ZeroCallUsedRegsKind::Skip)
    return;

  const bool OnlyGPR = static_cast<unsigned>(ZeroRegsKind) & ONLY_GPR;
  const bool OnlyUsed = static_cast<unsigned>(ZeroRegsKind) & ONLY_USED;
  const bool OnlyArg = static_cast<unsigned>(ZeroRegsKind) & ONLY_ARG;

  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
  const BitVector AllocatableSet(TRI.getAllocatableSet(MF));

  // Mark all used registers.
  BitVector UsedRegs(TRI.getNumRegs());
  if (OnlyUsed)
    for (const MachineBasicBlock &MBB : MF)
      for (const MachineInstr &MI : MBB)
        for (const MachineOperand &MO : MI.operands()) {
          if (!MO.isReg())
            continue;

          MCRegister Reg = MO.getReg();
          if (AllocatableSet[Reg] && !MO.isImplicit() &&
              (MO.isDef() || MO.isUse()))
            UsedRegs.set(Reg);
        }

  // Get a list of registers that are used.
  BitVector LiveIns(TRI.getNumRegs());
  for (const MachineBasicBlock::RegisterMaskPair &LI : MF.front().liveins())
    LiveIns.set(LI.PhysReg);

  BitVector RegsToZero(TRI.getNumRegs());
  for (MCRegister Reg : AllocatableSet.set_bits()) {
    // Skip over fixed registers.
    if (TRI.isFixedRegister(MF, Reg))
      continue;

    // Want only general purpose registers.
    if (OnlyGPR && !TRI.isGeneralPurposeRegister(MF, Reg))
      continue;

    // Want only used registers.
    if (OnlyUsed && !UsedRegs[Reg])
      continue;

    // Want only registers used for arguments.
    if (OnlyArg) {
      if (OnlyUsed) {
        if (!LiveIns[Reg])
          continue;
      } else if (!TRI.isArgumentRegister(MF, Reg)) {
        continue;
      }
    }

    RegsToZero.set(Reg);
  }

  // Don't clear registers that are live when leaving the function.
  for (const MachineBasicBlock &MBB : MF)
    for (const MachineInstr &MI : MBB.terminators()) {
      if (!MI.isReturn())
        continue;

      for (const auto &MO : MI.operands()) {
        if (!MO.isReg())
          continue;

        MCRegister Reg = MO.getReg();

        // This picks up sibling registers (e.g. %al -> %ah).
        for (MCRegUnitIterator Unit(Reg, &TRI); Unit.isValid(); ++Unit)
          RegsToZero.reset(*Unit);

        for (MCPhysReg SReg : TRI.sub_and_superregs_inclusive(Reg))
          RegsToZero.reset(SReg);
      }
    }

  // Don't need to clear registers that are used/clobbered by terminating
  // instructions.
  for (const MachineBasicBlock &MBB : MF) {
    if (!MBB.isReturnBlock())
      continue;

    MachineBasicBlock::const_iterator MBBI = MBB.getFirstTerminator();
    for (MachineBasicBlock::const_iterator I = MBBI, E = MBB.end(); I != E;
         ++I) {
      for (const MachineOperand &MO : I->operands()) {
        if (!MO.isReg())
          continue;

        for (const MCPhysReg &Reg :
             TRI.sub_and_superregs_inclusive(MO.getReg()))
          RegsToZero.reset(Reg);
      }
    }
  }

  // Don't clear registers that must be preserved.
  for (const MCPhysReg *CSRegs = TRI.getCalleeSavedRegs(&MF);
       MCPhysReg CSReg = *CSRegs; ++CSRegs)
    for (MCRegister Reg : TRI.sub_and_superregs_inclusive(CSReg))
      RegsToZero.reset(Reg);

  const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();
  for (MachineBasicBlock &MBB : MF)
    if (MBB.isReturnBlock())
      TFI.emitZeroCallUsedRegs(RegsToZero, MBB);
}

/// replaceFrameIndices - Replace all MO_FrameIndex operands with physical
/// register references and actual offsets.
void PEI::replaceFrameIndices(MachineFunction &MF) {
  const auto &ST = MF.getSubtarget();
  const TargetFrameLowering &TFI = *ST.getFrameLowering();
  if (!TFI.needsFrameIndexResolution(MF))
    return;

  const TargetRegisterInfo *TRI = ST.getRegisterInfo();

  // Allow the target to determine this after knowing the frame size.
  FrameIndexEliminationScavenging =
      (RS && !FrameIndexVirtualScavenging) ||
      TRI->requiresFrameIndexReplacementScavenging(MF);

  // Store SPAdj at exit of a basic block.
  SmallVector<int, 8> SPState;
  SPState.resize(MF.getNumBlockIDs());
  df_iterator_default_set<MachineBasicBlock *> Reachable;

  // Iterate over the reachable blocks in DFS order.
  for (auto DFI = df_ext_begin(&MF, Reachable),
            DFE = df_ext_end(&MF, Reachable);
       DFI != DFE; ++DFI) {
    int SPAdj = 0;
    // Check the exit state of the DFS stack predecessor.
    if (DFI.getPathLength() >= 2) {
      MachineBasicBlock *StackPred = DFI.getPath(DFI.getPathLength() - 2);
      assert(Reachable.count(StackPred) &&
             "DFS stack predecessor is already visited.\n");
      SPAdj = SPState[StackPred->getNumber()];
    }
    MachineBasicBlock *BB = *DFI;
    replaceFrameIndices(BB, MF, SPAdj);
    SPState[BB->getNumber()] = SPAdj;
  }

  // Handle the unreachable blocks.
  for (auto &BB : MF) {
    if (Reachable.count(&BB))
      // Already handled in DFS traversal.
      continue;
    int SPAdj = 0;
    replaceFrameIndices(&BB, MF, SPAdj);
  }
}

bool PEI::replaceFrameIndexDebugInstr(MachineFunction &MF, MachineInstr &MI,
                                      unsigned OpIdx, int SPAdj) {
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
  if (MI.isDebugValue()) {

    MachineOperand &Op = MI.getOperand(OpIdx);
    assert(MI.isDebugOperand(&Op) &&
           "Frame indices can only appear as a debug operand in a DBG_VALUE*"
           " machine instruction");
    Register Reg;
    unsigned FrameIdx = Op.getIndex();
    unsigned Size = MF.getFrameInfo().getObjectSize(FrameIdx);

    StackOffset Offset = TFI->getFrameIndexReference(MF, FrameIdx, Reg);
    Op.ChangeToRegister(Reg, false /*isDef*/);

    const DIExpression *DIExpr = MI.getDebugExpression();

    // If we have a direct DBG_VALUE, and its location expression isn't
    // currently complex, then adding an offset will morph it into a
    // complex location that is interpreted as being a memory address.
    // This changes a pointer-valued variable to dereference that pointer,
    // which is incorrect. Fix by adding DW_OP_stack_value.

    if (MI.isNonListDebugValue()) {
      unsigned PrependFlags = DIExpression::ApplyOffset;
      if (!MI.isIndirectDebugValue() && !DIExpr->isComplex())
        PrependFlags |= DIExpression::StackValue;

      // If we have a DBG_VALUE that is indirect and has an Implicit location
      // expression, we need to insert a deref before prepending a Memory
      // location expression. Also, after doing this we change the DBG_VALUE
      // to be direct.
      if (MI.isIndirectDebugValue() && DIExpr->isImplicit()) {
        SmallVector<uint64_t, 2> Ops = {dwarf::DW_OP_deref_size, Size};
        bool WithStackValue = true;
        DIExpr = DIExpression::prependOpcodes(DIExpr, Ops, WithStackValue);
        // Make the DBG_VALUE direct.
        MI.getDebugOffset().ChangeToRegister(0, false);
      }
      DIExpr = TRI.prependOffsetExpression(DIExpr, PrependFlags, Offset);
    } else {
      // The debug operand at DebugOpIndex was a frame index at offset
      // `Offset`; now the operand has been replaced with the frame
      // register, we must add Offset with `register x, plus Offset`.
      unsigned DebugOpIndex = MI.getDebugOperandIndex(&Op);
      SmallVector<uint64_t, 3> Ops;
      TRI.getOffsetOpcodes(Offset, Ops);
      DIExpr = DIExpression::appendOpsToArg(DIExpr, Ops, DebugOpIndex);
    }
    MI.getDebugExpressionOp().setMetadata(DIExpr);
    return true;
  }

  if (MI.isDebugPHI()) {
    // Allow stack ref to continue onwards.
    return true;
  }

  // TODO: This code should be commoned with the code for
  // PATCHPOINT. There's no good reason for the difference in
  // implementation other than historical accident. The only
  // remaining difference is the unconditional use of the stack
  // pointer as the base register.
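  // For STATEPOINT, the operand following the frame index holds an immediate
  // offset; fold the resolved frame offset (and any pending SP adjustment)
  // into that immediate and rewrite the frame index operand to the chosen
  // base register.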
  if (MI.getOpcode() == TargetOpcode::STATEPOINT) {
    assert((!MI.isDebugValue() || OpIdx == 0) &&
           "Frame indices can only appear as the first operand of a "
           "DBG_VALUE machine instruction");
    Register Reg;
    MachineOperand &Offset = MI.getOperand(OpIdx + 1);
    StackOffset refOffset = TFI->getFrameIndexReferencePreferSP(
        MF, MI.getOperand(OpIdx).getIndex(), Reg, /*IgnoreSPUpdates*/ false);
    assert(!refOffset.getScalable() &&
           "Frame offsets with a scalable component are not supported");
    Offset.setImm(Offset.getImm() + refOffset.getFixed() + SPAdj);
    MI.getOperand(OpIdx).ChangeToRegister(Reg, false /*isDef*/);
    return true;
  }
  return false;
}

void PEI::replaceFrameIndicesBackward(MachineBasicBlock *BB,
                                      MachineFunction &MF, int &SPAdj) {
  assert(MF.getSubtarget().getRegisterInfo() &&
         "getRegisterInfo() must be implemented!");

  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();

  RS->enterBasicBlockEnd(*BB);

  for (MachineInstr &MI : make_early_inc_range(reverse(*BB))) {

    // Register scavenger backward step
    MachineBasicBlock::iterator Step(MI);
    for (unsigned i = 0; i != MI.getNumOperands(); ++i) {
      if (!MI.getOperand(i).isFI())
        continue;

      if (replaceFrameIndexDebugInstr(MF, MI, i, SPAdj))
        continue;

      // If this instruction has a FrameIndex operand, we need to
      // use that target machine register info object to eliminate
      // it.

      // TRI.eliminateFrameIndex may lower the frame index to a sequence of
      // instructions. It also can remove/change instructions passed by the
      // iterator and invalidate the iterator. We have to take care of this.
      // For that we support two iterators: *Step* - points to the position up
      // to which the scavenger should scan by the next iteration to have
      // liveness information up to date. *Curr* - keeps track of the correct
      // RS->MBBI - the scan start point. It points to the currently processed
      // instruction right before the frame lowering.
      //
      // ITERATORS WORK AS FOLLOWS:
      // *Step* is shifted one step back right before the frame lowering and
      // one step forward right after it. No matter how many instructions were
      // inserted, *Step* will be right after the position which is going to be
      // processed in the next iteration, thus, in the correct position for the
      // scavenger to go up to.
      // *Curr* is shifted one step forward right before calling
      // TRI.eliminateFrameIndex and one step backward after. Thus, we make
      // sure it points right to the position that is the correct starting
      // point for the scavenger to scan.
      MachineBasicBlock::iterator Curr = ++RS->getCurrentPosition();

      // Shift back
      --Step;

      bool Removed = TRI.eliminateFrameIndex(MI, SPAdj, i, RS);
      // Restore to unify logic with a shift back that happens in the end of
      // the outer loop.
      ++Step;
      RS->skipTo(--Curr);
      if (Removed)
        break;
    }

    // Shift it to make RS collect reg info up to the current instruction.
    if (Step != BB->begin())
      Step--;

    // Update register states.
    RS->backward(Step);
  }
}

void PEI::replaceFrameIndices(MachineBasicBlock *BB, MachineFunction &MF,
                              int &SPAdj) {
  assert(MF.getSubtarget().getRegisterInfo() &&
         "getRegisterInfo() must be implemented!");
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();

  if (RS && TRI.supportsBackwardScavenger())
    return replaceFrameIndicesBackward(BB, MF, SPAdj);

  if (RS && FrameIndexEliminationScavenging)
    RS->enterBasicBlock(*BB);

  bool InsideCallSequence = false;

  for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end();) {
    if (TII.isFrameInstr(*I)) {
      InsideCallSequence = TII.isFrameSetup(*I);
      SPAdj += TII.getSPAdjust(*I);
      I = TFI->eliminateCallFramePseudoInstr(MF, *BB, I);
      continue;
    }

    MachineInstr &MI = *I;
    bool DoIncr = true;
    bool DidFinishLoop = true;
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      if (!MI.getOperand(i).isFI())
        continue;

      if (replaceFrameIndexDebugInstr(MF, MI, i, SPAdj))
        continue;

      // Some instructions (e.g. inline asm instructions) can have
      // multiple frame indices and/or cause eliminateFrameIndex
      // to insert more than one instruction. We need the register
      // scavenger to go through all of these instructions so that
      // it can update its register information. We keep the
      // iterator at the point before insertion so that we can
      // revisit them in full.
      bool AtBeginning = (I == BB->begin());
      if (!AtBeginning) --I;

      // If this instruction has a FrameIndex operand, we need to
      // use that target machine register info object to eliminate
      // it.
      TRI.eliminateFrameIndex(MI, SPAdj, i,
                              FrameIndexEliminationScavenging ? RS : nullptr);

      // Reset the iterator if we were at the beginning of the BB.
      if (AtBeginning) {
        I = BB->begin();
        DoIncr = false;
      }

      DidFinishLoop = false;
      break;
    }

    // If we are looking at a call sequence, we need to keep track of
    // the SP adjustment made by each instruction in the sequence.
    // This includes both the frame setup/destroy pseudos (handled above),
    // as well as other instructions that have side effects w.r.t the SP.
    // Note that this must come after eliminateFrameIndex, because
    // if I itself referred to a frame index, we shouldn't count its own
    // adjustment.
    if (DidFinishLoop && InsideCallSequence)
      SPAdj += TII.getSPAdjust(MI);

    if (DoIncr && I != BB->end()) ++I;

    // Update register states.
    if (RS && FrameIndexEliminationScavenging && DidFinishLoop)
      RS->forward(MI);
  }
}