//===-- PrologEpilogInserter.cpp - Insert Prolog/Epilog code in function --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass is responsible for finalizing the function's frame layout, saving
// callee saved registers, and for emitting prolog & epilog code for the
// function.
//
// This pass must be run after register allocation.  After this pass is
// executed, it is illegal to construct MO_FrameIndex operands.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <climits>

using namespace llvm;

#define DEBUG_TYPE "prologepilog"

typedef SmallVector<MachineBasicBlock *, 4> MBBVector;
static void doSpillCalleeSavedRegs(MachineFunction &MF, RegScavenger *RS,
                                   unsigned &MinCSFrameIndex,
                                   unsigned &MaxCSFrameIndex,
                                   const MBBVector &SaveBlocks,
                                   const MBBVector &RestoreBlocks);

namespace {
class PEI : public MachineFunctionPass {
public:
  static char ID;
  PEI() : MachineFunctionPass(ID) {
    initializePEIPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  MachineFunctionProperties getRequiredProperties() const override {
    MachineFunctionProperties MFP;
    if (UsesCalleeSaves)
      MFP.set(MachineFunctionProperties::Property::NoVRegs);
    return MFP;
  }

  /// runOnMachineFunction - Insert prolog/epilog code and replace abstract
  /// frame indexes with appropriate references.
  ///
  bool runOnMachineFunction(MachineFunction &Fn) override;

private:
  std::function<void(MachineFunction &MF, RegScavenger *RS,
                     unsigned &MinCSFrameIndex, unsigned &MaxCSFrameIndex,
                     const MBBVector &SaveBlocks,
                     const MBBVector &RestoreBlocks)>
      SpillCalleeSavedRegisters;
  std::function<void(MachineFunction &MF, RegScavenger &RS)>
      ScavengeFrameVirtualRegs;

  bool UsesCalleeSaves = false;

  RegScavenger *RS;

  // MinCSFrameIndex, MaxCSFrameIndex - Keeps the range of callee saved
  // stack frame indexes.
  unsigned MinCSFrameIndex = std::numeric_limits<unsigned>::max();
  unsigned MaxCSFrameIndex = 0;

  // Save and Restore blocks of the current function.
  // Typically there is a single save block, unless Windows EH funclets are
  // involved.
  MBBVector SaveBlocks;
  MBBVector RestoreBlocks;

  // Flag to control whether to use the register scavenger to resolve
  // frame index materialization registers. Set according to
  // TRI->requiresFrameIndexScavenging() for the current function.
  bool FrameIndexVirtualScavenging;

  // Flag to control whether the scavenger should be passed even though
  // FrameIndexVirtualScavenging is used.
  bool FrameIndexEliminationScavenging;

  // Emit remarks.
  MachineOptimizationRemarkEmitter *ORE = nullptr;

  void calculateCallFrameInfo(MachineFunction &Fn);
  void calculateSaveRestoreBlocks(MachineFunction &Fn);

  void calculateFrameObjectOffsets(MachineFunction &Fn);
  void replaceFrameIndices(MachineFunction &Fn);
  void replaceFrameIndices(MachineBasicBlock *BB, MachineFunction &Fn,
                           int &SPAdj);
  void insertPrologEpilogCode(MachineFunction &Fn);
};
} // namespace

char PEI::ID = 0;
char &llvm::PrologEpilogCodeInserterID = PEI::ID;

static cl::opt<unsigned>
    WarnStackSize("warn-stack-size", cl::Hidden, cl::init((unsigned)-1),
                  cl::desc("Warn for stack size bigger than the given"
                           " number"));

INITIALIZE_PASS_BEGIN(PEI, DEBUG_TYPE, "Prologue/Epilogue Insertion", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(StackProtector)
INITIALIZE_PASS_DEPENDENCY(MachineOptimizationRemarkEmitterPass)
INITIALIZE_PASS_END(PEI, DEBUG_TYPE,
                    "Prologue/Epilogue Insertion & Frame Finalization", false,
                    false)

MachineFunctionPass *llvm::createPrologEpilogInserterPass() {
  return new PEI();
}

STATISTIC(NumBytesStackSpace,
          "Number of bytes used for stack in all functions");

void PEI::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addPreserved<MachineLoopInfo>();
  AU.addPreserved<MachineDominatorTree>();
  AU.addRequired<StackProtector>();
  AU.addRequired<MachineOptimizationRemarkEmitterPass>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

/// StackObjSet - A set of stack object indexes
typedef SmallSetVector<int, 8> StackObjSet;

/// runOnMachineFunction - Insert prolog/epilog code and replace abstract
/// frame indexes with appropriate references.
///
bool PEI::runOnMachineFunction(MachineFunction &Fn) {
  if (!SpillCalleeSavedRegisters) {
    const TargetMachine &TM = Fn.getTarget();
    if (!TM.usesPhysRegsForPEI()) {
      // Targets that do not use physical registers at this point get no-op
      // callbacks for CSR spilling and post-pass scavenging.
      SpillCalleeSavedRegisters = [](MachineFunction &, RegScavenger *,
                                     unsigned &, unsigned &, const MBBVector &,
                                     const MBBVector &) {};
      ScavengeFrameVirtualRegs = [](MachineFunction &, RegScavenger &) {};
    } else {
      SpillCalleeSavedRegisters = doSpillCalleeSavedRegs;
      ScavengeFrameVirtualRegs = scavengeFrameVirtualRegs;
      UsesCalleeSaves = true;
    }
  }

  const Function *F = Fn.getFunction();
  const TargetRegisterInfo *TRI = Fn.getSubtarget().getRegisterInfo();
  const TargetFrameLowering *TFI = Fn.getSubtarget().getFrameLowering();

  RS = TRI->requiresRegisterScavenging(Fn) ? new RegScavenger() : nullptr;
  FrameIndexVirtualScavenging = TRI->requiresFrameIndexScavenging(Fn);
  FrameIndexEliminationScavenging =
      (RS && !FrameIndexVirtualScavenging) ||
      TRI->requiresFrameIndexReplacementScavenging(Fn);
  ORE = &getAnalysis<MachineOptimizationRemarkEmitterPass>().getORE();

  // Calculate the MaxCallFrameSize and AdjustsStack variables for the
  // function's frame information. Also eliminates call frame pseudo
  // instructions.
  calculateCallFrameInfo(Fn);

  // Determine placement of CSR spill/restore code and prolog/epilog code:
  // place all spills in the entry block, all restores in return blocks.
  calculateSaveRestoreBlocks(Fn);

  // Handle CSR spilling and restoring, for targets that need it.
  SpillCalleeSavedRegisters(Fn, RS, MinCSFrameIndex, MaxCSFrameIndex,
                            SaveBlocks, RestoreBlocks);

  // Allow the target machine to make final modifications to the function
  // before the frame layout is finalized.
  TFI->processFunctionBeforeFrameFinalized(Fn, RS);

  // Calculate actual frame offsets for all abstract stack objects...
  calculateFrameObjectOffsets(Fn);

  // Add prolog and epilog code to the function.  This function is required
  // to align the stack frame as necessary for any stack variables or
  // called functions.  Because of this, calculateCalleeSavedRegisters()
  // must be called before this function in order to set the AdjustsStack
  // and MaxCallFrameSize variables.
  if (!F->hasFnAttribute(Attribute::Naked))
    insertPrologEpilogCode(Fn);

  // Replace all MO_FrameIndex operands with physical register references
  // and actual offsets.
  //
  replaceFrameIndices(Fn);

  // If register scavenging is needed, as we've enabled doing it as a
  // post-pass, scavenge the virtual registers that frame index elimination
  // inserted.
  if (TRI->requiresRegisterScavenging(Fn) && FrameIndexVirtualScavenging) {
    ScavengeFrameVirtualRegs(Fn, *RS);

    // Clear any vregs created by virtual scavenging.
    Fn.getRegInfo().clearVirtRegs();
  }

  // Warn if the stack size exceeds the given limit.
  MachineFrameInfo &MFI = Fn.getFrameInfo();
  uint64_t StackSize = MFI.getStackSize();
  if (WarnStackSize.getNumOccurrences() > 0 && WarnStackSize < StackSize) {
    DiagnosticInfoStackSize DiagStackSize(*F, StackSize);
    F->getContext().diagnose(DiagStackSize);
  }

  delete RS;
  SaveBlocks.clear();
  RestoreBlocks.clear();
  MFI.setSavePoint(nullptr);
  MFI.setRestorePoint(nullptr);
  return true;
}

/// Calculate the MaxCallFrameSize and AdjustsStack
/// variables for the function's frame information and eliminate call frame
/// pseudo instructions.
void PEI::calculateCallFrameInfo(MachineFunction &Fn) {
  const TargetInstrInfo &TII = *Fn.getSubtarget().getInstrInfo();
  const TargetFrameLowering *TFI = Fn.getSubtarget().getFrameLowering();
  MachineFrameInfo &MFI = Fn.getFrameInfo();

  unsigned MaxCallFrameSize = 0;
  bool AdjustsStack = MFI.adjustsStack();

  // Get the function call frame set-up and tear-down instruction opcode
  unsigned FrameSetupOpcode = TII.getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = TII.getCallFrameDestroyOpcode();

  // Early exit for targets which have no call frame setup/destroy pseudo
  // instructions.
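  // A target that defines neither pseudo reports ~0u for both opcodes.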
  if (FrameSetupOpcode == ~0u && FrameDestroyOpcode == ~0u)
    return;

  std::vector<MachineBasicBlock::iterator> FrameSDOps;
  for (MachineFunction::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB)
    for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ++I)
      if (TII.isFrameInstr(*I)) {
        unsigned Size = TII.getFrameSize(*I);
        if (Size > MaxCallFrameSize) MaxCallFrameSize = Size;
        AdjustsStack = true;
        FrameSDOps.push_back(I);
      } else if (I->isInlineAsm()) {
        // Some inline asm's need a stack frame, as indicated by operand 1.
        unsigned ExtraInfo = I->getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
        if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
          AdjustsStack = true;
      }

  assert(!MFI.isMaxCallFrameSizeComputed() ||
         (MFI.getMaxCallFrameSize() == MaxCallFrameSize &&
          MFI.adjustsStack() == AdjustsStack));
  MFI.setAdjustsStack(AdjustsStack);
  MFI.setMaxCallFrameSize(MaxCallFrameSize);

  for (std::vector<MachineBasicBlock::iterator>::iterator
         i = FrameSDOps.begin(), e = FrameSDOps.end(); i != e; ++i) {
    MachineBasicBlock::iterator I = *i;

    // If call frames are not being included as part of the stack frame, and
    // the target doesn't indicate otherwise, remove the call frame pseudos
    // here. The sub/add sp instruction pairs are still inserted, but we don't
    // need to track the SP adjustment for frame index elimination.
    if (TFI->canSimplifyCallFramePseudos(Fn))
      TFI->eliminateCallFramePseudoInstr(Fn, *I->getParent(), I);
  }
}

/// Compute the sets of entry and return blocks for saving and restoring
/// callee-saved registers, and placing prolog and epilog code.
void PEI::calculateSaveRestoreBlocks(MachineFunction &Fn) {
  const MachineFrameInfo &MFI = Fn.getFrameInfo();

  // Even when we do not change any CSR, we still want to insert the
  // prologue and epilogue of the function.
  // So set the save points for those.

  // Use the points found by shrink-wrapping, if any.
  if (MFI.getSavePoint()) {
    SaveBlocks.push_back(MFI.getSavePoint());
    assert(MFI.getRestorePoint() && "Both restore and save must be set");
    MachineBasicBlock *RestoreBlock = MFI.getRestorePoint();
    // If RestoreBlock does not have any successor and is not a return block
    // then the end point is unreachable and we do not need to insert any
    // epilogue.
    if (!RestoreBlock->succ_empty() || RestoreBlock->isReturnBlock())
      RestoreBlocks.push_back(RestoreBlock);
    return;
  }

  // Save refs to entry and return blocks.
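  // The prologue goes in the entry block and in every EH funclet entry
  // block; an epilogue goes in every return block.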
  SaveBlocks.push_back(&Fn.front());
  for (MachineBasicBlock &MBB : Fn) {
    if (MBB.isEHFuncletEntry())
      SaveBlocks.push_back(&MBB);
    if (MBB.isReturnBlock())
      RestoreBlocks.push_back(&MBB);
  }
}

static void assignCalleeSavedSpillSlots(MachineFunction &F,
                                        const BitVector &SavedRegs,
                                        unsigned &MinCSFrameIndex,
                                        unsigned &MaxCSFrameIndex) {
  if (SavedRegs.empty())
    return;

  const TargetRegisterInfo *RegInfo = F.getSubtarget().getRegisterInfo();
  const MCPhysReg *CSRegs = F.getRegInfo().getCalleeSavedRegs();

  std::vector<CalleeSavedInfo> CSI;
  for (unsigned i = 0; CSRegs[i]; ++i) {
    unsigned Reg = CSRegs[i];
    if (SavedRegs.test(Reg))
      CSI.push_back(CalleeSavedInfo(Reg));
  }

  const TargetFrameLowering *TFI = F.getSubtarget().getFrameLowering();
  MachineFrameInfo &MFI = F.getFrameInfo();
  if (!TFI->assignCalleeSavedSpillSlots(F, RegInfo, CSI)) {
    // If target doesn't implement this, use generic code.

    if (CSI.empty())
      return; // Early exit if no callee saved registers are modified!

    unsigned NumFixedSpillSlots;
    const TargetFrameLowering::SpillSlot *FixedSpillSlots =
        TFI->getCalleeSavedSpillSlots(NumFixedSpillSlots);

    // Now that we know which registers need to be saved and restored, allocate
    // stack slots for them.
    for (auto &CS : CSI) {
      unsigned Reg = CS.getReg();
      const TargetRegisterClass *RC = RegInfo->getMinimalPhysRegClass(Reg);

      int FrameIdx;
      if (RegInfo->hasReservedSpillSlot(F, Reg, FrameIdx)) {
        CS.setFrameIdx(FrameIdx);
        continue;
      }

      // Check to see if this physreg must be spilled to a particular stack
      // slot on this target.
      const TargetFrameLowering::SpillSlot *FixedSlot = FixedSpillSlots;
      while (FixedSlot != FixedSpillSlots + NumFixedSpillSlots &&
             FixedSlot->Reg != Reg)
        ++FixedSlot;

      unsigned Size = RegInfo->getSpillSize(*RC);
      if (FixedSlot == FixedSpillSlots + NumFixedSpillSlots) {
        // Nope, just spill it anywhere convenient.
        unsigned Align = RegInfo->getSpillAlignment(*RC);
        unsigned StackAlign = TFI->getStackAlignment();

        // We may not be able to satisfy the desired alignment specification of
        // the TargetRegisterClass if the stack alignment is smaller. Use the
        // min.
        Align = std::min(Align, StackAlign);
        FrameIdx = MFI.CreateStackObject(Size, Align, true);
        if ((unsigned)FrameIdx < MinCSFrameIndex) MinCSFrameIndex = FrameIdx;
        if ((unsigned)FrameIdx > MaxCSFrameIndex) MaxCSFrameIndex = FrameIdx;
      } else {
        // Spill it to the stack where we must.
        FrameIdx = MFI.CreateFixedSpillStackObject(Size, FixedSlot->Offset);
      }

      CS.setFrameIdx(FrameIdx);
    }
  }

  MFI.setCalleeSavedInfo(CSI);
}

/// Helper function to update the liveness information for the callee-saved
/// registers.
static void updateLiveness(MachineFunction &MF) {
  MachineFrameInfo &MFI = MF.getFrameInfo();
  // Visited will contain all the basic blocks that are in the region
  // where the callee saved registers are alive:
  // - Anything that is not Save or Restore -> LiveThrough.
  // - Save -> LiveIn.
  // - Restore -> LiveOut.
  // The live-out is not attached to the block, so no need to keep
  // Restore in this set.
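  // Flood-fill the CFG from the entry block and from Restore, stopping at
  // Save; the blocks collected this way are the ones that need the CSRs
  // marked as live-in.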
  SmallPtrSet<MachineBasicBlock *, 8> Visited;
  SmallVector<MachineBasicBlock *, 8> WorkList;
  MachineBasicBlock *Entry = &MF.front();
  MachineBasicBlock *Save = MFI.getSavePoint();

  if (!Save)
    Save = Entry;

  if (Entry != Save) {
    WorkList.push_back(Entry);
    Visited.insert(Entry);
  }
  Visited.insert(Save);

  MachineBasicBlock *Restore = MFI.getRestorePoint();
  if (Restore)
    // By construction Restore cannot be visited, otherwise it
    // means there exists a path to Restore that does not go
    // through Save.
    WorkList.push_back(Restore);

  while (!WorkList.empty()) {
    const MachineBasicBlock *CurBB = WorkList.pop_back_val();
    // By construction, the region that is after the save point is
    // dominated by the Save and post-dominated by the Restore.
    if (CurBB == Save && Save != Restore)
      continue;
    // Enqueue all the successors not already visited.
    // Those are by construction either before Save or after Restore.
    for (MachineBasicBlock *SuccBB : CurBB->successors())
      if (Visited.insert(SuccBB).second)
        WorkList.push_back(SuccBB);
  }

  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();

  MachineRegisterInfo &MRI = MF.getRegInfo();
  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    for (MachineBasicBlock *MBB : Visited) {
      MCPhysReg Reg = CSI[i].getReg();
      // Add the callee-saved register as live-in.
      // It's killed at the spill.
      if (!MRI.isReserved(Reg) && !MBB->isLiveIn(Reg))
        MBB->addLiveIn(Reg);
    }
  }
}

/// insertCSRSpillsAndRestores - Insert spill and restore code for
/// callee saved registers used in the function.
///
static void insertCSRSpillsAndRestores(MachineFunction &Fn,
                                       const MBBVector &SaveBlocks,
                                       const MBBVector &RestoreBlocks) {
  // Get callee saved register information.
  MachineFrameInfo &MFI = Fn.getFrameInfo();
  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();

  MFI.setCalleeSavedInfoValid(true);

  // Early exit if no callee saved registers are modified!
  if (CSI.empty())
    return;

  const TargetInstrInfo &TII = *Fn.getSubtarget().getInstrInfo();
  const TargetFrameLowering *TFI = Fn.getSubtarget().getFrameLowering();
  const TargetRegisterInfo *TRI = Fn.getSubtarget().getRegisterInfo();
  MachineBasicBlock::iterator I;

  // Spill using target interface.
  for (MachineBasicBlock *SaveBlock : SaveBlocks) {
    I = SaveBlock->begin();
    if (!TFI->spillCalleeSavedRegisters(*SaveBlock, I, CSI, TRI)) {
      for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
        // Insert the spill to the stack frame.
        unsigned Reg = CSI[i].getReg();
        const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
        TII.storeRegToStackSlot(*SaveBlock, I, Reg, true, CSI[i].getFrameIdx(),
                                RC, TRI);
      }
    }
    // Update the live-in information of all the blocks up to the save point.
    updateLiveness(Fn);
  }

  // Restore using target interface.
  for (MachineBasicBlock *MBB : RestoreBlocks) {
    I = MBB->end();

    // Skip over all terminator instructions, which are part of the return
    // sequence.
    MachineBasicBlock::iterator I2 = I;
    while (I2 != MBB->begin() && (--I2)->isTerminator())
      I = I2;

    bool AtStart = I == MBB->begin();
    MachineBasicBlock::iterator BeforeI = I;
    if (!AtStart)
      --BeforeI;

    // Restore all registers immediately before the return and any
    // terminators that precede it.
    if (!TFI->restoreCalleeSavedRegisters(*MBB, I, CSI, TRI)) {
      for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
        unsigned Reg = CSI[i].getReg();
        const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
        TII.loadRegFromStackSlot(*MBB, I, Reg, CSI[i].getFrameIdx(), RC, TRI);
        assert(I != MBB->begin() &&
               "loadRegFromStackSlot didn't insert any code!");
        // Insert in reverse order.  loadRegFromStackSlot can insert
        // multiple instructions.
        if (AtStart)
          I = MBB->begin();
        else {
          I = BeforeI;
          ++I;
        }
      }
    }
  }
}

static void doSpillCalleeSavedRegs(MachineFunction &Fn, RegScavenger *RS,
                                   unsigned &MinCSFrameIndex,
                                   unsigned &MaxCSFrameIndex,
                                   const MBBVector &SaveBlocks,
                                   const MBBVector &RestoreBlocks) {
  const Function *F = Fn.getFunction();
  const TargetFrameLowering *TFI = Fn.getSubtarget().getFrameLowering();
  MinCSFrameIndex = std::numeric_limits<unsigned>::max();
  MaxCSFrameIndex = 0;

  // Determine which of the registers in the callee save list should be saved.
  BitVector SavedRegs;
  TFI->determineCalleeSaves(Fn, SavedRegs, RS);

  // Assign stack slots for any callee-saved registers that must be spilled.
  assignCalleeSavedSpillSlots(Fn, SavedRegs, MinCSFrameIndex, MaxCSFrameIndex);

  // Add the code to save and restore the callee saved registers.
  if (!F->hasFnAttribute(Attribute::Naked))
    insertCSRSpillsAndRestores(Fn, SaveBlocks, RestoreBlocks);
}

/// AdjustStackOffset - Helper function used to adjust the stack frame offset.
static inline void
AdjustStackOffset(MachineFrameInfo &MFI, int FrameIdx,
                  bool StackGrowsDown, int64_t &Offset,
                  unsigned &MaxAlign, unsigned Skew) {
  // If the stack grows down, add the object size to find the lowest address.
  if (StackGrowsDown)
    Offset += MFI.getObjectSize(FrameIdx);

  unsigned Align = MFI.getObjectAlignment(FrameIdx);

  // If the alignment of this object is greater than that of the stack, then
  // increase the stack alignment to match.
  MaxAlign = std::max(MaxAlign, Align);

  // Adjust to alignment boundary.
  Offset = alignTo(Offset, Align, Skew);

  if (StackGrowsDown) {
    DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << -Offset << "]\n");
    MFI.setObjectOffset(FrameIdx, -Offset); // Set the computed offset
  } else {
    DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << Offset << "]\n");
    MFI.setObjectOffset(FrameIdx, Offset);
    Offset += MFI.getObjectSize(FrameIdx);
  }
}

/// Compute which bytes of fixed and callee-save stack area are unused and keep
/// track of them in StackBytesFree.
///
static inline void
computeFreeStackSlots(MachineFrameInfo &MFI, bool StackGrowsDown,
                      unsigned MinCSFrameIndex, unsigned MaxCSFrameIndex,
                      int64_t FixedCSEnd, BitVector &StackBytesFree) {
  // Avoid undefined int64_t -> int conversion below in extreme case.
  if (FixedCSEnd > std::numeric_limits<int>::max())
    return;

  StackBytesFree.resize(FixedCSEnd, true);

  SmallVector<int, 16> AllocatedFrameSlots;
  // Add fixed objects.
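  // Fixed objects have negative frame indices, so walk from
  // getObjectIndexBegin() up to (but not including) index 0.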
  for (int i = MFI.getObjectIndexBegin(); i != 0; ++i)
    AllocatedFrameSlots.push_back(i);
  // Add callee-save objects.
  for (int i = MinCSFrameIndex; i <= (int)MaxCSFrameIndex; ++i)
    AllocatedFrameSlots.push_back(i);

  for (int i : AllocatedFrameSlots) {
    // These are converted from int64_t, but they should always fit in int
    // because of the FixedCSEnd check above.
    int ObjOffset = MFI.getObjectOffset(i);
    int ObjSize = MFI.getObjectSize(i);
    int ObjStart, ObjEnd;
    if (StackGrowsDown) {
      // ObjOffset is negative when StackGrowsDown is true.
      ObjStart = -ObjOffset - ObjSize;
      ObjEnd = -ObjOffset;
    } else {
      ObjStart = ObjOffset;
      ObjEnd = ObjOffset + ObjSize;
    }
    // Ignore fixed holes that are in the previous stack frame.
    if (ObjEnd > 0)
      StackBytesFree.reset(ObjStart, ObjEnd);
  }
}

/// Assign frame object to an unused portion of the stack in the fixed stack
/// object range.  Return true if the allocation was successful.
///
static inline bool scavengeStackSlot(MachineFrameInfo &MFI, int FrameIdx,
                                     bool StackGrowsDown, unsigned MaxAlign,
                                     BitVector &StackBytesFree) {
  if (MFI.isVariableSizedObjectIndex(FrameIdx))
    return false;

  if (StackBytesFree.none()) {
    // Clear it to speed up later scavengeStackSlot calls to
    // StackBytesFree.none().
    StackBytesFree.clear();
    return false;
  }

  unsigned ObjAlign = MFI.getObjectAlignment(FrameIdx);
  if (ObjAlign > MaxAlign)
    return false;

  int64_t ObjSize = MFI.getObjectSize(FrameIdx);
  int FreeStart;
  for (FreeStart = StackBytesFree.find_first(); FreeStart != -1;
       FreeStart = StackBytesFree.find_next(FreeStart)) {

    // Check that free space has suitable alignment.
    unsigned ObjStart = StackGrowsDown ? FreeStart + ObjSize : FreeStart;
    if (alignTo(ObjStart, ObjAlign) != ObjStart)
      continue;

    if (FreeStart + ObjSize > StackBytesFree.size())
      return false;

    bool AllBytesFree = true;
    for (unsigned Byte = 0; Byte < ObjSize; ++Byte)
      if (!StackBytesFree.test(FreeStart + Byte)) {
        AllBytesFree = false;
        break;
      }
    if (AllBytesFree)
      break;
  }

  if (FreeStart == -1)
    return false;

  if (StackGrowsDown) {
    int ObjStart = -(FreeStart + ObjSize);
    DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") scavenged at SP[" << ObjStart
                 << "]\n");
    MFI.setObjectOffset(FrameIdx, ObjStart);
  } else {
    DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") scavenged at SP["
                 << FreeStart << "]\n");
    MFI.setObjectOffset(FrameIdx, FreeStart);
  }

  StackBytesFree.reset(FreeStart, FreeStart + ObjSize);
  return true;
}

/// AssignProtectedObjSet - Helper function to assign large stack objects (i.e.,
/// those required to be close to the Stack Protector) to stack offsets.
static void
AssignProtectedObjSet(const StackObjSet &UnassignedObjs,
                      SmallSet<int, 16> &ProtectedObjs,
                      MachineFrameInfo &MFI, bool StackGrowsDown,
                      int64_t &Offset, unsigned &MaxAlign, unsigned Skew) {

  for (StackObjSet::const_iterator I = UnassignedObjs.begin(),
                                   E = UnassignedObjs.end(); I != E; ++I) {
    int i = *I;
    AdjustStackOffset(MFI, i, StackGrowsDown, Offset, MaxAlign, Skew);
    ProtectedObjs.insert(i);
  }
}

/// calculateFrameObjectOffsets - Calculate actual frame offsets for all of the
/// abstract stack objects.
///
void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
  const TargetFrameLowering &TFI = *Fn.getSubtarget().getFrameLowering();
  StackProtector *SP = &getAnalysis<StackProtector>();

  bool StackGrowsDown =
      TFI.getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;

  // Loop over all of the stack objects, assigning sequential addresses...
  MachineFrameInfo &MFI = Fn.getFrameInfo();

  // Start at the beginning of the local area.
  // The Offset is the distance from the stack top in the direction
  // of stack growth -- so it's always nonnegative.
  int LocalAreaOffset = TFI.getOffsetOfLocalArea();
  if (StackGrowsDown)
    LocalAreaOffset = -LocalAreaOffset;
  assert(LocalAreaOffset >= 0
         && "Local area offset should be in direction of stack growth");
  int64_t Offset = LocalAreaOffset;

  // Skew to be applied to alignment.
  unsigned Skew = TFI.getStackAlignmentSkew(Fn);

  // If there are fixed sized objects that are preallocated in the local area,
  // non-fixed objects can't be allocated right at the start of local area.
  // Adjust 'Offset' to point to the end of last fixed sized preallocated
  // object.
  for (int i = MFI.getObjectIndexBegin(); i != 0; ++i) {
    int64_t FixedOff;
    if (StackGrowsDown) {
      // The maximum distance from the stack pointer is at the lower address
      // of the object -- which is given by offset. For a downward-growing
      // stack the offset is negative, so we negate it to get the distance.
      FixedOff = -MFI.getObjectOffset(i);
    } else {
      // The maximum distance from the stack pointer is at the upper
      // address of the object.
      FixedOff = MFI.getObjectOffset(i) + MFI.getObjectSize(i);
    }
    if (FixedOff > Offset) Offset = FixedOff;
  }

  // First assign frame offsets to stack objects that are used to spill
  // callee saved registers.
  if (StackGrowsDown) {
    for (unsigned i = MinCSFrameIndex; i <= MaxCSFrameIndex; ++i) {
      // If the stack grows down, we need to add the size to find the lowest
      // address of the object.
      Offset += MFI.getObjectSize(i);

      unsigned Align = MFI.getObjectAlignment(i);
      // Adjust to alignment boundary
      Offset = alignTo(Offset, Align, Skew);

      DEBUG(dbgs() << "alloc FI(" << i << ") at SP[" << -Offset << "]\n");
      MFI.setObjectOffset(i, -Offset); // Set the computed offset
    }
  } else if (MaxCSFrameIndex >= MinCSFrameIndex) {
    // Be careful about underflow in comparisons against MinCSFrameIndex.
    for (unsigned i = MaxCSFrameIndex; i != MinCSFrameIndex - 1; --i) {
      if (MFI.isDeadObjectIndex(i))
        continue;

      unsigned Align = MFI.getObjectAlignment(i);
      // Adjust to alignment boundary
      Offset = alignTo(Offset, Align, Skew);

      DEBUG(dbgs() << "alloc FI(" << i << ") at SP[" << Offset << "]\n");
      MFI.setObjectOffset(i, Offset);
      Offset += MFI.getObjectSize(i);
    }
  }

  // FixedCSEnd is the stack offset to the end of the fixed and callee-save
  // stack area.
  int64_t FixedCSEnd = Offset;
  unsigned MaxAlign = MFI.getMaxAlignment();

  // Make sure the special register scavenging spill slot is closest to the
  // incoming stack pointer if a frame pointer is required and is closer
  // to the incoming rather than the final stack pointer.
  const TargetRegisterInfo *RegInfo = Fn.getSubtarget().getRegisterInfo();
  bool EarlyScavengingSlots = (TFI.hasFP(Fn) &&
                               TFI.isFPCloseToIncomingSP() &&
                               RegInfo->useFPForScavengingIndex(Fn) &&
                               !RegInfo->needsStackRealignment(Fn));
  if (RS && EarlyScavengingSlots) {
    SmallVector<int, 2> SFIs;
    RS->getScavengingFrameIndices(SFIs);
    for (SmallVectorImpl<int>::iterator I = SFIs.begin(),
                                        IE = SFIs.end(); I != IE; ++I)
      AdjustStackOffset(MFI, *I, StackGrowsDown, Offset, MaxAlign, Skew);
  }

  // FIXME: Once this is working, the enable flag will change to a target
  // check for whether the frame is large enough to want to use virtual
  // frame index registers. Functions which don't want/need this optimization
  // will continue to use the existing code path.
  if (MFI.getUseLocalStackAllocationBlock()) {
    unsigned Align = MFI.getLocalFrameMaxAlign();

    // Adjust to alignment boundary.
    Offset = alignTo(Offset, Align, Skew);

    DEBUG(dbgs() << "Local frame base offset: " << Offset << "\n");

    // Resolve offsets for objects in the local block.
    for (unsigned i = 0, e = MFI.getLocalFrameObjectCount(); i != e; ++i) {
      std::pair<int, int64_t> Entry = MFI.getLocalFrameObjectMap(i);
      int64_t FIOffset = (StackGrowsDown ? -Offset : Offset) + Entry.second;
      DEBUG(dbgs() << "alloc FI(" << Entry.first << ") at SP["
                   << FIOffset << "]\n");
      MFI.setObjectOffset(Entry.first, FIOffset);
    }
    // Allocate the local block
    Offset += MFI.getLocalFrameSize();

    MaxAlign = std::max(Align, MaxAlign);
  }

  // Retrieve the Exception Handler registration node.
  int EHRegNodeFrameIndex = INT_MAX;
  if (const WinEHFuncInfo *FuncInfo = Fn.getWinEHFuncInfo())
    EHRegNodeFrameIndex = FuncInfo->EHRegNodeFrameIndex;

  // Make sure that the stack protector comes before the local variables on the
  // stack.
  SmallSet<int, 16> ProtectedObjs;
  if (MFI.getStackProtectorIndex() >= 0) {
    StackObjSet LargeArrayObjs;
    StackObjSet SmallArrayObjs;
    StackObjSet AddrOfObjs;

    AdjustStackOffset(MFI, MFI.getStackProtectorIndex(), StackGrowsDown,
                      Offset, MaxAlign, Skew);

    // Assign large stack objects first.
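    // Bucket every eligible frame object by its stack-protector layout kind;
    // the buckets are laid out below, with large arrays placed immediately
    // after the protector slot.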
    for (unsigned i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) {
      if (MFI.isObjectPreAllocated(i) &&
          MFI.getUseLocalStackAllocationBlock())
        continue;
      if (i >= MinCSFrameIndex && i <= MaxCSFrameIndex)
        continue;
      if (RS && RS->isScavengingFrameIndex((int)i))
        continue;
      if (MFI.isDeadObjectIndex(i))
        continue;
      if (MFI.getStackProtectorIndex() == (int)i ||
          EHRegNodeFrameIndex == (int)i)
        continue;

      switch (SP->getSSPLayout(MFI.getObjectAllocation(i))) {
      case StackProtector::SSPLK_None:
        continue;
      case StackProtector::SSPLK_SmallArray:
        SmallArrayObjs.insert(i);
        continue;
      case StackProtector::SSPLK_AddrOf:
        AddrOfObjs.insert(i);
        continue;
      case StackProtector::SSPLK_LargeArray:
        LargeArrayObjs.insert(i);
        continue;
      }
      llvm_unreachable("Unexpected SSPLayoutKind.");
    }

    AssignProtectedObjSet(LargeArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
                          Offset, MaxAlign, Skew);
    AssignProtectedObjSet(SmallArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
                          Offset, MaxAlign, Skew);
    AssignProtectedObjSet(AddrOfObjs, ProtectedObjs, MFI, StackGrowsDown,
                          Offset, MaxAlign, Skew);
  }

  SmallVector<int, 8> ObjectsToAllocate;

  // Then prepare to assign frame offsets to stack objects that are not used
  // to spill callee saved registers.
  for (unsigned i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) {
    if (MFI.isObjectPreAllocated(i) && MFI.getUseLocalStackAllocationBlock())
      continue;
    if (i >= MinCSFrameIndex && i <= MaxCSFrameIndex)
      continue;
    if (RS && RS->isScavengingFrameIndex((int)i))
      continue;
    if (MFI.isDeadObjectIndex(i))
      continue;
    if (MFI.getStackProtectorIndex() == (int)i ||
        EHRegNodeFrameIndex == (int)i)
      continue;
    if (ProtectedObjs.count(i))
      continue;

    // Add the objects that we need to allocate to our working set.
    ObjectsToAllocate.push_back(i);
  }

  // Allocate the EH registration node first if one is present.
  if (EHRegNodeFrameIndex != INT_MAX)
    AdjustStackOffset(MFI, EHRegNodeFrameIndex, StackGrowsDown, Offset,
                      MaxAlign, Skew);

  // Give the targets a chance to order the objects the way they like it.
  if (Fn.getTarget().getOptLevel() != CodeGenOpt::None &&
      Fn.getTarget().Options.StackSymbolOrdering)
    TFI.orderFrameObjects(Fn, ObjectsToAllocate);

  // Keep track of which bytes in the fixed and callee-save range are used so
  // we can use the holes when allocating later stack objects. Only do this if
  // the stack protector isn't being used, the target requests it, and we're
  // optimizing.
  BitVector StackBytesFree;
  if (!ObjectsToAllocate.empty() &&
      Fn.getTarget().getOptLevel() != CodeGenOpt::None &&
      MFI.getStackProtectorIndex() < 0 && TFI.enableStackSlotScavenging(Fn))
    computeFreeStackSlots(MFI, StackGrowsDown, MinCSFrameIndex,
                          MaxCSFrameIndex, FixedCSEnd, StackBytesFree);

  // Now walk the objects and actually assign base offsets to them.
  for (auto &Object : ObjectsToAllocate)
    if (!scavengeStackSlot(MFI, Object, StackGrowsDown, MaxAlign,
                           StackBytesFree))
      AdjustStackOffset(MFI, Object, StackGrowsDown, Offset, MaxAlign, Skew);

  // Make sure the special register scavenging spill slot is closest to the
  // stack pointer.
  if (RS && !EarlyScavengingSlots) {
    SmallVector<int, 2> SFIs;
    RS->getScavengingFrameIndices(SFIs);
    for (SmallVectorImpl<int>::iterator I = SFIs.begin(),
                                        IE = SFIs.end(); I != IE; ++I)
      AdjustStackOffset(MFI, *I, StackGrowsDown, Offset, MaxAlign, Skew);
  }

  if (!TFI.targetHandlesStackFrameRounding()) {
    // If we have reserved argument space for call sites in the function,
    // allocated immediately on entry to the current function, count it as
    // part of the overall stack size.
    if (MFI.adjustsStack() && TFI.hasReservedCallFrame(Fn))
      Offset += MFI.getMaxCallFrameSize();

    // Round up the size to a multiple of the alignment.  If the function has
    // any calls or alloca's, align to the target's StackAlignment value to
    // ensure that the callee's frame or the alloca data is suitably aligned;
    // otherwise, for leaf functions, align to the TransientStackAlignment
    // value.
    unsigned StackAlign;
    if (MFI.adjustsStack() || MFI.hasVarSizedObjects() ||
        (RegInfo->needsStackRealignment(Fn) && MFI.getObjectIndexEnd() != 0))
      StackAlign = TFI.getStackAlignment();
    else
      StackAlign = TFI.getTransientStackAlignment();

    // If the frame pointer is eliminated, all frame offsets will be relative
    // to SP, not FP.  Align to MaxAlign so this works.
    StackAlign = std::max(StackAlign, MaxAlign);
    Offset = alignTo(Offset, StackAlign, Skew);
  }

  // Update frame info to pretend that this is part of the stack...
  int64_t StackSize = Offset - LocalAreaOffset;
  MFI.setStackSize(StackSize);
  NumBytesStackSpace += StackSize;

  MachineOptimizationRemarkAnalysis R(
      DEBUG_TYPE, "StackSize", Fn.getFunction()->getSubprogram(), &Fn.front());
  R << ore::NV("NumStackBytes", static_cast<unsigned>(StackSize))
    << " stack bytes in function";
  ORE->emit(R);
}

/// insertPrologEpilogCode - Add prolog and epilog code to the function, and
/// emit any additional code required for stack probing, segmented stacks, and
/// the HiPE calling convention.
///
void PEI::insertPrologEpilogCode(MachineFunction &Fn) {
  const TargetFrameLowering &TFI = *Fn.getSubtarget().getFrameLowering();

  // Add prologue to the function...
  for (MachineBasicBlock *SaveBlock : SaveBlocks)
    TFI.emitPrologue(Fn, *SaveBlock);

  // Add epilogue to restore the callee-save registers in each exiting block.
  for (MachineBasicBlock *RestoreBlock : RestoreBlocks)
    TFI.emitEpilogue(Fn, *RestoreBlock);

  for (MachineBasicBlock *SaveBlock : SaveBlocks)
    TFI.inlineStackProbe(Fn, *SaveBlock);

  // Emit additional code that is required to support segmented stacks, if
  // we've been asked for it.  This, when linked with a runtime with support
  // for segmented stacks (libgcc is one), will result in allocating stack
  // space in small chunks instead of one large contiguous block.
  if (Fn.shouldSplitStack()) {
    for (MachineBasicBlock *SaveBlock : SaveBlocks)
      TFI.adjustForSegmentedStacks(Fn, *SaveBlock);
  }

  // Emit additional code that is required to explicitly handle the stack in
  // HiPE native code (if needed) when loaded in the Erlang/OTP runtime. The
  // approach is rather similar to that of Segmented Stacks, but it uses a
  // different conditional check and another BIF for allocating more stack
  // space.
  if (Fn.getFunction()->getCallingConv() == CallingConv::HiPE)
    for (MachineBasicBlock *SaveBlock : SaveBlocks)
      TFI.adjustForHiPEPrologue(Fn, *SaveBlock);
}

/// replaceFrameIndices - Replace all MO_FrameIndex operands with physical
/// register references and actual offsets.
///
void PEI::replaceFrameIndices(MachineFunction &Fn) {
  const TargetFrameLowering &TFI = *Fn.getSubtarget().getFrameLowering();
  if (!TFI.needsFrameIndexResolution(Fn)) return;

  // Store SPAdj at exit of a basic block.
  SmallVector<int, 8> SPState;
  SPState.resize(Fn.getNumBlockIDs());
  df_iterator_default_set<MachineBasicBlock*> Reachable;

  // Iterate over the reachable blocks in DFS order.
  for (auto DFI = df_ext_begin(&Fn, Reachable),
            DFE = df_ext_end(&Fn, Reachable);
       DFI != DFE; ++DFI) {
    int SPAdj = 0;
    // Check the exit state of the DFS stack predecessor.
    if (DFI.getPathLength() >= 2) {
      MachineBasicBlock *StackPred = DFI.getPath(DFI.getPathLength() - 2);
      assert(Reachable.count(StackPred) &&
             "DFS stack predecessor is already visited.\n");
      SPAdj = SPState[StackPred->getNumber()];
    }
    MachineBasicBlock *BB = *DFI;
    replaceFrameIndices(BB, Fn, SPAdj);
    SPState[BB->getNumber()] = SPAdj;
  }

  // Handle the unreachable blocks.
  for (auto &BB : Fn) {
    if (Reachable.count(&BB))
      // Already handled in DFS traversal.
      continue;
    int SPAdj = 0;
    replaceFrameIndices(&BB, Fn, SPAdj);
  }
}

void PEI::replaceFrameIndices(MachineBasicBlock *BB, MachineFunction &Fn,
                              int &SPAdj) {
  assert(Fn.getSubtarget().getRegisterInfo() &&
         "getRegisterInfo() must be implemented!");
  const TargetInstrInfo &TII = *Fn.getSubtarget().getInstrInfo();
  const TargetRegisterInfo &TRI = *Fn.getSubtarget().getRegisterInfo();
  const TargetFrameLowering *TFI = Fn.getSubtarget().getFrameLowering();

  if (RS && FrameIndexEliminationScavenging)
    RS->enterBasicBlock(*BB);

  bool InsideCallSequence = false;

  for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ) {

    if (TII.isFrameInstr(*I)) {
      InsideCallSequence = TII.isFrameSetup(*I);
      SPAdj += TII.getSPAdjust(*I);
      I = TFI->eliminateCallFramePseudoInstr(Fn, *BB, I);
      continue;
    }

    MachineInstr &MI = *I;
    bool DoIncr = true;
    bool DidFinishLoop = true;
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      if (!MI.getOperand(i).isFI())
        continue;

      // Frame indices in debug values are encoded in a target independent
      // way with simply the frame index and offset rather than any
      // target-specific addressing mode.
      if (MI.isDebugValue()) {
        assert(i == 0 && "Frame indices can only appear as the first "
                         "operand of a DBG_VALUE machine instruction");
        unsigned Reg;
        MachineOperand &Offset = MI.getOperand(1);
        Offset.setImm(
            Offset.getImm() +
            TFI->getFrameIndexReference(Fn, MI.getOperand(0).getIndex(), Reg));
        MI.getOperand(0).ChangeToRegister(Reg, false /*isDef*/);
        continue;
      }

      // TODO: This code should be commoned with the code for
      // PATCHPOINT. There's no good reason for the difference in
      // implementation other than historical accident.  The only
      // remaining difference is the unconditional use of the stack
      // pointer as the base register.
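      // A STATEPOINT's frame-index operand is followed by an immediate
      // offset operand; fold the resolved frame offset into that immediate
      // and rewrite the frame-index operand to the chosen base register.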
      if (MI.getOpcode() == TargetOpcode::STATEPOINT) {
        assert((!MI.isDebugValue() || i == 0) &&
               "Frame indices can only appear as the first operand of a "
               "DBG_VALUE machine instruction");
        unsigned Reg;
        MachineOperand &Offset = MI.getOperand(i + 1);
        int refOffset = TFI->getFrameIndexReferencePreferSP(
            Fn, MI.getOperand(i).getIndex(), Reg, /*IgnoreSPUpdates*/ false);
        Offset.setImm(Offset.getImm() + refOffset);
        MI.getOperand(i).ChangeToRegister(Reg, false /*isDef*/);
        continue;
      }

      // Some instructions (e.g. inline asm instructions) can have
      // multiple frame indices and/or cause eliminateFrameIndex
      // to insert more than one instruction. We need the register
      // scavenger to go through all of these instructions so that
      // it can update its register information. We keep the
      // iterator at the point before insertion so that we can
      // revisit them in full.
      bool AtBeginning = (I == BB->begin());
      if (!AtBeginning) --I;

      // If this instruction has a FrameIndex operand, we need to
      // use that target machine register info object to eliminate
      // it.
      TRI.eliminateFrameIndex(MI, SPAdj, i,
                              FrameIndexEliminationScavenging ? RS : nullptr);

      // Reset the iterator if we were at the beginning of the BB.
      if (AtBeginning) {
        I = BB->begin();
        DoIncr = false;
      }

      DidFinishLoop = false;
      break;
    }

    // If we are looking at a call sequence, we need to keep track of
    // the SP adjustment made by each instruction in the sequence.
    // This includes both the frame setup/destroy pseudos (handled above),
    // as well as other instructions that have side effects w.r.t. the SP.
    // Note that this must come after eliminateFrameIndex, because
    // if I itself referred to a frame index, we shouldn't count its own
    // adjustment.
    if (DidFinishLoop && InsideCallSequence)
      SPAdj += TII.getSPAdjust(MI);

    if (DoIncr && I != BB->end()) ++I;

    // Update register states.
    if (RS && FrameIndexEliminationScavenging && DidFinishLoop)
      RS->forward(MI);
  }
}