//===-- PrologEpilogInserter.cpp - Insert Prolog/Epilog code in function --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass is responsible for finalizing the function's frame layout, saving
// callee-saved registers, and emitting prolog & epilog code for the function.
//
// This pass must be run after register allocation. After this pass is
// executed, it is illegal to construct MO_FrameIndex operands.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
#include <climits>
#include <limits>

using namespace llvm;

#define DEBUG_TYPE "pei"

typedef SmallVector<MachineBasicBlock *, 4> MBBVector;
static void doSpillCalleeSavedRegs(MachineFunction &MF, RegScavenger *RS,
                                   unsigned &MinCSFrameIndex,
                                   unsigned &MaxCSFrameIndex,
                                   const MBBVector &SaveBlocks,
                                   const MBBVector &RestoreBlocks);

static void doScavengeFrameVirtualRegs(MachineFunction &MF, RegScavenger *RS);

namespace {
class PEI : public MachineFunctionPass {
public:
  static char ID;
  explicit PEI(const TargetMachine *TM = nullptr) : MachineFunctionPass(ID) {
    initializePEIPass(*PassRegistry::getPassRegistry());

    // Targets that do not use physical registers for PEI install no-op
    // callbacks for CSR spilling and frame index scavenging.
    if (TM && (!TM->usesPhysRegsForPEI())) {
      SpillCalleeSavedRegisters = [](MachineFunction &, RegScavenger *,
                                     unsigned &, unsigned &, const MBBVector &,
                                     const MBBVector &) {};
      ScavengeFrameVirtualRegs = [](MachineFunction &, RegScavenger *) {};
    } else {
      SpillCalleeSavedRegisters = doSpillCalleeSavedRegs;
      ScavengeFrameVirtualRegs = doScavengeFrameVirtualRegs;
      UsesCalleeSaves = true;
    }
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  MachineFunctionProperties getRequiredProperties() const override {
    MachineFunctionProperties MFP;
    if (UsesCalleeSaves)
      MFP.set(MachineFunctionProperties::Property::AllVRegsAllocated);
    return MFP;
  }

  /// runOnMachineFunction - Insert prolog/epilog code and replace abstract
  /// frame indexes with appropriate references.
  ///
  bool runOnMachineFunction(MachineFunction &Fn) override;

private:
  std::function<void(MachineFunction &MF, RegScavenger *RS,
                     unsigned &MinCSFrameIndex, unsigned &MaxCSFrameIndex,
                     const MBBVector &SaveBlocks,
                     const MBBVector &RestoreBlocks)>
      SpillCalleeSavedRegisters;
  std::function<void(MachineFunction &MF, RegScavenger *RS)>
      ScavengeFrameVirtualRegs;

  bool UsesCalleeSaves = false;

  RegScavenger *RS;

  // MinCSFrameIndex, MaxCSFrameIndex - Keeps the range of callee saved
  // stack frame indexes.
  unsigned MinCSFrameIndex = std::numeric_limits<unsigned>::max();
  unsigned MaxCSFrameIndex = 0;

  // Save and Restore blocks of the current function. Typically there is a
  // single save block, unless Windows EH funclets are involved.
  MBBVector SaveBlocks;
  MBBVector RestoreBlocks;

  // Flag to control whether to use the register scavenger to resolve
  // frame index materialization registers. Set according to
  // TRI->requiresFrameIndexScavenging() for the current function.
  bool FrameIndexVirtualScavenging;

  void calculateCallFrameInfo(MachineFunction &Fn);
  void calculateSaveRestoreBlocks(MachineFunction &Fn);

  void calculateFrameObjectOffsets(MachineFunction &Fn);
  void replaceFrameIndices(MachineFunction &Fn);
  void replaceFrameIndices(MachineBasicBlock *BB, MachineFunction &Fn,
                           int &SPAdj);
  void insertPrologEpilogCode(MachineFunction &Fn);
};
} // namespace

char PEI::ID = 0;
char &llvm::PrologEpilogCodeInserterID = PEI::ID;

static cl::opt<unsigned>
WarnStackSize("warn-stack-size", cl::Hidden, cl::init((unsigned)-1),
              cl::desc("Warn for stack size bigger than the given"
                       " number"));

INITIALIZE_TM_PASS_BEGIN(PEI, "prologepilog", "Prologue/Epilogue Insertion",
                         false, false)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(StackProtector)
INITIALIZE_TM_PASS_END(PEI, "prologepilog",
                       "Prologue/Epilogue Insertion & Frame Finalization",
                       false, false)

MachineFunctionPass *
llvm::createPrologEpilogInserterPass(const TargetMachine *TM) {
  return new PEI(TM);
}

STATISTIC(NumScavengedRegs, "Number of frame index regs scavenged");
STATISTIC(NumBytesStackSpace,
          "Number of bytes used for stack in all functions");

void PEI::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addPreserved<MachineLoopInfo>();
  AU.addPreserved<MachineDominatorTree>();
  AU.addRequired<StackProtector>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

/// StackObjSet - A set of stack object indexes
typedef SmallSetVector<int, 8> StackObjSet;

/// runOnMachineFunction - Insert prolog/epilog code and replace abstract
/// frame indexes with appropriate references.
///
bool PEI::runOnMachineFunction(MachineFunction &Fn) {
  const Function *F = Fn.getFunction();
  const TargetRegisterInfo *TRI = Fn.getSubtarget().getRegisterInfo();
  const TargetFrameLowering *TFI = Fn.getSubtarget().getFrameLowering();

  RS = TRI->requiresRegisterScavenging(Fn) ? new RegScavenger() : nullptr;
  FrameIndexVirtualScavenging = TRI->requiresFrameIndexScavenging(Fn);

  // Calculate the MaxCallFrameSize and AdjustsStack variables for the
  // function's frame information. Also eliminates call frame pseudo
  // instructions.
  calculateCallFrameInfo(Fn);

  // Determine placement of CSR spill/restore code and prolog/epilog code:
  // place all spills in the entry block, all restores in return blocks.
  calculateSaveRestoreBlocks(Fn);

  // Handle CSR spilling and restoring, for targets that need it.
  SpillCalleeSavedRegisters(Fn, RS, MinCSFrameIndex, MaxCSFrameIndex,
                            SaveBlocks, RestoreBlocks);

  // Allow the target machine to make final modifications to the function
  // before the frame layout is finalized.
  TFI->processFunctionBeforeFrameFinalized(Fn, RS);

  // Calculate actual frame offsets for all abstract stack objects...
  calculateFrameObjectOffsets(Fn);

  // Add prolog and epilog code to the function. This function is required
  // to align the stack frame as necessary for any stack variables or
  // called functions. Because of this, calculateCalleeSavedRegisters()
  // must be called before this function in order to set the AdjustsStack
  // and MaxCallFrameSize variables.
  if (!F->hasFnAttribute(Attribute::Naked))
    insertPrologEpilogCode(Fn);

  // Replace all MO_FrameIndex operands with physical register references
  // and actual offsets.
  //
  replaceFrameIndices(Fn);

  // If register scavenging is needed and we've enabled doing it as a
  // post-pass, scavenge the virtual registers that frame index elimination
  // inserted.
  if (TRI->requiresRegisterScavenging(Fn) && FrameIndexVirtualScavenging) {
    ScavengeFrameVirtualRegs(Fn, RS);

    // Clear any vregs created by virtual scavenging.
    Fn.getRegInfo().clearVirtRegs();
  }

  // Warn if the stack size exceeds the given limit.
  MachineFrameInfo &MFI = Fn.getFrameInfo();
  uint64_t StackSize = MFI.getStackSize();
  if (WarnStackSize.getNumOccurrences() > 0 && WarnStackSize < StackSize) {
    DiagnosticInfoStackSize DiagStackSize(*F, StackSize);
    F->getContext().diagnose(DiagStackSize);
  }

  delete RS;
  SaveBlocks.clear();
  RestoreBlocks.clear();
  MFI.setSavePoint(nullptr);
  MFI.setRestorePoint(nullptr);
  return true;
}

/// Calculate the MaxCallFrameSize and AdjustsStack
/// variables for the function's frame information and eliminate call frame
/// pseudo instructions.
void PEI::calculateCallFrameInfo(MachineFunction &Fn) {
  const TargetInstrInfo &TII = *Fn.getSubtarget().getInstrInfo();
  const TargetFrameLowering *TFI = Fn.getSubtarget().getFrameLowering();
  MachineFrameInfo &MFI = Fn.getFrameInfo();

  unsigned MaxCallFrameSize = 0;
  bool AdjustsStack = MFI.adjustsStack();

  // Get the function call frame set-up and tear-down instruction opcode
  unsigned FrameSetupOpcode = TII.getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = TII.getCallFrameDestroyOpcode();

  // Early exit for targets which have no call frame setup/destroy pseudo
  // instructions.
  if (FrameSetupOpcode == ~0u && FrameDestroyOpcode == ~0u)
    return;

  std::vector<MachineBasicBlock::iterator> FrameSDOps;
  for (MachineFunction::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB)
    for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ++I)
      if (I->getOpcode() == FrameSetupOpcode ||
          I->getOpcode() == FrameDestroyOpcode) {
        assert(I->getNumOperands() >= 1 && "Call Frame Setup/Destroy Pseudo"
               " instructions should have a single immediate argument!");
        unsigned Size = I->getOperand(0).getImm();
        if (Size > MaxCallFrameSize) MaxCallFrameSize = Size;
        AdjustsStack = true;
        FrameSDOps.push_back(I);
      } else if (I->isInlineAsm()) {
        // Some inline asm's need a stack frame, as indicated by operand 1.
        unsigned ExtraInfo = I->getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
        if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
          AdjustsStack = true;
      }

  MFI.setAdjustsStack(AdjustsStack);
  MFI.setMaxCallFrameSize(MaxCallFrameSize);

  for (std::vector<MachineBasicBlock::iterator>::iterator
         i = FrameSDOps.begin(), e = FrameSDOps.end(); i != e; ++i) {
    MachineBasicBlock::iterator I = *i;

    // If call frames are not being included as part of the stack frame, and
    // the target doesn't indicate otherwise, remove the call frame pseudos
    // here. The sub/add sp instruction pairs are still inserted, but we don't
    // need to track the SP adjustment for frame index elimination.
    if (TFI->canSimplifyCallFramePseudos(Fn))
      TFI->eliminateCallFramePseudoInstr(Fn, *I->getParent(), I);
  }
}

/// Compute the sets of entry and return blocks for saving and restoring
/// callee-saved registers, and placing prolog and epilog code.
void PEI::calculateSaveRestoreBlocks(MachineFunction &Fn) {
  const MachineFrameInfo &MFI = Fn.getFrameInfo();

  // Even when we do not change any CSR, we still want to insert the
  // prologue and epilogue of the function.
  // So set the save points for those.

  // Use the points found by shrink-wrapping, if any.
  if (MFI.getSavePoint()) {
    SaveBlocks.push_back(MFI.getSavePoint());
    assert(MFI.getRestorePoint() && "Both restore and save must be set");
    MachineBasicBlock *RestoreBlock = MFI.getRestorePoint();
    // If RestoreBlock does not have any successor and is not a return block
    // then the end point is unreachable and we do not need to insert any
    // epilogue.
    if (!RestoreBlock->succ_empty() || RestoreBlock->isReturnBlock())
      RestoreBlocks.push_back(RestoreBlock);
    return;
  }

  // Save refs to entry and return blocks.
  SaveBlocks.push_back(&Fn.front());
  for (MachineBasicBlock &MBB : Fn) {
    if (MBB.isEHFuncletEntry())
      SaveBlocks.push_back(&MBB);
    if (MBB.isReturnBlock())
      RestoreBlocks.push_back(&MBB);
  }
}

static void assignCalleeSavedSpillSlots(MachineFunction &F,
                                        const BitVector &SavedRegs,
                                        unsigned &MinCSFrameIndex,
                                        unsigned &MaxCSFrameIndex) {
  if (SavedRegs.empty())
    return;

  const TargetRegisterInfo *RegInfo = F.getSubtarget().getRegisterInfo();
  const MCPhysReg *CSRegs = RegInfo->getCalleeSavedRegs(&F);

  std::vector<CalleeSavedInfo> CSI;
  for (unsigned i = 0; CSRegs[i]; ++i) {
    unsigned Reg = CSRegs[i];
    if (SavedRegs.test(Reg))
      CSI.push_back(CalleeSavedInfo(Reg));
  }

  const TargetFrameLowering *TFI = F.getSubtarget().getFrameLowering();
  MachineFrameInfo &MFI = F.getFrameInfo();
  if (!TFI->assignCalleeSavedSpillSlots(F, RegInfo, CSI)) {
    // If target doesn't implement this, use generic code.

    if (CSI.empty())
      return; // Early exit if no callee saved registers are modified!

    unsigned NumFixedSpillSlots;
    const TargetFrameLowering::SpillSlot *FixedSpillSlots =
        TFI->getCalleeSavedSpillSlots(NumFixedSpillSlots);

    // Now that we know which registers need to be saved and restored, allocate
    // stack slots for them.
    for (auto &CS : CSI) {
      unsigned Reg = CS.getReg();
      const TargetRegisterClass *RC = RegInfo->getMinimalPhysRegClass(Reg);

      int FrameIdx;
      if (RegInfo->hasReservedSpillSlot(F, Reg, FrameIdx)) {
        CS.setFrameIdx(FrameIdx);
        continue;
      }

      // Check to see if this physreg must be spilled to a particular stack
      // slot on this target.
      const TargetFrameLowering::SpillSlot *FixedSlot = FixedSpillSlots;
      while (FixedSlot != FixedSpillSlots + NumFixedSpillSlots &&
             FixedSlot->Reg != Reg)
        ++FixedSlot;

      if (FixedSlot == FixedSpillSlots + NumFixedSpillSlots) {
        // Nope, just spill it anywhere convenient.
        unsigned Align = RC->getAlignment();
        unsigned StackAlign = TFI->getStackAlignment();

        // We may not be able to satisfy the desired alignment specification of
        // the TargetRegisterClass if the stack alignment is smaller. Use the
        // min.
        Align = std::min(Align, StackAlign);
        FrameIdx = MFI.CreateStackObject(RC->getSize(), Align, true);
        if ((unsigned)FrameIdx < MinCSFrameIndex) MinCSFrameIndex = FrameIdx;
        if ((unsigned)FrameIdx > MaxCSFrameIndex) MaxCSFrameIndex = FrameIdx;
      } else {
        // Spill it to the stack where we must.
        FrameIdx =
            MFI.CreateFixedSpillStackObject(RC->getSize(), FixedSlot->Offset);
      }

      CS.setFrameIdx(FrameIdx);
    }
  }

  MFI.setCalleeSavedInfo(CSI);
}

/// Helper function to update the liveness information for the callee-saved
/// registers.
static void updateLiveness(MachineFunction &MF) {
  MachineFrameInfo &MFI = MF.getFrameInfo();
  // Visited will contain all the basic blocks that are in the region
  // where the callee saved registers are alive:
  // - Anything that is not Save or Restore -> LiveThrough.
  // - Save -> LiveIn.
  // - Restore -> LiveOut.
  // The live-out is not attached to the block, so no need to keep
  // Restore in this set.
  SmallPtrSet<MachineBasicBlock *, 8> Visited;
  SmallVector<MachineBasicBlock *, 8> WorkList;
  MachineBasicBlock *Entry = &MF.front();
  MachineBasicBlock *Save = MFI.getSavePoint();

  if (!Save)
    Save = Entry;

  if (Entry != Save) {
    WorkList.push_back(Entry);
    Visited.insert(Entry);
  }
  Visited.insert(Save);

  MachineBasicBlock *Restore = MFI.getRestorePoint();
  if (Restore)
    // By construction Restore cannot be visited, otherwise it
    // means there exists a path to Restore that does not go
    // through Save.
    WorkList.push_back(Restore);

  while (!WorkList.empty()) {
    const MachineBasicBlock *CurBB = WorkList.pop_back_val();
    // By construction, the region that is after the save point is
    // dominated by the Save and post-dominated by the Restore.
    if (CurBB == Save && Save != Restore)
      continue;
    // Enqueue all the successors not already visited.
    // Those are by construction either before Save or after Restore.
    for (MachineBasicBlock *SuccBB : CurBB->successors())
      if (Visited.insert(SuccBB).second)
        WorkList.push_back(SuccBB);
  }

  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();

  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    for (MachineBasicBlock *MBB : Visited) {
      MCPhysReg Reg = CSI[i].getReg();
      // Add the callee-saved register as live-in.
      // It's killed at the spill.
      if (!MBB->isLiveIn(Reg))
        MBB->addLiveIn(Reg);
    }
  }
}

/// insertCSRSpillsAndRestores - Insert spill and restore code for
/// callee saved registers used in the function.
///
static void insertCSRSpillsAndRestores(MachineFunction &Fn,
                                       const MBBVector &SaveBlocks,
                                       const MBBVector &RestoreBlocks) {
  // Get callee saved register information.
  MachineFrameInfo &MFI = Fn.getFrameInfo();
  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();

  MFI.setCalleeSavedInfoValid(true);

  // Early exit if no callee saved registers are modified!
  if (CSI.empty())
    return;

  const TargetInstrInfo &TII = *Fn.getSubtarget().getInstrInfo();
  const TargetFrameLowering *TFI = Fn.getSubtarget().getFrameLowering();
  const TargetRegisterInfo *TRI = Fn.getSubtarget().getRegisterInfo();
  MachineBasicBlock::iterator I;

  // Spill using target interface.
  for (MachineBasicBlock *SaveBlock : SaveBlocks) {
    I = SaveBlock->begin();
    if (!TFI->spillCalleeSavedRegisters(*SaveBlock, I, CSI, TRI)) {
      for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
        // Insert the spill to the stack frame.
        unsigned Reg = CSI[i].getReg();
        const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
        TII.storeRegToStackSlot(*SaveBlock, I, Reg, true, CSI[i].getFrameIdx(),
                                RC, TRI);
      }
    }
    // Update the live-in information of all the blocks up to the save point.
    updateLiveness(Fn);
  }

  // Restore using target interface.
  for (MachineBasicBlock *MBB : RestoreBlocks) {
    I = MBB->end();

    // Skip over all terminator instructions, which are part of the return
    // sequence.
    MachineBasicBlock::iterator I2 = I;
    while (I2 != MBB->begin() && (--I2)->isTerminator())
      I = I2;

    bool AtStart = I == MBB->begin();
    MachineBasicBlock::iterator BeforeI = I;
    if (!AtStart)
      --BeforeI;

    // Restore all registers immediately before the return and any
    // terminators that precede it.
    if (!TFI->restoreCalleeSavedRegisters(*MBB, I, CSI, TRI)) {
      for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
        unsigned Reg = CSI[i].getReg();
        const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
        TII.loadRegFromStackSlot(*MBB, I, Reg, CSI[i].getFrameIdx(), RC, TRI);
        assert(I != MBB->begin() &&
               "loadRegFromStackSlot didn't insert any code!");
        // Insert in reverse order. loadRegFromStackSlot can insert
        // multiple instructions.
        if (AtStart)
          I = MBB->begin();
        else {
          I = BeforeI;
          ++I;
        }
      }
    }
  }
}

static void doSpillCalleeSavedRegs(MachineFunction &Fn, RegScavenger *RS,
                                   unsigned &MinCSFrameIndex,
                                   unsigned &MaxCSFrameIndex,
                                   const MBBVector &SaveBlocks,
                                   const MBBVector &RestoreBlocks) {
  const Function *F = Fn.getFunction();
  const TargetFrameLowering *TFI = Fn.getSubtarget().getFrameLowering();
  MinCSFrameIndex = std::numeric_limits<unsigned>::max();
  MaxCSFrameIndex = 0;

  // Determine which of the registers in the callee save list should be saved.
  BitVector SavedRegs;
  TFI->determineCalleeSaves(Fn, SavedRegs, RS);

  // Assign stack slots for any callee-saved registers that must be spilled.
  assignCalleeSavedSpillSlots(Fn, SavedRegs, MinCSFrameIndex, MaxCSFrameIndex);

  // Add the code to save and restore the callee saved registers.
  if (!F->hasFnAttribute(Attribute::Naked))
    insertCSRSpillsAndRestores(Fn, SaveBlocks, RestoreBlocks);
}

/// AdjustStackOffset - Helper function used to adjust the stack frame offset.
static inline void
AdjustStackOffset(MachineFrameInfo &MFI, int FrameIdx,
                  bool StackGrowsDown, int64_t &Offset,
                  unsigned &MaxAlign, unsigned Skew) {
  // If the stack grows down, add the object size to find the lowest address.
  if (StackGrowsDown)
    Offset += MFI.getObjectSize(FrameIdx);

  unsigned Align = MFI.getObjectAlignment(FrameIdx);

  // If the alignment of this object is greater than that of the stack, then
  // increase the stack alignment to match.
  MaxAlign = std::max(MaxAlign, Align);

  // Adjust to alignment boundary.
  Offset = alignTo(Offset, Align, Skew);

  if (StackGrowsDown) {
    DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << -Offset << "]\n");
    MFI.setObjectOffset(FrameIdx, -Offset); // Set the computed offset
  } else {
    DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << Offset << "]\n");
    MFI.setObjectOffset(FrameIdx, Offset);
    Offset += MFI.getObjectSize(FrameIdx);
  }
}

/// Compute which bytes of fixed and callee-save stack area are unused and keep
/// track of them in StackBytesFree.
///
static inline void
computeFreeStackSlots(MachineFrameInfo &MFI, bool StackGrowsDown,
                      unsigned MinCSFrameIndex, unsigned MaxCSFrameIndex,
                      int64_t FixedCSEnd, BitVector &StackBytesFree) {
  // Avoid undefined int64_t -> int conversion below in extreme case.
  if (FixedCSEnd > std::numeric_limits<int>::max())
    return;

  StackBytesFree.resize(FixedCSEnd, true);

  SmallVector<int, 16> AllocatedFrameSlots;
  // Add fixed objects.
  for (int i = MFI.getObjectIndexBegin(); i != 0; ++i)
    AllocatedFrameSlots.push_back(i);
  // Add callee-save objects.
  for (int i = MinCSFrameIndex; i <= (int)MaxCSFrameIndex; ++i)
    AllocatedFrameSlots.push_back(i);

  for (int i : AllocatedFrameSlots) {
    // These are converted from int64_t, but they should always fit in int
    // because of the FixedCSEnd check above.
    int ObjOffset = MFI.getObjectOffset(i);
    int ObjSize = MFI.getObjectSize(i);
    int ObjStart, ObjEnd;
    if (StackGrowsDown) {
      // ObjOffset is negative when StackGrowsDown is true.
      ObjStart = -ObjOffset - ObjSize;
      ObjEnd = -ObjOffset;
    } else {
      ObjStart = ObjOffset;
      ObjEnd = ObjOffset + ObjSize;
    }
    // Ignore fixed holes that are in the previous stack frame.
    if (ObjEnd > 0)
      StackBytesFree.reset(ObjStart, ObjEnd);
  }
}

/// Assign frame object to an unused portion of the stack in the fixed stack
/// object range. Return true if the allocation was successful.
///
static inline bool scavengeStackSlot(MachineFrameInfo &MFI, int FrameIdx,
                                     bool StackGrowsDown, unsigned MaxAlign,
                                     BitVector &StackBytesFree) {
  if (MFI.isVariableSizedObjectIndex(FrameIdx))
    return false;

  if (StackBytesFree.none()) {
    // clear it to speed up later scavengeStackSlot calls to
    // StackBytesFree.none()
    StackBytesFree.clear();
    return false;
  }

  unsigned ObjAlign = MFI.getObjectAlignment(FrameIdx);
  if (ObjAlign > MaxAlign)
    return false;

  int64_t ObjSize = MFI.getObjectSize(FrameIdx);
  int FreeStart;
  for (FreeStart = StackBytesFree.find_first(); FreeStart != -1;
       FreeStart = StackBytesFree.find_next(FreeStart)) {

    // Check that free space has suitable alignment.
    unsigned ObjStart = StackGrowsDown ? FreeStart + ObjSize : FreeStart;
    if (alignTo(ObjStart, ObjAlign) != ObjStart)
      continue;

    if (FreeStart + ObjSize > StackBytesFree.size())
      return false;

    bool AllBytesFree = true;
    for (unsigned Byte = 0; Byte < ObjSize; ++Byte)
      if (!StackBytesFree.test(FreeStart + Byte)) {
        AllBytesFree = false;
        break;
      }
    if (AllBytesFree)
      break;
  }

  if (FreeStart == -1)
    return false;

  if (StackGrowsDown) {
    int ObjStart = -(FreeStart + ObjSize);
    DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") scavenged at SP[" << ObjStart
                 << "]\n");
    MFI.setObjectOffset(FrameIdx, ObjStart);
  } else {
    DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") scavenged at SP[" << FreeStart
                 << "]\n");
    MFI.setObjectOffset(FrameIdx, FreeStart);
  }

  StackBytesFree.reset(FreeStart, FreeStart + ObjSize);
  return true;
}

/// AssignProtectedObjSet - Helper function to assign large stack objects (i.e.,
/// those required to be close to the Stack Protector) to stack offsets.
static void
AssignProtectedObjSet(const StackObjSet &UnassignedObjs,
                      SmallSet<int, 16> &ProtectedObjs,
                      MachineFrameInfo &MFI, bool StackGrowsDown,
                      int64_t &Offset, unsigned &MaxAlign, unsigned Skew) {

  for (StackObjSet::const_iterator I = UnassignedObjs.begin(),
         E = UnassignedObjs.end(); I != E; ++I) {
    int i = *I;
    AdjustStackOffset(MFI, i, StackGrowsDown, Offset, MaxAlign, Skew);
    ProtectedObjs.insert(i);
  }
}

/// calculateFrameObjectOffsets - Calculate actual frame offsets for all of the
/// abstract stack objects.
///
void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
  const TargetFrameLowering &TFI = *Fn.getSubtarget().getFrameLowering();
  StackProtector *SP = &getAnalysis<StackProtector>();

  bool StackGrowsDown =
      TFI.getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;

  // Loop over all of the stack objects, assigning sequential addresses...
  MachineFrameInfo &MFI = Fn.getFrameInfo();

  // Start at the beginning of the local area.
  // The Offset is the distance from the stack top in the direction
  // of stack growth -- so it's always nonnegative.
  int LocalAreaOffset = TFI.getOffsetOfLocalArea();
  if (StackGrowsDown)
    LocalAreaOffset = -LocalAreaOffset;
  assert(LocalAreaOffset >= 0
         && "Local area offset should be in direction of stack growth");
  int64_t Offset = LocalAreaOffset;

  // Skew to be applied to alignment.
  unsigned Skew = TFI.getStackAlignmentSkew(Fn);

  // If there are fixed sized objects that are preallocated in the local area,
  // non-fixed objects can't be allocated right at the start of local area.
  // Adjust 'Offset' to point to the end of last fixed sized preallocated
  // object.
  for (int i = MFI.getObjectIndexBegin(); i != 0; ++i) {
    int64_t FixedOff;
    if (StackGrowsDown) {
      // The maximum distance from the stack pointer is at lower address of
      // the object -- which is given by offset. For down growing stack
      // the offset is negative, so we negate the offset to get the distance.
      FixedOff = -MFI.getObjectOffset(i);
    } else {
      // The maximum distance from the start pointer is at the upper
      // address of the object.
      FixedOff = MFI.getObjectOffset(i) + MFI.getObjectSize(i);
    }
    if (FixedOff > Offset) Offset = FixedOff;
  }

  // First assign frame offsets to stack objects that are used to spill
  // callee saved registers.
  if (StackGrowsDown) {
    for (unsigned i = MinCSFrameIndex; i <= MaxCSFrameIndex; ++i) {
      // If the stack grows down, we need to add the size to find the lowest
      // address of the object.
      Offset += MFI.getObjectSize(i);

      unsigned Align = MFI.getObjectAlignment(i);
      // Adjust to alignment boundary
      Offset = alignTo(Offset, Align, Skew);

      DEBUG(dbgs() << "alloc FI(" << i << ") at SP[" << -Offset << "]\n");
      MFI.setObjectOffset(i, -Offset); // Set the computed offset
    }
  } else if (MaxCSFrameIndex >= MinCSFrameIndex) {
    // Be careful about underflow in comparisons against MinCSFrameIndex.
    for (unsigned i = MaxCSFrameIndex; i != MinCSFrameIndex - 1; --i) {
      unsigned Align = MFI.getObjectAlignment(i);
      // Adjust to alignment boundary
      Offset = alignTo(Offset, Align, Skew);

      DEBUG(dbgs() << "alloc FI(" << i << ") at SP[" << Offset << "]\n");
      MFI.setObjectOffset(i, Offset);
      Offset += MFI.getObjectSize(i);
    }
  }

  // FixedCSEnd is the stack offset to the end of the fixed and callee-save
  // stack area.
  int64_t FixedCSEnd = Offset;
  unsigned MaxAlign = MFI.getMaxAlignment();

  // Make sure the special register scavenging spill slot is closest to the
  // incoming stack pointer if a frame pointer is required and is closer
  // to the incoming rather than the final stack pointer.
  const TargetRegisterInfo *RegInfo = Fn.getSubtarget().getRegisterInfo();
  bool EarlyScavengingSlots = (TFI.hasFP(Fn) &&
                               TFI.isFPCloseToIncomingSP() &&
                               RegInfo->useFPForScavengingIndex(Fn) &&
                               !RegInfo->needsStackRealignment(Fn));
  if (RS && EarlyScavengingSlots) {
    SmallVector<int, 2> SFIs;
    RS->getScavengingFrameIndices(SFIs);
    for (SmallVectorImpl<int>::iterator I = SFIs.begin(),
           IE = SFIs.end(); I != IE; ++I)
      AdjustStackOffset(MFI, *I, StackGrowsDown, Offset, MaxAlign, Skew);
  }

  // FIXME: Once this is working, the enable flag will change to a target
  // check for whether the frame is large enough to want to use virtual
  // frame index registers. Functions which don't want/need this optimization
  // will continue to use the existing code path.
  if (MFI.getUseLocalStackAllocationBlock()) {
    unsigned Align = MFI.getLocalFrameMaxAlign();

    // Adjust to alignment boundary.
    Offset = alignTo(Offset, Align, Skew);

    DEBUG(dbgs() << "Local frame base offset: " << Offset << "\n");

    // Resolve offsets for objects in the local block.
    for (unsigned i = 0, e = MFI.getLocalFrameObjectCount(); i != e; ++i) {
      std::pair<int, int64_t> Entry = MFI.getLocalFrameObjectMap(i);
      int64_t FIOffset = (StackGrowsDown ? -Offset : Offset) + Entry.second;
      DEBUG(dbgs() << "alloc FI(" << Entry.first << ") at SP[" << FIOffset
                   << "]\n");
      MFI.setObjectOffset(Entry.first, FIOffset);
    }
    // Allocate the local block
    Offset += MFI.getLocalFrameSize();

    MaxAlign = std::max(Align, MaxAlign);
  }

  // Retrieve the Exception Handler registration node.
  int EHRegNodeFrameIndex = INT_MAX;
  if (const WinEHFuncInfo *FuncInfo = Fn.getWinEHFuncInfo())
    EHRegNodeFrameIndex = FuncInfo->EHRegNodeFrameIndex;

  // Make sure that the stack protector comes before the local variables on the
  // stack.
  SmallSet<int, 16> ProtectedObjs;
  if (MFI.getStackProtectorIndex() >= 0) {
    StackObjSet LargeArrayObjs;
    StackObjSet SmallArrayObjs;
    StackObjSet AddrOfObjs;

    AdjustStackOffset(MFI, MFI.getStackProtectorIndex(), StackGrowsDown,
                      Offset, MaxAlign, Skew);

    // Assign large stack objects first.
    for (unsigned i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) {
      if (MFI.isObjectPreAllocated(i) &&
          MFI.getUseLocalStackAllocationBlock())
        continue;
      if (i >= MinCSFrameIndex && i <= MaxCSFrameIndex)
        continue;
      if (RS && RS->isScavengingFrameIndex((int)i))
        continue;
      if (MFI.isDeadObjectIndex(i))
        continue;
      if (MFI.getStackProtectorIndex() == (int)i ||
          EHRegNodeFrameIndex == (int)i)
        continue;

      switch (SP->getSSPLayout(MFI.getObjectAllocation(i))) {
      case StackProtector::SSPLK_None:
        continue;
      case StackProtector::SSPLK_SmallArray:
        SmallArrayObjs.insert(i);
        continue;
      case StackProtector::SSPLK_AddrOf:
        AddrOfObjs.insert(i);
        continue;
      case StackProtector::SSPLK_LargeArray:
        LargeArrayObjs.insert(i);
        continue;
      }
      llvm_unreachable("Unexpected SSPLayoutKind.");
    }

    AssignProtectedObjSet(LargeArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
                          Offset, MaxAlign, Skew);
    AssignProtectedObjSet(SmallArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
                          Offset, MaxAlign, Skew);
    AssignProtectedObjSet(AddrOfObjs, ProtectedObjs, MFI, StackGrowsDown,
                          Offset, MaxAlign, Skew);
  }

  SmallVector<int, 8> ObjectsToAllocate;

  // Then prepare to assign frame offsets to stack objects that are not used to
  // spill callee saved registers.
  for (unsigned i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) {
    if (MFI.isObjectPreAllocated(i) && MFI.getUseLocalStackAllocationBlock())
      continue;
    if (i >= MinCSFrameIndex && i <= MaxCSFrameIndex)
      continue;
    if (RS && RS->isScavengingFrameIndex((int)i))
      continue;
    if (MFI.isDeadObjectIndex(i))
      continue;
    if (MFI.getStackProtectorIndex() == (int)i ||
        EHRegNodeFrameIndex == (int)i)
      continue;
    if (ProtectedObjs.count(i))
      continue;

    // Add the objects that we need to allocate to our working set.
    ObjectsToAllocate.push_back(i);
  }

  // Allocate the EH registration node first if one is present.
  if (EHRegNodeFrameIndex != INT_MAX)
    AdjustStackOffset(MFI, EHRegNodeFrameIndex, StackGrowsDown, Offset,
                      MaxAlign, Skew);

  // Give the targets a chance to order the objects the way they like it.
  if (Fn.getTarget().getOptLevel() != CodeGenOpt::None &&
      Fn.getTarget().Options.StackSymbolOrdering)
    TFI.orderFrameObjects(Fn, ObjectsToAllocate);

  // Keep track of which bytes in the fixed and callee-save range are used so
  // we can use the holes when allocating later stack objects. Only do this if
  // stack protector isn't being used and the target requests it and we're
  // optimizing.
  BitVector StackBytesFree;
  if (!ObjectsToAllocate.empty() &&
      Fn.getTarget().getOptLevel() != CodeGenOpt::None &&
      MFI.getStackProtectorIndex() < 0 && TFI.enableStackSlotScavenging(Fn))
    computeFreeStackSlots(MFI, StackGrowsDown, MinCSFrameIndex, MaxCSFrameIndex,
                          FixedCSEnd, StackBytesFree);

  // Now walk the objects and actually assign base offsets to them.
  for (auto &Object : ObjectsToAllocate)
    if (!scavengeStackSlot(MFI, Object, StackGrowsDown, MaxAlign,
                           StackBytesFree))
      AdjustStackOffset(MFI, Object, StackGrowsDown, Offset, MaxAlign, Skew);

  // Make sure the special register scavenging spill slot is closest to the
  // stack pointer.
  if (RS && !EarlyScavengingSlots) {
    SmallVector<int, 2> SFIs;
    RS->getScavengingFrameIndices(SFIs);
    for (SmallVectorImpl<int>::iterator I = SFIs.begin(),
           IE = SFIs.end(); I != IE; ++I)
      AdjustStackOffset(MFI, *I, StackGrowsDown, Offset, MaxAlign, Skew);
  }

  if (!TFI.targetHandlesStackFrameRounding()) {
    // If we have reserved argument space for call sites in the function
    // immediately on entry to the current function, count it as part of the
    // overall stack size.
    if (MFI.adjustsStack() && TFI.hasReservedCallFrame(Fn))
      Offset += MFI.getMaxCallFrameSize();

    // Round up the size to a multiple of the alignment. If the function has
    // any calls or alloca's, align to the target's StackAlignment value to
    // ensure that the callee's frame or the alloca data is suitably aligned;
    // otherwise, for leaf functions, align to the TransientStackAlignment
    // value.
    unsigned StackAlign;
    if (MFI.adjustsStack() || MFI.hasVarSizedObjects() ||
        (RegInfo->needsStackRealignment(Fn) && MFI.getObjectIndexEnd() != 0))
      StackAlign = TFI.getStackAlignment();
    else
      StackAlign = TFI.getTransientStackAlignment();

    // If the frame pointer is eliminated, all frame offsets will be relative to
    // SP not FP. Align to MaxAlign so this works.
    StackAlign = std::max(StackAlign, MaxAlign);
    Offset = alignTo(Offset, StackAlign, Skew);
  }

  // Update frame info to pretend that this is part of the stack...
  int64_t StackSize = Offset - LocalAreaOffset;
  MFI.setStackSize(StackSize);
  NumBytesStackSpace += StackSize;
}

/// insertPrologEpilogCode - Scan the function for modified callee saved
/// registers, insert spill code for these callee saved registers, then add
/// prolog and epilog code to the function.
///
void PEI::insertPrologEpilogCode(MachineFunction &Fn) {
  const TargetFrameLowering &TFI = *Fn.getSubtarget().getFrameLowering();

  // Add prologue to the function...
  for (MachineBasicBlock *SaveBlock : SaveBlocks)
    TFI.emitPrologue(Fn, *SaveBlock);

  // Add epilogue to restore the callee-save registers in each exiting block.
  for (MachineBasicBlock *RestoreBlock : RestoreBlocks)
    TFI.emitEpilogue(Fn, *RestoreBlock);

  for (MachineBasicBlock *SaveBlock : SaveBlocks)
    TFI.inlineStackProbe(Fn, *SaveBlock);

  // Emit additional code that is required to support segmented stacks, if
  // we've been asked for it. This, when linked with a runtime with support
  // for segmented stacks (libgcc is one), will result in allocating stack
  // space in small chunks instead of one large contiguous block.
  if (Fn.shouldSplitStack()) {
    for (MachineBasicBlock *SaveBlock : SaveBlocks)
      TFI.adjustForSegmentedStacks(Fn, *SaveBlock);
  }

  // Emit additional code that is required to explicitly handle the stack in
  // HiPE native code (if needed) when loaded in the Erlang/OTP runtime. The
  // approach is rather similar to that of Segmented Stacks, but it uses a
  // different conditional check and another BIF for allocating more stack
  // space.
  if (Fn.getFunction()->getCallingConv() == CallingConv::HiPE)
    for (MachineBasicBlock *SaveBlock : SaveBlocks)
      TFI.adjustForHiPEPrologue(Fn, *SaveBlock);
}

/// replaceFrameIndices - Replace all MO_FrameIndex operands with physical
/// register references and actual offsets.
///
void PEI::replaceFrameIndices(MachineFunction &Fn) {
  const TargetFrameLowering &TFI = *Fn.getSubtarget().getFrameLowering();
  if (!TFI.needsFrameIndexResolution(Fn)) return;

  // Store SPAdj at exit of a basic block.
  SmallVector<int, 8> SPState;
  SPState.resize(Fn.getNumBlockIDs());
  SmallPtrSet<MachineBasicBlock*, 8> Reachable;

  // Iterate over the reachable blocks in DFS order.
  for (auto DFI = df_ext_begin(&Fn, Reachable), DFE = df_ext_end(&Fn, Reachable);
       DFI != DFE; ++DFI) {
    int SPAdj = 0;
    // Check the exit state of the DFS stack predecessor.
    if (DFI.getPathLength() >= 2) {
      MachineBasicBlock *StackPred = DFI.getPath(DFI.getPathLength() - 2);
      assert(Reachable.count(StackPred) &&
             "DFS stack predecessor is already visited.\n");
      SPAdj = SPState[StackPred->getNumber()];
    }
    MachineBasicBlock *BB = *DFI;
    replaceFrameIndices(BB, Fn, SPAdj);
    SPState[BB->getNumber()] = SPAdj;
  }

  // Handle the unreachable blocks.
  for (auto &BB : Fn) {
    if (Reachable.count(&BB))
      // Already handled in DFS traversal.
      continue;
    int SPAdj = 0;
    replaceFrameIndices(&BB, Fn, SPAdj);
  }
}

void PEI::replaceFrameIndices(MachineBasicBlock *BB, MachineFunction &Fn,
                              int &SPAdj) {
  assert(Fn.getSubtarget().getRegisterInfo() &&
         "getRegisterInfo() must be implemented!");
  const TargetInstrInfo &TII = *Fn.getSubtarget().getInstrInfo();
  const TargetRegisterInfo &TRI = *Fn.getSubtarget().getRegisterInfo();
  const TargetFrameLowering *TFI = Fn.getSubtarget().getFrameLowering();
  unsigned FrameSetupOpcode = TII.getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = TII.getCallFrameDestroyOpcode();

  if (RS && !FrameIndexVirtualScavenging) RS->enterBasicBlock(*BB);

  bool InsideCallSequence = false;

  for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ) {

    if (I->getOpcode() == FrameSetupOpcode ||
        I->getOpcode() == FrameDestroyOpcode) {
      InsideCallSequence = (I->getOpcode() == FrameSetupOpcode);
      SPAdj += TII.getSPAdjust(*I);

      I = TFI->eliminateCallFramePseudoInstr(Fn, *BB, I);
      continue;
    }

    MachineInstr &MI = *I;
    bool DoIncr = true;
    bool DidFinishLoop = true;
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      if (!MI.getOperand(i).isFI())
        continue;

      // Frame indices in debug values are encoded in a target independent
      // way with simply the frame index and offset rather than any
      // target-specific addressing mode.
      if (MI.isDebugValue()) {
        assert(i == 0 && "Frame indices can only appear as the first "
                         "operand of a DBG_VALUE machine instruction");
        unsigned Reg;
        MachineOperand &Offset = MI.getOperand(1);
        Offset.setImm(
            Offset.getImm() +
            TFI->getFrameIndexReference(Fn, MI.getOperand(0).getIndex(), Reg));
        MI.getOperand(0).ChangeToRegister(Reg, false /*isDef*/);
        continue;
      }

      // TODO: This code should be commoned with the code for
      // PATCHPOINT. There's no good reason for the difference in
      // implementation other than historical accident. The only
      // remaining difference is the unconditional use of the stack
      // pointer as the base register.
      if (MI.getOpcode() == TargetOpcode::STATEPOINT) {
        assert((!MI.isDebugValue() || i == 0) &&
               "Frame indices can only appear as the first operand of a "
               "DBG_VALUE machine instruction");
        unsigned Reg;
        MachineOperand &Offset = MI.getOperand(i + 1);
        int refOffset = TFI->getFrameIndexReferencePreferSP(
            Fn, MI.getOperand(i).getIndex(), Reg, /*IgnoreSPUpdates*/ false);
        Offset.setImm(Offset.getImm() + refOffset);
        MI.getOperand(i).ChangeToRegister(Reg, false /*isDef*/);
        continue;
      }

      // Some instructions (e.g. inline asm instructions) can have
      // multiple frame indices and/or cause eliminateFrameIndex
      // to insert more than one instruction. We need the register
      // scavenger to go through all of these instructions so that
      // it can update its register information. We keep the
      // iterator at the point before insertion so that we can
      // revisit them in full.
      bool AtBeginning = (I == BB->begin());
      if (!AtBeginning) --I;

      // If this instruction has a FrameIndex operand, we need to
      // use that target machine register info object to eliminate
      // it.
      TRI.eliminateFrameIndex(MI, SPAdj, i,
                              FrameIndexVirtualScavenging ? nullptr : RS);

      // Reset the iterator if we were at the beginning of the BB.
      if (AtBeginning) {
        I = BB->begin();
        DoIncr = false;
      }

      DidFinishLoop = false;
      break;
    }

    // If we are looking at a call sequence, we need to keep track of
    // the SP adjustment made by each instruction in the sequence.
    // This includes both the frame setup/destroy pseudos (handled above),
    // as well as other instructions that have side effects w.r.t the SP.
    // Note that this must come after eliminateFrameIndex, because
    // if I itself referred to a frame index, we shouldn't count its own
    // adjustment.
    if (DidFinishLoop && InsideCallSequence)
      SPAdj += TII.getSPAdjust(MI);

    if (DoIncr && I != BB->end()) ++I;

    // Update register states.
    if (RS && !FrameIndexVirtualScavenging && DidFinishLoop)
      RS->forward(MI);
  }
}

/// Allocate a register for the virtual register \p VReg. The last use of
/// \p VReg is around the current position of the register scavenger \p RS.
/// \p ReserveAfter controls whether the scavenged register needs to be reserved
/// after the current instruction, otherwise it will only be reserved before the
/// current instruction.
static unsigned scavengeVReg(MachineRegisterInfo &MRI, RegScavenger &RS,
                             unsigned VReg, bool ReserveAfter) {
#ifndef NDEBUG
  // Verify that all definitions and uses are in the same basic block.
  const MachineBasicBlock *CommonMBB = nullptr;
  bool HadDef = false;
  for (MachineOperand &MO : MRI.reg_nodbg_operands(VReg)) {
    MachineBasicBlock *MBB = MO.getParent()->getParent();
    if (CommonMBB == nullptr)
      CommonMBB = MBB;
    assert(MBB == CommonMBB && "All defs+uses must be in the same basic block");
    if (MO.isDef())
      HadDef = true;
  }
  assert(HadDef && "Must have at least 1 Def");
#endif

  // We should only have one definition of the register. However to accommodate
  // the requirements of two address code we also allow definitions in
  // subsequent instructions provided they also read the register. That way
  // we get a single contiguous lifetime.
1175 // 1176 // Definitions in MRI.def_begin() are unordered, search for the first. 1177 const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo(); 1178 MachineRegisterInfo::def_iterator FirstDef = 1179 std::find_if(MRI.def_begin(VReg), MRI.def_end(), 1180 [VReg, &TRI](const MachineOperand &MO) { 1181 return !MO.getParent()->readsRegister(VReg, &TRI); 1182 }); 1183 assert(FirstDef != MRI.def_end() && 1184 "Must have one definition that does not redefine vreg"); 1185 MachineInstr &DefMI = *FirstDef->getParent(); 1186 1187 // The register scavenger will report a free register inserting an emergency 1188 // spill/reload if necessary. 1189 int SPAdj = 0; 1190 const TargetRegisterClass &RC = *MRI.getRegClass(VReg); 1191 unsigned SReg = RS.scavengeRegisterBackwards(RC, DefMI.getIterator(), 1192 ReserveAfter, SPAdj); 1193 MRI.replaceRegWith(VReg, SReg); 1194 ++NumScavengedRegs; 1195 return SReg; 1196 } 1197 1198 /// doScavengeFrameVirtualRegs - Replace all frame index virtual registers 1199 /// with physical registers. Use the register scavenger to find an 1200 /// appropriate register to use. 1201 /// 1202 /// FIXME: Iterating over the instruction stream is unnecessary. We can simply 1203 /// iterate over the vreg use list, which at this point only contains machine 1204 /// operands for which eliminateFrameIndex need a new scratch reg. 1205 static void 1206 doScavengeFrameVirtualRegs(MachineFunction &MF, RegScavenger *RS) { 1207 // Run through the instructions and find any virtual registers. 1208 MachineRegisterInfo &MRI = MF.getRegInfo(); 1209 const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo(); 1210 for (MachineBasicBlock &MBB : MF) { 1211 RS->enterBasicBlockEnd(MBB); 1212 1213 bool LastIterationHadVRegUses = false; 1214 for (MachineBasicBlock::iterator I = MBB.end(); I != MBB.begin(); ) { 1215 --I; 1216 // Move RegScavenger to the position between *I and *std::next(I). 1217 RS->backward(I); 1218 1219 // Look for unassigned vregs in the uses of *std::next(I). 1220 if (LastIterationHadVRegUses) { 1221 MachineBasicBlock::iterator N = std::next(I); 1222 const MachineInstr &NMI = *N; 1223 for (const MachineOperand &MO : NMI.operands()) { 1224 if (!MO.isReg() || !MO.readsReg()) 1225 continue; 1226 unsigned Reg = MO.getReg(); 1227 if (TargetRegisterInfo::isVirtualRegister(Reg)) { 1228 unsigned SReg = scavengeVReg(MRI, *RS, Reg, true); 1229 N->addRegisterKilled(SReg, &TRI, false); 1230 RS->setRegUsed(SReg); 1231 } 1232 } 1233 } 1234 1235 // Look for unassigned vregs in the defs of *I. 1236 LastIterationHadVRegUses = false; 1237 const MachineInstr &MI = *I; 1238 for (const MachineOperand &MO : MI.operands()) { 1239 if (!MO.isReg()) 1240 continue; 1241 unsigned Reg = MO.getReg(); 1242 if (!TargetRegisterInfo::isVirtualRegister(Reg)) 1243 continue; 1244 // We have to look at all operands anyway so we can precalculate here 1245 // whether there is a reading operand. This allows use to skip the use 1246 // step in the next iteration if there was none. 1247 if (MO.readsReg()) 1248 LastIterationHadVRegUses = true; 1249 if (MO.isDef()) { 1250 unsigned SReg = scavengeVReg(MRI, *RS, Reg, false); 1251 I->addRegisterDead(SReg, &TRI, false); 1252 } 1253 } 1254 } 1255 } 1256 } 1257