//===-- PrologEpilogInserter.cpp - Insert Prolog/Epilog code in function --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass is responsible for finalizing the function's frame layout, saving
// callee saved registers, and for emitting prolog & epilog code for the
// function.
//
// This pass must be run after register allocation. After this pass is
// executed, it is illegal to construct MO_FrameIndex operands.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <climits>
#include <limits>

using namespace llvm;

#define DEBUG_TYPE "prologepilog"

typedef SmallVector<MachineBasicBlock *, 4> MBBVector;
static void doSpillCalleeSavedRegs(MachineFunction &MF, RegScavenger *RS,
                                   unsigned &MinCSFrameIndex,
                                   unsigned &MaxCSFrameIndex,
                                   const MBBVector &SaveBlocks,
                                   const MBBVector &RestoreBlocks);

namespace {
class PEI : public MachineFunctionPass {
public:
  static char ID;
  PEI() : MachineFunctionPass(ID) {
    initializePEIPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  MachineFunctionProperties getRequiredProperties() const override {
    MachineFunctionProperties MFP;
    if (UsesCalleeSaves)
      MFP.set(MachineFunctionProperties::Property::NoVRegs);
    return MFP;
  }

  /// runOnMachineFunction - Insert prolog/epilog code and replace abstract
  /// frame indexes with appropriate references.
  ///
  bool runOnMachineFunction(MachineFunction &Fn) override;

private:
  std::function<void(MachineFunction &MF, RegScavenger *RS,
                     unsigned &MinCSFrameIndex, unsigned &MaxCSFrameIndex,
                     const MBBVector &SaveBlocks,
                     const MBBVector &RestoreBlocks)>
      SpillCalleeSavedRegisters;
  std::function<void(MachineFunction &MF, RegScavenger &RS)>
      ScavengeFrameVirtualRegs;

  bool UsesCalleeSaves = false;

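  // Register scavenger used while laying out the frame and eliminating frame
  // indices; created in runOnMachineFunction only when the target requests
  // register scavenging, and left null otherwise.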
  RegScavenger *RS;

  // MinCSFrameIndex, MaxCSFrameIndex - Keeps the range of callee saved
  // stack frame indexes.
  unsigned MinCSFrameIndex = std::numeric_limits<unsigned>::max();
  unsigned MaxCSFrameIndex = 0;

  // Save and Restore blocks of the current function. Typically there is a
  // single save block, unless Windows EH funclets are involved.
  MBBVector SaveBlocks;
  MBBVector RestoreBlocks;

  // Flag to control whether to use the register scavenger to resolve
  // frame index materialization registers. Set according to
  // TRI->requiresFrameIndexScavenging() for the current function.
  bool FrameIndexVirtualScavenging;

  // Flag to control whether the scavenger should be passed even though
  // FrameIndexVirtualScavenging is used.
  bool FrameIndexEliminationScavenging;

  // Emit remarks.
  MachineOptimizationRemarkEmitter *ORE = nullptr;

  void calculateCallFrameInfo(MachineFunction &Fn);
  void calculateSaveRestoreBlocks(MachineFunction &Fn);

  void calculateFrameObjectOffsets(MachineFunction &Fn);
  void replaceFrameIndices(MachineFunction &Fn);
  void replaceFrameIndices(MachineBasicBlock *BB, MachineFunction &Fn,
                           int &SPAdj);
  void insertPrologEpilogCode(MachineFunction &Fn);
};
} // namespace

char PEI::ID = 0;
char &llvm::PrologEpilogCodeInserterID = PEI::ID;

static cl::opt<unsigned>
WarnStackSize("warn-stack-size", cl::Hidden, cl::init((unsigned)-1),
              cl::desc("Warn for stack size bigger than the given"
                       " number"));

INITIALIZE_PASS_BEGIN(PEI, DEBUG_TYPE, "Prologue/Epilogue Insertion", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(StackProtector)
INITIALIZE_PASS_DEPENDENCY(MachineOptimizationRemarkEmitterPass)
INITIALIZE_PASS_END(PEI, DEBUG_TYPE,
                    "Prologue/Epilogue Insertion & Frame Finalization", false,
                    false)

MachineFunctionPass *llvm::createPrologEpilogInserterPass() {
  return new PEI();
}

STATISTIC(NumBytesStackSpace,
          "Number of bytes used for stack in all functions");

void PEI::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addPreserved<MachineLoopInfo>();
  AU.addPreserved<MachineDominatorTree>();
  AU.addRequired<StackProtector>();
  AU.addRequired<MachineOptimizationRemarkEmitterPass>();
  MachineFunctionPass::getAnalysisUsage(AU);
}


/// StackObjSet - A set of stack object indexes
typedef SmallSetVector<int, 8> StackObjSet;

/// runOnMachineFunction - Insert prolog/epilog code and replace abstract
/// frame indexes with appropriate references.
///
bool PEI::runOnMachineFunction(MachineFunction &Fn) {
  if (!SpillCalleeSavedRegisters) {
    const TargetMachine &TM = Fn.getTarget();
    if (!TM.usesPhysRegsForPEI()) {
      SpillCalleeSavedRegisters = [](MachineFunction &, RegScavenger *,
                                     unsigned &, unsigned &, const MBBVector &,
                                     const MBBVector &) {};
      ScavengeFrameVirtualRegs = [](MachineFunction &, RegScavenger &) {};
    } else {
      SpillCalleeSavedRegisters = doSpillCalleeSavedRegs;
      ScavengeFrameVirtualRegs = scavengeFrameVirtualRegs;
      UsesCalleeSaves = true;
    }
  }

  const Function* F = Fn.getFunction();
  const TargetRegisterInfo *TRI = Fn.getSubtarget().getRegisterInfo();
  const TargetFrameLowering *TFI = Fn.getSubtarget().getFrameLowering();

  RS = TRI->requiresRegisterScavenging(Fn) ? new RegScavenger() : nullptr;
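  // Decide how frame indices will be materialized: either by creating virtual
  // registers now and scavenging real ones for them after frame index
  // elimination (FrameIndexVirtualScavenging), or by handing the scavenger
  // directly to eliminateFrameIndex (FrameIndexEliminationScavenging).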
  FrameIndexVirtualScavenging = TRI->requiresFrameIndexScavenging(Fn);
  FrameIndexEliminationScavenging = (RS && !FrameIndexVirtualScavenging) ||
                                    TRI->requiresFrameIndexReplacementScavenging(Fn);
  ORE = &getAnalysis<MachineOptimizationRemarkEmitterPass>().getORE();

  // Calculate the MaxCallFrameSize and AdjustsStack variables for the
  // function's frame information. Also eliminates call frame pseudo
  // instructions.
  calculateCallFrameInfo(Fn);

  // Determine placement of CSR spill/restore code and prolog/epilog code:
  // place all spills in the entry block, all restores in return blocks.
  calculateSaveRestoreBlocks(Fn);

  // Handle CSR spilling and restoring, for targets that need it.
  SpillCalleeSavedRegisters(Fn, RS, MinCSFrameIndex, MaxCSFrameIndex,
                            SaveBlocks, RestoreBlocks);

  // Allow the target machine to make final modifications to the function
  // before the frame layout is finalized.
  TFI->processFunctionBeforeFrameFinalized(Fn, RS);

  // Calculate actual frame offsets for all abstract stack objects...
  calculateFrameObjectOffsets(Fn);

  // Add prolog and epilog code to the function. This function is required
  // to align the stack frame as necessary for any stack variables or
  // called functions. Because of this, calculateCalleeSavedRegisters()
  // must be called before this function in order to set the AdjustsStack
  // and MaxCallFrameSize variables.
  if (!F->hasFnAttribute(Attribute::Naked))
    insertPrologEpilogCode(Fn);

  // Replace all MO_FrameIndex operands with physical register references
  // and actual offsets.
  //
  replaceFrameIndices(Fn);

  // If register scavenging is needed, as we've enabled doing it as a
  // post-pass, scavenge the virtual registers that frame index elimination
  // inserted.
  if (TRI->requiresRegisterScavenging(Fn) && FrameIndexVirtualScavenging) {
    ScavengeFrameVirtualRegs(Fn, *RS);

    // Clear any vregs created by virtual scavenging.
    Fn.getRegInfo().clearVirtRegs();
  }

  // Warn on stack size when it exceeds the given limit.
  MachineFrameInfo &MFI = Fn.getFrameInfo();
  uint64_t StackSize = MFI.getStackSize();
  if (WarnStackSize.getNumOccurrences() > 0 && WarnStackSize < StackSize) {
    DiagnosticInfoStackSize DiagStackSize(*F, StackSize);
    F->getContext().diagnose(DiagStackSize);
  }

  delete RS;
  SaveBlocks.clear();
  RestoreBlocks.clear();
  MFI.setSavePoint(nullptr);
  MFI.setRestorePoint(nullptr);
  return true;
}

/// Calculate the MaxCallFrameSize and AdjustsStack
/// variables for the function's frame information and eliminate call frame
/// pseudo instructions.
void PEI::calculateCallFrameInfo(MachineFunction &Fn) {
  const TargetInstrInfo &TII = *Fn.getSubtarget().getInstrInfo();
  const TargetFrameLowering *TFI = Fn.getSubtarget().getFrameLowering();
  MachineFrameInfo &MFI = Fn.getFrameInfo();

  unsigned MaxCallFrameSize = 0;
  bool AdjustsStack = MFI.adjustsStack();

  // Get the function call frame set-up and tear-down instruction opcode
  unsigned FrameSetupOpcode = TII.getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = TII.getCallFrameDestroyOpcode();

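  // These pseudos bracket the stack adjustment around each call (for example,
  // on X86-64 they are typically ADJCALLSTACKDOWN64/ADJCALLSTACKUP64); targets
  // without such pseudos report ~0u for both opcodes.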
  // Early exit for targets which have no call frame setup/destroy pseudo
  // instructions.
  if (FrameSetupOpcode == ~0u && FrameDestroyOpcode == ~0u)
    return;

  std::vector<MachineBasicBlock::iterator> FrameSDOps;
  for (MachineFunction::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB)
    for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ++I)
      if (TII.isFrameInstr(*I)) {
        unsigned Size = TII.getFrameSize(*I);
        if (Size > MaxCallFrameSize) MaxCallFrameSize = Size;
        AdjustsStack = true;
        FrameSDOps.push_back(I);
      } else if (I->isInlineAsm()) {
        // Some inline asm's need a stack frame, as indicated by operand 1.
        unsigned ExtraInfo = I->getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
        if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
          AdjustsStack = true;
      }

  assert(!MFI.isMaxCallFrameSizeComputed() ||
         (MFI.getMaxCallFrameSize() == MaxCallFrameSize &&
          MFI.adjustsStack() == AdjustsStack));
  MFI.setAdjustsStack(AdjustsStack);
  MFI.setMaxCallFrameSize(MaxCallFrameSize);

  for (std::vector<MachineBasicBlock::iterator>::iterator
         i = FrameSDOps.begin(), e = FrameSDOps.end(); i != e; ++i) {
    MachineBasicBlock::iterator I = *i;

    // If call frames are not being included as part of the stack frame, and
    // the target doesn't indicate otherwise, remove the call frame pseudos
    // here. The sub/add sp instruction pairs are still inserted, but we don't
    // need to track the SP adjustment for frame index elimination.
    if (TFI->canSimplifyCallFramePseudos(Fn))
      TFI->eliminateCallFramePseudoInstr(Fn, *I->getParent(), I);
  }
}

/// Compute the sets of entry and return blocks for saving and restoring
/// callee-saved registers, and placing prolog and epilog code.
void PEI::calculateSaveRestoreBlocks(MachineFunction &Fn) {
  const MachineFrameInfo &MFI = Fn.getFrameInfo();

  // Even when we do not change any CSR, we still want to insert the
  // prologue and epilogue of the function.
  // So set the save points for those.

  // Use the points found by shrink-wrapping, if any.
  if (MFI.getSavePoint()) {
    SaveBlocks.push_back(MFI.getSavePoint());
    assert(MFI.getRestorePoint() && "Both restore and save must be set");
    MachineBasicBlock *RestoreBlock = MFI.getRestorePoint();
    // If RestoreBlock does not have any successor and is not a return block
    // then the end point is unreachable and we do not need to insert any
    // epilogue.
    if (!RestoreBlock->succ_empty() || RestoreBlock->isReturnBlock())
      RestoreBlocks.push_back(RestoreBlock);
    return;
  }

  // Save refs to entry and return blocks.
  SaveBlocks.push_back(&Fn.front());
  for (MachineBasicBlock &MBB : Fn) {
    if (MBB.isEHFuncletEntry())
      SaveBlocks.push_back(&MBB);
    if (MBB.isReturnBlock())
      RestoreBlocks.push_back(&MBB);
  }
}

static void assignCalleeSavedSpillSlots(MachineFunction &F,
                                        const BitVector &SavedRegs,
                                        unsigned &MinCSFrameIndex,
                                        unsigned &MaxCSFrameIndex) {
  if (SavedRegs.empty())
    return;

  const TargetRegisterInfo *RegInfo = F.getSubtarget().getRegisterInfo();
  const MCPhysReg *CSRegs = F.getRegInfo().getCalleeSavedRegs();

  std::vector<CalleeSavedInfo> CSI;
  for (unsigned i = 0; CSRegs[i]; ++i) {
    unsigned Reg = CSRegs[i];
    if (SavedRegs.test(Reg))
      CSI.push_back(CalleeSavedInfo(Reg));
  }

  const TargetFrameLowering *TFI = F.getSubtarget().getFrameLowering();
  MachineFrameInfo &MFI = F.getFrameInfo();
  if (!TFI->assignCalleeSavedSpillSlots(F, RegInfo, CSI)) {
    // If target doesn't implement this, use generic code.

    if (CSI.empty())
      return; // Early exit if no callee saved registers are modified!

    unsigned NumFixedSpillSlots;
    const TargetFrameLowering::SpillSlot *FixedSpillSlots =
        TFI->getCalleeSavedSpillSlots(NumFixedSpillSlots);

    // Now that we know which registers need to be saved and restored, allocate
    // stack slots for them.
    for (auto &CS : CSI) {
      unsigned Reg = CS.getReg();
      const TargetRegisterClass *RC = RegInfo->getMinimalPhysRegClass(Reg);

      int FrameIdx;
      if (RegInfo->hasReservedSpillSlot(F, Reg, FrameIdx)) {
        CS.setFrameIdx(FrameIdx);
        continue;
      }

      // Check to see if this physreg must be spilled to a particular stack slot
      // on this target.
      const TargetFrameLowering::SpillSlot *FixedSlot = FixedSpillSlots;
      while (FixedSlot != FixedSpillSlots + NumFixedSpillSlots &&
             FixedSlot->Reg != Reg)
        ++FixedSlot;

      unsigned Size = RegInfo->getSpillSize(*RC);
      if (FixedSlot == FixedSpillSlots + NumFixedSpillSlots) {
        // Nope, just spill it anywhere convenient.
        unsigned Align = RegInfo->getSpillAlignment(*RC);
        unsigned StackAlign = TFI->getStackAlignment();

        // We may not be able to satisfy the desired alignment specification of
        // the TargetRegisterClass if the stack alignment is smaller. Use the
        // min.
        Align = std::min(Align, StackAlign);
        FrameIdx = MFI.CreateStackObject(Size, Align, true);
        if ((unsigned)FrameIdx < MinCSFrameIndex) MinCSFrameIndex = FrameIdx;
        if ((unsigned)FrameIdx > MaxCSFrameIndex) MaxCSFrameIndex = FrameIdx;
      } else {
        // Spill it to the stack where we must.
        FrameIdx = MFI.CreateFixedSpillStackObject(Size, FixedSlot->Offset);
      }

      CS.setFrameIdx(FrameIdx);
    }
  }

  MFI.setCalleeSavedInfo(CSI);
}

/// Helper function to update the liveness information for the callee-saved
/// registers.
static void updateLiveness(MachineFunction &MF) {
  MachineFrameInfo &MFI = MF.getFrameInfo();
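  // For illustration of the region computed below: with shrink-wrapped points
  // in a chain Entry -> A -> Save -> B -> Restore -> C, the flood fill visits
  // Entry, A, Save and C but skips B and Restore, so the callee-saved
  // registers are marked live-in everywhere except where their values only
  // live in the spill slots.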
  // Visited will contain all the basic blocks that are in the region
  // where the callee saved registers are alive:
  // - Anything that is not Save or Restore -> LiveThrough.
  // - Save -> LiveIn.
  // - Restore -> LiveOut.
  // The live-out is not attached to the block, so no need to keep
  // Restore in this set.
  SmallPtrSet<MachineBasicBlock *, 8> Visited;
  SmallVector<MachineBasicBlock *, 8> WorkList;
  MachineBasicBlock *Entry = &MF.front();
  MachineBasicBlock *Save = MFI.getSavePoint();

  if (!Save)
    Save = Entry;

  if (Entry != Save) {
    WorkList.push_back(Entry);
    Visited.insert(Entry);
  }
  Visited.insert(Save);

  MachineBasicBlock *Restore = MFI.getRestorePoint();
  if (Restore)
    // By construction Restore cannot be visited, otherwise it
    // means there exists a path to Restore that does not go
    // through Save.
    WorkList.push_back(Restore);

  while (!WorkList.empty()) {
    const MachineBasicBlock *CurBB = WorkList.pop_back_val();
    // By construction, the region that is after the save point is
    // dominated by the Save and post-dominated by the Restore.
    if (CurBB == Save && Save != Restore)
      continue;
    // Enqueue all the successors not already visited.
    // Those are by construction either before Save or after Restore.
    for (MachineBasicBlock *SuccBB : CurBB->successors())
      if (Visited.insert(SuccBB).second)
        WorkList.push_back(SuccBB);
  }

  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();

  MachineRegisterInfo &MRI = MF.getRegInfo();
  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    for (MachineBasicBlock *MBB : Visited) {
      MCPhysReg Reg = CSI[i].getReg();
      // Add the callee-saved register as live-in.
      // It's killed at the spill.
      if (!MRI.isReserved(Reg) && !MBB->isLiveIn(Reg))
        MBB->addLiveIn(Reg);
    }
  }
}

/// Insert spill code for the callee-saved registers used in the function.
static void insertCSRSaves(MachineBasicBlock &SaveBlock,
                           ArrayRef<CalleeSavedInfo> CSI) {
  MachineFunction &Fn = *SaveBlock.getParent();
  const TargetInstrInfo &TII = *Fn.getSubtarget().getInstrInfo();
  const TargetFrameLowering *TFI = Fn.getSubtarget().getFrameLowering();
  const TargetRegisterInfo *TRI = Fn.getSubtarget().getRegisterInfo();

  MachineBasicBlock::iterator I = SaveBlock.begin();
  if (!TFI->spillCalleeSavedRegisters(SaveBlock, I, CSI, TRI)) {
    for (const CalleeSavedInfo &CS : CSI) {
      // Insert the spill to the stack frame.
      unsigned Reg = CS.getReg();
      const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
      TII.storeRegToStackSlot(SaveBlock, I, Reg, true, CS.getFrameIdx(), RC,
                              TRI);
    }
  }
}

/// Insert restore code for the callee-saved registers used in the function.
static void insertCSRRestores(MachineBasicBlock &RestoreBlock,
                              ArrayRef<CalleeSavedInfo> CSI) {
  MachineFunction &Fn = *RestoreBlock.getParent();
  const TargetInstrInfo &TII = *Fn.getSubtarget().getInstrInfo();
  const TargetFrameLowering *TFI = Fn.getSubtarget().getFrameLowering();
  const TargetRegisterInfo *TRI = Fn.getSubtarget().getRegisterInfo();

  // Restore all registers immediately before the return and any
  // terminators that precede it.
  MachineBasicBlock::iterator I = RestoreBlock.getFirstTerminator();

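  // Prefer a single target-specific restore sequence; if the target declines,
  // fall back to one load per callee-saved register, emitted in reverse order
  // of the corresponding saves.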
  if (!TFI->restoreCalleeSavedRegisters(RestoreBlock, I, CSI, TRI)) {
    for (const CalleeSavedInfo &CI : reverse(CSI)) {
      unsigned Reg = CI.getReg();
      const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
      TII.loadRegFromStackSlot(RestoreBlock, I, Reg, CI.getFrameIdx(), RC, TRI);
      assert(I != RestoreBlock.begin() &&
             "loadRegFromStackSlot didn't insert any code!");
      // Insert in reverse order. loadRegFromStackSlot can insert
      // multiple instructions.
    }
  }
}

static void doSpillCalleeSavedRegs(MachineFunction &Fn, RegScavenger *RS,
                                   unsigned &MinCSFrameIndex,
                                   unsigned &MaxCSFrameIndex,
                                   const MBBVector &SaveBlocks,
                                   const MBBVector &RestoreBlocks) {
  const Function *F = Fn.getFunction();
  const TargetFrameLowering *TFI = Fn.getSubtarget().getFrameLowering();
  MachineFrameInfo &MFI = Fn.getFrameInfo();
  MinCSFrameIndex = std::numeric_limits<unsigned>::max();
  MaxCSFrameIndex = 0;

  // Determine which of the registers in the callee save list should be saved.
  BitVector SavedRegs;
  TFI->determineCalleeSaves(Fn, SavedRegs, RS);

  // Assign stack slots for any callee-saved registers that must be spilled.
  assignCalleeSavedSpillSlots(Fn, SavedRegs, MinCSFrameIndex, MaxCSFrameIndex);

  // Add the code to save and restore the callee saved registers.
  if (!F->hasFnAttribute(Attribute::Naked)) {
    MFI.setCalleeSavedInfoValid(true);

    ArrayRef<CalleeSavedInfo> CSI = MFI.getCalleeSavedInfo();
    if (!CSI.empty()) {
      for (MachineBasicBlock *SaveBlock : SaveBlocks) {
        insertCSRSaves(*SaveBlock, CSI);
        // Update the live-in information of all the blocks up to the save
        // point.
        updateLiveness(Fn);
      }
      for (MachineBasicBlock *RestoreBlock : RestoreBlocks)
        insertCSRRestores(*RestoreBlock, CSI);
    }
  }
}

/// AdjustStackOffset - Helper function used to adjust the stack frame offset.
static inline void
AdjustStackOffset(MachineFrameInfo &MFI, int FrameIdx,
                  bool StackGrowsDown, int64_t &Offset,
                  unsigned &MaxAlign, unsigned Skew) {
  // If the stack grows down, add the object size to find the lowest address.
  if (StackGrowsDown)
    Offset += MFI.getObjectSize(FrameIdx);

  unsigned Align = MFI.getObjectAlignment(FrameIdx);

  // If the alignment of this object is greater than that of the stack, then
  // increase the stack alignment to match.
  MaxAlign = std::max(MaxAlign, Align);

  // Adjust to alignment boundary.
  Offset = alignTo(Offset, Align, Skew);

  if (StackGrowsDown) {
    DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << -Offset << "]\n");
    MFI.setObjectOffset(FrameIdx, -Offset); // Set the computed offset
  } else {
    DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << Offset << "]\n");
    MFI.setObjectOffset(FrameIdx, Offset);
    Offset += MFI.getObjectSize(FrameIdx);
  }
}

/// Compute which bytes of fixed and callee-save stack area are unused and keep
/// track of them in StackBytesFree.
///
static inline void
computeFreeStackSlots(MachineFrameInfo &MFI, bool StackGrowsDown,
                      unsigned MinCSFrameIndex, unsigned MaxCSFrameIndex,
                      int64_t FixedCSEnd, BitVector &StackBytesFree) {
  // Avoid undefined int64_t -> int conversion below in extreme case.
  if (FixedCSEnd > std::numeric_limits<int>::max())
    return;

  StackBytesFree.resize(FixedCSEnd, true);

  SmallVector<int, 16> AllocatedFrameSlots;
  // Add fixed objects.
  for (int i = MFI.getObjectIndexBegin(); i != 0; ++i)
    AllocatedFrameSlots.push_back(i);
  // Add callee-save objects.
  for (int i = MinCSFrameIndex; i <= (int)MaxCSFrameIndex; ++i)
    AllocatedFrameSlots.push_back(i);

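  // For example, with a down-growing stack an 8-byte fixed object at
  // ObjOffset -16 occupies bitvector bytes [8, 16), which are cleared below so
  // they cannot be handed out again.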
  for (int i : AllocatedFrameSlots) {
    // These are converted from int64_t, but they should always fit in int
    // because of the FixedCSEnd check above.
    int ObjOffset = MFI.getObjectOffset(i);
    int ObjSize = MFI.getObjectSize(i);
    int ObjStart, ObjEnd;
    if (StackGrowsDown) {
      // ObjOffset is negative when StackGrowsDown is true.
      ObjStart = -ObjOffset - ObjSize;
      ObjEnd = -ObjOffset;
    } else {
      ObjStart = ObjOffset;
      ObjEnd = ObjOffset + ObjSize;
    }
    // Ignore fixed holes that are in the previous stack frame.
    if (ObjEnd > 0)
      StackBytesFree.reset(ObjStart, ObjEnd);
  }
}

/// Assign frame object to an unused portion of the stack in the fixed stack
/// object range. Return true if the allocation was successful.
///
static inline bool scavengeStackSlot(MachineFrameInfo &MFI, int FrameIdx,
                                     bool StackGrowsDown, unsigned MaxAlign,
                                     BitVector &StackBytesFree) {
  if (MFI.isVariableSizedObjectIndex(FrameIdx))
    return false;

  if (StackBytesFree.none()) {
    // clear it to speed up later scavengeStackSlot calls to
    // StackBytesFree.none()
    StackBytesFree.clear();
    return false;
  }

  unsigned ObjAlign = MFI.getObjectAlignment(FrameIdx);
  if (ObjAlign > MaxAlign)
    return false;

  int64_t ObjSize = MFI.getObjectSize(FrameIdx);
  int FreeStart;
  for (FreeStart = StackBytesFree.find_first(); FreeStart != -1;
       FreeStart = StackBytesFree.find_next(FreeStart)) {

    // Check that free space has suitable alignment.
    unsigned ObjStart = StackGrowsDown ? FreeStart + ObjSize : FreeStart;
    if (alignTo(ObjStart, ObjAlign) != ObjStart)
      continue;

    if (FreeStart + ObjSize > StackBytesFree.size())
      return false;

    bool AllBytesFree = true;
    for (unsigned Byte = 0; Byte < ObjSize; ++Byte)
      if (!StackBytesFree.test(FreeStart + Byte)) {
        AllBytesFree = false;
        break;
      }
    if (AllBytesFree)
      break;
  }

  if (FreeStart == -1)
    return false;

  if (StackGrowsDown) {
    int ObjStart = -(FreeStart + ObjSize);
    DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") scavenged at SP[" << ObjStart
                 << "]\n");
    MFI.setObjectOffset(FrameIdx, ObjStart);
  } else {
    DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") scavenged at SP[" << FreeStart
                 << "]\n");
    MFI.setObjectOffset(FrameIdx, FreeStart);
  }

  StackBytesFree.reset(FreeStart, FreeStart + ObjSize);
  return true;
}

/// AssignProtectedObjSet - Helper function to assign large stack objects (i.e.,
/// those required to be close to the Stack Protector) to stack offsets.
static void
AssignProtectedObjSet(const StackObjSet &UnassignedObjs,
                      SmallSet<int, 16> &ProtectedObjs,
                      MachineFrameInfo &MFI, bool StackGrowsDown,
                      int64_t &Offset, unsigned &MaxAlign, unsigned Skew) {

  for (StackObjSet::const_iterator I = UnassignedObjs.begin(),
         E = UnassignedObjs.end(); I != E; ++I) {
    int i = *I;
    AdjustStackOffset(MFI, i, StackGrowsDown, Offset, MaxAlign, Skew);
    ProtectedObjs.insert(i);
  }
}

/// calculateFrameObjectOffsets - Calculate actual frame offsets for all of the
/// abstract stack objects.
///
void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
  const TargetFrameLowering &TFI = *Fn.getSubtarget().getFrameLowering();
  StackProtector *SP = &getAnalysis<StackProtector>();

  bool StackGrowsDown =
    TFI.getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;

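  // Note on the alignment below: alignTo(Offset, Align, Skew) rounds Offset up
  // to the next value congruent to Skew modulo Align, e.g. alignTo(40, 16,
  // /*Skew=*/4) == 52; a nonzero skew lets a target bias the whole frame
  // layout (see TFI.getStackAlignmentSkew below).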
  // Loop over all of the stack objects, assigning sequential addresses...
  MachineFrameInfo &MFI = Fn.getFrameInfo();

  // Start at the beginning of the local area.
  // The Offset is the distance from the stack top in the direction
  // of stack growth -- so it's always nonnegative.
  int LocalAreaOffset = TFI.getOffsetOfLocalArea();
  if (StackGrowsDown)
    LocalAreaOffset = -LocalAreaOffset;
  assert(LocalAreaOffset >= 0
         && "Local area offset should be in direction of stack growth");
  int64_t Offset = LocalAreaOffset;

  // Skew to be applied to alignment.
  unsigned Skew = TFI.getStackAlignmentSkew(Fn);

  // If there are fixed sized objects that are preallocated in the local area,
  // non-fixed objects can't be allocated right at the start of local area.
  // Adjust 'Offset' to point to the end of last fixed sized preallocated
  // object.
  for (int i = MFI.getObjectIndexBegin(); i != 0; ++i) {
    int64_t FixedOff;
    if (StackGrowsDown) {
      // The maximum distance from the stack pointer is at the lower address of
      // the object -- which is given by offset. For a down-growing stack
      // the offset is negative, so we negate the offset to get the distance.
      FixedOff = -MFI.getObjectOffset(i);
    } else {
      // The maximum distance from the stack pointer is at the upper
      // address of the object.
      FixedOff = MFI.getObjectOffset(i) + MFI.getObjectSize(i);
    }
    if (FixedOff > Offset) Offset = FixedOff;
  }

  // First assign frame offsets to stack objects that are used to spill
  // callee saved registers.
  if (StackGrowsDown) {
    for (unsigned i = MinCSFrameIndex; i <= MaxCSFrameIndex; ++i) {
      // If the stack grows down, we need to add the size to find the lowest
      // address of the object.
      Offset += MFI.getObjectSize(i);

      unsigned Align = MFI.getObjectAlignment(i);
      // Adjust to alignment boundary
      Offset = alignTo(Offset, Align, Skew);

      DEBUG(dbgs() << "alloc FI(" << i << ") at SP[" << -Offset << "]\n");
      MFI.setObjectOffset(i, -Offset); // Set the computed offset
    }
  } else if (MaxCSFrameIndex >= MinCSFrameIndex) {
    // Be careful about underflow in comparisons against MinCSFrameIndex.
    for (unsigned i = MaxCSFrameIndex; i != MinCSFrameIndex - 1; --i) {
      if (MFI.isDeadObjectIndex(i))
        continue;

      unsigned Align = MFI.getObjectAlignment(i);
      // Adjust to alignment boundary
      Offset = alignTo(Offset, Align, Skew);

      DEBUG(dbgs() << "alloc FI(" << i << ") at SP[" << Offset << "]\n");
      MFI.setObjectOffset(i, Offset);
      Offset += MFI.getObjectSize(i);
    }
  }

  // FixedCSEnd is the stack offset to the end of the fixed and callee-save
  // stack area.
  int64_t FixedCSEnd = Offset;
  unsigned MaxAlign = MFI.getMaxAlignment();

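  // FixedCSEnd also bounds the StackBytesFree bitvector that
  // computeFreeStackSlots and scavengeStackSlot use further down to reuse
  // gaps in this area.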
  // Make sure the special register scavenging spill slot is closest to the
  // incoming stack pointer if a frame pointer is required and is closer
  // to the incoming rather than the final stack pointer.
  const TargetRegisterInfo *RegInfo = Fn.getSubtarget().getRegisterInfo();
  bool EarlyScavengingSlots = (TFI.hasFP(Fn) &&
                               TFI.isFPCloseToIncomingSP() &&
                               RegInfo->useFPForScavengingIndex(Fn) &&
                               !RegInfo->needsStackRealignment(Fn));
  if (RS && EarlyScavengingSlots) {
    SmallVector<int, 2> SFIs;
    RS->getScavengingFrameIndices(SFIs);
    for (SmallVectorImpl<int>::iterator I = SFIs.begin(),
           IE = SFIs.end(); I != IE; ++I)
      AdjustStackOffset(MFI, *I, StackGrowsDown, Offset, MaxAlign, Skew);
  }

  // FIXME: Once this is working, then enable flag will change to a target
  // check for whether the frame is large enough to want to use virtual
  // frame index registers. Functions which don't want/need this optimization
  // will continue to use the existing code path.
  if (MFI.getUseLocalStackAllocationBlock()) {
    unsigned Align = MFI.getLocalFrameMaxAlign();

    // Adjust to alignment boundary.
    Offset = alignTo(Offset, Align, Skew);

    DEBUG(dbgs() << "Local frame base offset: " << Offset << "\n");

    // Resolve offsets for objects in the local block.
    for (unsigned i = 0, e = MFI.getLocalFrameObjectCount(); i != e; ++i) {
      std::pair<int, int64_t> Entry = MFI.getLocalFrameObjectMap(i);
      int64_t FIOffset = (StackGrowsDown ? -Offset : Offset) + Entry.second;
      DEBUG(dbgs() << "alloc FI(" << Entry.first << ") at SP[" <<
            FIOffset << "]\n");
      MFI.setObjectOffset(Entry.first, FIOffset);
    }
    // Allocate the local block
    Offset += MFI.getLocalFrameSize();

    MaxAlign = std::max(Align, MaxAlign);
  }

  // Retrieve the Exception Handler registration node.
  int EHRegNodeFrameIndex = INT_MAX;
  if (const WinEHFuncInfo *FuncInfo = Fn.getWinEHFuncInfo())
    EHRegNodeFrameIndex = FuncInfo->EHRegNodeFrameIndex;

  // Make sure that the stack protector comes before the local variables on the
  // stack.
  SmallSet<int, 16> ProtectedObjs;
  if (MFI.getStackProtectorIndex() >= 0) {
    StackObjSet LargeArrayObjs;
    StackObjSet SmallArrayObjs;
    StackObjSet AddrOfObjs;

    AdjustStackOffset(MFI, MFI.getStackProtectorIndex(), StackGrowsDown,
                      Offset, MaxAlign, Skew);

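    // The buckets below place large arrays nearest the protector, then small
    // arrays, then objects whose address is taken, keeping the most
    // overflow-prone allocations next to the guard.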
    // Assign large stack objects first.
    for (unsigned i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) {
      if (MFI.isObjectPreAllocated(i) &&
          MFI.getUseLocalStackAllocationBlock())
        continue;
      if (i >= MinCSFrameIndex && i <= MaxCSFrameIndex)
        continue;
      if (RS && RS->isScavengingFrameIndex((int)i))
        continue;
      if (MFI.isDeadObjectIndex(i))
        continue;
      if (MFI.getStackProtectorIndex() == (int)i ||
          EHRegNodeFrameIndex == (int)i)
        continue;

      switch (SP->getSSPLayout(MFI.getObjectAllocation(i))) {
      case StackProtector::SSPLK_None:
        continue;
      case StackProtector::SSPLK_SmallArray:
        SmallArrayObjs.insert(i);
        continue;
      case StackProtector::SSPLK_AddrOf:
        AddrOfObjs.insert(i);
        continue;
      case StackProtector::SSPLK_LargeArray:
        LargeArrayObjs.insert(i);
        continue;
      }
      llvm_unreachable("Unexpected SSPLayoutKind.");
    }

    AssignProtectedObjSet(LargeArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
                          Offset, MaxAlign, Skew);
    AssignProtectedObjSet(SmallArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
                          Offset, MaxAlign, Skew);
    AssignProtectedObjSet(AddrOfObjs, ProtectedObjs, MFI, StackGrowsDown,
                          Offset, MaxAlign, Skew);
  }

  SmallVector<int, 8> ObjectsToAllocate;

  // Then prepare to assign frame offsets to stack objects that are not used to
  // spill callee saved registers.
  for (unsigned i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) {
    if (MFI.isObjectPreAllocated(i) && MFI.getUseLocalStackAllocationBlock())
      continue;
    if (i >= MinCSFrameIndex && i <= MaxCSFrameIndex)
      continue;
    if (RS && RS->isScavengingFrameIndex((int)i))
      continue;
    if (MFI.isDeadObjectIndex(i))
      continue;
    if (MFI.getStackProtectorIndex() == (int)i ||
        EHRegNodeFrameIndex == (int)i)
      continue;
    if (ProtectedObjs.count(i))
      continue;

    // Add the objects that we need to allocate to our working set.
    ObjectsToAllocate.push_back(i);
  }

  // Allocate the EH registration node first if one is present.
  if (EHRegNodeFrameIndex != INT_MAX)
    AdjustStackOffset(MFI, EHRegNodeFrameIndex, StackGrowsDown, Offset,
                      MaxAlign, Skew);

  // Give the targets a chance to order the objects the way they like it.
  if (Fn.getTarget().getOptLevel() != CodeGenOpt::None &&
      Fn.getTarget().Options.StackSymbolOrdering)
    TFI.orderFrameObjects(Fn, ObjectsToAllocate);

  // Keep track of which bytes in the fixed and callee-save range are used so we
  // can use the holes when allocating later stack objects. Only do this if the
  // stack protector isn't being used and the target requests it and we're
  // optimizing.
  BitVector StackBytesFree;
  if (!ObjectsToAllocate.empty() &&
      Fn.getTarget().getOptLevel() != CodeGenOpt::None &&
      MFI.getStackProtectorIndex() < 0 && TFI.enableStackSlotScavenging(Fn))
    computeFreeStackSlots(MFI, StackGrowsDown, MinCSFrameIndex, MaxCSFrameIndex,
                          FixedCSEnd, StackBytesFree);

  // Now walk the objects and actually assign base offsets to them.
  for (auto &Object : ObjectsToAllocate)
    if (!scavengeStackSlot(MFI, Object, StackGrowsDown, MaxAlign,
                           StackBytesFree))
      AdjustStackOffset(MFI, Object, StackGrowsDown, Offset, MaxAlign, Skew);

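  // A successful scavenge reuses alignment padding in the fixed/callee-save
  // area, so Offset -- and with it the final frame size -- does not grow for
  // that object.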
  // Make sure the special register scavenging spill slot is closest to the
  // stack pointer.
  if (RS && !EarlyScavengingSlots) {
    SmallVector<int, 2> SFIs;
    RS->getScavengingFrameIndices(SFIs);
    for (SmallVectorImpl<int>::iterator I = SFIs.begin(),
           IE = SFIs.end(); I != IE; ++I)
      AdjustStackOffset(MFI, *I, StackGrowsDown, Offset, MaxAlign, Skew);
  }

  if (!TFI.targetHandlesStackFrameRounding()) {
    // If we have reserved argument space for call sites in the function
    // immediately on entry to the current function, count it as part of the
    // overall stack size.
    if (MFI.adjustsStack() && TFI.hasReservedCallFrame(Fn))
      Offset += MFI.getMaxCallFrameSize();

    // Round up the size to a multiple of the alignment. If the function has
    // any calls or alloca's, align to the target's StackAlignment value to
    // ensure that the callee's frame or the alloca data is suitably aligned;
    // otherwise, for leaf functions, align to the TransientStackAlignment
    // value.
    unsigned StackAlign;
    if (MFI.adjustsStack() || MFI.hasVarSizedObjects() ||
        (RegInfo->needsStackRealignment(Fn) && MFI.getObjectIndexEnd() != 0))
      StackAlign = TFI.getStackAlignment();
    else
      StackAlign = TFI.getTransientStackAlignment();

    // If the frame pointer is eliminated, all frame offsets will be relative to
    // SP not FP. Align to MaxAlign so this works.
    StackAlign = std::max(StackAlign, MaxAlign);
    Offset = alignTo(Offset, StackAlign, Skew);
  }

  // Update frame info to pretend that this is part of the stack...
  int64_t StackSize = Offset - LocalAreaOffset;
  MFI.setStackSize(StackSize);
  NumBytesStackSpace += StackSize;

  MachineOptimizationRemarkAnalysis R(
      DEBUG_TYPE, "StackSize", Fn.getFunction()->getSubprogram(), &Fn.front());
  R << ore::NV("NumStackBytes", StackSize)
    << " stack bytes in function";
  ORE->emit(R);
}

/// insertPrologEpilogCode - Scan the function for modified callee saved
/// registers, insert spill code for these callee saved registers, then add
/// prolog and epilog code to the function.
///
void PEI::insertPrologEpilogCode(MachineFunction &Fn) {
  const TargetFrameLowering &TFI = *Fn.getSubtarget().getFrameLowering();

  // Add prologue to the function...
  for (MachineBasicBlock *SaveBlock : SaveBlocks)
    TFI.emitPrologue(Fn, *SaveBlock);

  // Add epilogue to restore the callee-save registers in each exiting block.
  for (MachineBasicBlock *RestoreBlock : RestoreBlocks)
    TFI.emitEpilogue(Fn, *RestoreBlock);

  for (MachineBasicBlock *SaveBlock : SaveBlocks)
    TFI.inlineStackProbe(Fn, *SaveBlock);

  // Emit additional code that is required to support segmented stacks, if
  // we've been asked for it. This, when linked with a runtime with support
  // for segmented stacks (libgcc is one), will result in allocating stack
  // space in small chunks instead of one large contiguous block.
  if (Fn.shouldSplitStack()) {
    for (MachineBasicBlock *SaveBlock : SaveBlocks)
      TFI.adjustForSegmentedStacks(Fn, *SaveBlock);
  }

  // Emit additional code that is required to explicitly handle the stack in
  // HiPE native code (if needed) when loaded in the Erlang/OTP runtime. The
  // approach is rather similar to that of Segmented Stacks, but it uses a
  // different conditional check and another BIF for allocating more stack
  // space.
  if (Fn.getFunction()->getCallingConv() == CallingConv::HiPE)
    for (MachineBasicBlock *SaveBlock : SaveBlocks)
      TFI.adjustForHiPEPrologue(Fn, *SaveBlock);
}

/// replaceFrameIndices - Replace all MO_FrameIndex operands with physical
/// register references and actual offsets.
///
void PEI::replaceFrameIndices(MachineFunction &Fn) {
  const TargetFrameLowering &TFI = *Fn.getSubtarget().getFrameLowering();
  if (!TFI.needsFrameIndexResolution(Fn)) return;

  // Store SPAdj at exit of a basic block.
  SmallVector<int, 8> SPState;
  SPState.resize(Fn.getNumBlockIDs());
  df_iterator_default_set<MachineBasicBlock*> Reachable;

  // Iterate over the reachable blocks in DFS order.
  for (auto DFI = df_ext_begin(&Fn, Reachable), DFE = df_ext_end(&Fn, Reachable);
       DFI != DFE; ++DFI) {
    int SPAdj = 0;
    // Check the exit state of the DFS stack predecessor.
    if (DFI.getPathLength() >= 2) {
      MachineBasicBlock *StackPred = DFI.getPath(DFI.getPathLength() - 2);
      assert(Reachable.count(StackPred) &&
             "DFS stack predecessor is already visited.\n");
      SPAdj = SPState[StackPred->getNumber()];
    }
    MachineBasicBlock *BB = *DFI;
    replaceFrameIndices(BB, Fn, SPAdj);
    SPState[BB->getNumber()] = SPAdj;
  }

  // Handle the unreachable blocks.
  for (auto &BB : Fn) {
    if (Reachable.count(&BB))
      // Already handled in DFS traversal.
      continue;
    int SPAdj = 0;
    replaceFrameIndices(&BB, Fn, SPAdj);
  }
}

void PEI::replaceFrameIndices(MachineBasicBlock *BB, MachineFunction &Fn,
                              int &SPAdj) {
  assert(Fn.getSubtarget().getRegisterInfo() &&
         "getRegisterInfo() must be implemented!");
  const TargetInstrInfo &TII = *Fn.getSubtarget().getInstrInfo();
  const TargetRegisterInfo &TRI = *Fn.getSubtarget().getRegisterInfo();
  const TargetFrameLowering *TFI = Fn.getSubtarget().getFrameLowering();

  if (RS && FrameIndexEliminationScavenging)
    RS->enterBasicBlock(*BB);

  bool InsideCallSequence = false;

  for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ) {

    if (TII.isFrameInstr(*I)) {
      InsideCallSequence = TII.isFrameSetup(*I);
      SPAdj += TII.getSPAdjust(*I);
      I = TFI->eliminateCallFramePseudoInstr(Fn, *BB, I);
      continue;
    }

    MachineInstr &MI = *I;
    bool DoIncr = true;
    bool DidFinishLoop = true;
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      if (!MI.getOperand(i).isFI())
        continue;

      // Frame indices in debug values are encoded in a target independent
      // way with simply the frame index and offset rather than any
      // target-specific addressing mode.
      if (MI.isDebugValue()) {
        assert(i == 0 && "Frame indices can only appear as the first "
                         "operand of a DBG_VALUE machine instruction");
        unsigned Reg;
        int64_t Offset =
            TFI->getFrameIndexReference(Fn, MI.getOperand(0).getIndex(), Reg);
        MI.getOperand(0).ChangeToRegister(Reg, false /*isDef*/);
        auto *DIExpr = DIExpression::prepend(MI.getDebugExpression(),
                                             DIExpression::NoDeref, Offset);
        MI.getOperand(3).setMetadata(DIExpr);
        const Module *M = Fn.getMMI().getModule();
        // Add the expression to the metadata graph so it isn't lost in MIR
        // dumps.
        M->getNamedMetadata("llvm.dbg.mir")->addOperand(DIExpr);
        continue;
      }

      // TODO: This code should be commoned with the code for
      // PATCHPOINT. There's no good reason for the difference in
      // implementation other than historical accident. The only
      // remaining difference is the unconditional use of the stack
      // pointer as the base register.
      if (MI.getOpcode() == TargetOpcode::STATEPOINT) {
        assert((!MI.isDebugValue() || i == 0) &&
               "Frame indices can only appear as the first operand of a "
               "DBG_VALUE machine instruction");
        unsigned Reg;
        MachineOperand &Offset = MI.getOperand(i + 1);
        int refOffset = TFI->getFrameIndexReferencePreferSP(
            Fn, MI.getOperand(i).getIndex(), Reg, /*IgnoreSPUpdates*/ false);
        Offset.setImm(Offset.getImm() + refOffset);
        MI.getOperand(i).ChangeToRegister(Reg, false /*isDef*/);
        continue;
      }

      // Some instructions (e.g. inline asm instructions) can have
      // multiple frame indices and/or cause eliminateFrameIndex
      // to insert more than one instruction. We need the register
      // scavenger to go through all of these instructions so that
      // it can update its register information. We keep the
      // iterator at the point before insertion so that we can
      // revisit them in full.
      bool AtBeginning = (I == BB->begin());
      if (!AtBeginning) --I;

      // If this instruction has a FrameIndex operand, we need to
      // use that target machine register info object to eliminate
      // it.
      TRI.eliminateFrameIndex(MI, SPAdj, i,
                              FrameIndexEliminationScavenging ? RS : nullptr);

      // Reset the iterator if we were at the beginning of the BB.
      if (AtBeginning) {
        I = BB->begin();
        DoIncr = false;
      }

      DidFinishLoop = false;
      break;
    }

    // If we are looking at a call sequence, we need to keep track of
    // the SP adjustment made by each instruction in the sequence.
    // This includes both the frame setup/destroy pseudos (handled above),
    // as well as other instructions that have side effects w.r.t the SP.
    // Note that this must come after eliminateFrameIndex, because
    // if I itself referred to a frame index, we shouldn't count its own
    // adjustment.
    if (DidFinishLoop && InsideCallSequence)
      SPAdj += TII.getSPAdjust(MI);

    if (DoIncr && I != BB->end()) ++I;

    // Update register states.
    if (RS && FrameIndexEliminationScavenging && DidFinishLoop)
      RS->forward(MI);
  }
}