//===- PrologEpilogInserter.cpp - Insert Prolog/Epilog code in function ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass is responsible for finalizing the function's frame layout, saving
// callee-saved registers, and for emitting prolog & epilog code for the
// function.
//
// This pass must be run after register allocation. After this pass is
// executed, it is illegal to construct MO_FrameIndex operands.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <functional>
#include <limits>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "prologepilog"

using MBBVector = SmallVector<MachineBasicBlock *, 4>;

STATISTIC(NumLeafFuncWithSpills, "Number of leaf functions with CSRs");
STATISTIC(NumFuncSeen, "Number of functions seen in PEI");

namespace {

class PEI : public MachineFunctionPass {
public:
  static char ID;

  PEI() : MachineFunctionPass(ID) {
    initializePEIPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  /// runOnMachineFunction - Insert prolog/epilog code and replace abstract
  /// frame indexes with appropriate references.
  bool runOnMachineFunction(MachineFunction &MF) override;

private:
  RegScavenger *RS;

  // MinCSFrameIndex, MaxCSFrameIndex - Keeps the range of callee saved
  // stack frame indexes.
  unsigned MinCSFrameIndex = std::numeric_limits<unsigned>::max();
  unsigned MaxCSFrameIndex = 0;

  // Save and Restore blocks of the current function. Typically there is a
  // single save block, unless Windows EH funclets are involved.
  MBBVector SaveBlocks;
  MBBVector RestoreBlocks;

  // Flag to control whether to use the register scavenger to resolve
  // frame index materialization registers. Set according to
  // TRI->requiresFrameIndexScavenging() for the current function.
  bool FrameIndexVirtualScavenging;

  // Flag to control whether the scavenger should be passed even though
  // FrameIndexVirtualScavenging is used.
  bool FrameIndexEliminationScavenging;

  // Emit remarks.
  MachineOptimizationRemarkEmitter *ORE = nullptr;

  void calculateCallFrameInfo(MachineFunction &MF);
  void calculateSaveRestoreBlocks(MachineFunction &MF);
  void spillCalleeSavedRegs(MachineFunction &MF);

  void calculateFrameObjectOffsets(MachineFunction &MF);
  void replaceFrameIndices(MachineFunction &MF);
  void replaceFrameIndices(MachineBasicBlock *BB, MachineFunction &MF,
                           int &SPAdj);
  void insertPrologEpilogCode(MachineFunction &MF);
};

} // end anonymous namespace

char PEI::ID = 0;

char &llvm::PrologEpilogCodeInserterID = PEI::ID;

INITIALIZE_PASS_BEGIN(PEI, DEBUG_TYPE, "Prologue/Epilogue Insertion", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachineOptimizationRemarkEmitterPass)
INITIALIZE_PASS_END(PEI, DEBUG_TYPE,
                    "Prologue/Epilogue Insertion & Frame Finalization", false,
                    false)

MachineFunctionPass *llvm::createPrologEpilogInserterPass() {
  return new PEI();
}

STATISTIC(NumBytesStackSpace,
          "Number of bytes used for stack in all functions");

void PEI::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addPreserved<MachineLoopInfo>();
  AU.addPreserved<MachineDominatorTree>();
  AU.addRequired<MachineOptimizationRemarkEmitterPass>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

/// StackObjSet - A set of stack object indexes.
using StackObjSet = SmallSetVector<int, 8>;

using SavedDbgValuesMap =
    SmallDenseMap<MachineBasicBlock *, SmallVector<MachineInstr *, 4>, 4>;

/// Stash DBG_VALUEs that describe parameters and which are placed at the start
/// of the block. Later on, after the prologue code has been emitted, the
/// stashed DBG_VALUEs will be reinserted at the start of the block.
static void stashEntryDbgValues(MachineBasicBlock &MBB,
                                SavedDbgValuesMap &EntryDbgValues) {
  SmallVector<const MachineInstr *, 4> FrameIndexValues;

  for (auto &MI : MBB) {
    if (!MI.isDebugInstr())
      break;
    if (!MI.isDebugValue() || !MI.getDebugVariable()->isParameter())
      continue;
    if (any_of(MI.debug_operands(),
               [](const MachineOperand &MO) { return MO.isFI(); })) {
      // We can only emit valid locations for frame indices after the frame
      // setup, so do not stash them away.
      FrameIndexValues.push_back(&MI);
      continue;
    }
    const DILocalVariable *Var = MI.getDebugVariable();
    const DIExpression *Expr = MI.getDebugExpression();
    auto Overlaps = [Var, Expr](const MachineInstr *DV) {
      return Var == DV->getDebugVariable() &&
             Expr->fragmentsOverlap(DV->getDebugExpression());
    };
    // See if the debug value overlaps with any preceding debug value that will
    // not be stashed. If that is the case, then we can't stash this value, as
    // we would then reorder the values at reinsertion.
    if (llvm::none_of(FrameIndexValues, Overlaps))
      EntryDbgValues[&MBB].push_back(&MI);
  }

  // Remove stashed debug values from the block.
  if (EntryDbgValues.count(&MBB))
    for (auto *MI : EntryDbgValues[&MBB])
      MI->removeFromParent();
}

/// runOnMachineFunction - Insert prolog/epilog code and replace abstract
/// frame indexes with appropriate references.
bool PEI::runOnMachineFunction(MachineFunction &MF) {
  NumFuncSeen++;
  const Function &F = MF.getFunction();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();

  RS = TRI->requiresRegisterScavenging(MF) ? new RegScavenger() : nullptr;
  FrameIndexVirtualScavenging = TRI->requiresFrameIndexScavenging(MF);
  ORE = &getAnalysis<MachineOptimizationRemarkEmitterPass>().getORE();

  // Calculate the MaxCallFrameSize and AdjustsStack variables for the
  // function's frame information. Also eliminates call frame pseudo
  // instructions.
  calculateCallFrameInfo(MF);

  // Determine placement of CSR spill/restore code and prolog/epilog code:
  // place all spills in the entry block, all restores in return blocks.
  calculateSaveRestoreBlocks(MF);

  // Stash away DBG_VALUEs that should not be moved by insertion of prolog
  // code.
  SavedDbgValuesMap EntryDbgValues;
  for (MachineBasicBlock *SaveBlock : SaveBlocks)
    stashEntryDbgValues(*SaveBlock, EntryDbgValues);

  // Handle CSR spilling and restoring, for targets that need it.
  if (MF.getTarget().usesPhysRegsForValues())
    spillCalleeSavedRegs(MF);

  // Allow the target machine to make final modifications to the function
  // before the frame layout is finalized.
  TFI->processFunctionBeforeFrameFinalized(MF, RS);

  // Calculate actual frame offsets for all abstract stack objects...
  calculateFrameObjectOffsets(MF);

  // Add prolog and epilog code to the function. This function is required
  // to align the stack frame as necessary for any stack variables or
  // called functions. Because of this, calculateCallFrameInfo() must be
  // called before this function in order to set the AdjustsStack and
  // MaxCallFrameSize variables.
  if (!F.hasFnAttribute(Attribute::Naked))
    insertPrologEpilogCode(MF);

  // Reinsert stashed debug values at the start of the entry blocks.
  for (auto &I : EntryDbgValues)
    I.first->insert(I.first->begin(), I.second.begin(), I.second.end());

  // Allow the target machine to make final modifications to the function
  // before the frame indices are replaced.
  TFI->processFunctionBeforeFrameIndicesReplaced(MF, RS);

  // Replace all MO_FrameIndex operands with physical register references
  // and actual offsets.
  //
  replaceFrameIndices(MF);

  // If register scavenging is needed and we've enabled doing it as a
  // post-pass, scavenge the virtual registers that frame index elimination
  // inserted.
  if (TRI->requiresRegisterScavenging(MF) && FrameIndexVirtualScavenging)
    scavengeFrameVirtualRegs(MF, *RS);

  // Warn on stack size when it exceeds the given limit.
  MachineFrameInfo &MFI = MF.getFrameInfo();
  uint64_t StackSize = MFI.getStackSize();

  unsigned Threshold = UINT_MAX;
  if (MF.getFunction().hasFnAttribute("warn-stack-size")) {
    bool Failed = MF.getFunction()
                      .getFnAttribute("warn-stack-size")
                      .getValueAsString()
                      .getAsInteger(10, Threshold);
    // Verifier should have caught this.
    assert(!Failed && "Invalid warn-stack-size fn attr value");
    (void)Failed;
  }
  if (StackSize > Threshold) {
    DiagnosticInfoStackSize DiagStackSize(F, StackSize, DS_Warning, Threshold);
    F.getContext().diagnose(DiagStackSize);
  }
  ORE->emit([&]() {
    return MachineOptimizationRemarkAnalysis(DEBUG_TYPE, "StackSize",
                                             MF.getFunction().getSubprogram(),
                                             &MF.front())
           << ore::NV("NumStackBytes", StackSize) << " stack bytes in function";
  });

  delete RS;
  SaveBlocks.clear();
  RestoreBlocks.clear();
  MFI.setSavePoint(nullptr);
  MFI.setRestorePoint(nullptr);
  return true;
}

/// Calculate the MaxCallFrameSize and AdjustsStack
/// variables for the function's frame information and eliminate call frame
/// pseudo instructions.
void PEI::calculateCallFrameInfo(MachineFunction &MF) {
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  unsigned MaxCallFrameSize = 0;
  bool AdjustsStack = MFI.adjustsStack();

  // Get the function call frame set-up and tear-down instruction opcodes.
  unsigned FrameSetupOpcode = TII.getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = TII.getCallFrameDestroyOpcode();

  // Early exit for targets which have no call frame setup/destroy pseudo
  // instructions.
  if (FrameSetupOpcode == ~0u && FrameDestroyOpcode == ~0u)
    return;

  std::vector<MachineBasicBlock::iterator> FrameSDOps;
  for (MachineBasicBlock &BB : MF)
    for (MachineBasicBlock::iterator I = BB.begin(); I != BB.end(); ++I)
      if (TII.isFrameInstr(*I)) {
        unsigned Size = TII.getFrameSize(*I);
        if (Size > MaxCallFrameSize) MaxCallFrameSize = Size;
        AdjustsStack = true;
        FrameSDOps.push_back(I);
      } else if (I->isInlineAsm()) {
        // Some inline asms need a stack frame, as indicated by operand 1.
        unsigned ExtraInfo = I->getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
        if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
          AdjustsStack = true;
      }

  assert(!MFI.isMaxCallFrameSizeComputed() ||
         (MFI.getMaxCallFrameSize() == MaxCallFrameSize &&
          MFI.adjustsStack() == AdjustsStack));
  MFI.setAdjustsStack(AdjustsStack);
  MFI.setMaxCallFrameSize(MaxCallFrameSize);

  for (MachineBasicBlock::iterator I : FrameSDOps) {
    // If call frames are not being included as part of the stack frame, and
    // the target doesn't indicate otherwise, remove the call frame pseudos
    // here. The sub/add sp instruction pairs are still inserted, but we don't
    // need to track the SP adjustment for frame index elimination.
    if (TFI->canSimplifyCallFramePseudos(MF))
      TFI->eliminateCallFramePseudoInstr(MF, *I->getParent(), I);
  }
}

/// Compute the sets of entry and return blocks for saving and restoring
/// callee-saved registers, and placing prolog and epilog code.
void PEI::calculateSaveRestoreBlocks(MachineFunction &MF) {
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // Even when we do not change any CSR, we still want to insert the
  // prologue and epilogue of the function.
  // So set the save points for those.

  // Use the points found by shrink-wrapping, if any.
  if (MFI.getSavePoint()) {
    SaveBlocks.push_back(MFI.getSavePoint());
    assert(MFI.getRestorePoint() && "Both restore and save must be set");
    MachineBasicBlock *RestoreBlock = MFI.getRestorePoint();
    // If RestoreBlock does not have any successor and is not a return block
    // then the end point is unreachable and we do not need to insert any
    // epilogue.
    if (!RestoreBlock->succ_empty() || RestoreBlock->isReturnBlock())
      RestoreBlocks.push_back(RestoreBlock);
    return;
  }

  // Save refs to entry and return blocks.
  SaveBlocks.push_back(&MF.front());
  for (MachineBasicBlock &MBB : MF) {
    if (MBB.isEHFuncletEntry())
      SaveBlocks.push_back(&MBB);
    if (MBB.isReturnBlock())
      RestoreBlocks.push_back(&MBB);
  }
}

static void assignCalleeSavedSpillSlots(MachineFunction &F,
                                        const BitVector &SavedRegs,
                                        unsigned &MinCSFrameIndex,
                                        unsigned &MaxCSFrameIndex) {
  if (SavedRegs.empty())
    return;

  const TargetRegisterInfo *RegInfo = F.getSubtarget().getRegisterInfo();
  const MCPhysReg *CSRegs = F.getRegInfo().getCalleeSavedRegs();

  std::vector<CalleeSavedInfo> CSI;
  for (unsigned i = 0; CSRegs[i]; ++i) {
    unsigned Reg = CSRegs[i];
    if (SavedRegs.test(Reg))
      CSI.push_back(CalleeSavedInfo(Reg));
  }

  const TargetFrameLowering *TFI = F.getSubtarget().getFrameLowering();
  MachineFrameInfo &MFI = F.getFrameInfo();
  if (!TFI->assignCalleeSavedSpillSlots(F, RegInfo, CSI, MinCSFrameIndex,
                                        MaxCSFrameIndex)) {
    // If the target doesn't implement this, use generic code.

    if (CSI.empty())
      return; // Early exit if no callee saved registers are modified!

    unsigned NumFixedSpillSlots;
    const TargetFrameLowering::SpillSlot *FixedSpillSlots =
        TFI->getCalleeSavedSpillSlots(NumFixedSpillSlots);

    // Now that we know which registers need to be saved and restored, allocate
    // stack slots for them.
    for (auto &CS : CSI) {
      // If the target has spilled this register to another register, we don't
      // need to allocate a stack slot.
      if (CS.isSpilledToReg())
        continue;

      unsigned Reg = CS.getReg();
      const TargetRegisterClass *RC = RegInfo->getMinimalPhysRegClass(Reg);

      int FrameIdx;
      if (RegInfo->hasReservedSpillSlot(F, Reg, FrameIdx)) {
        CS.setFrameIdx(FrameIdx);
        continue;
      }

      // Check to see if this physreg must be spilled to a particular stack
      // slot on this target.
      const TargetFrameLowering::SpillSlot *FixedSlot = FixedSpillSlots;
      while (FixedSlot != FixedSpillSlots + NumFixedSpillSlots &&
             FixedSlot->Reg != Reg)
        ++FixedSlot;

      unsigned Size = RegInfo->getSpillSize(*RC);
      if (FixedSlot == FixedSpillSlots + NumFixedSpillSlots) {
        // Nope, just spill it anywhere convenient.
        Align Alignment = RegInfo->getSpillAlign(*RC);
        // We may not be able to satisfy the desired alignment specification of
        // the TargetRegisterClass if the stack alignment is smaller. Use the
        // min.
        Alignment = std::min(Alignment, TFI->getStackAlign());
        FrameIdx = MFI.CreateStackObject(Size, Alignment, true);
        if ((unsigned)FrameIdx < MinCSFrameIndex) MinCSFrameIndex = FrameIdx;
        if ((unsigned)FrameIdx > MaxCSFrameIndex) MaxCSFrameIndex = FrameIdx;
      } else {
        // Spill it to the stack where we must.
        FrameIdx = MFI.CreateFixedSpillStackObject(Size, FixedSlot->Offset);
      }

      CS.setFrameIdx(FrameIdx);
    }
  }

  MFI.setCalleeSavedInfo(CSI);
}

/// Helper function to update the liveness information for the callee-saved
/// registers.
static void updateLiveness(MachineFunction &MF) {
  MachineFrameInfo &MFI = MF.getFrameInfo();
  // Visited will contain all the basic blocks that are in the region
  // where the callee saved registers are alive:
  // - Anything that is not Save or Restore -> LiveThrough.
  // - Save -> LiveIn.
  // - Restore -> LiveOut.
  // The live-out is not attached to the block, so no need to keep
  // Restore in this set.
  SmallPtrSet<MachineBasicBlock *, 8> Visited;
  SmallVector<MachineBasicBlock *, 8> WorkList;
  MachineBasicBlock *Entry = &MF.front();
  MachineBasicBlock *Save = MFI.getSavePoint();

  if (!Save)
    Save = Entry;

  if (Entry != Save) {
    WorkList.push_back(Entry);
    Visited.insert(Entry);
  }
  Visited.insert(Save);

  MachineBasicBlock *Restore = MFI.getRestorePoint();
  if (Restore)
    // By construction Restore cannot be visited, otherwise it
    // means there exists a path to Restore that does not go
    // through Save.
    WorkList.push_back(Restore);

  while (!WorkList.empty()) {
    const MachineBasicBlock *CurBB = WorkList.pop_back_val();
    // By construction, the region that is after the save point is
    // dominated by the Save and post-dominated by the Restore.
    if (CurBB == Save && Save != Restore)
      continue;
    // Enqueue all the successors not already visited.
    // Those are by construction either before Save or after Restore.
    for (MachineBasicBlock *SuccBB : CurBB->successors())
      if (Visited.insert(SuccBB).second)
        WorkList.push_back(SuccBB);
  }

  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();

  MachineRegisterInfo &MRI = MF.getRegInfo();
  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    for (MachineBasicBlock *MBB : Visited) {
      MCPhysReg Reg = CSI[i].getReg();
      // Add the callee-saved register as live-in.
      // It's killed at the spill.
      if (!MRI.isReserved(Reg) && !MBB->isLiveIn(Reg))
        MBB->addLiveIn(Reg);
    }
    // If a callee-saved register is spilled to another register rather than
    // spilled to the stack, the destination register has to be marked as live
    // for each MBB between the prologue and epilogue so that it is not
    // clobbered before it is reloaded in the epilogue. The Visited set
    // contains all blocks outside of the region delimited by
    // prologue/epilogue.
    if (CSI[i].isSpilledToReg()) {
      for (MachineBasicBlock &MBB : MF) {
        if (Visited.count(&MBB))
          continue;
        MCPhysReg DstReg = CSI[i].getDstReg();
        if (!MBB.isLiveIn(DstReg))
          MBB.addLiveIn(DstReg);
      }
    }
  }
}

/// Insert spill code for the callee-saved registers used in the function.
static void insertCSRSaves(MachineBasicBlock &SaveBlock,
                           ArrayRef<CalleeSavedInfo> CSI) {
  MachineFunction &MF = *SaveBlock.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  MachineBasicBlock::iterator I = SaveBlock.begin();
  if (!TFI->spillCalleeSavedRegisters(SaveBlock, I, CSI, TRI)) {
    for (const CalleeSavedInfo &CS : CSI) {
      // Insert the spill to the stack frame.
      unsigned Reg = CS.getReg();

      if (CS.isSpilledToReg()) {
        BuildMI(SaveBlock, I, DebugLoc(),
                TII.get(TargetOpcode::COPY), CS.getDstReg())
            .addReg(Reg, getKillRegState(true));
      } else {
        const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
        TII.storeRegToStackSlot(SaveBlock, I, Reg, true, CS.getFrameIdx(), RC,
                                TRI);
      }
    }
  }
}

/// Insert restore code for the callee-saved registers used in the function.
static void insertCSRRestores(MachineBasicBlock &RestoreBlock,
                              std::vector<CalleeSavedInfo> &CSI) {
  MachineFunction &MF = *RestoreBlock.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  // Restore all registers immediately before the return and any
  // terminators that precede it.
  MachineBasicBlock::iterator I = RestoreBlock.getFirstTerminator();

  if (!TFI->restoreCalleeSavedRegisters(RestoreBlock, I, CSI, TRI)) {
    for (const CalleeSavedInfo &CI : reverse(CSI)) {
      unsigned Reg = CI.getReg();
      if (CI.isSpilledToReg()) {
        BuildMI(RestoreBlock, I, DebugLoc(), TII.get(TargetOpcode::COPY), Reg)
            .addReg(CI.getDstReg(), getKillRegState(true));
      } else {
        const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
        TII.loadRegFromStackSlot(RestoreBlock, I, Reg, CI.getFrameIdx(), RC,
                                 TRI);
        assert(I != RestoreBlock.begin() &&
               "loadRegFromStackSlot didn't insert any code!");
        // Insert in reverse order. loadRegFromStackSlot can insert
        // multiple instructions.
      }
    }
  }
}

void PEI::spillCalleeSavedRegs(MachineFunction &MF) {
  // We can't list this requirement in getRequiredProperties because some
  // targets (WebAssembly) use virtual registers past this point, and the pass
  // pipeline is set up without giving the passes a chance to look at the
  // TargetMachine.
  // FIXME: Find a way to express this in getRequiredProperties.
  assert(MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::NoVRegs));

  const Function &F = MF.getFunction();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MinCSFrameIndex = std::numeric_limits<unsigned>::max();
  MaxCSFrameIndex = 0;

  // Determine which of the registers in the callee save list should be saved.
  BitVector SavedRegs;
  TFI->determineCalleeSaves(MF, SavedRegs, RS);

  // Assign stack slots for any callee-saved registers that must be spilled.
  assignCalleeSavedSpillSlots(MF, SavedRegs, MinCSFrameIndex, MaxCSFrameIndex);

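  // MinCSFrameIndex/MaxCSFrameIndex were reset to UINT_MAX/0 above, so the
  // range stays "empty" (Min > Max) unless assignCalleeSavedSpillSlots created
  // callee-save stack objects; the offset-assignment code later tests
  // MaxCSFrameIndex >= MinCSFrameIndex to detect this.
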
  // Add the code to save and restore the callee saved registers.
  if (!F.hasFnAttribute(Attribute::Naked)) {
    MFI.setCalleeSavedInfoValid(true);

    std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
    if (!CSI.empty()) {
      if (!MFI.hasCalls())
        NumLeafFuncWithSpills++;

      for (MachineBasicBlock *SaveBlock : SaveBlocks)
        insertCSRSaves(*SaveBlock, CSI);

      // Update the live-in information of all the blocks up to the save
      // point.
      updateLiveness(MF);

      for (MachineBasicBlock *RestoreBlock : RestoreBlocks)
        insertCSRRestores(*RestoreBlock, CSI);
    }
  }
}

/// AdjustStackOffset - Helper function used to adjust the stack frame offset.
static inline void AdjustStackOffset(MachineFrameInfo &MFI, int FrameIdx,
                                     bool StackGrowsDown, int64_t &Offset,
                                     Align &MaxAlign, unsigned Skew) {
  // If the stack grows down, add the object size to find the lowest address.
  if (StackGrowsDown)
    Offset += MFI.getObjectSize(FrameIdx);

  Align Alignment = MFI.getObjectAlign(FrameIdx);

  // If the alignment of this object is greater than that of the stack, then
  // increase the stack alignment to match.
  MaxAlign = std::max(MaxAlign, Alignment);

  // Adjust to alignment boundary.
  Offset = alignTo(Offset, Alignment, Skew);

  if (StackGrowsDown) {
    LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << -Offset
                      << "]\n");
    MFI.setObjectOffset(FrameIdx, -Offset); // Set the computed offset
  } else {
    LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << Offset
                      << "]\n");
    MFI.setObjectOffset(FrameIdx, Offset);
    Offset += MFI.getObjectSize(FrameIdx);
  }
}

/// Compute which bytes of fixed and callee-save stack area are unused and keep
/// track of them in StackBytesFree.
static inline void
computeFreeStackSlots(MachineFrameInfo &MFI, bool StackGrowsDown,
                      unsigned MinCSFrameIndex, unsigned MaxCSFrameIndex,
                      int64_t FixedCSEnd, BitVector &StackBytesFree) {
  // Avoid undefined int64_t -> int conversion below in extreme case.
  if (FixedCSEnd > std::numeric_limits<int>::max())
    return;

  StackBytesFree.resize(FixedCSEnd, true);

  SmallVector<int, 16> AllocatedFrameSlots;
  // Add fixed objects.
  for (int i = MFI.getObjectIndexBegin(); i != 0; ++i)
    // StackSlot scavenging is only implemented for the default stack.
    if (MFI.getStackID(i) == TargetStackID::Default)
      AllocatedFrameSlots.push_back(i);
  // Add callee-save objects if there are any.
  if (MinCSFrameIndex <= MaxCSFrameIndex) {
    for (int i = MinCSFrameIndex; i <= (int)MaxCSFrameIndex; ++i)
      if (MFI.getStackID(i) == TargetStackID::Default)
        AllocatedFrameSlots.push_back(i);
  }

  for (int i : AllocatedFrameSlots) {
    // These are converted from int64_t, but they should always fit in int
    // because of the FixedCSEnd check above.
    int ObjOffset = MFI.getObjectOffset(i);
    int ObjSize = MFI.getObjectSize(i);
    int ObjStart, ObjEnd;
    if (StackGrowsDown) {
      // ObjOffset is negative when StackGrowsDown is true.
      ObjStart = -ObjOffset - ObjSize;
      ObjEnd = -ObjOffset;
    } else {
      ObjStart = ObjOffset;
      ObjEnd = ObjOffset + ObjSize;
    }
    // Ignore fixed holes that are in the previous stack frame.
    if (ObjEnd > 0)
      StackBytesFree.reset(ObjStart, ObjEnd);
  }
}

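// Example of the byte mapping established above and consumed by
// scavengeStackSlot below: with a downward-growing stack, a fixed object of
// size 8 at offset -16 maps to bytes [8, 16) of StackBytesFree, so those bits
// get cleared and cannot be reused. (Illustrative numbers only.)
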
/// Assign frame object to an unused portion of the stack in the fixed stack
/// object range. Return true if the allocation was successful.
static inline bool scavengeStackSlot(MachineFrameInfo &MFI, int FrameIdx,
                                     bool StackGrowsDown, Align MaxAlign,
                                     BitVector &StackBytesFree) {
  if (MFI.isVariableSizedObjectIndex(FrameIdx))
    return false;

  if (StackBytesFree.none()) {
    // Clear it to speed up later scavengeStackSlot calls to
    // StackBytesFree.none().
    StackBytesFree.clear();
    return false;
  }

  Align ObjAlign = MFI.getObjectAlign(FrameIdx);
  if (ObjAlign > MaxAlign)
    return false;

  int64_t ObjSize = MFI.getObjectSize(FrameIdx);
  int FreeStart;
  for (FreeStart = StackBytesFree.find_first(); FreeStart != -1;
       FreeStart = StackBytesFree.find_next(FreeStart)) {

    // Check that free space has suitable alignment.
    unsigned ObjStart = StackGrowsDown ? FreeStart + ObjSize : FreeStart;
    if (alignTo(ObjStart, ObjAlign) != ObjStart)
      continue;

    if (FreeStart + ObjSize > StackBytesFree.size())
      return false;

    bool AllBytesFree = true;
    for (unsigned Byte = 0; Byte < ObjSize; ++Byte)
      if (!StackBytesFree.test(FreeStart + Byte)) {
        AllBytesFree = false;
        break;
      }
    if (AllBytesFree)
      break;
  }

  if (FreeStart == -1)
    return false;

  if (StackGrowsDown) {
    int ObjStart = -(FreeStart + ObjSize);
    LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") scavenged at SP["
                      << ObjStart << "]\n");
    MFI.setObjectOffset(FrameIdx, ObjStart);
  } else {
    LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") scavenged at SP["
                      << FreeStart << "]\n");
    MFI.setObjectOffset(FrameIdx, FreeStart);
  }

  StackBytesFree.reset(FreeStart, FreeStart + ObjSize);
  return true;
}

/// AssignProtectedObjSet - Helper function to assign large stack objects (i.e.,
/// those required to be close to the Stack Protector) to stack offsets.
static void AssignProtectedObjSet(const StackObjSet &UnassignedObjs,
                                  SmallSet<int, 16> &ProtectedObjs,
                                  MachineFrameInfo &MFI, bool StackGrowsDown,
                                  int64_t &Offset, Align &MaxAlign,
                                  unsigned Skew) {
  for (int i : UnassignedObjs) {
    AdjustStackOffset(MFI, i, StackGrowsDown, Offset, MaxAlign, Skew);
    ProtectedObjs.insert(i);
  }
}

/// calculateFrameObjectOffsets - Calculate actual frame offsets for all of the
/// abstract stack objects.
void PEI::calculateFrameObjectOffsets(MachineFunction &MF) {
  const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();

  bool StackGrowsDown =
      TFI.getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;

  // Loop over all of the stack objects, assigning sequential addresses...
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // Start at the beginning of the local area.
  // The Offset is the distance from the stack top in the direction
  // of stack growth -- so it's always nonnegative.
  int LocalAreaOffset = TFI.getOffsetOfLocalArea();
  if (StackGrowsDown)
    LocalAreaOffset = -LocalAreaOffset;
  assert(LocalAreaOffset >= 0 &&
         "Local area offset should be in direction of stack growth");
  int64_t Offset = LocalAreaOffset;

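  // Note on the skew used below: alignTo(Offset, Align, Skew) is assumed to
  // round Offset up to the next value congruent to Skew modulo Align, rather
  // than to a plain multiple of Align, letting a target bias the whole frame
  // layout by a fixed amount.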
  // Skew to be applied to alignment.
  unsigned Skew = TFI.getStackAlignmentSkew(MF);

#ifdef EXPENSIVE_CHECKS
  for (unsigned i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i)
    if (!MFI.isDeadObjectIndex(i) &&
        MFI.getStackID(i) == TargetStackID::Default)
      assert(MFI.getObjectAlign(i) <= MFI.getMaxAlign() &&
             "MaxAlignment is invalid");
#endif

  // If there are fixed sized objects that are preallocated in the local area,
  // non-fixed objects can't be allocated right at the start of the local area.
  // Adjust 'Offset' to point to the end of the last fixed sized preallocated
  // object.
  for (int i = MFI.getObjectIndexBegin(); i != 0; ++i) {
    if (MFI.getStackID(i) !=
        TargetStackID::Default) // Only allocate objects on the default stack.
      continue;

    int64_t FixedOff;
    if (StackGrowsDown) {
      // The maximum distance from the stack pointer is at the lower address of
      // the object -- which is given by the offset. For a down-growing stack
      // the offset is negative, so we negate the offset to get the distance.
      FixedOff = -MFI.getObjectOffset(i);
    } else {
      // The maximum distance from the stack pointer is at the upper
      // address of the object.
      FixedOff = MFI.getObjectOffset(i) + MFI.getObjectSize(i);
    }
    if (FixedOff > Offset) Offset = FixedOff;
  }

  // First assign frame offsets to stack objects that are used to spill
  // callee saved registers.
  if (StackGrowsDown && MaxCSFrameIndex >= MinCSFrameIndex) {
    for (unsigned i = MinCSFrameIndex; i <= MaxCSFrameIndex; ++i) {
      if (MFI.getStackID(i) !=
          TargetStackID::Default) // Only allocate objects on the default stack.
        continue;

      // If the stack grows down, we need to add the size to find the lowest
      // address of the object.
      Offset += MFI.getObjectSize(i);

      // Adjust to alignment boundary.
      Offset = alignTo(Offset, MFI.getObjectAlign(i), Skew);

      LLVM_DEBUG(dbgs() << "alloc FI(" << i << ") at SP[" << -Offset << "]\n");
      MFI.setObjectOffset(i, -Offset); // Set the computed offset
    }
  } else if (MaxCSFrameIndex >= MinCSFrameIndex) {
    // Be careful about underflow in comparisons against MinCSFrameIndex.
    for (unsigned i = MaxCSFrameIndex; i != MinCSFrameIndex - 1; --i) {
      if (MFI.getStackID(i) !=
          TargetStackID::Default) // Only allocate objects on the default stack.
        continue;

      if (MFI.isDeadObjectIndex(i))
        continue;

      // Adjust to alignment boundary.
      Offset = alignTo(Offset, MFI.getObjectAlign(i), Skew);

      LLVM_DEBUG(dbgs() << "alloc FI(" << i << ") at SP[" << Offset << "]\n");
      MFI.setObjectOffset(i, Offset);
      Offset += MFI.getObjectSize(i);
    }
  }

  // FixedCSEnd is the stack offset to the end of the fixed and callee-save
  // stack area.
  int64_t FixedCSEnd = Offset;
  Align MaxAlign = MFI.getMaxAlign();

  // Make sure the special register scavenging spill slot is closest to the
  // incoming stack pointer if a frame pointer is required and is closer
  // to the incoming rather than the final stack pointer.
  const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
  bool EarlyScavengingSlots = (TFI.hasFP(MF) && TFI.isFPCloseToIncomingSP() &&
                               RegInfo->useFPForScavengingIndex(MF) &&
                               !RegInfo->hasStackRealignment(MF));
  if (RS && EarlyScavengingSlots) {
    SmallVector<int, 2> SFIs;
    RS->getScavengingFrameIndices(SFIs);
    for (int SFI : SFIs)
      AdjustStackOffset(MFI, SFI, StackGrowsDown, Offset, MaxAlign, Skew);
  }

  // FIXME: Once this is working, then enable flag will change to a target
  // check for whether the frame is large enough to want to use virtual
  // frame index registers. Functions which don't want/need this optimization
  // will continue to use the existing code path.
  if (MFI.getUseLocalStackAllocationBlock()) {
    Align Alignment = MFI.getLocalFrameMaxAlign();

    // Adjust to alignment boundary.
    Offset = alignTo(Offset, Alignment, Skew);

    LLVM_DEBUG(dbgs() << "Local frame base offset: " << Offset << "\n");

    // Resolve offsets for objects in the local block.
    for (unsigned i = 0, e = MFI.getLocalFrameObjectCount(); i != e; ++i) {
      std::pair<int, int64_t> Entry = MFI.getLocalFrameObjectMap(i);
      int64_t FIOffset = (StackGrowsDown ? -Offset : Offset) + Entry.second;
      LLVM_DEBUG(dbgs() << "alloc FI(" << Entry.first << ") at SP[" << FIOffset
                        << "]\n");
      MFI.setObjectOffset(Entry.first, FIOffset);
    }
    // Allocate the local block.
    Offset += MFI.getLocalFrameSize();

    MaxAlign = std::max(Alignment, MaxAlign);
  }

  // Retrieve the Exception Handler registration node.
  int EHRegNodeFrameIndex = std::numeric_limits<int>::max();
  if (const WinEHFuncInfo *FuncInfo = MF.getWinEHFuncInfo())
    EHRegNodeFrameIndex = FuncInfo->EHRegNodeFrameIndex;

  // Make sure that the stack protector comes before the local variables on the
  // stack.
  SmallSet<int, 16> ProtectedObjs;
  if (MFI.hasStackProtectorIndex()) {
    int StackProtectorFI = MFI.getStackProtectorIndex();
    StackObjSet LargeArrayObjs;
    StackObjSet SmallArrayObjs;
    StackObjSet AddrOfObjs;

    // If we need a stack protector, we need to make sure that
    // LocalStackSlotPass didn't already allocate a slot for it.
    // If we are told to use the LocalStackAllocationBlock, the stack protector
    // is expected to be already pre-allocated.
    if (!MFI.getUseLocalStackAllocationBlock())
      AdjustStackOffset(MFI, StackProtectorFI, StackGrowsDown, Offset, MaxAlign,
                        Skew);
    else if (!MFI.isObjectPreAllocated(MFI.getStackProtectorIndex()))
      llvm_unreachable(
          "Stack protector not pre-allocated by LocalStackSlotPass.");

    // Assign large stack objects first.
    for (unsigned i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) {
      if (MFI.isObjectPreAllocated(i) && MFI.getUseLocalStackAllocationBlock())
        continue;
      if (i >= MinCSFrameIndex && i <= MaxCSFrameIndex)
        continue;
      if (RS && RS->isScavengingFrameIndex((int)i))
        continue;
      if (MFI.isDeadObjectIndex(i))
        continue;
      if (StackProtectorFI == (int)i || EHRegNodeFrameIndex == (int)i)
        continue;
      if (MFI.getStackID(i) !=
          TargetStackID::Default) // Only allocate objects on the default stack.
        continue;

      switch (MFI.getObjectSSPLayout(i)) {
      case MachineFrameInfo::SSPLK_None:
        continue;
      case MachineFrameInfo::SSPLK_SmallArray:
        SmallArrayObjs.insert(i);
        continue;
      case MachineFrameInfo::SSPLK_AddrOf:
        AddrOfObjs.insert(i);
        continue;
      case MachineFrameInfo::SSPLK_LargeArray:
        LargeArrayObjs.insert(i);
        continue;
      }
      llvm_unreachable("Unexpected SSPLayoutKind.");
    }

    // We expect **all** the protected stack objects to be pre-allocated by
    // LocalStackSlotPass. If it turns out that PEI still has to allocate some
    // of them, we may end up messing up the expected order of the objects.
    if (MFI.getUseLocalStackAllocationBlock() &&
        !(LargeArrayObjs.empty() && SmallArrayObjs.empty() &&
          AddrOfObjs.empty()))
      llvm_unreachable("Found protected stack objects not pre-allocated by "
                       "LocalStackSlotPass.");

    AssignProtectedObjSet(LargeArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
                          Offset, MaxAlign, Skew);
    AssignProtectedObjSet(SmallArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
                          Offset, MaxAlign, Skew);
    AssignProtectedObjSet(AddrOfObjs, ProtectedObjs, MFI, StackGrowsDown,
                          Offset, MaxAlign, Skew);
  }

  SmallVector<int, 8> ObjectsToAllocate;

  // Then prepare to assign frame offsets to stack objects that are not used to
  // spill callee saved registers.
  for (unsigned i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) {
    if (MFI.isObjectPreAllocated(i) && MFI.getUseLocalStackAllocationBlock())
      continue;
    if (i >= MinCSFrameIndex && i <= MaxCSFrameIndex)
      continue;
    if (RS && RS->isScavengingFrameIndex((int)i))
      continue;
    if (MFI.isDeadObjectIndex(i))
      continue;
    if (MFI.getStackProtectorIndex() == (int)i || EHRegNodeFrameIndex == (int)i)
      continue;
    if (ProtectedObjs.count(i))
      continue;
    if (MFI.getStackID(i) !=
        TargetStackID::Default) // Only allocate objects on the default stack.
      continue;

    // Add the objects that we need to allocate to our working set.
    ObjectsToAllocate.push_back(i);
  }

  // Allocate the EH registration node first if one is present.
  if (EHRegNodeFrameIndex != std::numeric_limits<int>::max())
    AdjustStackOffset(MFI, EHRegNodeFrameIndex, StackGrowsDown, Offset,
                      MaxAlign, Skew);

  // Give the targets a chance to order the objects the way they like it.
  if (MF.getTarget().getOptLevel() != CodeGenOpt::None &&
      MF.getTarget().Options.StackSymbolOrdering)
    TFI.orderFrameObjects(MF, ObjectsToAllocate);

  // Keep track of which bytes in the fixed and callee-save range are used so
  // we can use the holes when allocating later stack objects. Only do this if
  // the stack protector isn't being used, the target requests it, and we're
  // optimizing.
  BitVector StackBytesFree;
  if (!ObjectsToAllocate.empty() &&
      MF.getTarget().getOptLevel() != CodeGenOpt::None &&
      MFI.getStackProtectorIndex() < 0 && TFI.enableStackSlotScavenging(MF))
    computeFreeStackSlots(MFI, StackGrowsDown, MinCSFrameIndex, MaxCSFrameIndex,
                          FixedCSEnd, StackBytesFree);

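  // Each object in ObjectsToAllocate is first offered to scavengeStackSlot,
  // which tries to place it in an unused hole recorded in StackBytesFree;
  // only when that fails does AdjustStackOffset grow the frame.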
  // Now walk the objects and actually assign base offsets to them.
  for (auto &Object : ObjectsToAllocate)
    if (!scavengeStackSlot(MFI, Object, StackGrowsDown, MaxAlign,
                           StackBytesFree))
      AdjustStackOffset(MFI, Object, StackGrowsDown, Offset, MaxAlign, Skew);

  // Make sure the special register scavenging spill slot is closest to the
  // stack pointer.
  if (RS && !EarlyScavengingSlots) {
    SmallVector<int, 2> SFIs;
    RS->getScavengingFrameIndices(SFIs);
    for (int SFI : SFIs)
      AdjustStackOffset(MFI, SFI, StackGrowsDown, Offset, MaxAlign, Skew);
  }

  if (!TFI.targetHandlesStackFrameRounding()) {
    // If we have reserved argument space for call sites in the function
    // immediately on entry to the current function, count it as part of the
    // overall stack size.
    if (MFI.adjustsStack() && TFI.hasReservedCallFrame(MF))
      Offset += MFI.getMaxCallFrameSize();

    // Round up the size to a multiple of the alignment. If the function has
    // any calls or alloca's, align to the target's StackAlignment value to
    // ensure that the callee's frame or the alloca data is suitably aligned;
    // otherwise, for leaf functions, align to the TransientStackAlignment
    // value.
    Align StackAlign;
    if (MFI.adjustsStack() || MFI.hasVarSizedObjects() ||
        (RegInfo->hasStackRealignment(MF) && MFI.getObjectIndexEnd() != 0))
      StackAlign = TFI.getStackAlign();
    else
      StackAlign = TFI.getTransientStackAlign();

    // If the frame pointer is eliminated, all frame offsets will be relative
    // to SP not FP. Align to MaxAlign so this works.
    StackAlign = std::max(StackAlign, MaxAlign);
    int64_t OffsetBeforeAlignment = Offset;
    Offset = alignTo(Offset, StackAlign, Skew);

    // If we have increased the offset to fulfill the alignment constraints,
    // then the scavenging spill slots may become harder to reach from the
    // stack pointer; float them so they stay close.
    if (StackGrowsDown && OffsetBeforeAlignment != Offset && RS &&
        !EarlyScavengingSlots) {
      SmallVector<int, 2> SFIs;
      RS->getScavengingFrameIndices(SFIs);
      LLVM_DEBUG(if (!SFIs.empty()) llvm::dbgs()
                 << "Adjusting emergency spill slots!\n";);
      int64_t Delta = Offset - OffsetBeforeAlignment;
      for (int SFI : SFIs) {
        LLVM_DEBUG(llvm::dbgs()
                   << "Adjusting offset of emergency spill slot #" << SFI
                   << " from " << MFI.getObjectOffset(SFI););
        MFI.setObjectOffset(SFI, MFI.getObjectOffset(SFI) - Delta);
        LLVM_DEBUG(llvm::dbgs() << " to " << MFI.getObjectOffset(SFI) << "\n";);
      }
    }
  }

  // Update frame info to pretend that this is part of the stack...
  int64_t StackSize = Offset - LocalAreaOffset;
  MFI.setStackSize(StackSize);
  NumBytesStackSpace += StackSize;
}

/// insertPrologEpilogCode - Scan the function for modified callee saved
/// registers, insert spill code for these callee saved registers, then add
/// prolog and epilog code to the function.
void PEI::insertPrologEpilogCode(MachineFunction &MF) {
  const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();

  // Add prologue to the function...
  for (MachineBasicBlock *SaveBlock : SaveBlocks)
    TFI.emitPrologue(MF, *SaveBlock);

  // Add epilogue to restore the callee-save registers in each exiting block.
  for (MachineBasicBlock *RestoreBlock : RestoreBlocks)
    TFI.emitEpilogue(MF, *RestoreBlock);

  for (MachineBasicBlock *SaveBlock : SaveBlocks)
    TFI.inlineStackProbe(MF, *SaveBlock);

  // Emit additional code that is required to support segmented stacks, if
  // we've been asked for it. This, when linked with a runtime with support
  // for segmented stacks (libgcc is one), will result in allocating stack
  // space in small chunks instead of one large contiguous block.
  if (MF.shouldSplitStack()) {
    for (MachineBasicBlock *SaveBlock : SaveBlocks)
      TFI.adjustForSegmentedStacks(MF, *SaveBlock);
    // Record that there are split-stack functions, so we will emit a
    // special section to tell the linker.
    MF.getMMI().setHasSplitStack(true);
  } else
    MF.getMMI().setHasNosplitStack(true);

  // Emit additional code that is required to explicitly handle the stack in
  // HiPE native code (if needed) when loaded in the Erlang/OTP runtime. The
  // approach is rather similar to that of Segmented Stacks, but it uses a
  // different conditional check and another BIF for allocating more stack
  // space.
  if (MF.getFunction().getCallingConv() == CallingConv::HiPE)
    for (MachineBasicBlock *SaveBlock : SaveBlocks)
      TFI.adjustForHiPEPrologue(MF, *SaveBlock);
}

/// replaceFrameIndices - Replace all MO_FrameIndex operands with physical
/// register references and actual offsets.
void PEI::replaceFrameIndices(MachineFunction &MF) {
  const auto &ST = MF.getSubtarget();
  const TargetFrameLowering &TFI = *ST.getFrameLowering();
  if (!TFI.needsFrameIndexResolution(MF))
    return;

  const TargetRegisterInfo *TRI = ST.getRegisterInfo();

  // Allow the target to determine this after knowing the frame size.
  FrameIndexEliminationScavenging = (RS && !FrameIndexVirtualScavenging) ||
      TRI->requiresFrameIndexReplacementScavenging(MF);

  // Store SPAdj at exit of a basic block.
  SmallVector<int, 8> SPState;
  SPState.resize(MF.getNumBlockIDs());
  df_iterator_default_set<MachineBasicBlock *> Reachable;

  // Iterate over the reachable blocks in DFS order.
  for (auto DFI = df_ext_begin(&MF, Reachable),
            DFE = df_ext_end(&MF, Reachable);
       DFI != DFE; ++DFI) {
    int SPAdj = 0;
    // Check the exit state of the DFS stack predecessor.
    if (DFI.getPathLength() >= 2) {
      MachineBasicBlock *StackPred = DFI.getPath(DFI.getPathLength() - 2);
      assert(Reachable.count(StackPred) &&
             "DFS stack predecessor is already visited.\n");
      SPAdj = SPState[StackPred->getNumber()];
    }
    MachineBasicBlock *BB = *DFI;
    replaceFrameIndices(BB, MF, SPAdj);
    SPState[BB->getNumber()] = SPAdj;
  }

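  // Blocks that the DFS above never reaches cannot inherit an SP adjustment
  // from a predecessor, so they are processed with SPAdj starting at 0.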
  // Handle the unreachable blocks.
  for (auto &BB : MF) {
    if (Reachable.count(&BB))
      // Already handled in DFS traversal.
      continue;
    int SPAdj = 0;
    replaceFrameIndices(&BB, MF, SPAdj);
  }
}

void PEI::replaceFrameIndices(MachineBasicBlock *BB, MachineFunction &MF,
                              int &SPAdj) {
  assert(MF.getSubtarget().getRegisterInfo() &&
         "getRegisterInfo() must be implemented!");
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();

  if (RS && FrameIndexEliminationScavenging)
    RS->enterBasicBlock(*BB);

  bool InsideCallSequence = false;

  for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ) {
    if (TII.isFrameInstr(*I)) {
      InsideCallSequence = TII.isFrameSetup(*I);
      SPAdj += TII.getSPAdjust(*I);
      I = TFI->eliminateCallFramePseudoInstr(MF, *BB, I);
      continue;
    }

    MachineInstr &MI = *I;
    bool DoIncr = true;
    bool DidFinishLoop = true;
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      if (!MI.getOperand(i).isFI())
        continue;

      // Frame indices in debug values are encoded in a target independent
      // way with simply the frame index and offset rather than any
      // target-specific addressing mode.
      if (MI.isDebugValue()) {
        MachineOperand &Op = MI.getOperand(i);
        assert(
            MI.isDebugOperand(&Op) &&
            "Frame indices can only appear as a debug operand in a DBG_VALUE*"
            " machine instruction");
        Register Reg;
        unsigned FrameIdx = Op.getIndex();
        unsigned Size = MF.getFrameInfo().getObjectSize(FrameIdx);

        StackOffset Offset =
            TFI->getFrameIndexReference(MF, FrameIdx, Reg);
        Op.ChangeToRegister(Reg, false /*isDef*/);
        Op.setIsDebug();

        const DIExpression *DIExpr = MI.getDebugExpression();

        // If we have a direct DBG_VALUE, and its location expression isn't
        // currently complex, then adding an offset will morph it into a
        // complex location that is interpreted as being a memory address.
        // This changes a pointer-valued variable to dereference that pointer,
        // which is incorrect. Fix by adding DW_OP_stack_value.

        if (MI.isNonListDebugValue()) {
          unsigned PrependFlags = DIExpression::ApplyOffset;
          if (!MI.isIndirectDebugValue() && !DIExpr->isComplex())
            PrependFlags |= DIExpression::StackValue;

          // If we have a DBG_VALUE that is indirect and has an Implicit
          // location expression, we need to insert a deref before prepending
          // a Memory location expression. Also, after doing this we change
          // the DBG_VALUE to be direct.
          if (MI.isIndirectDebugValue() && DIExpr->isImplicit()) {
            SmallVector<uint64_t, 2> Ops = {dwarf::DW_OP_deref_size, Size};
            bool WithStackValue = true;
            DIExpr = DIExpression::prependOpcodes(DIExpr, Ops, WithStackValue);
            // Make the DBG_VALUE direct.
            MI.getDebugOffset().ChangeToRegister(0, false);
          }
          DIExpr = TRI.prependOffsetExpression(DIExpr, PrependFlags, Offset);
        } else {
          // The debug operand at DebugOpIndex was a frame index at offset
          // `Offset`; now that the operand has been replaced with the frame
          // register, we must add Offset with `register x, plus Offset`.
          unsigned DebugOpIndex = MI.getDebugOperandIndex(&Op);
          SmallVector<uint64_t, 3> Ops;
          TRI.getOffsetOpcodes(Offset, Ops);
          DIExpr = DIExpression::appendOpsToArg(DIExpr, Ops, DebugOpIndex);
        }
        MI.getDebugExpressionOp().setMetadata(DIExpr);
        continue;
      } else if (MI.isDebugPHI()) {
        // Allow stack ref to continue onwards.
        continue;
      }

      // TODO: This code should be commoned with the code for
      // PATCHPOINT. There's no good reason for the difference in
      // implementation other than historical accident. The only
      // remaining difference is the unconditional use of the stack
      // pointer as the base register.
      if (MI.getOpcode() == TargetOpcode::STATEPOINT) {
        assert((!MI.isDebugValue() || i == 0) &&
               "Frame indices can only appear as the first operand of a "
               "DBG_VALUE machine instruction");
        Register Reg;
        MachineOperand &Offset = MI.getOperand(i + 1);
        StackOffset refOffset = TFI->getFrameIndexReferencePreferSP(
            MF, MI.getOperand(i).getIndex(), Reg, /*IgnoreSPUpdates*/ false);
        assert(!refOffset.getScalable() &&
               "Frame offsets with a scalable component are not supported");
        Offset.setImm(Offset.getImm() + refOffset.getFixed() + SPAdj);
        MI.getOperand(i).ChangeToRegister(Reg, false /*isDef*/);
        continue;
      }

      // Some instructions (e.g. inline asm instructions) can have
      // multiple frame indices and/or cause eliminateFrameIndex
      // to insert more than one instruction. We need the register
      // scavenger to go through all of these instructions so that
      // it can update its register information. We keep the
      // iterator at the point before insertion so that we can
      // revisit them in full.
      bool AtBeginning = (I == BB->begin());
      if (!AtBeginning) --I;

      // If this instruction has a FrameIndex operand, we need to
      // use that target machine register info object to eliminate
      // it.
      TRI.eliminateFrameIndex(MI, SPAdj, i,
                              FrameIndexEliminationScavenging ? RS : nullptr);

      // Reset the iterator if we were at the beginning of the BB.
      if (AtBeginning) {
        I = BB->begin();
        DoIncr = false;
      }

      DidFinishLoop = false;
      break;
    }

    // If we are looking at a call sequence, we need to keep track of
    // the SP adjustment made by each instruction in the sequence.
    // This includes both the frame setup/destroy pseudos (handled above),
    // as well as other instructions that have side effects w.r.t the SP.
    // Note that this must come after eliminateFrameIndex, because
    // if I itself referred to a frame index, we shouldn't count its own
    // adjustment.
    if (DidFinishLoop && InsideCallSequence)
      SPAdj += TII.getSPAdjust(MI);

    if (DoIncr && I != BB->end()) ++I;

    // Update register states.
    if (RS && FrameIndexEliminationScavenging && DidFinishLoop)
      RS->forward(MI);
  }
}