//===- PrologEpilogInserter.cpp - Insert Prolog/Epilog code in function ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass is responsible for finalizing the function's frame layout, saving
// callee-saved registers, and emitting prolog & epilog code for the function.
//
// This pass must be run after register allocation. After this pass is
// executed, it is illegal to construct MO_FrameIndex operands.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <functional>
#include <limits>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "prologepilog"

using MBBVector = SmallVector<MachineBasicBlock *, 4>;

STATISTIC(NumLeafFuncWithSpills, "Number of leaf functions with CSRs");
STATISTIC(NumFuncSeen, "Number of functions seen in PEI");

namespace {

class PEI : public MachineFunctionPass {
public:
  static char ID;

  PEI() : MachineFunctionPass(ID) {
    initializePEIPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  /// runOnMachineFunction - Insert prolog/epilog code and replace abstract
  /// frame indexes with appropriate references.
  bool runOnMachineFunction(MachineFunction &MF) override;

private:
  RegScavenger *RS;

  // MinCSFrameIndex, MaxCSFrameIndex - Keeps the range of callee saved
  // stack frame indexes.
  unsigned MinCSFrameIndex = std::numeric_limits<unsigned>::max();
  unsigned MaxCSFrameIndex = 0;

  // Save and Restore blocks of the current function. Typically there is a
  // single save block, unless Windows EH funclets are involved.
  MBBVector SaveBlocks;
  MBBVector RestoreBlocks;

  // Flag to control whether to use the register scavenger to resolve
  // frame index materialization registers. Set according to
  // TRI->requiresFrameIndexScavenging() for the current function.
  bool FrameIndexVirtualScavenging;

  // Flag to control whether the scavenger should be passed even though
  // FrameIndexVirtualScavenging is used.
  bool FrameIndexEliminationScavenging;

  // Emit remarks.
  MachineOptimizationRemarkEmitter *ORE = nullptr;

  void calculateCallFrameInfo(MachineFunction &MF);
  void calculateSaveRestoreBlocks(MachineFunction &MF);
  void spillCalleeSavedRegs(MachineFunction &MF);

  void calculateFrameObjectOffsets(MachineFunction &MF);
  void replaceFrameIndices(MachineFunction &MF);
  void replaceFrameIndices(MachineBasicBlock *BB, MachineFunction &MF,
                           int &SPAdj);
  bool replaceFrameIndexDebugInstr(MachineFunction &MF, MachineInstr &MI,
                                   unsigned OpIdx, int SPAdj = 0);

  void insertPrologEpilogCode(MachineFunction &MF);
  void insertZeroCallUsedRegs(MachineFunction &MF);
};

} // end anonymous namespace

char PEI::ID = 0;

char &llvm::PrologEpilogCodeInserterID = PEI::ID;

INITIALIZE_PASS_BEGIN(PEI, DEBUG_TYPE, "Prologue/Epilogue Insertion", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachineOptimizationRemarkEmitterPass)
INITIALIZE_PASS_END(PEI, DEBUG_TYPE,
                    "Prologue/Epilogue Insertion & Frame Finalization", false,
                    false)

MachineFunctionPass *llvm::createPrologEpilogInserterPass() {
  return new PEI();
}

STATISTIC(NumBytesStackSpace,
          "Number of bytes used for stack in all functions");

void PEI::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addPreserved<MachineLoopInfo>();
  AU.addPreserved<MachineDominatorTree>();
  AU.addRequired<MachineOptimizationRemarkEmitterPass>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

/// StackObjSet - A set of stack object indexes.
using StackObjSet = SmallSetVector<int, 8>;

using SavedDbgValuesMap =
    SmallDenseMap<MachineBasicBlock *, SmallVector<MachineInstr *, 4>, 4>;

/// Stash DBG_VALUEs that describe parameters and which are placed at the start
/// of the block. Later on, after the prologue code has been emitted, the
/// stashed DBG_VALUEs will be reinserted at the start of the block.
static void stashEntryDbgValues(MachineBasicBlock &MBB,
                                SavedDbgValuesMap &EntryDbgValues) {
  SmallVector<const MachineInstr *, 4> FrameIndexValues;

  for (auto &MI : MBB) {
    if (!MI.isDebugInstr())
      break;
    if (!MI.isDebugValue() || !MI.getDebugVariable()->isParameter())
      continue;
    if (any_of(MI.debug_operands(),
               [](const MachineOperand &MO) { return MO.isFI(); })) {
      // We can only emit valid locations for frame indices after the frame
      // setup, so do not stash them away.
      FrameIndexValues.push_back(&MI);
      continue;
    }
    const DILocalVariable *Var = MI.getDebugVariable();
    const DIExpression *Expr = MI.getDebugExpression();
    auto Overlaps = [Var, Expr](const MachineInstr *DV) {
      return Var == DV->getDebugVariable() &&
             Expr->fragmentsOverlap(DV->getDebugExpression());
    };
    // See if the debug value overlaps with any preceding debug value that will
    // not be stashed. If that is the case, then we can't stash this value, as
    // we would then reorder the values at reinsertion.
    if (llvm::none_of(FrameIndexValues, Overlaps))
      EntryDbgValues[&MBB].push_back(&MI);
  }

  // Remove stashed debug values from the block.
  if (EntryDbgValues.count(&MBB))
    for (auto *MI : EntryDbgValues[&MBB])
      MI->removeFromParent();
}

/// runOnMachineFunction - Insert prolog/epilog code and replace abstract
/// frame indexes with appropriate references.
bool PEI::runOnMachineFunction(MachineFunction &MF) {
  NumFuncSeen++;
  const Function &F = MF.getFunction();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();

  RS = TRI->requiresRegisterScavenging(MF) ? new RegScavenger() : nullptr;
  FrameIndexVirtualScavenging = TRI->requiresFrameIndexScavenging(MF);
  ORE = &getAnalysis<MachineOptimizationRemarkEmitterPass>().getORE();

  // Calculate the MaxCallFrameSize and AdjustsStack variables for the
  // function's frame information. Also eliminates call frame pseudo
  // instructions.
  calculateCallFrameInfo(MF);

  // Determine placement of CSR spill/restore code and prolog/epilog code:
  // place all spills in the entry block, all restores in return blocks.
  calculateSaveRestoreBlocks(MF);

  // Stash away DBG_VALUEs that should not be moved by insertion of prolog
  // code.
  SavedDbgValuesMap EntryDbgValues;
  for (MachineBasicBlock *SaveBlock : SaveBlocks)
    stashEntryDbgValues(*SaveBlock, EntryDbgValues);

  // Handle CSR spilling and restoring, for targets that need it.
  if (MF.getTarget().usesPhysRegsForValues())
    spillCalleeSavedRegs(MF);

  // Allow the target machine to make final modifications to the function
  // before the frame layout is finalized.
  TFI->processFunctionBeforeFrameFinalized(MF, RS);

  // Calculate actual frame offsets for all abstract stack objects...
  calculateFrameObjectOffsets(MF);

  // Add prolog and epilog code to the function. This function is required
  // to align the stack frame as necessary for any stack variables or
  // called functions. Because of this, calculateCalleeSavedRegisters()
  // must be called before this function in order to set the AdjustsStack
  // and MaxCallFrameSize variables.
  if (!F.hasFnAttribute(Attribute::Naked))
    insertPrologEpilogCode(MF);

  // Reinsert stashed debug values at the start of the entry blocks.
  for (auto &I : EntryDbgValues)
    I.first->insert(I.first->begin(), I.second.begin(), I.second.end());

  // Allow the target machine to make final modifications to the function
  // before the frame indices are replaced.
  TFI->processFunctionBeforeFrameIndicesReplaced(MF, RS);

  // Replace all MO_FrameIndex operands with physical register references
  // and actual offsets.
  //
  replaceFrameIndices(MF);

  // If register scavenging is needed, as we've enabled doing it as a
  // post-pass, scavenge the virtual registers that frame index elimination
  // inserted.
  if (TRI->requiresRegisterScavenging(MF) && FrameIndexVirtualScavenging)
    scavengeFrameVirtualRegs(MF, *RS);

  // Warn on stack size when it exceeds the given limit.
  MachineFrameInfo &MFI = MF.getFrameInfo();
  uint64_t StackSize = MFI.getStackSize();

  unsigned Threshold = UINT_MAX;
  if (MF.getFunction().hasFnAttribute("warn-stack-size")) {
    bool Failed = MF.getFunction()
                      .getFnAttribute("warn-stack-size")
                      .getValueAsString()
                      .getAsInteger(10, Threshold);
    // Verifier should have caught this.
    assert(!Failed && "Invalid warn-stack-size fn attr value");
    (void)Failed;
  }
  uint64_t UnsafeStackSize = MFI.getUnsafeStackSize();
  if (MF.getFunction().hasFnAttribute(Attribute::SafeStack))
    StackSize += UnsafeStackSize;

  if (StackSize > Threshold) {
    DiagnosticInfoStackSize DiagStackSize(F, StackSize, Threshold, DS_Warning);
    F.getContext().diagnose(DiagStackSize);
    int64_t SpillSize = 0;
    for (int Idx = MFI.getObjectIndexBegin(), End = MFI.getObjectIndexEnd();
         Idx != End; ++Idx) {
      if (MFI.isSpillSlotObjectIndex(Idx))
        SpillSize += MFI.getObjectSize(Idx);
    }

    float SpillPct =
        static_cast<float>(SpillSize) / static_cast<float>(StackSize);
    float VarPct = 1.0f - SpillPct;
    int64_t VariableSize = StackSize - SpillSize;
    dbgs() << formatv("{0}/{1} ({3:P}) spills, {2}/{1} ({4:P}) variables",
                      SpillSize, StackSize, VariableSize, SpillPct, VarPct);
    if (UnsafeStackSize != 0) {
      float UnsafePct =
          static_cast<float>(UnsafeStackSize) / static_cast<float>(StackSize);
      dbgs() << formatv(", {0}/{2} ({1:P}) unsafe stack", UnsafeStackSize,
                        UnsafePct, StackSize);
    }
    dbgs() << "\n";
  }

  ORE->emit([&]() {
    return MachineOptimizationRemarkAnalysis(DEBUG_TYPE, "StackSize",
                                             MF.getFunction().getSubprogram(),
                                             &MF.front())
           << ore::NV("NumStackBytes", StackSize) << " stack bytes in function";
  });

  delete RS;
  SaveBlocks.clear();
  RestoreBlocks.clear();
  MFI.setSavePoint(nullptr);
  MFI.setRestorePoint(nullptr);
  return true;
}

/// Calculate the MaxCallFrameSize and AdjustsStack
/// variables for the function's frame information and eliminate call frame
/// pseudo instructions.
void PEI::calculateCallFrameInfo(MachineFunction &MF) {
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  unsigned MaxCallFrameSize = 0;
  bool AdjustsStack = MFI.adjustsStack();

  // Get the function call frame set-up and tear-down instruction opcode.
  unsigned FrameSetupOpcode = TII.getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = TII.getCallFrameDestroyOpcode();

  // Early exit for targets which have no call frame setup/destroy pseudo
  // instructions.
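  // (TargetInstrInfo reports ~0u for a call frame setup/destroy opcode that
  // the target does not define, so both comparisons succeeding means the
  // target never emits these pseudos.)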
  if (FrameSetupOpcode == ~0u && FrameDestroyOpcode == ~0u)
    return;

  std::vector<MachineBasicBlock::iterator> FrameSDOps;
  for (MachineBasicBlock &BB : MF)
    for (MachineBasicBlock::iterator I = BB.begin(); I != BB.end(); ++I)
      if (TII.isFrameInstr(*I)) {
        unsigned Size = TII.getFrameSize(*I);
        if (Size > MaxCallFrameSize)
          MaxCallFrameSize = Size;
        AdjustsStack = true;
        FrameSDOps.push_back(I);
      } else if (I->isInlineAsm()) {
        // Some inline asm statements need a stack frame, as indicated by
        // operand 1.
        unsigned ExtraInfo = I->getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
        if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
          AdjustsStack = true;
      }

  assert(!MFI.isMaxCallFrameSizeComputed() ||
         (MFI.getMaxCallFrameSize() == MaxCallFrameSize &&
          MFI.adjustsStack() == AdjustsStack));
  MFI.setAdjustsStack(AdjustsStack);
  MFI.setMaxCallFrameSize(MaxCallFrameSize);

  for (MachineBasicBlock::iterator I : FrameSDOps) {
    // If call frames are not being included as part of the stack frame, and
    // the target doesn't indicate otherwise, remove the call frame pseudos
    // here. The sub/add sp instruction pairs are still inserted, but we don't
    // need to track the SP adjustment for frame index elimination.
    if (TFI->canSimplifyCallFramePseudos(MF))
      TFI->eliminateCallFramePseudoInstr(MF, *I->getParent(), I);
  }
}

/// Compute the sets of entry and return blocks for saving and restoring
/// callee-saved registers, and placing prolog and epilog code.
void PEI::calculateSaveRestoreBlocks(MachineFunction &MF) {
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // Even when we do not change any CSR, we still want to insert the
  // prologue and epilogue of the function.
  // So set the save points for those.

  // Use the points found by shrink-wrapping, if any.
  if (MFI.getSavePoint()) {
    SaveBlocks.push_back(MFI.getSavePoint());
    assert(MFI.getRestorePoint() && "Both restore and save must be set");
    MachineBasicBlock *RestoreBlock = MFI.getRestorePoint();
    // If RestoreBlock does not have any successor and is not a return block
    // then the end point is unreachable and we do not need to insert any
    // epilogue.
    if (!RestoreBlock->succ_empty() || RestoreBlock->isReturnBlock())
      RestoreBlocks.push_back(RestoreBlock);
    return;
  }

  // Save refs to entry and return blocks.
  SaveBlocks.push_back(&MF.front());
  for (MachineBasicBlock &MBB : MF) {
    if (MBB.isEHFuncletEntry())
      SaveBlocks.push_back(&MBB);
    if (MBB.isReturnBlock())
      RestoreBlocks.push_back(&MBB);
  }
}

static void assignCalleeSavedSpillSlots(MachineFunction &F,
                                        const BitVector &SavedRegs,
                                        unsigned &MinCSFrameIndex,
                                        unsigned &MaxCSFrameIndex) {
  if (SavedRegs.empty())
    return;

  const TargetRegisterInfo *RegInfo = F.getSubtarget().getRegisterInfo();
  const MCPhysReg *CSRegs = F.getRegInfo().getCalleeSavedRegs();
  BitVector CSMask(SavedRegs.size());

  for (unsigned i = 0; CSRegs[i]; ++i)
    CSMask.set(CSRegs[i]);

  std::vector<CalleeSavedInfo> CSI;
  for (unsigned i = 0; CSRegs[i]; ++i) {
    unsigned Reg = CSRegs[i];
    if (SavedRegs.test(Reg)) {
      bool SavedSuper = false;
      for (const MCPhysReg &SuperReg : RegInfo->superregs(Reg)) {
        // Some backends set all aliases for some registers as saved, such as
        // Mips's $fp, so they appear in SavedRegs but not CSRegs.
        if (SavedRegs.test(SuperReg) && CSMask.test(SuperReg)) {
          SavedSuper = true;
          break;
        }
      }

      if (!SavedSuper)
        CSI.push_back(CalleeSavedInfo(Reg));
    }
  }

  const TargetFrameLowering *TFI = F.getSubtarget().getFrameLowering();
  MachineFrameInfo &MFI = F.getFrameInfo();
  if (!TFI->assignCalleeSavedSpillSlots(F, RegInfo, CSI, MinCSFrameIndex,
                                        MaxCSFrameIndex)) {
    // If the target doesn't implement this, use generic code.

    if (CSI.empty())
      return; // Early exit if no callee saved registers are modified!

    unsigned NumFixedSpillSlots;
    const TargetFrameLowering::SpillSlot *FixedSpillSlots =
        TFI->getCalleeSavedSpillSlots(NumFixedSpillSlots);

    // Now that we know which registers need to be saved and restored, allocate
    // stack slots for them.
    for (auto &CS : CSI) {
      // If the target has spilled this register to another register, we don't
      // need to allocate a stack slot.
      if (CS.isSpilledToReg())
        continue;

      unsigned Reg = CS.getReg();
      const TargetRegisterClass *RC = RegInfo->getMinimalPhysRegClass(Reg);

      int FrameIdx;
      if (RegInfo->hasReservedSpillSlot(F, Reg, FrameIdx)) {
        CS.setFrameIdx(FrameIdx);
        continue;
      }

      // Check to see if this physreg must be spilled to a particular stack
      // slot on this target.
      const TargetFrameLowering::SpillSlot *FixedSlot = FixedSpillSlots;
      while (FixedSlot != FixedSpillSlots + NumFixedSpillSlots &&
             FixedSlot->Reg != Reg)
        ++FixedSlot;

      unsigned Size = RegInfo->getSpillSize(*RC);
      if (FixedSlot == FixedSpillSlots + NumFixedSpillSlots) {
        // Nope, just spill it anywhere convenient.
        Align Alignment = RegInfo->getSpillAlign(*RC);
        // We may not be able to satisfy the desired alignment specification of
        // the TargetRegisterClass if the stack alignment is smaller. Use the
        // min.
        Alignment = std::min(Alignment, TFI->getStackAlign());
        FrameIdx = MFI.CreateStackObject(Size, Alignment, true);
        if ((unsigned)FrameIdx < MinCSFrameIndex)
          MinCSFrameIndex = FrameIdx;
        if ((unsigned)FrameIdx > MaxCSFrameIndex)
          MaxCSFrameIndex = FrameIdx;
      } else {
        // Spill it to the stack where we must.
        FrameIdx = MFI.CreateFixedSpillStackObject(Size, FixedSlot->Offset);
      }

      CS.setFrameIdx(FrameIdx);
    }
  }

  MFI.setCalleeSavedInfo(CSI);
}

/// Helper function to update the liveness information for the callee-saved
/// registers.
static void updateLiveness(MachineFunction &MF) {
  MachineFrameInfo &MFI = MF.getFrameInfo();
  // Visited will contain all the basic blocks that are in the region
  // where the callee saved registers are alive:
  // - Anything that is not Save or Restore -> LiveThrough.
  // - Save -> LiveIn.
  // - Restore -> LiveOut.
  // The live-out is not attached to the block, so no need to keep
  // Restore in this set.
  SmallPtrSet<MachineBasicBlock *, 8> Visited;
  SmallVector<MachineBasicBlock *, 8> WorkList;
  MachineBasicBlock *Entry = &MF.front();
  MachineBasicBlock *Save = MFI.getSavePoint();

  if (!Save)
    Save = Entry;

  if (Entry != Save) {
    WorkList.push_back(Entry);
    Visited.insert(Entry);
  }
  Visited.insert(Save);

  MachineBasicBlock *Restore = MFI.getRestorePoint();
  if (Restore)
    // By construction Restore cannot be visited, otherwise it
    // means there exists a path to Restore that does not go
    // through Save.
    WorkList.push_back(Restore);

  while (!WorkList.empty()) {
    const MachineBasicBlock *CurBB = WorkList.pop_back_val();
    // By construction, the region that is after the save point is
    // dominated by the Save and post-dominated by the Restore.
    if (CurBB == Save && Save != Restore)
      continue;
    // Enqueue all the successors not already visited.
    // Those are by construction either before Save or after Restore.
    for (MachineBasicBlock *SuccBB : CurBB->successors())
      if (Visited.insert(SuccBB).second)
        WorkList.push_back(SuccBB);
  }

  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();

  MachineRegisterInfo &MRI = MF.getRegInfo();
  for (const CalleeSavedInfo &I : CSI) {
    for (MachineBasicBlock *MBB : Visited) {
      MCPhysReg Reg = I.getReg();
      // Add the callee-saved register as live-in.
      // It's killed at the spill.
      if (!MRI.isReserved(Reg) && !MBB->isLiveIn(Reg))
        MBB->addLiveIn(Reg);
    }
    // If a callee-saved register is spilled to another register rather than
    // to a stack slot, the destination register has to be marked as live for
    // each MBB between the prologue and epilogue so that it is not clobbered
    // before it is reloaded in the epilogue. The Visited set contains all
    // blocks outside of the region delimited by prologue/epilogue.
    if (I.isSpilledToReg()) {
      for (MachineBasicBlock &MBB : MF) {
        if (Visited.count(&MBB))
          continue;
        MCPhysReg DstReg = I.getDstReg();
        if (!MBB.isLiveIn(DstReg))
          MBB.addLiveIn(DstReg);
      }
    }
  }
}

/// Insert spill code for the callee-saved registers used in the function.
static void insertCSRSaves(MachineBasicBlock &SaveBlock,
                           ArrayRef<CalleeSavedInfo> CSI) {
  MachineFunction &MF = *SaveBlock.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  MachineBasicBlock::iterator I = SaveBlock.begin();
  if (!TFI->spillCalleeSavedRegisters(SaveBlock, I, CSI, TRI)) {
    for (const CalleeSavedInfo &CS : CSI) {
      // Insert the spill to the stack frame.
      unsigned Reg = CS.getReg();

      if (CS.isSpilledToReg()) {
        BuildMI(SaveBlock, I, DebugLoc(), TII.get(TargetOpcode::COPY),
                CS.getDstReg())
            .addReg(Reg, getKillRegState(true));
      } else {
        const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
        TII.storeRegToStackSlot(SaveBlock, I, Reg, true, CS.getFrameIdx(), RC,
                                TRI);
      }
    }
  }
}

/// Insert restore code for the callee-saved registers used in the function.
static void insertCSRRestores(MachineBasicBlock &RestoreBlock,
                              std::vector<CalleeSavedInfo> &CSI) {
  MachineFunction &MF = *RestoreBlock.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  // Restore all registers immediately before the return and any
  // terminators that precede it.
  MachineBasicBlock::iterator I = RestoreBlock.getFirstTerminator();

  if (!TFI->restoreCalleeSavedRegisters(RestoreBlock, I, CSI, TRI)) {
    for (const CalleeSavedInfo &CI : reverse(CSI)) {
      unsigned Reg = CI.getReg();
      if (CI.isSpilledToReg()) {
        BuildMI(RestoreBlock, I, DebugLoc(), TII.get(TargetOpcode::COPY), Reg)
            .addReg(CI.getDstReg(), getKillRegState(true));
      } else {
        const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
        TII.loadRegFromStackSlot(RestoreBlock, I, Reg, CI.getFrameIdx(), RC,
                                 TRI);
        assert(I != RestoreBlock.begin() &&
               "loadRegFromStackSlot didn't insert any code!");
        // Insert in reverse order. loadRegFromStackSlot can insert
        // multiple instructions.
      }
    }
  }
}

void PEI::spillCalleeSavedRegs(MachineFunction &MF) {
  // We can't list this requirement in getRequiredProperties because some
  // targets (WebAssembly) use virtual registers past this point, and the pass
  // pipeline is set up without giving the passes a chance to look at the
  // TargetMachine.
  // FIXME: Find a way to express this in getRequiredProperties.
  assert(MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::NoVRegs));

  const Function &F = MF.getFunction();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MinCSFrameIndex = std::numeric_limits<unsigned>::max();
  MaxCSFrameIndex = 0;

  // Determine which of the registers in the callee save list should be saved.
  BitVector SavedRegs;
  TFI->determineCalleeSaves(MF, SavedRegs, RS);

  // Assign stack slots for any callee-saved registers that must be spilled.
  assignCalleeSavedSpillSlots(MF, SavedRegs, MinCSFrameIndex, MaxCSFrameIndex);

  // Add the code to save and restore the callee saved registers.
  if (!F.hasFnAttribute(Attribute::Naked)) {
    MFI.setCalleeSavedInfoValid(true);

    std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
    if (!CSI.empty()) {
      if (!MFI.hasCalls())
        NumLeafFuncWithSpills++;

      for (MachineBasicBlock *SaveBlock : SaveBlocks)
        insertCSRSaves(*SaveBlock, CSI);

      // Update the live-in information of all the blocks up to the save point.
      updateLiveness(MF);

      for (MachineBasicBlock *RestoreBlock : RestoreBlocks)
        insertCSRRestores(*RestoreBlock, CSI);
    }
  }
}

/// AdjustStackOffset - Helper function used to adjust the stack frame offset.
static inline void AdjustStackOffset(MachineFrameInfo &MFI, int FrameIdx,
                                     bool StackGrowsDown, int64_t &Offset,
                                     Align &MaxAlign, unsigned Skew) {
  // If the stack grows down, add the object size to find the lowest address.
  if (StackGrowsDown)
    Offset += MFI.getObjectSize(FrameIdx);

  Align Alignment = MFI.getObjectAlign(FrameIdx);

  // If the alignment of this object is greater than that of the stack, then
  // increase the stack alignment to match.
  MaxAlign = std::max(MaxAlign, Alignment);

  // Adjust to alignment boundary.
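  // alignTo with a skew rounds Offset up to the next value that is congruent
  // to Skew modulo Alignment; e.g. Offset = 12 with Alignment = 16 and
  // Skew = 4 becomes 20.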
  Offset = alignTo(Offset, Alignment, Skew);

  if (StackGrowsDown) {
    LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << -Offset
                      << "]\n");
    MFI.setObjectOffset(FrameIdx, -Offset); // Set the computed offset
  } else {
    LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << Offset
                      << "]\n");
    MFI.setObjectOffset(FrameIdx, Offset);
    Offset += MFI.getObjectSize(FrameIdx);
  }
}

/// Compute which bytes of fixed and callee-save stack area are unused and keep
/// track of them in StackBytesFree.
static inline void
computeFreeStackSlots(MachineFrameInfo &MFI, bool StackGrowsDown,
                      unsigned MinCSFrameIndex, unsigned MaxCSFrameIndex,
                      int64_t FixedCSEnd, BitVector &StackBytesFree) {
  // Avoid undefined int64_t -> int conversion below in extreme case.
  if (FixedCSEnd > std::numeric_limits<int>::max())
    return;

  StackBytesFree.resize(FixedCSEnd, true);

  SmallVector<int, 16> AllocatedFrameSlots;
  // Add fixed objects.
  for (int i = MFI.getObjectIndexBegin(); i != 0; ++i)
    // StackSlot scavenging is only implemented for the default stack.
    if (MFI.getStackID(i) == TargetStackID::Default)
      AllocatedFrameSlots.push_back(i);
  // Add callee-save objects if there are any.
  if (MinCSFrameIndex <= MaxCSFrameIndex) {
    for (int i = MinCSFrameIndex; i <= (int)MaxCSFrameIndex; ++i)
      if (MFI.getStackID(i) == TargetStackID::Default)
        AllocatedFrameSlots.push_back(i);
  }

  for (int i : AllocatedFrameSlots) {
    // These are converted from int64_t, but they should always fit in int
    // because of the FixedCSEnd check above.
    int ObjOffset = MFI.getObjectOffset(i);
    int ObjSize = MFI.getObjectSize(i);
    int ObjStart, ObjEnd;
    if (StackGrowsDown) {
      // ObjOffset is negative when StackGrowsDown is true.
      ObjStart = -ObjOffset - ObjSize;
      ObjEnd = -ObjOffset;
    } else {
      ObjStart = ObjOffset;
      ObjEnd = ObjOffset + ObjSize;
    }
    // Ignore fixed holes that are in the previous stack frame.
    if (ObjEnd > 0)
      StackBytesFree.reset(ObjStart, ObjEnd);
  }
}

/// Assign frame object to an unused portion of the stack in the fixed stack
/// object range. Return true if the allocation was successful.
static inline bool scavengeStackSlot(MachineFrameInfo &MFI, int FrameIdx,
                                     bool StackGrowsDown, Align MaxAlign,
                                     BitVector &StackBytesFree) {
  if (MFI.isVariableSizedObjectIndex(FrameIdx))
    return false;

  if (StackBytesFree.none()) {
    // Clear it to speed up later scavengeStackSlot calls to
    // StackBytesFree.none().
    StackBytesFree.clear();
    return false;
  }

  Align ObjAlign = MFI.getObjectAlign(FrameIdx);
  if (ObjAlign > MaxAlign)
    return false;

  int64_t ObjSize = MFI.getObjectSize(FrameIdx);
  int FreeStart;
  for (FreeStart = StackBytesFree.find_first(); FreeStart != -1;
       FreeStart = StackBytesFree.find_next(FreeStart)) {

    // Check that free space has suitable alignment.
    unsigned ObjStart = StackGrowsDown ? FreeStart + ObjSize : FreeStart;
    if (alignTo(ObjStart, ObjAlign) != ObjStart)
      continue;

    if (FreeStart + ObjSize > StackBytesFree.size())
      return false;

    bool AllBytesFree = true;
    for (unsigned Byte = 0; Byte < ObjSize; ++Byte)
      if (!StackBytesFree.test(FreeStart + Byte)) {
        AllBytesFree = false;
        break;
      }
    if (AllBytesFree)
      break;
  }

  if (FreeStart == -1)
    return false;

  if (StackGrowsDown) {
    int ObjStart = -(FreeStart + ObjSize);
    LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") scavenged at SP["
                      << ObjStart << "]\n");
    MFI.setObjectOffset(FrameIdx, ObjStart);
  } else {
    LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") scavenged at SP["
                      << FreeStart << "]\n");
    MFI.setObjectOffset(FrameIdx, FreeStart);
  }

  StackBytesFree.reset(FreeStart, FreeStart + ObjSize);
  return true;
}

/// AssignProtectedObjSet - Helper function to assign large stack objects (i.e.,
/// those required to be close to the Stack Protector) to stack offsets.
static void AssignProtectedObjSet(const StackObjSet &UnassignedObjs,
                                  SmallSet<int, 16> &ProtectedObjs,
                                  MachineFrameInfo &MFI, bool StackGrowsDown,
                                  int64_t &Offset, Align &MaxAlign,
                                  unsigned Skew) {
  for (int i : UnassignedObjs) {
    AdjustStackOffset(MFI, i, StackGrowsDown, Offset, MaxAlign, Skew);
    ProtectedObjs.insert(i);
  }
}

/// calculateFrameObjectOffsets - Calculate actual frame offsets for all of the
/// abstract stack objects.
void PEI::calculateFrameObjectOffsets(MachineFunction &MF) {
  const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();

  bool StackGrowsDown =
      TFI.getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;

  // Loop over all of the stack objects, assigning sequential addresses...
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // Start at the beginning of the local area.
  // The Offset is the distance from the stack top in the direction
  // of stack growth -- so it's always nonnegative.
  int LocalAreaOffset = TFI.getOffsetOfLocalArea();
  if (StackGrowsDown)
    LocalAreaOffset = -LocalAreaOffset;
  assert(LocalAreaOffset >= 0 &&
         "Local area offset should be in direction of stack growth");
  int64_t Offset = LocalAreaOffset;

  // Skew to be applied to alignment.
  unsigned Skew = TFI.getStackAlignmentSkew(MF);

#ifdef EXPENSIVE_CHECKS
  for (unsigned i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i)
    if (!MFI.isDeadObjectIndex(i) &&
        MFI.getStackID(i) == TargetStackID::Default)
      assert(MFI.getObjectAlign(i) <= MFI.getMaxAlign() &&
             "MaxAlignment is invalid");
#endif

  // If there are fixed sized objects that are preallocated in the local area,
  // non-fixed objects can't be allocated right at the start of local area.
  // Adjust 'Offset' to point to the end of last fixed sized preallocated
  // object.
  for (int i = MFI.getObjectIndexBegin(); i != 0; ++i) {
    // Only allocate objects on the default stack.
    if (MFI.getStackID(i) != TargetStackID::Default)
      continue;

    int64_t FixedOff;
    if (StackGrowsDown) {
      // The maximum distance from the stack pointer is at the lower address
      // of the object -- which is given by the offset. For a down-growing
      // stack the offset is negative, so we negate it to get the distance.
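      // E.g. a fixed object at offset -16 gives FixedOff = 16, so non-fixed
      // objects will be placed at an offset of at least 16.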
      FixedOff = -MFI.getObjectOffset(i);
    } else {
      // The maximum distance from the stack pointer is at the upper
      // address of the object.
      FixedOff = MFI.getObjectOffset(i) + MFI.getObjectSize(i);
    }
    if (FixedOff > Offset)
      Offset = FixedOff;
  }

  Align MaxAlign = MFI.getMaxAlign();
  // First assign frame offsets to stack objects that are used to spill
  // callee saved registers.
  if (MaxCSFrameIndex >= MinCSFrameIndex) {
    for (unsigned i = 0; i <= MaxCSFrameIndex - MinCSFrameIndex; ++i) {
      unsigned FrameIndex =
          StackGrowsDown ? MinCSFrameIndex + i : MaxCSFrameIndex - i;

      // Only allocate objects on the default stack.
      if (MFI.getStackID(FrameIndex) != TargetStackID::Default)
        continue;

      // TODO: should this just be if (MFI.isDeadObjectIndex(FrameIndex))
      if (!StackGrowsDown && MFI.isDeadObjectIndex(FrameIndex))
        continue;

      AdjustStackOffset(MFI, FrameIndex, StackGrowsDown, Offset, MaxAlign,
                        Skew);
    }
  }

  assert(MaxAlign == MFI.getMaxAlign() &&
         "MFI.getMaxAlign should already account for all callee-saved "
         "registers without a fixed stack slot");

  // FixedCSEnd is the stack offset to the end of the fixed and callee-save
  // stack area.
  int64_t FixedCSEnd = Offset;

  // Make sure the special register scavenging spill slot is closest to the
  // incoming stack pointer if a frame pointer is required and is closer
  // to the incoming rather than the final stack pointer.
  const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
  bool EarlyScavengingSlots =
      TFI.allocateScavengingFrameIndexesNearIncomingSP(MF);
  if (RS && EarlyScavengingSlots) {
    SmallVector<int, 2> SFIs;
    RS->getScavengingFrameIndices(SFIs);
    for (int SFI : SFIs)
      AdjustStackOffset(MFI, SFI, StackGrowsDown, Offset, MaxAlign, Skew);
  }

  // FIXME: Once this is working, the enable flag will change to a target
  // check for whether the frame is large enough to want to use virtual
  // frame index registers. Functions which don't want/need this optimization
  // will continue to use the existing code path.
  if (MFI.getUseLocalStackAllocationBlock()) {
    Align Alignment = MFI.getLocalFrameMaxAlign();

    // Adjust to alignment boundary.
    Offset = alignTo(Offset, Alignment, Skew);

    LLVM_DEBUG(dbgs() << "Local frame base offset: " << Offset << "\n");

    // Resolve offsets for objects in the local block.
    for (unsigned i = 0, e = MFI.getLocalFrameObjectCount(); i != e; ++i) {
      std::pair<int, int64_t> Entry = MFI.getLocalFrameObjectMap(i);
      int64_t FIOffset = (StackGrowsDown ? -Offset : Offset) + Entry.second;
      LLVM_DEBUG(dbgs() << "alloc FI(" << Entry.first << ") at SP[" << FIOffset
                        << "]\n");
      MFI.setObjectOffset(Entry.first, FIOffset);
    }
    // Allocate the local block.
    Offset += MFI.getLocalFrameSize();

    MaxAlign = std::max(Alignment, MaxAlign);
  }

  // Retrieve the Exception Handler registration node.
  int EHRegNodeFrameIndex = std::numeric_limits<int>::max();
  if (const WinEHFuncInfo *FuncInfo = MF.getWinEHFuncInfo())
    EHRegNodeFrameIndex = FuncInfo->EHRegNodeFrameIndex;

  // Make sure that the stack protector comes before the local variables on the
  // stack.
  SmallSet<int, 16> ProtectedObjs;
  if (MFI.hasStackProtectorIndex()) {
    int StackProtectorFI = MFI.getStackProtectorIndex();
    StackObjSet LargeArrayObjs;
    StackObjSet SmallArrayObjs;
    StackObjSet AddrOfObjs;

    // If we need a stack protector, we need to make sure that
    // LocalStackSlotPass didn't already allocate a slot for it.
    // If we are told to use the LocalStackAllocationBlock, the stack protector
    // is expected to be already pre-allocated.
    if (MFI.getStackID(StackProtectorFI) != TargetStackID::Default) {
      // If the stack protector isn't on the default stack then it's up to the
      // target to set the stack offset.
      assert(MFI.getObjectOffset(StackProtectorFI) != 0 &&
             "Offset of stack protector on non-default stack expected to be "
             "already set.");
      assert(!MFI.isObjectPreAllocated(MFI.getStackProtectorIndex()) &&
             "Stack protector on non-default stack expected to not be "
             "pre-allocated by LocalStackSlotPass.");
    } else if (!MFI.getUseLocalStackAllocationBlock()) {
      AdjustStackOffset(MFI, StackProtectorFI, StackGrowsDown, Offset, MaxAlign,
                        Skew);
    } else if (!MFI.isObjectPreAllocated(MFI.getStackProtectorIndex())) {
      llvm_unreachable(
          "Stack protector not pre-allocated by LocalStackSlotPass.");
    }

    // Assign large stack objects first.
    for (unsigned i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) {
      if (MFI.isObjectPreAllocated(i) && MFI.getUseLocalStackAllocationBlock())
        continue;
      if (i >= MinCSFrameIndex && i <= MaxCSFrameIndex)
        continue;
      if (RS && RS->isScavengingFrameIndex((int)i))
        continue;
      if (MFI.isDeadObjectIndex(i))
        continue;
      if (StackProtectorFI == (int)i || EHRegNodeFrameIndex == (int)i)
        continue;
      // Only allocate objects on the default stack.
      if (MFI.getStackID(i) != TargetStackID::Default)
        continue;

      switch (MFI.getObjectSSPLayout(i)) {
      case MachineFrameInfo::SSPLK_None:
        continue;
      case MachineFrameInfo::SSPLK_SmallArray:
        SmallArrayObjs.insert(i);
        continue;
      case MachineFrameInfo::SSPLK_AddrOf:
        AddrOfObjs.insert(i);
        continue;
      case MachineFrameInfo::SSPLK_LargeArray:
        LargeArrayObjs.insert(i);
        continue;
      }
      llvm_unreachable("Unexpected SSPLayoutKind.");
    }

    // We expect **all** the protected stack objects to be pre-allocated by
    // LocalStackSlotPass. If it turns out that PEI still has to allocate some
    // of them, we may end up messing up the expected order of the objects.
    if (MFI.getUseLocalStackAllocationBlock() &&
        !(LargeArrayObjs.empty() && SmallArrayObjs.empty() &&
          AddrOfObjs.empty()))
      llvm_unreachable("Found protected stack objects not pre-allocated by "
                       "LocalStackSlotPass.");

    AssignProtectedObjSet(LargeArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
                          Offset, MaxAlign, Skew);
    AssignProtectedObjSet(SmallArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
                          Offset, MaxAlign, Skew);
    AssignProtectedObjSet(AddrOfObjs, ProtectedObjs, MFI, StackGrowsDown,
                          Offset, MaxAlign, Skew);
  }

  SmallVector<int, 8> ObjectsToAllocate;

  // Then prepare to assign frame offsets to stack objects that are not used to
  // spill callee saved registers.
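  // Objects already handled above -- pre-allocated locals, callee-saved spill
  // slots, scavenging slots, dead objects, the stack protector, the EH
  // registration node, protected objects and objects on non-default stacks --
  // are skipped; everything else is collected for allocation below.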
  for (unsigned i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) {
    if (MFI.isObjectPreAllocated(i) && MFI.getUseLocalStackAllocationBlock())
      continue;
    if (i >= MinCSFrameIndex && i <= MaxCSFrameIndex)
      continue;
    if (RS && RS->isScavengingFrameIndex((int)i))
      continue;
    if (MFI.isDeadObjectIndex(i))
      continue;
    if (MFI.getStackProtectorIndex() == (int)i || EHRegNodeFrameIndex == (int)i)
      continue;
    if (ProtectedObjs.count(i))
      continue;
    // Only allocate objects on the default stack.
    if (MFI.getStackID(i) != TargetStackID::Default)
      continue;

    // Add the objects that we need to allocate to our working set.
    ObjectsToAllocate.push_back(i);
  }

  // Allocate the EH registration node first if one is present.
  if (EHRegNodeFrameIndex != std::numeric_limits<int>::max())
    AdjustStackOffset(MFI, EHRegNodeFrameIndex, StackGrowsDown, Offset,
                      MaxAlign, Skew);

  // Give the targets a chance to order the objects the way they like.
  if (MF.getTarget().getOptLevel() != CodeGenOpt::None &&
      MF.getTarget().Options.StackSymbolOrdering)
    TFI.orderFrameObjects(MF, ObjectsToAllocate);

  // Keep track of which bytes in the fixed and callee-save range are used so
  // we can use the holes when allocating later stack objects. Only do this if
  // the stack protector isn't being used, the target requests it, and we're
  // optimizing.
  BitVector StackBytesFree;
  if (!ObjectsToAllocate.empty() &&
      MF.getTarget().getOptLevel() != CodeGenOpt::None &&
      MFI.getStackProtectorIndex() < 0 && TFI.enableStackSlotScavenging(MF))
    computeFreeStackSlots(MFI, StackGrowsDown, MinCSFrameIndex, MaxCSFrameIndex,
                          FixedCSEnd, StackBytesFree);

  // Now walk the objects and actually assign base offsets to them.
  for (auto &Object : ObjectsToAllocate)
    if (!scavengeStackSlot(MFI, Object, StackGrowsDown, MaxAlign,
                           StackBytesFree))
      AdjustStackOffset(MFI, Object, StackGrowsDown, Offset, MaxAlign, Skew);

  // Make sure the special register scavenging spill slot is closest to the
  // stack pointer.
  if (RS && !EarlyScavengingSlots) {
    SmallVector<int, 2> SFIs;
    RS->getScavengingFrameIndices(SFIs);
    for (int SFI : SFIs)
      AdjustStackOffset(MFI, SFI, StackGrowsDown, Offset, MaxAlign, Skew);
  }

  if (!TFI.targetHandlesStackFrameRounding()) {
    // If we have reserved argument space for call sites in the function
    // immediately on entry to the current function, count it as part of the
    // overall stack size.
    if (MFI.adjustsStack() && TFI.hasReservedCallFrame(MF))
      Offset += MFI.getMaxCallFrameSize();

    // Round up the size to a multiple of the alignment. If the function has
    // any calls or allocas, align to the target's StackAlignment value to
    // ensure that the callee's frame or the alloca data is suitably aligned;
    // otherwise, for leaf functions, align to the TransientStackAlignment
    // value.
    Align StackAlign;
    if (MFI.adjustsStack() || MFI.hasVarSizedObjects() ||
        (RegInfo->hasStackRealignment(MF) && MFI.getObjectIndexEnd() != 0))
      StackAlign = TFI.getStackAlign();
    else
      StackAlign = TFI.getTransientStackAlign();

    // If the frame pointer is eliminated, all frame offsets will be relative
    // to SP not FP. Align to MaxAlign so this works.
    StackAlign = std::max(StackAlign, MaxAlign);
    int64_t OffsetBeforeAlignment = Offset;
    Offset = alignTo(Offset, StackAlign, Skew);

    // If we have increased the offset to fulfill the alignment constraints,
    // then the scavenging spill slots may become harder to reach from the
    // stack pointer, so float them down so they stay close.
    if (StackGrowsDown && OffsetBeforeAlignment != Offset && RS &&
        !EarlyScavengingSlots) {
      SmallVector<int, 2> SFIs;
      RS->getScavengingFrameIndices(SFIs);
      LLVM_DEBUG(if (!SFIs.empty()) llvm::dbgs()
                 << "Adjusting emergency spill slots!\n";);
      int64_t Delta = Offset - OffsetBeforeAlignment;
      for (int SFI : SFIs) {
        LLVM_DEBUG(llvm::dbgs()
                   << "Adjusting offset of emergency spill slot #" << SFI
                   << " from " << MFI.getObjectOffset(SFI););
        MFI.setObjectOffset(SFI, MFI.getObjectOffset(SFI) - Delta);
        LLVM_DEBUG(llvm::dbgs() << " to " << MFI.getObjectOffset(SFI) << "\n";);
      }
    }
  }

  // Update frame info to pretend that this is part of the stack...
  int64_t StackSize = Offset - LocalAreaOffset;
  MFI.setStackSize(StackSize);
  NumBytesStackSpace += StackSize;
}

/// insertPrologEpilogCode - Scan the function for modified callee saved
/// registers, insert spill code for these callee saved registers, then add
/// prolog and epilog code to the function.
void PEI::insertPrologEpilogCode(MachineFunction &MF) {
  const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();

  // Add prologue to the function...
  for (MachineBasicBlock *SaveBlock : SaveBlocks)
    TFI.emitPrologue(MF, *SaveBlock);

  // Add epilogue to restore the callee-save registers in each exiting block.
  for (MachineBasicBlock *RestoreBlock : RestoreBlocks)
    TFI.emitEpilogue(MF, *RestoreBlock);

  // Zero call used registers before restoring callee-saved registers.
  insertZeroCallUsedRegs(MF);

  for (MachineBasicBlock *SaveBlock : SaveBlocks)
    TFI.inlineStackProbe(MF, *SaveBlock);

  // Emit additional code that is required to support segmented stacks, if
  // we've been asked for it. This, when linked with a runtime with support
  // for segmented stacks (libgcc is one), will result in allocating stack
  // space in small chunks instead of one large contiguous block.
  if (MF.shouldSplitStack()) {
    for (MachineBasicBlock *SaveBlock : SaveBlocks)
      TFI.adjustForSegmentedStacks(MF, *SaveBlock);
  }

  // Emit additional code that is required to explicitly handle the stack in
  // HiPE native code (if needed) when loaded in the Erlang/OTP runtime. The
  // approach is rather similar to that of Segmented Stacks, but it uses a
  // different conditional check and another BIF for allocating more stack
  // space.
  if (MF.getFunction().getCallingConv() == CallingConv::HiPE)
    for (MachineBasicBlock *SaveBlock : SaveBlocks)
      TFI.adjustForHiPEPrologue(MF, *SaveBlock);
}

/// insertZeroCallUsedRegs - Zero out call used registers.
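/// The registers to clear are selected by the "zero-call-used-regs" function
/// attribute, whose value is one of "skip", "used-gpr-arg", "used-gpr",
/// "used-arg", "used", "all-gpr-arg", "all-gpr", "all-arg" or "all".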
void PEI::insertZeroCallUsedRegs(MachineFunction &MF) {
  const Function &F = MF.getFunction();

  if (!F.hasFnAttribute("zero-call-used-regs"))
    return;

  using namespace ZeroCallUsedRegs;

  ZeroCallUsedRegsKind ZeroRegsKind =
      StringSwitch<ZeroCallUsedRegsKind>(
          F.getFnAttribute("zero-call-used-regs").getValueAsString())
          .Case("skip", ZeroCallUsedRegsKind::Skip)
          .Case("used-gpr-arg", ZeroCallUsedRegsKind::UsedGPRArg)
          .Case("used-gpr", ZeroCallUsedRegsKind::UsedGPR)
          .Case("used-arg", ZeroCallUsedRegsKind::UsedArg)
          .Case("used", ZeroCallUsedRegsKind::Used)
          .Case("all-gpr-arg", ZeroCallUsedRegsKind::AllGPRArg)
          .Case("all-gpr", ZeroCallUsedRegsKind::AllGPR)
          .Case("all-arg", ZeroCallUsedRegsKind::AllArg)
          .Case("all", ZeroCallUsedRegsKind::All);

  if (ZeroRegsKind == ZeroCallUsedRegsKind::Skip)
    return;

  const bool OnlyGPR = static_cast<unsigned>(ZeroRegsKind) & ONLY_GPR;
  const bool OnlyUsed = static_cast<unsigned>(ZeroRegsKind) & ONLY_USED;
  const bool OnlyArg = static_cast<unsigned>(ZeroRegsKind) & ONLY_ARG;

  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
  const BitVector AllocatableSet(TRI.getAllocatableSet(MF));

  // Mark all used registers.
  BitVector UsedRegs(TRI.getNumRegs());
  if (OnlyUsed)
    for (const MachineBasicBlock &MBB : MF)
      for (const MachineInstr &MI : MBB)
        for (const MachineOperand &MO : MI.operands()) {
          if (!MO.isReg())
            continue;

          MCRegister Reg = MO.getReg();
          if (AllocatableSet[Reg] && !MO.isImplicit() &&
              (MO.isDef() || MO.isUse()))
            UsedRegs.set(Reg);
        }

  // Collect the registers that are live into the entry block.
  BitVector LiveIns(TRI.getNumRegs());
  for (const MachineBasicBlock::RegisterMaskPair &LI : MF.front().liveins())
    LiveIns.set(LI.PhysReg);

  BitVector RegsToZero(TRI.getNumRegs());
  for (MCRegister Reg : AllocatableSet.set_bits()) {
    // Skip over fixed registers.
    if (TRI.isFixedRegister(MF, Reg))
      continue;

    // Want only general purpose registers.
    if (OnlyGPR && !TRI.isGeneralPurposeRegister(MF, Reg))
      continue;

    // Want only used registers.
    if (OnlyUsed && !UsedRegs[Reg])
      continue;

    // Want only registers used for arguments.
    if (OnlyArg) {
      if (OnlyUsed) {
        if (!LiveIns[Reg])
          continue;
      } else if (!TRI.isArgumentRegister(MF, Reg)) {
        continue;
      }
    }

    RegsToZero.set(Reg);
  }

  // Don't clear registers that are live when leaving the function.
  for (const MachineBasicBlock &MBB : MF)
    for (const MachineInstr &MI : MBB.terminators()) {
      if (!MI.isReturn())
        continue;

      for (const auto &MO : MI.operands()) {
        if (!MO.isReg())
          continue;

        for (MCPhysReg SReg : TRI.sub_and_superregs_inclusive(MO.getReg()))
          RegsToZero.reset(SReg);
      }
    }

  // Don't need to clear registers that are used/clobbered by terminating
  // instructions.
  for (const MachineBasicBlock &MBB : MF) {
    if (!MBB.isReturnBlock())
      continue;

    MachineBasicBlock::const_iterator MBBI = MBB.getFirstTerminator();
    for (MachineBasicBlock::const_iterator I = MBBI, E = MBB.end(); I != E;
         ++I) {
      for (const MachineOperand &MO : I->operands()) {
        if (!MO.isReg())
          continue;

        for (const MCPhysReg &Reg :
             TRI.sub_and_superregs_inclusive(MO.getReg()))
          RegsToZero.reset(Reg);
      }
    }
  }

  // Don't clear registers that must be preserved.
  for (const MCPhysReg *CSRegs = TRI.getCalleeSavedRegs(&MF);
       MCPhysReg CSReg = *CSRegs; ++CSRegs)
    for (MCRegister Reg : TRI.sub_and_superregs_inclusive(CSReg))
      RegsToZero.reset(Reg);

  const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();
  for (MachineBasicBlock &MBB : MF)
    if (MBB.isReturnBlock())
      TFI.emitZeroCallUsedRegs(RegsToZero, MBB);
}

/// replaceFrameIndices - Replace all MO_FrameIndex operands with physical
/// register references and actual offsets.
void PEI::replaceFrameIndices(MachineFunction &MF) {
  const auto &ST = MF.getSubtarget();
  const TargetFrameLowering &TFI = *ST.getFrameLowering();
  if (!TFI.needsFrameIndexResolution(MF))
    return;

  const TargetRegisterInfo *TRI = ST.getRegisterInfo();

  // Allow the target to determine this after knowing the frame size.
  FrameIndexEliminationScavenging =
      (RS && !FrameIndexVirtualScavenging) ||
      TRI->requiresFrameIndexReplacementScavenging(MF);

  // Store SPAdj at exit of a basic block.
  SmallVector<int, 8> SPState;
  SPState.resize(MF.getNumBlockIDs());
  df_iterator_default_set<MachineBasicBlock *> Reachable;

  // Iterate over the reachable blocks in DFS order.
  for (auto DFI = df_ext_begin(&MF, Reachable),
            DFE = df_ext_end(&MF, Reachable);
       DFI != DFE; ++DFI) {
    int SPAdj = 0;
    // Check the exit state of the DFS stack predecessor.
    if (DFI.getPathLength() >= 2) {
      MachineBasicBlock *StackPred = DFI.getPath(DFI.getPathLength() - 2);
      assert(Reachable.count(StackPred) &&
             "DFS stack predecessor is already visited.\n");
      SPAdj = SPState[StackPred->getNumber()];
    }
    MachineBasicBlock *BB = *DFI;
    replaceFrameIndices(BB, MF, SPAdj);
    SPState[BB->getNumber()] = SPAdj;
  }

  // Handle the unreachable blocks.
  for (auto &BB : MF) {
    if (Reachable.count(&BB))
      // Already handled in DFS traversal.
      continue;
    int SPAdj = 0;
    replaceFrameIndices(&BB, MF, SPAdj);
  }
}

bool PEI::replaceFrameIndexDebugInstr(MachineFunction &MF, MachineInstr &MI,
                                      unsigned OpIdx, int SPAdj) {
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
  if (MI.isDebugValue()) {

    MachineOperand &Op = MI.getOperand(OpIdx);
    assert(MI.isDebugOperand(&Op) &&
           "Frame indices can only appear as a debug operand in a DBG_VALUE*"
           " machine instruction");
    Register Reg;
    unsigned FrameIdx = Op.getIndex();
    unsigned Size = MF.getFrameInfo().getObjectSize(FrameIdx);

    StackOffset Offset = TFI->getFrameIndexReference(MF, FrameIdx, Reg);
    Op.ChangeToRegister(Reg, false /*isDef*/);

    const DIExpression *DIExpr = MI.getDebugExpression();

    // If we have a direct DBG_VALUE, and its location expression isn't
    // currently complex, then adding an offset will morph it into a
    // complex location that is interpreted as being a memory address.
    // This changes a pointer-valued variable to dereference that pointer,
    // which is incorrect. Fix by adding DW_OP_stack_value.

    if (MI.isNonListDebugValue()) {
      unsigned PrependFlags = DIExpression::ApplyOffset;
      if (!MI.isIndirectDebugValue() && !DIExpr->isComplex())
        PrependFlags |= DIExpression::StackValue;

      // If we have a DBG_VALUE that is indirect and has an implicit location
      // expression, we need to insert a deref before prepending a memory
      // location expression. Also, after doing this we change the DBG_VALUE
      // to be direct.
      if (MI.isIndirectDebugValue() && DIExpr->isImplicit()) {
        SmallVector<uint64_t, 2> Ops = {dwarf::DW_OP_deref_size, Size};
        bool WithStackValue = true;
        DIExpr = DIExpression::prependOpcodes(DIExpr, Ops, WithStackValue);
        // Make the DBG_VALUE direct.
        MI.getDebugOffset().ChangeToRegister(0, false);
      }
      DIExpr = TRI.prependOffsetExpression(DIExpr, PrependFlags, Offset);
    } else {
      // The debug operand at DebugOpIndex was a frame index at offset
      // `Offset`; now that the operand has been replaced with the frame
      // register, describe the location as `register x, plus Offset`.
      unsigned DebugOpIndex = MI.getDebugOperandIndex(&Op);
      SmallVector<uint64_t, 3> Ops;
      TRI.getOffsetOpcodes(Offset, Ops);
      DIExpr = DIExpression::appendOpsToArg(DIExpr, Ops, DebugOpIndex);
    }
    MI.getDebugExpressionOp().setMetadata(DIExpr);
    return true;
  }

  if (MI.isDebugPHI()) {
    // Allow stack ref to continue onwards.
    return true;
  }

  // TODO: This code should be commoned with the code for
  // PATCHPOINT. There's no good reason for the difference in
  // implementation other than historical accident. The only
  // remaining difference is the unconditional use of the stack
  // pointer as the base register.
  if (MI.getOpcode() == TargetOpcode::STATEPOINT) {
    assert((!MI.isDebugValue() || OpIdx == 0) &&
           "Frame indices can only appear as the first operand of a "
           "DBG_VALUE machine instruction");
    Register Reg;
    MachineOperand &Offset = MI.getOperand(OpIdx + 1);
    StackOffset refOffset = TFI->getFrameIndexReferencePreferSP(
        MF, MI.getOperand(OpIdx).getIndex(), Reg, /*IgnoreSPUpdates*/ false);
    assert(!refOffset.getScalable() &&
           "Frame offsets with a scalable component are not supported");
    Offset.setImm(Offset.getImm() + refOffset.getFixed() + SPAdj);
    MI.getOperand(OpIdx).ChangeToRegister(Reg, false /*isDef*/);
    return true;
  }
  return false;
}

void PEI::replaceFrameIndices(MachineBasicBlock *BB, MachineFunction &MF,
                              int &SPAdj) {
  assert(MF.getSubtarget().getRegisterInfo() &&
         "getRegisterInfo() must be implemented!");
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();

  if (RS && FrameIndexEliminationScavenging)
    RS->enterBasicBlock(*BB);

  bool InsideCallSequence = false;

  for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end();) {
    if (TII.isFrameInstr(*I)) {
      InsideCallSequence = TII.isFrameSetup(*I);
      SPAdj += TII.getSPAdjust(*I);
      I = TFI->eliminateCallFramePseudoInstr(MF, *BB, I);
      continue;
    }

    MachineInstr &MI = *I;
    bool DoIncr = true;
    bool DidFinishLoop = true;
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      if (!MI.getOperand(i).isFI())
        continue;

      // Frame indices in debug values are encoded in a target-independent
      // way, simply as the frame index and an offset, rather than as any
      // target-specific addressing mode.
      if (replaceFrameIndexDebugInstr(MF, MI, i, SPAdj))
        continue;

      // Some instructions (e.g. inline asm instructions) can have
      // multiple frame indices and/or cause eliminateFrameIndex
      // to insert more than one instruction. We need the register
      // scavenger to go through all of these instructions so that
      // it can update its register information. We keep the
      // iterator at the point before insertion so that we can
      // revisit them in full.
      bool AtBeginning = (I == BB->begin());
      if (!AtBeginning) --I;

      // If this instruction has a FrameIndex operand, we need to
      // use that target machine register info object to eliminate
      // it.
      TRI.eliminateFrameIndex(MI, SPAdj, i,
                              FrameIndexEliminationScavenging ? RS : nullptr);

      // Reset the iterator if we were at the beginning of the BB.
      if (AtBeginning) {
        I = BB->begin();
        DoIncr = false;
      }

      DidFinishLoop = false;
      break;
    }

    // If we are looking at a call sequence, we need to keep track of
    // the SP adjustment made by each instruction in the sequence.
    // This includes both the frame setup/destroy pseudos (handled above),
    // as well as other instructions that have side effects w.r.t. the SP.
    // Note that this must come after eliminateFrameIndex, because
    // if I itself referred to a frame index, we shouldn't count its own
    // adjustment.
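    // For example, on targets that pass arguments with push instructions,
    // each push between the call-frame setup and destroy pseudos reports a
    // nonzero getSPAdjust() that has to be folded into SPAdj here so that
    // later frame-index references in the same sequence see the moved SP.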
    if (DidFinishLoop && InsideCallSequence)
      SPAdj += TII.getSPAdjust(MI);

    if (DoIncr && I != BB->end())
      ++I;

    // Update register states.
    if (RS && FrameIndexEliminationScavenging && DidFinishLoop)
      RS->forward(MI);
  }
}