//===- PrologEpilogInserter.cpp - Insert Prolog/Epilog code in function ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass is responsible for finalizing the function's frame layout, saving
// callee saved registers, and for emitting prolog & epilog code for the
// function.
//
// This pass must be run after register allocation. After this pass is
// executed, it is illegal to construct MO_FrameIndex operands.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <functional>
#include <limits>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "prologepilog"

using MBBVector = SmallVector<MachineBasicBlock *, 4>;

STATISTIC(NumLeafFuncWithSpills, "Number of leaf functions with CSRs");
STATISTIC(NumFuncSeen, "Number of functions seen in PEI");


namespace {

class PEI : public MachineFunctionPass {
public:
  static char ID;

  PEI() : MachineFunctionPass(ID) {
    initializePEIPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  /// runOnMachineFunction - Insert prolog/epilog code and replace abstract
  /// frame indexes with appropriate references.
  bool runOnMachineFunction(MachineFunction &MF) override;

private:
  RegScavenger *RS;

  // MinCSFrameIndex, MaxCSFrameIndex - Keeps the range of callee saved
  // stack frame indexes.
  unsigned MinCSFrameIndex = std::numeric_limits<unsigned>::max();
  unsigned MaxCSFrameIndex = 0;

  // Save and Restore blocks of the current function. Typically there is a
  // single save block, unless Windows EH funclets are involved.
  MBBVector SaveBlocks;
  MBBVector RestoreBlocks;

  // Flag to control whether to use the register scavenger to resolve
  // frame index materialization registers. Set according to
  // TRI->requiresFrameIndexScavenging() for the current function.
  bool FrameIndexVirtualScavenging;

  // Flag to control whether the scavenger should be passed even though
  // FrameIndexVirtualScavenging is used.
  bool FrameIndexEliminationScavenging;

  // Emit remarks.
  MachineOptimizationRemarkEmitter *ORE = nullptr;

  void calculateCallFrameInfo(MachineFunction &MF);
  void calculateSaveRestoreBlocks(MachineFunction &MF);
  void spillCalleeSavedRegs(MachineFunction &MF);

  void calculateFrameObjectOffsets(MachineFunction &MF);
  void replaceFrameIndices(MachineFunction &MF);
  void replaceFrameIndices(MachineBasicBlock *BB, MachineFunction &MF,
                           int &SPAdj);
  void insertPrologEpilogCode(MachineFunction &MF);
  void insertZeroCallUsedRegs(MachineFunction &MF);
};

} // end anonymous namespace

char PEI::ID = 0;

char &llvm::PrologEpilogCodeInserterID = PEI::ID;

INITIALIZE_PASS_BEGIN(PEI, DEBUG_TYPE, "Prologue/Epilogue Insertion", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachineOptimizationRemarkEmitterPass)
INITIALIZE_PASS_END(PEI, DEBUG_TYPE,
                    "Prologue/Epilogue Insertion & Frame Finalization", false,
                    false)

MachineFunctionPass *llvm::createPrologEpilogInserterPass() {
  return new PEI();
}

STATISTIC(NumBytesStackSpace,
          "Number of bytes used for stack in all functions");

void PEI::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addPreserved<MachineLoopInfo>();
  AU.addPreserved<MachineDominatorTree>();
  AU.addRequired<MachineOptimizationRemarkEmitterPass>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

/// StackObjSet - A set of stack object indexes
using StackObjSet = SmallSetVector<int, 8>;

using SavedDbgValuesMap =
    SmallDenseMap<MachineBasicBlock *, SmallVector<MachineInstr *, 4>, 4>;

/// Stash DBG_VALUEs that describe parameters and which are placed at the start
/// of the block. Later on, after the prologue code has been emitted, the
/// stashed DBG_VALUEs will be reinserted at the start of the block.
static void stashEntryDbgValues(MachineBasicBlock &MBB,
                                SavedDbgValuesMap &EntryDbgValues) {
  SmallVector<const MachineInstr *, 4> FrameIndexValues;

  for (auto &MI : MBB) {
    if (!MI.isDebugInstr())
      break;
    if (!MI.isDebugValue() || !MI.getDebugVariable()->isParameter())
      continue;
    if (any_of(MI.debug_operands(),
               [](const MachineOperand &MO) { return MO.isFI(); })) {
      // We can only emit valid locations for frame indices after the frame
      // setup, so do not stash them away.
      FrameIndexValues.push_back(&MI);
      continue;
    }
    const DILocalVariable *Var = MI.getDebugVariable();
    const DIExpression *Expr = MI.getDebugExpression();
    auto Overlaps = [Var, Expr](const MachineInstr *DV) {
      return Var == DV->getDebugVariable() &&
             Expr->fragmentsOverlap(DV->getDebugExpression());
    };
    // See if the debug value overlaps with any preceding debug value that will
    // not be stashed. If that is the case, then we can't stash this value, as
    // we would then reorder the values at reinsertion.
    if (llvm::none_of(FrameIndexValues, Overlaps))
      EntryDbgValues[&MBB].push_back(&MI);
  }

  // Remove stashed debug values from the block.
  if (EntryDbgValues.count(&MBB))
    for (auto *MI : EntryDbgValues[&MBB])
      MI->removeFromParent();
}

/// runOnMachineFunction - Insert prolog/epilog code and replace abstract
/// frame indexes with appropriate references.
bool PEI::runOnMachineFunction(MachineFunction &MF) {
  NumFuncSeen++;
  const Function &F = MF.getFunction();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();

  RS = TRI->requiresRegisterScavenging(MF) ? new RegScavenger() : nullptr;
  FrameIndexVirtualScavenging = TRI->requiresFrameIndexScavenging(MF);
  ORE = &getAnalysis<MachineOptimizationRemarkEmitterPass>().getORE();

  // Calculate the MaxCallFrameSize and AdjustsStack variables for the
  // function's frame information. Also eliminates call frame pseudo
  // instructions.
  calculateCallFrameInfo(MF);

  // Determine placement of CSR spill/restore code and prolog/epilog code:
  // place all spills in the entry block, all restores in return blocks.
  calculateSaveRestoreBlocks(MF);

  // Stash away DBG_VALUEs that should not be moved by insertion of prolog
  // code.
  SavedDbgValuesMap EntryDbgValues;
  for (MachineBasicBlock *SaveBlock : SaveBlocks)
    stashEntryDbgValues(*SaveBlock, EntryDbgValues);

  // Handle CSR spilling and restoring, for targets that need it.
  if (MF.getTarget().usesPhysRegsForValues())
    spillCalleeSavedRegs(MF);

  // Allow the target machine to make final modifications to the function
  // before the frame layout is finalized.
  TFI->processFunctionBeforeFrameFinalized(MF, RS);

  // Calculate actual frame offsets for all abstract stack objects...
  calculateFrameObjectOffsets(MF);

  // Add prolog and epilog code to the function. This function is required
  // to align the stack frame as necessary for any stack variables or
  // called functions. Because of this, calculateCalleeSavedRegisters()
  // must be called before this function in order to set the AdjustsStack
  // and MaxCallFrameSize variables.
  if (!F.hasFnAttribute(Attribute::Naked))
    insertPrologEpilogCode(MF);

  // Reinsert stashed debug values at the start of the entry blocks.
  for (auto &I : EntryDbgValues)
    I.first->insert(I.first->begin(), I.second.begin(), I.second.end());

  // Allow the target machine to make final modifications to the function
  // before the frame layout is finalized.
  TFI->processFunctionBeforeFrameIndicesReplaced(MF, RS);

  // Replace all MO_FrameIndex operands with physical register references
  // and actual offsets.
  //
  replaceFrameIndices(MF);

  // If register scavenging is needed, as we've enabled doing it as a
  // post-pass, scavenge the virtual registers that frame index elimination
  // inserted.
  if (TRI->requiresRegisterScavenging(MF) && FrameIndexVirtualScavenging)
    scavengeFrameVirtualRegs(MF, *RS);

  // Warn on stack size when it exceeds the given limit.
  MachineFrameInfo &MFI = MF.getFrameInfo();
  uint64_t StackSize = MFI.getStackSize();

  unsigned Threshold = UINT_MAX;
  if (MF.getFunction().hasFnAttribute("warn-stack-size")) {
    bool Failed = MF.getFunction()
                      .getFnAttribute("warn-stack-size")
                      .getValueAsString()
                      .getAsInteger(10, Threshold);
    // Verifier should have caught this.
    assert(!Failed && "Invalid warn-stack-size fn attr value");
    (void)Failed;
  }
  uint64_t UnsafeStackSize = MFI.getUnsafeStackSize();
  if (MF.getFunction().hasFnAttribute(Attribute::SafeStack))
    StackSize += UnsafeStackSize;

  if (StackSize > Threshold) {
    DiagnosticInfoStackSize DiagStackSize(F, StackSize, Threshold, DS_Warning);
    F.getContext().diagnose(DiagStackSize);
    int64_t SpillSize = 0;
    for (int Idx = MFI.getObjectIndexBegin(), End = MFI.getObjectIndexEnd();
         Idx != End; ++Idx) {
      if (MFI.isSpillSlotObjectIndex(Idx))
        SpillSize += MFI.getObjectSize(Idx);
    }

    float SpillPct =
        static_cast<float>(SpillSize) / static_cast<float>(StackSize);
    float VarPct = 1.0f - SpillPct;
    int64_t VariableSize = StackSize - SpillSize;
    dbgs() << formatv("{0}/{1} ({3:P}) spills, {2}/{1} ({4:P}) variables",
                      SpillSize, StackSize, VariableSize, SpillPct, VarPct);
    if (UnsafeStackSize != 0) {
      float UnsafePct =
          static_cast<float>(UnsafeStackSize) / static_cast<float>(StackSize);
      dbgs() << formatv(", {0}/{2} ({1:P}) unsafe stack", UnsafeStackSize,
                        UnsafePct, StackSize);
    }
    dbgs() << "\n";
  }

  ORE->emit([&]() {
    return MachineOptimizationRemarkAnalysis(DEBUG_TYPE, "StackSize",
                                             MF.getFunction().getSubprogram(),
                                             &MF.front())
           << ore::NV("NumStackBytes", StackSize) << " stack bytes in function";
  });

  delete RS;
  SaveBlocks.clear();
  RestoreBlocks.clear();
  MFI.setSavePoint(nullptr);
  MFI.setRestorePoint(nullptr);
  return true;
}

/// Calculate the MaxCallFrameSize and AdjustsStack
/// variables for the function's frame information and eliminate call frame
/// pseudo instructions.
void PEI::calculateCallFrameInfo(MachineFunction &MF) {
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  unsigned MaxCallFrameSize = 0;
  bool AdjustsStack = MFI.adjustsStack();

  // Get the function call frame set-up and tear-down instruction opcode.
  unsigned FrameSetupOpcode = TII.getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = TII.getCallFrameDestroyOpcode();

  // Early exit for targets which have no call frame setup/destroy pseudo
  // instructions.
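  // (Illustrative note, not from the original source: a target that defines
  // no such pseudos reports the sentinel ~0u for both opcodes. On a target
  // that does define them, a call passing 16 bytes of outgoing arguments is
  // typically bracketed by a frame-setup pseudo carrying the size 16 and a
  // matching frame-destroy pseudo; getFrameSize(*I) in the loop below reads
  // that size, which is what feeds MaxCallFrameSize.)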
  if (FrameSetupOpcode == ~0u && FrameDestroyOpcode == ~0u)
    return;

  std::vector<MachineBasicBlock::iterator> FrameSDOps;
  for (MachineBasicBlock &BB : MF)
    for (MachineBasicBlock::iterator I = BB.begin(); I != BB.end(); ++I)
      if (TII.isFrameInstr(*I)) {
        unsigned Size = TII.getFrameSize(*I);
        if (Size > MaxCallFrameSize) MaxCallFrameSize = Size;
        AdjustsStack = true;
        FrameSDOps.push_back(I);
      } else if (I->isInlineAsm()) {
        // Some inline asm statements need a stack frame, as indicated by
        // operand 1.
        unsigned ExtraInfo = I->getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
        if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
          AdjustsStack = true;
      }

  assert(!MFI.isMaxCallFrameSizeComputed() ||
         (MFI.getMaxCallFrameSize() == MaxCallFrameSize &&
          MFI.adjustsStack() == AdjustsStack));
  MFI.setAdjustsStack(AdjustsStack);
  MFI.setMaxCallFrameSize(MaxCallFrameSize);

  for (MachineBasicBlock::iterator I : FrameSDOps) {
    // If call frames are not being included as part of the stack frame, and
    // the target doesn't indicate otherwise, remove the call frame pseudos
    // here. The sub/add sp instruction pairs are still inserted, but we don't
    // need to track the SP adjustment for frame index elimination.
    if (TFI->canSimplifyCallFramePseudos(MF))
      TFI->eliminateCallFramePseudoInstr(MF, *I->getParent(), I);
  }
}

/// Compute the sets of entry and return blocks for saving and restoring
/// callee-saved registers, and placing prolog and epilog code.
void PEI::calculateSaveRestoreBlocks(MachineFunction &MF) {
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // Even when we do not change any CSR, we still want to insert the
  // prologue and epilogue of the function.
  // So set the save points for those.

  // Use the points found by shrink-wrapping, if any.
  if (MFI.getSavePoint()) {
    SaveBlocks.push_back(MFI.getSavePoint());
    assert(MFI.getRestorePoint() && "Both restore and save must be set");
    MachineBasicBlock *RestoreBlock = MFI.getRestorePoint();
    // If RestoreBlock does not have any successor and is not a return block
    // then the end point is unreachable and we do not need to insert any
    // epilogue.
    if (!RestoreBlock->succ_empty() || RestoreBlock->isReturnBlock())
      RestoreBlocks.push_back(RestoreBlock);
    return;
  }

  // Save refs to entry and return blocks.
  SaveBlocks.push_back(&MF.front());
  for (MachineBasicBlock &MBB : MF) {
    if (MBB.isEHFuncletEntry())
      SaveBlocks.push_back(&MBB);
    if (MBB.isReturnBlock())
      RestoreBlocks.push_back(&MBB);
  }
}

static void assignCalleeSavedSpillSlots(MachineFunction &F,
                                        const BitVector &SavedRegs,
                                        unsigned &MinCSFrameIndex,
                                        unsigned &MaxCSFrameIndex) {
  if (SavedRegs.empty())
    return;

  const TargetRegisterInfo *RegInfo = F.getSubtarget().getRegisterInfo();
  const MCPhysReg *CSRegs = F.getRegInfo().getCalleeSavedRegs();
  BitVector CSMask(SavedRegs.size());

  for (unsigned i = 0; CSRegs[i]; ++i)
    CSMask.set(CSRegs[i]);

  std::vector<CalleeSavedInfo> CSI;
  for (unsigned i = 0; CSRegs[i]; ++i) {
    unsigned Reg = CSRegs[i];
    if (SavedRegs.test(Reg)) {
      bool SavedSuper = false;
      for (const MCPhysReg &SuperReg : RegInfo->superregs(Reg)) {
        // Some backends set all aliases for some registers as saved, such as
        // Mips's $fp, so they appear in SavedRegs but not CSRegs.
        if (SavedRegs.test(SuperReg) && CSMask.test(SuperReg)) {
          SavedSuper = true;
          break;
        }
      }

      if (!SavedSuper)
        CSI.push_back(CalleeSavedInfo(Reg));
    }
  }

  const TargetFrameLowering *TFI = F.getSubtarget().getFrameLowering();
  MachineFrameInfo &MFI = F.getFrameInfo();
  if (!TFI->assignCalleeSavedSpillSlots(F, RegInfo, CSI, MinCSFrameIndex,
                                        MaxCSFrameIndex)) {
    // If the target doesn't implement this, use generic code.

    if (CSI.empty())
      return; // Early exit if no callee saved registers are modified!

    unsigned NumFixedSpillSlots;
    const TargetFrameLowering::SpillSlot *FixedSpillSlots =
        TFI->getCalleeSavedSpillSlots(NumFixedSpillSlots);

    // Now that we know which registers need to be saved and restored, allocate
    // stack slots for them.
    for (auto &CS : CSI) {
      // If the target has spilled this register to another register, we don't
      // need to allocate a stack slot.
      if (CS.isSpilledToReg())
        continue;

      unsigned Reg = CS.getReg();
      const TargetRegisterClass *RC = RegInfo->getMinimalPhysRegClass(Reg);

      int FrameIdx;
      if (RegInfo->hasReservedSpillSlot(F, Reg, FrameIdx)) {
        CS.setFrameIdx(FrameIdx);
        continue;
      }

      // Check to see if this physreg must be spilled to a particular stack
      // slot on this target.
      const TargetFrameLowering::SpillSlot *FixedSlot = FixedSpillSlots;
      while (FixedSlot != FixedSpillSlots + NumFixedSpillSlots &&
             FixedSlot->Reg != Reg)
        ++FixedSlot;

      unsigned Size = RegInfo->getSpillSize(*RC);
      if (FixedSlot == FixedSpillSlots + NumFixedSpillSlots) {
        // Nope, just spill it anywhere convenient.
        Align Alignment = RegInfo->getSpillAlign(*RC);
        // We may not be able to satisfy the desired alignment specification of
        // the TargetRegisterClass if the stack alignment is smaller. Use the
        // min.
        Alignment = std::min(Alignment, TFI->getStackAlign());
        FrameIdx = MFI.CreateStackObject(Size, Alignment, true);
        if ((unsigned)FrameIdx < MinCSFrameIndex) MinCSFrameIndex = FrameIdx;
        if ((unsigned)FrameIdx > MaxCSFrameIndex) MaxCSFrameIndex = FrameIdx;
      } else {
        // Spill it to the stack where we must.
        FrameIdx = MFI.CreateFixedSpillStackObject(Size, FixedSlot->Offset);
      }

      CS.setFrameIdx(FrameIdx);
    }
  }

  MFI.setCalleeSavedInfo(CSI);
}

/// Helper function to update the liveness information for the callee-saved
/// registers.
static void updateLiveness(MachineFunction &MF) {
  MachineFrameInfo &MFI = MF.getFrameInfo();
  // Visited will contain all the basic blocks that are in the region
  // where the callee saved registers are alive:
  // - Anything that is not Save or Restore -> LiveThrough.
  // - Save -> LiveIn.
  // - Restore -> LiveOut.
  // The live-out is not attached to the block, so no need to keep
  // Restore in this set.
  SmallPtrSet<MachineBasicBlock *, 8> Visited;
  SmallVector<MachineBasicBlock *, 8> WorkList;
  MachineBasicBlock *Entry = &MF.front();
  MachineBasicBlock *Save = MFI.getSavePoint();

  if (!Save)
    Save = Entry;

  if (Entry != Save) {
    WorkList.push_back(Entry);
    Visited.insert(Entry);
  }
  Visited.insert(Save);

  MachineBasicBlock *Restore = MFI.getRestorePoint();
  if (Restore)
    // By construction Restore cannot be visited, otherwise it
    // means there exists a path to Restore that does not go
    // through Save.
    WorkList.push_back(Restore);

  while (!WorkList.empty()) {
    const MachineBasicBlock *CurBB = WorkList.pop_back_val();
    // By construction, the region that is after the save point is
    // dominated by the Save and post-dominated by the Restore.
    if (CurBB == Save && Save != Restore)
      continue;
    // Enqueue all the successors not already visited.
    // Those are by construction either before Save or after Restore.
    for (MachineBasicBlock *SuccBB : CurBB->successors())
      if (Visited.insert(SuccBB).second)
        WorkList.push_back(SuccBB);
  }

  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();

  MachineRegisterInfo &MRI = MF.getRegInfo();
  for (const CalleeSavedInfo &I : CSI) {
    for (MachineBasicBlock *MBB : Visited) {
      MCPhysReg Reg = I.getReg();
      // Add the callee-saved register as live-in.
      // It's killed at the spill.
      if (!MRI.isReserved(Reg) && !MBB->isLiveIn(Reg))
        MBB->addLiveIn(Reg);
    }
    // If a callee-saved register is spilled to another register rather than
    // to a stack slot, the destination register has to be marked as live for
    // each MBB between the prologue and epilogue so that it is not clobbered
    // before it is reloaded in the epilogue. The Visited set contains all
    // blocks outside of the region delimited by prologue/epilogue.
    if (I.isSpilledToReg()) {
      for (MachineBasicBlock &MBB : MF) {
        if (Visited.count(&MBB))
          continue;
        MCPhysReg DstReg = I.getDstReg();
        if (!MBB.isLiveIn(DstReg))
          MBB.addLiveIn(DstReg);
      }
    }
  }
}

/// Insert spill code for the callee-saved registers used in the function.
static void insertCSRSaves(MachineBasicBlock &SaveBlock,
                           ArrayRef<CalleeSavedInfo> CSI) {
  MachineFunction &MF = *SaveBlock.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  MachineBasicBlock::iterator I = SaveBlock.begin();
  if (!TFI->spillCalleeSavedRegisters(SaveBlock, I, CSI, TRI)) {
    for (const CalleeSavedInfo &CS : CSI) {
      // Insert the spill to the stack frame.
      unsigned Reg = CS.getReg();

      if (CS.isSpilledToReg()) {
        BuildMI(SaveBlock, I, DebugLoc(),
                TII.get(TargetOpcode::COPY), CS.getDstReg())
            .addReg(Reg, getKillRegState(true));
      } else {
        const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
        TII.storeRegToStackSlot(SaveBlock, I, Reg, true, CS.getFrameIdx(), RC,
                                TRI);
      }
    }
  }
}

/// Insert restore code for the callee-saved registers used in the function.
static void insertCSRRestores(MachineBasicBlock &RestoreBlock,
                              std::vector<CalleeSavedInfo> &CSI) {
  MachineFunction &MF = *RestoreBlock.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  // Restore all registers immediately before the return and any
  // terminators that precede it.
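  // (Illustrative note, not from the original source: getFirstTerminator()
  // points at the first terminator of the block, so if a return block ends
  // with, say, a conditional branch followed by the return, the reloads built
  // below are inserted before both of those terminators.)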
  MachineBasicBlock::iterator I = RestoreBlock.getFirstTerminator();

  if (!TFI->restoreCalleeSavedRegisters(RestoreBlock, I, CSI, TRI)) {
    for (const CalleeSavedInfo &CI : reverse(CSI)) {
      unsigned Reg = CI.getReg();
      if (CI.isSpilledToReg()) {
        BuildMI(RestoreBlock, I, DebugLoc(), TII.get(TargetOpcode::COPY), Reg)
            .addReg(CI.getDstReg(), getKillRegState(true));
      } else {
        const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
        TII.loadRegFromStackSlot(RestoreBlock, I, Reg, CI.getFrameIdx(), RC,
                                 TRI);
        assert(I != RestoreBlock.begin() &&
               "loadRegFromStackSlot didn't insert any code!");
        // Insert in reverse order. loadRegFromStackSlot can insert
        // multiple instructions.
      }
    }
  }
}

void PEI::spillCalleeSavedRegs(MachineFunction &MF) {
  // We can't list this requirement in getRequiredProperties because some
  // targets (WebAssembly) use virtual registers past this point, and the pass
  // pipeline is set up without giving the passes a chance to look at the
  // TargetMachine.
  // FIXME: Find a way to express this in getRequiredProperties.
  assert(MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::NoVRegs));

  const Function &F = MF.getFunction();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MinCSFrameIndex = std::numeric_limits<unsigned>::max();
  MaxCSFrameIndex = 0;

  // Determine which of the registers in the callee save list should be saved.
  BitVector SavedRegs;
  TFI->determineCalleeSaves(MF, SavedRegs, RS);

  // Assign stack slots for any callee-saved registers that must be spilled.
  assignCalleeSavedSpillSlots(MF, SavedRegs, MinCSFrameIndex, MaxCSFrameIndex);

  // Add the code to save and restore the callee saved registers.
  if (!F.hasFnAttribute(Attribute::Naked)) {
    MFI.setCalleeSavedInfoValid(true);

    std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
    if (!CSI.empty()) {
      if (!MFI.hasCalls())
        NumLeafFuncWithSpills++;

      for (MachineBasicBlock *SaveBlock : SaveBlocks)
        insertCSRSaves(*SaveBlock, CSI);

      // Update the live-in information of all the blocks up to the save point.
      updateLiveness(MF);

      for (MachineBasicBlock *RestoreBlock : RestoreBlocks)
        insertCSRRestores(*RestoreBlock, CSI);
    }
  }
}

/// AdjustStackOffset - Helper function used to adjust the stack frame offset.
static inline void AdjustStackOffset(MachineFrameInfo &MFI, int FrameIdx,
                                     bool StackGrowsDown, int64_t &Offset,
                                     Align &MaxAlign, unsigned Skew) {
  // If the stack grows down, add the object size to find the lowest address.
  if (StackGrowsDown)
    Offset += MFI.getObjectSize(FrameIdx);

  Align Alignment = MFI.getObjectAlign(FrameIdx);

  // If the alignment of this object is greater than that of the stack, then
  // increase the stack alignment to match.
  MaxAlign = std::max(MaxAlign, Alignment);

  // Adjust to alignment boundary.
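  // (Illustrative worked example, not from the original source, assuming
  // llvm::alignTo's skew semantics: with Offset = 20, Alignment = 16 and
  // Skew = 8, the call below rounds Offset up to 24, the next value that is
  // congruent to 8 modulo 16; with Skew = 0 it would round up to 32.)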
  Offset = alignTo(Offset, Alignment, Skew);

  if (StackGrowsDown) {
    LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << -Offset
                      << "]\n");
    MFI.setObjectOffset(FrameIdx, -Offset); // Set the computed offset
  } else {
    LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << Offset
                      << "]\n");
    MFI.setObjectOffset(FrameIdx, Offset);
    Offset += MFI.getObjectSize(FrameIdx);
  }
}

/// Compute which bytes of fixed and callee-save stack area are unused and keep
/// track of them in StackBytesFree.
static inline void
computeFreeStackSlots(MachineFrameInfo &MFI, bool StackGrowsDown,
                      unsigned MinCSFrameIndex, unsigned MaxCSFrameIndex,
                      int64_t FixedCSEnd, BitVector &StackBytesFree) {
  // Avoid undefined int64_t -> int conversion below in extreme case.
  if (FixedCSEnd > std::numeric_limits<int>::max())
    return;

  StackBytesFree.resize(FixedCSEnd, true);

  SmallVector<int, 16> AllocatedFrameSlots;
  // Add fixed objects.
  for (int i = MFI.getObjectIndexBegin(); i != 0; ++i)
    // StackSlot scavenging is only implemented for the default stack.
    if (MFI.getStackID(i) == TargetStackID::Default)
      AllocatedFrameSlots.push_back(i);
  // Add callee-save objects if there are any.
  if (MinCSFrameIndex <= MaxCSFrameIndex) {
    for (int i = MinCSFrameIndex; i <= (int)MaxCSFrameIndex; ++i)
      if (MFI.getStackID(i) == TargetStackID::Default)
        AllocatedFrameSlots.push_back(i);
  }

  for (int i : AllocatedFrameSlots) {
    // These are converted from int64_t, but they should always fit in int
    // because of the FixedCSEnd check above.
    int ObjOffset = MFI.getObjectOffset(i);
    int ObjSize = MFI.getObjectSize(i);
    int ObjStart, ObjEnd;
    if (StackGrowsDown) {
      // ObjOffset is negative when StackGrowsDown is true.
      ObjStart = -ObjOffset - ObjSize;
      ObjEnd = -ObjOffset;
    } else {
      ObjStart = ObjOffset;
      ObjEnd = ObjOffset + ObjSize;
    }
    // Ignore fixed holes that are in the previous stack frame.
    if (ObjEnd > 0)
      StackBytesFree.reset(ObjStart, ObjEnd);
  }
}

/// Assign frame object to an unused portion of the stack in the fixed stack
/// object range. Return true if the allocation was successful.
static inline bool scavengeStackSlot(MachineFrameInfo &MFI, int FrameIdx,
                                     bool StackGrowsDown, Align MaxAlign,
                                     BitVector &StackBytesFree) {
  if (MFI.isVariableSizedObjectIndex(FrameIdx))
    return false;

  if (StackBytesFree.none()) {
    // clear it to speed up later scavengeStackSlot calls to
    // StackBytesFree.none()
    StackBytesFree.clear();
    return false;
  }

  Align ObjAlign = MFI.getObjectAlign(FrameIdx);
  if (ObjAlign > MaxAlign)
    return false;

  int64_t ObjSize = MFI.getObjectSize(FrameIdx);
  int FreeStart;
  for (FreeStart = StackBytesFree.find_first(); FreeStart != -1;
       FreeStart = StackBytesFree.find_next(FreeStart)) {

    // Check that free space has suitable alignment.
    unsigned ObjStart = StackGrowsDown ? FreeStart + ObjSize : FreeStart;
    if (alignTo(ObjStart, ObjAlign) != ObjStart)
      continue;

    if (FreeStart + ObjSize > StackBytesFree.size())
      return false;

    bool AllBytesFree = true;
    for (unsigned Byte = 0; Byte < ObjSize; ++Byte)
      if (!StackBytesFree.test(FreeStart + Byte)) {
        AllBytesFree = false;
        break;
      }
    if (AllBytesFree)
      break;
  }

  if (FreeStart == -1)
    return false;

  if (StackGrowsDown) {
    int ObjStart = -(FreeStart + ObjSize);
    LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") scavenged at SP["
                      << ObjStart << "]\n");
    MFI.setObjectOffset(FrameIdx, ObjStart);
  } else {
    LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") scavenged at SP["
                      << FreeStart << "]\n");
    MFI.setObjectOffset(FrameIdx, FreeStart);
  }

  StackBytesFree.reset(FreeStart, FreeStart + ObjSize);
  return true;
}

/// AssignProtectedObjSet - Helper function to assign large stack objects (i.e.,
/// those required to be close to the Stack Protector) to stack offsets.
static void AssignProtectedObjSet(const StackObjSet &UnassignedObjs,
                                  SmallSet<int, 16> &ProtectedObjs,
                                  MachineFrameInfo &MFI, bool StackGrowsDown,
                                  int64_t &Offset, Align &MaxAlign,
                                  unsigned Skew) {

  for (int i : UnassignedObjs) {
    AdjustStackOffset(MFI, i, StackGrowsDown, Offset, MaxAlign, Skew);
    ProtectedObjs.insert(i);
  }
}

/// calculateFrameObjectOffsets - Calculate actual frame offsets for all of the
/// abstract stack objects.
void PEI::calculateFrameObjectOffsets(MachineFunction &MF) {
  const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();

  bool StackGrowsDown =
      TFI.getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;

  // Loop over all of the stack objects, assigning sequential addresses...
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // Start at the beginning of the local area.
  // The Offset is the distance from the stack top in the direction
  // of stack growth -- so it's always nonnegative.
  int LocalAreaOffset = TFI.getOffsetOfLocalArea();
  if (StackGrowsDown)
    LocalAreaOffset = -LocalAreaOffset;
  assert(LocalAreaOffset >= 0 &&
         "Local area offset should be in direction of stack growth");
  int64_t Offset = LocalAreaOffset;

  // Skew to be applied to alignment.
  unsigned Skew = TFI.getStackAlignmentSkew(MF);

#ifdef EXPENSIVE_CHECKS
  for (unsigned i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i)
    if (!MFI.isDeadObjectIndex(i) &&
        MFI.getStackID(i) == TargetStackID::Default)
      assert(MFI.getObjectAlign(i) <= MFI.getMaxAlign() &&
             "MaxAlignment is invalid");
#endif

  // If there are fixed sized objects that are preallocated in the local area,
  // non-fixed objects can't be allocated right at the start of local area.
  // Adjust 'Offset' to point to the end of last fixed sized preallocated
  // object.
  for (int i = MFI.getObjectIndexBegin(); i != 0; ++i) {
    // Only allocate objects on the default stack.
    if (MFI.getStackID(i) != TargetStackID::Default)
      continue;

    int64_t FixedOff;
    if (StackGrowsDown) {
      // The maximum distance from the stack pointer is at the lower address
      // of the object -- which is given by the offset. For a downward-growing
      // stack the offset is negative, so we negate it to get the distance.
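      // (Illustrative example, not from the original source: a fixed object
      // of size 8 at offset -16 yields FixedOff = 16, so 'Offset' is bumped
      // to at least 16 below.)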
      FixedOff = -MFI.getObjectOffset(i);
    } else {
      // The maximum distance from the stack pointer is at the upper
      // address of the object.
      FixedOff = MFI.getObjectOffset(i) + MFI.getObjectSize(i);
    }
    if (FixedOff > Offset) Offset = FixedOff;
  }

  Align MaxAlign = MFI.getMaxAlign();
  // First assign frame offsets to stack objects that are used to spill
  // callee saved registers.
  if (MaxCSFrameIndex >= MinCSFrameIndex) {
    for (unsigned i = 0; i <= MaxCSFrameIndex - MinCSFrameIndex; ++i) {
      unsigned FrameIndex =
          StackGrowsDown ? MinCSFrameIndex + i : MaxCSFrameIndex - i;

      // Only allocate objects on the default stack.
      if (MFI.getStackID(FrameIndex) != TargetStackID::Default)
        continue;

      // TODO: should this just be if (MFI.isDeadObjectIndex(FrameIndex))
      if (!StackGrowsDown && MFI.isDeadObjectIndex(FrameIndex))
        continue;

      AdjustStackOffset(MFI, FrameIndex, StackGrowsDown, Offset, MaxAlign,
                        Skew);
    }
  }

  assert(MaxAlign == MFI.getMaxAlign() &&
         "MFI.getMaxAlign should already account for all callee-saved "
         "registers without a fixed stack slot");

  // FixedCSEnd is the stack offset to the end of the fixed and callee-save
  // stack area.
  int64_t FixedCSEnd = Offset;

  // Make sure the special register scavenging spill slot is closest to the
  // incoming stack pointer if a frame pointer is required and is closer
  // to the incoming rather than the final stack pointer.
  const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
  bool EarlyScavengingSlots =
      TFI.allocateScavengingFrameIndexesNearIncomingSP(MF);
  if (RS && EarlyScavengingSlots) {
    SmallVector<int, 2> SFIs;
    RS->getScavengingFrameIndices(SFIs);
    for (int SFI : SFIs)
      AdjustStackOffset(MFI, SFI, StackGrowsDown, Offset, MaxAlign, Skew);
  }

  // FIXME: Once this is working, then enable flag will change to a target
  // check for whether the frame is large enough to want to use virtual
  // frame index registers. Functions which don't want/need this optimization
  // will continue to use the existing code path.
  if (MFI.getUseLocalStackAllocationBlock()) {
    Align Alignment = MFI.getLocalFrameMaxAlign();

    // Adjust to alignment boundary.
    Offset = alignTo(Offset, Alignment, Skew);

    LLVM_DEBUG(dbgs() << "Local frame base offset: " << Offset << "\n");

    // Resolve offsets for objects in the local block.
    for (unsigned i = 0, e = MFI.getLocalFrameObjectCount(); i != e; ++i) {
      std::pair<int, int64_t> Entry = MFI.getLocalFrameObjectMap(i);
      int64_t FIOffset = (StackGrowsDown ? -Offset : Offset) + Entry.second;
      LLVM_DEBUG(dbgs() << "alloc FI(" << Entry.first << ") at SP[" << FIOffset
                        << "]\n");
      MFI.setObjectOffset(Entry.first, FIOffset);
    }
    // Allocate the local block
    Offset += MFI.getLocalFrameSize();

    MaxAlign = std::max(Alignment, MaxAlign);
  }

  // Retrieve the Exception Handler registration node.
  int EHRegNodeFrameIndex = std::numeric_limits<int>::max();
  if (const WinEHFuncInfo *FuncInfo = MF.getWinEHFuncInfo())
    EHRegNodeFrameIndex = FuncInfo->EHRegNodeFrameIndex;

  // Make sure that the stack protector comes before the local variables on the
  // stack.
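  // (Clarifying note, not from the original source: the protected objects are
  // laid out in the order large arrays, then small arrays, then address-taken
  // objects -- see the three AssignProtectedObjSet calls below.)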
  SmallSet<int, 16> ProtectedObjs;
  if (MFI.hasStackProtectorIndex()) {
    int StackProtectorFI = MFI.getStackProtectorIndex();
    StackObjSet LargeArrayObjs;
    StackObjSet SmallArrayObjs;
    StackObjSet AddrOfObjs;

    // If we need a stack protector, we need to make sure that
    // LocalStackSlotPass didn't already allocate a slot for it.
    // If we are told to use the LocalStackAllocationBlock, the stack protector
    // is expected to be already pre-allocated.
    if (MFI.getStackID(StackProtectorFI) != TargetStackID::Default) {
      // If the stack protector isn't on the default stack then it's up to the
      // target to set the stack offset.
      assert(MFI.getObjectOffset(StackProtectorFI) != 0 &&
             "Offset of stack protector on non-default stack expected to be "
             "already set.");
      assert(!MFI.isObjectPreAllocated(MFI.getStackProtectorIndex()) &&
             "Stack protector on non-default stack expected to not be "
             "pre-allocated by LocalStackSlotPass.");
    } else if (!MFI.getUseLocalStackAllocationBlock()) {
      AdjustStackOffset(MFI, StackProtectorFI, StackGrowsDown, Offset, MaxAlign,
                        Skew);
    } else if (!MFI.isObjectPreAllocated(MFI.getStackProtectorIndex())) {
      llvm_unreachable(
          "Stack protector not pre-allocated by LocalStackSlotPass.");
    }

    // Assign large stack objects first.
    for (unsigned i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) {
      if (MFI.isObjectPreAllocated(i) && MFI.getUseLocalStackAllocationBlock())
        continue;
      if (i >= MinCSFrameIndex && i <= MaxCSFrameIndex)
        continue;
      if (RS && RS->isScavengingFrameIndex((int)i))
        continue;
      if (MFI.isDeadObjectIndex(i))
        continue;
      if (StackProtectorFI == (int)i || EHRegNodeFrameIndex == (int)i)
        continue;
      // Only allocate objects on the default stack.
      if (MFI.getStackID(i) != TargetStackID::Default)
        continue;

      switch (MFI.getObjectSSPLayout(i)) {
      case MachineFrameInfo::SSPLK_None:
        continue;
      case MachineFrameInfo::SSPLK_SmallArray:
        SmallArrayObjs.insert(i);
        continue;
      case MachineFrameInfo::SSPLK_AddrOf:
        AddrOfObjs.insert(i);
        continue;
      case MachineFrameInfo::SSPLK_LargeArray:
        LargeArrayObjs.insert(i);
        continue;
      }
      llvm_unreachable("Unexpected SSPLayoutKind.");
    }

    // We expect **all** the protected stack objects to be pre-allocated by
    // LocalStackSlotPass. If it turns out that PEI still has to allocate some
    // of them, we may end up messing up the expected order of the objects.
    if (MFI.getUseLocalStackAllocationBlock() &&
        !(LargeArrayObjs.empty() && SmallArrayObjs.empty() &&
          AddrOfObjs.empty()))
      llvm_unreachable("Found protected stack objects not pre-allocated by "
                       "LocalStackSlotPass.");

    AssignProtectedObjSet(LargeArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
                          Offset, MaxAlign, Skew);
    AssignProtectedObjSet(SmallArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
                          Offset, MaxAlign, Skew);
    AssignProtectedObjSet(AddrOfObjs, ProtectedObjs, MFI, StackGrowsDown,
                          Offset, MaxAlign, Skew);
  }

  SmallVector<int, 8> ObjectsToAllocate;

  // Then prepare to assign frame offsets to stack objects that are not used to
  // spill callee saved registers.
  for (unsigned i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) {
    if (MFI.isObjectPreAllocated(i) && MFI.getUseLocalStackAllocationBlock())
      continue;
    if (i >= MinCSFrameIndex && i <= MaxCSFrameIndex)
      continue;
    if (RS && RS->isScavengingFrameIndex((int)i))
      continue;
    if (MFI.isDeadObjectIndex(i))
      continue;
    if (MFI.getStackProtectorIndex() == (int)i || EHRegNodeFrameIndex == (int)i)
      continue;
    if (ProtectedObjs.count(i))
      continue;
    // Only allocate objects on the default stack.
    if (MFI.getStackID(i) != TargetStackID::Default)
      continue;

    // Add the objects that we need to allocate to our working set.
    ObjectsToAllocate.push_back(i);
  }

  // Allocate the EH registration node first if one is present.
  if (EHRegNodeFrameIndex != std::numeric_limits<int>::max())
    AdjustStackOffset(MFI, EHRegNodeFrameIndex, StackGrowsDown, Offset,
                      MaxAlign, Skew);

  // Give the targets a chance to order the objects the way they like it.
  if (MF.getTarget().getOptLevel() != CodeGenOpt::None &&
      MF.getTarget().Options.StackSymbolOrdering)
    TFI.orderFrameObjects(MF, ObjectsToAllocate);

  // Keep track of which bytes in the fixed and callee-save range are used so
  // we can use the holes when allocating later stack objects. Only do this if
  // the stack protector isn't being used, the target requests it, and we're
  // optimizing.
  BitVector StackBytesFree;
  if (!ObjectsToAllocate.empty() &&
      MF.getTarget().getOptLevel() != CodeGenOpt::None &&
      MFI.getStackProtectorIndex() < 0 && TFI.enableStackSlotScavenging(MF))
    computeFreeStackSlots(MFI, StackGrowsDown, MinCSFrameIndex, MaxCSFrameIndex,
                          FixedCSEnd, StackBytesFree);

  // Now walk the objects and actually assign base offsets to them.
  for (auto &Object : ObjectsToAllocate)
    if (!scavengeStackSlot(MFI, Object, StackGrowsDown, MaxAlign,
                           StackBytesFree))
      AdjustStackOffset(MFI, Object, StackGrowsDown, Offset, MaxAlign, Skew);

  // Make sure the special register scavenging spill slot is closest to the
  // stack pointer.
  if (RS && !EarlyScavengingSlots) {
    SmallVector<int, 2> SFIs;
    RS->getScavengingFrameIndices(SFIs);
    for (int SFI : SFIs)
      AdjustStackOffset(MFI, SFI, StackGrowsDown, Offset, MaxAlign, Skew);
  }

  if (!TFI.targetHandlesStackFrameRounding()) {
    // If we have reserved argument space for call sites in the function
    // immediately on entry to the current function, count it as part of the
    // overall stack size.
    if (MFI.adjustsStack() && TFI.hasReservedCallFrame(MF))
      Offset += MFI.getMaxCallFrameSize();

    // Round up the size to a multiple of the alignment. If the function has
    // any calls or alloca's, align to the target's StackAlignment value to
    // ensure that the callee's frame or the alloca data is suitably aligned;
    // otherwise, for leaf functions, align to the TransientStackAlignment
    // value.
    Align StackAlign;
    if (MFI.adjustsStack() || MFI.hasVarSizedObjects() ||
        (RegInfo->hasStackRealignment(MF) && MFI.getObjectIndexEnd() != 0))
      StackAlign = TFI.getStackAlign();
    else
      StackAlign = TFI.getTransientStackAlign();

    // If the frame pointer is eliminated, all frame offsets will be relative
    // to SP not FP. Align to MaxAlign so this works.
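    // (Illustrative worked example, not from the original source: if the
    // objects above pushed Offset to 44 and StackAlign resolves to 16 with no
    // skew, the alignTo call below rounds the frame size up to 48.)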
    StackAlign = std::max(StackAlign, MaxAlign);
    int64_t OffsetBeforeAlignment = Offset;
    Offset = alignTo(Offset, StackAlign, Skew);

    // If we have increased the offset to fulfill the alignment constraints,
    // then the scavenging spill slots may become harder to reach from the
    // stack pointer, so float them so they stay close.
    if (StackGrowsDown && OffsetBeforeAlignment != Offset && RS &&
        !EarlyScavengingSlots) {
      SmallVector<int, 2> SFIs;
      RS->getScavengingFrameIndices(SFIs);
      LLVM_DEBUG(if (!SFIs.empty()) llvm::dbgs()
                 << "Adjusting emergency spill slots!\n";);
      int64_t Delta = Offset - OffsetBeforeAlignment;
      for (int SFI : SFIs) {
        LLVM_DEBUG(llvm::dbgs()
                   << "Adjusting offset of emergency spill slot #" << SFI
                   << " from " << MFI.getObjectOffset(SFI););
        MFI.setObjectOffset(SFI, MFI.getObjectOffset(SFI) - Delta);
        LLVM_DEBUG(llvm::dbgs() << " to " << MFI.getObjectOffset(SFI) << "\n";);
      }
    }
  }

  // Update frame info to pretend that this is part of the stack...
  int64_t StackSize = Offset - LocalAreaOffset;
  MFI.setStackSize(StackSize);
  NumBytesStackSpace += StackSize;
}

/// insertPrologEpilogCode - Scan the function for modified callee saved
/// registers, insert spill code for these callee saved registers, then add
/// prolog and epilog code to the function.
void PEI::insertPrologEpilogCode(MachineFunction &MF) {
  const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();

  // Add prologue to the function...
  for (MachineBasicBlock *SaveBlock : SaveBlocks)
    TFI.emitPrologue(MF, *SaveBlock);

  // Add epilogue to restore the callee-save registers in each exiting block.
  for (MachineBasicBlock *RestoreBlock : RestoreBlocks)
    TFI.emitEpilogue(MF, *RestoreBlock);

  // Zero call used registers before restoring callee-saved registers.
  insertZeroCallUsedRegs(MF);

  for (MachineBasicBlock *SaveBlock : SaveBlocks)
    TFI.inlineStackProbe(MF, *SaveBlock);

  // Emit additional code that is required to support segmented stacks, if
  // we've been asked for it. This, when linked with a runtime with support
  // for segmented stacks (libgcc is one), will result in allocating stack
  // space in small chunks instead of one large contiguous block.
  if (MF.shouldSplitStack()) {
    for (MachineBasicBlock *SaveBlock : SaveBlocks)
      TFI.adjustForSegmentedStacks(MF, *SaveBlock);
  }

  // Emit additional code that is required to explicitly handle the stack in
  // HiPE native code (if needed) when loaded in the Erlang/OTP runtime. The
  // approach is rather similar to that of Segmented Stacks, but it uses a
  // different conditional check and another BIF for allocating more stack
  // space.
  if (MF.getFunction().getCallingConv() == CallingConv::HiPE)
    for (MachineBasicBlock *SaveBlock : SaveBlocks)
      TFI.adjustForHiPEPrologue(MF, *SaveBlock);
}

/// insertZeroCallUsedRegs - Zero out call used registers.
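///
/// (Illustrative note, not from the original source: the "zero-call-used-regs"
/// string attribute matched below is what Clang emits for
/// __attribute__((zero_call_used_regs("used-gpr"))) and the corresponding
/// -fzero-call-used-regs= option.)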
void PEI::insertZeroCallUsedRegs(MachineFunction &MF) {
  const Function &F = MF.getFunction();

  if (!F.hasFnAttribute("zero-call-used-regs"))
    return;

  using namespace ZeroCallUsedRegs;

  ZeroCallUsedRegsKind ZeroRegsKind =
      StringSwitch<ZeroCallUsedRegsKind>(
          F.getFnAttribute("zero-call-used-regs").getValueAsString())
          .Case("skip", ZeroCallUsedRegsKind::Skip)
          .Case("used-gpr-arg", ZeroCallUsedRegsKind::UsedGPRArg)
          .Case("used-gpr", ZeroCallUsedRegsKind::UsedGPR)
          .Case("used-arg", ZeroCallUsedRegsKind::UsedArg)
          .Case("used", ZeroCallUsedRegsKind::Used)
          .Case("all-gpr-arg", ZeroCallUsedRegsKind::AllGPRArg)
          .Case("all-gpr", ZeroCallUsedRegsKind::AllGPR)
          .Case("all-arg", ZeroCallUsedRegsKind::AllArg)
          .Case("all", ZeroCallUsedRegsKind::All);

  if (ZeroRegsKind == ZeroCallUsedRegsKind::Skip)
    return;

  const bool OnlyGPR = static_cast<unsigned>(ZeroRegsKind) & ONLY_GPR;
  const bool OnlyUsed = static_cast<unsigned>(ZeroRegsKind) & ONLY_USED;
  const bool OnlyArg = static_cast<unsigned>(ZeroRegsKind) & ONLY_ARG;

  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
  const BitVector AllocatableSet(TRI.getAllocatableSet(MF));

  // Mark all used registers.
  BitVector UsedRegs(TRI.getNumRegs());
  if (OnlyUsed)
    for (const MachineBasicBlock &MBB : MF)
      for (const MachineInstr &MI : MBB)
        for (const MachineOperand &MO : MI.operands()) {
          if (!MO.isReg())
            continue;

          MCRegister Reg = MO.getReg();
          if (AllocatableSet[Reg] && !MO.isImplicit() &&
              (MO.isDef() || MO.isUse()))
            UsedRegs.set(Reg);
        }

  // Collect the registers that are live into the entry block (typically the
  // argument registers).
  BitVector LiveIns(TRI.getNumRegs());
  for (const MachineBasicBlock::RegisterMaskPair &LI : MF.front().liveins())
    LiveIns.set(LI.PhysReg);

  BitVector RegsToZero(TRI.getNumRegs());
  for (MCRegister Reg : AllocatableSet.set_bits()) {
    // Skip over fixed registers.
    if (TRI.isFixedRegister(MF, Reg))
      continue;

    // Want only general purpose registers.
    if (OnlyGPR && !TRI.isGeneralPurposeRegister(MF, Reg))
      continue;

    // Want only used registers.
    if (OnlyUsed && !UsedRegs[Reg])
      continue;

    // Want only registers used for arguments.
    if (OnlyArg) {
      if (OnlyUsed) {
        if (!LiveIns[Reg])
          continue;
      } else if (!TRI.isArgumentRegister(MF, Reg)) {
        continue;
      }
    }

    RegsToZero.set(Reg);
  }

  // Don't clear registers that are live when leaving the function.
  for (const MachineBasicBlock &MBB : MF)
    for (const MachineInstr &MI : MBB.terminators()) {
      if (!MI.isReturn())
        continue;

      for (const auto &MO : MI.operands()) {
        if (!MO.isReg())
          continue;

        for (MCPhysReg SReg : TRI.sub_and_superregs_inclusive(MO.getReg()))
          RegsToZero.reset(SReg);
      }
    }

  // Don't need to clear registers that are used/clobbered by terminating
  // instructions.
  for (const MachineBasicBlock &MBB : MF) {
    if (!MBB.isReturnBlock())
      continue;

    MachineBasicBlock::const_iterator MBBI = MBB.getFirstTerminator();
    for (MachineBasicBlock::const_iterator I = MBBI, E = MBB.end(); I != E;
         ++I) {
      for (const MachineOperand &MO : I->operands()) {
        if (!MO.isReg())
          continue;

        for (const MCPhysReg &Reg :
             TRI.sub_and_superregs_inclusive(MO.getReg()))
          RegsToZero.reset(Reg);
      }
    }
  }

  // Don't clear registers that must be preserved.
  for (const MCPhysReg *CSRegs = TRI.getCalleeSavedRegs(&MF);
       MCPhysReg CSReg = *CSRegs; ++CSRegs)
    for (MCRegister Reg : TRI.sub_and_superregs_inclusive(CSReg))
      RegsToZero.reset(Reg);

  const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();
  for (MachineBasicBlock &MBB : MF)
    if (MBB.isReturnBlock())
      TFI.emitZeroCallUsedRegs(RegsToZero, MBB);
}

/// replaceFrameIndices - Replace all MO_FrameIndex operands with physical
/// register references and actual offsets.
void PEI::replaceFrameIndices(MachineFunction &MF) {
  const auto &ST = MF.getSubtarget();
  const TargetFrameLowering &TFI = *ST.getFrameLowering();
  if (!TFI.needsFrameIndexResolution(MF))
    return;

  const TargetRegisterInfo *TRI = ST.getRegisterInfo();

  // Allow the target to determine this after knowing the frame size.
  FrameIndexEliminationScavenging = (RS && !FrameIndexVirtualScavenging) ||
      TRI->requiresFrameIndexReplacementScavenging(MF);

  // Store SPAdj at exit of a basic block.
  SmallVector<int, 8> SPState;
  SPState.resize(MF.getNumBlockIDs());
  df_iterator_default_set<MachineBasicBlock *> Reachable;

  // Iterate over the reachable blocks in DFS order.
  for (auto DFI = df_ext_begin(&MF, Reachable), DFE = df_ext_end(&MF, Reachable);
       DFI != DFE; ++DFI) {
    int SPAdj = 0;
    // Check the exit state of the DFS stack predecessor.
    if (DFI.getPathLength() >= 2) {
      MachineBasicBlock *StackPred = DFI.getPath(DFI.getPathLength() - 2);
      assert(Reachable.count(StackPred) &&
             "DFS stack predecessor is already visited.\n");
      SPAdj = SPState[StackPred->getNumber()];
    }
    MachineBasicBlock *BB = *DFI;
    replaceFrameIndices(BB, MF, SPAdj);
    SPState[BB->getNumber()] = SPAdj;
  }

  // Handle the unreachable blocks.
  for (auto &BB : MF) {
    if (Reachable.count(&BB))
      // Already handled in DFS traversal.
      continue;
    int SPAdj = 0;
    replaceFrameIndices(&BB, MF, SPAdj);
  }
}

void PEI::replaceFrameIndices(MachineBasicBlock *BB, MachineFunction &MF,
                              int &SPAdj) {
  assert(MF.getSubtarget().getRegisterInfo() &&
         "getRegisterInfo() must be implemented!");
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();

  if (RS && FrameIndexEliminationScavenging)
    RS->enterBasicBlock(*BB);

  bool InsideCallSequence = false;

  for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ) {
    if (TII.isFrameInstr(*I)) {
      InsideCallSequence = TII.isFrameSetup(*I);
      SPAdj += TII.getSPAdjust(*I);
      I = TFI->eliminateCallFramePseudoInstr(MF, *BB, I);
      continue;
    }

    MachineInstr &MI = *I;
    bool DoIncr = true;
    bool DidFinishLoop = true;
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      if (!MI.getOperand(i).isFI())
        continue;

      // Frame indices in debug values are encoded in a target independent
      // way with simply the frame index and offset rather than any
      // target-specific addressing mode.
      if (MI.isDebugValue()) {
        MachineOperand &Op = MI.getOperand(i);
        assert(
            MI.isDebugOperand(&Op) &&
            "Frame indices can only appear as a debug operand in a DBG_VALUE*"
            " machine instruction");
        Register Reg;
        unsigned FrameIdx = Op.getIndex();
        unsigned Size = MF.getFrameInfo().getObjectSize(FrameIdx);

        StackOffset Offset =
            TFI->getFrameIndexReference(MF, FrameIdx, Reg);
        Op.ChangeToRegister(Reg, false /*isDef*/);

        const DIExpression *DIExpr = MI.getDebugExpression();

        // If we have a direct DBG_VALUE, and its location expression isn't
        // currently complex, then adding an offset will morph it into a
        // complex location that is interpreted as being a memory address.
        // This changes a pointer-valued variable to dereference that pointer,
        // which is incorrect. Fix by adding DW_OP_stack_value.

        if (MI.isNonListDebugValue()) {
          unsigned PrependFlags = DIExpression::ApplyOffset;
          if (!MI.isIndirectDebugValue() && !DIExpr->isComplex())
            PrependFlags |= DIExpression::StackValue;

          // If we have a DBG_VALUE that is indirect and has an implicit
          // location expression, we need to insert a deref before prepending
          // a memory location expression. Also, after doing this we change
          // the DBG_VALUE to be direct.
          if (MI.isIndirectDebugValue() && DIExpr->isImplicit()) {
            SmallVector<uint64_t, 2> Ops = {dwarf::DW_OP_deref_size, Size};
            bool WithStackValue = true;
            DIExpr = DIExpression::prependOpcodes(DIExpr, Ops, WithStackValue);
            // Make the DBG_VALUE direct.
            MI.getDebugOffset().ChangeToRegister(0, false);
          }
          DIExpr = TRI.prependOffsetExpression(DIExpr, PrependFlags, Offset);
        } else {
          // The debug operand at DebugOpIndex was a frame index at offset
          // `Offset`; now that the operand has been replaced with the frame
          // register, we must add Offset with `register x, plus Offset`.
          unsigned DebugOpIndex = MI.getDebugOperandIndex(&Op);
          SmallVector<uint64_t, 3> Ops;
          TRI.getOffsetOpcodes(Offset, Ops);
          DIExpr = DIExpression::appendOpsToArg(DIExpr, Ops, DebugOpIndex);
        }
        MI.getDebugExpressionOp().setMetadata(DIExpr);
        continue;
      } else if (MI.isDebugPHI()) {
        // Allow stack ref to continue onwards.
        continue;
      }

      // TODO: This code should be commoned with the code for
      // PATCHPOINT. There's no good reason for the difference in
      // implementation other than historical accident. The only
      // remaining difference is the unconditional use of the stack
      // pointer as the base register.
      if (MI.getOpcode() == TargetOpcode::STATEPOINT) {
        assert((!MI.isDebugValue() || i == 0) &&
               "Frame indices can only appear as the first operand of a "
               "DBG_VALUE machine instruction");
        Register Reg;
        MachineOperand &Offset = MI.getOperand(i + 1);
        StackOffset refOffset = TFI->getFrameIndexReferencePreferSP(
            MF, MI.getOperand(i).getIndex(), Reg, /*IgnoreSPUpdates*/ false);
        assert(!refOffset.getScalable() &&
               "Frame offsets with a scalable component are not supported");
        Offset.setImm(Offset.getImm() + refOffset.getFixed() + SPAdj);
        MI.getOperand(i).ChangeToRegister(Reg, false /*isDef*/);
        continue;
      }

      // Some instructions (e.g. inline asm instructions) can have
      // multiple frame indices and/or cause eliminateFrameIndex
      // to insert more than one instruction. We need the register
      // scavenger to go through all of these instructions so that
      // it can update its register information. We keep the
      // iterator at the point before insertion so that we can
      // revisit them in full.
      bool AtBeginning = (I == BB->begin());
      if (!AtBeginning) --I;

      // If this instruction has a FrameIndex operand, we need to
      // use that target machine register info object to eliminate
      // it.
      TRI.eliminateFrameIndex(MI, SPAdj, i,
                              FrameIndexEliminationScavenging ? RS : nullptr);

      // Reset the iterator if we were at the beginning of the BB.
      if (AtBeginning) {
        I = BB->begin();
        DoIncr = false;
      }

      DidFinishLoop = false;
      break;
    }

    // If we are looking at a call sequence, we need to keep track of
    // the SP adjustment made by each instruction in the sequence.
    // This includes both the frame setup/destroy pseudos (handled above),
    // as well as other instructions that have side effects w.r.t the SP.
    // Note that this must come after eliminateFrameIndex, because
    // if I itself referred to a frame index, we shouldn't count its own
    // adjustment.
    if (DidFinishLoop && InsideCallSequence)
      SPAdj += TII.getSPAdjust(MI);

    if (DoIncr && I != BB->end()) ++I;

    // Update register states.
    if (RS && FrameIndexEliminationScavenging && DidFinishLoop)
      RS->forward(MI);
  }
}