//===- PrologEpilogInserter.cpp - Insert Prolog/Epilog code in function ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass is responsible for finalizing the function's frame layout, saving
// callee-saved registers, and emitting prolog & epilog code for the
// function.
//
// This pass must be run after register allocation. After this pass is
// executed, it is illegal to construct MO_FrameIndex operands.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <functional>
#include <limits>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "prologepilog"

using MBBVector = SmallVector<MachineBasicBlock *, 4>;

STATISTIC(NumLeafFuncWithSpills, "Number of leaf functions with CSRs");
STATISTIC(NumFuncSeen, "Number of functions seen in PEI");


namespace {

class PEI : public MachineFunctionPass {
public:
  static char ID;

  PEI() : MachineFunctionPass(ID) {
    initializePEIPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  /// runOnMachineFunction - Insert prolog/epilog code and replace abstract
  /// frame indexes with appropriate references.
  bool runOnMachineFunction(MachineFunction &MF) override;

private:
  RegScavenger *RS = nullptr;

  // MinCSFrameIndex, MaxCSFrameIndex - Keep the range of callee saved
  // stack frame indexes.
  unsigned MinCSFrameIndex = std::numeric_limits<unsigned>::max();
  unsigned MaxCSFrameIndex = 0;

  // Save and Restore blocks of the current function. Typically there is a
  // single save block, unless Windows EH funclets are involved.
  MBBVector SaveBlocks;
  MBBVector RestoreBlocks;

  // Flag to control whether to use the register scavenger to resolve
  // frame index materialization registers. Set according to
  // TRI->requiresFrameIndexScavenging() for the current function.
  bool FrameIndexVirtualScavenging = false;

  // Flag to control whether the scavenger should be passed even though
  // FrameIndexVirtualScavenging is used.
  bool FrameIndexEliminationScavenging = false;

  // Emit remarks.
  MachineOptimizationRemarkEmitter *ORE = nullptr;

  void calculateCallFrameInfo(MachineFunction &MF);
  void calculateSaveRestoreBlocks(MachineFunction &MF);
  void spillCalleeSavedRegs(MachineFunction &MF);

  void calculateFrameObjectOffsets(MachineFunction &MF);
  void replaceFrameIndices(MachineFunction &MF);
  void replaceFrameIndices(MachineBasicBlock *BB, MachineFunction &MF,
                           int &SPAdj);
  // Frame indices in debug values are encoded in a target-independent
  // way with simply the frame index and offset rather than any
  // target-specific addressing mode.
  bool replaceFrameIndexDebugInstr(MachineFunction &MF, MachineInstr &MI,
                                   unsigned OpIdx, int SPAdj = 0);
  // Does the same as replaceFrameIndices but using a backward MIR walk and
  // a backward register scavenger walk.
  void replaceFrameIndicesBackward(MachineFunction &MF);
  void replaceFrameIndicesBackward(MachineBasicBlock *BB, MachineFunction &MF,
                                   int &SPAdj);

  void insertPrologEpilogCode(MachineFunction &MF);
  void insertZeroCallUsedRegs(MachineFunction &MF);
};

} // end anonymous namespace

char PEI::ID = 0;

char &llvm::PrologEpilogCodeInserterID = PEI::ID;

INITIALIZE_PASS_BEGIN(PEI, DEBUG_TYPE, "Prologue/Epilogue Insertion", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MachineOptimizationRemarkEmitterPass)
INITIALIZE_PASS_END(PEI, DEBUG_TYPE,
                    "Prologue/Epilogue Insertion & Frame Finalization", false,
                    false)

MachineFunctionPass *llvm::createPrologEpilogInserterPass() {
  return new PEI();
}

STATISTIC(NumBytesStackSpace,
          "Number of bytes used for stack in all functions");

void PEI::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addPreserved<MachineLoopInfoWrapperPass>();
  AU.addPreserved<MachineDominatorTreeWrapperPass>();
  AU.addRequired<MachineOptimizationRemarkEmitterPass>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

/// StackObjSet - A set of stack object indexes
using StackObjSet = SmallSetVector<int, 8>;

using SavedDbgValuesMap =
    SmallDenseMap<MachineBasicBlock *, SmallVector<MachineInstr *, 4>, 4>;

/// Stash DBG_VALUEs that describe parameters and which are placed at the start
/// of the block. Later on, after the prologue code has been emitted, the
/// stashed DBG_VALUEs will be reinserted at the start of the block.
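/// Only DBG_VALUEs that describe parameters are considered; values that refer
/// to a frame index are left in place, since their locations only become valid
/// after the frame has been set up.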
static void stashEntryDbgValues(MachineBasicBlock &MBB,
                                SavedDbgValuesMap &EntryDbgValues) {
  SmallVector<const MachineInstr *, 4> FrameIndexValues;

  for (auto &MI : MBB) {
    if (!MI.isDebugInstr())
      break;
    if (!MI.isDebugValue() || !MI.getDebugVariable()->isParameter())
      continue;
    if (any_of(MI.debug_operands(),
               [](const MachineOperand &MO) { return MO.isFI(); })) {
      // We can only emit valid locations for frame indices after the frame
      // setup, so do not stash them away.
      FrameIndexValues.push_back(&MI);
      continue;
    }
    const DILocalVariable *Var = MI.getDebugVariable();
    const DIExpression *Expr = MI.getDebugExpression();
    auto Overlaps = [Var, Expr](const MachineInstr *DV) {
      return Var == DV->getDebugVariable() &&
             Expr->fragmentsOverlap(DV->getDebugExpression());
    };
    // See if the debug value overlaps with any preceding debug value that will
    // not be stashed. If that is the case, then we can't stash this value, as
    // we would then reorder the values at reinsertion.
    if (llvm::none_of(FrameIndexValues, Overlaps))
      EntryDbgValues[&MBB].push_back(&MI);
  }

  // Remove stashed debug values from the block.
  if (EntryDbgValues.count(&MBB))
    for (auto *MI : EntryDbgValues[&MBB])
      MI->removeFromParent();
}

/// runOnMachineFunction - Insert prolog/epilog code and replace abstract
/// frame indexes with appropriate references.
bool PEI::runOnMachineFunction(MachineFunction &MF) {
  NumFuncSeen++;
  const Function &F = MF.getFunction();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();

  RS = TRI->requiresRegisterScavenging(MF) ? new RegScavenger() : nullptr;
  FrameIndexVirtualScavenging = TRI->requiresFrameIndexScavenging(MF);
  ORE = &getAnalysis<MachineOptimizationRemarkEmitterPass>().getORE();

  // Spill frame pointer and/or base pointer registers if they are clobbered.
  // It is placed before call frame instruction elimination so it will not mess
  // with stack arguments.
  TFI->spillFPBP(MF);

  // Calculate the MaxCallFrameSize value for the function's frame
  // information. Also eliminates call frame pseudo instructions.
  calculateCallFrameInfo(MF);

  // Determine placement of CSR spill/restore code and prolog/epilog code:
  // place all spills in the entry block, all restores in return blocks.
  calculateSaveRestoreBlocks(MF);

  // Stash away DBG_VALUEs that should not be moved by insertion of prolog code.
  SavedDbgValuesMap EntryDbgValues;
  for (MachineBasicBlock *SaveBlock : SaveBlocks)
    stashEntryDbgValues(*SaveBlock, EntryDbgValues);

  // Handle CSR spilling and restoring, for targets that need it.
  if (MF.getTarget().usesPhysRegsForValues())
    spillCalleeSavedRegs(MF);

  // Allow the target machine to make final modifications to the function
  // before the frame layout is finalized.
  TFI->processFunctionBeforeFrameFinalized(MF, RS);

  // Calculate actual frame offsets for all abstract stack objects...
  calculateFrameObjectOffsets(MF);

  // Add prolog and epilog code to the function. This function is required
  // to align the stack frame as necessary for any stack variables or
  // called functions. Because of this, calculateCalleeSavedRegisters()
  // must be called before this function in order to set the AdjustsStack
  // and MaxCallFrameSize variables.
  if (!F.hasFnAttribute(Attribute::Naked))
    insertPrologEpilogCode(MF);

  // Reinsert stashed debug values at the start of the entry blocks.
  for (auto &I : EntryDbgValues)
    I.first->insert(I.first->begin(), I.second.begin(), I.second.end());

  // Allow the target machine to make final modifications to the function
  // before the frame indices are replaced.
  TFI->processFunctionBeforeFrameIndicesReplaced(MF, RS);

  // Replace all MO_FrameIndex operands with physical register references
  // and actual offsets.
  if (TFI->needsFrameIndexResolution(MF)) {
    // Allow the target to determine this after knowing the frame size.
    FrameIndexEliminationScavenging =
        (RS && !FrameIndexVirtualScavenging) ||
        TRI->requiresFrameIndexReplacementScavenging(MF);

    if (TRI->eliminateFrameIndicesBackwards())
      replaceFrameIndicesBackward(MF);
    else
      replaceFrameIndices(MF);
  }

  // If register scavenging is needed, as we've enabled doing it as a
  // post-pass, scavenge the virtual registers that frame index elimination
  // inserted.
  if (TRI->requiresRegisterScavenging(MF) && FrameIndexVirtualScavenging)
    scavengeFrameVirtualRegs(MF, *RS);

  // Warn on stack size when it exceeds the given limit.
  MachineFrameInfo &MFI = MF.getFrameInfo();
  uint64_t StackSize = MFI.getStackSize();

  uint64_t Threshold = TFI->getStackThreshold();
  if (MF.getFunction().hasFnAttribute("warn-stack-size")) {
    bool Failed = MF.getFunction()
                      .getFnAttribute("warn-stack-size")
                      .getValueAsString()
                      .getAsInteger(10, Threshold);
    // Verifier should have caught this.
    assert(!Failed && "Invalid warn-stack-size fn attr value");
    (void)Failed;
  }
  uint64_t UnsafeStackSize = MFI.getUnsafeStackSize();
  if (MF.getFunction().hasFnAttribute(Attribute::SafeStack))
    StackSize += UnsafeStackSize;

  if (StackSize > Threshold) {
    DiagnosticInfoStackSize DiagStackSize(F, StackSize, Threshold, DS_Warning);
    F.getContext().diagnose(DiagStackSize);
    int64_t SpillSize = 0;
    for (int Idx = MFI.getObjectIndexBegin(), End = MFI.getObjectIndexEnd();
         Idx != End; ++Idx) {
      if (MFI.isSpillSlotObjectIndex(Idx))
        SpillSize += MFI.getObjectSize(Idx);
    }

    [[maybe_unused]] float SpillPct =
        static_cast<float>(SpillSize) / static_cast<float>(StackSize);
    LLVM_DEBUG(
        dbgs() << formatv("{0}/{1} ({3:P}) spills, {2}/{1} ({4:P}) variables",
                          SpillSize, StackSize, StackSize - SpillSize, SpillPct,
                          1.0f - SpillPct));
    if (UnsafeStackSize != 0) {
      LLVM_DEBUG(dbgs() << formatv(", {0}/{2} ({1:P}) unsafe stack",
                                   UnsafeStackSize,
                                   static_cast<float>(UnsafeStackSize) /
                                       static_cast<float>(StackSize),
                                   StackSize));
    }
    LLVM_DEBUG(dbgs() << "\n");
  }

  ORE->emit([&]() {
    return MachineOptimizationRemarkAnalysis(DEBUG_TYPE, "StackSize",
                                             MF.getFunction().getSubprogram(),
                                             &MF.front())
           << ore::NV("NumStackBytes", StackSize)
           << " stack bytes in function '"
           << ore::NV("Function", MF.getFunction().getName()) << "'";
  });

  // Emit any remarks implemented for the target, based on final frame layout.
  TFI->emitRemarks(MF, ORE);

  delete RS;
  SaveBlocks.clear();
  RestoreBlocks.clear();
  MFI.setSavePoint(nullptr);
  MFI.setRestorePoint(nullptr);
  return true;
}

/// Calculate the MaxCallFrameSize variable for the function's frame
/// information and eliminate call frame pseudo instructions.
void PEI::calculateCallFrameInfo(MachineFunction &MF) {
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // Get the function call frame set-up and tear-down instruction opcodes.
  unsigned FrameSetupOpcode = TII.getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = TII.getCallFrameDestroyOpcode();

  // Early exit for targets which have no call frame setup/destroy pseudo
  // instructions.
  if (FrameSetupOpcode == ~0u && FrameDestroyOpcode == ~0u)
    return;

  // (Re-)Compute the MaxCallFrameSize.
  [[maybe_unused]] uint64_t MaxCFSIn =
      MFI.isMaxCallFrameSizeComputed() ? MFI.getMaxCallFrameSize() : UINT64_MAX;
  std::vector<MachineBasicBlock::iterator> FrameSDOps;
  MFI.computeMaxCallFrameSize(MF, &FrameSDOps);
  assert(MFI.getMaxCallFrameSize() <= MaxCFSIn &&
         "Recomputing MaxCFS gave a larger value.");
  assert((FrameSDOps.empty() || MF.getFrameInfo().adjustsStack()) &&
         "AdjustsStack not set in presence of a frame pseudo instruction.");

  if (TFI->canSimplifyCallFramePseudos(MF)) {
    // If call frames are not being included as part of the stack frame, and
    // the target doesn't indicate otherwise, remove the call frame pseudos
    // here. The sub/add sp instruction pairs are still inserted, but we don't
    // need to track the SP adjustment for frame index elimination.
    for (MachineBasicBlock::iterator I : FrameSDOps)
      TFI->eliminateCallFramePseudoInstr(MF, *I->getParent(), I);

    // We can't track the call frame size after call frame pseudos have been
    // eliminated. Set it to zero everywhere to keep MachineVerifier happy.
    for (MachineBasicBlock &MBB : MF)
      MBB.setCallFrameSize(0);
  }
}

/// Compute the sets of entry and return blocks for saving and restoring
/// callee-saved registers, and placing prolog and epilog code.
void PEI::calculateSaveRestoreBlocks(MachineFunction &MF) {
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // Even when we do not change any CSR, we still want to insert the
  // prologue and epilogue of the function.
  // So set the save points for those.

  // Use the points found by shrink-wrapping, if any.
  if (MFI.getSavePoint()) {
    SaveBlocks.push_back(MFI.getSavePoint());
    assert(MFI.getRestorePoint() && "Both restore and save must be set");
    MachineBasicBlock *RestoreBlock = MFI.getRestorePoint();
    // If RestoreBlock does not have any successor and is not a return block
    // then the end point is unreachable and we do not need to insert any
    // epilogue.
    if (!RestoreBlock->succ_empty() || RestoreBlock->isReturnBlock())
      RestoreBlocks.push_back(RestoreBlock);
    return;
  }

  // Save refs to entry and return blocks.
  SaveBlocks.push_back(&MF.front());
  for (MachineBasicBlock &MBB : MF) {
    if (MBB.isEHFuncletEntry())
      SaveBlocks.push_back(&MBB);
    if (MBB.isReturnBlock())
      RestoreBlocks.push_back(&MBB);
  }
}

static void assignCalleeSavedSpillSlots(MachineFunction &F,
                                        const BitVector &SavedRegs,
                                        unsigned &MinCSFrameIndex,
                                        unsigned &MaxCSFrameIndex) {
  if (SavedRegs.empty())
    return;

  const TargetRegisterInfo *RegInfo = F.getSubtarget().getRegisterInfo();
  const MCPhysReg *CSRegs = F.getRegInfo().getCalleeSavedRegs();
  BitVector CSMask(SavedRegs.size());

  for (unsigned i = 0; CSRegs[i]; ++i)
    CSMask.set(CSRegs[i]);

  std::vector<CalleeSavedInfo> CSI;
  for (unsigned i = 0; CSRegs[i]; ++i) {
    unsigned Reg = CSRegs[i];
    if (SavedRegs.test(Reg)) {
      bool SavedSuper = false;
      for (const MCPhysReg &SuperReg : RegInfo->superregs(Reg)) {
        // Some backends set all aliases for some registers as saved, such as
        // Mips's $fp, so they appear in SavedRegs but not CSRegs.
        if (SavedRegs.test(SuperReg) && CSMask.test(SuperReg)) {
          SavedSuper = true;
          break;
        }
      }

      if (!SavedSuper)
        CSI.push_back(CalleeSavedInfo(Reg));
    }
  }

  const TargetFrameLowering *TFI = F.getSubtarget().getFrameLowering();
  MachineFrameInfo &MFI = F.getFrameInfo();
  if (!TFI->assignCalleeSavedSpillSlots(F, RegInfo, CSI, MinCSFrameIndex,
                                        MaxCSFrameIndex)) {
    // If target doesn't implement this, use generic code.

    if (CSI.empty())
      return; // Early exit if no callee saved registers are modified!

    unsigned NumFixedSpillSlots;
    const TargetFrameLowering::SpillSlot *FixedSpillSlots =
        TFI->getCalleeSavedSpillSlots(NumFixedSpillSlots);

    // Now that we know which registers need to be saved and restored, allocate
    // stack slots for them.
    for (auto &CS : CSI) {
      // If the target has spilled this register to another register, we don't
      // need to allocate a stack slot.
      if (CS.isSpilledToReg())
        continue;

      unsigned Reg = CS.getReg();
      const TargetRegisterClass *RC = RegInfo->getMinimalPhysRegClass(Reg);

      int FrameIdx;
      if (RegInfo->hasReservedSpillSlot(F, Reg, FrameIdx)) {
        CS.setFrameIdx(FrameIdx);
        continue;
      }

      // Check to see if this physreg must be spilled to a particular stack
      // slot on this target.
      const TargetFrameLowering::SpillSlot *FixedSlot = FixedSpillSlots;
      while (FixedSlot != FixedSpillSlots + NumFixedSpillSlots &&
             FixedSlot->Reg != Reg)
        ++FixedSlot;

      unsigned Size = RegInfo->getSpillSize(*RC);
      if (FixedSlot == FixedSpillSlots + NumFixedSpillSlots) {
        // Nope, just spill it anywhere convenient.
        Align Alignment = RegInfo->getSpillAlign(*RC);
        // We may not be able to satisfy the desired alignment specification of
        // the TargetRegisterClass if the stack alignment is smaller. Use the
        // min.
        Alignment = std::min(Alignment, TFI->getStackAlign());
        FrameIdx = MFI.CreateStackObject(Size, Alignment, true);
        if ((unsigned)FrameIdx < MinCSFrameIndex) MinCSFrameIndex = FrameIdx;
        if ((unsigned)FrameIdx > MaxCSFrameIndex) MaxCSFrameIndex = FrameIdx;
      } else {
        // Spill it to the stack where we must.
        FrameIdx = MFI.CreateFixedSpillStackObject(Size, FixedSlot->Offset);
      }

      CS.setFrameIdx(FrameIdx);
    }
  }

  MFI.setCalleeSavedInfo(CSI);
}

/// Helper function to update the liveness information for the callee-saved
/// registers.
static void updateLiveness(MachineFunction &MF) {
  MachineFrameInfo &MFI = MF.getFrameInfo();
  // Visited will contain all the basic blocks that are in the region
  // where the callee saved registers are alive:
  // - Anything that is not Save or Restore -> LiveThrough.
  // - Save -> LiveIn.
  // - Restore -> LiveOut.
  // The live-out is not attached to the block, so no need to keep
  // Restore in this set.
  SmallPtrSet<MachineBasicBlock *, 8> Visited;
  SmallVector<MachineBasicBlock *, 8> WorkList;
  MachineBasicBlock *Entry = &MF.front();
  MachineBasicBlock *Save = MFI.getSavePoint();

  if (!Save)
    Save = Entry;

  if (Entry != Save) {
    WorkList.push_back(Entry);
    Visited.insert(Entry);
  }
  Visited.insert(Save);

  MachineBasicBlock *Restore = MFI.getRestorePoint();
  if (Restore)
    // By construction Restore cannot be visited, otherwise it
    // means there exists a path to Restore that does not go
    // through Save.
    WorkList.push_back(Restore);

  while (!WorkList.empty()) {
    const MachineBasicBlock *CurBB = WorkList.pop_back_val();
    // By construction, the region that is after the save point is
    // dominated by the Save and post-dominated by the Restore.
    if (CurBB == Save && Save != Restore)
      continue;
    // Enqueue all the successors not already visited.
    // Those are by construction either before Save or after Restore.
    for (MachineBasicBlock *SuccBB : CurBB->successors())
      if (Visited.insert(SuccBB).second)
        WorkList.push_back(SuccBB);
  }

  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();

  MachineRegisterInfo &MRI = MF.getRegInfo();
  for (const CalleeSavedInfo &I : CSI) {
    for (MachineBasicBlock *MBB : Visited) {
      MCPhysReg Reg = I.getReg();
      // Add the callee-saved register as live-in.
      // It's killed at the spill.
      if (!MRI.isReserved(Reg) && !MBB->isLiveIn(Reg))
        MBB->addLiveIn(Reg);
    }
    // If callee-saved register is spilled to another register rather than
    // spilling to stack, the destination register has to be marked as live for
    // each MBB between the prologue and epilogue so that it is not clobbered
    // before it is reloaded in the epilogue. The Visited set contains all
    // blocks outside of the region delimited by prologue/epilogue.
    if (I.isSpilledToReg()) {
      for (MachineBasicBlock &MBB : MF) {
        if (Visited.count(&MBB))
          continue;
        MCPhysReg DstReg = I.getDstReg();
        if (!MBB.isLiveIn(DstReg))
          MBB.addLiveIn(DstReg);
      }
    }
  }
}

/// Insert spill code for the callee-saved registers used in the function.
static void insertCSRSaves(MachineBasicBlock &SaveBlock,
                           ArrayRef<CalleeSavedInfo> CSI) {
  MachineFunction &MF = *SaveBlock.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  MachineBasicBlock::iterator I = SaveBlock.begin();
  if (!TFI->spillCalleeSavedRegisters(SaveBlock, I, CSI, TRI)) {
    for (const CalleeSavedInfo &CS : CSI) {
      // Insert the spill to the stack frame.
      unsigned Reg = CS.getReg();

      if (CS.isSpilledToReg()) {
        BuildMI(SaveBlock, I, DebugLoc(),
                TII.get(TargetOpcode::COPY), CS.getDstReg())
            .addReg(Reg, getKillRegState(true));
      } else {
        const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
        TII.storeRegToStackSlot(SaveBlock, I, Reg, true, CS.getFrameIdx(), RC,
                                TRI, Register());
      }
    }
  }
}

/// Insert restore code for the callee-saved registers used in the function.
static void insertCSRRestores(MachineBasicBlock &RestoreBlock,
                              std::vector<CalleeSavedInfo> &CSI) {
  MachineFunction &MF = *RestoreBlock.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  // Restore all registers immediately before the return and any
  // terminators that precede it.
  MachineBasicBlock::iterator I = RestoreBlock.getFirstTerminator();

  if (!TFI->restoreCalleeSavedRegisters(RestoreBlock, I, CSI, TRI)) {
    for (const CalleeSavedInfo &CI : reverse(CSI)) {
      unsigned Reg = CI.getReg();
      if (CI.isSpilledToReg()) {
        BuildMI(RestoreBlock, I, DebugLoc(), TII.get(TargetOpcode::COPY), Reg)
            .addReg(CI.getDstReg(), getKillRegState(true));
      } else {
        const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
        TII.loadRegFromStackSlot(RestoreBlock, I, Reg, CI.getFrameIdx(), RC,
                                 TRI, Register());
        assert(I != RestoreBlock.begin() &&
               "loadRegFromStackSlot didn't insert any code!");
        // Insert in reverse order. loadRegFromStackSlot can insert
        // multiple instructions.
      }
    }
  }
}

void PEI::spillCalleeSavedRegs(MachineFunction &MF) {
  // We can't list this requirement in getRequiredProperties because some
  // targets (WebAssembly) use virtual registers past this point, and the pass
  // pipeline is set up without giving the passes a chance to look at the
  // TargetMachine.
  // FIXME: Find a way to express this in getRequiredProperties.
  assert(MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::NoVRegs));

  const Function &F = MF.getFunction();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MinCSFrameIndex = std::numeric_limits<unsigned>::max();
  MaxCSFrameIndex = 0;

  // Determine which of the registers in the callee save list should be saved.
  BitVector SavedRegs;
  TFI->determineCalleeSaves(MF, SavedRegs, RS);

  // Assign stack slots for any callee-saved registers that must be spilled.
  assignCalleeSavedSpillSlots(MF, SavedRegs, MinCSFrameIndex, MaxCSFrameIndex);

  // Add the code to save and restore the callee saved registers.
  if (!F.hasFnAttribute(Attribute::Naked)) {
    MFI.setCalleeSavedInfoValid(true);

    std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
    if (!CSI.empty()) {
      if (!MFI.hasCalls())
        NumLeafFuncWithSpills++;

      for (MachineBasicBlock *SaveBlock : SaveBlocks)
        insertCSRSaves(*SaveBlock, CSI);

      // Update the live-in information of all the blocks up to the save point.
      updateLiveness(MF);

      for (MachineBasicBlock *RestoreBlock : RestoreBlocks)
        insertCSRRestores(*RestoreBlock, CSI);
    }
  }
}

/// AdjustStackOffset - Helper function used to adjust the stack frame offset.
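/// For a downward-growing stack the object is placed at -Offset (below the
/// local area); otherwise it is placed at Offset and Offset is advanced past
/// the object. MaxAlign is raised to the object's alignment if necessary.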
static inline void AdjustStackOffset(MachineFrameInfo &MFI, int FrameIdx,
                                     bool StackGrowsDown, int64_t &Offset,
                                     Align &MaxAlign) {
  // If the stack grows down, add the object size to find the lowest address.
  if (StackGrowsDown)
    Offset += MFI.getObjectSize(FrameIdx);

  Align Alignment = MFI.getObjectAlign(FrameIdx);

  // If the alignment of this object is greater than that of the stack, then
  // increase the stack alignment to match.
  MaxAlign = std::max(MaxAlign, Alignment);

  // Adjust to alignment boundary.
  Offset = alignTo(Offset, Alignment);

  if (StackGrowsDown) {
    LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << -Offset
                      << "]\n");
    MFI.setObjectOffset(FrameIdx, -Offset); // Set the computed offset
  } else {
    LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << Offset
                      << "]\n");
    MFI.setObjectOffset(FrameIdx, Offset);
    Offset += MFI.getObjectSize(FrameIdx);
  }
}

/// Compute which bytes of fixed and callee-save stack area are unused and keep
/// track of them in StackBytesFree.
static inline void
computeFreeStackSlots(MachineFrameInfo &MFI, bool StackGrowsDown,
                      unsigned MinCSFrameIndex, unsigned MaxCSFrameIndex,
                      int64_t FixedCSEnd, BitVector &StackBytesFree) {
  // Avoid undefined int64_t -> int conversion below in extreme case.
  if (FixedCSEnd > std::numeric_limits<int>::max())
    return;

  StackBytesFree.resize(FixedCSEnd, true);

  SmallVector<int, 16> AllocatedFrameSlots;
  // Add fixed objects.
  for (int i = MFI.getObjectIndexBegin(); i != 0; ++i)
    // StackSlot scavenging is only implemented for the default stack.
    if (MFI.getStackID(i) == TargetStackID::Default)
      AllocatedFrameSlots.push_back(i);
  // Add callee-save objects if there are any.
  if (MinCSFrameIndex <= MaxCSFrameIndex) {
    for (int i = MinCSFrameIndex; i <= (int)MaxCSFrameIndex; ++i)
      if (MFI.getStackID(i) == TargetStackID::Default)
        AllocatedFrameSlots.push_back(i);
  }

  for (int i : AllocatedFrameSlots) {
    // These are converted from int64_t, but they should always fit in int
    // because of the FixedCSEnd check above.
    int ObjOffset = MFI.getObjectOffset(i);
    int ObjSize = MFI.getObjectSize(i);
    int ObjStart, ObjEnd;
    if (StackGrowsDown) {
      // ObjOffset is negative when StackGrowsDown is true.
      ObjStart = -ObjOffset - ObjSize;
      ObjEnd = -ObjOffset;
    } else {
      ObjStart = ObjOffset;
      ObjEnd = ObjOffset + ObjSize;
    }
    // Ignore fixed holes that are in the previous stack frame.
    if (ObjEnd > 0)
      StackBytesFree.reset(ObjStart, ObjEnd);
  }
}

/// Assign frame object to an unused portion of the stack in the fixed stack
/// object range. Return true if the allocation was successful.
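/// Free bytes are tracked in StackBytesFree (populated by
/// computeFreeStackSlots above); bytes claimed here are cleared from it.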
static inline bool scavengeStackSlot(MachineFrameInfo &MFI, int FrameIdx,
                                     bool StackGrowsDown, Align MaxAlign,
                                     BitVector &StackBytesFree) {
  if (MFI.isVariableSizedObjectIndex(FrameIdx))
    return false;

  if (StackBytesFree.none()) {
    // clear it to speed up later scavengeStackSlot calls to
    // StackBytesFree.none()
    StackBytesFree.clear();
    return false;
  }

  Align ObjAlign = MFI.getObjectAlign(FrameIdx);
  if (ObjAlign > MaxAlign)
    return false;

  int64_t ObjSize = MFI.getObjectSize(FrameIdx);
  int FreeStart;
  for (FreeStart = StackBytesFree.find_first(); FreeStart != -1;
       FreeStart = StackBytesFree.find_next(FreeStart)) {

    // Check that free space has suitable alignment.
    unsigned ObjStart = StackGrowsDown ? FreeStart + ObjSize : FreeStart;
    if (alignTo(ObjStart, ObjAlign) != ObjStart)
      continue;

    if (FreeStart + ObjSize > StackBytesFree.size())
      return false;

    bool AllBytesFree = true;
    for (unsigned Byte = 0; Byte < ObjSize; ++Byte)
      if (!StackBytesFree.test(FreeStart + Byte)) {
        AllBytesFree = false;
        break;
      }
    if (AllBytesFree)
      break;
  }

  if (FreeStart == -1)
    return false;

  if (StackGrowsDown) {
    int ObjStart = -(FreeStart + ObjSize);
    LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") scavenged at SP["
                      << ObjStart << "]\n");
    MFI.setObjectOffset(FrameIdx, ObjStart);
  } else {
    LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") scavenged at SP["
                      << FreeStart << "]\n");
    MFI.setObjectOffset(FrameIdx, FreeStart);
  }

  StackBytesFree.reset(FreeStart, FreeStart + ObjSize);
  return true;
}

/// AssignProtectedObjSet - Helper function to assign large stack objects (i.e.,
/// those required to be close to the Stack Protector) to stack offsets.
static void AssignProtectedObjSet(const StackObjSet &UnassignedObjs,
                                  SmallSet<int, 16> &ProtectedObjs,
                                  MachineFrameInfo &MFI, bool StackGrowsDown,
                                  int64_t &Offset, Align &MaxAlign) {

  for (int i : UnassignedObjs) {
    AdjustStackOffset(MFI, i, StackGrowsDown, Offset, MaxAlign);
    ProtectedObjs.insert(i);
  }
}

/// calculateFrameObjectOffsets - Calculate actual frame offsets for all of the
/// abstract stack objects.
void PEI::calculateFrameObjectOffsets(MachineFunction &MF) {
  const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();

  bool StackGrowsDown =
      TFI.getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;

  // Loop over all of the stack objects, assigning sequential addresses...
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // Start at the beginning of the local area.
  // The Offset is the distance from the stack top in the direction
  // of stack growth -- so it's always nonnegative.
  int LocalAreaOffset = TFI.getOffsetOfLocalArea();
  if (StackGrowsDown)
    LocalAreaOffset = -LocalAreaOffset;
  assert(LocalAreaOffset >= 0 &&
         "Local area offset should be in direction of stack growth");
  int64_t Offset = LocalAreaOffset;

#ifdef EXPENSIVE_CHECKS
  for (unsigned i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i)
    if (!MFI.isDeadObjectIndex(i) &&
        MFI.getStackID(i) == TargetStackID::Default)
      assert(MFI.getObjectAlign(i) <= MFI.getMaxAlign() &&
             "MaxAlignment is invalid");
#endif

  // If there are fixed sized objects that are preallocated in the local area,
  // non-fixed objects can't be allocated right at the start of local area.
  // Adjust 'Offset' to point to the end of last fixed sized preallocated
  // object.
  for (int i = MFI.getObjectIndexBegin(); i != 0; ++i) {
    // Only allocate objects on the default stack.
    if (MFI.getStackID(i) != TargetStackID::Default)
      continue;

    int64_t FixedOff;
    if (StackGrowsDown) {
      // The maximum distance from the stack pointer is at lower address of
      // the object -- which is given by offset. For down growing stack
      // the offset is negative, so we negate the offset to get the distance.
      FixedOff = -MFI.getObjectOffset(i);
    } else {
      // The maximum distance from the stack pointer is at the upper
      // address of the object.
      FixedOff = MFI.getObjectOffset(i) + MFI.getObjectSize(i);
    }
    if (FixedOff > Offset) Offset = FixedOff;
  }

  Align MaxAlign = MFI.getMaxAlign();
  // First assign frame offsets to stack objects that are used to spill
  // callee saved registers.
  if (MaxCSFrameIndex >= MinCSFrameIndex) {
    for (unsigned i = 0; i <= MaxCSFrameIndex - MinCSFrameIndex; ++i) {
      unsigned FrameIndex =
          StackGrowsDown ? MinCSFrameIndex + i : MaxCSFrameIndex - i;

      // Only allocate objects on the default stack.
      if (MFI.getStackID(FrameIndex) != TargetStackID::Default)
        continue;

      // TODO: should this just be if (MFI.isDeadObjectIndex(FrameIndex))
      if (!StackGrowsDown && MFI.isDeadObjectIndex(FrameIndex))
        continue;

      AdjustStackOffset(MFI, FrameIndex, StackGrowsDown, Offset, MaxAlign);
    }
  }

  assert(MaxAlign == MFI.getMaxAlign() &&
         "MFI.getMaxAlign should already account for all callee-saved "
         "registers without a fixed stack slot");

  // FixedCSEnd is the stack offset to the end of the fixed and callee-save
  // stack area.
  int64_t FixedCSEnd = Offset;

  // Make sure the special register scavenging spill slot is closest to the
  // incoming stack pointer if a frame pointer is required and is closer
  // to the incoming rather than the final stack pointer.
  const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
  bool EarlyScavengingSlots =
      TFI.allocateScavengingFrameIndexesNearIncomingSP(MF);
  if (RS && EarlyScavengingSlots) {
    SmallVector<int, 2> SFIs;
    RS->getScavengingFrameIndices(SFIs);
    for (int SFI : SFIs)
      AdjustStackOffset(MFI, SFI, StackGrowsDown, Offset, MaxAlign);
  }

  // FIXME: Once this is working, then enable flag will change to a target
  // check for whether the frame is large enough to want to use virtual
  // frame index registers. Functions which don't want/need this optimization
  // will continue to use the existing code path.
  if (MFI.getUseLocalStackAllocationBlock()) {
    Align Alignment = MFI.getLocalFrameMaxAlign();

    // Adjust to alignment boundary.
    Offset = alignTo(Offset, Alignment);

    LLVM_DEBUG(dbgs() << "Local frame base offset: " << Offset << "\n");

    // Resolve offsets for objects in the local block.
    for (unsigned i = 0, e = MFI.getLocalFrameObjectCount(); i != e; ++i) {
      std::pair<int, int64_t> Entry = MFI.getLocalFrameObjectMap(i);
      int64_t FIOffset = (StackGrowsDown ? -Offset : Offset) + Entry.second;
      LLVM_DEBUG(dbgs() << "alloc FI(" << Entry.first << ") at SP[" << FIOffset
                        << "]\n");
      MFI.setObjectOffset(Entry.first, FIOffset);
    }
    // Allocate the local block
    Offset += MFI.getLocalFrameSize();

    MaxAlign = std::max(Alignment, MaxAlign);
  }

  // Retrieve the Exception Handler registration node.
  int EHRegNodeFrameIndex = std::numeric_limits<int>::max();
  if (const WinEHFuncInfo *FuncInfo = MF.getWinEHFuncInfo())
    EHRegNodeFrameIndex = FuncInfo->EHRegNodeFrameIndex;

  // Make sure that the stack protector comes before the local variables on the
  // stack.
  SmallSet<int, 16> ProtectedObjs;
  if (MFI.hasStackProtectorIndex()) {
    int StackProtectorFI = MFI.getStackProtectorIndex();
    StackObjSet LargeArrayObjs;
    StackObjSet SmallArrayObjs;
    StackObjSet AddrOfObjs;

    // If we need a stack protector, we need to make sure that
    // LocalStackSlotPass didn't already allocate a slot for it.
    // If we are told to use the LocalStackAllocationBlock, the stack protector
    // is expected to be already pre-allocated.
    if (MFI.getStackID(StackProtectorFI) != TargetStackID::Default) {
      // If the stack protector isn't on the default stack then it's up to the
      // target to set the stack offset.
      assert(MFI.getObjectOffset(StackProtectorFI) != 0 &&
             "Offset of stack protector on non-default stack expected to be "
             "already set.");
      assert(!MFI.isObjectPreAllocated(MFI.getStackProtectorIndex()) &&
             "Stack protector on non-default stack expected to not be "
             "pre-allocated by LocalStackSlotPass.");
    } else if (!MFI.getUseLocalStackAllocationBlock()) {
      AdjustStackOffset(MFI, StackProtectorFI, StackGrowsDown, Offset,
                        MaxAlign);
    } else if (!MFI.isObjectPreAllocated(MFI.getStackProtectorIndex())) {
      llvm_unreachable(
          "Stack protector not pre-allocated by LocalStackSlotPass.");
    }

    // Assign large stack objects first.
    for (unsigned i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) {
      if (MFI.isObjectPreAllocated(i) && MFI.getUseLocalStackAllocationBlock())
        continue;
      if (i >= MinCSFrameIndex && i <= MaxCSFrameIndex)
        continue;
      if (RS && RS->isScavengingFrameIndex((int)i))
        continue;
      if (MFI.isDeadObjectIndex(i))
        continue;
      if (StackProtectorFI == (int)i || EHRegNodeFrameIndex == (int)i)
        continue;
      // Only allocate objects on the default stack.
      if (MFI.getStackID(i) != TargetStackID::Default)
        continue;

      switch (MFI.getObjectSSPLayout(i)) {
      case MachineFrameInfo::SSPLK_None:
        continue;
      case MachineFrameInfo::SSPLK_SmallArray:
        SmallArrayObjs.insert(i);
        continue;
      case MachineFrameInfo::SSPLK_AddrOf:
        AddrOfObjs.insert(i);
        continue;
      case MachineFrameInfo::SSPLK_LargeArray:
        LargeArrayObjs.insert(i);
        continue;
      }
      llvm_unreachable("Unexpected SSPLayoutKind.");
    }

    // We expect **all** the protected stack objects to be pre-allocated by
    // LocalStackSlotPass. If it turns out that PEI still has to allocate some
    // of them, we may end up messing up the expected order of the objects.
    if (MFI.getUseLocalStackAllocationBlock() &&
        !(LargeArrayObjs.empty() && SmallArrayObjs.empty() &&
          AddrOfObjs.empty()))
      llvm_unreachable("Found protected stack objects not pre-allocated by "
                       "LocalStackSlotPass.");

    AssignProtectedObjSet(LargeArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
                          Offset, MaxAlign);
    AssignProtectedObjSet(SmallArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
                          Offset, MaxAlign);
    AssignProtectedObjSet(AddrOfObjs, ProtectedObjs, MFI, StackGrowsDown,
                          Offset, MaxAlign);
  }

  SmallVector<int, 8> ObjectsToAllocate;

  // Then prepare to assign frame offsets to stack objects that are not used to
  // spill callee saved registers.
  for (unsigned i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) {
    if (MFI.isObjectPreAllocated(i) && MFI.getUseLocalStackAllocationBlock())
      continue;
    if (i >= MinCSFrameIndex && i <= MaxCSFrameIndex)
      continue;
    if (RS && RS->isScavengingFrameIndex((int)i))
      continue;
    if (MFI.isDeadObjectIndex(i))
      continue;
    if (MFI.getStackProtectorIndex() == (int)i || EHRegNodeFrameIndex == (int)i)
      continue;
    if (ProtectedObjs.count(i))
      continue;
    // Only allocate objects on the default stack.
    if (MFI.getStackID(i) != TargetStackID::Default)
      continue;

    // Add the objects that we need to allocate to our working set.
    ObjectsToAllocate.push_back(i);
  }

  // Allocate the EH registration node first if one is present.
  if (EHRegNodeFrameIndex != std::numeric_limits<int>::max())
    AdjustStackOffset(MFI, EHRegNodeFrameIndex, StackGrowsDown, Offset,
                      MaxAlign);

  // Give the targets a chance to order the objects the way they like it.
  if (MF.getTarget().getOptLevel() != CodeGenOptLevel::None &&
      MF.getTarget().Options.StackSymbolOrdering)
    TFI.orderFrameObjects(MF, ObjectsToAllocate);

  // Keep track of which bytes in the fixed and callee-save range are used so
  // we can use the holes when allocating later stack objects. Only do this if
  // stack protector isn't being used and the target requests it and we're
  // optimizing.
  BitVector StackBytesFree;
  if (!ObjectsToAllocate.empty() &&
      MF.getTarget().getOptLevel() != CodeGenOptLevel::None &&
      MFI.getStackProtectorIndex() < 0 && TFI.enableStackSlotScavenging(MF))
    computeFreeStackSlots(MFI, StackGrowsDown, MinCSFrameIndex, MaxCSFrameIndex,
                          FixedCSEnd, StackBytesFree);

  // Now walk the objects and actually assign base offsets to them.
  for (auto &Object : ObjectsToAllocate)
    if (!scavengeStackSlot(MFI, Object, StackGrowsDown, MaxAlign,
                           StackBytesFree))
      AdjustStackOffset(MFI, Object, StackGrowsDown, Offset, MaxAlign);

  // Make sure the special register scavenging spill slot is closest to the
  // stack pointer.
  if (RS && !EarlyScavengingSlots) {
    SmallVector<int, 2> SFIs;
    RS->getScavengingFrameIndices(SFIs);
    for (int SFI : SFIs)
      AdjustStackOffset(MFI, SFI, StackGrowsDown, Offset, MaxAlign);
  }

  if (!TFI.targetHandlesStackFrameRounding()) {
    // If we have reserved argument space for call sites in the function
    // immediately on entry to the current function, count it as part of the
    // overall stack size.
    if (MFI.adjustsStack() && TFI.hasReservedCallFrame(MF))
      Offset += MFI.getMaxCallFrameSize();

    // Round up the size to a multiple of the alignment. If the function has
    // any calls or alloca's, align to the target's StackAlignment value to
    // ensure that the callee's frame or the alloca data is suitably aligned;
    // otherwise, for leaf functions, align to the TransientStackAlignment
    // value.
    Align StackAlign;
    if (MFI.adjustsStack() || MFI.hasVarSizedObjects() ||
        (RegInfo->hasStackRealignment(MF) && MFI.getObjectIndexEnd() != 0))
      StackAlign = TFI.getStackAlign();
    else
      StackAlign = TFI.getTransientStackAlign();

    // If the frame pointer is eliminated, all frame offsets will be relative
    // to SP, not FP. Align to MaxAlign so this works.
    StackAlign = std::max(StackAlign, MaxAlign);
    int64_t OffsetBeforeAlignment = Offset;
    Offset = alignTo(Offset, StackAlign);

    // If we have increased the offset to fulfill the alignment constraints,
    // then the scavenging spill slots may become harder to reach from the
    // stack pointer; float them so they stay close.
    if (StackGrowsDown && OffsetBeforeAlignment != Offset && RS &&
        !EarlyScavengingSlots) {
      SmallVector<int, 2> SFIs;
      RS->getScavengingFrameIndices(SFIs);
      LLVM_DEBUG(if (!SFIs.empty()) llvm::dbgs()
                 << "Adjusting emergency spill slots!\n";);
      int64_t Delta = Offset - OffsetBeforeAlignment;
      for (int SFI : SFIs) {
        LLVM_DEBUG(llvm::dbgs()
                   << "Adjusting offset of emergency spill slot #" << SFI
                   << " from " << MFI.getObjectOffset(SFI););
        MFI.setObjectOffset(SFI, MFI.getObjectOffset(SFI) - Delta);
        LLVM_DEBUG(llvm::dbgs() << " to " << MFI.getObjectOffset(SFI) << "\n";);
      }
    }
  }

  // Update frame info to pretend that this is part of the stack...
  int64_t StackSize = Offset - LocalAreaOffset;
  MFI.setStackSize(StackSize);
  NumBytesStackSpace += StackSize;
}

/// insertPrologEpilogCode - Scan the function for modified callee saved
/// registers, insert spill code for these callee saved registers, then add
/// prolog and epilog code to the function.
void PEI::insertPrologEpilogCode(MachineFunction &MF) {
  const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();

  // Add prologue to the function...
  for (MachineBasicBlock *SaveBlock : SaveBlocks)
    TFI.emitPrologue(MF, *SaveBlock);

  // Add epilogue to restore the callee-save registers in each exiting block.
  for (MachineBasicBlock *RestoreBlock : RestoreBlocks)
    TFI.emitEpilogue(MF, *RestoreBlock);

  // Zero call used registers before restoring callee-saved registers.
  insertZeroCallUsedRegs(MF);

  for (MachineBasicBlock *SaveBlock : SaveBlocks)
    TFI.inlineStackProbe(MF, *SaveBlock);

  // Emit additional code that is required to support segmented stacks, if
  // we've been asked for it. This, when linked with a runtime with support
  // for segmented stacks (libgcc is one), will result in allocating stack
  // space in small chunks instead of one large contiguous block.
  if (MF.shouldSplitStack()) {
    for (MachineBasicBlock *SaveBlock : SaveBlocks)
      TFI.adjustForSegmentedStacks(MF, *SaveBlock);
  }

  // Emit additional code that is required to explicitly handle the stack in
  // HiPE native code (if needed) when loaded in the Erlang/OTP runtime. The
  // approach is rather similar to that of Segmented Stacks, but it uses a
  // different conditional check and another BIF for allocating more stack
  // space.
  if (MF.getFunction().getCallingConv() == CallingConv::HiPE)
    for (MachineBasicBlock *SaveBlock : SaveBlocks)
      TFI.adjustForHiPEPrologue(MF, *SaveBlock);
}

/// insertZeroCallUsedRegs - Zero out call used registers.
void PEI::insertZeroCallUsedRegs(MachineFunction &MF) {
  const Function &F = MF.getFunction();

  if (!F.hasFnAttribute("zero-call-used-regs"))
    return;

  using namespace ZeroCallUsedRegs;

  ZeroCallUsedRegsKind ZeroRegsKind =
      StringSwitch<ZeroCallUsedRegsKind>(
          F.getFnAttribute("zero-call-used-regs").getValueAsString())
          .Case("skip", ZeroCallUsedRegsKind::Skip)
          .Case("used-gpr-arg", ZeroCallUsedRegsKind::UsedGPRArg)
          .Case("used-gpr", ZeroCallUsedRegsKind::UsedGPR)
          .Case("used-arg", ZeroCallUsedRegsKind::UsedArg)
          .Case("used", ZeroCallUsedRegsKind::Used)
          .Case("all-gpr-arg", ZeroCallUsedRegsKind::AllGPRArg)
          .Case("all-gpr", ZeroCallUsedRegsKind::AllGPR)
          .Case("all-arg", ZeroCallUsedRegsKind::AllArg)
          .Case("all", ZeroCallUsedRegsKind::All);

  if (ZeroRegsKind == ZeroCallUsedRegsKind::Skip)
    return;

  const bool OnlyGPR = static_cast<unsigned>(ZeroRegsKind) & ONLY_GPR;
  const bool OnlyUsed = static_cast<unsigned>(ZeroRegsKind) & ONLY_USED;
  const bool OnlyArg = static_cast<unsigned>(ZeroRegsKind) & ONLY_ARG;

  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
  const BitVector AllocatableSet(TRI.getAllocatableSet(MF));

  // Mark all used registers.
  BitVector UsedRegs(TRI.getNumRegs());
  if (OnlyUsed)
    for (const MachineBasicBlock &MBB : MF)
      for (const MachineInstr &MI : MBB) {
        // Skip debug instructions.
        if (MI.isDebugInstr())
          continue;

        for (const MachineOperand &MO : MI.operands()) {
          if (!MO.isReg())
            continue;

          MCRegister Reg = MO.getReg();
          if (AllocatableSet[Reg] && !MO.isImplicit() &&
              (MO.isDef() || MO.isUse()))
            UsedRegs.set(Reg);
        }
      }

  // Get a list of registers that are used.
  BitVector LiveIns(TRI.getNumRegs());
  for (const MachineBasicBlock::RegisterMaskPair &LI : MF.front().liveins())
    LiveIns.set(LI.PhysReg);

  BitVector RegsToZero(TRI.getNumRegs());
  for (MCRegister Reg : AllocatableSet.set_bits()) {
    // Skip over fixed registers.
    if (TRI.isFixedRegister(MF, Reg))
      continue;

    // Want only general purpose registers.
    if (OnlyGPR && !TRI.isGeneralPurposeRegister(MF, Reg))
      continue;

    // Want only used registers.
    if (OnlyUsed && !UsedRegs[Reg])
      continue;

    // Want only registers used for arguments.
    if (OnlyArg) {
      if (OnlyUsed) {
        if (!LiveIns[Reg])
          continue;
      } else if (!TRI.isArgumentRegister(MF, Reg)) {
        continue;
      }
    }

    RegsToZero.set(Reg);
  }

  // Don't clear registers that are live when leaving the function.
  for (const MachineBasicBlock &MBB : MF)
    for (const MachineInstr &MI : MBB.terminators()) {
      if (!MI.isReturn())
        continue;

      for (const auto &MO : MI.operands()) {
        if (!MO.isReg())
          continue;

        MCRegister Reg = MO.getReg();
        if (!Reg)
          continue;

        // This picks up sibling registers (e.g. %al -> %ah).
        for (MCRegUnit Unit : TRI.regunits(Reg))
          RegsToZero.reset(Unit);

        for (MCPhysReg SReg : TRI.sub_and_superregs_inclusive(Reg))
          RegsToZero.reset(SReg);
      }
    }

  // Don't need to clear registers that are used/clobbered by terminating
  // instructions.
  for (const MachineBasicBlock &MBB : MF) {
    if (!MBB.isReturnBlock())
      continue;

    MachineBasicBlock::const_iterator MBBI = MBB.getFirstTerminator();
    for (MachineBasicBlock::const_iterator I = MBBI, E = MBB.end(); I != E;
         ++I) {
      for (const MachineOperand &MO : I->operands()) {
        if (!MO.isReg())
          continue;

        MCRegister Reg = MO.getReg();
        if (!Reg)
          continue;

        for (const MCPhysReg SReg : TRI.sub_and_superregs_inclusive(Reg))
          RegsToZero.reset(SReg);
      }
    }
  }

  // Don't clear registers that must be preserved.
  for (const MCPhysReg *CSRegs = TRI.getCalleeSavedRegs(&MF);
       MCPhysReg CSReg = *CSRegs; ++CSRegs)
    for (MCRegister Reg : TRI.sub_and_superregs_inclusive(CSReg))
      RegsToZero.reset(Reg);

  const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();
  for (MachineBasicBlock &MBB : MF)
    if (MBB.isReturnBlock())
      TFI.emitZeroCallUsedRegs(RegsToZero, MBB);
}

/// Replace all FrameIndex operands with physical register references and
/// actual offsets.
void PEI::replaceFrameIndicesBackward(MachineFunction &MF) {
  const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();

  for (auto &MBB : MF) {
    int SPAdj = 0;
    if (!MBB.succ_empty()) {
      // Get the SP adjustment for the end of MBB from the start of any of its
      // successors. They should all be the same.
      assert(all_of(MBB.successors(), [&MBB](const MachineBasicBlock *Succ) {
        return Succ->getCallFrameSize() ==
               (*MBB.succ_begin())->getCallFrameSize();
      }));
      const MachineBasicBlock &FirstSucc = **MBB.succ_begin();
      SPAdj = TFI.alignSPAdjust(FirstSucc.getCallFrameSize());
      if (TFI.getStackGrowthDirection() == TargetFrameLowering::StackGrowsUp)
        SPAdj = -SPAdj;
    }

    replaceFrameIndicesBackward(&MBB, MF, SPAdj);

    // We can't track the call frame size after call frame pseudos have been
    // eliminated. Set it to zero everywhere to keep MachineVerifier happy.
    MBB.setCallFrameSize(0);
  }
}

/// replaceFrameIndices - Replace all MO_FrameIndex operands with physical
/// register references and actual offsets.
void PEI::replaceFrameIndices(MachineFunction &MF) {
  const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();

  for (auto &MBB : MF) {
    int SPAdj = TFI.alignSPAdjust(MBB.getCallFrameSize());
    if (TFI.getStackGrowthDirection() == TargetFrameLowering::StackGrowsUp)
      SPAdj = -SPAdj;

    replaceFrameIndices(&MBB, MF, SPAdj);

    // We can't track the call frame size after call frame pseudos have been
    // eliminated. Set it to zero everywhere to keep MachineVerifier happy.
    MBB.setCallFrameSize(0);
  }
}

bool PEI::replaceFrameIndexDebugInstr(MachineFunction &MF, MachineInstr &MI,
                                      unsigned OpIdx, int SPAdj) {
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
  if (MI.isDebugValue()) {

    MachineOperand &Op = MI.getOperand(OpIdx);
    assert(MI.isDebugOperand(&Op) &&
           "Frame indices can only appear as a debug operand in a DBG_VALUE*"
           " machine instruction");
    Register Reg;
    unsigned FrameIdx = Op.getIndex();
    unsigned Size = MF.getFrameInfo().getObjectSize(FrameIdx);

    StackOffset Offset = TFI->getFrameIndexReference(MF, FrameIdx, Reg);
    Op.ChangeToRegister(Reg, false /*isDef*/);

    const DIExpression *DIExpr = MI.getDebugExpression();

    // If we have a direct DBG_VALUE, and its location expression isn't
    // currently complex, then adding an offset will morph it into a
    // complex location that is interpreted as being a memory address.
    // This changes a pointer-valued variable to dereference that pointer,
    // which is incorrect. Fix by adding DW_OP_stack_value.

    if (MI.isNonListDebugValue()) {
      unsigned PrependFlags = DIExpression::ApplyOffset;
      if (!MI.isIndirectDebugValue() && !DIExpr->isComplex())
        PrependFlags |= DIExpression::StackValue;

      // If we have a DBG_VALUE that is indirect and has an implicit location
      // expression, we need to insert a deref before prepending a Memory
      // location expression. Also, after doing this we change the DBG_VALUE
      // to be direct.
      if (MI.isIndirectDebugValue() && DIExpr->isImplicit()) {
        SmallVector<uint64_t, 2> Ops = {dwarf::DW_OP_deref_size, Size};
        bool WithStackValue = true;
        DIExpr = DIExpression::prependOpcodes(DIExpr, Ops, WithStackValue);
        // Make the DBG_VALUE direct.
        MI.getDebugOffset().ChangeToRegister(0, false);
      }
      DIExpr = TRI.prependOffsetExpression(DIExpr, PrependFlags, Offset);
    } else {
      // The debug operand at DebugOpIndex was a frame index at offset
      // `Offset`; now that the operand has been replaced with the frame
      // register, we must describe the location as `register x, plus Offset`.
      unsigned DebugOpIndex = MI.getDebugOperandIndex(&Op);
      SmallVector<uint64_t, 3> Ops;
      TRI.getOffsetOpcodes(Offset, Ops);
      DIExpr = DIExpression::appendOpsToArg(DIExpr, Ops, DebugOpIndex);
    }
    MI.getDebugExpressionOp().setMetadata(DIExpr);
    return true;
  }

  if (MI.isDebugPHI()) {
    // Allow stack ref to continue onwards.
    return true;
  }

  // TODO: This code should be commoned with the code for
  // PATCHPOINT. There's no good reason for the difference in
  // implementation other than historical accident. The only
  // remaining difference is the unconditional use of the stack
  // pointer as the base register.
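  // For STATEPOINT, the frame index operand is followed by an immediate
  // offset operand; the frame offset (plus SPAdj) is folded into that
  // immediate and the frame index is rewritten to the chosen base register.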
  if (MI.getOpcode() == TargetOpcode::STATEPOINT) {
    assert((!MI.isDebugValue() || OpIdx == 0) &&
           "Frame indices can only appear as the first operand of a "
           "DBG_VALUE machine instruction");
    Register Reg;
    MachineOperand &Offset = MI.getOperand(OpIdx + 1);
    StackOffset refOffset = TFI->getFrameIndexReferencePreferSP(
        MF, MI.getOperand(OpIdx).getIndex(), Reg, /*IgnoreSPUpdates*/ false);
    assert(!refOffset.getScalable() &&
           "Frame offsets with a scalable component are not supported");
    Offset.setImm(Offset.getImm() + refOffset.getFixed() + SPAdj);
    MI.getOperand(OpIdx).ChangeToRegister(Reg, false /*isDef*/);
    return true;
  }
  return false;
}

void PEI::replaceFrameIndicesBackward(MachineBasicBlock *BB,
                                      MachineFunction &MF, int &SPAdj) {
  assert(MF.getSubtarget().getRegisterInfo() &&
         "getRegisterInfo() must be implemented!");

  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
  const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();

  RegScavenger *LocalRS = FrameIndexEliminationScavenging ? RS : nullptr;
  if (LocalRS)
    LocalRS->enterBasicBlockEnd(*BB);

  for (MachineBasicBlock::iterator I = BB->end(); I != BB->begin();) {
    MachineInstr &MI = *std::prev(I);

    if (TII.isFrameInstr(MI)) {
      SPAdj -= TII.getSPAdjust(MI);
      TFI.eliminateCallFramePseudoInstr(MF, *BB, &MI);
      continue;
    }

    // Step backwards to get the liveness state at (immediately after) MI.
    if (LocalRS)
      LocalRS->backward(I);

    bool RemovedMI = false;
    for (const auto &[Idx, Op] : enumerate(MI.operands())) {
      if (!Op.isFI())
        continue;

      if (replaceFrameIndexDebugInstr(MF, MI, Idx, SPAdj))
        continue;

      // Eliminate this FrameIndex operand.
      RemovedMI = TRI.eliminateFrameIndex(MI, SPAdj, Idx, LocalRS);
      if (RemovedMI)
        break;
    }

    if (!RemovedMI)
      --I;
  }
}

void PEI::replaceFrameIndices(MachineBasicBlock *BB, MachineFunction &MF,
                              int &SPAdj) {
  assert(MF.getSubtarget().getRegisterInfo() &&
         "getRegisterInfo() must be implemented!");
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();

  bool InsideCallSequence = false;

  for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ) {
    if (TII.isFrameInstr(*I)) {
      InsideCallSequence = TII.isFrameSetup(*I);
      SPAdj += TII.getSPAdjust(*I);
      I = TFI->eliminateCallFramePseudoInstr(MF, *BB, I);
      continue;
    }

    MachineInstr &MI = *I;
    bool DoIncr = true;
    bool DidFinishLoop = true;
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      if (!MI.getOperand(i).isFI())
        continue;

      if (replaceFrameIndexDebugInstr(MF, MI, i, SPAdj))
        continue;

      // Some instructions (e.g. inline asm instructions) can have
      // multiple frame indices and/or cause eliminateFrameIndex
      // to insert more than one instruction. We need the register
      // scavenger to go through all of these instructions so that
      // it can update its register information. We keep the
      // iterator at the point before insertion so that we can
      // revisit them in full.
      bool AtBeginning = (I == BB->begin());
      if (!AtBeginning) --I;

      // If this instruction has a FrameIndex operand, we need to
      // use that target machine register info object to eliminate
      // it.
      TRI.eliminateFrameIndex(MI, SPAdj, i, RS);

      // Reset the iterator if we were at the beginning of the BB.
      if (AtBeginning) {
        I = BB->begin();
        DoIncr = false;
      }

      DidFinishLoop = false;
      break;
    }

    // If we are looking at a call sequence, we need to keep track of
    // the SP adjustment made by each instruction in the sequence.
    // This includes both the frame setup/destroy pseudos (handled above),
    // as well as other instructions that have side effects w.r.t the SP.
    // Note that this must come after eliminateFrameIndex, because
    // if I itself referred to a frame index, we shouldn't count its own
    // adjustment.
    if (DidFinishLoop && InsideCallSequence)
      SPAdj += TII.getSPAdjust(MI);

    if (DoIncr && I != BB->end())
      ++I;
  }
}