//===- PrologEpilogInserter.cpp - Insert Prolog/Epilog code in function ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass is responsible for finalizing the function's frame layout, saving
// callee saved registers, and for emitting prolog & epilog code for the
// function.
//
// This pass must be run after register allocation. After this pass is
// executed, it is illegal to construct MO_FrameIndex operands.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <limits>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "prologepilog"

using MBBVector = SmallVector<MachineBasicBlock *, 4>;

STATISTIC(NumLeafFuncWithSpills, "Number of leaf functions with CSRs");
STATISTIC(NumFuncSeen, "Number of functions seen in PEI");

namespace {

class PEI : public MachineFunctionPass {
public:
  static char ID;

  PEI() : MachineFunctionPass(ID) {
    initializePEIPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  /// runOnMachineFunction - Insert prolog/epilog code and replace abstract
  /// frame indexes with appropriate references.
  bool runOnMachineFunction(MachineFunction &MF) override;

private:
  RegScavenger *RS = nullptr;

  // MinCSFrameIndex, MaxCSFrameIndex - Keeps the range of callee saved
  // stack frame indexes.
  unsigned MinCSFrameIndex = std::numeric_limits<unsigned>::max();
  unsigned MaxCSFrameIndex = 0;

  // Save and Restore blocks of the current function. Typically there is a
  // single save block, unless Windows EH funclets are involved.
  MBBVector SaveBlocks;
  MBBVector RestoreBlocks;

  // Flag to control whether to use the register scavenger to resolve
  // frame index materialization registers. Set according to
  // TRI->requiresFrameIndexScavenging() for the current function.
  bool FrameIndexVirtualScavenging = false;

  // Flag to control whether the scavenger should be passed even though
  // FrameIndexVirtualScavenging is used.
  bool FrameIndexEliminationScavenging = false;

  // Emit remarks.
  MachineOptimizationRemarkEmitter *ORE = nullptr;

  void calculateCallFrameInfo(MachineFunction &MF);
  void calculateSaveRestoreBlocks(MachineFunction &MF);
  void spillCalleeSavedRegs(MachineFunction &MF);

  void calculateFrameObjectOffsets(MachineFunction &MF);
  void replaceFrameIndices(MachineFunction &MF);
  void replaceFrameIndices(MachineBasicBlock *BB, MachineFunction &MF,
                           int &SPAdj);
  // Frame indices in debug values are encoded in a target-independent
  // way, using simply the frame index and offset rather than any
  // target-specific addressing mode.
  bool replaceFrameIndexDebugInstr(MachineFunction &MF, MachineInstr &MI,
                                   unsigned OpIdx, int SPAdj = 0);
  // Does the same as replaceFrameIndices, but uses a backward MIR walk and
  // a backward register scavenger walk.
  void replaceFrameIndicesBackward(MachineFunction &MF);
  void replaceFrameIndicesBackward(MachineBasicBlock *BB, MachineFunction &MF,
                                   int &SPAdj);

  void insertPrologEpilogCode(MachineFunction &MF);
  void insertZeroCallUsedRegs(MachineFunction &MF);
};

} // end anonymous namespace

char PEI::ID = 0;

char &llvm::PrologEpilogCodeInserterID = PEI::ID;

INITIALIZE_PASS_BEGIN(PEI, DEBUG_TYPE, "Prologue/Epilogue Insertion", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MachineOptimizationRemarkEmitterPass)
INITIALIZE_PASS_END(PEI, DEBUG_TYPE,
                    "Prologue/Epilogue Insertion & Frame Finalization", false,
                    false)

MachineFunctionPass *llvm::createPrologEpilogInserterPass() {
  return new PEI();
}

STATISTIC(NumBytesStackSpace,
          "Number of bytes used for stack in all functions");

void PEI::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addPreserved<MachineLoopInfoWrapperPass>();
  AU.addPreserved<MachineDominatorTreeWrapperPass>();
  AU.addRequired<MachineOptimizationRemarkEmitterPass>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

/// StackObjSet - A set of stack object indexes
using StackObjSet = SmallSetVector<int, 8>;

using SavedDbgValuesMap =
    SmallDenseMap<MachineBasicBlock *, SmallVector<MachineInstr *, 4>, 4>;

/// Stash DBG_VALUEs that describe parameters and which are placed at the start
/// of the block. Later on, after the prologue code has been emitted, the
/// stashed DBG_VALUEs will be reinserted at the start of the block.
static void stashEntryDbgValues(MachineBasicBlock &MBB,
                                SavedDbgValuesMap &EntryDbgValues) {
  SmallVector<const MachineInstr *, 4> FrameIndexValues;

  for (auto &MI : MBB) {
    if (!MI.isDebugInstr())
      break;
    if (!MI.isDebugValue() || !MI.getDebugVariable()->isParameter())
      continue;
    if (any_of(MI.debug_operands(),
               [](const MachineOperand &MO) { return MO.isFI(); })) {
      // We can only emit valid locations for frame indices after the frame
      // setup, so do not stash them away.
      FrameIndexValues.push_back(&MI);
      continue;
    }
    const DILocalVariable *Var = MI.getDebugVariable();
    const DIExpression *Expr = MI.getDebugExpression();
    auto Overlaps = [Var, Expr](const MachineInstr *DV) {
      return Var == DV->getDebugVariable() &&
             Expr->fragmentsOverlap(DV->getDebugExpression());
    };
    // See if the debug value overlaps with any preceding debug value that will
    // not be stashed. If that is the case, then we can't stash this value, as
    // we would then reorder the values at reinsertion.
    if (llvm::none_of(FrameIndexValues, Overlaps))
      EntryDbgValues[&MBB].push_back(&MI);
  }

  // Remove stashed debug values from the block.
  if (auto It = EntryDbgValues.find(&MBB); It != EntryDbgValues.end())
    for (auto *MI : It->second)
      MI->removeFromParent();
}

/// runOnMachineFunction - Insert prolog/epilog code and replace abstract
/// frame indexes with appropriate references.
bool PEI::runOnMachineFunction(MachineFunction &MF) {
  NumFuncSeen++;
  const Function &F = MF.getFunction();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();

  RS = TRI->requiresRegisterScavenging(MF) ? new RegScavenger() : nullptr;
  FrameIndexVirtualScavenging = TRI->requiresFrameIndexScavenging(MF);
  ORE = &getAnalysis<MachineOptimizationRemarkEmitterPass>().getORE();

  // Spill frame pointer and/or base pointer registers if they are clobbered.
  // This is done before call frame instruction elimination so that it does
  // not interfere with stack arguments.
  TFI->spillFPBP(MF);

  // Calculate the MaxCallFrameSize value for the function's frame
  // information. Also eliminates call frame pseudo instructions.
  calculateCallFrameInfo(MF);

  // Determine placement of CSR spill/restore code and prolog/epilog code:
  // place all spills in the entry block, all restores in return blocks.
  calculateSaveRestoreBlocks(MF);

  // Stash away DBG_VALUEs that should not be moved by insertion of prolog
  // code.
  SavedDbgValuesMap EntryDbgValues;
  for (MachineBasicBlock *SaveBlock : SaveBlocks)
    stashEntryDbgValues(*SaveBlock, EntryDbgValues);

  // Handle CSR spilling and restoring, for targets that need it.
  if (MF.getTarget().usesPhysRegsForValues())
    spillCalleeSavedRegs(MF);

  // Allow the target machine to make final modifications to the function
  // before the frame layout is finalized.
  TFI->processFunctionBeforeFrameFinalized(MF, RS);

  // Calculate actual frame offsets for all abstract stack objects...
  calculateFrameObjectOffsets(MF);

  // Add prolog and epilog code to the function. This function is required
  // to align the stack frame as necessary for any stack variables or
  // called functions. Because of this, calculateCalleeSavedRegisters()
  // must be called before this function in order to set the AdjustsStack
  // and MaxCallFrameSize variables.
  if (!F.hasFnAttribute(Attribute::Naked))
    insertPrologEpilogCode(MF);

  // Reinsert stashed debug values at the start of the entry blocks.
  for (auto &I : EntryDbgValues)
    I.first->insert(I.first->begin(), I.second.begin(), I.second.end());

  // Allow the target machine to make final modifications to the function
  // before the frame indices are replaced.
  TFI->processFunctionBeforeFrameIndicesReplaced(MF, RS);

  // Replace all MO_FrameIndex operands with physical register references
  // and actual offsets.
  if (TFI->needsFrameIndexResolution(MF)) {
    // Allow the target to determine this after knowing the frame size.
    FrameIndexEliminationScavenging =
        (RS && !FrameIndexVirtualScavenging) ||
        TRI->requiresFrameIndexReplacementScavenging(MF);

    if (TRI->eliminateFrameIndicesBackwards())
      replaceFrameIndicesBackward(MF);
    else
      replaceFrameIndices(MF);
  }

  // If register scavenging is needed, as we've enabled doing it as a
  // post-pass, scavenge the virtual registers that frame index elimination
  // inserted.
  if (TRI->requiresRegisterScavenging(MF) && FrameIndexVirtualScavenging)
    scavengeFrameVirtualRegs(MF, *RS);

  // Warn on stack size when it exceeds the given limit.
  MachineFrameInfo &MFI = MF.getFrameInfo();
  uint64_t StackSize = MFI.getStackSize();

  uint64_t Threshold = TFI->getStackThreshold();
  if (MF.getFunction().hasFnAttribute("warn-stack-size")) {
    bool Failed = MF.getFunction()
                      .getFnAttribute("warn-stack-size")
                      .getValueAsString()
                      .getAsInteger(10, Threshold);
    // Verifier should have caught this.
    assert(!Failed && "Invalid warn-stack-size fn attr value");
    (void)Failed;
  }
  uint64_t UnsafeStackSize = MFI.getUnsafeStackSize();
  if (MF.getFunction().hasFnAttribute(Attribute::SafeStack))
    StackSize += UnsafeStackSize;

  if (StackSize > Threshold) {
    DiagnosticInfoStackSize DiagStackSize(F, StackSize, Threshold, DS_Warning);
    F.getContext().diagnose(DiagStackSize);
    int64_t SpillSize = 0;
    for (int Idx = MFI.getObjectIndexBegin(), End = MFI.getObjectIndexEnd();
         Idx != End; ++Idx) {
      if (MFI.isSpillSlotObjectIndex(Idx))
        SpillSize += MFI.getObjectSize(Idx);
    }

    [[maybe_unused]] float SpillPct =
        static_cast<float>(SpillSize) / static_cast<float>(StackSize);
    LLVM_DEBUG(
        dbgs() << formatv("{0}/{1} ({3:P}) spills, {2}/{1} ({4:P}) variables",
                          SpillSize, StackSize, StackSize - SpillSize, SpillPct,
                          1.0f - SpillPct));
    if (UnsafeStackSize != 0) {
      LLVM_DEBUG(dbgs() << formatv(", {0}/{2} ({1:P}) unsafe stack",
                                   UnsafeStackSize,
                                   static_cast<float>(UnsafeStackSize) /
                                       static_cast<float>(StackSize),
                                   StackSize));
    }
    LLVM_DEBUG(dbgs() << "\n");
  }

  ORE->emit([&]() {
    return MachineOptimizationRemarkAnalysis(DEBUG_TYPE, "StackSize",
                                             MF.getFunction().getSubprogram(),
                                             &MF.front())
           << ore::NV("NumStackBytes", StackSize)
           << " stack bytes in function '"
           << ore::NV("Function", MF.getFunction().getName()) << "'";
  });

  // Emit any remarks implemented for the target, based on final frame layout.
  TFI->emitRemarks(MF, ORE);

  delete RS;
  SaveBlocks.clear();
  RestoreBlocks.clear();
  MFI.setSavePoint(nullptr);
  MFI.setRestorePoint(nullptr);
  return true;
}

/// Calculate the MaxCallFrameSize variable for the function's frame
/// information and eliminate call frame pseudo instructions.
void PEI::calculateCallFrameInfo(MachineFunction &MF) {
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // Get the function call frame set-up and tear-down instruction opcode
  unsigned FrameSetupOpcode = TII.getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = TII.getCallFrameDestroyOpcode();

  // Early exit for targets which have no call frame setup/destroy pseudo
  // instructions.
  if (FrameSetupOpcode == ~0u && FrameDestroyOpcode == ~0u)
    return;

  // (Re-)Compute the MaxCallFrameSize.
  [[maybe_unused]] uint64_t MaxCFSIn =
      MFI.isMaxCallFrameSizeComputed() ? MFI.getMaxCallFrameSize() : UINT64_MAX;
  std::vector<MachineBasicBlock::iterator> FrameSDOps;
  MFI.computeMaxCallFrameSize(MF, &FrameSDOps);
  assert(MFI.getMaxCallFrameSize() <= MaxCFSIn &&
         "Recomputing MaxCFS gave a larger value.");
  assert((FrameSDOps.empty() || MF.getFrameInfo().adjustsStack()) &&
         "AdjustsStack not set in presence of a frame pseudo instruction.");

  if (TFI->canSimplifyCallFramePseudos(MF)) {
    // If call frames are not being included as part of the stack frame, and
    // the target doesn't indicate otherwise, remove the call frame pseudos
    // here. The sub/add sp instruction pairs are still inserted, but we don't
    // need to track the SP adjustment for frame index elimination.
    for (MachineBasicBlock::iterator I : FrameSDOps)
      TFI->eliminateCallFramePseudoInstr(MF, *I->getParent(), I);

    // We can't track the call frame size after call frame pseudos have been
    // eliminated. Set it to zero everywhere to keep MachineVerifier happy.
    for (MachineBasicBlock &MBB : MF)
      MBB.setCallFrameSize(0);
  }
}

/// Compute the sets of entry and return blocks for saving and restoring
/// callee-saved registers, and placing prolog and epilog code.
void PEI::calculateSaveRestoreBlocks(MachineFunction &MF) {
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // Even when we do not change any CSR, we still want to insert the
  // prologue and epilogue of the function.
  // So set the save points for those.

  // Use the points found by shrink-wrapping, if any.
  if (MFI.getSavePoint()) {
    SaveBlocks.push_back(MFI.getSavePoint());
    assert(MFI.getRestorePoint() && "Both restore and save must be set");
    MachineBasicBlock *RestoreBlock = MFI.getRestorePoint();
    // If RestoreBlock does not have any successor and is not a return block
    // then the end point is unreachable and we do not need to insert any
    // epilogue.
    if (!RestoreBlock->succ_empty() || RestoreBlock->isReturnBlock())
      RestoreBlocks.push_back(RestoreBlock);
    return;
  }

  // Save refs to entry and return blocks.
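  // Without shrink-wrapping, spills go in the entry block (and in every EH
  // funclet entry block), and restores go in every return block.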
  SaveBlocks.push_back(&MF.front());
  for (MachineBasicBlock &MBB : MF) {
    if (MBB.isEHFuncletEntry())
      SaveBlocks.push_back(&MBB);
    if (MBB.isReturnBlock())
      RestoreBlocks.push_back(&MBB);
  }
}

static void assignCalleeSavedSpillSlots(MachineFunction &F,
                                        const BitVector &SavedRegs,
                                        unsigned &MinCSFrameIndex,
                                        unsigned &MaxCSFrameIndex) {
  if (SavedRegs.empty())
    return;

  const TargetRegisterInfo *RegInfo = F.getSubtarget().getRegisterInfo();
  const MCPhysReg *CSRegs = F.getRegInfo().getCalleeSavedRegs();
  BitVector CSMask(SavedRegs.size());

  for (unsigned i = 0; CSRegs[i]; ++i)
    CSMask.set(CSRegs[i]);

  std::vector<CalleeSavedInfo> CSI;
  for (unsigned i = 0; CSRegs[i]; ++i) {
    unsigned Reg = CSRegs[i];
    if (SavedRegs.test(Reg)) {
      bool SavedSuper = false;
      for (const MCPhysReg &SuperReg : RegInfo->superregs(Reg)) {
        // Some backends set all aliases for some registers as saved, such as
        // Mips's $fp, so they appear in SavedRegs but not CSRegs.
        if (SavedRegs.test(SuperReg) && CSMask.test(SuperReg)) {
          SavedSuper = true;
          break;
        }
      }

      if (!SavedSuper)
        CSI.push_back(CalleeSavedInfo(Reg));
    }
  }

  const TargetFrameLowering *TFI = F.getSubtarget().getFrameLowering();
  MachineFrameInfo &MFI = F.getFrameInfo();
  if (!TFI->assignCalleeSavedSpillSlots(F, RegInfo, CSI, MinCSFrameIndex,
                                        MaxCSFrameIndex)) {
    // If the target doesn't implement this, use generic code.

    if (CSI.empty())
      return; // Early exit if no callee saved registers are modified!

    unsigned NumFixedSpillSlots;
    const TargetFrameLowering::SpillSlot *FixedSpillSlots =
        TFI->getCalleeSavedSpillSlots(NumFixedSpillSlots);

    // Now that we know which registers need to be saved and restored, allocate
    // stack slots for them.
    for (auto &CS : CSI) {
      // If the target has spilled this register to another register, we don't
      // need to allocate a stack slot.
      if (CS.isSpilledToReg())
        continue;

      unsigned Reg = CS.getReg();
      const TargetRegisterClass *RC = RegInfo->getMinimalPhysRegClass(Reg);

      int FrameIdx;
      if (RegInfo->hasReservedSpillSlot(F, Reg, FrameIdx)) {
        CS.setFrameIdx(FrameIdx);
        continue;
      }

      // Check to see if this physreg must be spilled to a particular stack slot
      // on this target.
      const TargetFrameLowering::SpillSlot *FixedSlot = FixedSpillSlots;
      while (FixedSlot != FixedSpillSlots + NumFixedSpillSlots &&
             FixedSlot->Reg != Reg)
        ++FixedSlot;

      unsigned Size = RegInfo->getSpillSize(*RC);
      if (FixedSlot == FixedSpillSlots + NumFixedSpillSlots) {
        // Nope, just spill it anywhere convenient.
        Align Alignment = RegInfo->getSpillAlign(*RC);
        // We may not be able to satisfy the desired alignment specification of
        // the TargetRegisterClass if the stack alignment is smaller. Use the
        // min.
        Alignment = std::min(Alignment, TFI->getStackAlign());
        FrameIdx = MFI.CreateStackObject(Size, Alignment, true);
        if ((unsigned)FrameIdx < MinCSFrameIndex) MinCSFrameIndex = FrameIdx;
        if ((unsigned)FrameIdx > MaxCSFrameIndex) MaxCSFrameIndex = FrameIdx;
      } else {
        // Spill it to the stack where we must.
        FrameIdx = MFI.CreateFixedSpillStackObject(Size, FixedSlot->Offset);
      }

      CS.setFrameIdx(FrameIdx);
    }
  }

  MFI.setCalleeSavedInfo(CSI);
}

/// Helper function to update the liveness information for the callee-saved
/// registers.
static void updateLiveness(MachineFunction &MF) {
  MachineFrameInfo &MFI = MF.getFrameInfo();
  // Visited will contain all the basic blocks that are in the region
  // where the callee saved registers are alive:
  // - Anything that is not Save or Restore -> LiveThrough.
  // - Save -> LiveIn.
  // - Restore -> LiveOut.
  // The live-out is not attached to the block, so no need to keep
  // Restore in this set.
  SmallPtrSet<MachineBasicBlock *, 8> Visited;
  SmallVector<MachineBasicBlock *, 8> WorkList;
  MachineBasicBlock *Entry = &MF.front();
  MachineBasicBlock *Save = MFI.getSavePoint();

  if (!Save)
    Save = Entry;

  if (Entry != Save) {
    WorkList.push_back(Entry);
    Visited.insert(Entry);
  }
  Visited.insert(Save);

  MachineBasicBlock *Restore = MFI.getRestorePoint();
  if (Restore)
    // By construction Restore cannot be visited, otherwise it
    // means there exists a path to Restore that does not go
    // through Save.
    WorkList.push_back(Restore);

  while (!WorkList.empty()) {
    const MachineBasicBlock *CurBB = WorkList.pop_back_val();
    // By construction, the region that is after the save point is
    // dominated by the Save and post-dominated by the Restore.
    if (CurBB == Save && Save != Restore)
      continue;
    // Enqueue all the successors not already visited.
    // Those are by construction either before Save or after Restore.
    for (MachineBasicBlock *SuccBB : CurBB->successors())
      if (Visited.insert(SuccBB).second)
        WorkList.push_back(SuccBB);
  }

  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();

  MachineRegisterInfo &MRI = MF.getRegInfo();
  for (const CalleeSavedInfo &I : CSI) {
    for (MachineBasicBlock *MBB : Visited) {
      MCPhysReg Reg = I.getReg();
      // Add the callee-saved register as live-in.
      // It's killed at the spill.
      if (!MRI.isReserved(Reg) && !MBB->isLiveIn(Reg))
        MBB->addLiveIn(Reg);
    }
    // If a callee-saved register is spilled to another register rather than
    // spilled to the stack, the destination register has to be marked as live
    // for each MBB between the prologue and epilogue so that it is not
    // clobbered before it is reloaded in the epilogue. The Visited set
    // contains all blocks outside of the region delimited by
    // prologue/epilogue.
    if (I.isSpilledToReg()) {
      for (MachineBasicBlock &MBB : MF) {
        if (Visited.count(&MBB))
          continue;
        MCPhysReg DstReg = I.getDstReg();
        if (!MBB.isLiveIn(DstReg))
          MBB.addLiveIn(DstReg);
      }
    }
  }
}

/// Insert spill code for the callee-saved registers used in the function.
static void insertCSRSaves(MachineBasicBlock &SaveBlock,
                           ArrayRef<CalleeSavedInfo> CSI) {
  MachineFunction &MF = *SaveBlock.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  MachineBasicBlock::iterator I = SaveBlock.begin();
  if (!TFI->spillCalleeSavedRegisters(SaveBlock, I, CSI, TRI)) {
    for (const CalleeSavedInfo &CS : CSI) {
      // Insert the spill to the stack frame.
      unsigned Reg = CS.getReg();

      if (CS.isSpilledToReg()) {
        BuildMI(SaveBlock, I, DebugLoc(),
                TII.get(TargetOpcode::COPY), CS.getDstReg())
            .addReg(Reg, getKillRegState(true));
      } else {
        const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
        TII.storeRegToStackSlot(SaveBlock, I, Reg, true, CS.getFrameIdx(), RC,
                                TRI, Register());
      }
    }
  }
}

/// Insert restore code for the callee-saved registers used in the function.
static void insertCSRRestores(MachineBasicBlock &RestoreBlock,
                              std::vector<CalleeSavedInfo> &CSI) {
  MachineFunction &MF = *RestoreBlock.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  // Restore all registers immediately before the return and any
  // terminators that precede it.
  MachineBasicBlock::iterator I = RestoreBlock.getFirstTerminator();

  if (!TFI->restoreCalleeSavedRegisters(RestoreBlock, I, CSI, TRI)) {
    for (const CalleeSavedInfo &CI : reverse(CSI)) {
      unsigned Reg = CI.getReg();
      if (CI.isSpilledToReg()) {
        BuildMI(RestoreBlock, I, DebugLoc(), TII.get(TargetOpcode::COPY), Reg)
            .addReg(CI.getDstReg(), getKillRegState(true));
      } else {
        const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
        TII.loadRegFromStackSlot(RestoreBlock, I, Reg, CI.getFrameIdx(), RC,
                                 TRI, Register());
        assert(I != RestoreBlock.begin() &&
               "loadRegFromStackSlot didn't insert any code!");
        // Insert in reverse order. loadRegFromStackSlot can insert
        // multiple instructions.
      }
    }
  }
}

void PEI::spillCalleeSavedRegs(MachineFunction &MF) {
  // We can't list this requirement in getRequiredProperties because some
  // targets (WebAssembly) use virtual registers past this point, and the pass
  // pipeline is set up without giving the passes a chance to look at the
  // TargetMachine.
  // FIXME: Find a way to express this in getRequiredProperties.
  assert(MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::NoVRegs));

  const Function &F = MF.getFunction();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MinCSFrameIndex = std::numeric_limits<unsigned>::max();
  MaxCSFrameIndex = 0;

  // Determine which of the registers in the callee save list should be saved.
  BitVector SavedRegs;
  TFI->determineCalleeSaves(MF, SavedRegs, RS);

  // Assign stack slots for any callee-saved registers that must be spilled.
  assignCalleeSavedSpillSlots(MF, SavedRegs, MinCSFrameIndex, MaxCSFrameIndex);

  // Add the code to save and restore the callee saved registers.
  if (!F.hasFnAttribute(Attribute::Naked)) {
    MFI.setCalleeSavedInfoValid(true);

    std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
    if (!CSI.empty()) {
      if (!MFI.hasCalls())
        NumLeafFuncWithSpills++;

      for (MachineBasicBlock *SaveBlock : SaveBlocks)
        insertCSRSaves(*SaveBlock, CSI);

      // Update the live-in information of all the blocks up to the save point.
      updateLiveness(MF);

      for (MachineBasicBlock *RestoreBlock : RestoreBlocks)
        insertCSRRestores(*RestoreBlock, CSI);
    }
  }
}

/// AdjustStackOffset - Helper function used to adjust the stack frame offset.
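/// Offset is the running distance from the start of the local area in the
/// direction of stack growth; it is advanced past the object, the object's
/// offset is recorded in MFI (negated when the stack grows down), and MaxAlign
/// is raised to the object's alignment if necessary.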
static inline void AdjustStackOffset(MachineFrameInfo &MFI, int FrameIdx,
                                     bool StackGrowsDown, int64_t &Offset,
                                     Align &MaxAlign) {
  // If the stack grows down, add the object size to find the lowest address.
  if (StackGrowsDown)
    Offset += MFI.getObjectSize(FrameIdx);

  Align Alignment = MFI.getObjectAlign(FrameIdx);

  // If the alignment of this object is greater than that of the stack, then
  // increase the stack alignment to match.
  MaxAlign = std::max(MaxAlign, Alignment);

  // Adjust to alignment boundary.
  Offset = alignTo(Offset, Alignment);

  if (StackGrowsDown) {
    LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << -Offset
                      << "]\n");
    MFI.setObjectOffset(FrameIdx, -Offset); // Set the computed offset
  } else {
    LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << Offset
                      << "]\n");
    MFI.setObjectOffset(FrameIdx, Offset);
    Offset += MFI.getObjectSize(FrameIdx);
  }
}

/// Compute which bytes of fixed and callee-save stack area are unused and keep
/// track of them in StackBytesFree.
static inline void
computeFreeStackSlots(MachineFrameInfo &MFI, bool StackGrowsDown,
                      unsigned MinCSFrameIndex, unsigned MaxCSFrameIndex,
                      int64_t FixedCSEnd, BitVector &StackBytesFree) {
  // Avoid undefined int64_t -> int conversion below in extreme case.
  if (FixedCSEnd > std::numeric_limits<int>::max())
    return;

  StackBytesFree.resize(FixedCSEnd, true);

  SmallVector<int, 16> AllocatedFrameSlots;
  // Add fixed objects.
  for (int i = MFI.getObjectIndexBegin(); i != 0; ++i)
    // StackSlot scavenging is only implemented for the default stack.
    if (MFI.getStackID(i) == TargetStackID::Default)
      AllocatedFrameSlots.push_back(i);
  // Add callee-save objects if there are any.
  if (MinCSFrameIndex <= MaxCSFrameIndex) {
    for (int i = MinCSFrameIndex; i <= (int)MaxCSFrameIndex; ++i)
      if (MFI.getStackID(i) == TargetStackID::Default)
        AllocatedFrameSlots.push_back(i);
  }

  for (int i : AllocatedFrameSlots) {
    // These are converted from int64_t, but they should always fit in int
    // because of the FixedCSEnd check above.
    int ObjOffset = MFI.getObjectOffset(i);
    int ObjSize = MFI.getObjectSize(i);
    int ObjStart, ObjEnd;
    if (StackGrowsDown) {
      // ObjOffset is negative when StackGrowsDown is true.
      ObjStart = -ObjOffset - ObjSize;
      ObjEnd = -ObjOffset;
    } else {
      ObjStart = ObjOffset;
      ObjEnd = ObjOffset + ObjSize;
    }
    // Ignore fixed holes that are in the previous stack frame.
    if (ObjEnd > 0)
      StackBytesFree.reset(ObjStart, ObjEnd);
  }
}

/// Assign frame object to an unused portion of the stack in the fixed stack
/// object range. Return true if the allocation was successful.
static inline bool scavengeStackSlot(MachineFrameInfo &MFI, int FrameIdx,
                                     bool StackGrowsDown, Align MaxAlign,
                                     BitVector &StackBytesFree) {
  if (MFI.isVariableSizedObjectIndex(FrameIdx))
    return false;

  if (StackBytesFree.none()) {
    // clear it to speed up later scavengeStackSlot calls to
    // StackBytesFree.none()
    StackBytesFree.clear();
    return false;
  }

  Align ObjAlign = MFI.getObjectAlign(FrameIdx);
  if (ObjAlign > MaxAlign)
    return false;

  int64_t ObjSize = MFI.getObjectSize(FrameIdx);
  int FreeStart;
  for (FreeStart = StackBytesFree.find_first(); FreeStart != -1;
       FreeStart = StackBytesFree.find_next(FreeStart)) {

    // Check that free space has suitable alignment.
    unsigned ObjStart = StackGrowsDown ? FreeStart + ObjSize : FreeStart;
    if (alignTo(ObjStart, ObjAlign) != ObjStart)
      continue;

    if (FreeStart + ObjSize > StackBytesFree.size())
      return false;

    bool AllBytesFree = true;
    for (unsigned Byte = 0; Byte < ObjSize; ++Byte)
      if (!StackBytesFree.test(FreeStart + Byte)) {
        AllBytesFree = false;
        break;
      }
    if (AllBytesFree)
      break;
  }

  if (FreeStart == -1)
    return false;

  if (StackGrowsDown) {
    int ObjStart = -(FreeStart + ObjSize);
    LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") scavenged at SP["
                      << ObjStart << "]\n");
    MFI.setObjectOffset(FrameIdx, ObjStart);
  } else {
    LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") scavenged at SP["
                      << FreeStart << "]\n");
    MFI.setObjectOffset(FrameIdx, FreeStart);
  }

  StackBytesFree.reset(FreeStart, FreeStart + ObjSize);
  return true;
}

/// AssignProtectedObjSet - Helper function to assign large stack objects (i.e.,
/// those required to be close to the Stack Protector) to stack offsets.
static void AssignProtectedObjSet(const StackObjSet &UnassignedObjs,
                                  SmallSet<int, 16> &ProtectedObjs,
                                  MachineFrameInfo &MFI, bool StackGrowsDown,
                                  int64_t &Offset, Align &MaxAlign) {

  for (int i : UnassignedObjs) {
    AdjustStackOffset(MFI, i, StackGrowsDown, Offset, MaxAlign);
    ProtectedObjs.insert(i);
  }
}

/// calculateFrameObjectOffsets - Calculate actual frame offsets for all of the
/// abstract stack objects.
void PEI::calculateFrameObjectOffsets(MachineFunction &MF) {
  const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();

  bool StackGrowsDown =
      TFI.getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;

  // Loop over all of the stack objects, assigning sequential addresses...
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // Start at the beginning of the local area.
  // The Offset is the distance from the stack top in the direction
  // of stack growth -- so it's always nonnegative.
  int LocalAreaOffset = TFI.getOffsetOfLocalArea();
  if (StackGrowsDown)
    LocalAreaOffset = -LocalAreaOffset;
  assert(LocalAreaOffset >= 0 &&
         "Local area offset should be in direction of stack growth");
  int64_t Offset = LocalAreaOffset;

#ifdef EXPENSIVE_CHECKS
  for (unsigned i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i)
    if (!MFI.isDeadObjectIndex(i) &&
        MFI.getStackID(i) == TargetStackID::Default)
      assert(MFI.getObjectAlign(i) <= MFI.getMaxAlign() &&
             "MaxAlignment is invalid");
#endif

  // If there are fixed sized objects that are preallocated in the local area,
  // non-fixed objects can't be allocated right at the start of the local area.
  // Adjust 'Offset' to point to the end of the last fixed sized preallocated
  // object.
  for (int i = MFI.getObjectIndexBegin(); i != 0; ++i) {
    // Only allocate objects on the default stack.
    if (MFI.getStackID(i) != TargetStackID::Default)
      continue;

    int64_t FixedOff;
    if (StackGrowsDown) {
      // The maximum distance from the stack pointer is at the lower address of
      // the object -- which is given by the offset. For a down-growing stack
      // the offset is negative, so we negate the offset to get the distance.
      FixedOff = -MFI.getObjectOffset(i);
    } else {
      // The maximum distance from the stack pointer is at the upper
      // address of the object.
      FixedOff = MFI.getObjectOffset(i) + MFI.getObjectSize(i);
    }
    if (FixedOff > Offset) Offset = FixedOff;
  }

  Align MaxAlign = MFI.getMaxAlign();
  // First assign frame offsets to stack objects that are used to spill
  // callee saved registers.
  if (MaxCSFrameIndex >= MinCSFrameIndex) {
    for (unsigned i = 0; i <= MaxCSFrameIndex - MinCSFrameIndex; ++i) {
      unsigned FrameIndex =
          StackGrowsDown ? MinCSFrameIndex + i : MaxCSFrameIndex - i;

      // Only allocate objects on the default stack.
      if (MFI.getStackID(FrameIndex) != TargetStackID::Default)
        continue;

      // TODO: should this just be if (MFI.isDeadObjectIndex(FrameIndex))
      if (!StackGrowsDown && MFI.isDeadObjectIndex(FrameIndex))
        continue;

      AdjustStackOffset(MFI, FrameIndex, StackGrowsDown, Offset, MaxAlign);
    }
  }

  assert(MaxAlign == MFI.getMaxAlign() &&
         "MFI.getMaxAlign should already account for all callee-saved "
         "registers without a fixed stack slot");

  // FixedCSEnd is the stack offset to the end of the fixed and callee-save
  // stack area.
  int64_t FixedCSEnd = Offset;

  // Make sure the special register scavenging spill slot is closest to the
  // incoming stack pointer if a frame pointer is required and is closer
  // to the incoming rather than the final stack pointer.
  const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
  bool EarlyScavengingSlots =
      TFI.allocateScavengingFrameIndexesNearIncomingSP(MF);
  if (RS && EarlyScavengingSlots) {
    SmallVector<int, 2> SFIs;
    RS->getScavengingFrameIndices(SFIs);
    for (int SFI : SFIs)
      AdjustStackOffset(MFI, SFI, StackGrowsDown, Offset, MaxAlign);
  }

  // FIXME: Once this is working, then enable flag will change to a target
  // check for whether the frame is large enough to want to use virtual
  // frame index registers. Functions which don't want/need this optimization
  // will continue to use the existing code path.
  if (MFI.getUseLocalStackAllocationBlock()) {
    Align Alignment = MFI.getLocalFrameMaxAlign();

    // Adjust to alignment boundary.
    Offset = alignTo(Offset, Alignment);

    LLVM_DEBUG(dbgs() << "Local frame base offset: " << Offset << "\n");

    // Resolve offsets for objects in the local block.
    for (unsigned i = 0, e = MFI.getLocalFrameObjectCount(); i != e; ++i) {
      std::pair<int, int64_t> Entry = MFI.getLocalFrameObjectMap(i);
      int64_t FIOffset = (StackGrowsDown ? -Offset : Offset) + Entry.second;
      LLVM_DEBUG(dbgs() << "alloc FI(" << Entry.first << ") at SP[" << FIOffset
                        << "]\n");
      MFI.setObjectOffset(Entry.first, FIOffset);
    }
    // Allocate the local block
    Offset += MFI.getLocalFrameSize();

    MaxAlign = std::max(Alignment, MaxAlign);
  }

  // Retrieve the Exception Handler registration node.
  int EHRegNodeFrameIndex = std::numeric_limits<int>::max();
  if (const WinEHFuncInfo *FuncInfo = MF.getWinEHFuncInfo())
    EHRegNodeFrameIndex = FuncInfo->EHRegNodeFrameIndex;

  // Make sure that the stack protector comes before the local variables on the
  // stack.
  SmallSet<int, 16> ProtectedObjs;
  if (MFI.hasStackProtectorIndex()) {
    int StackProtectorFI = MFI.getStackProtectorIndex();
    StackObjSet LargeArrayObjs;
    StackObjSet SmallArrayObjs;
    StackObjSet AddrOfObjs;

    // If we need a stack protector, we need to make sure that
    // LocalStackSlotPass didn't already allocate a slot for it.
    // If we are told to use the LocalStackAllocationBlock, the stack protector
    // is expected to be already pre-allocated.
    if (MFI.getStackID(StackProtectorFI) != TargetStackID::Default) {
      // If the stack protector isn't on the default stack then it's up to the
      // target to set the stack offset.
      assert(MFI.getObjectOffset(StackProtectorFI) != 0 &&
             "Offset of stack protector on non-default stack expected to be "
             "already set.");
      assert(!MFI.isObjectPreAllocated(MFI.getStackProtectorIndex()) &&
             "Stack protector on non-default stack expected to not be "
             "pre-allocated by LocalStackSlotPass.");
    } else if (!MFI.getUseLocalStackAllocationBlock()) {
      AdjustStackOffset(MFI, StackProtectorFI, StackGrowsDown, Offset,
                        MaxAlign);
    } else if (!MFI.isObjectPreAllocated(MFI.getStackProtectorIndex())) {
      llvm_unreachable(
          "Stack protector not pre-allocated by LocalStackSlotPass.");
    }

    // Assign large stack objects first.
    for (unsigned i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) {
      if (MFI.isObjectPreAllocated(i) && MFI.getUseLocalStackAllocationBlock())
        continue;
      if (i >= MinCSFrameIndex && i <= MaxCSFrameIndex)
        continue;
      if (RS && RS->isScavengingFrameIndex((int)i))
        continue;
      if (MFI.isDeadObjectIndex(i))
        continue;
      if (StackProtectorFI == (int)i || EHRegNodeFrameIndex == (int)i)
        continue;
      // Only allocate objects on the default stack.
      if (MFI.getStackID(i) != TargetStackID::Default)
        continue;

      switch (MFI.getObjectSSPLayout(i)) {
      case MachineFrameInfo::SSPLK_None:
        continue;
      case MachineFrameInfo::SSPLK_SmallArray:
        SmallArrayObjs.insert(i);
        continue;
      case MachineFrameInfo::SSPLK_AddrOf:
        AddrOfObjs.insert(i);
        continue;
      case MachineFrameInfo::SSPLK_LargeArray:
        LargeArrayObjs.insert(i);
        continue;
      }
      llvm_unreachable("Unexpected SSPLayoutKind.");
    }

    // We expect **all** the protected stack objects to be pre-allocated by
    // LocalStackSlotPass. If it turns out that PEI still has to allocate some
    // of them, we may end up messing up the expected order of the objects.
    if (MFI.getUseLocalStackAllocationBlock() &&
        !(LargeArrayObjs.empty() && SmallArrayObjs.empty() &&
          AddrOfObjs.empty()))
      llvm_unreachable("Found protected stack objects not pre-allocated by "
                       "LocalStackSlotPass.");

    AssignProtectedObjSet(LargeArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
                          Offset, MaxAlign);
    AssignProtectedObjSet(SmallArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
                          Offset, MaxAlign);
    AssignProtectedObjSet(AddrOfObjs, ProtectedObjs, MFI, StackGrowsDown,
                          Offset, MaxAlign);
  }

  SmallVector<int, 8> ObjectsToAllocate;

  // Then prepare to assign frame offsets to stack objects that are not used to
  // spill callee saved registers.
  for (unsigned i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) {
    if (MFI.isObjectPreAllocated(i) && MFI.getUseLocalStackAllocationBlock())
      continue;
    if (i >= MinCSFrameIndex && i <= MaxCSFrameIndex)
      continue;
    if (RS && RS->isScavengingFrameIndex((int)i))
      continue;
    if (MFI.isDeadObjectIndex(i))
      continue;
    if (MFI.getStackProtectorIndex() == (int)i || EHRegNodeFrameIndex == (int)i)
      continue;
    if (ProtectedObjs.count(i))
      continue;
    // Only allocate objects on the default stack.
    if (MFI.getStackID(i) != TargetStackID::Default)
      continue;

    // Add the objects that we need to allocate to our working set.
    ObjectsToAllocate.push_back(i);
  }

  // Allocate the EH registration node first if one is present.
  if (EHRegNodeFrameIndex != std::numeric_limits<int>::max())
    AdjustStackOffset(MFI, EHRegNodeFrameIndex, StackGrowsDown, Offset,
                      MaxAlign);

  // Give the targets a chance to order the objects the way they like it.
  if (MF.getTarget().getOptLevel() != CodeGenOptLevel::None &&
      MF.getTarget().Options.StackSymbolOrdering)
    TFI.orderFrameObjects(MF, ObjectsToAllocate);

  // Keep track of which bytes in the fixed and callee-save range are used so
  // we can use the holes when allocating later stack objects. Only do this if
  // the stack protector isn't being used, the target requests it, and we're
  // optimizing.
  BitVector StackBytesFree;
  if (!ObjectsToAllocate.empty() &&
      MF.getTarget().getOptLevel() != CodeGenOptLevel::None &&
      MFI.getStackProtectorIndex() < 0 && TFI.enableStackSlotScavenging(MF))
    computeFreeStackSlots(MFI, StackGrowsDown, MinCSFrameIndex,
                          MaxCSFrameIndex, FixedCSEnd, StackBytesFree);

  // Now walk the objects and actually assign base offsets to them.
  for (auto &Object : ObjectsToAllocate)
    if (!scavengeStackSlot(MFI, Object, StackGrowsDown, MaxAlign,
                           StackBytesFree))
      AdjustStackOffset(MFI, Object, StackGrowsDown, Offset, MaxAlign);

  // Make sure the special register scavenging spill slot is closest to the
  // stack pointer.
  if (RS && !EarlyScavengingSlots) {
    SmallVector<int, 2> SFIs;
    RS->getScavengingFrameIndices(SFIs);
    for (int SFI : SFIs)
      AdjustStackOffset(MFI, SFI, StackGrowsDown, Offset, MaxAlign);
  }

  if (!TFI.targetHandlesStackFrameRounding()) {
    // If we have reserved argument space for call sites in the function
    // immediately on entry to the current function, count it as part of the
    // overall stack size.
    if (MFI.adjustsStack() && TFI.hasReservedCallFrame(MF))
      Offset += MFI.getMaxCallFrameSize();

    // Round up the size to a multiple of the alignment. If the function has
    // any calls or alloca's, align to the target's StackAlignment value to
    // ensure that the callee's frame or the alloca data is suitably aligned;
    // otherwise, for leaf functions, align to the TransientStackAlignment
    // value.
    Align StackAlign;
    if (MFI.adjustsStack() || MFI.hasVarSizedObjects() ||
        (RegInfo->hasStackRealignment(MF) && MFI.getObjectIndexEnd() != 0))
      StackAlign = TFI.getStackAlign();
    else
      StackAlign = TFI.getTransientStackAlign();

    // If the frame pointer is eliminated, all frame offsets will be relative
    // to SP not FP. Align to MaxAlign so this works.
    StackAlign = std::max(StackAlign, MaxAlign);
    int64_t OffsetBeforeAlignment = Offset;
    Offset = alignTo(Offset, StackAlign);

    // If we have increased the offset to fulfill the alignment constraints,
    // then the scavenging spill slots may become harder to reach from the
    // stack pointer; float them so they stay close.
    if (StackGrowsDown && OffsetBeforeAlignment != Offset && RS &&
        !EarlyScavengingSlots) {
      SmallVector<int, 2> SFIs;
      RS->getScavengingFrameIndices(SFIs);
      LLVM_DEBUG(if (!SFIs.empty()) llvm::dbgs()
                 << "Adjusting emergency spill slots!\n";);
      int64_t Delta = Offset - OffsetBeforeAlignment;
      for (int SFI : SFIs) {
        LLVM_DEBUG(llvm::dbgs()
                   << "Adjusting offset of emergency spill slot #" << SFI
                   << " from " << MFI.getObjectOffset(SFI););
        MFI.setObjectOffset(SFI, MFI.getObjectOffset(SFI) - Delta);
        LLVM_DEBUG(llvm::dbgs() << " to " << MFI.getObjectOffset(SFI) << "\n";);
      }
    }
  }

  // Update frame info to pretend that this is part of the stack...
  int64_t StackSize = Offset - LocalAreaOffset;
  MFI.setStackSize(StackSize);
  NumBytesStackSpace += StackSize;
}

/// insertPrologEpilogCode - Scan the function for modified callee saved
/// registers, insert spill code for these callee saved registers, then add
/// prolog and epilog code to the function.
void PEI::insertPrologEpilogCode(MachineFunction &MF) {
  const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();

  // Add prologue to the function...
  for (MachineBasicBlock *SaveBlock : SaveBlocks)
    TFI.emitPrologue(MF, *SaveBlock);

  // Add epilogue to restore the callee-save registers in each exiting block.
  for (MachineBasicBlock *RestoreBlock : RestoreBlocks)
    TFI.emitEpilogue(MF, *RestoreBlock);

  // Zero call used registers before restoring callee-saved registers.
  insertZeroCallUsedRegs(MF);

  for (MachineBasicBlock *SaveBlock : SaveBlocks)
    TFI.inlineStackProbe(MF, *SaveBlock);

  // Emit additional code that is required to support segmented stacks, if
  // we've been asked for it. This, when linked with a runtime with support
  // for segmented stacks (libgcc is one), will result in allocating stack
  // space in small chunks instead of one large contiguous block.
  if (MF.shouldSplitStack()) {
    for (MachineBasicBlock *SaveBlock : SaveBlocks)
      TFI.adjustForSegmentedStacks(MF, *SaveBlock);
  }

  // Emit additional code that is required to explicitly handle the stack in
  // HiPE native code (if needed) when loaded in the Erlang/OTP runtime. The
  // approach is rather similar to that of Segmented Stacks, but it uses a
  // different conditional check and another BIF for allocating more stack
  // space.
  if (MF.getFunction().getCallingConv() == CallingConv::HiPE)
    for (MachineBasicBlock *SaveBlock : SaveBlocks)
      TFI.adjustForHiPEPrologue(MF, *SaveBlock);
}

/// insertZeroCallUsedRegs - Zero out call used registers.
void PEI::insertZeroCallUsedRegs(MachineFunction &MF) {
  const Function &F = MF.getFunction();

  if (!F.hasFnAttribute("zero-call-used-regs"))
    return;

  using namespace ZeroCallUsedRegs;

  ZeroCallUsedRegsKind ZeroRegsKind =
      StringSwitch<ZeroCallUsedRegsKind>(
          F.getFnAttribute("zero-call-used-regs").getValueAsString())
          .Case("skip", ZeroCallUsedRegsKind::Skip)
          .Case("used-gpr-arg", ZeroCallUsedRegsKind::UsedGPRArg)
          .Case("used-gpr", ZeroCallUsedRegsKind::UsedGPR)
          .Case("used-arg", ZeroCallUsedRegsKind::UsedArg)
          .Case("used", ZeroCallUsedRegsKind::Used)
          .Case("all-gpr-arg", ZeroCallUsedRegsKind::AllGPRArg)
          .Case("all-gpr", ZeroCallUsedRegsKind::AllGPR)
          .Case("all-arg", ZeroCallUsedRegsKind::AllArg)
          .Case("all", ZeroCallUsedRegsKind::All);

  if (ZeroRegsKind == ZeroCallUsedRegsKind::Skip)
    return;

  const bool OnlyGPR = static_cast<unsigned>(ZeroRegsKind) & ONLY_GPR;
  const bool OnlyUsed = static_cast<unsigned>(ZeroRegsKind) & ONLY_USED;
  const bool OnlyArg = static_cast<unsigned>(ZeroRegsKind) & ONLY_ARG;

  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
  const BitVector AllocatableSet(TRI.getAllocatableSet(MF));

  // Mark all used registers.
  BitVector UsedRegs(TRI.getNumRegs());
  if (OnlyUsed)
    for (const MachineBasicBlock &MBB : MF)
      for (const MachineInstr &MI : MBB) {
        // Skip debug instructions.
        if (MI.isDebugInstr())
          continue;

        for (const MachineOperand &MO : MI.operands()) {
          if (!MO.isReg())
            continue;

          MCRegister Reg = MO.getReg();
          if (AllocatableSet[Reg.id()] && !MO.isImplicit() &&
              (MO.isDef() || MO.isUse()))
            UsedRegs.set(Reg.id());
        }
      }

  // Get a list of registers that are live into the function.
  BitVector LiveIns(TRI.getNumRegs());
  for (const MachineBasicBlock::RegisterMaskPair &LI : MF.front().liveins())
    LiveIns.set(LI.PhysReg);

  BitVector RegsToZero(TRI.getNumRegs());
  for (MCRegister Reg : AllocatableSet.set_bits()) {
    // Skip over fixed registers.
    if (TRI.isFixedRegister(MF, Reg))
      continue;

    // Want only general purpose registers.
    if (OnlyGPR && !TRI.isGeneralPurposeRegister(MF, Reg))
      continue;

    // Want only used registers.
    if (OnlyUsed && !UsedRegs[Reg.id()])
      continue;

    // Want only registers used for arguments.
    if (OnlyArg) {
      if (OnlyUsed) {
        if (!LiveIns[Reg.id()])
          continue;
      } else if (!TRI.isArgumentRegister(MF, Reg)) {
        continue;
      }
    }

    RegsToZero.set(Reg.id());
  }

  // Don't clear registers that are live when leaving the function.
  for (const MachineBasicBlock &MBB : MF)
    for (const MachineInstr &MI : MBB.terminators()) {
      if (!MI.isReturn())
        continue;

      for (const auto &MO : MI.operands()) {
        if (!MO.isReg())
          continue;

        MCRegister Reg = MO.getReg();
        if (!Reg)
          continue;

        // This picks up sibling registers (e.g. %al -> %ah).
        for (MCRegUnit Unit : TRI.regunits(Reg))
          RegsToZero.reset(Unit);

        for (MCPhysReg SReg : TRI.sub_and_superregs_inclusive(Reg))
          RegsToZero.reset(SReg);
      }
    }

  // Don't need to clear registers that are used/clobbered by terminating
  // instructions.
  for (const MachineBasicBlock &MBB : MF) {
    if (!MBB.isReturnBlock())
      continue;

    MachineBasicBlock::const_iterator MBBI = MBB.getFirstTerminator();
    for (MachineBasicBlock::const_iterator I = MBBI, E = MBB.end(); I != E;
         ++I) {
      for (const MachineOperand &MO : I->operands()) {
        if (!MO.isReg())
          continue;

        MCRegister Reg = MO.getReg();
        if (!Reg)
          continue;

        for (const MCPhysReg Reg : TRI.sub_and_superregs_inclusive(Reg))
          RegsToZero.reset(Reg);
      }
    }
  }

  // Don't clear registers that must be preserved.
  for (const MCPhysReg *CSRegs = TRI.getCalleeSavedRegs(&MF);
       MCPhysReg CSReg = *CSRegs; ++CSRegs)
    for (MCRegister Reg : TRI.sub_and_superregs_inclusive(CSReg))
      RegsToZero.reset(Reg.id());

  const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();
  for (MachineBasicBlock &MBB : MF)
    if (MBB.isReturnBlock())
      TFI.emitZeroCallUsedRegs(RegsToZero, MBB);
}

/// Replace all FrameIndex operands with physical register references and
/// actual offsets.
void PEI::replaceFrameIndicesBackward(MachineFunction &MF) {
  const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();

  for (auto &MBB : MF) {
    int SPAdj = 0;
    if (!MBB.succ_empty()) {
      // Get the SP adjustment for the end of MBB from the start of any of its
      // successors. They should all be the same.
      assert(all_of(MBB.successors(), [&MBB](const MachineBasicBlock *Succ) {
        return Succ->getCallFrameSize() ==
               (*MBB.succ_begin())->getCallFrameSize();
      }));
      const MachineBasicBlock &FirstSucc = **MBB.succ_begin();
      SPAdj = TFI.alignSPAdjust(FirstSucc.getCallFrameSize());
      if (TFI.getStackGrowthDirection() == TargetFrameLowering::StackGrowsUp)
        SPAdj = -SPAdj;
    }

    replaceFrameIndicesBackward(&MBB, MF, SPAdj);

    // We can't track the call frame size after call frame pseudos have been
    // eliminated. Set it to zero everywhere to keep MachineVerifier happy.
    MBB.setCallFrameSize(0);
  }
}

/// replaceFrameIndices - Replace all MO_FrameIndex operands with physical
/// register references and actual offsets.
void PEI::replaceFrameIndices(MachineFunction &MF) {
  const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();

  for (auto &MBB : MF) {
    int SPAdj = TFI.alignSPAdjust(MBB.getCallFrameSize());
    if (TFI.getStackGrowthDirection() == TargetFrameLowering::StackGrowsUp)
      SPAdj = -SPAdj;

    replaceFrameIndices(&MBB, MF, SPAdj);

    // We can't track the call frame size after call frame pseudos have been
    // eliminated. Set it to zero everywhere to keep MachineVerifier happy.
    MBB.setCallFrameSize(0);
  }
}

bool PEI::replaceFrameIndexDebugInstr(MachineFunction &MF, MachineInstr &MI,
                                      unsigned OpIdx, int SPAdj) {
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
  if (MI.isDebugValue()) {

    MachineOperand &Op = MI.getOperand(OpIdx);
    assert(MI.isDebugOperand(&Op) &&
           "Frame indices can only appear as a debug operand in a DBG_VALUE*"
           " machine instruction");
    Register Reg;
    unsigned FrameIdx = Op.getIndex();
    unsigned Size = MF.getFrameInfo().getObjectSize(FrameIdx);

    StackOffset Offset = TFI->getFrameIndexReference(MF, FrameIdx, Reg);
    Op.ChangeToRegister(Reg, false /*isDef*/);

    const DIExpression *DIExpr = MI.getDebugExpression();

    // If we have a direct DBG_VALUE, and its location expression isn't
    // currently complex, then adding an offset will morph it into a
    // complex location that is interpreted as being a memory address.
    // This changes a pointer-valued variable to dereference that pointer,
    // which is incorrect. Fix by adding DW_OP_stack_value.

    if (MI.isNonListDebugValue()) {
      unsigned PrependFlags = DIExpression::ApplyOffset;
      if (!MI.isIndirectDebugValue() && !DIExpr->isComplex())
        PrependFlags |= DIExpression::StackValue;

      // If we have a DBG_VALUE that is indirect and has an implicit location
      // expression, we need to insert a deref before prepending a memory
      // location expression. Also, after doing this we change the DBG_VALUE
      // to be direct.
      if (MI.isIndirectDebugValue() && DIExpr->isImplicit()) {
        SmallVector<uint64_t, 2> Ops = {dwarf::DW_OP_deref_size, Size};
        bool WithStackValue = true;
        DIExpr = DIExpression::prependOpcodes(DIExpr, Ops, WithStackValue);
        // Make the DBG_VALUE direct.
        MI.getDebugOffset().ChangeToRegister(0, false);
      }
      DIExpr = TRI.prependOffsetExpression(DIExpr, PrependFlags, Offset);
    } else {
      // The debug operand at DebugOpIndex was a frame index at offset
      // `Offset`; now that the operand has been replaced with the frame
      // register, we must add Offset with `register x, plus Offset`.
      unsigned DebugOpIndex = MI.getDebugOperandIndex(&Op);
      SmallVector<uint64_t, 3> Ops;
      TRI.getOffsetOpcodes(Offset, Ops);
      DIExpr = DIExpression::appendOpsToArg(DIExpr, Ops, DebugOpIndex);
    }
    MI.getDebugExpressionOp().setMetadata(DIExpr);
    return true;
  }

  if (MI.isDebugPHI()) {
    // Allow stack ref to continue onwards.
    return true;
  }

  // TODO: This code should be commoned with the code for
  // PATCHPOINT. There's no good reason for the difference in
  // implementation other than historical accident. The only
  // remaining difference is the unconditional use of the stack
  // pointer as the base register.
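  // For STATEPOINT, the frame index operand is followed by an immediate
  // offset operand: fold the frame object's offset and the current SP
  // adjustment into that immediate, and rewrite the frame index operand to
  // use the chosen base register.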
  if (MI.getOpcode() == TargetOpcode::STATEPOINT) {
    assert((!MI.isDebugValue() || OpIdx == 0) &&
           "Frame indices can only appear as the first operand of a "
           "DBG_VALUE machine instruction");
    Register Reg;
    MachineOperand &Offset = MI.getOperand(OpIdx + 1);
    StackOffset refOffset = TFI->getFrameIndexReferencePreferSP(
        MF, MI.getOperand(OpIdx).getIndex(), Reg, /*IgnoreSPUpdates*/ false);
    assert(!refOffset.getScalable() &&
           "Frame offsets with a scalable component are not supported");
    Offset.setImm(Offset.getImm() + refOffset.getFixed() + SPAdj);
    MI.getOperand(OpIdx).ChangeToRegister(Reg, false /*isDef*/);
    return true;
  }
  return false;
}

void PEI::replaceFrameIndicesBackward(MachineBasicBlock *BB,
                                      MachineFunction &MF, int &SPAdj) {
  assert(MF.getSubtarget().getRegisterInfo() &&
         "getRegisterInfo() must be implemented!");

  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
  const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();

  RegScavenger *LocalRS = FrameIndexEliminationScavenging ? RS : nullptr;
  if (LocalRS)
    LocalRS->enterBasicBlockEnd(*BB);

  for (MachineBasicBlock::iterator I = BB->end(); I != BB->begin();) {
    MachineInstr &MI = *std::prev(I);

    if (TII.isFrameInstr(MI)) {
      SPAdj -= TII.getSPAdjust(MI);
      TFI.eliminateCallFramePseudoInstr(MF, *BB, &MI);
      continue;
    }

    // Step backwards to get the liveness state at (immediately after) MI.
    if (LocalRS)
      LocalRS->backward(I);

    bool RemovedMI = false;
    for (const auto &[Idx, Op] : enumerate(MI.operands())) {
      if (!Op.isFI())
        continue;

      if (replaceFrameIndexDebugInstr(MF, MI, Idx, SPAdj))
        continue;

      // Eliminate this FrameIndex operand.
      RemovedMI = TRI.eliminateFrameIndex(MI, SPAdj, Idx, LocalRS);
      if (RemovedMI)
        break;
    }

    if (!RemovedMI)
      --I;
  }
}

void PEI::replaceFrameIndices(MachineBasicBlock *BB, MachineFunction &MF,
                              int &SPAdj) {
  assert(MF.getSubtarget().getRegisterInfo() &&
         "getRegisterInfo() must be implemented!");
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();

  bool InsideCallSequence = false;

  for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ) {
    if (TII.isFrameInstr(*I)) {
      InsideCallSequence = TII.isFrameSetup(*I);
      SPAdj += TII.getSPAdjust(*I);
      I = TFI->eliminateCallFramePseudoInstr(MF, *BB, I);
      continue;
    }

    MachineInstr &MI = *I;
    bool DoIncr = true;
    bool DidFinishLoop = true;
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      if (!MI.getOperand(i).isFI())
        continue;

      if (replaceFrameIndexDebugInstr(MF, MI, i, SPAdj))
        continue;

      // Some instructions (e.g. inline asm instructions) can have
      // multiple frame indices and/or cause eliminateFrameIndex
      // to insert more than one instruction. We need the register
      // scavenger to go through all of these instructions so that
      // it can update its register information. We keep the
      // iterator at the point before insertion so that we can
      // revisit them in full.
      bool AtBeginning = (I == BB->begin());
      if (!AtBeginning) --I;

      // If this instruction has a FrameIndex operand, we need to
      // use that target machine register info object to eliminate
      // it.
      TRI.eliminateFrameIndex(MI, SPAdj, i);

      // Reset the iterator if we were at the beginning of the BB.
      if (AtBeginning) {
        I = BB->begin();
        DoIncr = false;
      }

      DidFinishLoop = false;
      break;
    }

    // If we are looking at a call sequence, we need to keep track of
    // the SP adjustment made by each instruction in the sequence.
    // This includes both the frame setup/destroy pseudos (handled above),
    // as well as other instructions that have side effects w.r.t the SP.
    // Note that this must come after eliminateFrameIndex, because
    // if I itself referred to a frame index, we shouldn't count its own
    // adjustment.
    if (DidFinishLoop && InsideCallSequence)
      SPAdj += TII.getSPAdjust(MI);

    if (DoIncr && I != BB->end())
      ++I;
  }
}