1 //===- PrologEpilogInserter.cpp - Insert Prolog/Epilog code in function ---===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This pass is responsible for finalizing the functions frame layout, saving 10 // callee saved registers, and for emitting prolog & epilog code for the 11 // function. 12 // 13 // This pass must be run after register allocation. After this pass is 14 // executed, it is illegal to construct MO_FrameIndex operands. 15 // 16 //===----------------------------------------------------------------------===// 17 18 #include "llvm/ADT/ArrayRef.h" 19 #include "llvm/ADT/BitVector.h" 20 #include "llvm/ADT/DepthFirstIterator.h" 21 #include "llvm/ADT/STLExtras.h" 22 #include "llvm/ADT/SetVector.h" 23 #include "llvm/ADT/SmallPtrSet.h" 24 #include "llvm/ADT/SmallSet.h" 25 #include "llvm/ADT/SmallVector.h" 26 #include "llvm/ADT/Statistic.h" 27 #include "llvm/Analysis/OptimizationRemarkEmitter.h" 28 #include "llvm/CodeGen/MachineBasicBlock.h" 29 #include "llvm/CodeGen/MachineDominators.h" 30 #include "llvm/CodeGen/MachineFrameInfo.h" 31 #include "llvm/CodeGen/MachineFunction.h" 32 #include "llvm/CodeGen/MachineFunctionPass.h" 33 #include "llvm/CodeGen/MachineInstr.h" 34 #include "llvm/CodeGen/MachineInstrBuilder.h" 35 #include "llvm/CodeGen/MachineLoopInfo.h" 36 #include "llvm/CodeGen/MachineModuleInfo.h" 37 #include "llvm/CodeGen/MachineOperand.h" 38 #include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h" 39 #include "llvm/CodeGen/MachineRegisterInfo.h" 40 #include "llvm/CodeGen/RegisterScavenging.h" 41 #include "llvm/CodeGen/TargetFrameLowering.h" 42 #include "llvm/CodeGen/TargetInstrInfo.h" 43 #include "llvm/CodeGen/TargetOpcodes.h" 44 #include "llvm/CodeGen/TargetRegisterInfo.h" 45 #include 
"llvm/CodeGen/TargetSubtargetInfo.h" 46 #include "llvm/CodeGen/WinEHFuncInfo.h" 47 #include "llvm/IR/Attributes.h" 48 #include "llvm/IR/CallingConv.h" 49 #include "llvm/IR/DebugInfoMetadata.h" 50 #include "llvm/IR/DiagnosticInfo.h" 51 #include "llvm/IR/Function.h" 52 #include "llvm/IR/InlineAsm.h" 53 #include "llvm/IR/LLVMContext.h" 54 #include "llvm/InitializePasses.h" 55 #include "llvm/MC/MCRegisterInfo.h" 56 #include "llvm/Pass.h" 57 #include "llvm/Support/CodeGen.h" 58 #include "llvm/Support/CommandLine.h" 59 #include "llvm/Support/Debug.h" 60 #include "llvm/Support/ErrorHandling.h" 61 #include "llvm/Support/MathExtras.h" 62 #include "llvm/Support/raw_ostream.h" 63 #include "llvm/Target/TargetMachine.h" 64 #include "llvm/Target/TargetOptions.h" 65 #include <algorithm> 66 #include <cassert> 67 #include <cstdint> 68 #include <functional> 69 #include <limits> 70 #include <utility> 71 #include <vector> 72 73 using namespace llvm; 74 75 #define DEBUG_TYPE "prologepilog" 76 77 using MBBVector = SmallVector<MachineBasicBlock *, 4>; 78 79 STATISTIC(NumLeafFuncWithSpills, "Number of leaf functions with CSRs"); 80 STATISTIC(NumFuncSeen, "Number of functions seen in PEI"); 81 82 83 namespace { 84 85 class PEI : public MachineFunctionPass { 86 public: 87 static char ID; 88 89 PEI() : MachineFunctionPass(ID) { 90 initializePEIPass(*PassRegistry::getPassRegistry()); 91 } 92 93 void getAnalysisUsage(AnalysisUsage &AU) const override; 94 95 /// runOnMachineFunction - Insert prolog/epilog code and replace abstract 96 /// frame indexes with appropriate references. 97 bool runOnMachineFunction(MachineFunction &MF) override; 98 99 private: 100 RegScavenger *RS; 101 102 // MinCSFrameIndex, MaxCSFrameIndex - Keeps the range of callee saved 103 // stack frame indexes. 104 unsigned MinCSFrameIndex = std::numeric_limits<unsigned>::max(); 105 unsigned MaxCSFrameIndex = 0; 106 107 // Save and Restore blocks of the current function. 
Typically there is a 108 // single save block, unless Windows EH funclets are involved. 109 MBBVector SaveBlocks; 110 MBBVector RestoreBlocks; 111 112 // Flag to control whether to use the register scavenger to resolve 113 // frame index materialization registers. Set according to 114 // TRI->requiresFrameIndexScavenging() for the current function. 115 bool FrameIndexVirtualScavenging; 116 117 // Flag to control whether the scavenger should be passed even though 118 // FrameIndexVirtualScavenging is used. 119 bool FrameIndexEliminationScavenging; 120 121 // Emit remarks. 122 MachineOptimizationRemarkEmitter *ORE = nullptr; 123 124 void calculateCallFrameInfo(MachineFunction &MF); 125 void calculateSaveRestoreBlocks(MachineFunction &MF); 126 void spillCalleeSavedRegs(MachineFunction &MF); 127 128 void calculateFrameObjectOffsets(MachineFunction &MF); 129 void replaceFrameIndices(MachineFunction &MF); 130 void replaceFrameIndices(MachineBasicBlock *BB, MachineFunction &MF, 131 int &SPAdj); 132 void insertPrologEpilogCode(MachineFunction &MF); 133 void insertZeroCallUsedRegs(MachineFunction &MF); 134 }; 135 136 } // end anonymous namespace 137 138 char PEI::ID = 0; 139 140 char &llvm::PrologEpilogCodeInserterID = PEI::ID; 141 142 INITIALIZE_PASS_BEGIN(PEI, DEBUG_TYPE, "Prologue/Epilogue Insertion", false, 143 false) 144 INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo) 145 INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree) 146 INITIALIZE_PASS_DEPENDENCY(MachineOptimizationRemarkEmitterPass) 147 INITIALIZE_PASS_END(PEI, DEBUG_TYPE, 148 "Prologue/Epilogue Insertion & Frame Finalization", false, 149 false) 150 151 MachineFunctionPass *llvm::createPrologEpilogInserterPass() { 152 return new PEI(); 153 } 154 155 STATISTIC(NumBytesStackSpace, 156 "Number of bytes used for stack in all functions"); 157 158 void PEI::getAnalysisUsage(AnalysisUsage &AU) const { 159 AU.setPreservesCFG(); 160 AU.addPreserved<MachineLoopInfo>(); 161 AU.addPreserved<MachineDominatorTree>(); 162 
AU.addRequired<MachineOptimizationRemarkEmitterPass>(); 163 MachineFunctionPass::getAnalysisUsage(AU); 164 } 165 166 /// StackObjSet - A set of stack object indexes 167 using StackObjSet = SmallSetVector<int, 8>; 168 169 using SavedDbgValuesMap = 170 SmallDenseMap<MachineBasicBlock *, SmallVector<MachineInstr *, 4>, 4>; 171 172 /// Stash DBG_VALUEs that describe parameters and which are placed at the start 173 /// of the block. Later on, after the prologue code has been emitted, the 174 /// stashed DBG_VALUEs will be reinserted at the start of the block. 175 static void stashEntryDbgValues(MachineBasicBlock &MBB, 176 SavedDbgValuesMap &EntryDbgValues) { 177 SmallVector<const MachineInstr *, 4> FrameIndexValues; 178 179 for (auto &MI : MBB) { 180 if (!MI.isDebugInstr()) 181 break; 182 if (!MI.isDebugValue() || !MI.getDebugVariable()->isParameter()) 183 continue; 184 if (any_of(MI.debug_operands(), 185 [](const MachineOperand &MO) { return MO.isFI(); })) { 186 // We can only emit valid locations for frame indices after the frame 187 // setup, so do not stash away them. 188 FrameIndexValues.push_back(&MI); 189 continue; 190 } 191 const DILocalVariable *Var = MI.getDebugVariable(); 192 const DIExpression *Expr = MI.getDebugExpression(); 193 auto Overlaps = [Var, Expr](const MachineInstr *DV) { 194 return Var == DV->getDebugVariable() && 195 Expr->fragmentsOverlap(DV->getDebugExpression()); 196 }; 197 // See if the debug value overlaps with any preceding debug value that will 198 // not be stashed. If that is the case, then we can't stash this value, as 199 // we would then reorder the values at reinsertion. 200 if (llvm::none_of(FrameIndexValues, Overlaps)) 201 EntryDbgValues[&MBB].push_back(&MI); 202 } 203 204 // Remove stashed debug values from the block. 
205 if (EntryDbgValues.count(&MBB)) 206 for (auto *MI : EntryDbgValues[&MBB]) 207 MI->removeFromParent(); 208 } 209 210 /// runOnMachineFunction - Insert prolog/epilog code and replace abstract 211 /// frame indexes with appropriate references. 212 bool PEI::runOnMachineFunction(MachineFunction &MF) { 213 NumFuncSeen++; 214 const Function &F = MF.getFunction(); 215 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); 216 const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering(); 217 218 RS = TRI->requiresRegisterScavenging(MF) ? new RegScavenger() : nullptr; 219 FrameIndexVirtualScavenging = TRI->requiresFrameIndexScavenging(MF); 220 ORE = &getAnalysis<MachineOptimizationRemarkEmitterPass>().getORE(); 221 222 // Calculate the MaxCallFrameSize and AdjustsStack variables for the 223 // function's frame information. Also eliminates call frame pseudo 224 // instructions. 225 calculateCallFrameInfo(MF); 226 227 // Determine placement of CSR spill/restore code and prolog/epilog code: 228 // place all spills in the entry block, all restores in return blocks. 229 calculateSaveRestoreBlocks(MF); 230 231 // Stash away DBG_VALUEs that should not be moved by insertion of prolog code. 232 SavedDbgValuesMap EntryDbgValues; 233 for (MachineBasicBlock *SaveBlock : SaveBlocks) 234 stashEntryDbgValues(*SaveBlock, EntryDbgValues); 235 236 // Handle CSR spilling and restoring, for targets that need it. 237 if (MF.getTarget().usesPhysRegsForValues()) 238 spillCalleeSavedRegs(MF); 239 240 // Allow the target machine to make final modifications to the function 241 // before the frame layout is finalized. 242 TFI->processFunctionBeforeFrameFinalized(MF, RS); 243 244 // Calculate actual frame offsets for all abstract stack objects... 245 calculateFrameObjectOffsets(MF); 246 247 // Add prolog and epilog code to the function. This function is required 248 // to align the stack frame as necessary for any stack variables or 249 // called functions. 
Because of this, calculateCalleeSavedRegisters() 250 // must be called before this function in order to set the AdjustsStack 251 // and MaxCallFrameSize variables. 252 if (!F.hasFnAttribute(Attribute::Naked)) 253 insertPrologEpilogCode(MF); 254 255 // Reinsert stashed debug values at the start of the entry blocks. 256 for (auto &I : EntryDbgValues) 257 I.first->insert(I.first->begin(), I.second.begin(), I.second.end()); 258 259 // Allow the target machine to make final modifications to the function 260 // before the frame layout is finalized. 261 TFI->processFunctionBeforeFrameIndicesReplaced(MF, RS); 262 263 // Replace all MO_FrameIndex operands with physical register references 264 // and actual offsets. 265 // 266 replaceFrameIndices(MF); 267 268 // If register scavenging is needed, as we've enabled doing it as a 269 // post-pass, scavenge the virtual registers that frame index elimination 270 // inserted. 271 if (TRI->requiresRegisterScavenging(MF) && FrameIndexVirtualScavenging) 272 scavengeFrameVirtualRegs(MF, *RS); 273 274 // Warn on stack size when we exceeds the given limit. 275 MachineFrameInfo &MFI = MF.getFrameInfo(); 276 uint64_t StackSize = MFI.getStackSize(); 277 278 unsigned Threshold = UINT_MAX; 279 if (MF.getFunction().hasFnAttribute("warn-stack-size")) { 280 bool Failed = MF.getFunction() 281 .getFnAttribute("warn-stack-size") 282 .getValueAsString() 283 .getAsInteger(10, Threshold); 284 // Verifier should have caught this. 
285 assert(!Failed && "Invalid warn-stack-size fn attr value"); 286 (void)Failed; 287 } 288 if (StackSize > Threshold) { 289 DiagnosticInfoStackSize DiagStackSize(F, StackSize, Threshold, DS_Warning); 290 F.getContext().diagnose(DiagStackSize); 291 } 292 ORE->emit([&]() { 293 return MachineOptimizationRemarkAnalysis(DEBUG_TYPE, "StackSize", 294 MF.getFunction().getSubprogram(), 295 &MF.front()) 296 << ore::NV("NumStackBytes", StackSize) << " stack bytes in function"; 297 }); 298 299 delete RS; 300 SaveBlocks.clear(); 301 RestoreBlocks.clear(); 302 MFI.setSavePoint(nullptr); 303 MFI.setRestorePoint(nullptr); 304 return true; 305 } 306 307 /// Calculate the MaxCallFrameSize and AdjustsStack 308 /// variables for the function's frame information and eliminate call frame 309 /// pseudo instructions. 310 void PEI::calculateCallFrameInfo(MachineFunction &MF) { 311 const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo(); 312 const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering(); 313 MachineFrameInfo &MFI = MF.getFrameInfo(); 314 315 unsigned MaxCallFrameSize = 0; 316 bool AdjustsStack = MFI.adjustsStack(); 317 318 // Get the function call frame set-up and tear-down instruction opcode 319 unsigned FrameSetupOpcode = TII.getCallFrameSetupOpcode(); 320 unsigned FrameDestroyOpcode = TII.getCallFrameDestroyOpcode(); 321 322 // Early exit for targets which have no call frame setup/destroy pseudo 323 // instructions. 324 if (FrameSetupOpcode == ~0u && FrameDestroyOpcode == ~0u) 325 return; 326 327 std::vector<MachineBasicBlock::iterator> FrameSDOps; 328 for (MachineBasicBlock &BB : MF) 329 for (MachineBasicBlock::iterator I = BB.begin(); I != BB.end(); ++I) 330 if (TII.isFrameInstr(*I)) { 331 unsigned Size = TII.getFrameSize(*I); 332 if (Size > MaxCallFrameSize) MaxCallFrameSize = Size; 333 AdjustsStack = true; 334 FrameSDOps.push_back(I); 335 } else if (I->isInlineAsm()) { 336 // Some inline asm's need a stack frame, as indicated by operand 1. 
337 unsigned ExtraInfo = I->getOperand(InlineAsm::MIOp_ExtraInfo).getImm(); 338 if (ExtraInfo & InlineAsm::Extra_IsAlignStack) 339 AdjustsStack = true; 340 } 341 342 assert(!MFI.isMaxCallFrameSizeComputed() || 343 (MFI.getMaxCallFrameSize() == MaxCallFrameSize && 344 MFI.adjustsStack() == AdjustsStack)); 345 MFI.setAdjustsStack(AdjustsStack); 346 MFI.setMaxCallFrameSize(MaxCallFrameSize); 347 348 for (MachineBasicBlock::iterator I : FrameSDOps) { 349 // If call frames are not being included as part of the stack frame, and 350 // the target doesn't indicate otherwise, remove the call frame pseudos 351 // here. The sub/add sp instruction pairs are still inserted, but we don't 352 // need to track the SP adjustment for frame index elimination. 353 if (TFI->canSimplifyCallFramePseudos(MF)) 354 TFI->eliminateCallFramePseudoInstr(MF, *I->getParent(), I); 355 } 356 } 357 358 /// Compute the sets of entry and return blocks for saving and restoring 359 /// callee-saved registers, and placing prolog and epilog code. 360 void PEI::calculateSaveRestoreBlocks(MachineFunction &MF) { 361 const MachineFrameInfo &MFI = MF.getFrameInfo(); 362 363 // Even when we do not change any CSR, we still want to insert the 364 // prologue and epilogue of the function. 365 // So set the save points for those. 366 367 // Use the points found by shrink-wrapping, if any. 368 if (MFI.getSavePoint()) { 369 SaveBlocks.push_back(MFI.getSavePoint()); 370 assert(MFI.getRestorePoint() && "Both restore and save must be set"); 371 MachineBasicBlock *RestoreBlock = MFI.getRestorePoint(); 372 // If RestoreBlock does not have any successor and is not a return block 373 // then the end point is unreachable and we do not need to insert any 374 // epilogue. 375 if (!RestoreBlock->succ_empty() || RestoreBlock->isReturnBlock()) 376 RestoreBlocks.push_back(RestoreBlock); 377 return; 378 } 379 380 // Save refs to entry and return blocks. 
381 SaveBlocks.push_back(&MF.front()); 382 for (MachineBasicBlock &MBB : MF) { 383 if (MBB.isEHFuncletEntry()) 384 SaveBlocks.push_back(&MBB); 385 if (MBB.isReturnBlock()) 386 RestoreBlocks.push_back(&MBB); 387 } 388 } 389 390 static void assignCalleeSavedSpillSlots(MachineFunction &F, 391 const BitVector &SavedRegs, 392 unsigned &MinCSFrameIndex, 393 unsigned &MaxCSFrameIndex) { 394 if (SavedRegs.empty()) 395 return; 396 397 const TargetRegisterInfo *RegInfo = F.getSubtarget().getRegisterInfo(); 398 const MCPhysReg *CSRegs = F.getRegInfo().getCalleeSavedRegs(); 399 BitVector CSMask(SavedRegs.size()); 400 401 for (unsigned i = 0; CSRegs[i]; ++i) 402 CSMask.set(CSRegs[i]); 403 404 std::vector<CalleeSavedInfo> CSI; 405 for (unsigned i = 0; CSRegs[i]; ++i) { 406 unsigned Reg = CSRegs[i]; 407 if (SavedRegs.test(Reg)) { 408 bool SavedSuper = false; 409 for (const MCPhysReg &SuperReg : RegInfo->superregs(Reg)) { 410 // Some backends set all aliases for some registers as saved, such as 411 // Mips's $fp, so they appear in SavedRegs but not CSRegs. 412 if (SavedRegs.test(SuperReg) && CSMask.test(SuperReg)) { 413 SavedSuper = true; 414 break; 415 } 416 } 417 418 if (!SavedSuper) 419 CSI.push_back(CalleeSavedInfo(Reg)); 420 } 421 } 422 423 const TargetFrameLowering *TFI = F.getSubtarget().getFrameLowering(); 424 MachineFrameInfo &MFI = F.getFrameInfo(); 425 if (!TFI->assignCalleeSavedSpillSlots(F, RegInfo, CSI, MinCSFrameIndex, 426 MaxCSFrameIndex)) { 427 // If target doesn't implement this, use generic code. 428 429 if (CSI.empty()) 430 return; // Early exit if no callee saved registers are modified! 431 432 unsigned NumFixedSpillSlots; 433 const TargetFrameLowering::SpillSlot *FixedSpillSlots = 434 TFI->getCalleeSavedSpillSlots(NumFixedSpillSlots); 435 436 // Now that we know which registers need to be saved and restored, allocate 437 // stack slots for them. 
438 for (auto &CS : CSI) { 439 // If the target has spilled this register to another register, we don't 440 // need to allocate a stack slot. 441 if (CS.isSpilledToReg()) 442 continue; 443 444 unsigned Reg = CS.getReg(); 445 const TargetRegisterClass *RC = RegInfo->getMinimalPhysRegClass(Reg); 446 447 int FrameIdx; 448 if (RegInfo->hasReservedSpillSlot(F, Reg, FrameIdx)) { 449 CS.setFrameIdx(FrameIdx); 450 continue; 451 } 452 453 // Check to see if this physreg must be spilled to a particular stack slot 454 // on this target. 455 const TargetFrameLowering::SpillSlot *FixedSlot = FixedSpillSlots; 456 while (FixedSlot != FixedSpillSlots + NumFixedSpillSlots && 457 FixedSlot->Reg != Reg) 458 ++FixedSlot; 459 460 unsigned Size = RegInfo->getSpillSize(*RC); 461 if (FixedSlot == FixedSpillSlots + NumFixedSpillSlots) { 462 // Nope, just spill it anywhere convenient. 463 Align Alignment = RegInfo->getSpillAlign(*RC); 464 // We may not be able to satisfy the desired alignment specification of 465 // the TargetRegisterClass if the stack alignment is smaller. Use the 466 // min. 467 Alignment = std::min(Alignment, TFI->getStackAlign()); 468 FrameIdx = MFI.CreateStackObject(Size, Alignment, true); 469 if ((unsigned)FrameIdx < MinCSFrameIndex) MinCSFrameIndex = FrameIdx; 470 if ((unsigned)FrameIdx > MaxCSFrameIndex) MaxCSFrameIndex = FrameIdx; 471 } else { 472 // Spill it to the stack where we must. 473 FrameIdx = MFI.CreateFixedSpillStackObject(Size, FixedSlot->Offset); 474 } 475 476 CS.setFrameIdx(FrameIdx); 477 } 478 } 479 480 MFI.setCalleeSavedInfo(CSI); 481 } 482 483 /// Helper function to update the liveness information for the callee-saved 484 /// registers. 485 static void updateLiveness(MachineFunction &MF) { 486 MachineFrameInfo &MFI = MF.getFrameInfo(); 487 // Visited will contain all the basic blocks that are in the region 488 // where the callee saved registers are alive: 489 // - Anything that is not Save or Restore -> LiveThrough. 490 // - Save -> LiveIn. 
491 // - Restore -> LiveOut. 492 // The live-out is not attached to the block, so no need to keep 493 // Restore in this set. 494 SmallPtrSet<MachineBasicBlock *, 8> Visited; 495 SmallVector<MachineBasicBlock *, 8> WorkList; 496 MachineBasicBlock *Entry = &MF.front(); 497 MachineBasicBlock *Save = MFI.getSavePoint(); 498 499 if (!Save) 500 Save = Entry; 501 502 if (Entry != Save) { 503 WorkList.push_back(Entry); 504 Visited.insert(Entry); 505 } 506 Visited.insert(Save); 507 508 MachineBasicBlock *Restore = MFI.getRestorePoint(); 509 if (Restore) 510 // By construction Restore cannot be visited, otherwise it 511 // means there exists a path to Restore that does not go 512 // through Save. 513 WorkList.push_back(Restore); 514 515 while (!WorkList.empty()) { 516 const MachineBasicBlock *CurBB = WorkList.pop_back_val(); 517 // By construction, the region that is after the save point is 518 // dominated by the Save and post-dominated by the Restore. 519 if (CurBB == Save && Save != Restore) 520 continue; 521 // Enqueue all the successors not already visited. 522 // Those are by construction either before Save or after Restore. 523 for (MachineBasicBlock *SuccBB : CurBB->successors()) 524 if (Visited.insert(SuccBB).second) 525 WorkList.push_back(SuccBB); 526 } 527 528 const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo(); 529 530 MachineRegisterInfo &MRI = MF.getRegInfo(); 531 for (const CalleeSavedInfo &I : CSI) { 532 for (MachineBasicBlock *MBB : Visited) { 533 MCPhysReg Reg = I.getReg(); 534 // Add the callee-saved register as live-in. 535 // It's killed at the spill. 536 if (!MRI.isReserved(Reg) && !MBB->isLiveIn(Reg)) 537 MBB->addLiveIn(Reg); 538 } 539 // If callee-saved register is spilled to another register rather than 540 // spilling to stack, the destination register has to be marked as live for 541 // each MBB between the prologue and epilogue so that it is not clobbered 542 // before it is reloaded in the epilogue. 
The Visited set contains all 543 // blocks outside of the region delimited by prologue/epilogue. 544 if (I.isSpilledToReg()) { 545 for (MachineBasicBlock &MBB : MF) { 546 if (Visited.count(&MBB)) 547 continue; 548 MCPhysReg DstReg = I.getDstReg(); 549 if (!MBB.isLiveIn(DstReg)) 550 MBB.addLiveIn(DstReg); 551 } 552 } 553 } 554 } 555 556 /// Insert restore code for the callee-saved registers used in the function. 557 static void insertCSRSaves(MachineBasicBlock &SaveBlock, 558 ArrayRef<CalleeSavedInfo> CSI) { 559 MachineFunction &MF = *SaveBlock.getParent(); 560 const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo(); 561 const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering(); 562 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); 563 564 MachineBasicBlock::iterator I = SaveBlock.begin(); 565 if (!TFI->spillCalleeSavedRegisters(SaveBlock, I, CSI, TRI)) { 566 for (const CalleeSavedInfo &CS : CSI) { 567 // Insert the spill to the stack frame. 568 unsigned Reg = CS.getReg(); 569 570 if (CS.isSpilledToReg()) { 571 BuildMI(SaveBlock, I, DebugLoc(), 572 TII.get(TargetOpcode::COPY), CS.getDstReg()) 573 .addReg(Reg, getKillRegState(true)); 574 } else { 575 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg); 576 TII.storeRegToStackSlot(SaveBlock, I, Reg, true, CS.getFrameIdx(), RC, 577 TRI); 578 } 579 } 580 } 581 } 582 583 /// Insert restore code for the callee-saved registers used in the function. 584 static void insertCSRRestores(MachineBasicBlock &RestoreBlock, 585 std::vector<CalleeSavedInfo> &CSI) { 586 MachineFunction &MF = *RestoreBlock.getParent(); 587 const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo(); 588 const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering(); 589 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); 590 591 // Restore all registers immediately before the return and any 592 // terminators that precede it. 
593 MachineBasicBlock::iterator I = RestoreBlock.getFirstTerminator(); 594 595 if (!TFI->restoreCalleeSavedRegisters(RestoreBlock, I, CSI, TRI)) { 596 for (const CalleeSavedInfo &CI : reverse(CSI)) { 597 unsigned Reg = CI.getReg(); 598 if (CI.isSpilledToReg()) { 599 BuildMI(RestoreBlock, I, DebugLoc(), TII.get(TargetOpcode::COPY), Reg) 600 .addReg(CI.getDstReg(), getKillRegState(true)); 601 } else { 602 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg); 603 TII.loadRegFromStackSlot(RestoreBlock, I, Reg, CI.getFrameIdx(), RC, TRI); 604 assert(I != RestoreBlock.begin() && 605 "loadRegFromStackSlot didn't insert any code!"); 606 // Insert in reverse order. loadRegFromStackSlot can insert 607 // multiple instructions. 608 } 609 } 610 } 611 } 612 613 void PEI::spillCalleeSavedRegs(MachineFunction &MF) { 614 // We can't list this requirement in getRequiredProperties because some 615 // targets (WebAssembly) use virtual registers past this point, and the pass 616 // pipeline is set up without giving the passes a chance to look at the 617 // TargetMachine. 618 // FIXME: Find a way to express this in getRequiredProperties. 619 assert(MF.getProperties().hasProperty( 620 MachineFunctionProperties::Property::NoVRegs)); 621 622 const Function &F = MF.getFunction(); 623 const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering(); 624 MachineFrameInfo &MFI = MF.getFrameInfo(); 625 MinCSFrameIndex = std::numeric_limits<unsigned>::max(); 626 MaxCSFrameIndex = 0; 627 628 // Determine which of the registers in the callee save list should be saved. 629 BitVector SavedRegs; 630 TFI->determineCalleeSaves(MF, SavedRegs, RS); 631 632 // Assign stack slots for any callee-saved registers that must be spilled. 633 assignCalleeSavedSpillSlots(MF, SavedRegs, MinCSFrameIndex, MaxCSFrameIndex); 634 635 // Add the code to save and restore the callee saved registers. 
636 if (!F.hasFnAttribute(Attribute::Naked)) { 637 MFI.setCalleeSavedInfoValid(true); 638 639 std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo(); 640 if (!CSI.empty()) { 641 if (!MFI.hasCalls()) 642 NumLeafFuncWithSpills++; 643 644 for (MachineBasicBlock *SaveBlock : SaveBlocks) 645 insertCSRSaves(*SaveBlock, CSI); 646 647 // Update the live-in information of all the blocks up to the save point. 648 updateLiveness(MF); 649 650 for (MachineBasicBlock *RestoreBlock : RestoreBlocks) 651 insertCSRRestores(*RestoreBlock, CSI); 652 } 653 } 654 } 655 656 /// AdjustStackOffset - Helper function used to adjust the stack frame offset. 657 static inline void AdjustStackOffset(MachineFrameInfo &MFI, int FrameIdx, 658 bool StackGrowsDown, int64_t &Offset, 659 Align &MaxAlign, unsigned Skew) { 660 // If the stack grows down, add the object size to find the lowest address. 661 if (StackGrowsDown) 662 Offset += MFI.getObjectSize(FrameIdx); 663 664 Align Alignment = MFI.getObjectAlign(FrameIdx); 665 666 // If the alignment of this object is greater than that of the stack, then 667 // increase the stack alignment to match. 668 MaxAlign = std::max(MaxAlign, Alignment); 669 670 // Adjust to alignment boundary. 671 Offset = alignTo(Offset, Alignment, Skew); 672 673 if (StackGrowsDown) { 674 LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << -Offset 675 << "]\n"); 676 MFI.setObjectOffset(FrameIdx, -Offset); // Set the computed offset 677 } else { 678 LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << Offset 679 << "]\n"); 680 MFI.setObjectOffset(FrameIdx, Offset); 681 Offset += MFI.getObjectSize(FrameIdx); 682 } 683 } 684 685 /// Compute which bytes of fixed and callee-save stack area are unused and keep 686 /// track of them in StackBytesFree. 
687 static inline void 688 computeFreeStackSlots(MachineFrameInfo &MFI, bool StackGrowsDown, 689 unsigned MinCSFrameIndex, unsigned MaxCSFrameIndex, 690 int64_t FixedCSEnd, BitVector &StackBytesFree) { 691 // Avoid undefined int64_t -> int conversion below in extreme case. 692 if (FixedCSEnd > std::numeric_limits<int>::max()) 693 return; 694 695 StackBytesFree.resize(FixedCSEnd, true); 696 697 SmallVector<int, 16> AllocatedFrameSlots; 698 // Add fixed objects. 699 for (int i = MFI.getObjectIndexBegin(); i != 0; ++i) 700 // StackSlot scavenging is only implemented for the default stack. 701 if (MFI.getStackID(i) == TargetStackID::Default) 702 AllocatedFrameSlots.push_back(i); 703 // Add callee-save objects if there are any. 704 if (MinCSFrameIndex <= MaxCSFrameIndex) { 705 for (int i = MinCSFrameIndex; i <= (int)MaxCSFrameIndex; ++i) 706 if (MFI.getStackID(i) == TargetStackID::Default) 707 AllocatedFrameSlots.push_back(i); 708 } 709 710 for (int i : AllocatedFrameSlots) { 711 // These are converted from int64_t, but they should always fit in int 712 // because of the FixedCSEnd check above. 713 int ObjOffset = MFI.getObjectOffset(i); 714 int ObjSize = MFI.getObjectSize(i); 715 int ObjStart, ObjEnd; 716 if (StackGrowsDown) { 717 // ObjOffset is negative when StackGrowsDown is true. 718 ObjStart = -ObjOffset - ObjSize; 719 ObjEnd = -ObjOffset; 720 } else { 721 ObjStart = ObjOffset; 722 ObjEnd = ObjOffset + ObjSize; 723 } 724 // Ignore fixed holes that are in the previous stack frame. 725 if (ObjEnd > 0) 726 StackBytesFree.reset(ObjStart, ObjEnd); 727 } 728 } 729 730 /// Assign frame object to an unused portion of the stack in the fixed stack 731 /// object range. Return true if the allocation was successful. 
732 static inline bool scavengeStackSlot(MachineFrameInfo &MFI, int FrameIdx, 733 bool StackGrowsDown, Align MaxAlign, 734 BitVector &StackBytesFree) { 735 if (MFI.isVariableSizedObjectIndex(FrameIdx)) 736 return false; 737 738 if (StackBytesFree.none()) { 739 // clear it to speed up later scavengeStackSlot calls to 740 // StackBytesFree.none() 741 StackBytesFree.clear(); 742 return false; 743 } 744 745 Align ObjAlign = MFI.getObjectAlign(FrameIdx); 746 if (ObjAlign > MaxAlign) 747 return false; 748 749 int64_t ObjSize = MFI.getObjectSize(FrameIdx); 750 int FreeStart; 751 for (FreeStart = StackBytesFree.find_first(); FreeStart != -1; 752 FreeStart = StackBytesFree.find_next(FreeStart)) { 753 754 // Check that free space has suitable alignment. 755 unsigned ObjStart = StackGrowsDown ? FreeStart + ObjSize : FreeStart; 756 if (alignTo(ObjStart, ObjAlign) != ObjStart) 757 continue; 758 759 if (FreeStart + ObjSize > StackBytesFree.size()) 760 return false; 761 762 bool AllBytesFree = true; 763 for (unsigned Byte = 0; Byte < ObjSize; ++Byte) 764 if (!StackBytesFree.test(FreeStart + Byte)) { 765 AllBytesFree = false; 766 break; 767 } 768 if (AllBytesFree) 769 break; 770 } 771 772 if (FreeStart == -1) 773 return false; 774 775 if (StackGrowsDown) { 776 int ObjStart = -(FreeStart + ObjSize); 777 LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") scavenged at SP[" 778 << ObjStart << "]\n"); 779 MFI.setObjectOffset(FrameIdx, ObjStart); 780 } else { 781 LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") scavenged at SP[" 782 << FreeStart << "]\n"); 783 MFI.setObjectOffset(FrameIdx, FreeStart); 784 } 785 786 StackBytesFree.reset(FreeStart, FreeStart + ObjSize); 787 return true; 788 } 789 790 /// AssignProtectedObjSet - Helper function to assign large stack objects (i.e., 791 /// those required to be close to the Stack Protector) to stack offsets. 
static void AssignProtectedObjSet(const StackObjSet &UnassignedObjs,
                                  SmallSet<int, 16> &ProtectedObjs,
                                  MachineFrameInfo &MFI, bool StackGrowsDown,
                                  int64_t &Offset, Align &MaxAlign,
                                  unsigned Skew) {

  // Assign each object the next offset in stack-growth order and remember it
  // so the general allocation loop below can skip it.
  for (int i : UnassignedObjs) {
    AdjustStackOffset(MFI, i, StackGrowsDown, Offset, MaxAlign, Skew);
    ProtectedObjs.insert(i);
  }
}

/// calculateFrameObjectOffsets - Calculate actual frame offsets for all of the
/// abstract stack objects.
void PEI::calculateFrameObjectOffsets(MachineFunction &MF) {
  const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();

  bool StackGrowsDown =
      TFI.getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;

  // Loop over all of the stack objects, assigning sequential addresses...
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // Start at the beginning of the local area.
  // The Offset is the distance from the stack top in the direction
  // of stack growth -- so it's always nonnegative.
  int LocalAreaOffset = TFI.getOffsetOfLocalArea();
  if (StackGrowsDown)
    LocalAreaOffset = -LocalAreaOffset;
  assert(LocalAreaOffset >= 0
         && "Local area offset should be in direction of stack growth");
  int64_t Offset = LocalAreaOffset;

  // Skew to be applied to alignment.
  unsigned Skew = TFI.getStackAlignmentSkew(MF);

#ifdef EXPENSIVE_CHECKS
  // Sanity check: no default-stack object may require more alignment than the
  // frame's recorded maximum.
  for (unsigned i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i)
    if (!MFI.isDeadObjectIndex(i) &&
        MFI.getStackID(i) == TargetStackID::Default)
      assert(MFI.getObjectAlign(i) <= MFI.getMaxAlign() &&
             "MaxAlignment is invalid");
#endif

  // If there are fixed sized objects that are preallocated in the local area,
  // non-fixed objects can't be allocated right at the start of local area.
  // Adjust 'Offset' to point to the end of last fixed sized preallocated
  // object.
  for (int i = MFI.getObjectIndexBegin(); i != 0; ++i) {
    if (MFI.getStackID(i) !=
        TargetStackID::Default) // Only allocate objects on the default stack.
      continue;

    int64_t FixedOff;
    if (StackGrowsDown) {
      // The maximum distance from the stack pointer is at lower address of
      // the object -- which is given by offset. For down growing stack
      // the offset is negative, so we negate the offset to get the distance.
      FixedOff = -MFI.getObjectOffset(i);
    } else {
      // The maximum distance from the start pointer is at the upper
      // address of the object.
      FixedOff = MFI.getObjectOffset(i) + MFI.getObjectSize(i);
    }
    if (FixedOff > Offset) Offset = FixedOff;
  }

  // First assign frame offsets to stack objects that are used to spill
  // callee saved registers.
  if (StackGrowsDown && MaxCSFrameIndex >= MinCSFrameIndex) {
    for (unsigned i = MinCSFrameIndex; i <= MaxCSFrameIndex; ++i) {
      if (MFI.getStackID(i) !=
          TargetStackID::Default) // Only allocate objects on the default stack.
        continue;

      // If the stack grows down, we need to add the size to find the lowest
      // address of the object.
      Offset += MFI.getObjectSize(i);

      // Adjust to alignment boundary
      Offset = alignTo(Offset, MFI.getObjectAlign(i), Skew);

      LLVM_DEBUG(dbgs() << "alloc FI(" << i << ") at SP[" << -Offset << "]\n");
      MFI.setObjectOffset(i, -Offset); // Set the computed offset
    }
  } else if (MaxCSFrameIndex >= MinCSFrameIndex) {
    // Be careful about underflow in comparisons against MinCSFrameIndex.
    for (unsigned i = MaxCSFrameIndex; i != MinCSFrameIndex - 1; --i) {
      if (MFI.getStackID(i) !=
          TargetStackID::Default) // Only allocate objects on the default stack.
        continue;

      if (MFI.isDeadObjectIndex(i))
        continue;

      // Adjust to alignment boundary
      Offset = alignTo(Offset, MFI.getObjectAlign(i), Skew);

      LLVM_DEBUG(dbgs() << "alloc FI(" << i << ") at SP[" << Offset << "]\n");
      MFI.setObjectOffset(i, Offset);
      Offset += MFI.getObjectSize(i);
    }
  }

  // FixedCSEnd is the stack offset to the end of the fixed and callee-save
  // stack area.
  int64_t FixedCSEnd = Offset;
  Align MaxAlign = MFI.getMaxAlign();

  // Make sure the special register scavenging spill slot is closest to the
  // incoming stack pointer if a frame pointer is required and is closer
  // to the incoming rather than the final stack pointer.
  const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
  bool EarlyScavengingSlots =
      TFI.allocateScavengingFrameIndexesNearIncomingSP(MF);
  if (RS && EarlyScavengingSlots) {
    SmallVector<int, 2> SFIs;
    RS->getScavengingFrameIndices(SFIs);
    for (int SFI : SFIs)
      AdjustStackOffset(MFI, SFI, StackGrowsDown, Offset, MaxAlign, Skew);
  }

  // FIXME: Once this is working, then enable flag will change to a target
  // check for whether the frame is large enough to want to use virtual
  // frame index registers. Functions which don't want/need this optimization
  // will continue to use the existing code path.
  if (MFI.getUseLocalStackAllocationBlock()) {
    Align Alignment = MFI.getLocalFrameMaxAlign();

    // Adjust to alignment boundary.
    Offset = alignTo(Offset, Alignment, Skew);

    LLVM_DEBUG(dbgs() << "Local frame base offset: " << Offset << "\n");

    // Resolve offsets for objects in the local block.
    // Entry.second is the object's offset relative to the local block base.
    for (unsigned i = 0, e = MFI.getLocalFrameObjectCount(); i != e; ++i) {
      std::pair<int, int64_t> Entry = MFI.getLocalFrameObjectMap(i);
      int64_t FIOffset = (StackGrowsDown ? -Offset : Offset) + Entry.second;
      LLVM_DEBUG(dbgs() << "alloc FI(" << Entry.first << ") at SP[" << FIOffset
                        << "]\n");
      MFI.setObjectOffset(Entry.first, FIOffset);
    }
    // Allocate the local block
    Offset += MFI.getLocalFrameSize();

    MaxAlign = std::max(Alignment, MaxAlign);
  }

  // Retrieve the Exception Handler registration node.
  int EHRegNodeFrameIndex = std::numeric_limits<int>::max();
  if (const WinEHFuncInfo *FuncInfo = MF.getWinEHFuncInfo())
    EHRegNodeFrameIndex = FuncInfo->EHRegNodeFrameIndex;

  // Make sure that the stack protector comes before the local variables on the
  // stack.
  SmallSet<int, 16> ProtectedObjs;
  if (MFI.hasStackProtectorIndex()) {
    int StackProtectorFI = MFI.getStackProtectorIndex();
    StackObjSet LargeArrayObjs;
    StackObjSet SmallArrayObjs;
    StackObjSet AddrOfObjs;

    // If we need a stack protector, we need to make sure that
    // LocalStackSlotPass didn't already allocate a slot for it.
    // If we are told to use the LocalStackAllocationBlock, the stack protector
    // is expected to be already pre-allocated.
    if (MFI.getStackID(StackProtectorFI) != TargetStackID::Default) {
      // If the stack protector isn't on the default stack then it's up to the
      // target to set the stack offset.
      assert(MFI.getObjectOffset(StackProtectorFI) != 0 &&
             "Offset of stack protector on non-default stack expected to be "
             "already set.");
      assert(!MFI.isObjectPreAllocated(MFI.getStackProtectorIndex()) &&
             "Stack protector on non-default stack expected to not be "
             "pre-allocated by LocalStackSlotPass.");
    } else if (!MFI.getUseLocalStackAllocationBlock()) {
      AdjustStackOffset(MFI, StackProtectorFI, StackGrowsDown, Offset, MaxAlign,
                        Skew);
    } else if (!MFI.isObjectPreAllocated(MFI.getStackProtectorIndex())) {
      llvm_unreachable(
          "Stack protector not pre-allocated by LocalStackSlotPass.");
    }

    // Assign large stack objects first.
    // Bucket each remaining eligible object by its stack-protector layout
    // class; the buckets are laid out in a fixed order below.
    for (unsigned i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) {
      if (MFI.isObjectPreAllocated(i) && MFI.getUseLocalStackAllocationBlock())
        continue;
      if (i >= MinCSFrameIndex && i <= MaxCSFrameIndex)
        continue;
      if (RS && RS->isScavengingFrameIndex((int)i))
        continue;
      if (MFI.isDeadObjectIndex(i))
        continue;
      if (StackProtectorFI == (int)i || EHRegNodeFrameIndex == (int)i)
        continue;
      if (MFI.getStackID(i) !=
          TargetStackID::Default) // Only allocate objects on the default stack.
        continue;

      switch (MFI.getObjectSSPLayout(i)) {
      case MachineFrameInfo::SSPLK_None:
        continue;
      case MachineFrameInfo::SSPLK_SmallArray:
        SmallArrayObjs.insert(i);
        continue;
      case MachineFrameInfo::SSPLK_AddrOf:
        AddrOfObjs.insert(i);
        continue;
      case MachineFrameInfo::SSPLK_LargeArray:
        LargeArrayObjs.insert(i);
        continue;
      }
      llvm_unreachable("Unexpected SSPLayoutKind.");
    }

    // We expect **all** the protected stack objects to be pre-allocated by
    // LocalStackSlotPass. If it turns out that PEI still has to allocate some
    // of them, we may end up messing up the expected order of the objects.
    if (MFI.getUseLocalStackAllocationBlock() &&
        !(LargeArrayObjs.empty() && SmallArrayObjs.empty() &&
          AddrOfObjs.empty()))
      llvm_unreachable("Found protected stack objects not pre-allocated by "
                       "LocalStackSlotPass.");

    AssignProtectedObjSet(LargeArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
                          Offset, MaxAlign, Skew);
    AssignProtectedObjSet(SmallArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
                          Offset, MaxAlign, Skew);
    AssignProtectedObjSet(AddrOfObjs, ProtectedObjs, MFI, StackGrowsDown,
                          Offset, MaxAlign, Skew);
  }

  SmallVector<int, 8> ObjectsToAllocate;

  // Then prepare to assign frame offsets to stack objects that are not used to
  // spill callee saved registers.
  for (unsigned i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) {
    if (MFI.isObjectPreAllocated(i) && MFI.getUseLocalStackAllocationBlock())
      continue;
    if (i >= MinCSFrameIndex && i <= MaxCSFrameIndex)
      continue;
    if (RS && RS->isScavengingFrameIndex((int)i))
      continue;
    if (MFI.isDeadObjectIndex(i))
      continue;
    if (MFI.getStackProtectorIndex() == (int)i || EHRegNodeFrameIndex == (int)i)
      continue;
    if (ProtectedObjs.count(i))
      continue;
    if (MFI.getStackID(i) !=
        TargetStackID::Default) // Only allocate objects on the default stack.
      continue;

    // Add the objects that we need to allocate to our working set.
    ObjectsToAllocate.push_back(i);
  }

  // Allocate the EH registration node first if one is present.
  if (EHRegNodeFrameIndex != std::numeric_limits<int>::max())
    AdjustStackOffset(MFI, EHRegNodeFrameIndex, StackGrowsDown, Offset,
                      MaxAlign, Skew);

  // Give the targets a chance to order the objects the way they like it.
  if (MF.getTarget().getOptLevel() != CodeGenOpt::None &&
      MF.getTarget().Options.StackSymbolOrdering)
    TFI.orderFrameObjects(MF, ObjectsToAllocate);

  // Keep track of which bytes in the fixed and callee-save range are used so we
  // can use the holes when allocating later stack objects. Only do this if
  // stack protector isn't being used and the target requests it and we're
  // optimizing.
  BitVector StackBytesFree;
  if (!ObjectsToAllocate.empty() &&
      MF.getTarget().getOptLevel() != CodeGenOpt::None &&
      MFI.getStackProtectorIndex() < 0 && TFI.enableStackSlotScavenging(MF))
    computeFreeStackSlots(MFI, StackGrowsDown, MinCSFrameIndex, MaxCSFrameIndex,
                          FixedCSEnd, StackBytesFree);

  // Now walk the objects and actually assign base offsets to them.
  // Prefer re-using a hole in the fixed/CSR area; fall back to growing the
  // frame.
  for (auto &Object : ObjectsToAllocate)
    if (!scavengeStackSlot(MFI, Object, StackGrowsDown, MaxAlign,
                           StackBytesFree))
      AdjustStackOffset(MFI, Object, StackGrowsDown, Offset, MaxAlign, Skew);

  // Make sure the special register scavenging spill slot is closest to the
  // stack pointer.
  if (RS && !EarlyScavengingSlots) {
    SmallVector<int, 2> SFIs;
    RS->getScavengingFrameIndices(SFIs);
    for (int SFI : SFIs)
      AdjustStackOffset(MFI, SFI, StackGrowsDown, Offset, MaxAlign, Skew);
  }

  if (!TFI.targetHandlesStackFrameRounding()) {
    // If we have reserved argument space for call sites in the function
    // immediately on entry to the current function, count it as part of the
    // overall stack size.
    if (MFI.adjustsStack() && TFI.hasReservedCallFrame(MF))
      Offset += MFI.getMaxCallFrameSize();

    // Round up the size to a multiple of the alignment.  If the function has
    // any calls or alloca's, align to the target's StackAlignment value to
    // ensure that the callee's frame or the alloca data is suitably aligned;
    // otherwise, for leaf functions, align to the TransientStackAlignment
    // value.
    Align StackAlign;
    if (MFI.adjustsStack() || MFI.hasVarSizedObjects() ||
        (RegInfo->hasStackRealignment(MF) && MFI.getObjectIndexEnd() != 0))
      StackAlign = TFI.getStackAlign();
    else
      StackAlign = TFI.getTransientStackAlign();

    // If the frame pointer is eliminated, all frame offsets will be relative to
    // SP not FP. Align to MaxAlign so this works.
    StackAlign = std::max(StackAlign, MaxAlign);
    int64_t OffsetBeforeAlignment = Offset;
    Offset = alignTo(Offset, StackAlign, Skew);

    // If we have increased the offset to fulfill the alignment constraints,
    // then the scavenging spill slots may become harder to reach from the
    // stack pointer, float them so they stay close.
    if (StackGrowsDown && OffsetBeforeAlignment != Offset && RS &&
        !EarlyScavengingSlots) {
      SmallVector<int, 2> SFIs;
      RS->getScavengingFrameIndices(SFIs);
      LLVM_DEBUG(if (!SFIs.empty()) llvm::dbgs()
                 << "Adjusting emergency spill slots!\n";);
      int64_t Delta = Offset - OffsetBeforeAlignment;
      for (int SFI : SFIs) {
        LLVM_DEBUG(llvm::dbgs()
                   << "Adjusting offset of emergency spill slot #" << SFI
                   << " from " << MFI.getObjectOffset(SFI););
        MFI.setObjectOffset(SFI, MFI.getObjectOffset(SFI) - Delta);
        LLVM_DEBUG(llvm::dbgs() << " to " << MFI.getObjectOffset(SFI) << "\n";);
      }
    }
  }

  // Update frame info to pretend that this is part of the stack...
  int64_t StackSize = Offset - LocalAreaOffset;
  MFI.setStackSize(StackSize);
  NumBytesStackSpace += StackSize;
}

/// insertPrologEpilogCode - Scan the function for modified callee saved
/// registers, insert spill code for these callee saved registers, then add
/// prolog and epilog code to the function.
void PEI::insertPrologEpilogCode(MachineFunction &MF) {
  const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();

  // Add prologue to the function...
  for (MachineBasicBlock *SaveBlock : SaveBlocks)
    TFI.emitPrologue(MF, *SaveBlock);

  // Add epilogue to restore the callee-save registers in each exiting block.
  for (MachineBasicBlock *RestoreBlock : RestoreBlocks)
    TFI.emitEpilogue(MF, *RestoreBlock);

  // Zero call used registers before restoring callee-saved registers.
  insertZeroCallUsedRegs(MF);

  for (MachineBasicBlock *SaveBlock : SaveBlocks)
    TFI.inlineStackProbe(MF, *SaveBlock);

  // Emit additional code that is required to support segmented stacks, if
  // we've been asked for it. This, when linked with a runtime with support
  // for segmented stacks (libgcc is one), will result in allocating stack
  // space in small chunks instead of one large contiguous block.
  if (MF.shouldSplitStack()) {
    for (MachineBasicBlock *SaveBlock : SaveBlocks)
      TFI.adjustForSegmentedStacks(MF, *SaveBlock);
    // Record that there are split-stack functions, so we will emit a
    // special section to tell the linker.
    MF.getMMI().setHasSplitStack(true);
  } else
    MF.getMMI().setHasNosplitStack(true);

  // Emit additional code that is required to explicitly handle the stack in
  // HiPE native code (if needed) when loaded in the Erlang/OTP runtime. The
  // approach is rather similar to that of Segmented Stacks, but it uses a
  // different conditional check and another BIF for allocating more stack
  // space.
  if (MF.getFunction().getCallingConv() == CallingConv::HiPE)
    for (MachineBasicBlock *SaveBlock : SaveBlocks)
      TFI.adjustForHiPEPrologue(MF, *SaveBlock);
}

/// insertZeroCallUsedRegs - Zero out call used registers.
void PEI::insertZeroCallUsedRegs(MachineFunction &MF) {
  const Function &F = MF.getFunction();

  // Nothing to do unless the function opted in via the attribute.
  if (!F.hasFnAttribute("zero-call-used-regs"))
    return;

  using namespace ZeroCallUsedRegs;

  ZeroCallUsedRegsKind ZeroRegsKind =
      StringSwitch<ZeroCallUsedRegsKind>(
          F.getFnAttribute("zero-call-used-regs").getValueAsString())
          .Case("skip", ZeroCallUsedRegsKind::Skip)
          .Case("used-gpr-arg", ZeroCallUsedRegsKind::UsedGPRArg)
          .Case("used-gpr", ZeroCallUsedRegsKind::UsedGPR)
          .Case("used-arg", ZeroCallUsedRegsKind::UsedArg)
          .Case("used", ZeroCallUsedRegsKind::Used)
          .Case("all-gpr-arg", ZeroCallUsedRegsKind::AllGPRArg)
          .Case("all-gpr", ZeroCallUsedRegsKind::AllGPR)
          .Case("all-arg", ZeroCallUsedRegsKind::AllArg)
          .Case("all", ZeroCallUsedRegsKind::All);

  if (ZeroRegsKind == ZeroCallUsedRegsKind::Skip)
    return;

  // The kind is a bitmask of these three restrictions.
  const bool OnlyGPR = static_cast<unsigned>(ZeroRegsKind) & ONLY_GPR;
  const bool OnlyUsed = static_cast<unsigned>(ZeroRegsKind) & ONLY_USED;
  const bool OnlyArg = static_cast<unsigned>(ZeroRegsKind) & ONLY_ARG;

  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
  const BitVector AllocatableSet(TRI.getAllocatableSet(MF));

  // Mark all used registers.
  BitVector UsedRegs(TRI.getNumRegs());
  if (OnlyUsed)
    for (const MachineBasicBlock &MBB : MF)
      for (const MachineInstr &MI : MBB)
        for (const MachineOperand &MO : MI.operands()) {
          if (!MO.isReg())
            continue;

          MCRegister Reg = MO.getReg();
          if (AllocatableSet[Reg] && !MO.isImplicit() &&
              (MO.isDef() || MO.isUse()))
            UsedRegs.set(Reg);
        }

  // Select the set of registers to zero from the allocatable set, applying
  // the requested restrictions.
  BitVector RegsToZero(TRI.getNumRegs());
  for (MCRegister Reg : AllocatableSet.set_bits()) {
    // Skip over fixed registers.
    if (TRI.isFixedRegister(MF, Reg))
      continue;

    // Want only general purpose registers.
    if (OnlyGPR && !TRI.isGeneralPurposeRegister(MF, Reg))
      continue;

    // Want only used registers.
    if (OnlyUsed && !UsedRegs[Reg])
      continue;

    // Want only registers used for arguments.
    if (OnlyArg && !TRI.isArgumentRegister(MF, Reg))
      continue;

    RegsToZero.set(Reg);
  }

  // Remove registers that are live when leaving the function.
  // Zeroing a return-value register (or any of its sub/super registers)
  // would clobber the function's result.
  for (const MachineBasicBlock &MBB : MF)
    for (const MachineInstr &MI : MBB.terminators()) {
      if (!MI.isReturn())
        continue;

      for (const auto &MO : MI.operands()) {
        if (!MO.isReg())
          continue;

        for (MCPhysReg SReg : TRI.sub_and_superregs_inclusive(MO.getReg()))
          RegsToZero.reset(SReg);
      }
    }

  const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();
  for (MachineBasicBlock &MBB : MF)
    if (MBB.isReturnBlock())
      TFI.emitZeroCallUsedRegs(RegsToZero, MBB);
}

/// replaceFrameIndices - Replace all MO_FrameIndex operands with physical
/// register references and actual offsets.
void PEI::replaceFrameIndices(MachineFunction &MF) {
  const auto &ST = MF.getSubtarget();
  const TargetFrameLowering &TFI = *ST.getFrameLowering();
  if (!TFI.needsFrameIndexResolution(MF))
    return;

  const TargetRegisterInfo *TRI = ST.getRegisterInfo();

  // Allow the target to determine this after knowing the frame size.
  FrameIndexEliminationScavenging = (RS && !FrameIndexVirtualScavenging) ||
    TRI->requiresFrameIndexReplacementScavenging(MF);

  // Store SPAdj at exit of a basic block.
  SmallVector<int, 8> SPState;
  SPState.resize(MF.getNumBlockIDs());
  df_iterator_default_set<MachineBasicBlock*> Reachable;

  // Iterate over the reachable blocks in DFS order.
  // Each block inherits the SP adjustment left over by its DFS-stack
  // predecessor, so call sequences spanning block boundaries are tracked.
  for (auto DFI = df_ext_begin(&MF, Reachable), DFE = df_ext_end(&MF, Reachable);
       DFI != DFE; ++DFI) {
    int SPAdj = 0;
    // Check the exit state of the DFS stack predecessor.
    if (DFI.getPathLength() >= 2) {
      MachineBasicBlock *StackPred = DFI.getPath(DFI.getPathLength() - 2);
      assert(Reachable.count(StackPred) &&
             "DFS stack predecessor is already visited.\n");
      SPAdj = SPState[StackPred->getNumber()];
    }
    MachineBasicBlock *BB = *DFI;
    replaceFrameIndices(BB, MF, SPAdj);
    SPState[BB->getNumber()] = SPAdj;
  }

  // Handle the unreachable blocks.
  // These have no well-defined incoming SP adjustment, so start from zero.
  for (auto &BB : MF) {
    if (Reachable.count(&BB))
      // Already handled in DFS traversal.
      continue;
    int SPAdj = 0;
    replaceFrameIndices(&BB, MF, SPAdj);
  }
}

void PEI::replaceFrameIndices(MachineBasicBlock *BB, MachineFunction &MF,
                              int &SPAdj) {
  assert(MF.getSubtarget().getRegisterInfo() &&
         "getRegisterInfo() must be implemented!");
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();

  if (RS && FrameIndexEliminationScavenging)
    RS->enterBasicBlock(*BB);

  bool InsideCallSequence = false;

  for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ) {
    // Frame setup/destroy pseudos delimit call sequences; let the target
    // lower or remove them and account for their SP effect.
    if (TII.isFrameInstr(*I)) {
      InsideCallSequence = TII.isFrameSetup(*I);
      SPAdj += TII.getSPAdjust(*I);
      I = TFI->eliminateCallFramePseudoInstr(MF, *BB, I);
      continue;
    }

    MachineInstr &MI = *I;
    bool DoIncr = true;
    bool DidFinishLoop = true;
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      if (!MI.getOperand(i).isFI())
        continue;

      // Frame indices in debug values are encoded in a target independent
      // way with simply the frame index and offset rather than any
      // target-specific addressing mode.
      if (MI.isDebugValue()) {
        MachineOperand &Op = MI.getOperand(i);
        assert(
            MI.isDebugOperand(&Op) &&
            "Frame indices can only appear as a debug operand in a DBG_VALUE*"
            " machine instruction");
        Register Reg;
        unsigned FrameIdx = Op.getIndex();
        unsigned Size = MF.getFrameInfo().getObjectSize(FrameIdx);

        StackOffset Offset =
            TFI->getFrameIndexReference(MF, FrameIdx, Reg);
        Op.ChangeToRegister(Reg, false /*isDef*/);

        const DIExpression *DIExpr = MI.getDebugExpression();

        // If we have a direct DBG_VALUE, and its location expression isn't
        // currently complex, then adding an offset will morph it into a
        // complex location that is interpreted as being a memory address.
        // This changes a pointer-valued variable to dereference that pointer,
        // which is incorrect. Fix by adding DW_OP_stack_value.

        if (MI.isNonListDebugValue()) {
          unsigned PrependFlags = DIExpression::ApplyOffset;
          if (!MI.isIndirectDebugValue() && !DIExpr->isComplex())
            PrependFlags |= DIExpression::StackValue;

          // If we have DBG_VALUE that is indirect and has a Implicit location
          // expression need to insert a deref before prepending a Memory
          // location expression. Also after doing this we change the DBG_VALUE
          // to be direct.
          if (MI.isIndirectDebugValue() && DIExpr->isImplicit()) {
            SmallVector<uint64_t, 2> Ops = {dwarf::DW_OP_deref_size, Size};
            bool WithStackValue = true;
            DIExpr = DIExpression::prependOpcodes(DIExpr, Ops, WithStackValue);
            // Make the DBG_VALUE direct.
            MI.getDebugOffset().ChangeToRegister(0, false);
          }
          DIExpr = TRI.prependOffsetExpression(DIExpr, PrependFlags, Offset);
        } else {
          // The debug operand at DebugOpIndex was a frame index at offset
          // `Offset`; now the operand has been replaced with the frame
          // register, we must add Offset with `register x, plus Offset`.
          unsigned DebugOpIndex = MI.getDebugOperandIndex(&Op);
          SmallVector<uint64_t, 3> Ops;
          TRI.getOffsetOpcodes(Offset, Ops);
          DIExpr = DIExpression::appendOpsToArg(DIExpr, Ops, DebugOpIndex);
        }
        MI.getDebugExpressionOp().setMetadata(DIExpr);
        continue;
      } else if (MI.isDebugPHI()) {
        // Allow stack ref to continue onwards.
        continue;
      }

      // TODO: This code should be commoned with the code for
      // PATCHPOINT. There's no good reason for the difference in
      // implementation other than historical accident. The only
      // remaining difference is the unconditional use of the stack
      // pointer as the base register.
      if (MI.getOpcode() == TargetOpcode::STATEPOINT) {
        assert((!MI.isDebugValue() || i == 0) &&
               "Frame indicies can only appear as the first operand of a "
               "DBG_VALUE machine instruction");
        Register Reg;
        MachineOperand &Offset = MI.getOperand(i + 1);
        StackOffset refOffset = TFI->getFrameIndexReferencePreferSP(
            MF, MI.getOperand(i).getIndex(), Reg, /*IgnoreSPUpdates*/ false);
        assert(!refOffset.getScalable() &&
               "Frame offsets with a scalable component are not supported");
        Offset.setImm(Offset.getImm() + refOffset.getFixed() + SPAdj);
        MI.getOperand(i).ChangeToRegister(Reg, false /*isDef*/);
        continue;
      }

      // Some instructions (e.g. inline asm instructions) can have
      // multiple frame indices and/or cause eliminateFrameIndex
      // to insert more than one instruction. We need the register
      // scavenger to go through all of these instructions so that
      // it can update its register information. We keep the
      // iterator at the point before insertion so that we can
      // revisit them in full.
      bool AtBeginning = (I == BB->begin());
      if (!AtBeginning) --I;

      // If this instruction has a FrameIndex operand, we need to
      // use that target machine register info object to eliminate
      // it.
      TRI.eliminateFrameIndex(MI, SPAdj, i,
                              FrameIndexEliminationScavenging ?  RS : nullptr);

      // Reset the iterator if we were at the beginning of the BB.
      if (AtBeginning) {
        I = BB->begin();
        DoIncr = false;
      }

      // MI may have been erased or replaced by eliminateFrameIndex; stop
      // scanning its operands and re-examine from the saved iterator.
      DidFinishLoop = false;
      break;
    }

    // If we are looking at a call sequence, we need to keep track of
    // the SP adjustment made by each instruction in the sequence.
    // This includes both the frame setup/destroy pseudos (handled above),
    // as well as other instructions that have side effects w.r.t the SP.
    // Note that this must come after eliminateFrameIndex, because
    // if I itself referred to a frame index, we shouldn't count its own
    // adjustment.
    if (DidFinishLoop && InsideCallSequence)
      SPAdj += TII.getSPAdjust(MI);

    if (DoIncr && I != BB->end()) ++I;

    // Update register states.
    if (RS && FrameIndexEliminationScavenging && DidFinishLoop)
      RS->forward(MI);
  }
}