//===-- SystemZFrameLowering.cpp - Frame lowering for SystemZ -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "SystemZFrameLowering.h"
#include "SystemZCallingConv.h"
#include "SystemZInstrBuilder.h"
#include "SystemZInstrInfo.h"
#include "SystemZMachineFunctionInfo.h"
#include "SystemZRegisterInfo.h"
#include "SystemZSubtarget.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/Function.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

namespace {
// The ABI-defined register save slots, relative to the CFA (i.e.
// incoming stack pointer + SystemZMC::ELFCallFrameSize).
static const TargetFrameLowering::SpillSlot ELFSpillOffsetTable[] = {
  { SystemZ::R2D, 0x10 },
  { SystemZ::R3D, 0x18 },
  { SystemZ::R4D, 0x20 },
  { SystemZ::R5D, 0x28 },
  { SystemZ::R6D, 0x30 },
  { SystemZ::R7D, 0x38 },
  { SystemZ::R8D, 0x40 },
  { SystemZ::R9D, 0x48 },
  { SystemZ::R10D, 0x50 },
  { SystemZ::R11D, 0x58 },
  { SystemZ::R12D, 0x60 },
  { SystemZ::R13D, 0x68 },
  { SystemZ::R14D, 0x70 },
  { SystemZ::R15D, 0x78 },
  { SystemZ::F0D, 0x80 },
  { SystemZ::F2D, 0x88 },
  { SystemZ::F4D, 0x90 },
  { SystemZ::F6D, 0x98 }
};

static const TargetFrameLowering::SpillSlot XPLINKSpillOffsetTable[] = {
    {SystemZ::R4D, 0x00},  {SystemZ::R5D, 0x08},  {SystemZ::R6D, 0x10},
    {SystemZ::R7D, 0x18},  {SystemZ::R8D, 0x20},  {SystemZ::R9D, 0x28},
    {SystemZ::R10D, 0x30}, {SystemZ::R11D, 0x38}, {SystemZ::R12D, 0x40},
    {SystemZ::R13D, 0x48}, {SystemZ::R14D, 0x50}, {SystemZ::R15D, 0x58}};
} // end anonymous namespace

SystemZFrameLowering::SystemZFrameLowering(StackDirection D, Align StackAl,
                                           int LAO, Align TransAl,
                                           bool StackReal)
    : TargetFrameLowering(D, StackAl, LAO, TransAl, StackReal) {}

std::unique_ptr<SystemZFrameLowering>
SystemZFrameLowering::create(const SystemZSubtarget &STI) {
  if (STI.isTargetXPLINK64())
    return std::make_unique<SystemZXPLINKFrameLowering>();
  return std::make_unique<SystemZELFFrameLowering>();
}

MachineBasicBlock::iterator SystemZFrameLowering::eliminateCallFramePseudoInstr(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator MI) const {
  switch (MI->getOpcode()) {
  case SystemZ::ADJCALLSTACKDOWN:
  case SystemZ::ADJCALLSTACKUP:
    assert(hasReservedCallFrame(MF) &&
           "ADJSTACKDOWN and ADJSTACKUP should be no-ops");
    return MBB.erase(MI);
    break;

  default:
    llvm_unreachable("Unexpected call frame instruction");
  }
}

bool SystemZFrameLowering::hasReservedCallFrame(
    const MachineFunction &MF) const {
  // The ELF ABI requires us to allocate 160 bytes of stack space for the
  // callee, with any outgoing stack arguments being placed above that. It
  // seems better to make that area a permanent feature of the frame even if
  // we're using a frame pointer. Similarly, 64-bit XPLINK requires 96 bytes
  // of stack space for the register save area.
  return true;
}

bool SystemZELFFrameLowering::assignCalleeSavedSpillSlots(
    MachineFunction &MF, const TargetRegisterInfo *TRI,
    std::vector<CalleeSavedInfo> &CSI) const {
  SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>();
  MachineFrameInfo &MFFrame = MF.getFrameInfo();
  bool IsVarArg = MF.getFunction().isVarArg();
  if (CSI.empty())
    return true; // Early exit if no callee saved registers are modified!

  unsigned LowGPR = 0;
  unsigned HighGPR = SystemZ::R15D;
  int StartSPOffset = SystemZMC::ELFCallFrameSize;
  for (auto &CS : CSI) {
    unsigned Reg = CS.getReg();
    int Offset = getRegSpillOffset(MF, Reg);
    if (Offset) {
      if (SystemZ::GR64BitRegClass.contains(Reg) && StartSPOffset > Offset) {
        LowGPR = Reg;
        StartSPOffset = Offset;
      }
      Offset -= SystemZMC::ELFCallFrameSize;
      int FrameIdx = MFFrame.CreateFixedSpillStackObject(8, Offset);
      CS.setFrameIdx(FrameIdx);
    } else
      CS.setFrameIdx(INT32_MAX);
  }

  // Save the range of call-saved registers, for use by the
  // prologue/epilogue inserters.
  ZFI->setRestoreGPRRegs(LowGPR, HighGPR, StartSPOffset);
  if (IsVarArg) {
    // Also save the GPR varargs, if any. R6D is call-saved, so would
    // already be included, but we also need to handle the call-clobbered
    // argument registers.
    unsigned FirstGPR = ZFI->getVarArgsFirstGPR();
    if (FirstGPR < SystemZ::ELFNumArgGPRs) {
      unsigned Reg = SystemZ::ELFArgGPRs[FirstGPR];
      int Offset = getRegSpillOffset(MF, Reg);
      if (StartSPOffset > Offset) {
        LowGPR = Reg; StartSPOffset = Offset;
      }
    }
  }
  ZFI->setSpillGPRRegs(LowGPR, HighGPR, StartSPOffset);

  // Create fixed stack objects for the remaining registers.
  int CurrOffset = -SystemZMC::ELFCallFrameSize;
  if (usePackedStack(MF))
    CurrOffset += StartSPOffset;

  for (auto &CS : CSI) {
    if (CS.getFrameIdx() != INT32_MAX)
      continue;
    unsigned Reg = CS.getReg();
    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
    unsigned Size = TRI->getSpillSize(*RC);
    CurrOffset -= Size;
    assert(CurrOffset % 8 == 0 &&
           "8-byte alignment required for all register save slots");
    int FrameIdx = MFFrame.CreateFixedSpillStackObject(Size, CurrOffset);
    CS.setFrameIdx(FrameIdx);
  }

  return true;
}

void SystemZELFFrameLowering::determineCalleeSaves(MachineFunction &MF,
                                                   BitVector &SavedRegs,
                                                   RegScavenger *RS) const {
  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);

  MachineFrameInfo &MFFrame = MF.getFrameInfo();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  bool HasFP = hasFP(MF);
  SystemZMachineFunctionInfo *MFI = MF.getInfo<SystemZMachineFunctionInfo>();
  bool IsVarArg = MF.getFunction().isVarArg();

  // va_start stores incoming FPR varargs in the normal way, but delegates
  // the saving of incoming GPR varargs to spillCalleeSavedRegisters().
  // Record these pending uses, which typically include the call-saved
  // argument register R6D.
  if (IsVarArg)
    for (unsigned I = MFI->getVarArgsFirstGPR(); I < SystemZ::ELFNumArgGPRs;
         ++I)
      SavedRegs.set(SystemZ::ELFArgGPRs[I]);

  // If there are any landing pads, entering them will modify r6/r7.
  if (!MF.getLandingPads().empty()) {
    SavedRegs.set(SystemZ::R6D);
    SavedRegs.set(SystemZ::R7D);
  }

  // If the function requires a frame pointer, record that the hard
  // frame pointer will be clobbered.
  if (HasFP)
    SavedRegs.set(SystemZ::R11D);

  // If the function calls other functions, record that the return
  // address register will be clobbered.
  if (MFFrame.hasCalls())
    SavedRegs.set(SystemZ::R14D);

  // If we are saving GPRs other than the stack pointer, we might as well
  // save and restore the stack pointer at the same time, via STMG and LMG.
  // This allows the deallocation to be done by the LMG, rather than needing
  // a separate %r15 addition.
  const MCPhysReg *CSRegs = TRI->getCalleeSavedRegs(&MF);
  for (unsigned I = 0; CSRegs[I]; ++I) {
    unsigned Reg = CSRegs[I];
    if (SystemZ::GR64BitRegClass.contains(Reg) && SavedRegs.test(Reg)) {
      SavedRegs.set(SystemZ::R15D);
      break;
    }
  }
}

SystemZELFFrameLowering::SystemZELFFrameLowering()
    : SystemZFrameLowering(TargetFrameLowering::StackGrowsDown, Align(8), 0,
                           Align(8), /* StackRealignable */ false),
      RegSpillOffsets(0) {

  // Due to the SystemZ ABI, the DWARF CFA (Canonical Frame Address) is not
  // equal to the incoming stack pointer, but to incoming stack pointer plus
  // 160. Instead of using a Local Area Offset, the Register save area will
  // be occupied by fixed frame objects, and all offsets are actually
  // relative to CFA.

  // Create a mapping from register number to save slot offset.
  // These offsets are relative to the start of the register save area.
  RegSpillOffsets.grow(SystemZ::NUM_TARGET_REGS);
  for (unsigned I = 0, E = array_lengthof(ELFSpillOffsetTable); I != E; ++I)
    RegSpillOffsets[ELFSpillOffsetTable[I].Reg] = ELFSpillOffsetTable[I].Offset;
}

// Add GPR64 to the save instruction being built by MIB, which is in basic
// block MBB. IsImplicit says whether this is an explicit operand to the
// instruction, or an implicit one that comes between the explicit start
// and end registers.
static void addSavedGPR(MachineBasicBlock &MBB, MachineInstrBuilder &MIB,
                        unsigned GPR64, bool IsImplicit) {
  const TargetRegisterInfo *RI =
      MBB.getParent()->getSubtarget().getRegisterInfo();
  Register GPR32 = RI->getSubReg(GPR64, SystemZ::subreg_l32);
  bool IsLive = MBB.isLiveIn(GPR64) || MBB.isLiveIn(GPR32);
  if (!IsLive || !IsImplicit) {
    MIB.addReg(GPR64, getImplRegState(IsImplicit) | getKillRegState(!IsLive));
    if (!IsLive)
      MBB.addLiveIn(GPR64);
  }
}

bool SystemZELFFrameLowering::spillCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    ArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
  if (CSI.empty())
    return false;

  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>();
  bool IsVarArg = MF.getFunction().isVarArg();
  DebugLoc DL;

  // Save GPRs
  SystemZ::GPRRegs SpillGPRs = ZFI->getSpillGPRRegs();
  if (SpillGPRs.LowGPR) {
    assert(SpillGPRs.LowGPR != SpillGPRs.HighGPR &&
           "Should be saving %r15 and something else");

    // Build an STMG instruction.
    MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(SystemZ::STMG));

    // Add the explicit register operands.
    addSavedGPR(MBB, MIB, SpillGPRs.LowGPR, false);
    addSavedGPR(MBB, MIB, SpillGPRs.HighGPR, false);

    // Add the address.
    MIB.addReg(SystemZ::R15D).addImm(SpillGPRs.GPROffset);

    // Make sure all call-saved GPRs are included as operands and are
    // marked as live on entry.
    for (unsigned I = 0, E = CSI.size(); I != E; ++I) {
      unsigned Reg = CSI[I].getReg();
      if (SystemZ::GR64BitRegClass.contains(Reg))
        addSavedGPR(MBB, MIB, Reg, true);
    }

    // ...likewise GPR varargs.
    if (IsVarArg)
      for (unsigned I = ZFI->getVarArgsFirstGPR(); I < SystemZ::ELFNumArgGPRs;
           ++I)
        addSavedGPR(MBB, MIB, SystemZ::ELFArgGPRs[I], true);
  }

  // Save FPRs/VRs in the normal TargetInstrInfo way.
  for (unsigned I = 0, E = CSI.size(); I != E; ++I) {
    unsigned Reg = CSI[I].getReg();
    if (SystemZ::FP64BitRegClass.contains(Reg)) {
      MBB.addLiveIn(Reg);
      TII->storeRegToStackSlot(MBB, MBBI, Reg, true, CSI[I].getFrameIdx(),
                               &SystemZ::FP64BitRegClass, TRI);
    }
    if (SystemZ::VR128BitRegClass.contains(Reg)) {
      MBB.addLiveIn(Reg);
      TII->storeRegToStackSlot(MBB, MBBI, Reg, true, CSI[I].getFrameIdx(),
                               &SystemZ::VR128BitRegClass, TRI);
    }
  }

  return true;
}

bool SystemZELFFrameLowering::restoreCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    MutableArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
  if (CSI.empty())
    return false;

  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>();
  bool HasFP = hasFP(MF);
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Restore FPRs/VRs in the normal TargetInstrInfo way.
  for (unsigned I = 0, E = CSI.size(); I != E; ++I) {
    unsigned Reg = CSI[I].getReg();
    if (SystemZ::FP64BitRegClass.contains(Reg))
      TII->loadRegFromStackSlot(MBB, MBBI, Reg, CSI[I].getFrameIdx(),
                                &SystemZ::FP64BitRegClass, TRI);
    if (SystemZ::VR128BitRegClass.contains(Reg))
      TII->loadRegFromStackSlot(MBB, MBBI, Reg, CSI[I].getFrameIdx(),
                                &SystemZ::VR128BitRegClass, TRI);
  }

  // Restore call-saved GPRs (but not call-clobbered varargs, which at
  // this point might hold return values).
  SystemZ::GPRRegs RestoreGPRs = ZFI->getRestoreGPRRegs();
  if (RestoreGPRs.LowGPR) {
    // If we saved any of %r2-%r5 as varargs, we should also be saving
    // and restoring %r6. If we're saving %r6 or above, we should be
    // restoring it too.
    assert(RestoreGPRs.LowGPR != RestoreGPRs.HighGPR &&
           "Should be loading %r15 and something else");

    // Build an LMG instruction.
    MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(SystemZ::LMG));

    // Add the explicit register operands.
    MIB.addReg(RestoreGPRs.LowGPR, RegState::Define);
    MIB.addReg(RestoreGPRs.HighGPR, RegState::Define);

    // Add the address.
    MIB.addReg(HasFP ? SystemZ::R11D : SystemZ::R15D);
    MIB.addImm(RestoreGPRs.GPROffset);

    // Do a second scan, adding regs as being defined by the instruction.
    for (unsigned I = 0, E = CSI.size(); I != E; ++I) {
      unsigned Reg = CSI[I].getReg();
      if (Reg != RestoreGPRs.LowGPR && Reg != RestoreGPRs.HighGPR &&
          SystemZ::GR64BitRegClass.contains(Reg))
        MIB.addReg(Reg, RegState::ImplicitDefine);
    }
  }

  return true;
}

void SystemZELFFrameLowering::processFunctionBeforeFrameFinalized(
    MachineFunction &MF, RegScavenger *RS) const {
  MachineFrameInfo &MFFrame = MF.getFrameInfo();
  SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>();
  MachineRegisterInfo *MRI = &MF.getRegInfo();
  bool BackChain = MF.getFunction().hasFnAttribute("backchain");

  if (!usePackedStack(MF) || BackChain)
    // Create the incoming register save area.
    getOrCreateFramePointerSaveIndex(MF);

  // Get the size of our stack frame to be allocated ...
  uint64_t StackSize = (MFFrame.estimateStackSize(MF) +
                        SystemZMC::ELFCallFrameSize);
  // ... and the maximum offset we may need to reach into the
  // caller's frame to access the save area or stack arguments.
  int64_t MaxArgOffset = 0;
  for (int I = MFFrame.getObjectIndexBegin(); I != 0; ++I)
    if (MFFrame.getObjectOffset(I) >= 0) {
      int64_t ArgOffset = MFFrame.getObjectOffset(I) +
                          MFFrame.getObjectSize(I);
      MaxArgOffset = std::max(MaxArgOffset, ArgOffset);
    }

  uint64_t MaxReach = StackSize + MaxArgOffset;
  if (!isUInt<12>(MaxReach)) {
    // We may need register scavenging slots if some parts of the frame
    // are outside the reach of an unsigned 12-bit displacement.
    // Create 2 for the case where both addresses in an MVC are
    // out of range.
    RS->addScavengingFrameIndex(MFFrame.CreateStackObject(8, Align(8), false));
    RS->addScavengingFrameIndex(MFFrame.CreateStackObject(8, Align(8), false));
  }

  // If R6 is used as an argument register it is still callee saved. If it is
  // not clobbered (and restored) in this case, it should never be marked as
  // killed.
  if (MF.front().isLiveIn(SystemZ::R6D) &&
      ZFI->getRestoreGPRRegs().LowGPR != SystemZ::R6D)
    for (auto &MO : MRI->use_nodbg_operands(SystemZ::R6D))
      MO.setIsKill(false);
}

// Emit instructions before MBBI (in MBB) to add NumBytes to Reg.
static void emitIncrement(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator &MBBI, const DebugLoc &DL,
                          Register Reg, int64_t NumBytes,
                          const TargetInstrInfo *TII) {
  while (NumBytes) {
    unsigned Opcode;
    int64_t ThisVal = NumBytes;
    if (isInt<16>(NumBytes))
      Opcode = SystemZ::AGHI;
    else {
      Opcode = SystemZ::AGFI;
      // Make sure we maintain 8-byte stack alignment.
      int64_t MinVal = -uint64_t(1) << 31;
      int64_t MaxVal = (int64_t(1) << 31) - 8;
      if (ThisVal < MinVal)
        ThisVal = MinVal;
      else if (ThisVal > MaxVal)
        ThisVal = MaxVal;
    }
    MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII->get(Opcode), Reg)
                           .addReg(Reg).addImm(ThisVal);
    // The CC implicit def is dead.
    MI->getOperand(3).setIsDead();
    NumBytes -= ThisVal;
  }
}
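
// For example, an adjustment of -0x90000000 does not fit a single AGFI
// immediate, so emitIncrement() above splits it into two instructions:
//   agfi %r15, -2147483648
//   agfi %r15, -268435456
// with each partial step clamped to a multiple of 8 so that the stack stays
// 8-byte aligned in between.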

// Add CFI for the new CFA offset.
static void buildCFAOffs(MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator MBBI,
                         const DebugLoc &DL, int Offset,
                         const SystemZInstrInfo *ZII) {
  unsigned CFIIndex = MBB.getParent()->addFrameInst(
      MCCFIInstruction::cfiDefCfaOffset(nullptr, -Offset));
  BuildMI(MBB, MBBI, DL, ZII->get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex);
}

// Add CFI for the new frame location.
static void buildDefCFAReg(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI,
                           const DebugLoc &DL, unsigned Reg,
                           const SystemZInstrInfo *ZII) {
  MachineFunction &MF = *MBB.getParent();
  MachineModuleInfo &MMI = MF.getMMI();
  const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
  unsigned RegNum = MRI->getDwarfRegNum(Reg, true);
  unsigned CFIIndex = MF.addFrameInst(
      MCCFIInstruction::createDefCfaRegister(nullptr, RegNum));
  BuildMI(MBB, MBBI, DL, ZII->get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex);
}

void SystemZELFFrameLowering::emitPrologue(MachineFunction &MF,
                                           MachineBasicBlock &MBB) const {
  assert(&MF.front() == &MBB && "Shrink-wrapping not yet supported");
  const SystemZSubtarget &STI = MF.getSubtarget<SystemZSubtarget>();
  const SystemZTargetLowering &TLI = *STI.getTargetLowering();
  MachineFrameInfo &MFFrame = MF.getFrameInfo();
  auto *ZII = static_cast<const SystemZInstrInfo *>(STI.getInstrInfo());
  SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>();
  MachineBasicBlock::iterator MBBI = MBB.begin();
  MachineModuleInfo &MMI = MF.getMMI();
  const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
  const std::vector<CalleeSavedInfo> &CSI = MFFrame.getCalleeSavedInfo();
  bool HasFP = hasFP(MF);

  // In the GHC calling convention, C stack space, including the ABI-defined
  // 160-byte base area, is (de)allocated by GHC itself. This stack space may
  // be used by LLVM as spill slots for tail-recursive GHC functions. Thus,
  // do not allocate stack space here either.
  if (MF.getFunction().getCallingConv() == CallingConv::GHC) {
    if (MFFrame.getStackSize() > 2048 * sizeof(long)) {
      report_fatal_error(
          "Pre allocated stack space for GHC function is too small");
    }
    if (HasFP) {
      report_fatal_error(
          "In GHC calling convention a frame pointer is not supported");
    }
    MFFrame.setStackSize(MFFrame.getStackSize() + SystemZMC::ELFCallFrameSize);
    return;
  }

  // Debug location must be unknown since the first debug location is used
  // to determine the end of the prologue.
  DebugLoc DL;

  // The current offset of the stack pointer from the CFA.
  int64_t SPOffsetFromCFA = -SystemZMC::ELFCFAOffsetFromInitialSP;

  if (ZFI->getSpillGPRRegs().LowGPR) {
    // Skip over the GPR saves.
    if (MBBI != MBB.end() && MBBI->getOpcode() == SystemZ::STMG)
      ++MBBI;
    else
      llvm_unreachable("Couldn't skip over GPR saves");

    // Add CFI for the GPR saves.
    for (auto &Save : CSI) {
      unsigned Reg = Save.getReg();
      if (SystemZ::GR64BitRegClass.contains(Reg)) {
        int FI = Save.getFrameIdx();
        int64_t Offset = MFFrame.getObjectOffset(FI);
        unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createOffset(
            nullptr, MRI->getDwarfRegNum(Reg, true), Offset));
        BuildMI(MBB, MBBI, DL, ZII->get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);
      }
    }
  }

  uint64_t StackSize = MFFrame.getStackSize();
  // We need to allocate the ABI-defined 160-byte base area whenever
  // we allocate stack space for our own use and whenever we call another
  // function.
  bool HasStackObject = false;
  for (unsigned i = 0, e = MFFrame.getObjectIndexEnd(); i != e; ++i)
    if (!MFFrame.isDeadObjectIndex(i)) {
      HasStackObject = true;
      break;
    }
  if (HasStackObject || MFFrame.hasCalls())
    StackSize += SystemZMC::ELFCallFrameSize;
  // Don't allocate the incoming reg save area.
  StackSize = StackSize > SystemZMC::ELFCallFrameSize
                  ? StackSize - SystemZMC::ELFCallFrameSize
                  : 0;
  MFFrame.setStackSize(StackSize);

  if (StackSize) {
    // Allocate StackSize bytes.
    int64_t Delta = -int64_t(StackSize);
    const unsigned ProbeSize = TLI.getStackProbeSize(MF);
    bool FreeProbe = (ZFI->getSpillGPRRegs().GPROffset &&
                      (ZFI->getSpillGPRRegs().GPROffset + StackSize) < ProbeSize);
    if (!FreeProbe &&
        MF.getSubtarget().getTargetLowering()->hasInlineStackProbe(MF)) {
      // Stack probing may involve looping, but splitting the prologue block
      // is not possible at this point since it would invalidate the
      // SaveBlocks / RestoreBlocks sets of PEI in the single block function
      // case. Build a pseudo to be handled later by inlineStackProbe().
      BuildMI(MBB, MBBI, DL, ZII->get(SystemZ::PROBED_STACKALLOC))
          .addImm(StackSize);
    }
    else {
      bool StoreBackchain = MF.getFunction().hasFnAttribute("backchain");
      // If we need backchain, save current stack pointer. R1 is free at
      // this point.
      if (StoreBackchain)
        BuildMI(MBB, MBBI, DL, ZII->get(SystemZ::LGR))
            .addReg(SystemZ::R1D, RegState::Define).addReg(SystemZ::R15D);
      emitIncrement(MBB, MBBI, DL, SystemZ::R15D, Delta, ZII);
      buildCFAOffs(MBB, MBBI, DL, SPOffsetFromCFA + Delta, ZII);
      if (StoreBackchain)
        BuildMI(MBB, MBBI, DL, ZII->get(SystemZ::STG))
            .addReg(SystemZ::R1D, RegState::Kill).addReg(SystemZ::R15D)
            .addImm(getBackchainOffset(MF)).addReg(0);
    }
    SPOffsetFromCFA += Delta;
  }

  if (HasFP) {
    // Copy the base of the frame to R11.
    BuildMI(MBB, MBBI, DL, ZII->get(SystemZ::LGR), SystemZ::R11D)
        .addReg(SystemZ::R15D);

    // Add CFI for the new frame location.
    buildDefCFAReg(MBB, MBBI, DL, SystemZ::R11D, ZII);

    // Mark the FramePtr as live at the beginning of every block except
    // the entry block. (We'll have marked R11 as live on entry when
    // saving the GPRs.)
    for (MachineBasicBlock &MBBJ : llvm::drop_begin(MF))
      MBBJ.addLiveIn(SystemZ::R11D);
  }

  // Skip over the FPR/VR saves.
  SmallVector<unsigned, 8> CFIIndexes;
  for (auto &Save : CSI) {
    unsigned Reg = Save.getReg();
    if (SystemZ::FP64BitRegClass.contains(Reg)) {
      if (MBBI != MBB.end() &&
          (MBBI->getOpcode() == SystemZ::STD ||
           MBBI->getOpcode() == SystemZ::STDY))
        ++MBBI;
      else
        llvm_unreachable("Couldn't skip over FPR save");
    } else if (SystemZ::VR128BitRegClass.contains(Reg)) {
      if (MBBI != MBB.end() &&
          MBBI->getOpcode() == SystemZ::VST)
        ++MBBI;
      else
        llvm_unreachable("Couldn't skip over VR save");
    } else
      continue;

    // Add CFI for this save.
    unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
    Register IgnoredFrameReg;
    int64_t Offset =
        getFrameIndexReference(MF, Save.getFrameIdx(), IgnoredFrameReg)
            .getFixed();

    unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createOffset(
        nullptr, DwarfReg, SPOffsetFromCFA + Offset));
    CFIIndexes.push_back(CFIIndex);
  }
  // Complete the CFI for the FPR/VR saves, modelling them as taking effect
  // after the last save.
  for (auto CFIIndex : CFIIndexes) {
    BuildMI(MBB, MBBI, DL, ZII->get(TargetOpcode::CFI_INSTRUCTION))
        .addCFIIndex(CFIIndex);
  }
}

void SystemZELFFrameLowering::emitEpilogue(MachineFunction &MF,
                                           MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
  auto *ZII =
      static_cast<const SystemZInstrInfo *>(MF.getSubtarget().getInstrInfo());
  SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>();
  MachineFrameInfo &MFFrame = MF.getFrameInfo();

  // See SystemZELFFrameLowering::emitPrologue
  if (MF.getFunction().getCallingConv() == CallingConv::GHC)
    return;

  // Skip the return instruction.
  assert(MBBI->isReturn() && "Can only insert epilogue into returning blocks");

  uint64_t StackSize = MFFrame.getStackSize();
  if (ZFI->getRestoreGPRRegs().LowGPR) {
    --MBBI;
    unsigned Opcode = MBBI->getOpcode();
    if (Opcode != SystemZ::LMG)
      llvm_unreachable("Expected to see callee-save register restore code");

    unsigned AddrOpNo = 2;
    DebugLoc DL = MBBI->getDebugLoc();
    uint64_t Offset = StackSize + MBBI->getOperand(AddrOpNo + 1).getImm();
    unsigned NewOpcode = ZII->getOpcodeForOffset(Opcode, Offset);

    // If the offset is too large, use the largest stack-aligned offset
    // and add the rest to the base register (the stack or frame pointer).
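    // (0x7fff8 below is the largest 8-byte-aligned value that still fits
    // LMG's signed 20-bit displacement.)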
    if (!NewOpcode) {
      uint64_t NumBytes = Offset - 0x7fff8;
      emitIncrement(MBB, MBBI, DL, MBBI->getOperand(AddrOpNo).getReg(),
                    NumBytes, ZII);
      Offset -= NumBytes;
      NewOpcode = ZII->getOpcodeForOffset(Opcode, Offset);
      assert(NewOpcode && "No restore instruction available");
    }

    MBBI->setDesc(ZII->get(NewOpcode));
    MBBI->getOperand(AddrOpNo + 1).ChangeToImmediate(Offset);
  } else if (StackSize) {
    DebugLoc DL = MBBI->getDebugLoc();
    emitIncrement(MBB, MBBI, DL, SystemZ::R15D, StackSize, ZII);
  }
}

void SystemZELFFrameLowering::inlineStackProbe(
    MachineFunction &MF, MachineBasicBlock &PrologMBB) const {
  auto *ZII =
      static_cast<const SystemZInstrInfo *>(MF.getSubtarget().getInstrInfo());
  const SystemZSubtarget &STI = MF.getSubtarget<SystemZSubtarget>();
  const SystemZTargetLowering &TLI = *STI.getTargetLowering();

  MachineInstr *StackAllocMI = nullptr;
  for (MachineInstr &MI : PrologMBB)
    if (MI.getOpcode() == SystemZ::PROBED_STACKALLOC) {
      StackAllocMI = &MI;
      break;
    }
  if (StackAllocMI == nullptr)
    return;
  uint64_t StackSize = StackAllocMI->getOperand(0).getImm();
  const unsigned ProbeSize = TLI.getStackProbeSize(MF);
  uint64_t NumFullBlocks = StackSize / ProbeSize;
  uint64_t Residual = StackSize % ProbeSize;
  int64_t SPOffsetFromCFA = -SystemZMC::ELFCFAOffsetFromInitialSP;
  MachineBasicBlock *MBB = &PrologMBB;
  MachineBasicBlock::iterator MBBI = StackAllocMI;
  const DebugLoc DL = StackAllocMI->getDebugLoc();

  // Allocate a block of Size bytes on the stack and probe it.
  auto allocateAndProbe = [&](MachineBasicBlock &InsMBB,
                              MachineBasicBlock::iterator InsPt, unsigned Size,
                              bool EmitCFI) -> void {
    emitIncrement(InsMBB, InsPt, DL, SystemZ::R15D, -int64_t(Size), ZII);
    if (EmitCFI) {
      SPOffsetFromCFA -= Size;
      buildCFAOffs(InsMBB, InsPt, DL, SPOffsetFromCFA, ZII);
    }
    // Probe by means of a volatile compare.
    MachineMemOperand *MMO = MF.getMachineMemOperand(MachinePointerInfo(),
        MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad, 8, Align(1));
    BuildMI(InsMBB, InsPt, DL, ZII->get(SystemZ::CG))
        .addReg(SystemZ::R0D, RegState::Undef)
        .addReg(SystemZ::R15D).addImm(Size - 8).addReg(0)
        .addMemOperand(MMO);
  };

  bool StoreBackchain = MF.getFunction().hasFnAttribute("backchain");
  if (StoreBackchain)
    BuildMI(*MBB, MBBI, DL, ZII->get(SystemZ::LGR))
        .addReg(SystemZ::R1D, RegState::Define).addReg(SystemZ::R15D);

  MachineBasicBlock *DoneMBB = nullptr;
  MachineBasicBlock *LoopMBB = nullptr;
  if (NumFullBlocks < 3) {
    // Emit unrolled probe statements.
    for (unsigned int i = 0; i < NumFullBlocks; i++)
      allocateAndProbe(*MBB, MBBI, ProbeSize, true/*EmitCFI*/);
  } else {
    // Emit a loop probing the pages.
    uint64_t LoopAlloc = ProbeSize * NumFullBlocks;
    SPOffsetFromCFA -= LoopAlloc;

    // Use R0D to hold the exit value.
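    // R0D is loaded with the current stack pointer and then decremented by
    // the full-block allocation; the loop below repeatedly allocates and
    // probes ProbeSize bytes until %r15 reaches that exit value. The CFA is
    // temporarily described in terms of R0D so that unwind info stays
    // correct while %r15 moves inside the loop.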
    BuildMI(*MBB, MBBI, DL, ZII->get(SystemZ::LGR), SystemZ::R0D)
        .addReg(SystemZ::R15D);
    buildDefCFAReg(*MBB, MBBI, DL, SystemZ::R0D, ZII);
    emitIncrement(*MBB, MBBI, DL, SystemZ::R0D, -int64_t(LoopAlloc), ZII);
    buildCFAOffs(*MBB, MBBI, DL,
                 -int64_t(SystemZMC::ELFCallFrameSize + LoopAlloc), ZII);

    DoneMBB = SystemZ::splitBlockBefore(MBBI, MBB);
    LoopMBB = SystemZ::emitBlockAfter(MBB);
    MBB->addSuccessor(LoopMBB);
    LoopMBB->addSuccessor(LoopMBB);
    LoopMBB->addSuccessor(DoneMBB);

    MBB = LoopMBB;
    allocateAndProbe(*MBB, MBB->end(), ProbeSize, false/*EmitCFI*/);
    BuildMI(*MBB, MBB->end(), DL, ZII->get(SystemZ::CLGR))
        .addReg(SystemZ::R15D).addReg(SystemZ::R0D);
    BuildMI(*MBB, MBB->end(), DL, ZII->get(SystemZ::BRC))
        .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_GT).addMBB(MBB);

    MBB = DoneMBB;
    MBBI = DoneMBB->begin();
    buildDefCFAReg(*MBB, MBBI, DL, SystemZ::R15D, ZII);
  }

  if (Residual)
    allocateAndProbe(*MBB, MBBI, Residual, true/*EmitCFI*/);

  if (StoreBackchain)
    BuildMI(*MBB, MBBI, DL, ZII->get(SystemZ::STG))
        .addReg(SystemZ::R1D, RegState::Kill).addReg(SystemZ::R15D)
        .addImm(getBackchainOffset(MF)).addReg(0);

  StackAllocMI->eraseFromParent();
  if (DoneMBB != nullptr) {
    // Compute the live-in lists for the new blocks.
    recomputeLiveIns(*DoneMBB);
    recomputeLiveIns(*LoopMBB);
  }
}

bool SystemZELFFrameLowering::hasFP(const MachineFunction &MF) const {
  return (MF.getTarget().Options.DisableFramePointerElim(MF) ||
          MF.getFrameInfo().hasVarSizedObjects() ||
          MF.getInfo<SystemZMachineFunctionInfo>()->getManipulatesSP());
}

StackOffset SystemZELFFrameLowering::getFrameIndexReference(
    const MachineFunction &MF, int FI, Register &FrameReg) const {
  // Our incoming SP is actually SystemZMC::ELFCallFrameSize below the CFA, so
  // add that difference here.
  StackOffset Offset =
      TargetFrameLowering::getFrameIndexReference(MF, FI, FrameReg);
  return Offset + StackOffset::getFixed(SystemZMC::ELFCallFrameSize);
}

unsigned SystemZELFFrameLowering::getRegSpillOffset(MachineFunction &MF,
                                                    Register Reg) const {
  bool IsVarArg = MF.getFunction().isVarArg();
  bool BackChain = MF.getFunction().hasFnAttribute("backchain");
  bool SoftFloat = MF.getSubtarget<SystemZSubtarget>().hasSoftFloat();
  unsigned Offset = RegSpillOffsets[Reg];
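  // With a packed stack (and unless FPR varargs still need their ABI slots),
  // the GPR save slots are shifted towards the top of the 160-byte area, so
  // that e.g. the %r15 slot moves from 0x78 to 0x98, or to 0x90 when the top
  // eight bytes are kept free for the backchain.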
  if (usePackedStack(MF) && !(IsVarArg && !SoftFloat)) {
    if (SystemZ::GR64BitRegClass.contains(Reg))
      // Put all GPRs at the top of the Register save area with packed
      // stack. Make room for the backchain if needed.
      Offset += BackChain ? 24 : 32;
    else
      Offset = 0;
  }
  return Offset;
}

int SystemZELFFrameLowering::getOrCreateFramePointerSaveIndex(
    MachineFunction &MF) const {
  SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>();
  int FI = ZFI->getFramePointerSaveIndex();
  if (!FI) {
    MachineFrameInfo &MFFrame = MF.getFrameInfo();
    int Offset = getBackchainOffset(MF) - SystemZMC::ELFCallFrameSize;
    FI = MFFrame.CreateFixedObject(8, Offset, false);
    ZFI->setFramePointerSaveIndex(FI);
  }
  return FI;
}

bool SystemZELFFrameLowering::usePackedStack(MachineFunction &MF) const {
  bool HasPackedStackAttr = MF.getFunction().hasFnAttribute("packed-stack");
  bool BackChain = MF.getFunction().hasFnAttribute("backchain");
  bool SoftFloat = MF.getSubtarget<SystemZSubtarget>().hasSoftFloat();
  if (HasPackedStackAttr && BackChain && !SoftFloat)
    report_fatal_error("packed-stack + backchain + hard-float is unsupported.");
  bool CallConv = MF.getFunction().getCallingConv() != CallingConv::GHC;
  return HasPackedStackAttr && CallConv;
}

SystemZXPLINKFrameLowering::SystemZXPLINKFrameLowering()
    : SystemZFrameLowering(TargetFrameLowering::StackGrowsUp, Align(32), 128,
                           Align(32), /* StackRealignable */ false),
      RegSpillOffsets(-1) {

  // Create a mapping from register number to save slot offset.
  // These offsets are relative to the start of the local area.
  RegSpillOffsets.grow(SystemZ::NUM_TARGET_REGS);
  for (unsigned I = 0, E = array_lengthof(XPLINKSpillOffsetTable); I != E; ++I)
    RegSpillOffsets[XPLINKSpillOffsetTable[I].Reg] =
        XPLINKSpillOffsetTable[I].Offset;
}

bool SystemZXPLINKFrameLowering::assignCalleeSavedSpillSlots(
    MachineFunction &MF, const TargetRegisterInfo *TRI,
    std::vector<CalleeSavedInfo> &CSI) const {
  MachineFrameInfo &MFFrame = MF.getFrameInfo();
  SystemZMachineFunctionInfo *MFI = MF.getInfo<SystemZMachineFunctionInfo>();
  const SystemZSubtarget &Subtarget = MF.getSubtarget<SystemZSubtarget>();
  auto &Regs = Subtarget.getSpecialRegisters<SystemZXPLINK64Registers>();

  // Scan the call-saved GPRs and find the bounds of the register spill area.
  unsigned LowGPR = 0;
  int LowOffset = INT32_MAX;
  unsigned HighGPR = LowGPR;
  int HighOffset = -1;

  unsigned RegSP = Regs.getStackPointerRegister();
  auto &GRRegClass = SystemZ::GR64BitRegClass;
  const unsigned RegSize = 8;

  auto ProcessCSI = [&](std::vector<CalleeSavedInfo> &CSIList) {
    for (auto &CS : CSIList) {
      unsigned Reg = CS.getReg();
      int Offset = RegSpillOffsets[Reg];
      if (Offset >= 0) {
        if (GRRegClass.contains(Reg)) {
          if (LowOffset > Offset) {
            LowOffset = Offset;
            LowGPR = Reg;
          }

          if (Offset > HighOffset) {
            HighOffset = Offset;
            HighGPR = Reg;
          }
        }
        int FrameIdx = MFFrame.CreateFixedSpillStackObject(RegSize, Offset);
        CS.setFrameIdx(FrameIdx);
      } else
        CS.setFrameIdx(INT32_MAX);
    }
  };

  std::vector<CalleeSavedInfo> Spills;

  // For non-leaf functions:
  // - the address of callee (entry point) register R6 must be saved
  Spills.push_back(CalleeSavedInfo(Regs.getAddressOfCalleeRegister()));

  // If the function needs a frame pointer, or if the backchain pointer should
  // be stored, then save the stack pointer register R4.
  if (hasFP(MF) || MF.getFunction().hasFnAttribute("backchain"))
    Spills.push_back(CalleeSavedInfo(RegSP));

  // Save the range of call-saved registers, for use by the
  // prologue/epilogue inserters.
  ProcessCSI(CSI);
  MFI->setRestoreGPRRegs(LowGPR, HighGPR, LowOffset);

  // Save the range of call-saved registers, for use by the epilogue inserter.
  ProcessCSI(Spills);
  MFI->setSpillGPRRegs(LowGPR, HighGPR, LowOffset);

  // Create spill slots for the remaining registers.
  for (auto &CS : CSI) {
    if (CS.getFrameIdx() != INT32_MAX)
      continue;
    unsigned Reg = CS.getReg();
    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
    Align Alignment = TRI->getSpillAlign(*RC);
    unsigned Size = TRI->getSpillSize(*RC);
    Alignment = std::min(Alignment, getStackAlign());
    int FrameIdx = MFFrame.CreateStackObject(Size, Alignment, true);
    CS.setFrameIdx(FrameIdx);
  }

  return true;
}

void SystemZXPLINKFrameLowering::determineCalleeSaves(MachineFunction &MF,
                                                      BitVector &SavedRegs,
                                                      RegScavenger *RS) const {
  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);

  bool HasFP = hasFP(MF);
  const SystemZSubtarget &Subtarget = MF.getSubtarget<SystemZSubtarget>();
  auto &Regs = Subtarget.getSpecialRegisters<SystemZXPLINK64Registers>();

  // If the function requires a frame pointer, record that the hard
  // frame pointer will be clobbered.
  if (HasFP)
    SavedRegs.set(Regs.getFramePointerRegister());

  // If the function is not an XPLeaf function, we need to save the
  // return address register. We also always use that register for
  // the return instruction, so it needs to be restored in the
  // epilogue even though that register is considered to be volatile.
  // #TODO: Implement leaf detection.
  SavedRegs.set(Regs.getReturnFunctionAddressRegister());
}

bool SystemZXPLINKFrameLowering::spillCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    ArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
  if (CSI.empty())
    return true;

  MachineFunction &MF = *MBB.getParent();
  SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>();
  const SystemZSubtarget &Subtarget = MF.getSubtarget<SystemZSubtarget>();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  auto &Regs = Subtarget.getSpecialRegisters<SystemZXPLINK64Registers>();
  SystemZ::GPRRegs SpillGPRs = ZFI->getSpillGPRRegs();
  DebugLoc DL;

  // Save GPRs
  if (SpillGPRs.LowGPR) {
    assert(SpillGPRs.LowGPR != SpillGPRs.HighGPR &&
           "Should be saving multiple registers");

    // Build an STM/STMG instruction.
    MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(SystemZ::STMG));

    // Add the explicit register operands.
    addSavedGPR(MBB, MIB, SpillGPRs.LowGPR, false);
    addSavedGPR(MBB, MIB, SpillGPRs.HighGPR, false);

    // Add the address r4.
    MIB.addReg(Regs.getStackPointerRegister());

    // Add the partial offset.
    // We cannot add the actual offset, as the stack is not finalized at this
    // point.
    MIB.addImm(SpillGPRs.GPROffset);

    // Make sure all call-saved GPRs are included as operands and are
    // marked as live on entry.
    auto &GRRegClass = SystemZ::GR64BitRegClass;
    for (unsigned I = 0, E = CSI.size(); I != E; ++I) {
      unsigned Reg = CSI[I].getReg();
      if (GRRegClass.contains(Reg))
        addSavedGPR(MBB, MIB, Reg, true);
    }
  }

  // Spill FPRs to the stack in the normal TargetInstrInfo way.
  for (unsigned I = 0, E = CSI.size(); I != E; ++I) {
    unsigned Reg = CSI[I].getReg();
    if (SystemZ::FP64BitRegClass.contains(Reg)) {
      MBB.addLiveIn(Reg);
      TII->storeRegToStackSlot(MBB, MBBI, Reg, true, CSI[I].getFrameIdx(),
                               &SystemZ::FP64BitRegClass, TRI);
    }
    if (SystemZ::VR128BitRegClass.contains(Reg)) {
      MBB.addLiveIn(Reg);
      TII->storeRegToStackSlot(MBB, MBBI, Reg, true, CSI[I].getFrameIdx(),
                               &SystemZ::VR128BitRegClass, TRI);
    }
  }

  return true;
}

void SystemZXPLINKFrameLowering::emitPrologue(MachineFunction &MF,
                                              MachineBasicBlock &MBB) const {}

void SystemZXPLINKFrameLowering::emitEpilogue(MachineFunction &MF,
                                              MachineBasicBlock &MBB) const {}

bool SystemZXPLINKFrameLowering::hasFP(const MachineFunction &MF) const {
  return false;
}