//===-- XCoreFrameLowering.cpp - Frame info for XCore Target --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains XCore frame information that doesn't fit anywhere else
// cleanly...
//
//===----------------------------------------------------------------------===//

#include "XCoreFrameLowering.h"
#include "XCore.h"
#include "XCoreInstrInfo.h"
#include "XCoreMachineFunctionInfo.h"
#include "XCoreSubtarget.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>

using namespace llvm;

static const unsigned FramePtr = XCore::R10;
static const int MaxImmU16 = (1<<16) - 1;

// helper functions. FIXME: Eliminate.
static inline bool isImmU6(unsigned val) {
  return val < (1 << 6);
}

static inline bool isImmU16(unsigned val) {
  return val < (1 << 16);
}

// Helper structure with compare function for handling stack slots.
namespace {
struct StackSlotInfo {
  int FI;
  int Offset;
  unsigned Reg;
  StackSlotInfo(int f, int o, int r) : FI(f), Offset(o), Reg(r){};
};
} // end anonymous namespace

static bool CompareSSIOffset(const StackSlotInfo& a, const StackSlotInfo& b) {
  return a.Offset < b.Offset;
}

static void EmitDefCfaRegister(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator MBBI,
                               const DebugLoc &dl, const TargetInstrInfo &TII,
                               MachineFunction &MF, unsigned DRegNum) {
  unsigned CFIIndex = MF.addFrameInst(
      MCCFIInstruction::createDefCfaRegister(nullptr, DRegNum));
  BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex);
}

static void EmitDefCfaOffset(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator MBBI,
                             const DebugLoc &dl, const TargetInstrInfo &TII,
                             int Offset) {
  MachineFunction &MF = *MBB.getParent();
  unsigned CFIIndex =
      MF.addFrameInst(MCCFIInstruction::cfiDefCfaOffset(nullptr, Offset));
  BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex);
}

static void EmitCfiOffset(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator MBBI, const DebugLoc &dl,
                          const TargetInstrInfo &TII, unsigned DRegNum,
                          int Offset) {
  MachineFunction &MF = *MBB.getParent();
  unsigned CFIIndex = MF.addFrameInst(
      MCCFIInstruction::createOffset(nullptr, DRegNum, Offset));
  BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex);
}

/// The SP register is moved in steps of 'MaxImmU16' towards the bottom of the
/// frame. During these steps, it may be necessary to spill registers.
/// IfNeededExtSP emits the necessary EXTSP instructions to move the SP only
/// as far as to make 'OffsetFromTop' reachable using an STWSP_lru6.
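/// Callers interleave the spill stores (STWSP) with these extensions, so the
/// SP is never moved further than the next spill requires.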
/// \param OffsetFromTop the spill offset from the top of the frame.
/// \param [in,out] Adjusted the current SP offset from the top of the frame.
static void IfNeededExtSP(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator MBBI, const DebugLoc &dl,
                          const TargetInstrInfo &TII, int OffsetFromTop,
                          int &Adjusted, int FrameSize, bool emitFrameMoves) {
  while (OffsetFromTop > Adjusted) {
    assert(Adjusted < FrameSize && "OffsetFromTop is beyond FrameSize");
    int remaining = FrameSize - Adjusted;
    int OpImm = (remaining > MaxImmU16) ? MaxImmU16 : remaining;
    int Opcode = isImmU6(OpImm) ? XCore::EXTSP_u6 : XCore::EXTSP_lu6;
    BuildMI(MBB, MBBI, dl, TII.get(Opcode)).addImm(OpImm);
    Adjusted += OpImm;
    if (emitFrameMoves)
      EmitDefCfaOffset(MBB, MBBI, dl, TII, Adjusted*4);
  }
}

/// The SP register is moved in steps of 'MaxImmU16' towards the top of the
/// frame. During these steps, it may be necessary to re-load registers.
/// IfNeededLDAWSP emits the necessary LDAWSP instructions to move the SP only
/// as far as to make 'OffsetFromTop' reachable using an LDWSP_lru6.
/// \param OffsetFromTop the spill offset from the top of the frame.
/// \param [in,out] RemainingAdj the current SP offset from the top of the
/// frame.
static void IfNeededLDAWSP(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI, const DebugLoc &dl,
                           const TargetInstrInfo &TII, int OffsetFromTop,
                           int &RemainingAdj) {
  while (OffsetFromTop < RemainingAdj - MaxImmU16) {
    assert(RemainingAdj && "OffsetFromTop is beyond FrameSize");
    int OpImm = (RemainingAdj > MaxImmU16) ? MaxImmU16 : RemainingAdj;
    int Opcode = isImmU6(OpImm) ? XCore::LDAWSP_ru6 : XCore::LDAWSP_lru6;
    BuildMI(MBB, MBBI, dl, TII.get(Opcode), XCore::SP).addImm(OpImm);
    RemainingAdj -= OpImm;
  }
}

/// Creates an ordered list of registers that are spilled
/// during the emitPrologue/emitEpilogue.
/// Registers are ordered according to their frame offset.
/// As offsets are negative, the largest offsets will be first.
static void GetSpillList(SmallVectorImpl<StackSlotInfo> &SpillList,
                         MachineFrameInfo &MFI, XCoreFunctionInfo *XFI,
                         bool fetchLR, bool fetchFP) {
  if (fetchLR) {
    int Offset = MFI.getObjectOffset(XFI->getLRSpillSlot());
    SpillList.push_back(StackSlotInfo(XFI->getLRSpillSlot(),
                                      Offset,
                                      XCore::LR));
  }
  if (fetchFP) {
    int Offset = MFI.getObjectOffset(XFI->getFPSpillSlot());
    SpillList.push_back(StackSlotInfo(XFI->getFPSpillSlot(),
                                      Offset,
                                      FramePtr));
  }
  llvm::sort(SpillList, CompareSSIOffset);
}

/// Creates an ordered list of EH info register 'spills'.
/// These slots are only used by the unwinder and calls to llvm.eh.return().
/// Registers are ordered according to their frame offset.
/// As offsets are negative, the largest offsets will be first.
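/// The two slots correspond to the exception pointer and exception selector
/// registers for the personality function in use.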
static void GetEHSpillList(SmallVectorImpl<StackSlotInfo> &SpillList,
                           MachineFrameInfo &MFI, XCoreFunctionInfo *XFI,
                           const Constant *PersonalityFn,
                           const TargetLowering *TL) {
  assert(XFI->hasEHSpillSlot() && "There are no EH register spill slots");
  const int *EHSlot = XFI->getEHSpillSlot();
  SpillList.push_back(
      StackSlotInfo(EHSlot[0], MFI.getObjectOffset(EHSlot[0]),
                    TL->getExceptionPointerRegister(PersonalityFn)));
  SpillList.push_back(
      StackSlotInfo(EHSlot[1], MFI.getObjectOffset(EHSlot[1]),
                    TL->getExceptionSelectorRegister(PersonalityFn)));
  llvm::sort(SpillList, CompareSSIOffset);
}

static MachineMemOperand *getFrameIndexMMO(MachineBasicBlock &MBB,
                                           int FrameIndex,
                                           MachineMemOperand::Flags flags) {
  MachineFunction *MF = MBB.getParent();
  const MachineFrameInfo &MFI = MF->getFrameInfo();
  MachineMemOperand *MMO = MF->getMachineMemOperand(
      MachinePointerInfo::getFixedStack(*MF, FrameIndex), flags,
      MFI.getObjectSize(FrameIndex), MFI.getObjectAlign(FrameIndex));
  return MMO;
}

/// Restore clobbered registers with their spill slot value.
/// The SP will be adjusted at the same time, thus the SpillList must be ordered
/// with the largest (negative) offsets first.
static void RestoreSpillList(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator MBBI,
                             const DebugLoc &dl, const TargetInstrInfo &TII,
                             int &RemainingAdj,
                             SmallVectorImpl<StackSlotInfo> &SpillList) {
  for (unsigned i = 0, e = SpillList.size(); i != e; ++i) {
    assert(SpillList[i].Offset % 4 == 0 && "Misaligned stack offset");
    assert(SpillList[i].Offset <= 0 && "Unexpected positive stack offset");
    int OffsetFromTop = - SpillList[i].Offset/4;
    IfNeededLDAWSP(MBB, MBBI, dl, TII, OffsetFromTop, RemainingAdj);
    int Offset = RemainingAdj - OffsetFromTop;
    int Opcode = isImmU6(Offset) ? XCore::LDWSP_ru6 : XCore::LDWSP_lru6;
    BuildMI(MBB, MBBI, dl, TII.get(Opcode), SpillList[i].Reg)
        .addImm(Offset)
        .addMemOperand(getFrameIndexMMO(MBB, SpillList[i].FI,
                                        MachineMemOperand::MOLoad));
  }
}

//===----------------------------------------------------------------------===//
// XCoreFrameLowering:
//===----------------------------------------------------------------------===//

XCoreFrameLowering::XCoreFrameLowering(const XCoreSubtarget &sti)
    : TargetFrameLowering(TargetFrameLowering::StackGrowsDown, Align(4), 0) {
  // Do nothing
}

bool XCoreFrameLowering::hasFP(const MachineFunction &MF) const {
  return MF.getTarget().Options.DisableFramePointerElim(MF) ||
         MF.getFrameInfo().hasVarSizedObjects();
}

void XCoreFrameLowering::emitPrologue(MachineFunction &MF,
                                      MachineBasicBlock &MBB) const {
  assert(&MF.front() == &MBB && "Shrink-wrapping not yet supported");
  MachineBasicBlock::iterator MBBI = MBB.begin();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  const MCRegisterInfo *MRI = MF.getContext().getRegisterInfo();
  const XCoreInstrInfo &TII = *MF.getSubtarget<XCoreSubtarget>().getInstrInfo();
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
  // Debug location must be unknown since the first debug location is used
  // to determine the end of the prologue.
  DebugLoc dl;

  if (MFI.getMaxAlign() > getStackAlign())
    report_fatal_error("emitPrologue unsupported alignment: " +
                       Twine(MFI.getMaxAlign().value()));

  const AttributeList &PAL = MF.getFunction().getAttributes();
  if (PAL.hasAttrSomewhere(Attribute::Nest))
    BuildMI(MBB, MBBI, dl, TII.get(XCore::LDWSP_ru6), XCore::R11).addImm(0);
    // FIX: Needs addMemOperand() but can't use getFixedStack() or getStack().

  // Work out frame sizes.
  // We will adjust the SP in stages towards the final FrameSize.
  assert(MFI.getStackSize()%4 == 0 && "Misaligned frame size");
  const int FrameSize = MFI.getStackSize() / 4;
  int Adjusted = 0;

  bool saveLR = XFI->hasLRSpillSlot();
  bool UseENTSP = saveLR && FrameSize
                  && (MFI.getObjectOffset(XFI->getLRSpillSlot()) == 0);
  if (UseENTSP)
    saveLR = false;
  bool FP = hasFP(MF);
  bool emitFrameMoves = XCoreRegisterInfo::needsFrameMoves(MF);

  if (UseENTSP) {
    // Allocate space on the stack at the same time as saving LR.
    Adjusted = (FrameSize > MaxImmU16) ? MaxImmU16 : FrameSize;
    int Opcode = isImmU6(Adjusted) ? XCore::ENTSP_u6 : XCore::ENTSP_lu6;
    MBB.addLiveIn(XCore::LR);
    MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(Opcode));
    MIB.addImm(Adjusted);
    MIB->addRegisterKilled(XCore::LR, MF.getSubtarget().getRegisterInfo(),
                           true);
    if (emitFrameMoves) {
      EmitDefCfaOffset(MBB, MBBI, dl, TII, Adjusted*4);
      unsigned DRegNum = MRI->getDwarfRegNum(XCore::LR, true);
      EmitCfiOffset(MBB, MBBI, dl, TII, DRegNum, 0);
    }
  }

  // If necessary, save LR and FP to the stack, as we EXTSP.
  SmallVector<StackSlotInfo,2> SpillList;
  GetSpillList(SpillList, MFI, XFI, saveLR, FP);
  // We want the nearest (negative) offsets first, so reverse list.
  std::reverse(SpillList.begin(), SpillList.end());
  for (unsigned i = 0, e = SpillList.size(); i != e; ++i) {
    assert(SpillList[i].Offset % 4 == 0 && "Misaligned stack offset");
    assert(SpillList[i].Offset <= 0 && "Unexpected positive stack offset");
    int OffsetFromTop = - SpillList[i].Offset/4;
    IfNeededExtSP(MBB, MBBI, dl, TII, OffsetFromTop, Adjusted, FrameSize,
                  emitFrameMoves);
    int Offset = Adjusted - OffsetFromTop;
    int Opcode = isImmU6(Offset) ? XCore::STWSP_ru6 : XCore::STWSP_lru6;
    MBB.addLiveIn(SpillList[i].Reg);
    BuildMI(MBB, MBBI, dl, TII.get(Opcode))
        .addReg(SpillList[i].Reg, RegState::Kill)
        .addImm(Offset)
        .addMemOperand(getFrameIndexMMO(MBB, SpillList[i].FI,
                                        MachineMemOperand::MOStore));
    if (emitFrameMoves) {
      unsigned DRegNum = MRI->getDwarfRegNum(SpillList[i].Reg, true);
      EmitCfiOffset(MBB, MBBI, dl, TII, DRegNum, SpillList[i].Offset);
    }
  }

  // Complete any remaining Stack adjustment.
  IfNeededExtSP(MBB, MBBI, dl, TII, FrameSize, Adjusted, FrameSize,
                emitFrameMoves);
  assert(Adjusted==FrameSize && "IfNeededExtSP has not completed adjustment");

  if (FP) {
    // Set the FP from the SP.
    BuildMI(MBB, MBBI, dl, TII.get(XCore::LDAWSP_ru6), FramePtr).addImm(0);
    if (emitFrameMoves)
      EmitDefCfaRegister(MBB, MBBI, dl, TII, MF,
                         MRI->getDwarfRegNum(FramePtr, true));
  }

  if (emitFrameMoves) {
    // Frame moves for callee saved.
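    // Each entry pairs the spill instruction recorded in
    // spillCalleeSavedRegisters() with its CalleeSavedInfo.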
    for (const auto &SpillLabel : XFI->getSpillLabels()) {
      MachineBasicBlock::iterator Pos = SpillLabel.first;
      ++Pos;
      const CalleeSavedInfo &CSI = SpillLabel.second;
      int Offset = MFI.getObjectOffset(CSI.getFrameIdx());
      unsigned DRegNum = MRI->getDwarfRegNum(CSI.getReg(), true);
      EmitCfiOffset(MBB, Pos, dl, TII, DRegNum, Offset);
    }
    if (XFI->hasEHSpillSlot()) {
      // The unwinder requires stack slot & CFI offsets for the exception info.
      // We do not save/spill these registers.
      const Function *Fn = &MF.getFunction();
      const Constant *PersonalityFn =
          Fn->hasPersonalityFn() ? Fn->getPersonalityFn() : nullptr;
      SmallVector<StackSlotInfo, 2> SpillList;
      GetEHSpillList(SpillList, MFI, XFI, PersonalityFn,
                     MF.getSubtarget().getTargetLowering());
      assert(SpillList.size()==2 && "Unexpected SpillList size");
      EmitCfiOffset(MBB, MBBI, dl, TII,
                    MRI->getDwarfRegNum(SpillList[0].Reg, true),
                    SpillList[0].Offset);
      EmitCfiOffset(MBB, MBBI, dl, TII,
                    MRI->getDwarfRegNum(SpillList[1].Reg, true),
                    SpillList[1].Offset);
    }
  }
}

void XCoreFrameLowering::emitEpilogue(MachineFunction &MF,
                                      MachineBasicBlock &MBB) const {
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
  const XCoreInstrInfo &TII = *MF.getSubtarget<XCoreSubtarget>().getInstrInfo();
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
  DebugLoc dl = MBBI->getDebugLoc();
  unsigned RetOpcode = MBBI->getOpcode();

  // Work out frame sizes.
  // We will adjust the SP in stages towards the final FrameSize.
  int RemainingAdj = MFI.getStackSize();
  assert(RemainingAdj%4 == 0 && "Misaligned frame size");
  RemainingAdj /= 4;

  if (RetOpcode == XCore::EH_RETURN) {
    // 'Restore' the exception info the unwinder has placed into the stack
    // slots.
    const Function *Fn = &MF.getFunction();
    const Constant *PersonalityFn =
        Fn->hasPersonalityFn() ? Fn->getPersonalityFn() : nullptr;
    SmallVector<StackSlotInfo, 2> SpillList;
    GetEHSpillList(SpillList, MFI, XFI, PersonalityFn,
                   MF.getSubtarget().getTargetLowering());
    RestoreSpillList(MBB, MBBI, dl, TII, RemainingAdj, SpillList);

    // Return to the landing pad.
    Register EhStackReg = MBBI->getOperand(0).getReg();
    Register EhHandlerReg = MBBI->getOperand(1).getReg();
    BuildMI(MBB, MBBI, dl, TII.get(XCore::SETSP_1r)).addReg(EhStackReg);
    BuildMI(MBB, MBBI, dl, TII.get(XCore::BAU_1r)).addReg(EhHandlerReg);
    MBB.erase(MBBI); // Erase the previous return instruction.
    return;
  }

  bool restoreLR = XFI->hasLRSpillSlot();
  bool UseRETSP = restoreLR && RemainingAdj
                  && (MFI.getObjectOffset(XFI->getLRSpillSlot()) == 0);
  if (UseRETSP)
    restoreLR = false;
  bool FP = hasFP(MF);

  if (FP) // Restore the stack pointer.
    BuildMI(MBB, MBBI, dl, TII.get(XCore::SETSP_1r)).addReg(FramePtr);

  // If necessary, restore LR and FP from the stack, as we EXTSP.
  SmallVector<StackSlotInfo,2> SpillList;
  GetSpillList(SpillList, MFI, XFI, restoreLR, FP);
  RestoreSpillList(MBB, MBBI, dl, TII, RemainingAdj, SpillList);

  if (RemainingAdj) {
    // Complete all but one of the remaining Stack adjustments.
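    // The last step is folded into a RETSP when possible; otherwise a single
    // LDAWSP restores the SP ahead of the existing return instruction.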
    IfNeededLDAWSP(MBB, MBBI, dl, TII, 0, RemainingAdj);
    if (UseRETSP) {
      // Fold epilogue into return instruction.
      assert(RetOpcode == XCore::RETSP_u6
             || RetOpcode == XCore::RETSP_lu6);
      int Opcode = isImmU6(RemainingAdj) ? XCore::RETSP_u6 : XCore::RETSP_lu6;
      MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(Opcode))
                                    .addImm(RemainingAdj);
      for (unsigned i = 3, e = MBBI->getNumOperands(); i < e; ++i)
        MIB->addOperand(MBBI->getOperand(i)); // copy any variadic operands
      MBB.erase(MBBI); // Erase the previous return instruction.
    } else {
      int Opcode = isImmU6(RemainingAdj) ? XCore::LDAWSP_ru6 :
                                           XCore::LDAWSP_lru6;
      BuildMI(MBB, MBBI, dl, TII.get(Opcode), XCore::SP).addImm(RemainingAdj);
      // Don't erase the return instruction.
    }
  } // else Don't erase the return instruction.
}

bool XCoreFrameLowering::spillCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    ArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
  if (CSI.empty())
    return true;

  MachineFunction *MF = MBB.getParent();
  const TargetInstrInfo &TII = *MF->getSubtarget().getInstrInfo();
  XCoreFunctionInfo *XFI = MF->getInfo<XCoreFunctionInfo>();
  bool emitFrameMoves = XCoreRegisterInfo::needsFrameMoves(*MF);

  DebugLoc DL;
  if (MI != MBB.end() && !MI->isDebugInstr())
    DL = MI->getDebugLoc();

  for (const CalleeSavedInfo &I : CSI) {
    Register Reg = I.getReg();
    assert(Reg != XCore::LR && !(Reg == XCore::R10 && hasFP(*MF)) &&
           "LR & FP are always handled in emitPrologue");

    // Add the callee-saved register as live-in. It's killed at the spill.
    MBB.addLiveIn(Reg);
    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
    TII.storeRegToStackSlot(MBB, MI, Reg, true, I.getFrameIdx(), RC, TRI,
                            Register());
    if (emitFrameMoves) {
      auto Store = MI;
      --Store;
      XFI->getSpillLabels().push_back(std::make_pair(Store, I));
    }
  }
  return true;
}

bool XCoreFrameLowering::restoreCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    MutableArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  const TargetInstrInfo &TII = *MF->getSubtarget().getInstrInfo();
  bool AtStart = MI == MBB.begin();
  MachineBasicBlock::iterator BeforeI = MI;
  if (!AtStart)
    --BeforeI;
  for (const CalleeSavedInfo &CSR : CSI) {
    Register Reg = CSR.getReg();
    assert(Reg != XCore::LR && !(Reg == XCore::R10 && hasFP(*MF)) &&
           "LR & FP are always handled in emitEpilogue");

    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
    TII.loadRegFromStackSlot(MBB, MI, Reg, CSR.getFrameIdx(), RC, TRI,
                             Register());
    assert(MI != MBB.begin() &&
           "loadRegFromStackSlot didn't insert any code!");
    // Insert in reverse order. loadRegFromStackSlot can insert multiple
    // instructions.
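    // Point MI at the first instruction just inserted so the next restore is
    // emitted before it.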
    if (AtStart)
      MI = MBB.begin();
    else {
      MI = BeforeI;
      ++MI;
    }
  }
  return true;
}

// This function eliminates ADJCALLSTACKDOWN,
// ADJCALLSTACKUP pseudo instructions
MachineBasicBlock::iterator XCoreFrameLowering::eliminateCallFramePseudoInstr(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator I) const {
  const XCoreInstrInfo &TII = *MF.getSubtarget<XCoreSubtarget>().getInstrInfo();
  if (!hasReservedCallFrame(MF)) {
    // Turn the adjcallstackdown instruction into 'extsp <amt>' and the
    // adjcallstackup instruction into 'ldaw sp, sp[<amt>]'
    MachineInstr &Old = *I;
    uint64_t Amount = Old.getOperand(0).getImm();
    if (Amount != 0) {
      // We need to keep the stack aligned properly. To do this, we round the
      // amount of space needed for the outgoing arguments up to the next
      // alignment boundary.
      Amount = alignTo(Amount, getStackAlign());

      assert(Amount%4 == 0);
      Amount /= 4;

      bool isU6 = isImmU6(Amount);
      if (!isU6 && !isImmU16(Amount)) {
        // FIX could emit multiple instructions in this case.
#ifndef NDEBUG
        errs() << "eliminateCallFramePseudoInstr size too big: "
               << Amount << "\n";
#endif
        llvm_unreachable(nullptr);
      }

      MachineInstr *New;
      if (Old.getOpcode() == XCore::ADJCALLSTACKDOWN) {
        int Opcode = isU6 ? XCore::EXTSP_u6 : XCore::EXTSP_lu6;
        New = BuildMI(MF, Old.getDebugLoc(), TII.get(Opcode)).addImm(Amount);
      } else {
        assert(Old.getOpcode() == XCore::ADJCALLSTACKUP);
        int Opcode = isU6 ? XCore::LDAWSP_ru6 : XCore::LDAWSP_lru6;
        New = BuildMI(MF, Old.getDebugLoc(), TII.get(Opcode), XCore::SP)
                  .addImm(Amount);
      }

      // Replace the pseudo instruction with a new instruction...
      MBB.insert(I, New);
    }
  }

  return MBB.erase(I);
}

void XCoreFrameLowering::determineCalleeSaves(MachineFunction &MF,
                                              BitVector &SavedRegs,
                                              RegScavenger *RS) const {
  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);

  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();

  const MachineRegisterInfo &MRI = MF.getRegInfo();
  bool LRUsed = MRI.isPhysRegModified(XCore::LR);

  if (!LRUsed && !MF.getFunction().isVarArg() &&
      MF.getFrameInfo().estimateStackSize(MF))
    // If we need to extend the stack it is more efficient to use entsp / retsp.
    // We force the LR to be saved so these instructions are used.
    LRUsed = true;

  if (MF.callsUnwindInit() || MF.callsEHReturn()) {
    // The unwinder expects to find spill slots for the exception info regs R0
    // & R1. These are used during llvm.eh.return() to 'restore' the exception
    // info. N.B. we do not spill or restore R0, R1 during normal operation.
    XFI->createEHSpillSlot(MF);
    // As we will have a stack, we force the LR to be saved.
    LRUsed = true;
  }

  if (LRUsed) {
    // We will handle the LR in the prologue/epilogue
    // and allocate space on the stack ourselves.
    SavedRegs.reset(XCore::LR);
    XFI->createLRSpillSlot(MF);
  }

  if (hasFP(MF))
    // A callee save register is used to hold the FP.
    // This needs saving / restoring in the prologue / epilogue.
    XFI->createFPSpillSlot(MF);
}

void XCoreFrameLowering::
processFunctionBeforeFrameFinalized(MachineFunction &MF,
                                    RegScavenger *RS) const {
  assert(RS && "requiresRegisterScavenging failed");
  MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetRegisterClass &RC = XCore::GRRegsRegClass;
  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
  // Reserve slots close to SP or frame pointer for Scavenging spills.
  // When using SP for small frames, we don't need any scratch registers.
  // When using SP for large frames, we may need 2 scratch registers.
  // When using FP, for large or small frames, we may need 1 scratch register.
  unsigned Size = TRI.getSpillSize(RC);
  Align Alignment = TRI.getSpillAlign(RC);
  if (XFI->isLargeFrame(MF) || hasFP(MF))
    RS->addScavengingFrameIndex(MFI.CreateStackObject(Size, Alignment, false));
  if (XFI->isLargeFrame(MF) && !hasFP(MF))
    RS->addScavengingFrameIndex(MFI.CreateStackObject(Size, Alignment, false));
}