//===- ARMFrameLowering.cpp - ARM Frame Information -----------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the ARM implementation of TargetFrameLowering class.
//
//===----------------------------------------------------------------------===//

#include "ARMFrameLowering.h"
#include "ARMBaseInstrInfo.h"
#include "ARMBaseRegisterInfo.h"
#include "ARMConstantPoolValue.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "Utils/ARMBaseInfo.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <utility>
#include <vector>

#define DEBUG_TYPE "arm-frame-lowering"

using namespace llvm;

static cl::opt<bool>
SpillAlignedNEONRegs("align-neon-spills", cl::Hidden, cl::init(true),
                     cl::desc("Align ARM NEON spills in prolog and epilog"));

static MachineBasicBlock::iterator
skipAlignedDPRCS2Spills(MachineBasicBlock::iterator MI,
                        unsigned NumAlignedDPRCS2Regs);

ARMFrameLowering::ARMFrameLowering(const ARMSubtarget &sti)
    : TargetFrameLowering(StackGrowsDown, sti.getStackAlignment(), 0, 4),
      STI(sti) {}

bool ARMFrameLowering::noFramePointerElim(const MachineFunction &MF) const {
  // iOS always has a FP for backtracking, force other targets to keep their FP
  // when doing FastISel. The emitted code is currently superior, and in cases
  // like test-suite's lencod, FastISel isn't quite correct when FP is
  // eliminated.
  return TargetFrameLowering::noFramePointerElim(MF) ||
         MF.getSubtarget<ARMSubtarget>().useFastISel();
}

/// Returns true if the target can safely skip saving callee-saved registers
/// for noreturn nounwind functions.
bool ARMFrameLowering::enableCalleeSaveSkip(const MachineFunction &MF) const {
  assert(MF.getFunction().hasFnAttribute(Attribute::NoReturn) &&
         MF.getFunction().hasFnAttribute(Attribute::NoUnwind) &&
         !MF.getFunction().hasFnAttribute(Attribute::UWTable));

  // Frame pointer and link register are not treated as normal CSRs, thus we
  // can always skip CSR saves for nonreturning functions.
  return true;
}

/// hasFP - Return true if the specified function should have a dedicated frame
/// pointer register. This is true if the function has variable sized allocas
/// or if frame pointer elimination is disabled.
bool ARMFrameLowering::hasFP(const MachineFunction &MF) const {
  const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // ABI-required frame pointer.
  if (MF.getTarget().Options.DisableFramePointerElim(MF))
    return true;

  // Frame pointer required for use within this function.
  return (RegInfo->needsStackRealignment(MF) ||
          MFI.hasVarSizedObjects() ||
          MFI.isFrameAddressTaken());
}

/// hasReservedCallFrame - Under normal circumstances, when a frame pointer is
/// not required, we reserve argument space for call sites in the function
/// immediately on entry to the current function. This eliminates the need for
/// add/sub sp brackets around call sites. Returns true if the call frame is
/// included as part of the stack frame.
bool ARMFrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  unsigned CFSize = MFI.getMaxCallFrameSize();
  // It's not always a good idea to include the call frame as part of the
  // stack frame. ARM (especially Thumb) has only a small immediate offset
  // range for addressing the stack frame. So a large call frame can cause
  // poor codegen and may even make it impossible to scavenge a register.
  if (CFSize >= ((1 << 12) - 1) / 2) // Half of imm12
    return false;

  return !MFI.hasVarSizedObjects();
}

/// canSimplifyCallFramePseudos - If there is a reserved call frame, the
/// call frame pseudos can be simplified. Unlike most targets, having a FP
/// is not sufficient here since we still may reference some objects via SP
/// even when FP is available in Thumb2 mode.
bool
ARMFrameLowering::canSimplifyCallFramePseudos(const MachineFunction &MF) const {
  return hasReservedCallFrame(MF) || MF.getFrameInfo().hasVarSizedObjects();
}

static bool isCSRestore(MachineInstr &MI, const ARMBaseInstrInfo &TII,
                        const MCPhysReg *CSRegs) {
  // Integer spill area is handled with "pop".
  if (isPopOpcode(MI.getOpcode())) {
    // The first two operands are predicates. The last two are
    // imp-def and imp-use of SP. Check everything in between.
    for (int i = 5, e = MI.getNumOperands(); i != e; ++i)
      if (!isCalleeSavedRegister(MI.getOperand(i).getReg(), CSRegs))
        return false;
    return true;
  }
  if ((MI.getOpcode() == ARM::LDR_POST_IMM ||
       MI.getOpcode() == ARM::LDR_POST_REG ||
       MI.getOpcode() == ARM::t2LDR_POST) &&
      isCalleeSavedRegister(MI.getOperand(0).getReg(), CSRegs) &&
      MI.getOperand(1).getReg() == ARM::SP)
    return true;

  return false;
}

static void emitRegPlusImmediate(
    bool isARM, MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
    const DebugLoc &dl, const ARMBaseInstrInfo &TII, unsigned DestReg,
    unsigned SrcReg, int NumBytes, unsigned MIFlags = MachineInstr::NoFlags,
    ARMCC::CondCodes Pred = ARMCC::AL, unsigned PredReg = 0) {
  if (isARM)
    emitARMRegPlusImmediate(MBB, MBBI, dl, DestReg, SrcReg, NumBytes,
                            Pred, PredReg, TII, MIFlags);
  else
    emitT2RegPlusImmediate(MBB, MBBI, dl, DestReg, SrcReg, NumBytes,
                           Pred, PredReg, TII, MIFlags);
}

static void emitSPUpdate(bool isARM, MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator &MBBI, const DebugLoc &dl,
                         const ARMBaseInstrInfo &TII, int NumBytes,
                         unsigned MIFlags = MachineInstr::NoFlags,
                         ARMCC::CondCodes Pred = ARMCC::AL,
                         unsigned PredReg = 0) {
  emitRegPlusImmediate(isARM, MBB, MBBI, dl, TII, ARM::SP, ARM::SP, NumBytes,
                       MIFlags, Pred, PredReg);
}

static int sizeOfSPAdjustment(const MachineInstr &MI) {
  int RegSize;
  switch (MI.getOpcode()) {
  case ARM::VSTMDDB_UPD:
    RegSize = 8;
    break;
  case ARM::STMDB_UPD:
  case ARM::t2STMDB_UPD:
    RegSize = 4;
    break;
  case ARM::t2STR_PRE:
  case ARM::STR_PRE_IMM:
    return 4;
  default:
    llvm_unreachable("Unknown push or pop like instruction");
  }

  int count = 0;
  // ARM and Thumb2 push/pop insts have explicit "sp, sp" operands (+
  // pred) so the list starts at 4.
  for (int i = MI.getNumOperands() - 1; i >= 4; --i)
    count += RegSize;
  return count;
}

static bool WindowsRequiresStackProbe(const MachineFunction &MF,
                                      size_t StackSizeInBytes) {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const Function &F = MF.getFunction();
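  // The probe threshold defaults to one 4 KiB page and is reduced slightly
  // when a stack protector is present; either way it can be overridden by the
  // "stack-probe-size" function attribute checked below.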
  unsigned StackProbeSize = (MFI.getStackProtectorIndex() > 0) ? 4080 : 4096;
  if (F.hasFnAttribute("stack-probe-size"))
    F.getFnAttribute("stack-probe-size")
        .getValueAsString()
        .getAsInteger(0, StackProbeSize);
  return (StackSizeInBytes >= StackProbeSize) &&
         !F.hasFnAttribute("no-stack-arg-probe");
}

namespace {

struct StackAdjustingInsts {
  struct InstInfo {
    MachineBasicBlock::iterator I;
    unsigned SPAdjust;
    bool BeforeFPSet;
  };

  SmallVector<InstInfo, 4> Insts;

  void addInst(MachineBasicBlock::iterator I, unsigned SPAdjust,
               bool BeforeFPSet = false) {
    InstInfo Info = {I, SPAdjust, BeforeFPSet};
    Insts.push_back(Info);
  }

  void addExtraBytes(const MachineBasicBlock::iterator I, unsigned ExtraBytes) {
    auto Info =
        llvm::find_if(Insts, [&](InstInfo &Info) { return Info.I == I; });
    assert(Info != Insts.end() && "invalid sp adjusting instruction");
    Info->SPAdjust += ExtraBytes;
  }

  void emitDefCFAOffsets(MachineBasicBlock &MBB, const DebugLoc &dl,
                         const ARMBaseInstrInfo &TII, bool HasFP) {
    MachineFunction &MF = *MBB.getParent();
    unsigned CFAOffset = 0;
    for (auto &Info : Insts) {
      if (HasFP && !Info.BeforeFPSet)
        return;

      CFAOffset -= Info.SPAdjust;
      unsigned CFIIndex = MF.addFrameInst(
          MCCFIInstruction::createDefCfaOffset(nullptr, CFAOffset));
      BuildMI(MBB, std::next(Info.I), dl,
              TII.get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex)
          .setMIFlags(MachineInstr::FrameSetup);
    }
  }
};

} // end anonymous namespace

/// Emit an instruction sequence that will align the address in
/// register Reg by zero-ing out the lower bits. For versions of the
/// architecture that support Neon, this must be done in a single
/// instruction, since skipAlignedDPRCS2Spills assumes it is done in a
/// single instruction. That function only gets called when optimizing
/// spilling of D registers on a core with the Neon instruction set
/// present.
static void emitAligningInstructions(MachineFunction &MF, ARMFunctionInfo *AFI,
                                     const TargetInstrInfo &TII,
                                     MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MBBI,
                                     const DebugLoc &DL, const unsigned Reg,
                                     const unsigned Alignment,
                                     const bool MustBeSingleInstruction) {
  const ARMSubtarget &AST =
      static_cast<const ARMSubtarget &>(MF.getSubtarget());
  const bool CanUseBFC = AST.hasV6T2Ops() || AST.hasV7Ops();
  const unsigned AlignMask = Alignment - 1;
  const unsigned NrBitsToZero = countTrailingZeros(Alignment);
  assert(!AFI->isThumb1OnlyFunction() && "Thumb1 not supported");
  if (!AFI->isThumbFunction()) {
    // if the BFC instruction is available, use that to zero the lower
    // bits:
    //   bfc Reg, #0, log2(Alignment)
    // otherwise use BIC, if the mask to zero the required number of bits
    // can be encoded in the bic immediate field
    //   bic Reg, Reg, Alignment-1
    // otherwise, emit
    //   lsr Reg, Reg, log2(Alignment)
    //   lsl Reg, Reg, log2(Alignment)
    if (CanUseBFC) {
      BuildMI(MBB, MBBI, DL, TII.get(ARM::BFC), Reg)
          .addReg(Reg, RegState::Kill)
          .addImm(~AlignMask)
          .add(predOps(ARMCC::AL));
    } else if (AlignMask <= 255) {
      BuildMI(MBB, MBBI, DL, TII.get(ARM::BICri), Reg)
          .addReg(Reg, RegState::Kill)
          .addImm(AlignMask)
          .add(predOps(ARMCC::AL))
          .add(condCodeOp());
    } else {
      assert(!MustBeSingleInstruction &&
             "Shouldn't call emitAligningInstructions demanding a single "
             "instruction to be emitted for large stack alignment for a target "
             "without BFC.");
      BuildMI(MBB, MBBI, DL, TII.get(ARM::MOVsi), Reg)
          .addReg(Reg, RegState::Kill)
          .addImm(ARM_AM::getSORegOpc(ARM_AM::lsr, NrBitsToZero))
          .add(predOps(ARMCC::AL))
          .add(condCodeOp());
      BuildMI(MBB, MBBI, DL, TII.get(ARM::MOVsi), Reg)
          .addReg(Reg, RegState::Kill)
          .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, NrBitsToZero))
          .add(predOps(ARMCC::AL))
          .add(condCodeOp());
    }
  } else {
    // Since this is only reached for Thumb-2 targets, the BFC instruction
    // should always be available.
    assert(CanUseBFC);
    BuildMI(MBB, MBBI, DL, TII.get(ARM::t2BFC), Reg)
        .addReg(Reg, RegState::Kill)
        .addImm(~AlignMask)
        .add(predOps(ARMCC::AL));
  }
}

/// We need the offset of the frame pointer relative to other MachineFrameInfo
/// offsets which are encoded relative to SP at function begin.
/// See also emitPrologue() for how the FP is set up.
/// Unfortunately we cannot determine this value in determineCalleeSaves() yet
/// as assignCalleeSavedSpillSlots() hasn't run at this point. Instead we use
/// this to produce a conservative estimate that we check in an assert() later.
static int getMaxFPOffset(const Function &F, const ARMFunctionInfo &AFI) {
  // This is a conservative estimation: Assume the frame pointer being r7 and
  // pc("r15") up to r8 getting spilled before (= 8 registers).
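  // For example, with a 16-byte vararg register save area the estimate is
  // -16 - 8 * 4 = -48 bytes below the incoming stack pointer.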
  return -AFI.getArgRegsSaveSize() - (8 * 4);
}

void ARMFrameLowering::emitPrologue(MachineFunction &MF,
                                    MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = MBB.begin();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  MachineModuleInfo &MMI = MF.getMMI();
  MCContext &Context = MMI.getContext();
  const TargetMachine &TM = MF.getTarget();
  const MCRegisterInfo *MRI = Context.getRegisterInfo();
  const ARMBaseRegisterInfo *RegInfo = STI.getRegisterInfo();
  const ARMBaseInstrInfo &TII = *STI.getInstrInfo();
  assert(!AFI->isThumb1OnlyFunction() &&
         "This emitPrologue does not support Thumb1!");
  bool isARM = !AFI->isThumbFunction();
  unsigned Align = STI.getFrameLowering()->getStackAlignment();
  unsigned ArgRegsSaveSize = AFI->getArgRegsSaveSize();
  unsigned NumBytes = MFI.getStackSize();
  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();

  // Debug location must be unknown since the first debug location is used
  // to determine the end of the prologue.
  DebugLoc dl;

  unsigned FramePtr = RegInfo->getFrameRegister(MF);

  // Determine the size of each callee-save spill area and record which frame
  // index belongs to which area.
  unsigned GPRCS1Size = 0, GPRCS2Size = 0, DPRCSSize = 0;
  int FramePtrSpillFI = 0;
  int D8SpillFI = 0;

  // All calls are tail calls in GHC calling conv, and functions have no
  // prologue/epilogue.
  if (MF.getFunction().getCallingConv() == CallingConv::GHC)
    return;

  StackAdjustingInsts DefCFAOffsetCandidates;
  bool HasFP = hasFP(MF);

  // Allocate the vararg register save area.
  if (ArgRegsSaveSize) {
    emitSPUpdate(isARM, MBB, MBBI, dl, TII, -ArgRegsSaveSize,
                 MachineInstr::FrameSetup);
    DefCFAOffsetCandidates.addInst(std::prev(MBBI), ArgRegsSaveSize, true);
  }

  if (!AFI->hasStackFrame() &&
      (!STI.isTargetWindows() || !WindowsRequiresStackProbe(MF, NumBytes))) {
    if (NumBytes - ArgRegsSaveSize != 0) {
      emitSPUpdate(isARM, MBB, MBBI, dl, TII, -(NumBytes - ArgRegsSaveSize),
                   MachineInstr::FrameSetup);
      DefCFAOffsetCandidates.addInst(std::prev(MBBI),
                                     NumBytes - ArgRegsSaveSize, true);
    }
    DefCFAOffsetCandidates.emitDefCFAOffsets(MBB, dl, TII, HasFP);
    return;
  }

  // Determine spill area sizes.
  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    unsigned Reg = CSI[i].getReg();
    int FI = CSI[i].getFrameIdx();
    switch (Reg) {
    case ARM::R8:
    case ARM::R9:
    case ARM::R10:
    case ARM::R11:
    case ARM::R12:
      if (STI.splitFramePushPop(MF)) {
        GPRCS2Size += 4;
        break;
      }
      LLVM_FALLTHROUGH;
    case ARM::R0:
    case ARM::R1:
    case ARM::R2:
    case ARM::R3:
    case ARM::R4:
    case ARM::R5:
    case ARM::R6:
    case ARM::R7:
    case ARM::LR:
      if (Reg == FramePtr)
        FramePtrSpillFI = FI;
      GPRCS1Size += 4;
      break;
    default:
      // This is a DPR. Exclude the aligned DPRCS2 spills.
      if (Reg == ARM::D8)
        D8SpillFI = FI;
      if (Reg < ARM::D8 || Reg >= ARM::D8 + AFI->getNumAlignedDPRCS2Regs())
        DPRCSSize += 8;
    }
  }

  // Move past area 1.
  MachineBasicBlock::iterator LastPush = MBB.end(), GPRCS1Push, GPRCS2Push;
  if (GPRCS1Size > 0) {
    GPRCS1Push = LastPush = MBBI++;
    DefCFAOffsetCandidates.addInst(LastPush, GPRCS1Size, true);
  }

  // Determine starting offsets of spill areas.
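  // From the computations below, the areas sit under the incoming SP in this
  // order (highest address first):
  //   [ vararg register save area ]  ArgRegsSaveSize
  //   [ GPR callee-save area 1    ]  GPRCS1Size
  //   [ GPR callee-save area 2    ]  GPRCS2Size (only when splitFramePushPop)
  //   [ DPR alignment gap         ]  DPRGapSize
  //   [ DPR callee-save area      ]  DPRCSSize
  //   [ locals, spill slots, ...  ]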
  unsigned GPRCS1Offset = NumBytes - ArgRegsSaveSize - GPRCS1Size;
  unsigned GPRCS2Offset = GPRCS1Offset - GPRCS2Size;
  unsigned DPRAlign = DPRCSSize ? std::min(8U, Align) : 4U;
  unsigned DPRGapSize = (GPRCS1Size + GPRCS2Size + ArgRegsSaveSize) % DPRAlign;
  unsigned DPRCSOffset = GPRCS2Offset - DPRGapSize - DPRCSSize;
  int FramePtrOffsetInPush = 0;
  if (HasFP) {
    int FPOffset = MFI.getObjectOffset(FramePtrSpillFI);
    assert(getMaxFPOffset(MF.getFunction(), *AFI) <= FPOffset &&
           "Max FP estimation is wrong");
    FramePtrOffsetInPush = FPOffset + ArgRegsSaveSize;
    AFI->setFramePtrSpillOffset(MFI.getObjectOffset(FramePtrSpillFI) +
                                NumBytes);
  }
  AFI->setGPRCalleeSavedArea1Offset(GPRCS1Offset);
  AFI->setGPRCalleeSavedArea2Offset(GPRCS2Offset);
  AFI->setDPRCalleeSavedAreaOffset(DPRCSOffset);

  // Move past area 2.
  if (GPRCS2Size > 0) {
    GPRCS2Push = LastPush = MBBI++;
    DefCFAOffsetCandidates.addInst(LastPush, GPRCS2Size);
  }

  // Prolog/epilog inserter assumes we correctly align DPRs on the stack, so
  // our .cfi_offset operations will reflect that.
  if (DPRGapSize) {
    assert(DPRGapSize == 4 && "unexpected alignment requirements for DPRs");
    if (LastPush != MBB.end() &&
        tryFoldSPUpdateIntoPushPop(STI, MF, &*LastPush, DPRGapSize))
      DefCFAOffsetCandidates.addExtraBytes(LastPush, DPRGapSize);
    else {
      emitSPUpdate(isARM, MBB, MBBI, dl, TII, -DPRGapSize,
                   MachineInstr::FrameSetup);
      DefCFAOffsetCandidates.addInst(std::prev(MBBI), DPRGapSize);
    }
  }

  // Move past area 3.
  if (DPRCSSize > 0) {
    // Since vpush register list cannot have gaps, there may be multiple vpush
    // instructions in the prologue.
    while (MBBI != MBB.end() && MBBI->getOpcode() == ARM::VSTMDDB_UPD) {
      DefCFAOffsetCandidates.addInst(MBBI, sizeOfSPAdjustment(*MBBI));
      LastPush = MBBI++;
    }
  }

  // Move past the aligned DPRCS2 area.
  if (AFI->getNumAlignedDPRCS2Regs() > 0) {
    MBBI = skipAlignedDPRCS2Spills(MBBI, AFI->getNumAlignedDPRCS2Regs());
    // The code inserted by emitAlignedDPRCS2Spills realigns the stack, and
    // leaves the stack pointer pointing to the DPRCS2 area.
    //
    // Adjust NumBytes to represent the stack slots below the DPRCS2 area.
    NumBytes += MFI.getObjectOffset(D8SpillFI);
  } else
    NumBytes = DPRCSOffset;

  if (STI.isTargetWindows() && WindowsRequiresStackProbe(MF, NumBytes)) {
    uint32_t NumWords = NumBytes >> 2;

    if (NumWords < 65536)
      BuildMI(MBB, MBBI, dl, TII.get(ARM::t2MOVi16), ARM::R4)
          .addImm(NumWords)
          .setMIFlags(MachineInstr::FrameSetup)
          .add(predOps(ARMCC::AL));
    else
      BuildMI(MBB, MBBI, dl, TII.get(ARM::t2MOVi32imm), ARM::R4)
          .addImm(NumWords)
          .setMIFlags(MachineInstr::FrameSetup);

    // __chkstk is passed the number of words to probe in r4; this sequence
    // relies on it leaving the corresponding byte count in r4 on return,
    // which is then subtracted from sp below.
    switch (TM.getCodeModel()) {
    case CodeModel::Small:
    case CodeModel::Medium:
    case CodeModel::Kernel:
      BuildMI(MBB, MBBI, dl, TII.get(ARM::tBL))
          .add(predOps(ARMCC::AL))
          .addExternalSymbol("__chkstk")
          .addReg(ARM::R4, RegState::Implicit)
          .setMIFlags(MachineInstr::FrameSetup);
      break;
    case CodeModel::Large:
      BuildMI(MBB, MBBI, dl, TII.get(ARM::t2MOVi32imm), ARM::R12)
          .addExternalSymbol("__chkstk")
          .setMIFlags(MachineInstr::FrameSetup);

      BuildMI(MBB, MBBI, dl, TII.get(ARM::tBLXr))
          .add(predOps(ARMCC::AL))
          .addReg(ARM::R12, RegState::Kill)
          .addReg(ARM::R4, RegState::Implicit)
          .setMIFlags(MachineInstr::FrameSetup);
      break;
    }

    BuildMI(MBB, MBBI, dl, TII.get(ARM::t2SUBrr), ARM::SP)
        .addReg(ARM::SP, RegState::Kill)
        .addReg(ARM::R4, RegState::Kill)
        .setMIFlags(MachineInstr::FrameSetup)
        .add(predOps(ARMCC::AL))
        .add(condCodeOp());
    NumBytes = 0;
  }

  if (NumBytes) {
    // Adjust SP after all the callee-save spills.
    if (AFI->getNumAlignedDPRCS2Regs() == 0 &&
        tryFoldSPUpdateIntoPushPop(STI, MF, &*LastPush, NumBytes))
      DefCFAOffsetCandidates.addExtraBytes(LastPush, NumBytes);
    else {
      emitSPUpdate(isARM, MBB, MBBI, dl, TII, -NumBytes,
                   MachineInstr::FrameSetup);
      DefCFAOffsetCandidates.addInst(std::prev(MBBI), NumBytes);
    }

    if (HasFP && isARM)
      // Restore from fp only in ARM mode: e.g. sub sp, r7, #24
      // Note it's not safe to do this in Thumb2 mode because it would have
      // taken two instructions:
      //   mov sp, r7
      //   sub sp, #24
      // If an interrupt is taken between the two instructions, then sp is in
      // an inconsistent state (pointing to the middle of callee-saved area).
      // The interrupt handler can end up clobbering the registers.
      AFI->setShouldRestoreSPFromFP(true);
  }

  // Set FP to point to the stack slot that contains the previous FP.
  // For iOS, FP is R7, which has now been stored in spill area 1.
  // Otherwise, if this is not iOS, all the callee-saved registers go
  // into spill area 1, including the FP in R11. In either case, it
  // is in area one and the adjustment needs to take place just after
  // that push.
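  // For instance, after push {r4-r7, lr} with no vararg save area, r7's slot
  // sits 8 bytes below the incoming SP and the push is 20 bytes, so this
  // emits: add r7, sp, #12 (illustrative values only).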
  if (HasFP) {
    MachineBasicBlock::iterator AfterPush = std::next(GPRCS1Push);
    unsigned PushSize = sizeOfSPAdjustment(*GPRCS1Push);
    emitRegPlusImmediate(!AFI->isThumbFunction(), MBB, AfterPush,
                         dl, TII, FramePtr, ARM::SP,
                         PushSize + FramePtrOffsetInPush,
                         MachineInstr::FrameSetup);
    if (FramePtrOffsetInPush + PushSize != 0) {
      unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createDefCfa(
          nullptr, MRI->getDwarfRegNum(FramePtr, true),
          -(ArgRegsSaveSize - FramePtrOffsetInPush)));
      BuildMI(MBB, AfterPush, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex)
          .setMIFlags(MachineInstr::FrameSetup);
    } else {
      unsigned CFIIndex =
          MF.addFrameInst(MCCFIInstruction::createDefCfaRegister(
              nullptr, MRI->getDwarfRegNum(FramePtr, true)));
      BuildMI(MBB, AfterPush, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex)
          .setMIFlags(MachineInstr::FrameSetup);
    }
  }

  // Now that the prologue's actual instructions are finalised, we can insert
  // the necessary DWARF CFI instructions to describe the situation. Start by
  // recording where each register ended up:
  if (GPRCS1Size > 0) {
    MachineBasicBlock::iterator Pos = std::next(GPRCS1Push);
    int CFIIndex;
    for (const auto &Entry : CSI) {
      unsigned Reg = Entry.getReg();
      int FI = Entry.getFrameIdx();
      switch (Reg) {
      case ARM::R8:
      case ARM::R9:
      case ARM::R10:
      case ARM::R11:
      case ARM::R12:
        if (STI.splitFramePushPop(MF))
          break;
        LLVM_FALLTHROUGH;
      case ARM::R0:
      case ARM::R1:
      case ARM::R2:
      case ARM::R3:
      case ARM::R4:
      case ARM::R5:
      case ARM::R6:
      case ARM::R7:
      case ARM::LR:
        CFIIndex = MF.addFrameInst(MCCFIInstruction::createOffset(
            nullptr, MRI->getDwarfRegNum(Reg, true), MFI.getObjectOffset(FI)));
        BuildMI(MBB, Pos, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex)
            .setMIFlags(MachineInstr::FrameSetup);
        break;
      }
    }
  }

  if (GPRCS2Size > 0) {
    MachineBasicBlock::iterator Pos = std::next(GPRCS2Push);
    for (const auto &Entry : CSI) {
      unsigned Reg = Entry.getReg();
      int FI = Entry.getFrameIdx();
      switch (Reg) {
      case ARM::R8:
      case ARM::R9:
      case ARM::R10:
      case ARM::R11:
      case ARM::R12:
        if (STI.splitFramePushPop(MF)) {
          unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
          unsigned Offset = MFI.getObjectOffset(FI);
          unsigned CFIIndex = MF.addFrameInst(
              MCCFIInstruction::createOffset(nullptr, DwarfReg, Offset));
          BuildMI(MBB, Pos, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
              .addCFIIndex(CFIIndex)
              .setMIFlags(MachineInstr::FrameSetup);
        }
        break;
      }
    }
  }

  if (DPRCSSize > 0) {
    // Since vpush register list cannot have gaps, there may be multiple vpush
    // instructions in the prologue.
    MachineBasicBlock::iterator Pos = std::next(LastPush);
    for (const auto &Entry : CSI) {
      unsigned Reg = Entry.getReg();
      int FI = Entry.getFrameIdx();
      if ((Reg >= ARM::D0 && Reg <= ARM::D31) &&
          (Reg < ARM::D8 || Reg >= ARM::D8 + AFI->getNumAlignedDPRCS2Regs())) {
        unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
        unsigned Offset = MFI.getObjectOffset(FI);
        unsigned CFIIndex = MF.addFrameInst(
            MCCFIInstruction::createOffset(nullptr, DwarfReg, Offset));
        BuildMI(MBB, Pos, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex)
            .setMIFlags(MachineInstr::FrameSetup);
      }
    }
  }

  // Now we can emit descriptions of where the canonical frame address was
  // throughout the process. If we have a frame pointer, it takes over the job
  // half-way through, so only the first few .cfi_def_cfa_offset instructions
  // actually get emitted.
  DefCFAOffsetCandidates.emitDefCFAOffsets(MBB, dl, TII, HasFP);

  if (STI.isTargetELF() && hasFP(MF))
    MFI.setOffsetAdjustment(MFI.getOffsetAdjustment() -
                            AFI->getFramePtrSpillOffset());

  AFI->setGPRCalleeSavedArea1Size(GPRCS1Size);
  AFI->setGPRCalleeSavedArea2Size(GPRCS2Size);
  AFI->setDPRCalleeSavedGapSize(DPRGapSize);
  AFI->setDPRCalleeSavedAreaSize(DPRCSSize);

  // If we need dynamic stack realignment, do it here. Be paranoid and make
  // sure if we also have VLAs, we have a base pointer for frame access.
  // If aligned NEON registers were spilled, the stack has already been
  // realigned.
  if (!AFI->getNumAlignedDPRCS2Regs() && RegInfo->needsStackRealignment(MF)) {
    unsigned MaxAlign = MFI.getMaxAlignment();
    assert(!AFI->isThumb1OnlyFunction());
    if (!AFI->isThumbFunction()) {
      emitAligningInstructions(MF, AFI, TII, MBB, MBBI, dl, ARM::SP, MaxAlign,
                               false);
    } else {
      // We cannot use sp as source/dest register here, thus we're using r4 to
      // perform the calculations. We're emitting the following sequence:
      //   mov r4, sp
      //   -- use emitAligningInstructions to produce best sequence to zero
      //   -- out lower bits in r4
      //   mov sp, r4
      // FIXME: It will be better just to find spare register here.
      BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), ARM::R4)
          .addReg(ARM::SP, RegState::Kill)
          .add(predOps(ARMCC::AL));
      emitAligningInstructions(MF, AFI, TII, MBB, MBBI, dl, ARM::R4, MaxAlign,
                               false);
      BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), ARM::SP)
          .addReg(ARM::R4, RegState::Kill)
          .add(predOps(ARMCC::AL));
    }

    AFI->setShouldRestoreSPFromFP(true);
  }

  // If we need a base pointer, set it up here. It's whatever the value
  // of the stack pointer is at this point. Any variable size objects
  // will be allocated after this, so we can still use the base pointer
  // to reference locals.
  // FIXME: Clarify FrameSetup flags here.
  if (RegInfo->hasBasePointer(MF)) {
    if (isARM)
      BuildMI(MBB, MBBI, dl, TII.get(ARM::MOVr), RegInfo->getBaseRegister())
          .addReg(ARM::SP)
          .add(predOps(ARMCC::AL))
          .add(condCodeOp());
    else
      BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), RegInfo->getBaseRegister())
          .addReg(ARM::SP)
          .add(predOps(ARMCC::AL));
  }

  // If the frame has variable sized objects then the epilogue must restore
  // the sp from fp. We can assume there's an FP here since hasFP already
  // checks for hasVarSizedObjects.
  if (MFI.hasVarSizedObjects())
    AFI->setShouldRestoreSPFromFP(true);
}

void ARMFrameLowering::emitEpilogue(MachineFunction &MF,
                                    MachineBasicBlock &MBB) const {
  MachineFrameInfo &MFI = MF.getFrameInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
  const ARMBaseInstrInfo &TII =
      *static_cast<const ARMBaseInstrInfo *>(MF.getSubtarget().getInstrInfo());
  assert(!AFI->isThumb1OnlyFunction() &&
         "This emitEpilogue does not support Thumb1!");
  bool isARM = !AFI->isThumbFunction();

  unsigned ArgRegsSaveSize = AFI->getArgRegsSaveSize();
  int NumBytes = (int)MFI.getStackSize();
  unsigned FramePtr = RegInfo->getFrameRegister(MF);

  // All calls are tail calls in GHC calling conv, and functions have no
  // prologue/epilogue.
  if (MF.getFunction().getCallingConv() == CallingConv::GHC)
    return;

  // First put ourselves on the first (from top) terminator instruction.
  MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
  DebugLoc dl = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  if (!AFI->hasStackFrame()) {
    if (NumBytes - ArgRegsSaveSize != 0)
      emitSPUpdate(isARM, MBB, MBBI, dl, TII, NumBytes - ArgRegsSaveSize);
  } else {
    // Unwind MBBI to point to first LDR / VLDRD.
    const MCPhysReg *CSRegs = RegInfo->getCalleeSavedRegs(&MF);
    if (MBBI != MBB.begin()) {
      do {
        --MBBI;
      } while (MBBI != MBB.begin() && isCSRestore(*MBBI, TII, CSRegs));
      if (!isCSRestore(*MBBI, TII, CSRegs))
        ++MBBI;
    }

    // Move SP to start of FP callee save spill area.
    NumBytes -= (ArgRegsSaveSize +
                 AFI->getGPRCalleeSavedArea1Size() +
                 AFI->getGPRCalleeSavedArea2Size() +
                 AFI->getDPRCalleeSavedGapSize() +
                 AFI->getDPRCalleeSavedAreaSize());

    // Reset SP based on frame pointer only if the stack frame extends beyond
    // frame pointer stack slot or target is ELF and the function has FP.
    if (AFI->shouldRestoreSPFromFP()) {
      NumBytes = AFI->getFramePtrSpillOffset() - NumBytes;
      if (NumBytes) {
        if (isARM)
          emitARMRegPlusImmediate(MBB, MBBI, dl, ARM::SP, FramePtr, -NumBytes,
                                  ARMCC::AL, 0, TII);
        else {
          // It's not possible to restore SP from FP in a single instruction.
          // For iOS, this looks like:
          //   mov sp, r7
          //   sub sp, #24
          // This is bad, if an interrupt is taken after the mov, sp is in an
          // inconsistent state.
          // Use the first callee-saved register as a scratch register.
          assert(!MFI.getPristineRegs(MF).test(ARM::R4) &&
                 "No scratch register to restore SP from FP!");
          emitT2RegPlusImmediate(MBB, MBBI, dl, ARM::R4, FramePtr, -NumBytes,
                                 ARMCC::AL, 0, TII);
          BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), ARM::SP)
              .addReg(ARM::R4)
              .add(predOps(ARMCC::AL));
        }
      } else {
        // Thumb2 or ARM.
        if (isARM)
          BuildMI(MBB, MBBI, dl, TII.get(ARM::MOVr), ARM::SP)
              .addReg(FramePtr)
              .add(predOps(ARMCC::AL))
              .add(condCodeOp());
        else
          BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), ARM::SP)
              .addReg(FramePtr)
              .add(predOps(ARMCC::AL));
      }
    } else if (NumBytes &&
               !tryFoldSPUpdateIntoPushPop(STI, MF, &*MBBI, NumBytes))
      emitSPUpdate(isARM, MBB, MBBI, dl, TII, NumBytes);

    // Increment past our save areas.
    if (MBBI != MBB.end() && AFI->getDPRCalleeSavedAreaSize()) {
      MBBI++;
      // Since vpop register list cannot have gaps, there may be multiple vpop
      // instructions in the epilogue.
      while (MBBI != MBB.end() && MBBI->getOpcode() == ARM::VLDMDIA_UPD)
        MBBI++;
    }
    if (AFI->getDPRCalleeSavedGapSize()) {
      assert(AFI->getDPRCalleeSavedGapSize() == 4 &&
             "unexpected DPR alignment gap");
      emitSPUpdate(isARM, MBB, MBBI, dl, TII, AFI->getDPRCalleeSavedGapSize());
    }

    if (AFI->getGPRCalleeSavedArea2Size()) MBBI++;
    if (AFI->getGPRCalleeSavedArea1Size()) MBBI++;
  }

  if (ArgRegsSaveSize)
    emitSPUpdate(isARM, MBB, MBBI, dl, TII, ArgRegsSaveSize);
}

/// getFrameIndexReference - Provide a base+offset reference to an FI slot for
/// debug info. It's the same as what we use for resolving the code-gen
/// references for now. FIXME: This can go wrong when references are
/// SP-relative and simple call frames aren't used.
int
ARMFrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
                                         unsigned &FrameReg) const {
  return ResolveFrameIndexReference(MF, FI, FrameReg, 0);
}

int
ARMFrameLowering::ResolveFrameIndexReference(const MachineFunction &MF,
                                             int FI, unsigned &FrameReg,
                                             int SPAdj) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const ARMBaseRegisterInfo *RegInfo = static_cast<const ARMBaseRegisterInfo *>(
      MF.getSubtarget().getRegisterInfo());
  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  int Offset = MFI.getObjectOffset(FI) + MFI.getStackSize();
  int FPOffset = Offset - AFI->getFramePtrSpillOffset();
  bool isFixed = MFI.isFixedObjectIndex(FI);

  FrameReg = ARM::SP;
  Offset += SPAdj;

  // SP can move around if there are allocas. We may also lose track of SP
  // when emergency spilling inside a non-reserved call frame setup.
  bool hasMovingSP = !hasReservedCallFrame(MF);

  // When dynamically realigning the stack, use the frame pointer for
  // parameters, and the stack/base pointer for locals.
  if (RegInfo->needsStackRealignment(MF)) {
    assert(hasFP(MF) && "dynamic stack realignment without a FP!");
    if (isFixed) {
      FrameReg = RegInfo->getFrameRegister(MF);
      Offset = FPOffset;
    } else if (hasMovingSP) {
      assert(RegInfo->hasBasePointer(MF) &&
             "VLAs and dynamic stack alignment, but missing base pointer!");
      FrameReg = RegInfo->getBaseRegister();
    }
    return Offset;
  }

  // If there is a frame pointer, use it when we can.
  if (hasFP(MF) && AFI->hasStackFrame()) {
    // Use frame pointer to reference fixed objects. Use it for locals if
    // there are VLAs (and thus the SP isn't reliable as a base).
    if (isFixed || (hasMovingSP && !RegInfo->hasBasePointer(MF))) {
      FrameReg = RegInfo->getFrameRegister(MF);
      return FPOffset;
    } else if (hasMovingSP) {
      assert(RegInfo->hasBasePointer(MF) && "missing base pointer!");
      if (AFI->isThumb2Function()) {
        // Try to use the frame pointer if we can, else use the base pointer
        // since it's available. This is handy for the emergency spill slot, in
        // particular.
        if (FPOffset >= -255 && FPOffset < 0) {
          FrameReg = RegInfo->getFrameRegister(MF);
          return FPOffset;
        }
      }
    } else if (AFI->isThumbFunction()) {
      // Prefer SP to base pointer, if the offset is suitably aligned and in
      // range as the effective range of the immediate offset is bigger when
      // basing off SP.
      // Use  add <rd>, sp, #<imm8>
      //      ldr <rd>, [sp, #<imm8>]
      if (Offset >= 0 && (Offset & 3) == 0 && Offset <= 1020)
        return Offset;
      // In Thumb2 mode, the negative offset is very limited. Try to avoid
      // out of range references. ldr <rt>,[<rn>, #-<imm8>]
      if (AFI->isThumb2Function() && FPOffset >= -255 && FPOffset < 0) {
        FrameReg = RegInfo->getFrameRegister(MF);
        return FPOffset;
      }
    } else if (Offset > (FPOffset < 0 ? -FPOffset : FPOffset)) {
      // Otherwise, use SP or FP, whichever is closer to the stack slot.
      FrameReg = RegInfo->getFrameRegister(MF);
      return FPOffset;
    }
  }
  // Use the base pointer if we have one.
  if (RegInfo->hasBasePointer(MF))
    FrameReg = RegInfo->getBaseRegister();
  return Offset;
}

void ARMFrameLowering::emitPushInst(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator MI,
                                    const std::vector<CalleeSavedInfo> &CSI,
                                    unsigned StmOpc, unsigned StrOpc,
                                    bool NoGap,
                                    bool(*Func)(unsigned, bool),
                                    unsigned NumAlignedDPRCS2Regs,
                                    unsigned MIFlags) const {
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetRegisterInfo &TRI = *STI.getRegisterInfo();

  DebugLoc DL;

  using RegAndKill = std::pair<unsigned, bool>;

  SmallVector<RegAndKill, 4> Regs;
  unsigned i = CSI.size();
  while (i != 0) {
    unsigned LastReg = 0;
    for (; i != 0; --i) {
      unsigned Reg = CSI[i-1].getReg();
      if (!(Func)(Reg, STI.splitFramePushPop(MF))) continue;

      // D-registers in the aligned area DPRCS2 are NOT spilled here.
      if (Reg >= ARM::D8 && Reg < ARM::D8 + NumAlignedDPRCS2Regs)
        continue;

      const MachineRegisterInfo &MRI = MF.getRegInfo();
      bool isLiveIn = MRI.isLiveIn(Reg);
      if (!isLiveIn && !MRI.isReserved(Reg))
        MBB.addLiveIn(Reg);
      // If NoGap is true, push consecutive registers and then leave the rest
      // for other instructions. e.g.
      //   vpush {d8, d10, d11} -> vpush {d8}, vpush {d10, d11}
      if (NoGap && LastReg && LastReg != Reg-1)
        break;
      LastReg = Reg;
      // Do not set a kill flag on values that are also marked as live-in. This
      // happens with the @llvm.returnaddress intrinsic and with arguments
      // passed in callee saved registers.
      // Omitting the kill flags is conservatively correct even if the live-in
      // is not used after all.
      Regs.push_back(std::make_pair(Reg, /*isKill=*/!isLiveIn));
    }

    if (Regs.empty())
      continue;

    llvm::sort(Regs.begin(), Regs.end(), [&](const RegAndKill &LHS,
                                             const RegAndKill &RHS) {
      return TRI.getEncodingValue(LHS.first) < TRI.getEncodingValue(RHS.first);
    });

    if (Regs.size() > 1 || StrOpc == 0) {
      MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(StmOpc), ARM::SP)
                                    .addReg(ARM::SP)
                                    .setMIFlags(MIFlags)
                                    .add(predOps(ARMCC::AL));
      for (unsigned i = 0, e = Regs.size(); i < e; ++i)
        MIB.addReg(Regs[i].first, getKillRegState(Regs[i].second));
    } else if (Regs.size() == 1) {
      BuildMI(MBB, MI, DL, TII.get(StrOpc), ARM::SP)
          .addReg(Regs[0].first, getKillRegState(Regs[0].second))
          .addReg(ARM::SP)
          .setMIFlags(MIFlags)
          .addImm(-4)
          .add(predOps(ARMCC::AL));
    }
    Regs.clear();

    // Put any subsequent vpush instructions before this one: they will refer
    // to higher register numbers so need to be pushed first in order to
    // preserve monotonicity.
    if (MI != MBB.begin())
      --MI;
  }
}

void ARMFrameLowering::emitPopInst(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator MI,
                                   std::vector<CalleeSavedInfo> &CSI,
                                   unsigned LdmOpc, unsigned LdrOpc,
                                   bool isVarArg, bool NoGap,
                                   bool(*Func)(unsigned, bool),
                                   unsigned NumAlignedDPRCS2Regs) const {
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  DebugLoc DL;
  bool isTailCall = false;
  bool isInterrupt = false;
  bool isTrap = false;
  if (MBB.end() != MI) {
    DL = MI->getDebugLoc();
    unsigned RetOpcode = MI->getOpcode();
    isTailCall = (RetOpcode == ARM::TCRETURNdi || RetOpcode == ARM::TCRETURNri);
    isInterrupt =
        RetOpcode == ARM::SUBS_PC_LR || RetOpcode == ARM::t2SUBS_PC_LR;
    isTrap =
        RetOpcode == ARM::TRAP || RetOpcode == ARM::TRAPNaCl ||
        RetOpcode == ARM::tTRAP;
  }

  SmallVector<unsigned, 4> Regs;
  unsigned i = CSI.size();
  while (i != 0) {
    unsigned LastReg = 0;
    bool DeleteRet = false;
    for (; i != 0; --i) {
      CalleeSavedInfo &Info = CSI[i-1];
      unsigned Reg = Info.getReg();
      if (!(Func)(Reg, STI.splitFramePushPop(MF))) continue;

      // The aligned reloads from area DPRCS2 are not inserted here.
      if (Reg >= ARM::D8 && Reg < ARM::D8 + NumAlignedDPRCS2Regs)
        continue;

      if (Reg == ARM::LR && !isTailCall && !isVarArg && !isInterrupt &&
          !isTrap && STI.hasV5TOps()) {
        if (MBB.succ_empty()) {
          Reg = ARM::PC;
          // Fold the return instruction into the LDM.
          DeleteRet = true;
          LdmOpc = AFI->isThumbFunction() ? ARM::t2LDMIA_RET : ARM::LDMIA_RET;
          // We 'restore' LR into PC so it is not live out of the return block:
          // Clear Restored bit.
          Info.setRestored(false);
        } else
          LdmOpc = AFI->isThumbFunction() ? ARM::t2LDMIA_UPD : ARM::LDMIA_UPD;
      }

      // If NoGap is true, pop consecutive registers and then leave the rest
      // for other instructions. e.g.
      //   vpop {d8, d10, d11} -> vpop {d8}, vpop {d10, d11}
      if (NoGap && LastReg && LastReg != Reg-1)
        break;

      LastReg = Reg;
      Regs.push_back(Reg);
    }

    if (Regs.empty())
      continue;

    llvm::sort(Regs.begin(), Regs.end(), [&](unsigned LHS, unsigned RHS) {
      return TRI.getEncodingValue(LHS) < TRI.getEncodingValue(RHS);
    });

    if (Regs.size() > 1 || LdrOpc == 0) {
      MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(LdmOpc), ARM::SP)
                                    .addReg(ARM::SP)
                                    .add(predOps(ARMCC::AL));
      for (unsigned i = 0, e = Regs.size(); i < e; ++i)
        MIB.addReg(Regs[i], getDefRegState(true));
      if (DeleteRet) {
        if (MI != MBB.end()) {
          MIB.copyImplicitOps(*MI);
          MI->eraseFromParent();
        }
      }
      MI = MIB;
    } else if (Regs.size() == 1) {
      // If we adjusted the reg to PC from LR above, switch it back here. We
      // only do that for LDM.
      if (Regs[0] == ARM::PC)
        Regs[0] = ARM::LR;
      MachineInstrBuilder MIB =
          BuildMI(MBB, MI, DL, TII.get(LdrOpc), Regs[0])
              .addReg(ARM::SP, RegState::Define)
              .addReg(ARM::SP);
      // ARM mode needs an extra reg0 here due to addrmode2. Will go away once
      // that refactoring is complete (eventually).
      if (LdrOpc == ARM::LDR_POST_REG || LdrOpc == ARM::LDR_POST_IMM) {
        MIB.addReg(0);
        MIB.addImm(ARM_AM::getAM2Opc(ARM_AM::add, 4, ARM_AM::no_shift));
      } else
        MIB.addImm(4);
      MIB.add(predOps(ARMCC::AL));
    }
    Regs.clear();

    // Put any subsequent vpop instructions after this one: they will refer to
    // higher register numbers so need to be popped afterwards.
    if (MI != MBB.end())
      ++MI;
  }
}

/// Emit aligned spill instructions for NumAlignedDPRCS2Regs D-registers
/// starting from d8. Also insert stack realignment code and leave the stack
/// pointer pointing to the d8 spill slot.
static void emitAlignedDPRCS2Spills(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator MI,
                                    unsigned NumAlignedDPRCS2Regs,
                                    const std::vector<CalleeSavedInfo> &CSI,
                                    const TargetRegisterInfo *TRI) {
  MachineFunction &MF = *MBB.getParent();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  DebugLoc DL = MI != MBB.end() ? MI->getDebugLoc() : DebugLoc();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // Mark the D-register spill slots as properly aligned. Since MFI computes
  // stack slot layout backwards, this can actually mean that the d-reg stack
  // slot offsets can be wrong. The offset for d8 will always be correct.
  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    unsigned DNum = CSI[i].getReg() - ARM::D8;
    if (DNum > NumAlignedDPRCS2Regs - 1)
      continue;
    int FI = CSI[i].getFrameIdx();
    // The even-numbered registers will be 16-byte aligned, the odd-numbered
    // registers will be 8-byte aligned.
    MFI.setObjectAlignment(FI, DNum % 2 ? 8 : 16);

    // The stack slot for D8 needs to be maximally aligned because this is
    // actually the point where we align the stack pointer. MachineFrameInfo
    // computes all offsets relative to the incoming stack pointer which is a
    // bit weird when realigning the stack. Any extra padding for this
    // over-alignment is not realized because the code inserted below adjusts
    // the stack pointer by numregs * 8 before aligning the stack pointer.
    if (DNum == 0)
      MFI.setObjectAlignment(FI, MFI.getMaxAlignment());
  }

  // Move the stack pointer to the d8 spill slot, and align it at the same
  // time. Leave the stack slot address in the scratch register r4.
  //
  //   sub r4, sp, #numregs * 8
  //   bic r4, r4, #align - 1
  //   mov sp, r4
  //
  bool isThumb = AFI->isThumbFunction();
  assert(!AFI->isThumb1OnlyFunction() && "Can't realign stack for thumb1");
  AFI->setShouldRestoreSPFromFP(true);

  // sub r4, sp, #numregs * 8
  // The immediate is <= 64, so it doesn't need any special encoding.
  unsigned Opc = isThumb ? ARM::t2SUBri : ARM::SUBri;
  BuildMI(MBB, MI, DL, TII.get(Opc), ARM::R4)
      .addReg(ARM::SP)
      .addImm(8 * NumAlignedDPRCS2Regs)
      .add(predOps(ARMCC::AL))
      .add(condCodeOp());

  unsigned MaxAlign = MF.getFrameInfo().getMaxAlignment();
  // We must set parameter MustBeSingleInstruction to true, since
  // skipAlignedDPRCS2Spills expects exactly 3 instructions to perform
  // stack alignment. Luckily, this can always be done since all ARM
  // architecture versions that support Neon also support the BFC
  // instruction.
  emitAligningInstructions(MF, AFI, TII, MBB, MI, DL, ARM::R4, MaxAlign, true);

  // mov sp, r4
  // The stack pointer must be adjusted before spilling anything, otherwise
  // the stack slots could be clobbered by an interrupt handler.
  // Leave r4 live, it is used below.
  Opc = isThumb ? ARM::tMOVr : ARM::MOVr;
  MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(Opc), ARM::SP)
                                .addReg(ARM::R4)
                                .add(predOps(ARMCC::AL));
  if (!isThumb)
    MIB.add(condCodeOp());

  // Now spill NumAlignedDPRCS2Regs registers starting from d8.
  // r4 holds the stack slot address.
  unsigned NextReg = ARM::D8;

  // 16-byte aligned vst1.64 with 4 d-regs and address writeback.
  // The writeback is only needed when emitting two vst1.64 instructions.
  if (NumAlignedDPRCS2Regs >= 6) {
    unsigned SupReg = TRI->getMatchingSuperReg(NextReg, ARM::dsub_0,
                                               &ARM::QQPRRegClass);
    MBB.addLiveIn(SupReg);
    BuildMI(MBB, MI, DL, TII.get(ARM::VST1d64Qwb_fixed), ARM::R4)
        .addReg(ARM::R4, RegState::Kill)
        .addImm(16)
        .addReg(NextReg)
        .addReg(SupReg, RegState::ImplicitKill)
        .add(predOps(ARMCC::AL));
    NextReg += 4;
    NumAlignedDPRCS2Regs -= 4;
  }

  // We won't modify r4 beyond this point. It currently points to the next
  // register to be spilled.
  unsigned R4BaseReg = NextReg;

  // 16-byte aligned vst1.64 with 4 d-regs, no writeback.
  if (NumAlignedDPRCS2Regs >= 4) {
    unsigned SupReg = TRI->getMatchingSuperReg(NextReg, ARM::dsub_0,
                                               &ARM::QQPRRegClass);
    MBB.addLiveIn(SupReg);
    BuildMI(MBB, MI, DL, TII.get(ARM::VST1d64Q))
        .addReg(ARM::R4)
        .addImm(16)
        .addReg(NextReg)
        .addReg(SupReg, RegState::ImplicitKill)
        .add(predOps(ARMCC::AL));
    NextReg += 4;
    NumAlignedDPRCS2Regs -= 4;
  }

  // 16-byte aligned vst1.64 with 2 d-regs.
  if (NumAlignedDPRCS2Regs >= 2) {
    unsigned SupReg = TRI->getMatchingSuperReg(NextReg, ARM::dsub_0,
                                               &ARM::QPRRegClass);
    MBB.addLiveIn(SupReg);
    BuildMI(MBB, MI, DL, TII.get(ARM::VST1q64))
        .addReg(ARM::R4)
        .addImm(16)
        .addReg(SupReg)
        .add(predOps(ARMCC::AL));
    NextReg += 2;
    NumAlignedDPRCS2Regs -= 2;
  }

  // Finally, use a vanilla vstr.64 for the odd last register.
  if (NumAlignedDPRCS2Regs) {
    MBB.addLiveIn(NextReg);
    // vstr.64 uses addrmode5 which has an offset scale of 4.
    BuildMI(MBB, MI, DL, TII.get(ARM::VSTRD))
        .addReg(NextReg)
        .addReg(ARM::R4)
        .addImm((NextReg - R4BaseReg) * 2)
        .add(predOps(ARMCC::AL));
  }

  // The last spill instruction inserted should kill the scratch register r4.
  std::prev(MI)->addRegisterKilled(ARM::R4, TRI);
}

/// Skip past the code inserted by emitAlignedDPRCS2Spills, and return an
/// iterator to the following instruction.
static MachineBasicBlock::iterator
skipAlignedDPRCS2Spills(MachineBasicBlock::iterator MI,
                        unsigned NumAlignedDPRCS2Regs) {
  //   sub r4, sp, #numregs * 8
  //   bic r4, r4, #align - 1
  //   mov sp, r4
  ++MI; ++MI; ++MI;
  assert(MI->mayStore() && "Expecting spill instruction");

  // These switches all fall through.
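  // emitAlignedDPRCS2Spills above emits one store for 1, 2 or 4 registers,
  // two stores for 3, 5 or 6, and three stores for 7; the fall-through cases
  // below skip past exactly that many instructions.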
  switch(NumAlignedDPRCS2Regs) {
  case 7:
    ++MI;
    assert(MI->mayStore() && "Expecting spill instruction");
    LLVM_FALLTHROUGH;
  default:
    ++MI;
    assert(MI->mayStore() && "Expecting spill instruction");
    LLVM_FALLTHROUGH;
  case 1:
  case 2:
  case 4:
    assert(MI->killsRegister(ARM::R4) && "Missed kill flag");
    ++MI;
  }
  return MI;
}

/// Emit aligned reload instructions for NumAlignedDPRCS2Regs D-registers
/// starting from d8. These instructions are assumed to execute while the
/// stack is still aligned, unlike the code inserted by emitPopInst.
static void emitAlignedDPRCS2Restores(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned NumAlignedDPRCS2Regs,
                                      const std::vector<CalleeSavedInfo> &CSI,
                                      const TargetRegisterInfo *TRI) {
  MachineFunction &MF = *MBB.getParent();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  DebugLoc DL = MI != MBB.end() ? MI->getDebugLoc() : DebugLoc();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();

  // Find the frame index assigned to d8.
  int D8SpillFI = 0;
  for (unsigned i = 0, e = CSI.size(); i != e; ++i)
    if (CSI[i].getReg() == ARM::D8) {
      D8SpillFI = CSI[i].getFrameIdx();
      break;
    }

  // Materialize the address of the d8 spill slot into the scratch register r4.
  // This can be fairly complicated if the stack frame is large, so just use
  // the normal frame index elimination mechanism to do it. This code runs as
  // the initial part of the epilog where the stack and base pointers haven't
  // been changed yet.
  bool isThumb = AFI->isThumbFunction();
  assert(!AFI->isThumb1OnlyFunction() && "Can't realign stack for thumb1");

  unsigned Opc = isThumb ? ARM::t2ADDri : ARM::ADDri;
  BuildMI(MBB, MI, DL, TII.get(Opc), ARM::R4)
      .addFrameIndex(D8SpillFI)
      .addImm(0)
      .add(predOps(ARMCC::AL))
      .add(condCodeOp());

  // Now restore NumAlignedDPRCS2Regs registers starting from d8.
  unsigned NextReg = ARM::D8;

  // 16-byte aligned vld1.64 with 4 d-regs and writeback.
  if (NumAlignedDPRCS2Regs >= 6) {
    unsigned SupReg = TRI->getMatchingSuperReg(NextReg, ARM::dsub_0,
                                               &ARM::QQPRRegClass);
    BuildMI(MBB, MI, DL, TII.get(ARM::VLD1d64Qwb_fixed), NextReg)
        .addReg(ARM::R4, RegState::Define)
        .addReg(ARM::R4, RegState::Kill)
        .addImm(16)
        .addReg(SupReg, RegState::ImplicitDefine)
        .add(predOps(ARMCC::AL));
    NextReg += 4;
    NumAlignedDPRCS2Regs -= 4;
  }

  // We won't modify r4 beyond this point. It currently points to the next
  // register to be spilled.
  unsigned R4BaseReg = NextReg;

  // 16-byte aligned vld1.64 with 4 d-regs, no writeback.
  if (NumAlignedDPRCS2Regs >= 4) {
    unsigned SupReg = TRI->getMatchingSuperReg(NextReg, ARM::dsub_0,
                                               &ARM::QQPRRegClass);
    BuildMI(MBB, MI, DL, TII.get(ARM::VLD1d64Q), NextReg)
        .addReg(ARM::R4)
        .addImm(16)
        .addReg(SupReg, RegState::ImplicitDefine)
        .add(predOps(ARMCC::AL));
    NextReg += 4;
    NumAlignedDPRCS2Regs -= 4;
  }

  // 16-byte aligned vld1.64 with 2 d-regs.
  if (NumAlignedDPRCS2Regs >= 2) {
    unsigned SupReg = TRI->getMatchingSuperReg(NextReg, ARM::dsub_0,
                                               &ARM::QPRRegClass);
    BuildMI(MBB, MI, DL, TII.get(ARM::VLD1q64), SupReg)
        .addReg(ARM::R4)
        .addImm(16)
        .add(predOps(ARMCC::AL));
    NextReg += 2;
    NumAlignedDPRCS2Regs -= 2;
  }

  // Finally, use a vanilla vldr.64 for the remaining odd register.
  if (NumAlignedDPRCS2Regs)
    BuildMI(MBB, MI, DL, TII.get(ARM::VLDRD), NextReg)
        .addReg(ARM::R4)
        .addImm(2 * (NextReg - R4BaseReg))
        .add(predOps(ARMCC::AL));

  // The last reload kills r4.
  std::prev(MI)->addRegisterKilled(ARM::R4, TRI);
}

bool ARMFrameLowering::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator MI,
                                        const std::vector<CalleeSavedInfo> &CSI,
                                        const TargetRegisterInfo *TRI) const {
  if (CSI.empty())
    return false;

  MachineFunction &MF = *MBB.getParent();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  unsigned PushOpc = AFI->isThumbFunction() ? ARM::t2STMDB_UPD : ARM::STMDB_UPD;
  unsigned PushOneOpc = AFI->isThumbFunction() ?
      ARM::t2STR_PRE : ARM::STR_PRE_IMM;
  unsigned FltOpc = ARM::VSTMDDB_UPD;
  unsigned NumAlignedDPRCS2Regs = AFI->getNumAlignedDPRCS2Regs();
  emitPushInst(MBB, MI, CSI, PushOpc, PushOneOpc, false, &isARMArea1Register, 0,
               MachineInstr::FrameSetup);
  emitPushInst(MBB, MI, CSI, PushOpc, PushOneOpc, false, &isARMArea2Register, 0,
               MachineInstr::FrameSetup);
  emitPushInst(MBB, MI, CSI, FltOpc, 0, true, &isARMArea3Register,
               NumAlignedDPRCS2Regs, MachineInstr::FrameSetup);

  // The code above does not insert spill code for the aligned DPRCS2
  // registers. The stack realignment code will be inserted between the push
  // instructions and these spills.
  if (NumAlignedDPRCS2Regs)
    emitAlignedDPRCS2Spills(MBB, MI, NumAlignedDPRCS2Regs, CSI, TRI);

  return true;
}

bool ARMFrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator MI,
                                        std::vector<CalleeSavedInfo> &CSI,
                                        const TargetRegisterInfo *TRI) const {
  if (CSI.empty())
    return false;

  MachineFunction &MF = *MBB.getParent();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  bool isVarArg = AFI->getArgRegsSaveSize() > 0;
  unsigned NumAlignedDPRCS2Regs = AFI->getNumAlignedDPRCS2Regs();

  // The emitPopInst calls below do not insert reloads for the aligned DPRCS2
  // registers. Do that here instead.
  if (NumAlignedDPRCS2Regs)
    emitAlignedDPRCS2Restores(MBB, MI, NumAlignedDPRCS2Regs, CSI, TRI);

  unsigned PopOpc = AFI->isThumbFunction() ? ARM::t2LDMIA_UPD : ARM::LDMIA_UPD;
  unsigned LdrOpc =
      AFI->isThumbFunction() ? ARM::t2LDR_POST : ARM::LDR_POST_IMM;
  unsigned FltOpc = ARM::VLDMDIA_UPD;
  emitPopInst(MBB, MI, CSI, FltOpc, 0, isVarArg, true, &isARMArea3Register,
              NumAlignedDPRCS2Regs);
  emitPopInst(MBB, MI, CSI, PopOpc, LdrOpc, isVarArg, false,
              &isARMArea2Register, 0);
  emitPopInst(MBB, MI, CSI, PopOpc, LdrOpc, isVarArg, false,
              &isARMArea1Register, 0);

  return true;
}

// FIXME: Make generic?
static unsigned GetFunctionSizeInBytes(const MachineFunction &MF,
                                       const ARMBaseInstrInfo &TII) {
  unsigned FnSize = 0;
  for (auto &MBB : MF) {
    for (auto &MI : MBB)
      FnSize += TII.getInstSizeInBytes(MI);
  }
  return FnSize;
}

/// estimateRSStackSizeLimit - Look at each instruction that references stack
/// frames and return the stack size limit beyond which some of these
/// instructions will require a scratch register during their expansion later.
// FIXME: Move to TII?
static unsigned estimateRSStackSizeLimit(MachineFunction &MF,
                                         const TargetFrameLowering *TFI) {
  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  unsigned Limit = (1 << 12) - 1;
  for (auto &MBB : MF) {
    for (auto &MI : MBB) {
      for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
        if (!MI.getOperand(i).isFI())
          continue;

        // When using ADDri to get the address of a stack object, 255 is the
        // largest offset guaranteed to fit in the immediate offset.
        if (MI.getOpcode() == ARM::ADDri) {
          Limit = std::min(Limit, (1U << 8) - 1);
          break;
        }

        // Otherwise check the addressing mode.
        switch (MI.getDesc().TSFlags & ARMII::AddrModeMask) {
        case ARMII::AddrMode3:
        case ARMII::AddrModeT2_i8:
          Limit = std::min(Limit, (1U << 8) - 1);
          break;
        case ARMII::AddrMode5:
        case ARMII::AddrModeT2_i8s4:
          Limit = std::min(Limit, ((1U << 8) - 1) * 4);
          break;
        case ARMII::AddrModeT2_i12:
          // i12 supports only positive offset so these will be converted to
          // i8 opcodes. See llvm::rewriteT2FrameIndex.
          if (TFI->hasFP(MF) && AFI->hasStackFrame())
            Limit = std::min(Limit, (1U << 8) - 1);
          break;
        case ARMII::AddrMode4:
        case ARMII::AddrMode6:
          // Addressing modes 4 & 6 (load/store) instructions can't encode an
          // immediate offset for stack references.
          return 0;
        default:
          break;
        }
        break; // At most one FI per instruction
      }
    }
  }

  return Limit;
}

// In functions that realign the stack, it can be an advantage to spill the
// callee-saved vector registers after realigning the stack. The vst1 and vld1
// instructions take alignment hints that can improve performance.
static void
checkNumAlignedDPRCS2Regs(MachineFunction &MF, BitVector &SavedRegs) {
  MF.getInfo<ARMFunctionInfo>()->setNumAlignedDPRCS2Regs(0);
  if (!SpillAlignedNEONRegs)
    return;

  // Naked functions don't spill callee-saved registers.
  if (MF.getFunction().hasFnAttribute(Attribute::Naked))
    return;

  // We are planning to use NEON instructions vst1 / vld1.
  if (!static_cast<const ARMSubtarget &>(MF.getSubtarget()).hasNEON())
    return;

  // Don't bother if the default stack alignment is sufficiently high.
  if (MF.getSubtarget().getFrameLowering()->getStackAlignment() >= 8)
    return;

  // Aligned spills require stack realignment.
  if (!static_cast<const ARMBaseRegisterInfo *>(
          MF.getSubtarget().getRegisterInfo())->canRealignStack(MF))
    return;

  // We always spill contiguous d-registers starting from d8. Count how many
  // need spilling. The register allocator will almost always use the
  // callee-saved registers in order, but it can happen that there are holes in
  // the range. Registers above the hole will be spilled to the standard DPRCS
  // area.
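  // For example, if d8, d9 and d11 are saved, only d8 and d9 (NumSpills == 2)
  // are placed in the aligned area; d11 is spilled by the normal vpush.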
  unsigned NumSpills = 0;
  for (; NumSpills < 8; ++NumSpills)
    if (!SavedRegs.test(ARM::D8 + NumSpills))
      break;

  // Don't do this for just one d-register. It's not worth it.
  if (NumSpills < 2)
    return;

  // Spill the first NumSpills D-registers after realigning the stack.
  MF.getInfo<ARMFunctionInfo>()->setNumAlignedDPRCS2Regs(NumSpills);

  // A scratch register is required for the vst1 / vld1 instructions.
  SavedRegs.set(ARM::R4);
}

void ARMFrameLowering::determineCalleeSaves(MachineFunction &MF,
                                            BitVector &SavedRegs,
                                            RegScavenger *RS) const {
  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
  // This tells PEI to spill the FP as if it is any other callee-save register
  // to take advantage of the eliminateFrameIndex machinery. This also ensures
  // it is spilled in the order specified by getCalleeSavedRegs() to make it
  // easier to combine multiple loads / stores.
  bool CanEliminateFrame = true;
  bool CS1Spilled = false;
  bool LRSpilled = false;
  unsigned NumGPRSpills = 0;
  unsigned NumFPRSpills = 0;
  SmallVector<unsigned, 4> UnspilledCS1GPRs;
  SmallVector<unsigned, 4> UnspilledCS2GPRs;
  const ARMBaseRegisterInfo *RegInfo = static_cast<const ARMBaseRegisterInfo *>(
      MF.getSubtarget().getRegisterInfo());
  const ARMBaseInstrInfo &TII =
      *static_cast<const ARMBaseInstrInfo *>(MF.getSubtarget().getInstrInfo());
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  (void)TRI; // Silence unused warning in non-assert builds.
  unsigned FramePtr = RegInfo->getFrameRegister(MF);

  // Spill R4 if a Thumb2 function requires stack realignment - it will be used
  // as a scratch register. Also spill R4 if a Thumb2 function has variable
  // sized objects, since it's not always possible to restore sp from fp in a
  // single instruction.
  // FIXME: It would be better just to find a spare register here.
  if (AFI->isThumb2Function() &&
      (MFI.hasVarSizedObjects() || RegInfo->needsStackRealignment(MF)))
    SavedRegs.set(ARM::R4);

  if (AFI->isThumb1OnlyFunction()) {
    // Spill LR if a Thumb1 function uses variable length argument lists.
    if (AFI->getArgRegsSaveSize() > 0)
      SavedRegs.set(ARM::LR);

    // Spill R4 if a Thumb1 epilogue has to restore SP from FP or the function
    // requires stack alignment. We don't know for sure what the stack size
    // will be, but for this, an estimate is good enough. If anything changes
    // it, it'll be a spill, which implies we've used all the registers and so
    // R4 is already used, so not marking it here will be OK.
    // FIXME: It would be better just to find a spare register here.
    if (MFI.hasVarSizedObjects() || RegInfo->needsStackRealignment(MF) ||
        MFI.estimateStackSize(MF) > 508)
      SavedRegs.set(ARM::R4);
  }

  // See if we can spill vector registers to aligned stack.
  checkNumAlignedDPRCS2Regs(MF, SavedRegs);

  // Spill the BasePtr if it's used.
  if (RegInfo->hasBasePointer(MF))
    SavedRegs.set(RegInfo->getBaseRegister());

  // Don't spill FP if the frame can be eliminated. This is determined
  // by scanning the callee-save registers to see if any is modified.
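  // While scanning, the loop below also counts the stack words each spill
  // needs: one 4-byte slot per S register, two per D register, and four per
  // Q register, plus one per spilled GPR.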
  const MCPhysReg *CSRegs = RegInfo->getCalleeSavedRegs(&MF);
  for (unsigned i = 0; CSRegs[i]; ++i) {
    unsigned Reg = CSRegs[i];
    bool Spilled = false;
    if (SavedRegs.test(Reg)) {
      Spilled = true;
      CanEliminateFrame = false;
    }

    if (!ARM::GPRRegClass.contains(Reg)) {
      if (Spilled) {
        if (ARM::SPRRegClass.contains(Reg))
          NumFPRSpills++;
        else if (ARM::DPRRegClass.contains(Reg))
          NumFPRSpills += 2;
        else if (ARM::QPRRegClass.contains(Reg))
          NumFPRSpills += 4;
      }
      continue;
    }

    if (Spilled) {
      NumGPRSpills++;

      if (!STI.splitFramePushPop(MF)) {
        if (Reg == ARM::LR)
          LRSpilled = true;
        CS1Spilled = true;
        continue;
      }

      // Keep track of whether LR and any of R4, R5, R6, and R7 are spilled.
      switch (Reg) {
      case ARM::LR:
        LRSpilled = true;
        LLVM_FALLTHROUGH;
      case ARM::R0: case ARM::R1:
      case ARM::R2: case ARM::R3:
      case ARM::R4: case ARM::R5:
      case ARM::R6: case ARM::R7:
        CS1Spilled = true;
        break;
      default:
        break;
      }
    } else {
      if (!STI.splitFramePushPop(MF)) {
        UnspilledCS1GPRs.push_back(Reg);
        continue;
      }

      switch (Reg) {
      case ARM::R0: case ARM::R1:
      case ARM::R2: case ARM::R3:
      case ARM::R4: case ARM::R5:
      case ARM::R6: case ARM::R7:
      case ARM::LR:
        UnspilledCS1GPRs.push_back(Reg);
        break;
      default:
        UnspilledCS2GPRs.push_back(Reg);
        break;
      }
    }
  }

  bool ForceLRSpill = false;
  if (!LRSpilled && AFI->isThumb1OnlyFunction()) {
    unsigned FnSize = GetFunctionSizeInBytes(MF, TII);
    // Force LR to be spilled if the Thumb function size is at least 2048
    // bytes. This enables the use of BL to implement far jumps. If it turns
    // out that it's not needed then the branch fix-up path will undo it.
    if (FnSize >= (1 << 11)) {
      CanEliminateFrame = false;
      ForceLRSpill = true;
    }
  }

  // If any of the stack slot references may be out of range of an immediate
  // offset, make sure a register (or a spill slot) is available for the
  // register scavenger. Note that if we're indexing off the frame pointer, the
  // effective stack size is 4 bytes larger since the FP points to the stack
  // slot of the previous FP. Also, if we have variable sized objects in the
  // function, stack slot references will often be negative, and some of
  // our instructions are positive-offset only, so conservatively consider
  // that case to want a spill slot (or register) as well. Similarly, if
  // the function adjusts the stack pointer during execution and the
  // adjustments aren't already part of our stack size estimate, our offset
  // calculations may be off, so be conservative.
  // FIXME: We could add logic to be more precise about negative offsets
  // and which instructions will need a scratch register for them. Is it
  // worth the effort and added fragility?
  unsigned EstimatedStackSize =
      MFI.estimateStackSize(MF) + 4 * (NumGPRSpills + NumFPRSpills);

  // Determine the biggest (positive) SP offset in MachineFrameInfo.
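  // Fixed objects such as incoming arguments live at negative frame indices;
  // the end of the highest one bounds the largest positive SP-relative offset
  // the function may need to materialize.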
  int MaxFixedOffset = 0;
  for (int I = MFI.getObjectIndexBegin(); I < 0; ++I) {
    int MaxObjectOffset = MFI.getObjectOffset(I) + MFI.getObjectSize(I);
    MaxFixedOffset = std::max(MaxFixedOffset, MaxObjectOffset);
  }

  bool HasFP = hasFP(MF);
  if (HasFP) {
    if (AFI->hasStackFrame())
      EstimatedStackSize += 4;
  } else {
    // If FP is not used, SP will be used to access arguments, so count the
    // size of arguments into the estimation.
    EstimatedStackSize += MaxFixedOffset;
  }
  EstimatedStackSize += 16; // For possible paddings.

  unsigned EstimatedRSStackSizeLimit = estimateRSStackSizeLimit(MF, this);
  int MaxFPOffset = getMaxFPOffset(MF.getFunction(), *AFI);
  bool BigFrameOffsets =
      EstimatedStackSize >= EstimatedRSStackSizeLimit ||
      MFI.hasVarSizedObjects() ||
      (MFI.adjustsStack() && !canSimplifyCallFramePseudos(MF)) ||
      // For large argument stacks, FP-relative addressing may overflow.
      (HasFP && (MaxFixedOffset - MaxFPOffset) >= (int)EstimatedRSStackSizeLimit);
  if (BigFrameOffsets ||
      !CanEliminateFrame || RegInfo->cannotEliminateFrame(MF)) {
    AFI->setHasStackFrame(true);

    if (HasFP) {
      SavedRegs.set(FramePtr);
      // If the frame pointer is required by the ABI, also spill LR so that we
      // emit a complete frame record.
      if (MF.getTarget().Options.DisableFramePointerElim(MF) && !LRSpilled) {
        SavedRegs.set(ARM::LR);
        LRSpilled = true;
        NumGPRSpills++;
        auto LRPos = llvm::find(UnspilledCS1GPRs, ARM::LR);
        if (LRPos != UnspilledCS1GPRs.end())
          UnspilledCS1GPRs.erase(LRPos);
      }
      auto FPPos = llvm::find(UnspilledCS1GPRs, FramePtr);
      if (FPPos != UnspilledCS1GPRs.end())
        UnspilledCS1GPRs.erase(FPPos);
      NumGPRSpills++;
      if (FramePtr == ARM::R7)
        CS1Spilled = true;
    }

    // This is set when we insert a spill for an unused register that can now
    // be used for register scavenging.
    bool ExtraCSSpill = false;

    if (AFI->isThumb1OnlyFunction()) {
      // For Thumb1-only targets, we need some low registers when we save and
      // restore the high registers (which aren't allocatable, but could be
      // used by inline assembly) because the push/pop instructions cannot
      // access high registers. If necessary, we might need to push more low
      // registers to ensure that there is at least one free that can be used
      // for the saving & restoring, and preferably we should ensure that as
      // many as are needed are available so that fewer push/pop instructions
      // are required.

      // Low registers which are not currently pushed, but could be (r4-r7).
      SmallVector<unsigned, 4> AvailableRegs;

      // Unused argument registers (r0-r3) can be clobbered in the prologue for
      // free.
      int EntryRegDeficit = 0;
      for (unsigned Reg : {ARM::R0, ARM::R1, ARM::R2, ARM::R3}) {
        if (!MF.getRegInfo().isLiveIn(Reg)) {
          --EntryRegDeficit;
          LLVM_DEBUG(dbgs()
                     << printReg(Reg, TRI)
                     << " is unused argument register, EntryRegDeficit = "
                     << EntryRegDeficit << "\n");
        }
      }

      // Unused return registers can be clobbered in the epilogue for free.
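      // For example, a function returning a single i32 uses only r0, so
      // ExitRegDeficit = 1 - 4 = -3: r1-r3 are free in the epilogue.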
      int ExitRegDeficit = AFI->getReturnRegsCount() - 4;
      LLVM_DEBUG(dbgs() << AFI->getReturnRegsCount()
                        << " return regs used, ExitRegDeficit = "
                        << ExitRegDeficit << "\n");

      int RegDeficit = std::max(EntryRegDeficit, ExitRegDeficit);
      LLVM_DEBUG(dbgs() << "RegDeficit = " << RegDeficit << "\n");

      // r4-r6 can be used in the prologue if they are pushed by the first push
      // instruction.
      for (unsigned Reg : {ARM::R4, ARM::R5, ARM::R6}) {
        if (SavedRegs.test(Reg)) {
          --RegDeficit;
          LLVM_DEBUG(dbgs() << printReg(Reg, TRI)
                            << " is saved low register, RegDeficit = "
                            << RegDeficit << "\n");
        } else {
          AvailableRegs.push_back(Reg);
          LLVM_DEBUG(
              dbgs()
              << printReg(Reg, TRI)
              << " is non-saved low register, adding to AvailableRegs\n");
        }
      }

      // r7 can be used if it is not being used as the frame pointer.
      if (!HasFP) {
        if (SavedRegs.test(ARM::R7)) {
          --RegDeficit;
          LLVM_DEBUG(dbgs() << "%r7 is saved low register, RegDeficit = "
                            << RegDeficit << "\n");
        } else {
          AvailableRegs.push_back(ARM::R7);
          LLVM_DEBUG(
              dbgs()
              << "%r7 is non-saved low register, adding to AvailableRegs\n");
        }
      }

      // Each of r8-r11 needs to be copied to a low register, then pushed.
      for (unsigned Reg : {ARM::R8, ARM::R9, ARM::R10, ARM::R11}) {
        if (SavedRegs.test(Reg)) {
          ++RegDeficit;
          LLVM_DEBUG(dbgs() << printReg(Reg, TRI)
                            << " is saved high register, RegDeficit = "
                            << RegDeficit << "\n");
        }
      }

      // LR can only be used by PUSH, not POP, and can't be used at all if the
      // llvm.returnaddress intrinsic is used. This is only worth doing if we
      // are more limited at function entry than exit.
      if ((EntryRegDeficit > ExitRegDeficit) &&
          !(MF.getRegInfo().isLiveIn(ARM::LR) &&
            MF.getFrameInfo().isReturnAddressTaken())) {
        if (SavedRegs.test(ARM::LR)) {
          --RegDeficit;
          LLVM_DEBUG(dbgs() << "%lr is saved register, RegDeficit = "
                            << RegDeficit << "\n");
        } else {
          AvailableRegs.push_back(ARM::LR);
          LLVM_DEBUG(dbgs() << "%lr is not saved, adding to AvailableRegs\n");
        }
      }

      // If there are more high registers that need pushing than low registers
      // available, push some more low registers so that we can use fewer push
      // instructions. This might not reduce RegDeficit all the way to zero,
      // because we can only guarantee that r4-r6 are available, but r8-r11 may
      // need saving.
      LLVM_DEBUG(dbgs() << "Final RegDeficit = " << RegDeficit << "\n");
      for (; RegDeficit > 0 && !AvailableRegs.empty(); --RegDeficit) {
        unsigned Reg = AvailableRegs.pop_back_val();
        LLVM_DEBUG(dbgs() << "Spilling " << printReg(Reg, TRI)
                          << " to make up reg deficit\n");
        SavedRegs.set(Reg);
        NumGPRSpills++;
        CS1Spilled = true;
        assert(!MRI.isReserved(Reg) && "Should not be reserved");
        if (!MRI.isPhysRegUsed(Reg))
          ExtraCSSpill = true;
        UnspilledCS1GPRs.erase(llvm::find(UnspilledCS1GPRs, Reg));
        if (Reg == ARM::LR)
          LRSpilled = true;
      }
      LLVM_DEBUG(dbgs() << "After adding spills, RegDeficit = " << RegDeficit
                        << "\n");
    }

    // If LR is not spilled, but at least one of R4, R5, R6, and R7 is, spill
    // LR as well so we can fold BX_RET into the register restore (LDM).
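    // For example, this turns "pop {r4}; bx lr" into the single instruction
    // "pop {r4, pc}".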
    if (!LRSpilled && CS1Spilled) {
      SavedRegs.set(ARM::LR);
      NumGPRSpills++;
      SmallVectorImpl<unsigned>::iterator LRPos;
      LRPos = llvm::find(UnspilledCS1GPRs, (unsigned)ARM::LR);
      if (LRPos != UnspilledCS1GPRs.end())
        UnspilledCS1GPRs.erase(LRPos);

      ForceLRSpill = false;
      if (!MRI.isReserved(ARM::LR) && !MRI.isPhysRegUsed(ARM::LR))
        ExtraCSSpill = true;
    }

    // If stack and double are 8-byte aligned and we are spilling an odd number
    // of GPRs, spill one extra callee save GPR so we won't have to pad between
    // the integer and double callee save areas.
    LLVM_DEBUG(dbgs() << "NumGPRSpills = " << NumGPRSpills << "\n");
    unsigned TargetAlign = getStackAlignment();
    if (TargetAlign >= 8 && (NumGPRSpills & 1)) {
      if (CS1Spilled && !UnspilledCS1GPRs.empty()) {
        for (unsigned i = 0, e = UnspilledCS1GPRs.size(); i != e; ++i) {
          unsigned Reg = UnspilledCS1GPRs[i];
          // Don't spill a high register if the function is Thumb. In the case
          // of Windows on ARM, accept R11 (the frame pointer).
          if (!AFI->isThumbFunction() ||
              (STI.isTargetWindows() && Reg == ARM::R11) ||
              isARMLowRegister(Reg) || Reg == ARM::LR) {
            SavedRegs.set(Reg);
            LLVM_DEBUG(dbgs() << "Spilling " << printReg(Reg, TRI)
                              << " to make up alignment\n");
            if (!MRI.isReserved(Reg) && !MRI.isPhysRegUsed(Reg))
              ExtraCSSpill = true;
            break;
          }
        }
      } else if (!UnspilledCS2GPRs.empty() && !AFI->isThumb1OnlyFunction()) {
        unsigned Reg = UnspilledCS2GPRs.front();
        SavedRegs.set(Reg);
        LLVM_DEBUG(dbgs() << "Spilling " << printReg(Reg, TRI)
                          << " to make up alignment\n");
        if (!MRI.isReserved(Reg) && !MRI.isPhysRegUsed(Reg))
          ExtraCSSpill = true;
      }
    }

    // Estimate if we might need to scavenge a register at some point in order
    // to materialize a stack offset. If so, either spill one additional
    // callee-saved register or reserve a special spill slot to facilitate
    // register scavenging. Thumb1 needs a spill slot for stack pointer
    // adjustments also, even when the frame itself is small.
    if (BigFrameOffsets && !ExtraCSSpill) {
      // If any non-reserved CS register isn't spilled, just spill one or two
      // extra. That should take care of it!
      unsigned NumExtras = TargetAlign / 4;
      SmallVector<unsigned, 2> Extras;
      while (NumExtras && !UnspilledCS1GPRs.empty()) {
        unsigned Reg = UnspilledCS1GPRs.back();
        UnspilledCS1GPRs.pop_back();
        if (!MRI.isReserved(Reg) &&
            (!AFI->isThumb1OnlyFunction() || isARMLowRegister(Reg) ||
             Reg == ARM::LR)) {
          Extras.push_back(Reg);
          NumExtras--;
        }
      }
      // For non-Thumb1 functions, also check for hi-reg CS registers.
      if (!AFI->isThumb1OnlyFunction()) {
        while (NumExtras && !UnspilledCS2GPRs.empty()) {
          unsigned Reg = UnspilledCS2GPRs.back();
          UnspilledCS2GPRs.pop_back();
          if (!MRI.isReserved(Reg)) {
            Extras.push_back(Reg);
            NumExtras--;
          }
        }
      }
      if (NumExtras == 0) {
        for (unsigned Reg : Extras) {
          SavedRegs.set(Reg);
          if (!MRI.isPhysRegUsed(Reg))
            ExtraCSSpill = true;
        }
      }
      if (!ExtraCSSpill && !AFI->isThumb1OnlyFunction()) {
        // Note: Thumb1 functions spill to R12, not the stack. Reserve a slot
        // closest to SP or the frame pointer.
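        // The register scavenger will use this emergency slot when it needs
        // to materialize a large frame offset and no register is free.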
        assert(RS && "Register scavenging not provided");
        const TargetRegisterClass &RC = ARM::GPRRegClass;
        unsigned Size = TRI->getSpillSize(RC);
        unsigned Align = TRI->getSpillAlignment(RC);
        RS->addScavengingFrameIndex(MFI.CreateStackObject(Size, Align, false));
      }
    }
  }

  if (ForceLRSpill) {
    SavedRegs.set(ARM::LR);
    AFI->setLRIsSpilledForFarJump(true);
  }
}

MachineBasicBlock::iterator ARMFrameLowering::eliminateCallFramePseudoInstr(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator I) const {
  const ARMBaseInstrInfo &TII =
      *static_cast<const ARMBaseInstrInfo *>(MF.getSubtarget().getInstrInfo());
  if (!hasReservedCallFrame(MF)) {
    // If we have alloca, convert as follows:
    // ADJCALLSTACKDOWN -> sub, sp, sp, amount
    // ADJCALLSTACKUP   -> add, sp, sp, amount
    MachineInstr &Old = *I;
    DebugLoc dl = Old.getDebugLoc();
    unsigned Amount = TII.getFrameSize(Old);
    if (Amount != 0) {
      // We need to keep the stack aligned properly. To do this, we round the
      // amount of space needed for the outgoing arguments up to the next
      // alignment boundary.
      Amount = alignSPAdjust(Amount);

      ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
      assert(!AFI->isThumb1OnlyFunction() &&
             "This eliminateCallFramePseudoInstr does not support Thumb1!");
      bool isARM = !AFI->isThumbFunction();

      // Replace the pseudo instruction with a new instruction...
      unsigned Opc = Old.getOpcode();
      int PIdx = Old.findFirstPredOperandIdx();
      ARMCC::CondCodes Pred =
          (PIdx == -1) ? ARMCC::AL
                       : (ARMCC::CondCodes)Old.getOperand(PIdx).getImm();
      unsigned PredReg = TII.getFramePred(Old);
      if (Opc == ARM::ADJCALLSTACKDOWN || Opc == ARM::tADJCALLSTACKDOWN) {
        emitSPUpdate(isARM, MBB, I, dl, TII, -Amount, MachineInstr::NoFlags,
                     Pred, PredReg);
      } else {
        assert(Opc == ARM::ADJCALLSTACKUP || Opc == ARM::tADJCALLSTACKUP);
        emitSPUpdate(isARM, MBB, I, dl, TII, Amount, MachineInstr::NoFlags,
                     Pred, PredReg);
      }
    }
  }
  return MBB.erase(I);
}

/// Get the minimum constant for ARM that is greater than or equal to the
/// argument. In ARM, constants can have any value that can be produced by
/// rotating an 8-bit value to the right by an even number of bits within a
/// 32-bit word. For example, 301 (0x12D) is not encodable and is rounded up
/// to 304 (0x130 = 0x4C << 2), which is.
static uint32_t alignToARMConstant(uint32_t Value) {
  unsigned Shifted = 0;

  if (Value == 0)
    return 0;

  while (!(Value & 0xC0000000)) {
    Value = Value << 2;
    Shifted += 2;
  }

  bool Carry = (Value & 0x00FFFFFF);
  Value = ((Value & 0xFF000000) >> 24) + Carry;

  if (Value & 0x00000100)
    Value = Value & 0x000001FC;

  if (Shifted > 24)
    Value = Value >> (Shifted - 24);
  else
    Value = Value << (24 - Shifted);

  return Value;
}

// The stack limit in the TCB is set to this many bytes above the actual
// stack limit.
static const uint64_t kSplitStackAvailable = 256;

// Adjust the function prologue to enable split stacks. This currently only
// supports android and linux.
//
// The ABI of the segmented stack prologue is a little arbitrary, but it must
// be well defined in order to allow for consistent implementations of the
// __morestack helper function.
// The ABI is also not a normal ABI in that it doesn't follow the normal
// calling conventions because this allows the prologue of each function to be
// optimized further.
//
// Currently, the ABI looks like (when calling __morestack)
//
//  * r4 holds the minimum stack size requested for this function call
//  * r5 holds the stack size of the arguments to the function
//  * the beginning of the function is 3 instructions after the call to
//    __morestack
//
// Implementations of __morestack should use r4 to allocate a new stack, r5 to
// place the arguments on to the new stack, and the 3-instruction knowledge to
// jump directly to the body of the function when working on the new stack.
//
// An old (and possibly no longer compatible) implementation of __morestack for
// ARM can be found at [1].
//
// [1] - https://github.com/mozilla/rust/blob/86efd9/src/rt/arch/arm/morestack.S
void ARMFrameLowering::adjustForSegmentedStacks(
    MachineFunction &MF, MachineBasicBlock &PrologueMBB) const {
  unsigned Opcode;
  unsigned CFIIndex;
  const ARMSubtarget *ST = &MF.getSubtarget<ARMSubtarget>();
  bool Thumb = ST->isThumb();

  // Sadly, this currently doesn't support varargs, nor platforms other than
  // android/linux. Note that both thumb1 and thumb2 are supported on
  // android/linux.
  if (MF.getFunction().isVarArg())
    report_fatal_error("Segmented stacks do not support vararg functions.");
  if (!ST->isTargetAndroid() && !ST->isTargetLinux())
    report_fatal_error("Segmented stacks not supported on this platform.");

  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineModuleInfo &MMI = MF.getMMI();
  MCContext &Context = MMI.getContext();
  const MCRegisterInfo *MRI = Context.getRegisterInfo();
  const ARMBaseInstrInfo &TII =
      *static_cast<const ARMBaseInstrInfo *>(MF.getSubtarget().getInstrInfo());
  ARMFunctionInfo *ARMFI = MF.getInfo<ARMFunctionInfo>();
  DebugLoc DL;

  uint64_t StackSize = MFI.getStackSize();

  // Do not generate a prologue for functions with a stack of size zero.
  if (StackSize == 0)
    return;

  // Use R4 and R5 as scratch registers.
  // We save R4 and R5 before use and restore them before leaving the function.
  unsigned ScratchReg0 = ARM::R4;
  unsigned ScratchReg1 = ARM::R5;
  uint64_t AlignedStackSize;

  MachineBasicBlock *PrevStackMBB = MF.CreateMachineBasicBlock();
  MachineBasicBlock *PostStackMBB = MF.CreateMachineBasicBlock();
  MachineBasicBlock *AllocMBB = MF.CreateMachineBasicBlock();
  MachineBasicBlock *GetMBB = MF.CreateMachineBasicBlock();
  MachineBasicBlock *McrMBB = MF.CreateMachineBasicBlock();

  // Grab everything that reaches PrologueMBB to update their liveness as well.
  SmallPtrSet<MachineBasicBlock *, 8> BeforePrologueRegion;
  SmallVector<MachineBasicBlock *, 2> WalkList;
  WalkList.push_back(&PrologueMBB);

  do {
    MachineBasicBlock *CurMBB = WalkList.pop_back_val();
    for (MachineBasicBlock *PredBB : CurMBB->predecessors()) {
      if (BeforePrologueRegion.insert(PredBB).second)
        WalkList.push_back(PredBB);
    }
  } while (!WalkList.empty());

  // The order in that list is important.
  // The blocks will all be inserted before PrologueMBB using that order.
  // Therefore the block that should appear first in the CFG should appear
  // first in the list.
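  // The CFG edges set up at the end of this function are:
  //   PrevStackMBB -> McrMBB -> GetMBB -> {AllocMBB, PostStackMBB},
  //   AllocMBB -> PostStackMBB -> PrologueMBB.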
  MachineBasicBlock *AddedBlocks[] = {PrevStackMBB, McrMBB, GetMBB, AllocMBB,
                                      PostStackMBB};

  for (MachineBasicBlock *B : AddedBlocks)
    BeforePrologueRegion.insert(B);

  for (const auto &LI : PrologueMBB.liveins()) {
    for (MachineBasicBlock *PredBB : BeforePrologueRegion)
      PredBB->addLiveIn(LI);
  }

  // Remove the newly added blocks from the list, since we know
  // we do not have to do the following updates for them.
  for (MachineBasicBlock *B : AddedBlocks) {
    BeforePrologueRegion.erase(B);
    MF.insert(PrologueMBB.getIterator(), B);
  }

  for (MachineBasicBlock *MBB : BeforePrologueRegion) {
    // Make sure the LiveIns are still sorted and unique.
    MBB->sortUniqueLiveIns();
    // Replace the edges to PrologueMBB by edges to the sequences
    // we are about to add.
    MBB->ReplaceUsesOfBlockWith(&PrologueMBB, AddedBlocks[0]);
  }

  // Round the required stack size up so that it satisfies the ARM constant
  // criterion (see alignToARMConstant above).
  AlignedStackSize = alignToARMConstant(StackSize);

  // When the frame size is less than 256 we just compare the stack
  // boundary directly to the value of the stack pointer, per gcc.
  bool CompareStackPointer = AlignedStackSize < kSplitStackAvailable;

  // We will use two of the callee save registers as scratch registers so we
  // need to save those registers onto the stack.
  // We will use SR0 to hold the stack limit and SR1 to hold the stack size
  // requested and arguments for __morestack().
  // SR0: Scratch Register #0
  // SR1: Scratch Register #1
  // push {SR0, SR1}
  if (Thumb) {
    BuildMI(PrevStackMBB, DL, TII.get(ARM::tPUSH))
        .add(predOps(ARMCC::AL))
        .addReg(ScratchReg0)
        .addReg(ScratchReg1);
  } else {
    BuildMI(PrevStackMBB, DL, TII.get(ARM::STMDB_UPD))
        .addReg(ARM::SP, RegState::Define)
        .addReg(ARM::SP)
        .add(predOps(ARMCC::AL))
        .addReg(ScratchReg0)
        .addReg(ScratchReg1);
  }

  // Emit the relevant DWARF information about the change in stack pointer as
  // well as where to find both r4 and r5 (the callee-save registers).
  CFIIndex =
      MF.addFrameInst(MCCFIInstruction::createDefCfaOffset(nullptr, -8));
  BuildMI(PrevStackMBB, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex);
  CFIIndex = MF.addFrameInst(MCCFIInstruction::createOffset(
      nullptr, MRI->getDwarfRegNum(ScratchReg1, true), -4));
  BuildMI(PrevStackMBB, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex);
  CFIIndex = MF.addFrameInst(MCCFIInstruction::createOffset(
      nullptr, MRI->getDwarfRegNum(ScratchReg0, true), -8));
  BuildMI(PrevStackMBB, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex);

  // mov SR1, sp
  if (Thumb) {
    BuildMI(McrMBB, DL, TII.get(ARM::tMOVr), ScratchReg1)
        .addReg(ARM::SP)
        .add(predOps(ARMCC::AL));
  } else if (CompareStackPointer) {
    BuildMI(McrMBB, DL, TII.get(ARM::MOVr), ScratchReg1)
        .addReg(ARM::SP)
        .add(predOps(ARMCC::AL))
        .add(condCodeOp());
  }

  // sub SR1, sp, #StackSize
  if (!CompareStackPointer && Thumb) {
    BuildMI(McrMBB, DL, TII.get(ARM::tSUBi8), ScratchReg1)
        .add(condCodeOp())
        .addReg(ScratchReg1)
        .addImm(AlignedStackSize)
        .add(predOps(ARMCC::AL));
  } else if (!CompareStackPointer) {
    BuildMI(McrMBB, DL, TII.get(ARM::SUBri), ScratchReg1)
        .addReg(ARM::SP)
        .addImm(AlignedStackSize)
        .add(predOps(ARMCC::AL))
        .add(condCodeOp());
  }

  if (Thumb && ST->isThumb1Only()) {
    unsigned PCLabelId = ARMFI->createPICLabelUId();
    ARMConstantPoolValue *NewCPV = ARMConstantPoolSymbol::Create(
        MF.getFunction().getContext(), "__STACK_LIMIT", PCLabelId, 0);
    MachineConstantPool *MCP = MF.getConstantPool();
    unsigned CPI = MCP->getConstantPoolIndex(NewCPV, 4);

    // ldr SR0, [pc, offset(STACK_LIMIT)]
    BuildMI(GetMBB, DL, TII.get(ARM::tLDRpci), ScratchReg0)
        .addConstantPoolIndex(CPI)
        .add(predOps(ARMCC::AL));

    // ldr SR0, [SR0]
    BuildMI(GetMBB, DL, TII.get(ARM::tLDRi), ScratchReg0)
        .addReg(ScratchReg0)
        .addImm(0)
        .add(predOps(ARMCC::AL));
  } else {
    // Get the TLS base address from the coprocessor.
    // mrc p15, #0, SR0, c13, c0, #3
    BuildMI(McrMBB, DL, TII.get(ARM::MRC), ScratchReg0)
        .addImm(15)
        .addImm(0)
        .addImm(13)
        .addImm(0)
        .addImm(3)
        .add(predOps(ARMCC::AL));

    // Use the last TLS slot on android and a private field of the TCB on
    // linux.
    assert(ST->isTargetAndroid() || ST->isTargetLinux());
    unsigned TlsOffset = ST->isTargetAndroid() ? 63 : 1;

    // Get the stack limit from the right offset.
    // ldr SR0, [sr0, #4 * TlsOffset]
    BuildMI(GetMBB, DL, TII.get(ARM::LDRi12), ScratchReg0)
        .addReg(ScratchReg0)
        .addImm(4 * TlsOffset)
        .add(predOps(ARMCC::AL));
  }

  // Compare the stack limit with the stack size requested.
  // cmp SR0, SR1
  Opcode = Thumb ? ARM::tCMPr : ARM::CMPrr;
  BuildMI(GetMBB, DL, TII.get(Opcode))
      .addReg(ScratchReg0)
      .addReg(ScratchReg1)
      .add(predOps(ARMCC::AL));

  // This jump is taken if StackLimit < SP - stack required.
  Opcode = Thumb ? ARM::tBcc : ARM::Bcc;
  BuildMI(GetMBB, DL, TII.get(Opcode)).addMBB(PostStackMBB)
      .addImm(ARMCC::LO)
      .addReg(ARM::CPSR);

  // Calling __morestack(StackSize, Size of stack arguments).
  // __morestack knows that the stack size requested is in SR0 (r4)
  // and the size of the stack arguments is in SR1 (r5).

  // Pass the first argument to __morestack in Scratch Register #0:
  // the amount of stack required.
  if (Thumb) {
    BuildMI(AllocMBB, DL, TII.get(ARM::tMOVi8), ScratchReg0)
        .add(condCodeOp())
        .addImm(AlignedStackSize)
        .add(predOps(ARMCC::AL));
  } else {
    BuildMI(AllocMBB, DL, TII.get(ARM::MOVi), ScratchReg0)
        .addImm(AlignedStackSize)
        .add(predOps(ARMCC::AL))
        .add(condCodeOp());
  }
  // Pass the second argument to __morestack in Scratch Register #1:
  // the amount of stack used to pass the function arguments.
  if (Thumb) {
    BuildMI(AllocMBB, DL, TII.get(ARM::tMOVi8), ScratchReg1)
        .add(condCodeOp())
        .addImm(alignToARMConstant(ARMFI->getArgumentStackSize()))
        .add(predOps(ARMCC::AL));
  } else {
    BuildMI(AllocMBB, DL, TII.get(ARM::MOVi), ScratchReg1)
        .addImm(alignToARMConstant(ARMFI->getArgumentStackSize()))
        .add(predOps(ARMCC::AL))
        .add(condCodeOp());
  }
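  // With the push, call, and pops emitted below, AllocMBB roughly builds this
  // ARM-mode sequence (sketch; AlignedArgSize stands for
  // alignToARMConstant(getArgumentStackSize())):
  //   mov  r4, #AlignedStackSize
  //   mov  r5, #AlignedArgSize
  //   push {lr}
  //   bl   __morestack
  //   pop  {lr}
  //   pop  {r4, r5}
  //   bx   lr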
  // push {lr} - Save the return address of this function.
  if (Thumb) {
    BuildMI(AllocMBB, DL, TII.get(ARM::tPUSH))
        .add(predOps(ARMCC::AL))
        .addReg(ARM::LR);
  } else {
    BuildMI(AllocMBB, DL, TII.get(ARM::STMDB_UPD))
        .addReg(ARM::SP, RegState::Define)
        .addReg(ARM::SP)
        .add(predOps(ARMCC::AL))
        .addReg(ARM::LR);
  }

  // Emit the DWARF info about the change in stack as well as where to find the
  // previous value of the link register.
  CFIIndex =
      MF.addFrameInst(MCCFIInstruction::createDefCfaOffset(nullptr, -12));
  BuildMI(AllocMBB, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex);
  CFIIndex = MF.addFrameInst(MCCFIInstruction::createOffset(
      nullptr, MRI->getDwarfRegNum(ARM::LR, true), -12));
  BuildMI(AllocMBB, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex);

  // Call __morestack().
  if (Thumb) {
    BuildMI(AllocMBB, DL, TII.get(ARM::tBL))
        .add(predOps(ARMCC::AL))
        .addExternalSymbol("__morestack");
  } else {
    BuildMI(AllocMBB, DL, TII.get(ARM::BL))
        .addExternalSymbol("__morestack");
  }

  // pop {lr} - Restore the return address of the original function.
  if (Thumb) {
    if (ST->isThumb1Only()) {
      BuildMI(AllocMBB, DL, TII.get(ARM::tPOP))
          .add(predOps(ARMCC::AL))
          .addReg(ScratchReg0);
      BuildMI(AllocMBB, DL, TII.get(ARM::tMOVr), ARM::LR)
          .addReg(ScratchReg0)
          .add(predOps(ARMCC::AL));
    } else {
      BuildMI(AllocMBB, DL, TII.get(ARM::t2LDR_POST))
          .addReg(ARM::LR, RegState::Define)
          .addReg(ARM::SP, RegState::Define)
          .addReg(ARM::SP)
          .addImm(4)
          .add(predOps(ARMCC::AL));
    }
  } else {
    BuildMI(AllocMBB, DL, TII.get(ARM::LDMIA_UPD))
        .addReg(ARM::SP, RegState::Define)
        .addReg(ARM::SP)
        .add(predOps(ARMCC::AL))
        .addReg(ARM::LR);
  }

  // Restore SR0 and SR1 in case __morestack() was called.
  // __morestack() will skip the PostStackMBB block, so we need to restore
  // the scratch registers from here.
  // pop {SR0, SR1}
  if (Thumb) {
    BuildMI(AllocMBB, DL, TII.get(ARM::tPOP))
        .add(predOps(ARMCC::AL))
        .addReg(ScratchReg0)
        .addReg(ScratchReg1);
  } else {
    BuildMI(AllocMBB, DL, TII.get(ARM::LDMIA_UPD))
        .addReg(ARM::SP, RegState::Define)
        .addReg(ARM::SP)
        .add(predOps(ARMCC::AL))
        .addReg(ScratchReg0)
        .addReg(ScratchReg1);
  }

  // Update the CFA offset now that we've popped.
  CFIIndex = MF.addFrameInst(MCCFIInstruction::createDefCfaOffset(nullptr, 0));
  BuildMI(AllocMBB, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex);

  // Return from this function.
  BuildMI(AllocMBB, DL, TII.get(ST->getReturnOpcode())).add(predOps(ARMCC::AL));

  // Restore SR0 and SR1 in case __morestack() was not called.
  // pop {SR0, SR1}
  if (Thumb) {
    BuildMI(PostStackMBB, DL, TII.get(ARM::tPOP))
        .add(predOps(ARMCC::AL))
        .addReg(ScratchReg0)
        .addReg(ScratchReg1);
  } else {
    BuildMI(PostStackMBB, DL, TII.get(ARM::LDMIA_UPD))
        .addReg(ARM::SP, RegState::Define)
        .addReg(ARM::SP)
        .add(predOps(ARMCC::AL))
        .addReg(ScratchReg0)
        .addReg(ScratchReg1);
  }

  // Update the CFA offset now that we've popped.
  CFIIndex = MF.addFrameInst(MCCFIInstruction::createDefCfaOffset(nullptr, 0));
  BuildMI(PostStackMBB, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex);

  // Tell debuggers that r4 and r5 are now the same as they were in the
  // previous function, i.e. that they have the "Same Value".
  CFIIndex = MF.addFrameInst(MCCFIInstruction::createSameValue(
      nullptr, MRI->getDwarfRegNum(ScratchReg0, true)));
  BuildMI(PostStackMBB, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex);
  CFIIndex = MF.addFrameInst(MCCFIInstruction::createSameValue(
      nullptr, MRI->getDwarfRegNum(ScratchReg1, true)));
  BuildMI(PostStackMBB, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex);

  // Organize the MBB successor lists.
  PostStackMBB->addSuccessor(&PrologueMBB);

  AllocMBB->addSuccessor(PostStackMBB);

  GetMBB->addSuccessor(PostStackMBB);
  GetMBB->addSuccessor(AllocMBB);

  McrMBB->addSuccessor(GetMBB);

  PrevStackMBB->addSuccessor(McrMBB);

#ifdef EXPENSIVE_CHECKS
  MF.verify();
#endif
}