//===- AArch64FrameLowering.cpp - AArch64 Frame Lowering -------*- C++ -*-====//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of TargetFrameLowering class.
//
// On AArch64, stack frames are structured as follows:
//
// The stack grows downward.
//
// All of the individual frame areas on the frame below are optional, i.e. it's
// possible to create a function so that the particular area isn't present
// in the frame.
//
// At function entry, the "frame" looks as follows:
//
// |                                   | Higher address
// |-----------------------------------|
// |                                   |
// | arguments passed on the stack     |
// |                                   |
// |-----------------------------------| <- sp
// |                                   | Lower address
//
//
// After the prologue has run, the frame has the following general structure.
// Note that this doesn't depict the case where a red-zone is used. Also,
// technically the last frame area (VLAs) doesn't get created until in the
// main function body, after the prologue is run. However, it's depicted here
// for completeness.
//
// |                                   | Higher address
// |-----------------------------------|
// |                                   |
// | arguments passed on the stack     |
// |                                   |
// |-----------------------------------|
// |                                   |
// | (Win64 only) varargs from reg     |
// |                                   |
// |-----------------------------------|
// |                                   |
// | callee-saved gpr registers        | <--.
// |                                   |    | On Darwin platforms these
// |- - - - - - - - - - - - - - - - - -|    | callee saves are swapped,
// |                                   |    | (frame record first)
// | prev_fp, prev_lr                  | <--'
// | (a.k.a. "frame record")           |
// |-----------------------------------| <- fp(=x29)
// |                                   |
// | callee-saved fp/simd/SVE regs     |
// |                                   |
// |-----------------------------------|
// |                                   |
// |        SVE stack objects          |
// |                                   |
// |-----------------------------------|
// |.empty.space.to.make.part.below....|
// |.aligned.in.case.it.needs.more.than| (size of this area is unknown at
// |.the.standard.16-byte.alignment....|  compile time; if present)
// |-----------------------------------|
// |                                   |
// | local variables of fixed size     |
// | including spill slots             |
// |-----------------------------------| <- bp(not defined by ABI,
// |.variable-sized.local.variables....|       LLVM chooses X19)
// |.(VLAs)............................| (size of this area is unknown at
// |...................................|  compile time)
// |-----------------------------------| <- sp
// |                                   | Lower address
//
//
// To access the data in a frame, a constant offset must be computable at
// compile time from one of the pointers (fp, bp, sp). The size of the areas
// with a dotted background cannot be computed at compile time if they are
// present, so all three of fp, bp and sp must be set up to be able to access
// all contents in the frame areas, assuming all of the frame areas are
// non-empty.
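//
// For example (illustrative): in a function that has both a VLA and an
// over-aligned local, incoming stack arguments are addressed from fp,
// fixed-size locals from bp, and outgoing call arguments from sp.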
//
// For most functions, some of the frame areas are empty. For those functions,
// it may not be necessary to set up fp or bp:
// * A base pointer is definitely needed when there are both VLAs and local
//   variables with more-than-default alignment requirements.
// * A frame pointer is definitely needed when there are local variables with
//   more-than-default alignment requirements.
//
// For Darwin platforms the frame-record (fp, lr) is stored at the top of the
// callee-saved area, since the unwind encoding does not allow for encoding
// this dynamically and existing tools depend on this layout. For other
// platforms, the frame-record is stored at the bottom of the (gpr) callee-saved
// area to allow SVE stack objects (allocated directly below the callee-saves,
// if available) to be accessed directly from the frame pointer.
// The SVE spill/fill instructions have VL-scaled addressing modes such
// as:
//     ldr z8, [fp, #-7 mul vl]
// For SVE the size of the vector length (VL) is not known at compile-time, so
// '#-7 mul vl' is an offset that can only be evaluated at runtime. With this
// layout, we don't need to add an unscaled offset to the frame pointer before
// accessing the SVE object in the frame.
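//
// For example (illustrative): if at runtime VL is 32 bytes, the hardware
// resolves '#-7 mul vl' to fp - 224; if VL is 64 bytes, the very same
// encoding resolves to fp - 448. No compile-time constant is needed in
// either case.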
//
// In some cases when a base pointer is not strictly needed, it is generated
// anyway when offsets from the frame pointer to access local variables become
// so large that the offset can't be encoded in the immediate fields of loads
// or stores.
//
// FIXME: also explain the redzone concept.
// FIXME: also explain the concept of reserved call frames.
//
//===----------------------------------------------------------------------===//

#include "AArch64FrameLowering.h"
#include "AArch64InstrInfo.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64RegisterInfo.h"
#include "AArch64StackOffset.h"
#include "AArch64Subtarget.h"
#include "AArch64TargetMachine.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "frame-info"

static cl::opt<bool> EnableRedZone("aarch64-redzone",
                                   cl::desc("enable use of redzone on AArch64"),
                                   cl::init(false), cl::Hidden);

static cl::opt<bool>
    ReverseCSRRestoreSeq("reverse-csr-restore-seq",
                         cl::desc("reverse the CSR restore sequence"),
                         cl::init(false), cl::Hidden);

STATISTIC(NumRedZoneFunctions, "Number of functions using red zone");

/// This is the biggest offset to the stack pointer we can encode in aarch64
/// instructions (without using a separate calculation and a temp register).
/// Note that the exception here are vector stores/loads which cannot encode any
/// displacements (see estimateRSStackSizeLimit(), isAArch64FrameOffsetLegal()).
static const unsigned DefaultSafeSPDisplacement = 255;

/// Look at each instruction that references stack frames and return the stack
/// size limit beyond which some of these instructions will require a scratch
/// register during their expansion later.
static unsigned estimateRSStackSizeLimit(MachineFunction &MF) {
  // FIXME: For now, just conservatively guesstimate based on unscaled indexing
  // range. We'll end up allocating an unnecessary spill slot a lot, but
  // realistically that's not a big deal at this stage of the game.
  for (MachineBasicBlock &MBB : MF) {
    for (MachineInstr &MI : MBB) {
      if (MI.isDebugInstr() || MI.isPseudo() ||
          MI.getOpcode() == AArch64::ADDXri ||
          MI.getOpcode() == AArch64::ADDSXri)
        continue;

      for (const MachineOperand &MO : MI.operands()) {
        if (!MO.isFI())
          continue;

        StackOffset Offset;
        if (isAArch64FrameOffsetLegal(MI, Offset, nullptr, nullptr, nullptr) ==
            AArch64FrameOffsetCannotUpdate)
          return 0;
      }
    }
  }
  return DefaultSafeSPDisplacement;
}

/// Returns the size of the entire SVE stack frame (callee-saves + spills).
static StackOffset getSVEStackSize(const MachineFunction &MF) {
  const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  return {(int64_t)AFI->getStackSizeSVE(), MVT::nxv1i8};
}

bool AArch64FrameLowering::canUseRedZone(const MachineFunction &MF) const {
  if (!EnableRedZone)
    return false;
  // Don't use the red zone if the function explicitly asks us not to.
  // This is typically used for kernel code.
  if (MF.getFunction().hasFnAttribute(Attribute::NoRedZone))
    return false;

  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  unsigned NumBytes = AFI->getLocalStackSize();

  return !(MFI.hasCalls() || hasFP(MF) || NumBytes > 128 ||
           getSVEStackSize(MF));
}
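
// Illustrative example for canUseRedZone: a leaf function (no calls) with no
// frame pointer, no SVE objects, and at most 128 bytes of locals can leave sp
// untouched and address those locals in the red zone below sp.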

/// hasFP - Return true if the specified function should have a dedicated frame
/// pointer register.
bool AArch64FrameLowering::hasFP(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
  // Win64 EH requires a frame pointer if funclets are present, as the locals
  // are accessed off the frame pointer in both the parent function and the
  // funclets.
  if (MF.hasEHFunclets())
    return true;
  // Retain behavior of always omitting the FP for leaf functions when
  // possible.
  if (MFI.hasCalls() && MF.getTarget().Options.DisableFramePointerElim(MF))
    return true;
  if (MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken() ||
      MFI.hasStackMap() || MFI.hasPatchPoint() ||
      RegInfo->needsStackRealignment(MF))
    return true;
  // With large call frames around we may need to use FP to access the
  // scavenging emergency spill slot.
  //
  // Unfortunately some calls to hasFP() like machine verifier ->
  // getReservedReg() -> hasFP in the middle of global isel are too early
  // to know the max call frame size. Hopefully conservatively returning "true"
  // in those cases is fine.
  // DefaultSafeSPDisplacement is fine as we only emergency spill GP regs.
  if (!MFI.isMaxCallFrameSizeComputed() ||
      MFI.getMaxCallFrameSize() > DefaultSafeSPDisplacement)
    return true;

  return false;
}

/// hasReservedCallFrame - Under normal circumstances, when a frame pointer is
/// not required, we reserve argument space for call sites in the function
/// immediately on entry to the current function. This eliminates the need for
/// add/sub sp brackets around call sites. Returns true if the call frame is
/// included as part of the stack frame.
bool
AArch64FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
  return !MF.getFrameInfo().hasVarSizedObjects();
}

MachineBasicBlock::iterator AArch64FrameLowering::eliminateCallFramePseudoInstr(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator I) const {
  const AArch64InstrInfo *TII =
      static_cast<const AArch64InstrInfo *>(MF.getSubtarget().getInstrInfo());
  DebugLoc DL = I->getDebugLoc();
  unsigned Opc = I->getOpcode();
  bool IsDestroy = Opc == TII->getCallFrameDestroyOpcode();
  uint64_t CalleePopAmount = IsDestroy ? I->getOperand(1).getImm() : 0;

  if (!hasReservedCallFrame(MF)) {
    unsigned Align = getStackAlignment();

    int64_t Amount = I->getOperand(0).getImm();
    Amount = alignTo(Amount, Align);
    if (!IsDestroy)
      Amount = -Amount;

    // N.b. if CalleePopAmount is valid but zero (i.e. callee would pop, but it
    // doesn't have to pop anything), then the first operand will be zero too so
    // this adjustment is a no-op.
    if (CalleePopAmount == 0) {
      // FIXME: in-function stack adjustment for calls is limited to 24 bits
      // because there's no guaranteed temporary register available.
      //
      // ADD/SUB (immediate) has only LSL #0 and LSL #12 available.
      // 1) For offset <= 12-bit, we use LSL #0
      // 2) For 12-bit <= offset <= 24-bit, we use two instructions. One uses
      //    LSL #0, and the other uses LSL #12.
      //
      // Most call frames will be allocated at the start of a function so
      // this is OK, but it is a limitation that needs dealing with.
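      //
      // For example (illustrative): a 0x45678-byte adjustment would be split
      // into two subtractions:
      //   sub sp, sp, #0x45, lsl #12    // 0x45000 bytes
      //   sub sp, sp, #0x678            // remaining 0x678 bytes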
      assert(Amount > -0xffffff && Amount < 0xffffff && "call frame too large");
      emitFrameOffset(MBB, I, DL, AArch64::SP, AArch64::SP, {Amount, MVT::i8},
                      TII);
    }
  } else if (CalleePopAmount != 0) {
    // If the calling convention demands that the callee pops arguments from the
    // stack, we want to add it back if we have a reserved call frame.
    assert(CalleePopAmount < 0xffffff && "call frame too large");
    emitFrameOffset(MBB, I, DL, AArch64::SP, AArch64::SP,
                    {-(int64_t)CalleePopAmount, MVT::i8}, TII);
  }
  return MBB.erase(I);
}

static bool ShouldSignReturnAddress(MachineFunction &MF) {
  // The function should be signed in the following situations:
  // - sign-return-address=all
  // - sign-return-address=non-leaf and the function spills the LR

  const Function &F = MF.getFunction();
  if (!F.hasFnAttribute("sign-return-address"))
    return false;

  StringRef Scope = F.getFnAttribute("sign-return-address").getValueAsString();
  if (Scope.equals("none"))
    return false;

  if (Scope.equals("all"))
    return true;

  assert(Scope.equals("non-leaf") && "Expected all, none or non-leaf");

  for (const auto &Info : MF.getFrameInfo().getCalleeSavedInfo())
    if (Info.getReg() == AArch64::LR)
      return true;

  return false;
}

void AArch64FrameLowering::emitCalleeSavedFrameMoves(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const {
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetSubtargetInfo &STI = MF.getSubtarget();
  const MCRegisterInfo *MRI = STI.getRegisterInfo();
  const TargetInstrInfo *TII = STI.getInstrInfo();
  DebugLoc DL = MBB.findDebugLoc(MBBI);

  // Add callee saved registers to move list.
  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
  if (CSI.empty())
    return;

  for (const auto &Info : CSI) {
    unsigned Reg = Info.getReg();
    int64_t Offset =
        MFI.getObjectOffset(Info.getFrameIdx()) - getOffsetOfLocalArea();
    unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
    unsigned CFIIndex = MF.addFrameInst(
        MCCFIInstruction::createOffset(nullptr, DwarfReg, Offset));
    BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
        .addCFIIndex(CFIIndex)
        .setMIFlags(MachineInstr::FrameSetup);
  }
}
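
// For example (illustrative): a callee-saved x19 spilled 16 bytes below the
// CFA yields a ".cfi_offset w19, -16" directive from the loop above.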

// Find a scratch register that we can use at the start of the prologue to
// re-align the stack pointer. We avoid using callee-save registers since they
// may appear to be free when this is called from canUseAsPrologue (during
// shrink wrapping), but then no longer be free when this is called from
// emitPrologue.
//
// FIXME: This is a bit conservative, since in the above case we could use one
// of the callee-save registers as a scratch temp to re-align the stack pointer,
// but we would then have to make sure that we were in fact saving at least one
// callee-save register in the prologue, which is additional complexity that
// doesn't seem worth the benefit.
static unsigned findScratchNonCalleeSaveRegister(MachineBasicBlock *MBB) {
  MachineFunction *MF = MBB->getParent();

  // If MBB is an entry block, use X9 as the scratch register
  if (&MF->front() == MBB)
    return AArch64::X9;

  const AArch64Subtarget &Subtarget = MF->getSubtarget<AArch64Subtarget>();
  const AArch64RegisterInfo &TRI = *Subtarget.getRegisterInfo();
  LivePhysRegs LiveRegs(TRI);
  LiveRegs.addLiveIns(*MBB);

  // Mark callee saved registers as used so we will not choose them.
  const MCPhysReg *CSRegs = MF->getRegInfo().getCalleeSavedRegs();
  for (unsigned i = 0; CSRegs[i]; ++i)
    LiveRegs.addReg(CSRegs[i]);

  // Prefer X9 since it was historically used for the prologue scratch reg.
  const MachineRegisterInfo &MRI = MF->getRegInfo();
  if (LiveRegs.available(MRI, AArch64::X9))
    return AArch64::X9;

  for (unsigned Reg : AArch64::GPR64RegClass) {
    if (LiveRegs.available(MRI, Reg))
      return Reg;
  }
  return AArch64::NoRegister;
}

bool AArch64FrameLowering::canUseAsPrologue(
    const MachineBasicBlock &MBB) const {
  const MachineFunction *MF = MBB.getParent();
  MachineBasicBlock *TmpMBB = const_cast<MachineBasicBlock *>(&MBB);
  const AArch64Subtarget &Subtarget = MF->getSubtarget<AArch64Subtarget>();
  const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo();

  // Don't need a scratch register if we're not going to re-align the stack.
  if (!RegInfo->needsStackRealignment(*MF))
    return true;
  // Otherwise, we can use any block as long as it has a scratch register
  // available.
  return findScratchNonCalleeSaveRegister(TmpMBB) != AArch64::NoRegister;
}

static bool windowsRequiresStackProbe(MachineFunction &MF,
                                      unsigned StackSizeInBytes) {
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  if (!Subtarget.isTargetWindows())
    return false;
  const Function &F = MF.getFunction();
  // TODO: When implementing stack protectors, take that into account
  // for the probe threshold.
  unsigned StackProbeSize = 4096;
  if (F.hasFnAttribute("stack-probe-size"))
    F.getFnAttribute("stack-probe-size")
        .getValueAsString()
        .getAsInteger(0, StackProbeSize);
  return (StackSizeInBytes >= StackProbeSize) &&
         !F.hasFnAttribute("no-stack-arg-probe");
}

bool AArch64FrameLowering::shouldCombineCSRLocalStackBump(
    MachineFunction &MF, unsigned StackBumpBytes) const {
  AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo();

  if (MF.getFunction().hasOptSize())
    return false;

  if (AFI->getLocalStackSize() == 0)
    return false;

  // 512 is the maximum immediate for stp/ldp that will be used for
  // callee-save save/restores
  if (StackBumpBytes >= 512 || windowsRequiresStackProbe(MF, StackBumpBytes))
    return false;

  if (MFI.hasVarSizedObjects())
    return false;

  if (RegInfo->needsStackRealignment(MF))
    return false;

  // This isn't strictly necessary, but it simplifies things a bit since the
  // current RedZone handling code assumes the SP is adjusted by the
  // callee-save save/restore code.
  if (canUseRedZone(MF))
    return false;

  // When there is an SVE area on the stack, always allocate the
  // callee-saves and spills/locals separately.
  if (getSVEStackSize(MF))
    return false;

  return true;
}
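
// Illustrative example for shouldCombineCSRLocalStackBump: with 32 bytes of
// callee-saves and 64 bytes of locals (96 bytes total, below the 512-byte
// stp/ldp immediate limit), the prologue can fold the entire allocation into
// the first callee-save store, e.g. "stp x29, x30, [sp, #-96]!".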

// Given a load or a store instruction, generate an appropriate unwinding SEH
// code on Windows.
static MachineBasicBlock::iterator InsertSEH(MachineBasicBlock::iterator MBBI,
                                             const TargetInstrInfo &TII,
                                             MachineInstr::MIFlag Flag) {
  unsigned Opc = MBBI->getOpcode();
  MachineBasicBlock *MBB = MBBI->getParent();
  MachineFunction &MF = *MBB->getParent();
  DebugLoc DL = MBBI->getDebugLoc();
  unsigned ImmIdx = MBBI->getNumOperands() - 1;
  int Imm = MBBI->getOperand(ImmIdx).getImm();
  MachineInstrBuilder MIB;
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo();

  switch (Opc) {
  default:
    llvm_unreachable("No SEH Opcode for this instruction");
  case AArch64::LDPDpost:
    Imm = -Imm;
    LLVM_FALLTHROUGH;
  case AArch64::STPDpre: {
    unsigned Reg0 = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
    unsigned Reg1 = RegInfo->getSEHRegNum(MBBI->getOperand(2).getReg());
    MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFRegP_X))
              .addImm(Reg0)
              .addImm(Reg1)
              .addImm(Imm * 8)
              .setMIFlag(Flag);
    break;
  }
  case AArch64::LDPXpost:
    Imm = -Imm;
    LLVM_FALLTHROUGH;
  case AArch64::STPXpre: {
    Register Reg0 = MBBI->getOperand(1).getReg();
    Register Reg1 = MBBI->getOperand(2).getReg();
    if (Reg0 == AArch64::FP && Reg1 == AArch64::LR)
      MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFPLR_X))
                .addImm(Imm * 8)
                .setMIFlag(Flag);
    else
      MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveRegP_X))
                .addImm(RegInfo->getSEHRegNum(Reg0))
                .addImm(RegInfo->getSEHRegNum(Reg1))
                .addImm(Imm * 8)
                .setMIFlag(Flag);
    break;
  }
  case AArch64::LDRDpost:
    Imm = -Imm;
    LLVM_FALLTHROUGH;
  case AArch64::STRDpre: {
    unsigned Reg = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
    MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFReg_X))
              .addImm(Reg)
              .addImm(Imm)
              .setMIFlag(Flag);
    break;
  }
  case AArch64::LDRXpost:
    Imm = -Imm;
    LLVM_FALLTHROUGH;
  case AArch64::STRXpre: {
    unsigned Reg = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
    MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveReg_X))
              .addImm(Reg)
              .addImm(Imm)
              .setMIFlag(Flag);
    break;
  }
  case AArch64::STPDi:
  case AArch64::LDPDi: {
    unsigned Reg0 = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg());
    unsigned Reg1 = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
    MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFRegP))
              .addImm(Reg0)
              .addImm(Reg1)
              .addImm(Imm * 8)
              .setMIFlag(Flag);
    break;
  }
  case AArch64::STPXi:
  case AArch64::LDPXi: {
    Register Reg0 = MBBI->getOperand(0).getReg();
    Register Reg1 = MBBI->getOperand(1).getReg();
    if (Reg0 == AArch64::FP && Reg1 == AArch64::LR)
      MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFPLR))
                .addImm(Imm * 8)
                .setMIFlag(Flag);
    else
      MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveRegP))
                .addImm(RegInfo->getSEHRegNum(Reg0))
                .addImm(RegInfo->getSEHRegNum(Reg1))
                .addImm(Imm * 8)
                .setMIFlag(Flag);
    break;
  }
  case AArch64::STRXui:
  case AArch64::LDRXui: {
    int Reg = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg());
    MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveReg))
              .addImm(Reg)
              .addImm(Imm * 8)
              .setMIFlag(Flag);
    break;
  }
  case AArch64::STRDui:
  case AArch64::LDRDui: {
    unsigned Reg = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg());
    MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFReg))
              .addImm(Reg)
              .addImm(Imm * 8)
              .setMIFlag(Flag);
    break;
  }
  }
  auto I = MBB->insertAfter(MBBI, MIB);
  return I;
}

// Fix up the SEH opcode associated with the save/restore instruction.
static void fixupSEHOpcode(MachineBasicBlock::iterator MBBI,
                           unsigned LocalStackSize) {
  MachineOperand *ImmOpnd = nullptr;
  unsigned ImmIdx = MBBI->getNumOperands() - 1;
  switch (MBBI->getOpcode()) {
  default:
    llvm_unreachable("Fix the offset in the SEH instruction");
  case AArch64::SEH_SaveFPLR:
  case AArch64::SEH_SaveRegP:
  case AArch64::SEH_SaveReg:
  case AArch64::SEH_SaveFRegP:
  case AArch64::SEH_SaveFReg:
    ImmOpnd = &MBBI->getOperand(ImmIdx);
    break;
  }
  if (ImmOpnd)
    ImmOpnd->setImm(ImmOpnd->getImm() + LocalStackSize);
}
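
// For example (illustrative): with a 64-byte local area folded into the SP
// bump, a ".seh_save_regp x19, 16" opcode is rewritten by fixupSEHOpcode to
// ".seh_save_regp x19, 80" so the unwinder sees the true SP-relative offset.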

// Convert callee-save register save/restore instruction to do stack pointer
// decrement/increment to allocate/deallocate the callee-save stack area by
// converting the store/load to use the pre/post-increment version.
static MachineBasicBlock::iterator convertCalleeSaveRestoreToSPPrePostIncDec(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    const DebugLoc &DL, const TargetInstrInfo *TII, int CSStackSizeInc,
    bool NeedsWinCFI, bool *HasWinCFI, bool InProlog = true) {
  // Ignore instructions that do not operate on SP, i.e. shadow call stack
  // instructions and associated CFI instruction.
  while (MBBI->getOpcode() == AArch64::STRXpost ||
         MBBI->getOpcode() == AArch64::LDRXpre ||
         MBBI->getOpcode() == AArch64::CFI_INSTRUCTION) {
    if (MBBI->getOpcode() != AArch64::CFI_INSTRUCTION)
      assert(MBBI->getOperand(0).getReg() != AArch64::SP);
    ++MBBI;
  }
  unsigned NewOpc;
  int Scale = 1;
  switch (MBBI->getOpcode()) {
  default:
    llvm_unreachable("Unexpected callee-save save/restore opcode!");
  case AArch64::STPXi:
    NewOpc = AArch64::STPXpre;
    Scale = 8;
    break;
  case AArch64::STPDi:
    NewOpc = AArch64::STPDpre;
    Scale = 8;
    break;
  case AArch64::STPQi:
    NewOpc = AArch64::STPQpre;
    Scale = 16;
    break;
  case AArch64::STRXui:
    NewOpc = AArch64::STRXpre;
    break;
  case AArch64::STRDui:
    NewOpc = AArch64::STRDpre;
    break;
  case AArch64::STRQui:
    NewOpc = AArch64::STRQpre;
    break;
  case AArch64::LDPXi:
    NewOpc = AArch64::LDPXpost;
    Scale = 8;
    break;
  case AArch64::LDPDi:
    NewOpc = AArch64::LDPDpost;
    Scale = 8;
    break;
  case AArch64::LDPQi:
    NewOpc = AArch64::LDPQpost;
    Scale = 16;
    break;
  case AArch64::LDRXui:
    NewOpc = AArch64::LDRXpost;
    break;
  case AArch64::LDRDui:
    NewOpc = AArch64::LDRDpost;
    break;
  case AArch64::LDRQui:
    NewOpc = AArch64::LDRQpost;
    break;
  }
  // Get rid of the SEH code associated with the old instruction.
  if (NeedsWinCFI) {
    auto SEH = std::next(MBBI);
    if (AArch64InstrInfo::isSEHInstruction(*SEH))
      SEH->eraseFromParent();
  }

  MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(NewOpc));
  MIB.addReg(AArch64::SP, RegState::Define);

  // Copy all operands other than the immediate offset.
  unsigned OpndIdx = 0;
  for (unsigned OpndEnd = MBBI->getNumOperands() - 1; OpndIdx < OpndEnd;
       ++OpndIdx)
    MIB.add(MBBI->getOperand(OpndIdx));

  assert(MBBI->getOperand(OpndIdx).getImm() == 0 &&
         "Unexpected immediate offset in first/last callee-save save/restore "
         "instruction!");
  assert(MBBI->getOperand(OpndIdx - 1).getReg() == AArch64::SP &&
         "Unexpected base register in callee-save save/restore instruction!");
  assert(CSStackSizeInc % Scale == 0);
  MIB.addImm(CSStackSizeInc / Scale);

  MIB.setMIFlags(MBBI->getFlags());
  MIB.setMemRefs(MBBI->memoperands());

  // Generate a new SEH code that corresponds to the new instruction.
  if (NeedsWinCFI) {
    *HasWinCFI = true;
    InsertSEH(*MIB, *TII,
              InProlog ? MachineInstr::FrameSetup : MachineInstr::FrameDestroy);
  }

  return std::prev(MBB.erase(MBBI));
}

// Fix up callee-save register save/restore instructions to take into account
// combined SP bump by adding the local stack size to the stack offsets.
static void fixupCalleeSaveRestoreStackOffset(MachineInstr &MI,
                                              unsigned LocalStackSize,
                                              bool NeedsWinCFI,
                                              bool *HasWinCFI) {
  if (AArch64InstrInfo::isSEHInstruction(MI))
    return;

  unsigned Opc = MI.getOpcode();

  // Ignore instructions that do not operate on SP, i.e. shadow call stack
  // instructions and associated CFI instruction.
  if (Opc == AArch64::STRXpost || Opc == AArch64::LDRXpre ||
      Opc == AArch64::CFI_INSTRUCTION) {
    if (Opc != AArch64::CFI_INSTRUCTION)
      assert(MI.getOperand(0).getReg() != AArch64::SP);
    return;
  }

  unsigned Scale;
  switch (Opc) {
  case AArch64::STPXi:
  case AArch64::STRXui:
  case AArch64::STPDi:
  case AArch64::STRDui:
  case AArch64::LDPXi:
  case AArch64::LDRXui:
  case AArch64::LDPDi:
  case AArch64::LDRDui:
    Scale = 8;
    break;
  case AArch64::STPQi:
  case AArch64::STRQui:
  case AArch64::LDPQi:
  case AArch64::LDRQui:
    Scale = 16;
    break;
  default:
    llvm_unreachable("Unexpected callee-save save/restore opcode!");
  }

  unsigned OffsetIdx = MI.getNumExplicitOperands() - 1;
  assert(MI.getOperand(OffsetIdx - 1).getReg() == AArch64::SP &&
         "Unexpected base register in callee-save save/restore instruction!");
  // Last operand is immediate offset that needs fixing.
  MachineOperand &OffsetOpnd = MI.getOperand(OffsetIdx);
  // All generated opcodes have scaled offsets.
  assert(LocalStackSize % Scale == 0);
  OffsetOpnd.setImm(OffsetOpnd.getImm() + LocalStackSize / Scale);

  if (NeedsWinCFI) {
    *HasWinCFI = true;
    auto MBBI = std::next(MachineBasicBlock::iterator(MI));
    assert(MBBI != MI.getParent()->end() && "Expecting a valid instruction");
    assert(AArch64InstrInfo::isSEHInstruction(*MBBI) &&
           "Expecting a SEH instruction");
    fixupSEHOpcode(MBBI, LocalStackSize);
  }
}
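
// Illustrative examples for the two helpers above (STP/LDP immediates are in
// 8-byte units):
// - Separate bumps: convertCalleeSaveRestoreToSPPrePostIncDec turns the first
//   callee-save store "stp x20, x19, [sp, #0]" into
//   "stp x20, x19, [sp, #-32]!", so the store itself allocates a 32-byte
//   callee-save area.
// - Combined bump: with 64 bytes of locals folded into one SP decrement,
//   fixupCalleeSaveRestoreStackOffset raises "stp x20, x19, [sp, #16]" to
//   "stp x20, x19, [sp, #80]" (stored immediate 2 + 64/8 = 10).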

static void adaptForLdStOpt(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator FirstSPPopI,
                            MachineBasicBlock::iterator LastPopI) {
  // Sometimes (when we restore in the same order as we save), we can end up
  // with code like this:
  //
  // ldp      x26, x25, [sp]
  // ldp      x24, x23, [sp, #16]
  // ldp      x22, x21, [sp, #32]
  // ldp      x20, x19, [sp, #48]
  // add      sp, sp, #64
  //
  // In this case, it is always better to put the first ldp at the end, so
  // that the load-store optimizer can run and merge the ldp and the add into
  // a post-index ldp.
  // If we managed to grab the first pop instruction, move it to the end.
  if (ReverseCSRRestoreSeq)
    MBB.splice(FirstSPPopI, &MBB, LastPopI);
  // We should end up with something like this now:
  //
  // ldp      x24, x23, [sp, #16]
  // ldp      x22, x21, [sp, #32]
  // ldp      x20, x19, [sp, #48]
  // ldp      x26, x25, [sp]
  // add      sp, sp, #64
  //
  // and the load-store optimizer can merge the last two instructions into:
  //
  // ldp      x26, x25, [sp], #64
  //
}

static bool ShouldSignWithAKey(MachineFunction &MF) {
  const Function &F = MF.getFunction();
  if (!F.hasFnAttribute("sign-return-address-key"))
    return true;

  const StringRef Key =
      F.getFnAttribute("sign-return-address-key").getValueAsString();
  assert(Key.equals_lower("a_key") || Key.equals_lower("b_key"));
  return Key.equals_lower("a_key");
}
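
// For example (illustrative): a function carrying the IR attributes
//   "sign-return-address"="non-leaf" "sign-return-address-key"="b_key"
// is signed with PACIBSP in the prologue (and authenticated with AUTIBSP or
// RETAB in the epilogue), but only if it actually spills LR.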

static bool needsWinCFI(const MachineFunction &MF) {
  const Function &F = MF.getFunction();
  return MF.getTarget().getMCAsmInfo()->usesWindowsCFI() &&
         F.needsUnwindTableEntry();
}

static bool isTargetDarwin(const MachineFunction &MF) {
  return MF.getSubtarget<AArch64Subtarget>().isTargetDarwin();
}

void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
                                        MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = MBB.begin();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const Function &F = MF.getFunction();
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  MachineModuleInfo &MMI = MF.getMMI();
  AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  bool needsFrameMoves = (MMI.hasDebugInfo() || F.needsUnwindTableEntry()) &&
                         !MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
  bool HasFP = hasFP(MF);
  bool NeedsWinCFI = needsWinCFI(MF);
  bool HasWinCFI = false;
  auto Cleanup = make_scope_exit([&]() { MF.setHasWinCFI(HasWinCFI); });

  bool IsFunclet = MBB.isEHFuncletEntry();

  // At this point, we're going to decide whether or not the function uses a
  // redzone. In most cases, the function doesn't have a redzone so let's
  // assume that's false and set it to true in the case that there's a redzone.
  AFI->setHasRedZone(false);

  // Debug location must be unknown since the first debug location is used
  // to determine the end of the prologue.
  DebugLoc DL;

  if (ShouldSignReturnAddress(MF)) {
    if (ShouldSignWithAKey(MF))
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::PACIASP))
          .setMIFlag(MachineInstr::FrameSetup);
    else {
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::EMITBKEY))
          .setMIFlag(MachineInstr::FrameSetup);
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::PACIBSP))
          .setMIFlag(MachineInstr::FrameSetup);
    }

    unsigned CFIIndex =
        MF.addFrameInst(MCCFIInstruction::createNegateRAState(nullptr));
    BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
        .addCFIIndex(CFIIndex)
        .setMIFlags(MachineInstr::FrameSetup);
  }

  // All calls are tail calls in GHC calling conv, and functions have no
  // prologue/epilogue.
  if (MF.getFunction().getCallingConv() == CallingConv::GHC)
    return;

  // Set tagged base pointer to the bottom of the stack frame.
  // Ideally it should match SP value after prologue.
  AFI->setTaggedBasePointerOffset(MFI.getStackSize());

  const StackOffset &SVEStackSize = getSVEStackSize(MF);

  // getStackSize() includes all the locals in its size calculation. We don't
  // include these locals when computing the stack size of a funclet, as they
  // are allocated in the parent's stack frame and accessed via the frame
  // pointer from the funclet. We only save the callee saved registers in the
  // funclet, which are really the callee saved registers of the parent
  // function, including the funclet.
  int NumBytes = IsFunclet ? (int)getWinEHFuncletFrameSize(MF)
                           : (int)MFI.getStackSize();
  if (!AFI->hasStackFrame() && !windowsRequiresStackProbe(MF, NumBytes)) {
    assert(!HasFP && "unexpected function without stack frame but with FP");
    assert(!SVEStackSize &&
           "unexpected function without stack frame but with SVE objects");
    // All of the stack allocation is for locals.
    AFI->setLocalStackSize(NumBytes);
    if (!NumBytes)
      return;
    // REDZONE: If the stack size is less than 128 bytes, we don't need
    // to actually allocate.
    if (canUseRedZone(MF)) {
      AFI->setHasRedZone(true);
      ++NumRedZoneFunctions;
    } else {
      emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP,
                      {-NumBytes, MVT::i8}, TII, MachineInstr::FrameSetup,
                      false, NeedsWinCFI, &HasWinCFI);
      if (!NeedsWinCFI) {
        // Label used to tie together the PROLOG_LABEL and the MachineMoves.
        MCSymbol *FrameLabel = MMI.getContext().createTempSymbol();
        // Encode the stack size of the leaf function.
        unsigned CFIIndex = MF.addFrameInst(
            MCCFIInstruction::createDefCfaOffset(FrameLabel, -NumBytes));
        BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex)
            .setMIFlags(MachineInstr::FrameSetup);
      }
    }

    if (NeedsWinCFI) {
      HasWinCFI = true;
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_PrologEnd))
          .setMIFlag(MachineInstr::FrameSetup);
    }

    return;
  }

  bool IsWin64 =
      Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv());
  // Var args are accounted for in the containing function, so don't
  // include them for funclets.
  unsigned FixedObject = (IsWin64 && !IsFunclet) ?
                         alignTo(AFI->getVarArgsGPRSize(), 16) : 0;

  auto PrologueSaveSize = AFI->getCalleeSavedStackSize() + FixedObject;
  // All of the remaining stack allocations are for locals.
  AFI->setLocalStackSize(NumBytes - PrologueSaveSize);
  bool CombineSPBump = shouldCombineCSRLocalStackBump(MF, NumBytes);
  if (CombineSPBump) {
    assert(!SVEStackSize && "Cannot combine SP bump with SVE");
    emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP,
                    {-NumBytes, MVT::i8}, TII, MachineInstr::FrameSetup, false,
                    NeedsWinCFI, &HasWinCFI);
    NumBytes = 0;
  } else if (PrologueSaveSize != 0) {
    MBBI = convertCalleeSaveRestoreToSPPrePostIncDec(
        MBB, MBBI, DL, TII, -PrologueSaveSize, NeedsWinCFI, &HasWinCFI);
    NumBytes -= PrologueSaveSize;
  }
  assert(NumBytes >= 0 && "Negative stack allocation size!?");

  // Move past the saves of the callee-saved registers, fixing up the offsets
  // and pre-inc if we decided to combine the callee-save and local stack
  // pointer bump above.
  MachineBasicBlock::iterator End = MBB.end();
  while (MBBI != End && MBBI->getFlag(MachineInstr::FrameSetup)) {
    if (CombineSPBump)
      fixupCalleeSaveRestoreStackOffset(*MBBI, AFI->getLocalStackSize(),
                                        NeedsWinCFI, &HasWinCFI);
    ++MBBI;
  }

  // The code below is not applicable to funclets. We have emitted all the SEH
  // opcodes that we needed to emit. The FP and BP belong to the containing
  // function.
  if (IsFunclet) {
    if (NeedsWinCFI) {
      HasWinCFI = true;
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_PrologEnd))
          .setMIFlag(MachineInstr::FrameSetup);
    }

    // SEH funclets are passed the frame pointer in X1. If the parent
    // function uses the base register, then the base register is used
    // directly, and is not retrieved from X1.
    if (F.hasPersonalityFn()) {
      EHPersonality Per = classifyEHPersonality(F.getPersonalityFn());
      if (isAsynchronousEHPersonality(Per)) {
        BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::COPY), AArch64::FP)
            .addReg(AArch64::X1).setMIFlag(MachineInstr::FrameSetup);
        MBB.addLiveIn(AArch64::X1);
      }
    }

    return;
  }

  if (HasFP) {
    // Only set up FP if we actually need to.
    int FPOffset = isTargetDarwin(MF) ? (AFI->getCalleeSavedStackSize() - 16) : 0;

    if (CombineSPBump)
      FPOffset += AFI->getLocalStackSize();

    // Issue    sub fp, sp, FPOffset  or
    //          mov fp, sp            when FPOffset is zero.
    // Note: All stores of callee-saved registers are marked as "FrameSetup".
    // This code marks the instruction(s) that set the FP also.
    emitFrameOffset(MBB, MBBI, DL, AArch64::FP, AArch64::SP,
                    {FPOffset, MVT::i8}, TII, MachineInstr::FrameSetup, false,
                    NeedsWinCFI, &HasWinCFI);
  }

  if (windowsRequiresStackProbe(MF, NumBytes)) {
    uint32_t NumWords = NumBytes >> 4;
    if (NeedsWinCFI) {
      HasWinCFI = true;
      // alloc_l can hold at most 256MB, so assume that NumBytes doesn't
      // exceed this amount. We need to move at most 2^24 - 1 into x15.
      // This is at most two instructions, MOVZ followed by MOVK.
      // TODO: Fix to use multiple stack alloc unwind codes for stacks
      // exceeding 256MB in size.
      if (NumBytes >= (1 << 28))
        report_fatal_error("Stack size cannot exceed 256MB for stack "
                           "unwinding purposes");

      uint32_t LowNumWords = NumWords & 0xFFFF;
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVZXi), AArch64::X15)
          .addImm(LowNumWords)
          .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
          .setMIFlag(MachineInstr::FrameSetup);
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_Nop))
          .setMIFlag(MachineInstr::FrameSetup);
      if ((NumWords & 0xFFFF0000) != 0) {
        BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVKXi), AArch64::X15)
            .addReg(AArch64::X15)
            .addImm((NumWords & 0xFFFF0000) >> 16) // High half
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 16))
            .setMIFlag(MachineInstr::FrameSetup);
        BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_Nop))
            .setMIFlag(MachineInstr::FrameSetup);
      }
    } else {
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVi64imm), AArch64::X15)
          .addImm(NumWords)
          .setMIFlags(MachineInstr::FrameSetup);
    }

    switch (MF.getTarget().getCodeModel()) {
    case CodeModel::Tiny:
    case CodeModel::Small:
    case CodeModel::Medium:
    case CodeModel::Kernel:
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::BL))
          .addExternalSymbol("__chkstk")
          .addReg(AArch64::X15, RegState::Implicit)
          .addReg(AArch64::X16, RegState::Implicit | RegState::Define | RegState::Dead)
          .addReg(AArch64::X17, RegState::Implicit | RegState::Define | RegState::Dead)
          .addReg(AArch64::NZCV, RegState::Implicit | RegState::Define | RegState::Dead)
          .setMIFlags(MachineInstr::FrameSetup);
      if (NeedsWinCFI) {
        HasWinCFI = true;
        BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_Nop))
            .setMIFlag(MachineInstr::FrameSetup);
      }
      break;
    case CodeModel::Large:
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVaddrEXT))
          .addReg(AArch64::X16, RegState::Define)
          .addExternalSymbol("__chkstk")
          .addExternalSymbol("__chkstk")
          .setMIFlags(MachineInstr::FrameSetup);
      if (NeedsWinCFI) {
        HasWinCFI = true;
        BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_Nop))
            .setMIFlag(MachineInstr::FrameSetup);
      }

      BuildMI(MBB, MBBI, DL, TII->get(AArch64::BLR))
          .addReg(AArch64::X16, RegState::Kill)
          .addReg(AArch64::X15, RegState::Implicit | RegState::Define)
          .addReg(AArch64::X16, RegState::Implicit | RegState::Define | RegState::Dead)
          .addReg(AArch64::X17, RegState::Implicit | RegState::Define | RegState::Dead)
          .addReg(AArch64::NZCV, RegState::Implicit | RegState::Define | RegState::Dead)
          .setMIFlags(MachineInstr::FrameSetup);
      if (NeedsWinCFI) {
        HasWinCFI = true;
        BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_Nop))
            .setMIFlag(MachineInstr::FrameSetup);
      }
      break;
    }

    BuildMI(MBB, MBBI, DL, TII->get(AArch64::SUBXrx64), AArch64::SP)
        .addReg(AArch64::SP, RegState::Kill)
        .addReg(AArch64::X15, RegState::Kill)
        .addImm(AArch64_AM::getArithExtendImm(AArch64_AM::UXTX, 4))
        .setMIFlags(MachineInstr::FrameSetup);
    if (NeedsWinCFI) {
      HasWinCFI = true;
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_StackAlloc))
          .addImm(NumBytes)
          .setMIFlag(MachineInstr::FrameSetup);
    }
    NumBytes = 0;
  }
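
  // Illustrative example for the probe sequence above: a 1 MiB frame gives
  // NumWords = 0x100000 >> 4 = 0x10000, materialized as
  //   movz x15, #0x0
  //   movk x15, #0x1, lsl #16
  // before the __chkstk call, which touches each page of the new area; sp is
  // then dropped by "sub sp, sp, x15, uxtx #4" (x15 * 16 bytes).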

  emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP, -SVEStackSize, TII,
                  MachineInstr::FrameSetup);

  // Allocate space for the rest of the frame.
  if (NumBytes) {
    const bool NeedsRealignment = RegInfo->needsStackRealignment(MF);
    unsigned scratchSPReg = AArch64::SP;

    if (NeedsRealignment) {
      scratchSPReg = findScratchNonCalleeSaveRegister(&MBB);
      assert(scratchSPReg != AArch64::NoRegister);
    }

    // If we're a leaf function, try using the red zone.
    if (!canUseRedZone(MF))
      // FIXME: in the case of dynamic re-alignment, NumBytes doesn't have
      // the correct value here, as NumBytes also includes padding bytes,
      // which shouldn't be counted here.
      emitFrameOffset(MBB, MBBI, DL, scratchSPReg, AArch64::SP,
                      {-NumBytes, MVT::i8}, TII, MachineInstr::FrameSetup,
                      false, NeedsWinCFI, &HasWinCFI);

    if (NeedsRealignment) {
      const unsigned Alignment = MFI.getMaxAlignment();
      const unsigned NrBitsToZero = countTrailingZeros(Alignment);
      assert(NrBitsToZero > 1);
      assert(scratchSPReg != AArch64::SP);

      // SUB X9, SP, NumBytes
      //   -- X9 is temporary register, so shouldn't contain any live data here,
      //   -- free to use. This is already produced by emitFrameOffset above.
      // AND SP, X9, 0b11111...0000
      // The logical immediates have a non-trivial encoding. The following
      // formula computes the encoded immediate with all ones but
      // NrBitsToZero zero bits as least significant bits.
      uint32_t andMaskEncoded = (1 << 12)                         // = N
                                | ((64 - NrBitsToZero) << 6)      // immr
                                | ((64 - NrBitsToZero - 1) << 0); // imms

      BuildMI(MBB, MBBI, DL, TII->get(AArch64::ANDXri), AArch64::SP)
          .addReg(scratchSPReg, RegState::Kill)
          .addImm(andMaskEncoded);
      AFI->setStackRealigned(true);
      if (NeedsWinCFI) {
        HasWinCFI = true;
        BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_StackAlloc))
            .addImm(NumBytes & andMaskEncoded)
            .setMIFlag(MachineInstr::FrameSetup);
      }
    }
  }
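
  // Illustrative example for the mask encoding above: for a 64-byte
  // alignment, NrBitsToZero = 6, so immr = 64 - 6 = 58 and
  // imms = 64 - 6 - 1 = 57, giving
  //   andMaskEncoded = 0x1000 | (58 << 6) | 57 = 0x1EB9,
  // which encodes the 64-bit logical immediate 0xFFFFFFFFFFFFFFC0 used to
  // clear the low 6 bits of the realigned SP.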

  // If we need a base pointer, set it up here. It's whatever the value of the
  // stack pointer is at this point. Any variable size objects will be allocated
  // after this, so we can still use the base pointer to reference locals.
  //
  // FIXME: Clarify FrameSetup flags here.
  // Note: Use emitFrameOffset() like above for FP if the FrameSetup flag is
  // needed.
  if (RegInfo->hasBasePointer(MF)) {
    TII->copyPhysReg(MBB, MBBI, DL, RegInfo->getBaseRegister(), AArch64::SP,
                     false);
    if (NeedsWinCFI) {
      HasWinCFI = true;
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_Nop))
          .setMIFlag(MachineInstr::FrameSetup);
    }
  }

  // The very last FrameSetup instruction indicates the end of prologue. Emit a
  // SEH opcode indicating the prologue end.
  if (NeedsWinCFI && HasWinCFI) {
    BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_PrologEnd))
        .setMIFlag(MachineInstr::FrameSetup);
  }

  if (needsFrameMoves) {
    const DataLayout &TD = MF.getDataLayout();
    const int StackGrowth = isTargetDarwin(MF)
                                ? (2 * -TD.getPointerSize(0))
                                : -AFI->getCalleeSavedStackSize();
    Register FramePtr = RegInfo->getFrameRegister(MF);
    // An example of the prologue:
    //
    //     .globl __foo
    //     .align 2
    //  __foo:
    //  Ltmp0:
    //     .cfi_startproc
    //     .cfi_personality 155, ___gxx_personality_v0
    //  Leh_func_begin:
    //     .cfi_lsda 16, Lexception33
    //
    //     stp  xa,bx, [sp, -#offset]!
    //     ...
    //     stp  x28, x27, [sp, #offset-32]
    //     stp  fp, lr, [sp, #offset-16]
    //     add  fp, sp, #offset - 16
    //     sub  sp, sp, #1360
    //
    // The Stack:
    //       +-------------------------------------------+
    // 10000 | ........ | ........ | ........ | ........ |
    // 10004 | ........ | ........ | ........ | ........ |
    //       +-------------------------------------------+
    // 10008 | ........ | ........ | ........ | ........ |
    // 1000c | ........ | ........ | ........ | ........ |
    //       +===========================================+
    // 10010 |                X28 Register               |
    // 10014 |                X28 Register               |
    //       +-------------------------------------------+
    // 10018 |                X27 Register               |
    // 1001c |                X27 Register               |
    //       +===========================================+
    // 10020 |               Frame Pointer               |
    // 10024 |               Frame Pointer               |
    //       +-------------------------------------------+
    // 10028 |               Link Register               |
    // 1002c |               Link Register               |
    //       +===========================================+
    // 10030 | ........ | ........ | ........ | ........ |
    // 10034 | ........ | ........ | ........ | ........ |
    //       +-------------------------------------------+
    // 10038 | ........ | ........ | ........ | ........ |
    // 1003c | ........ | ........ | ........ | ........ |
    //       +-------------------------------------------+
    //
    //     [sp] = 10030        ::    >>initial value<<
    //     sp = 10020          ::  stp fp, lr, [sp, #-16]!
    //     fp = sp == 10020    ::  mov fp, sp
    //     [sp] == 10020       ::  stp x28, x27, [sp, #-16]!
    //     sp == 10010         ::    >>final value<<
    //
    // The frame pointer (w29) points to address 10020. If we use an offset of
    // '16' from 'w29', we get the CFI offsets of -8 for w30, -16 for w29, -24
    // for w27, and -32 for w28:
    //
    //  Ltmp1:
    //     .cfi_def_cfa w29, 16
    //  Ltmp2:
    //     .cfi_offset w30, -8
    //  Ltmp3:
    //     .cfi_offset w29, -16
    //  Ltmp4:
    //     .cfi_offset w27, -24
    //  Ltmp5:
    //     .cfi_offset w28, -32

    if (HasFP) {
      // Define the current CFA rule to use the provided FP.
      unsigned Reg = RegInfo->getDwarfRegNum(FramePtr, true);
      unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createDefCfa(
          nullptr, Reg, StackGrowth - FixedObject));
      BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex)
          .setMIFlags(MachineInstr::FrameSetup);
    } else {
      // Encode the stack size of the leaf function.
      unsigned CFIIndex = MF.addFrameInst(
          MCCFIInstruction::createDefCfaOffset(nullptr, -MFI.getStackSize()));
      BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex)
          .setMIFlags(MachineInstr::FrameSetup);
    }

    // Now emit the moves for whatever callee saved regs we have (including FP,
    // LR if those are saved).
    emitCalleeSavedFrameMoves(MBB, MBBI);
  }
}

static void InsertReturnAddressAuth(MachineFunction &MF,
                                    MachineBasicBlock &MBB) {
  if (!ShouldSignReturnAddress(MF))
    return;
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();

  MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
  DebugLoc DL;
  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();

  // The AUTIASP instruction assembles to a hint instruction before v8.3a so
  // this instruction can safely be used for any v8a architecture.
  // From v8.3a onwards there are optimised authenticate LR and return
  // instructions, namely RETA{A,B}, that can be used instead.
  if (Subtarget.hasV8_3aOps() && MBBI != MBB.end() &&
      MBBI->getOpcode() == AArch64::RET_ReallyLR) {
    BuildMI(MBB, MBBI, DL,
            TII->get(ShouldSignWithAKey(MF) ? AArch64::RETAA : AArch64::RETAB))
        .copyImplicitOps(*MBBI);
    MBB.erase(MBBI);
  } else {
    BuildMI(
        MBB, MBBI, DL,
        TII->get(ShouldSignWithAKey(MF) ? AArch64::AUTIASP : AArch64::AUTIBSP))
        .setMIFlag(MachineInstr::FrameDestroy);
  }
}

static bool isFuncletReturnInstr(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default:
    return false;
  case AArch64::CATCHRET:
  case AArch64::CLEANUPRET:
    return true;
  }
}

void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
                                        MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  DebugLoc DL;
  bool IsTailCallReturn = false;
  bool NeedsWinCFI = needsWinCFI(MF);
  bool HasWinCFI = false;
  bool IsFunclet = false;
  auto WinCFI = make_scope_exit([&]() {
    if (!MF.hasWinCFI())
      MF.setHasWinCFI(HasWinCFI);
  });

  if (MBB.end() != MBBI) {
    DL = MBBI->getDebugLoc();
    unsigned RetOpcode = MBBI->getOpcode();
    IsTailCallReturn = RetOpcode == AArch64::TCRETURNdi ||
                       RetOpcode == AArch64::TCRETURNri ||
                       RetOpcode == AArch64::TCRETURNriBTI;
    IsFunclet = isFuncletReturnInstr(*MBBI);
  }

  int NumBytes = IsFunclet ? (int)getWinEHFuncletFrameSize(MF)
                           : MFI.getStackSize();
  AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();

  // All calls are tail calls in GHC calling conv, and functions have no
  // prologue/epilogue.
  if (MF.getFunction().getCallingConv() == CallingConv::GHC)
    return;

  // Initial and residual are named for consistency with the prologue. Note that
  // in the epilogue, the residual adjustment is executed first.
  uint64_t ArgumentPopSize = 0;
  if (IsTailCallReturn) {
    MachineOperand &StackAdjust = MBBI->getOperand(1);

    // For a tail-call in a callee-pops-arguments environment, some or all of
    // the stack may actually be in use for the call's arguments, this is
    // calculated during LowerCall and consumed here...
    ArgumentPopSize = StackAdjust.getImm();
  } else {
    // ... otherwise the amount to pop is *all* of the argument space,
    // conveniently stored in the MachineFunctionInfo by
    // LowerFormalArguments. This will, of course, be zero for the C calling
    // convention.
    ArgumentPopSize = AFI->getArgumentStackToRestore();
  }

  // The stack frame should be like below,
  //
  //      ----------------------                     ---
  //      |                    |                      |
  //      | BytesInStackArgArea|              CalleeArgStackSize
  //      | (NumReusableBytes) |                (of tail call)
  //      |                    |                     ---
  //      |                    |                      |
  //      ---------------------|        ---           |
  //      |                    |         |            |
  //      |   CalleeSavedReg   |         |            |
  //      | (CalleeSavedStackSize)|      |            |
  //      |                    |         |            |
  //      ---------------------|         |         NumBytes
  //      |                    |     StackSize  (StackAdjustUp)
  //      |   LocalStackSize   |         |            |
  //      | (covering callee   |         |            |
  //      |       args)        |         |            |
  //      |                    |         |            |
  //      ----------------------        ---          ---
  //
  // So NumBytes = StackSize + BytesInStackArgArea - CalleeArgStackSize
  //             = StackSize + ArgumentPopSize
  //
  // AArch64TargetLowering::LowerCall figures out ArgumentPopSize and keeps
  // it as the 2nd argument of AArch64ISD::TC_RETURN.
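  //
  // For example (illustrative): if a tail call reuses only 16 bytes of a
  // 32-byte incoming argument area, ArgumentPopSize = 32 - 16 = 16, and with
  // StackSize = 96 the epilogue must pop NumBytes = 96 + 16 = 112 bytes
  // overall.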

  auto Cleanup = make_scope_exit([&] { InsertReturnAddressAuth(MF, MBB); });

  bool IsWin64 =
      Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv());
  // Var args are accounted for in the containing function, so don't
  // include them for funclets.
  unsigned FixedObject =
      (IsWin64 && !IsFunclet) ? alignTo(AFI->getVarArgsGPRSize(), 16) : 0;

  uint64_t AfterCSRPopSize = ArgumentPopSize;
  auto PrologueSaveSize = AFI->getCalleeSavedStackSize() + FixedObject;
  // We cannot rely on the local stack size set in emitPrologue if the function
  // has funclets, as funclets have different local stack size requirements, and
  // the current value set in emitPrologue may be that of the containing
  // function.
  if (MF.hasEHFunclets())
    AFI->setLocalStackSize(NumBytes - PrologueSaveSize);
  bool CombineSPBump = shouldCombineCSRLocalStackBump(MF, NumBytes);
  // Assume we can't combine the last pop with the sp restore.

  if (!CombineSPBump && PrologueSaveSize != 0) {
    MachineBasicBlock::iterator Pop = std::prev(MBB.getFirstTerminator());
    while (AArch64InstrInfo::isSEHInstruction(*Pop))
      Pop = std::prev(Pop);
    // Converting the last ldp to a post-index ldp is valid only if the last
    // ldp's offset is 0.
    const MachineOperand &OffsetOp = Pop->getOperand(Pop->getNumOperands() - 1);
    // If the offset is 0, convert it to a post-index ldp.
    if (OffsetOp.getImm() == 0)
      convertCalleeSaveRestoreToSPPrePostIncDec(
          MBB, Pop, DL, TII, PrologueSaveSize, NeedsWinCFI, &HasWinCFI, false);
    else {
      // If not, make sure to emit an add after the last ldp.
      // We're doing this by transferring the size to be restored from the
      // adjustment *before* the CSR pops to the adjustment *after* the CSR
      // pops.
      AfterCSRPopSize += PrologueSaveSize;
    }
  }

  // Move past the restores of the callee-saved registers.
  // If we plan on combining the sp bump of the local stack size and the callee
  // save stack size, we might need to adjust the CSR save and restore offsets.
  MachineBasicBlock::iterator LastPopI = MBB.getFirstTerminator();
  MachineBasicBlock::iterator Begin = MBB.begin();
  while (LastPopI != Begin) {
    --LastPopI;
    if (!LastPopI->getFlag(MachineInstr::FrameDestroy)) {
      ++LastPopI;
      break;
    } else if (CombineSPBump)
      fixupCalleeSaveRestoreStackOffset(*LastPopI, AFI->getLocalStackSize(),
                                        NeedsWinCFI, &HasWinCFI);
  }

  if (NeedsWinCFI) {
    HasWinCFI = true;
    BuildMI(MBB, LastPopI, DL, TII->get(AArch64::SEH_EpilogStart))
        .setMIFlag(MachineInstr::FrameDestroy);
  }

  const StackOffset &SVEStackSize = getSVEStackSize(MF);

  // If there is a single SP update, insert it before the ret and we're done.
  if (CombineSPBump) {
    assert(!SVEStackSize && "Cannot combine SP bump with SVE");
    emitFrameOffset(MBB, MBB.getFirstTerminator(), DL, AArch64::SP, AArch64::SP,
                    {NumBytes + (int64_t)AfterCSRPopSize, MVT::i8}, TII,
                    MachineInstr::FrameDestroy, false, NeedsWinCFI, &HasWinCFI);
    if (NeedsWinCFI && HasWinCFI)
      BuildMI(MBB, MBB.getFirstTerminator(), DL,
              TII->get(AArch64::SEH_EpilogEnd))
          .setMIFlag(MachineInstr::FrameDestroy);
    return;
  }

  NumBytes -= PrologueSaveSize;
  assert(NumBytes >= 0 && "Negative stack allocation size!?");

  // Deallocate the SVE area.
  if (SVEStackSize)
    if (!AFI->isStackRealigned())
      emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::SP, SVEStackSize,
                      TII, MachineInstr::FrameDestroy);

  if (!hasFP(MF)) {
    bool RedZone = canUseRedZone(MF);
    // If this was a redzone leaf function, we don't need to restore the
    // stack pointer (but we may need to pop stack args for fastcc).
    if (RedZone && AfterCSRPopSize == 0)
      return;

    bool NoCalleeSaveRestore = PrologueSaveSize == 0;
    int StackRestoreBytes = RedZone ? 0 : NumBytes;
    if (NoCalleeSaveRestore)
      StackRestoreBytes += AfterCSRPopSize;

    // If we were able to combine the local stack pop with the argument pop,
    // then we're done.
    bool Done = NoCalleeSaveRestore || AfterCSRPopSize == 0;

    // If we're done after this, make sure to help the load store optimizer.
    if (Done)
      adaptForLdStOpt(MBB, MBB.getFirstTerminator(), LastPopI);

    emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::SP,
                    {StackRestoreBytes, MVT::i8}, TII,
                    MachineInstr::FrameDestroy, false, NeedsWinCFI, &HasWinCFI);
    if (Done) {
      if (NeedsWinCFI) {
        HasWinCFI = true;
        BuildMI(MBB, MBB.getFirstTerminator(), DL,
                TII->get(AArch64::SEH_EpilogEnd))
            .setMIFlag(MachineInstr::FrameDestroy);
      }
      return;
    }

    NumBytes = 0;
  }
(-(int64_t)AFI->getCalleeSavedStackSize() + 16) : 0; 1528 emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::FP, 1529 {OffsetToFrameRecord, MVT::i8}, 1530 TII, MachineInstr::FrameDestroy, false, NeedsWinCFI); 1531 } else if (NumBytes) 1532 emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::SP, 1533 {NumBytes, MVT::i8}, TII, MachineInstr::FrameDestroy, false, 1534 NeedsWinCFI); 1535 1536 // This must be placed after the callee-save restore code because that code 1537 // assumes the SP is at the same location as it was after the callee-save save 1538 // code in the prologue. 1539 if (AfterCSRPopSize) { 1540 // Find an insertion point for the first ldp so that it goes before the 1541 // shadow call stack epilog instruction. This ensures that the restore of 1542 // lr from x18 is placed after the restore from sp. 1543 auto FirstSPPopI = MBB.getFirstTerminator(); 1544 while (FirstSPPopI != Begin) { 1545 auto Prev = std::prev(FirstSPPopI); 1546 if (Prev->getOpcode() != AArch64::LDRXpre || 1547 Prev->getOperand(0).getReg() == AArch64::SP) 1548 break; 1549 FirstSPPopI = Prev; 1550 } 1551 1552 adaptForLdStOpt(MBB, FirstSPPopI, LastPopI); 1553 1554 emitFrameOffset(MBB, FirstSPPopI, DL, AArch64::SP, AArch64::SP, 1555 {(int64_t)AfterCSRPopSize, MVT::i8}, TII, 1556 MachineInstr::FrameDestroy, false, NeedsWinCFI, &HasWinCFI); 1557 } 1558 if (NeedsWinCFI && HasWinCFI) 1559 BuildMI(MBB, MBB.getFirstTerminator(), DL, TII->get(AArch64::SEH_EpilogEnd)) 1560 .setMIFlag(MachineInstr::FrameDestroy); 1561 1562 MF.setHasWinCFI(HasWinCFI); 1563 } 1564 1565 /// getFrameIndexReference - Provide a base+offset reference to an FI slot for 1566 /// debug info. It's the same as what we use for resolving the code-gen 1567 /// references for now. FIXME: This can go wrong when references are 1568 /// SP-relative and simple call frames aren't used. 1569 int AArch64FrameLowering::getFrameIndexReference(const MachineFunction &MF, 1570 int FI, 1571 unsigned &FrameReg) const { 1572 return resolveFrameIndexReference( 1573 MF, FI, FrameReg, 1574 /*PreferFP=*/ 1575 MF.getFunction().hasFnAttribute(Attribute::SanitizeHWAddress), 1576 /*ForSimm=*/false) 1577 .getBytes(); 1578 } 1579 1580 int AArch64FrameLowering::getNonLocalFrameIndexReference( 1581 const MachineFunction &MF, int FI) const { 1582 return getSEHFrameIndexOffset(MF, FI); 1583 } 1584 1585 static StackOffset getFPOffset(const MachineFunction &MF, int ObjectOffset) { 1586 const auto *AFI = MF.getInfo<AArch64FunctionInfo>(); 1587 const auto &Subtarget = MF.getSubtarget<AArch64Subtarget>(); 1588 bool IsWin64 = 1589 Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv()); 1590 unsigned FixedObject = IsWin64 ? alignTo(AFI->getVarArgsGPRSize(), 16) : 0; 1591 unsigned FPAdjust = isTargetDarwin(MF) ? 16 : AFI->getCalleeSavedStackSize(); 1592 return {ObjectOffset + FixedObject + FPAdjust, MVT::i8}; 1593 } 1594 1595 static StackOffset getStackOffset(const MachineFunction &MF, int ObjectOffset) { 1596 const auto &MFI = MF.getFrameInfo(); 1597 return {ObjectOffset + (int)MFI.getStackSize(), MVT::i8}; 1598 } 1599 1600 int AArch64FrameLowering::getSEHFrameIndexOffset(const MachineFunction &MF, 1601 int FI) const { 1602 const auto *RegInfo = static_cast<const AArch64RegisterInfo *>( 1603 MF.getSubtarget().getRegisterInfo()); 1604 int ObjectOffset = MF.getFrameInfo().getObjectOffset(FI); 1605 return RegInfo->getLocalAddressRegister(MF) == AArch64::FP 1606 ? 
getFPOffset(MF, ObjectOffset).getBytes() 1607 : getStackOffset(MF, ObjectOffset).getBytes(); 1608 } 1609 1610 StackOffset AArch64FrameLowering::resolveFrameIndexReference( 1611 const MachineFunction &MF, int FI, unsigned &FrameReg, bool PreferFP, 1612 bool ForSimm) const { 1613 const auto &MFI = MF.getFrameInfo(); 1614 int ObjectOffset = MFI.getObjectOffset(FI); 1615 bool isFixed = MFI.isFixedObjectIndex(FI); 1616 bool isSVE = MFI.getStackID(FI) == TargetStackID::SVEVector; 1617 return resolveFrameOffsetReference(MF, ObjectOffset, isFixed, isSVE, FrameReg, 1618 PreferFP, ForSimm); 1619 } 1620 1621 StackOffset AArch64FrameLowering::resolveFrameOffsetReference( 1622 const MachineFunction &MF, int ObjectOffset, bool isFixed, bool isSVE, 1623 unsigned &FrameReg, bool PreferFP, bool ForSimm) const { 1624 const auto &MFI = MF.getFrameInfo(); 1625 const auto *RegInfo = static_cast<const AArch64RegisterInfo *>( 1626 MF.getSubtarget().getRegisterInfo()); 1627 const auto *AFI = MF.getInfo<AArch64FunctionInfo>(); 1628 const auto &Subtarget = MF.getSubtarget<AArch64Subtarget>(); 1629 1630 int FPOffset = getFPOffset(MF, ObjectOffset).getBytes(); 1631 int Offset = getStackOffset(MF, ObjectOffset).getBytes(); 1632 bool isCSR = 1633 !isFixed && ObjectOffset >= -((int)AFI->getCalleeSavedStackSize()); 1634 1635 const StackOffset &SVEStackSize = getSVEStackSize(MF); 1636 1637 // Use frame pointer to reference fixed objects. Use it for locals if 1638 // there are VLAs or a dynamically realigned SP (and thus the SP isn't 1639 // reliable as a base). Make sure useFPForScavengingIndex() does the 1640 // right thing for the emergency spill slot. 1641 bool UseFP = false; 1642 if (AFI->hasStackFrame() && !isSVE) { 1643 // We shouldn't prefer using the FP when there is an SVE area 1644 // in between the FP and the non-SVE locals/spills. 1645 PreferFP &= !SVEStackSize; 1646 1647 // Note: Keeping the following as multiple 'if' statements rather than 1648 // merging to a single expression for readability. 1649 // 1650 // Argument access should always use the FP. 1651 if (isFixed) { 1652 UseFP = hasFP(MF); 1653 } else if (isCSR && RegInfo->needsStackRealignment(MF)) { 1654 // References to the CSR area must use FP if we're re-aligning the stack 1655 // since the dynamically-sized alignment padding is between the SP/BP and 1656 // the CSR area. 1657 assert(hasFP(MF) && "Re-aligned stack must have frame pointer"); 1658 UseFP = true; 1659 } else if (hasFP(MF) && !RegInfo->needsStackRealignment(MF)) { 1660 // If the FPOffset is negative and we're producing a signed immediate, we 1661 // have to keep in mind that the available offset range for negative 1662 // offsets is smaller than for positive ones. If an offset is available 1663 // via the FP and the SP, use whichever is closest. 1664 bool FPOffsetFits = !ForSimm || FPOffset >= -256; 1665 PreferFP |= Offset > -FPOffset; 1666 1667 if (MFI.hasVarSizedObjects()) { 1668 // If we have variable sized objects, we can use either FP or BP, as the 1669 // SP offset is unknown. We can use the base pointer if we have one and 1670 // FP is not preferred. If not, we're stuck with using FP. 1671 bool CanUseBP = RegInfo->hasBasePointer(MF); 1672 if (FPOffsetFits && CanUseBP) // Both are ok. Pick the best. 1673 UseFP = PreferFP; 1674 else if (!CanUseBP) { // Can't use BP. Forced to use FP. 1675 assert(!SVEStackSize && "Expected BP to be available"); 1676 UseFP = true; 1677 } 1678 // else we can use BP and FP, but the offset from FP won't fit. 
1679 // That will make us scavenge registers which we can probably avoid by 1680 // using BP. If it won't fit for BP either, we'll scavenge anyway. 1681 } else if (FPOffset >= 0) { 1682 // Use SP or FP, whichever gives us the best chance of the offset 1683 // being in range for direct access. If the FPOffset is positive, 1684 // that'll always be best, as the SP will be even further away. 1685 UseFP = true; 1686 } else if (MF.hasEHFunclets() && !RegInfo->hasBasePointer(MF)) { 1687 // Funclets access the locals contained in the parent's stack frame 1688 // via the frame pointer, so we have to use the FP in the parent 1689 // function. 1690 (void) Subtarget; 1691 assert( 1692 Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv()) && 1693 "Funclets should only be present on Win64"); 1694 UseFP = true; 1695 } else { 1696 // We have the choice between FP and (SP or BP). 1697 if (FPOffsetFits && PreferFP) // If FP is the best fit, use it. 1698 UseFP = true; 1699 } 1700 } 1701 } 1702 1703 assert(((isFixed || isCSR) || !RegInfo->needsStackRealignment(MF) || !UseFP) && 1704 "In the presence of dynamic stack pointer realignment, " 1705 "non-argument/CSR objects cannot be accessed through the frame pointer"); 1706 1707 if (isSVE) { 1708 int64_t OffsetToSVEArea = 1709 MFI.getStackSize() - AFI->getCalleeSavedStackSize(); 1710 StackOffset FPOffset = {ObjectOffset, MVT::nxv1i8}; 1711 StackOffset SPOffset = SVEStackSize + 1712 StackOffset(ObjectOffset, MVT::nxv1i8) + 1713 StackOffset(OffsetToSVEArea, MVT::i8); 1714 // Always use the FP for SVE spills if available and beneficial. 1715 if (hasFP(MF) && 1716 (SPOffset.getBytes() || 1717 FPOffset.getScalableBytes() < SPOffset.getScalableBytes() || 1718 RegInfo->needsStackRealignment(MF))) { 1719 FrameReg = RegInfo->getFrameRegister(MF); 1720 return FPOffset; 1721 } 1722 1723 FrameReg = RegInfo->hasBasePointer(MF) ? RegInfo->getBaseRegister() 1724 : (unsigned)AArch64::SP; 1725 return SPOffset; 1726 } 1727 1728 StackOffset ScalableOffset = {}; 1729 if (UseFP && !(isFixed || isCSR)) 1730 ScalableOffset = -SVEStackSize; 1731 if (!UseFP && (isFixed || isCSR)) 1732 ScalableOffset = SVEStackSize; 1733 1734 if (UseFP) { 1735 FrameReg = RegInfo->getFrameRegister(MF); 1736 return StackOffset(FPOffset, MVT::i8) + ScalableOffset; 1737 } 1738 1739 // Use the base pointer if we have one. 1740 if (RegInfo->hasBasePointer(MF)) 1741 FrameReg = RegInfo->getBaseRegister(); 1742 else { 1743 assert(!MFI.hasVarSizedObjects() && 1744 "Can't use SP when we have var sized objects."); 1745 FrameReg = AArch64::SP; 1746 // If we're using the red zone for this function, the SP won't actually 1747 // be adjusted, so the offsets will be negative. They're also all 1748 // within range of the signed 9-bit immediate instructions. 1749 if (canUseRedZone(MF)) 1750 Offset -= AFI->getLocalStackSize(); 1751 } 1752 1753 return StackOffset(Offset, MVT::i8) + ScalableOffset; 1754 } 1755 1756 static unsigned getPrologueDeath(MachineFunction &MF, unsigned Reg) { 1757 // Do not set a kill flag on values that are also marked as live-in. This 1758 // happens with the @llvm-returnaddress intrinsic and with arguments passed in 1759 // callee saved registers. 1760 // Omitting the kill flags is conservatively correct even if the live-in 1761 // is not used after all. 
1762   bool IsLiveIn = MF.getRegInfo().isLiveIn(Reg);
1763   return getKillRegState(!IsLiveIn);
1764 }
1765 
1766 static bool produceCompactUnwindFrame(MachineFunction &MF) {
1767   const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
1768   AttributeList Attrs = MF.getFunction().getAttributes();
1769   return Subtarget.isTargetMachO() &&
1770          !(Subtarget.getTargetLowering()->supportSwiftError() &&
1771            Attrs.hasAttrSomewhere(Attribute::SwiftError));
1772 }
1773 
1774 static bool invalidateWindowsRegisterPairing(unsigned Reg1, unsigned Reg2,
1775                                              bool NeedsWinCFI) {
1776   // If we are generating register pairs for a Windows function that requires
1777   // EH support, then pair consecutive registers only. There are no unwind
1778   // opcodes for saves/restores of non-consecutive register pairs.
1779   // The unwind opcodes are save_regp, save_regp_x, save_fregp, save_fregp_x.
1780   // https://docs.microsoft.com/en-us/cpp/build/arm64-exception-handling
1781 
1782   // TODO: LR can be paired with any register. We don't support this yet in
1783   // the MCLayer. We need to add support for the save_lrpair unwind code.
1784   if (!NeedsWinCFI)
1785     return false;
1786   if (Reg2 == Reg1 + 1)
1787     return false;
1788   return true;
1789 }
1790 
1791 /// Returns true if Reg1 and Reg2 cannot be paired using a ldp/stp instruction.
1792 /// WindowsCFI requires that only consecutive registers can be paired.
1793 /// LR and FP need to be allocated together when the frame needs to save
1794 /// the frame-record. This means any other register pairing with LR is invalid.
1795 static bool invalidateRegisterPairing(unsigned Reg1, unsigned Reg2,
1796                                       bool NeedsWinCFI, bool NeedsFrameRecord) {
1797   if (NeedsWinCFI)
1798     return invalidateWindowsRegisterPairing(Reg1, Reg2, true);
1799 
1800   // If we need to store the frame record, don't pair any register
1801   // with LR other than FP.
1802   if (NeedsFrameRecord)
1803     return Reg2 == AArch64::LR;
1804 
1805   return false;
1806 }
1807 
1808 namespace {
1809 
1810 struct RegPairInfo {
1811   unsigned Reg1 = AArch64::NoRegister;
1812   unsigned Reg2 = AArch64::NoRegister;
1813   int FrameIdx;
1814   int Offset;
1815   enum RegType { GPR, FPR64, FPR128 } Type;
1816 
1817   RegPairInfo() = default;
1818 
1819   bool isPaired() const { return Reg2 != AArch64::NoRegister; }
1820 };
1821 
1822 } // end anonymous namespace
1823 
1824 static void computeCalleeSaveRegisterPairs(
1825     MachineFunction &MF, const std::vector<CalleeSavedInfo> &CSI,
1826     const TargetRegisterInfo *TRI, SmallVectorImpl<RegPairInfo> &RegPairs,
1827     bool &NeedShadowCallStackProlog, bool NeedsFrameRecord) {
1828 
1829   if (CSI.empty())
1830     return;
1831 
1832   bool NeedsWinCFI = needsWinCFI(MF);
1833   AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
1834   MachineFrameInfo &MFI = MF.getFrameInfo();
1835   CallingConv::ID CC = MF.getFunction().getCallingConv();
1836   unsigned Count = CSI.size();
1837   (void)CC;
1838   // MachO's compact unwind format relies on all registers being stored in
1839   // pairs.
1840   assert((!produceCompactUnwindFrame(MF) ||
1841           CC == CallingConv::PreserveMost ||
1842           (Count & 1) == 0) &&
1843          "Odd number of callee-saved regs to spill!");
1844   int Offset = AFI->getCalleeSavedStackSize();
1845   // On Linux, we will have either one or zero non-paired registers. On Windows
1846   // with CFI, we can have multiple unpaired registers in order to utilize the
1847   // available unwind codes. This flag assures that the alignment fixup is done
1848   // only once, as intended.
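  // For example, saving five GPRs leaves exactly one unpaired register on
  // Linux, whereas a Windows CFI function saving non-consecutive registers
  // (say x21 and x23; illustrative choices) keeps each of them unpaired so
  // that the save_reg family of unwind codes can still describe them.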
1849 bool FixupDone = false; 1850 for (unsigned i = 0; i < Count; ++i) { 1851 RegPairInfo RPI; 1852 RPI.Reg1 = CSI[i].getReg(); 1853 1854 if (AArch64::GPR64RegClass.contains(RPI.Reg1)) 1855 RPI.Type = RegPairInfo::GPR; 1856 else if (AArch64::FPR64RegClass.contains(RPI.Reg1)) 1857 RPI.Type = RegPairInfo::FPR64; 1858 else if (AArch64::FPR128RegClass.contains(RPI.Reg1)) 1859 RPI.Type = RegPairInfo::FPR128; 1860 else 1861 llvm_unreachable("Unsupported register class."); 1862 1863 // Add the next reg to the pair if it is in the same register class. 1864 if (i + 1 < Count) { 1865 unsigned NextReg = CSI[i + 1].getReg(); 1866 switch (RPI.Type) { 1867 case RegPairInfo::GPR: 1868 if (AArch64::GPR64RegClass.contains(NextReg) && 1869 !invalidateRegisterPairing(RPI.Reg1, NextReg, NeedsWinCFI, 1870 NeedsFrameRecord)) 1871 RPI.Reg2 = NextReg; 1872 break; 1873 case RegPairInfo::FPR64: 1874 if (AArch64::FPR64RegClass.contains(NextReg) && 1875 !invalidateWindowsRegisterPairing(RPI.Reg1, NextReg, NeedsWinCFI)) 1876 RPI.Reg2 = NextReg; 1877 break; 1878 case RegPairInfo::FPR128: 1879 if (AArch64::FPR128RegClass.contains(NextReg)) 1880 RPI.Reg2 = NextReg; 1881 break; 1882 } 1883 } 1884 1885 // If either of the registers to be saved is the lr register, it means that 1886 // we also need to save lr in the shadow call stack. 1887 if ((RPI.Reg1 == AArch64::LR || RPI.Reg2 == AArch64::LR) && 1888 MF.getFunction().hasFnAttribute(Attribute::ShadowCallStack)) { 1889 if (!MF.getSubtarget<AArch64Subtarget>().isXRegisterReserved(18)) 1890 report_fatal_error("Must reserve x18 to use shadow call stack"); 1891 NeedShadowCallStackProlog = true; 1892 } 1893 1894 // GPRs and FPRs are saved in pairs of 64-bit regs. We expect the CSI 1895 // list to come in sorted by frame index so that we can issue the store 1896 // pair instructions directly. Assert if we see anything otherwise. 1897 // 1898 // The order of the registers in the list is controlled by 1899 // getCalleeSavedRegs(), so they will always be in-order, as well. 1900 assert((!RPI.isPaired() || 1901 (CSI[i].getFrameIdx() + 1 == CSI[i + 1].getFrameIdx())) && 1902 "Out of order callee saved regs!"); 1903 1904 assert((!RPI.isPaired() || !NeedsFrameRecord || RPI.Reg2 != AArch64::FP || 1905 RPI.Reg1 == AArch64::LR) && 1906 "FrameRecord must be allocated together with LR"); 1907 1908 // MachO's compact unwind format relies on all registers being stored in 1909 // adjacent register pairs. 1910 assert((!produceCompactUnwindFrame(MF) || 1911 CC == CallingConv::PreserveMost || 1912 (RPI.isPaired() && 1913 ((RPI.Reg1 == AArch64::LR && RPI.Reg2 == AArch64::FP) || 1914 RPI.Reg1 + 1 == RPI.Reg2))) && 1915 "Callee-save registers not saved as adjacent register pair!"); 1916 1917 RPI.FrameIdx = CSI[i].getFrameIdx(); 1918 1919 int Scale = RPI.Type == RegPairInfo::FPR128 ? 16 : 8; 1920 Offset -= RPI.isPaired() ? 2 * Scale : Scale; 1921 1922 // Round up size of non-pair to pair size if we need to pad the 1923 // callee-save area to ensure 16-byte alignment. 
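    // Worked example (assumed sizes): CalleeSavedStackSize == 32 with one GPR
    // pair and one unpaired GPR gives Offset 32 -> 16 (pair, RPI.Offset == 2)
    // -> 8 -> 0 after the extra 8-byte fixup below (unpaired, RPI.Offset == 0).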
1924 if (AFI->hasCalleeSaveStackFreeSpace() && !FixupDone && 1925 RPI.Type != RegPairInfo::FPR128 && !RPI.isPaired()) { 1926 FixupDone = true; 1927 Offset -= 8; 1928 assert(Offset % 16 == 0); 1929 assert(MFI.getObjectAlignment(RPI.FrameIdx) <= 16); 1930 MFI.setObjectAlignment(RPI.FrameIdx, 16); 1931 } 1932 1933 assert(Offset % Scale == 0); 1934 RPI.Offset = Offset / Scale; 1935 assert((RPI.Offset >= -64 && RPI.Offset <= 63) && 1936 "Offset out of bounds for LDP/STP immediate"); 1937 1938 RegPairs.push_back(RPI); 1939 if (RPI.isPaired()) 1940 ++i; 1941 } 1942 } 1943 1944 bool AArch64FrameLowering::spillCalleeSavedRegisters( 1945 MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, 1946 const std::vector<CalleeSavedInfo> &CSI, 1947 const TargetRegisterInfo *TRI) const { 1948 MachineFunction &MF = *MBB.getParent(); 1949 const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo(); 1950 bool NeedsWinCFI = needsWinCFI(MF); 1951 DebugLoc DL; 1952 SmallVector<RegPairInfo, 8> RegPairs; 1953 1954 bool NeedShadowCallStackProlog = false; 1955 computeCalleeSaveRegisterPairs(MF, CSI, TRI, RegPairs, 1956 NeedShadowCallStackProlog, hasFP(MF)); 1957 const MachineRegisterInfo &MRI = MF.getRegInfo(); 1958 1959 if (NeedShadowCallStackProlog) { 1960 // Shadow call stack prolog: str x30, [x18], #8 1961 BuildMI(MBB, MI, DL, TII.get(AArch64::STRXpost)) 1962 .addReg(AArch64::X18, RegState::Define) 1963 .addReg(AArch64::LR) 1964 .addReg(AArch64::X18) 1965 .addImm(8) 1966 .setMIFlag(MachineInstr::FrameSetup); 1967 1968 if (NeedsWinCFI) 1969 BuildMI(MBB, MI, DL, TII.get(AArch64::SEH_Nop)) 1970 .setMIFlag(MachineInstr::FrameSetup); 1971 1972 if (!MF.getFunction().hasFnAttribute(Attribute::NoUnwind)) { 1973 // Emit a CFI instruction that causes 8 to be subtracted from the value of 1974 // x18 when unwinding past this frame. 1975 static const char CFIInst[] = { 1976 dwarf::DW_CFA_val_expression, 1977 18, // register 1978 2, // length 1979 static_cast<char>(unsigned(dwarf::DW_OP_breg18)), 1980 static_cast<char>(-8) & 0x7f, // addend (sleb128) 1981 }; 1982 unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createEscape( 1983 nullptr, StringRef(CFIInst, sizeof(CFIInst)))); 1984 BuildMI(MBB, MI, DL, TII.get(AArch64::CFI_INSTRUCTION)) 1985 .addCFIIndex(CFIIndex) 1986 .setMIFlag(MachineInstr::FrameSetup); 1987 } 1988 1989 // This instruction also makes x18 live-in to the entry block. 1990 MBB.addLiveIn(AArch64::X18); 1991 } 1992 1993 for (auto RPII = RegPairs.rbegin(), RPIE = RegPairs.rend(); RPII != RPIE; 1994 ++RPII) { 1995 RegPairInfo RPI = *RPII; 1996 unsigned Reg1 = RPI.Reg1; 1997 unsigned Reg2 = RPI.Reg2; 1998 unsigned StrOpc; 1999 2000 // Issue sequence of spills for cs regs. The first spill may be converted 2001 // to a pre-decrement store later by emitPrologue if the callee-save stack 2002 // area allocation can't be combined with the local stack area allocation. 2003 // For example: 2004 // stp x22, x21, [sp, #0] // addImm(+0) 2005 // stp x20, x19, [sp, #16] // addImm(+2) 2006 // stp fp, lr, [sp, #32] // addImm(+4) 2007 // Rationale: This sequence saves uop updates compared to a sequence of 2008 // pre-increment spills like stp xi,xj,[sp,#-16]! 2009 // Note: Similar rationale and sequence for restores in epilog. 2010 unsigned Size, Align; 2011 switch (RPI.Type) { 2012 case RegPairInfo::GPR: 2013 StrOpc = RPI.isPaired() ? AArch64::STPXi : AArch64::STRXui; 2014 Size = 8; 2015 Align = 8; 2016 break; 2017 case RegPairInfo::FPR64: 2018 StrOpc = RPI.isPaired() ? 
AArch64::STPDi : AArch64::STRDui;
2019       Size = 8;
2020       Align = 8;
2021       break;
2022     case RegPairInfo::FPR128:
2023       StrOpc = RPI.isPaired() ? AArch64::STPQi : AArch64::STRQui;
2024       Size = 16;
2025       Align = 16;
2026       break;
2027     }
2028     LLVM_DEBUG(dbgs() << "CSR spill: (" << printReg(Reg1, TRI);
2029                if (RPI.isPaired()) dbgs() << ", " << printReg(Reg2, TRI);
2030                dbgs() << ") -> fi#(" << RPI.FrameIdx;
2031                if (RPI.isPaired()) dbgs() << ", " << RPI.FrameIdx + 1;
2032                dbgs() << ")\n");
2033 
2034     assert((!NeedsWinCFI || !(Reg1 == AArch64::LR && Reg2 == AArch64::FP)) &&
2035            "Windows unwinding requires a consecutive (FP,LR) pair");
2036     // Windows unwind codes require consecutive registers if registers are
2037     // paired. Make the switch here, so that the code below will save (x,x+1)
2038     // and not (x+1,x).
2039     unsigned FrameIdxReg1 = RPI.FrameIdx;
2040     unsigned FrameIdxReg2 = RPI.FrameIdx + 1;
2041     if (NeedsWinCFI && RPI.isPaired()) {
2042       std::swap(Reg1, Reg2);
2043       std::swap(FrameIdxReg1, FrameIdxReg2);
2044     }
2045     MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(StrOpc));
2046     if (!MRI.isReserved(Reg1))
2047       MBB.addLiveIn(Reg1);
2048     if (RPI.isPaired()) {
2049       if (!MRI.isReserved(Reg2))
2050         MBB.addLiveIn(Reg2);
2051       MIB.addReg(Reg2, getPrologueDeath(MF, Reg2));
2052       MIB.addMemOperand(MF.getMachineMemOperand(
2053           MachinePointerInfo::getFixedStack(MF, FrameIdxReg2),
2054           MachineMemOperand::MOStore, Size, Align));
2055     }
2056     MIB.addReg(Reg1, getPrologueDeath(MF, Reg1))
2057         .addReg(AArch64::SP)
2058         .addImm(RPI.Offset) // [sp, #offset*scale],
2059                             // where factor*scale is implicit
2060         .setMIFlag(MachineInstr::FrameSetup);
2061     MIB.addMemOperand(MF.getMachineMemOperand(
2062         MachinePointerInfo::getFixedStack(MF,FrameIdxReg1),
2063         MachineMemOperand::MOStore, Size, Align));
2064     if (NeedsWinCFI)
2065       InsertSEH(MIB, TII, MachineInstr::FrameSetup);
2066 
2067   }
2068   return true;
2069 }
2070 
2071 bool AArch64FrameLowering::restoreCalleeSavedRegisters(
2072     MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
2073     std::vector<CalleeSavedInfo> &CSI,
2074     const TargetRegisterInfo *TRI) const {
2075   MachineFunction &MF = *MBB.getParent();
2076   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
2077   DebugLoc DL;
2078   SmallVector<RegPairInfo, 8> RegPairs;
2079   bool NeedsWinCFI = needsWinCFI(MF);
2080 
2081   if (MI != MBB.end())
2082     DL = MI->getDebugLoc();
2083 
2084   bool NeedShadowCallStackProlog = false;
2085   computeCalleeSaveRegisterPairs(MF, CSI, TRI, RegPairs,
2086                                  NeedShadowCallStackProlog, hasFP(MF));
2087 
2088   auto EmitMI = [&](const RegPairInfo &RPI) {
2089     unsigned Reg1 = RPI.Reg1;
2090     unsigned Reg2 = RPI.Reg2;
2091 
2092     // Issue sequence of restores for cs regs. The last restore may be converted
2093     // to a post-increment load later by emitEpilogue if the callee-save stack
2094     // area allocation can't be combined with the local stack area allocation.
2095     // For example:
2096     //    ldp fp, lr, [sp, #32]       // addImm(+4)
2097     //    ldp x20, x19, [sp, #16]     // addImm(+2)
2098     //    ldp x22, x21, [sp, #0]      // addImm(+0)
2099     // Note: see comment in spillCalleeSavedRegisters()
2100     unsigned LdrOpc;
2101     unsigned Size, Align;
2102     switch (RPI.Type) {
2103     case RegPairInfo::GPR:
2104       LdrOpc = RPI.isPaired() ? AArch64::LDPXi : AArch64::LDRXui;
2105       Size = 8;
2106       Align = 8;
2107       break;
2108     case RegPairInfo::FPR64:
2109       LdrOpc = RPI.isPaired() ? AArch64::LDPDi : AArch64::LDRDui;
2110       Size = 8;
2111       Align = 8;
2112       break;
2113     case RegPairInfo::FPR128:
2114       LdrOpc = RPI.isPaired() ?
AArch64::LDPQi : AArch64::LDRQui;
2115       Size = 16;
2116       Align = 16;
2117       break;
2118     }
2119     LLVM_DEBUG(dbgs() << "CSR restore: (" << printReg(Reg1, TRI);
2120                if (RPI.isPaired()) dbgs() << ", " << printReg(Reg2, TRI);
2121                dbgs() << ") -> fi#(" << RPI.FrameIdx;
2122                if (RPI.isPaired()) dbgs() << ", " << RPI.FrameIdx + 1;
2123                dbgs() << ")\n");
2124 
2125     // Windows unwind codes require consecutive registers if registers are
2126     // paired. Make the switch here, so that the code below will restore
2127     // (x,x+1) and not (x+1,x).
2128     unsigned FrameIdxReg1 = RPI.FrameIdx;
2129     unsigned FrameIdxReg2 = RPI.FrameIdx + 1;
2130     if (NeedsWinCFI && RPI.isPaired()) {
2131       std::swap(Reg1, Reg2);
2132       std::swap(FrameIdxReg1, FrameIdxReg2);
2133     }
2134     MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(LdrOpc));
2135     if (RPI.isPaired()) {
2136       MIB.addReg(Reg2, getDefRegState(true));
2137       MIB.addMemOperand(MF.getMachineMemOperand(
2138           MachinePointerInfo::getFixedStack(MF, FrameIdxReg2),
2139           MachineMemOperand::MOLoad, Size, Align));
2140     }
2141     MIB.addReg(Reg1, getDefRegState(true))
2142         .addReg(AArch64::SP)
2143         .addImm(RPI.Offset) // [sp, #offset*scale]
2144                             // where factor*scale is implicit
2145         .setMIFlag(MachineInstr::FrameDestroy);
2146     MIB.addMemOperand(MF.getMachineMemOperand(
2147         MachinePointerInfo::getFixedStack(MF, FrameIdxReg1),
2148         MachineMemOperand::MOLoad, Size, Align));
2149     if (NeedsWinCFI)
2150       InsertSEH(MIB, TII, MachineInstr::FrameDestroy);
2151   };
2152   if (ReverseCSRRestoreSeq)
2153     for (const RegPairInfo &RPI : reverse(RegPairs))
2154       EmitMI(RPI);
2155   else
2156     for (const RegPairInfo &RPI : RegPairs)
2157       EmitMI(RPI);
2158 
2159   if (NeedShadowCallStackProlog) {
2160     // Shadow call stack epilog: ldr x30, [x18, #-8]!
2161     BuildMI(MBB, MI, DL, TII.get(AArch64::LDRXpre))
2162         .addReg(AArch64::X18, RegState::Define)
2163         .addReg(AArch64::LR, RegState::Define)
2164         .addReg(AArch64::X18)
2165         .addImm(-8)
2166         .setMIFlag(MachineInstr::FrameDestroy);
2167   }
2168 
2169   return true;
2170 }
2171 
2172 void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF,
2173                                                 BitVector &SavedRegs,
2174                                                 RegScavenger *RS) const {
2175   // All calls are tail calls in GHC calling conv, and functions have no
2176   // prologue/epilogue.
2177   if (MF.getFunction().getCallingConv() == CallingConv::GHC)
2178     return;
2179 
2180   TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
2181   const AArch64RegisterInfo *RegInfo = static_cast<const AArch64RegisterInfo *>(
2182       MF.getSubtarget().getRegisterInfo());
2183   AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
2184   unsigned UnspilledCSGPR = AArch64::NoRegister;
2185   unsigned UnspilledCSGPRPaired = AArch64::NoRegister;
2186 
2187   MachineFrameInfo &MFI = MF.getFrameInfo();
2188   const MCPhysReg *CSRegs = MF.getRegInfo().getCalleeSavedRegs();
2189 
2190   unsigned BasePointerReg = RegInfo->hasBasePointer(MF)
2191                                 ? RegInfo->getBaseRegister()
2192                                 : (unsigned)AArch64::NoRegister;
2193 
2194   unsigned ExtraCSSpill = 0;
2195   // Figure out which callee-saved registers to save/restore.
2196   for (unsigned i = 0; CSRegs[i]; ++i) {
2197     const unsigned Reg = CSRegs[i];
2198 
2199     // Add the base pointer register to SavedRegs if it is callee-save.
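    // (The base pointer, when AArch64 needs one, is x19, which is itself
    // callee-saved and therefore present in CSRegs, so the check below can
    // catch it.)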
2200 if (Reg == BasePointerReg) 2201 SavedRegs.set(Reg); 2202 2203 bool RegUsed = SavedRegs.test(Reg); 2204 unsigned PairedReg = CSRegs[i ^ 1]; 2205 if (!RegUsed) { 2206 if (AArch64::GPR64RegClass.contains(Reg) && 2207 !RegInfo->isReservedReg(MF, Reg)) { 2208 UnspilledCSGPR = Reg; 2209 UnspilledCSGPRPaired = PairedReg; 2210 } 2211 continue; 2212 } 2213 2214 // MachO's compact unwind format relies on all registers being stored in 2215 // pairs. 2216 // FIXME: the usual format is actually better if unwinding isn't needed. 2217 if (produceCompactUnwindFrame(MF) && PairedReg != AArch64::NoRegister && 2218 !SavedRegs.test(PairedReg)) { 2219 SavedRegs.set(PairedReg); 2220 if (AArch64::GPR64RegClass.contains(PairedReg) && 2221 !RegInfo->isReservedReg(MF, PairedReg)) 2222 ExtraCSSpill = PairedReg; 2223 } 2224 } 2225 2226 // Calculates the callee saved stack size. 2227 unsigned CSStackSize = 0; 2228 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); 2229 const MachineRegisterInfo &MRI = MF.getRegInfo(); 2230 for (unsigned Reg : SavedRegs.set_bits()) 2231 CSStackSize += TRI->getRegSizeInBits(Reg, MRI) / 8; 2232 2233 // Save number of saved regs, so we can easily update CSStackSize later. 2234 unsigned NumSavedRegs = SavedRegs.count(); 2235 2236 // The frame record needs to be created by saving the appropriate registers 2237 unsigned EstimatedStackSize = MFI.estimateStackSize(MF); 2238 if (hasFP(MF) || 2239 windowsRequiresStackProbe(MF, EstimatedStackSize + CSStackSize + 16)) { 2240 SavedRegs.set(AArch64::FP); 2241 SavedRegs.set(AArch64::LR); 2242 } 2243 2244 LLVM_DEBUG(dbgs() << "*** determineCalleeSaves\nSaved CSRs:"; 2245 for (unsigned Reg 2246 : SavedRegs.set_bits()) dbgs() 2247 << ' ' << printReg(Reg, RegInfo); 2248 dbgs() << "\n";); 2249 2250 // If any callee-saved registers are used, the frame cannot be eliminated. 2251 unsigned MaxAlign = getStackAlignment(); 2252 int64_t SVEStackSize = 2253 alignTo(determineSVEStackSize(MFI, MaxAlign), MaxAlign); 2254 assert(MaxAlign <= 16 && "Cannot align scalable vectors more than 16 bytes"); 2255 bool CanEliminateFrame = (SavedRegs.count() == 0) && !SVEStackSize; 2256 2257 // The CSR spill slots have not been allocated yet, so estimateStackSize 2258 // won't include them. 2259 unsigned EstimatedStackSizeLimit = estimateRSStackSizeLimit(MF); 2260 2261 // Conservatively always assume BigStack when there are SVE spills. 2262 bool BigStack = SVEStackSize || 2263 (EstimatedStackSize + CSStackSize) > EstimatedStackSizeLimit; 2264 if (BigStack || !CanEliminateFrame || RegInfo->cannotEliminateFrame(MF)) 2265 AFI->setHasStackFrame(true); 2266 2267 // Estimate if we might need to scavenge a register at some point in order 2268 // to materialize a stack offset. If so, either spill one additional 2269 // callee-saved register or reserve a special spill slot to facilitate 2270 // register scavenging. If we already spilled an extra callee-saved register 2271 // above to keep the number of spills even, we don't need to do anything else 2272 // here. 2273 if (BigStack) { 2274 if (!ExtraCSSpill && UnspilledCSGPR != AArch64::NoRegister) { 2275 LLVM_DEBUG(dbgs() << "Spilling " << printReg(UnspilledCSGPR, RegInfo) 2276 << " to get a scratch register.\n"); 2277 SavedRegs.set(UnspilledCSGPR); 2278 // MachO's compact unwind format relies on all registers being stored in 2279 // pairs, so if we need to spill one extra for BigStack, then we need to 2280 // store the pair. 
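      // E.g. (illustrative registers) if x26 was chosen as UnspilledCSGPR,
      // its CSRegs partner x27 is spilled as well, keeping the saves pairable
      // for the compact unwind encoding.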
2281 if (produceCompactUnwindFrame(MF)) 2282 SavedRegs.set(UnspilledCSGPRPaired); 2283 ExtraCSSpill = UnspilledCSGPR; 2284 } 2285 2286 // If we didn't find an extra callee-saved register to spill, create 2287 // an emergency spill slot. 2288 if (!ExtraCSSpill || MF.getRegInfo().isPhysRegUsed(ExtraCSSpill)) { 2289 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); 2290 const TargetRegisterClass &RC = AArch64::GPR64RegClass; 2291 unsigned Size = TRI->getSpillSize(RC); 2292 unsigned Align = TRI->getSpillAlignment(RC); 2293 int FI = MFI.CreateStackObject(Size, Align, false); 2294 RS->addScavengingFrameIndex(FI); 2295 LLVM_DEBUG(dbgs() << "No available CS registers, allocated fi#" << FI 2296 << " as the emergency spill slot.\n"); 2297 } 2298 } 2299 2300 // Adding the size of additional 64bit GPR saves. 2301 CSStackSize += 8 * (SavedRegs.count() - NumSavedRegs); 2302 unsigned AlignedCSStackSize = alignTo(CSStackSize, 16); 2303 LLVM_DEBUG(dbgs() << "Estimated stack frame size: " 2304 << EstimatedStackSize + AlignedCSStackSize 2305 << " bytes.\n"); 2306 2307 // Round up to register pair alignment to avoid additional SP adjustment 2308 // instructions. 2309 AFI->setCalleeSavedStackSize(AlignedCSStackSize); 2310 AFI->setCalleeSaveStackHasFreeSpace(AlignedCSStackSize != CSStackSize); 2311 } 2312 2313 bool AArch64FrameLowering::enableStackSlotScavenging( 2314 const MachineFunction &MF) const { 2315 const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>(); 2316 return AFI->hasCalleeSaveStackFreeSpace(); 2317 } 2318 2319 int64_t AArch64FrameLowering::determineSVEStackSize(MachineFrameInfo &MFI, 2320 unsigned &MaxAlign) const { 2321 // Process all fixed stack objects. 2322 int64_t Offset = 0; 2323 for (int I = MFI.getObjectIndexBegin(); I != 0; ++I) 2324 if (MFI.getStackID(I) == TargetStackID::SVEVector) { 2325 int64_t FixedOffset = -MFI.getObjectOffset(I); 2326 if (FixedOffset > Offset) 2327 Offset = FixedOffset; 2328 } 2329 2330 // Note: We don't take allocatable stack objects into 2331 // account yet, because allocation for those is not yet 2332 // implemented. 2333 return Offset; 2334 } 2335 2336 void AArch64FrameLowering::processFunctionBeforeFrameFinalized( 2337 MachineFunction &MF, RegScavenger *RS) const { 2338 MachineFrameInfo &MFI = MF.getFrameInfo(); 2339 2340 assert(getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown && 2341 "Upwards growing stack unsupported"); 2342 2343 unsigned MaxAlign = getStackAlignment(); 2344 int64_t SVEStackSize = determineSVEStackSize(MFI, MaxAlign); 2345 2346 AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>(); 2347 AFI->setStackSizeSVE(alignTo(SVEStackSize, MaxAlign)); 2348 assert(MaxAlign <= 16 && "Cannot align scalable vectors more than 16 bytes"); 2349 2350 // If this function isn't doing Win64-style C++ EH, we don't need to do 2351 // anything. 2352 if (!MF.hasEHFunclets()) 2353 return; 2354 const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo(); 2355 WinEHFuncInfo &EHInfo = *MF.getWinEHFuncInfo(); 2356 2357 MachineBasicBlock &MBB = MF.front(); 2358 auto MBBI = MBB.begin(); 2359 while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup)) 2360 ++MBBI; 2361 2362 // Create an UnwindHelp object. 2363 int UnwindHelpFI = 2364 MFI.CreateStackObject(/*size*/8, /*alignment*/16, false); 2365 EHInfo.UnwindHelpFrameIdx = UnwindHelpFI; 2366 // We need to store -2 into the UnwindHelp object at the start of the 2367 // function. 
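  // Roughly the following is emitted (x8 stands for whatever register
  // FindUnusedReg returns; the frame index is resolved to an SP/FP offset
  // later):
  //   mov  x8, #-2
  //   stur x8, [<UnwindHelp slot>]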
2368 DebugLoc DL; 2369 RS->enterBasicBlockEnd(MBB); 2370 RS->backward(std::prev(MBBI)); 2371 unsigned DstReg = RS->FindUnusedReg(&AArch64::GPR64commonRegClass); 2372 assert(DstReg && "There must be a free register after frame setup"); 2373 BuildMI(MBB, MBBI, DL, TII.get(AArch64::MOVi64imm), DstReg).addImm(-2); 2374 BuildMI(MBB, MBBI, DL, TII.get(AArch64::STURXi)) 2375 .addReg(DstReg, getKillRegState(true)) 2376 .addFrameIndex(UnwindHelpFI) 2377 .addImm(0); 2378 } 2379 2380 /// For Win64 AArch64 EH, the offset to the Unwind object is from the SP before 2381 /// the update. This is easily retrieved as it is exactly the offset that is set 2382 /// in processFunctionBeforeFrameFinalized. 2383 int AArch64FrameLowering::getFrameIndexReferencePreferSP( 2384 const MachineFunction &MF, int FI, unsigned &FrameReg, 2385 bool IgnoreSPUpdates) const { 2386 const MachineFrameInfo &MFI = MF.getFrameInfo(); 2387 LLVM_DEBUG(dbgs() << "Offset from the SP for " << FI << " is " 2388 << MFI.getObjectOffset(FI) << "\n"); 2389 FrameReg = AArch64::SP; 2390 return MFI.getObjectOffset(FI); 2391 } 2392 2393 /// The parent frame offset (aka dispFrame) is only used on X86_64 to retrieve 2394 /// the parent's frame pointer 2395 unsigned AArch64FrameLowering::getWinEHParentFrameOffset( 2396 const MachineFunction &MF) const { 2397 return 0; 2398 } 2399 2400 /// Funclets only need to account for space for the callee saved registers, 2401 /// as the locals are accounted for in the parent's stack frame. 2402 unsigned AArch64FrameLowering::getWinEHFuncletFrameSize( 2403 const MachineFunction &MF) const { 2404 // This is the size of the pushed CSRs. 2405 unsigned CSSize = 2406 MF.getInfo<AArch64FunctionInfo>()->getCalleeSavedStackSize(); 2407 // This is the amount of stack a funclet needs to allocate. 2408 return alignTo(CSSize + MF.getFrameInfo().getMaxCallFrameSize(), 2409 getStackAlignment()); 2410 } 2411
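// Worked example for getWinEHFuncletFrameSize (hypothetical sizes): with
// 96 bytes of pushed CSRs and a 40-byte max call frame, a funclet allocates
// alignTo(96 + 40, 16) == 144 bytes.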