1 //===- AArch64FrameLowering.cpp - AArch64 Frame Lowering -------*- C++ -*-====// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file contains the AArch64 implementation of TargetFrameLowering class. 10 // 11 // On AArch64, stack frames are structured as follows: 12 // 13 // The stack grows downward. 14 // 15 // All of the individual frame areas on the frame below are optional, i.e. it's 16 // possible to create a function so that the particular area isn't present 17 // in the frame. 18 // 19 // At function entry, the "frame" looks as follows: 20 // 21 // | | Higher address 22 // |-----------------------------------| 23 // | | 24 // | arguments passed on the stack | 25 // | | 26 // |-----------------------------------| <- sp 27 // | | Lower address 28 // 29 // 30 // After the prologue has run, the frame has the following general structure. 31 // Note that this doesn't depict the case where a red-zone is used. Also, 32 // technically the last frame area (VLAs) doesn't get created until in the 33 // main function body, after the prologue is run. However, it's depicted here 34 // for completeness. 35 // 36 // | | Higher address 37 // |-----------------------------------| 38 // | | 39 // | arguments passed on the stack | 40 // | | 41 // |-----------------------------------| 42 // | | 43 // | (Win64 only) varargs from reg | 44 // | | 45 // |-----------------------------------| 46 // | | 47 // | callee-saved gpr registers | <--. 48 // | | | On Darwin platforms these 49 // |- - - - - - - - - - - - - - - - - -| | callee saves are swapped, 50 // | prev_lr | | (frame record first) 51 // | prev_fp | <--' 52 // | async context if needed | 53 // | (a.k.a. "frame record") | 54 // |-----------------------------------| <- fp(=x29) 55 // | <hazard padding> | 56 // |-----------------------------------| 57 // | | 58 // | callee-saved fp/simd/SVE regs | 59 // | | 60 // |-----------------------------------| 61 // | | 62 // | SVE stack objects | 63 // | | 64 // |-----------------------------------| 65 // |.empty.space.to.make.part.below....| 66 // |.aligned.in.case.it.needs.more.than| (size of this area is unknown at 67 // |.the.standard.16-byte.alignment....| compile time; if present) 68 // |-----------------------------------| 69 // | local variables of fixed size | 70 // | including spill slots | 71 // | <FPR> | 72 // | <hazard padding> | 73 // | <GPR> | 74 // |-----------------------------------| <- bp(not defined by ABI, 75 // |.variable-sized.local.variables....| LLVM chooses X19) 76 // |.(VLAs)............................| (size of this area is unknown at 77 // |...................................| compile time) 78 // |-----------------------------------| <- sp 79 // | | Lower address 80 // 81 // 82 // To access the data in a frame, at-compile time, a constant offset must be 83 // computable from one of the pointers (fp, bp, sp) to access it. The size 84 // of the areas with a dotted background cannot be computed at compile-time 85 // if they are present, making it required to have all three of fp, bp and 86 // sp to be set up to be able to access all contents in the frame areas, 87 // assuming all of the frame areas are non-empty. 88 // 89 // For most functions, some of the frame areas are empty. 
For those functions,
90 // it may not be necessary to set up fp or bp:
91 // * A base pointer is definitely needed when there are both VLAs and local
92 // variables with more-than-default alignment requirements.
93 // * A frame pointer is definitely needed when there are local variables with
94 // more-than-default alignment requirements.
95 //
96 // For Darwin platforms the frame-record (fp, lr) is stored at the top of the
97 // callee-saved area, since the unwind encoding does not allow for encoding
98 // this dynamically and existing tools depend on this layout. For other
99 // platforms, the frame-record is stored at the bottom of the (gpr) callee-saved
100 // area to allow SVE stack objects (allocated directly below the callee-saves,
101 // if available) to be accessed directly from the framepointer.
102 // The SVE spill/fill instructions have VL-scaled addressing modes such
103 // as:
104 // ldr z8, [fp, #-7 mul vl]
105 // For SVE the size of the vector length (VL) is not known at compile-time, so
106 // '#-7 mul vl' is an offset that can only be evaluated at runtime. With this
107 // layout, we don't need to add an unscaled offset to the framepointer before
108 // accessing the SVE object in the frame.
109 //
110 // In some cases when a base pointer is not strictly needed, it is generated
111 // anyway when offsets from the frame pointer to access local variables become
112 // so large that the offset can't be encoded in the immediate fields of loads
113 // or stores.
114 //
115 // Outgoing function arguments must be at the bottom of the stack frame when
116 // calling another function. If we do not have variable-sized stack objects, we
117 // can allocate a "reserved call frame" area at the bottom of the local
118 // variable area, large enough for all outgoing calls. If we do have VLAs, then
119 // the stack pointer must be decremented and incremented around each call to
120 // make space for the arguments below the VLAs.
121 //
122 // FIXME: also explain the redzone concept.
123 //
124 // About stack hazards: Under some SME contexts, a coprocessor with its own
125 // separate cache can be used for FP operations. This can create hazards if the CPU
126 // and the SME unit try to access the same area of memory, including if the
127 // access is to an area of the stack. To try to alleviate this we attempt to
128 // introduce extra padding into the stack frame between FP and GPR accesses,
129 // controlled by the StackHazardSize option. Without changing the layout of the
130 // stack frame in the diagram above, a stack object of size StackHazardSize is
131 // added between GPR and FPR CSRs. Another is added to the stack objects
132 // section, and stack objects are sorted so that FPR > Hazard padding slot >
133 // GPRs (where possible). Unfortunately some things are not handled well (VLA
134 // area, arguments on the stack, objects with both GPR and FPR accesses), but if
135 // those are controlled by the user then the entire stack frame becomes GPR at
136 // the start/end with FPR in the middle, surrounded by Hazard padding.
137 //
138 // An example of the prologue:
139 //
140 // .globl __foo
141 // .align 2
142 // __foo:
143 // Ltmp0:
144 // .cfi_startproc
145 // .cfi_personality 155, ___gxx_personality_v0
146 // Leh_func_begin:
147 // .cfi_lsda 16, Lexception33
148 //
149 // stp xa, xb, [sp, #-offset]!
150 // ...
151 // stp x28, x27, [sp, #offset-32] 152 // stp fp, lr, [sp, #offset-16] 153 // add fp, sp, #offset - 16 154 // sub sp, sp, #1360 155 // 156 // The Stack: 157 // +-------------------------------------------+ 158 // 10000 | ........ | ........ | ........ | ........ | 159 // 10004 | ........ | ........ | ........ | ........ | 160 // +-------------------------------------------+ 161 // 10008 | ........ | ........ | ........ | ........ | 162 // 1000c | ........ | ........ | ........ | ........ | 163 // +===========================================+ 164 // 10010 | X28 Register | 165 // 10014 | X28 Register | 166 // +-------------------------------------------+ 167 // 10018 | X27 Register | 168 // 1001c | X27 Register | 169 // +===========================================+ 170 // 10020 | Frame Pointer | 171 // 10024 | Frame Pointer | 172 // +-------------------------------------------+ 173 // 10028 | Link Register | 174 // 1002c | Link Register | 175 // +===========================================+ 176 // 10030 | ........ | ........ | ........ | ........ | 177 // 10034 | ........ | ........ | ........ | ........ | 178 // +-------------------------------------------+ 179 // 10038 | ........ | ........ | ........ | ........ | 180 // 1003c | ........ | ........ | ........ | ........ | 181 // +-------------------------------------------+ 182 // 183 // [sp] = 10030 :: >>initial value<< 184 // sp = 10020 :: stp fp, lr, [sp, #-16]! 185 // fp = sp == 10020 :: mov fp, sp 186 // [sp] == 10020 :: stp x28, x27, [sp, #-16]! 187 // sp == 10010 :: >>final value<< 188 // 189 // The frame pointer (w29) points to address 10020. If we use an offset of 190 // '16' from 'w29', we get the CFI offsets of -8 for w30, -16 for w29, -24 191 // for w27, and -32 for w28: 192 // 193 // Ltmp1: 194 // .cfi_def_cfa w29, 16 195 // Ltmp2: 196 // .cfi_offset w30, -8 197 // Ltmp3: 198 // .cfi_offset w29, -16 199 // Ltmp4: 200 // .cfi_offset w27, -24 201 // Ltmp5: 202 // .cfi_offset w28, -32 203 // 204 //===----------------------------------------------------------------------===// 205 206 #include "AArch64FrameLowering.h" 207 #include "AArch64InstrInfo.h" 208 #include "AArch64MachineFunctionInfo.h" 209 #include "AArch64RegisterInfo.h" 210 #include "AArch64Subtarget.h" 211 #include "AArch64TargetMachine.h" 212 #include "MCTargetDesc/AArch64AddressingModes.h" 213 #include "MCTargetDesc/AArch64MCTargetDesc.h" 214 #include "llvm/ADT/ScopeExit.h" 215 #include "llvm/ADT/SmallVector.h" 216 #include "llvm/ADT/Statistic.h" 217 #include "llvm/Analysis/ValueTracking.h" 218 #include "llvm/CodeGen/LivePhysRegs.h" 219 #include "llvm/CodeGen/MachineBasicBlock.h" 220 #include "llvm/CodeGen/MachineFrameInfo.h" 221 #include "llvm/CodeGen/MachineFunction.h" 222 #include "llvm/CodeGen/MachineInstr.h" 223 #include "llvm/CodeGen/MachineInstrBuilder.h" 224 #include "llvm/CodeGen/MachineMemOperand.h" 225 #include "llvm/CodeGen/MachineModuleInfo.h" 226 #include "llvm/CodeGen/MachineOperand.h" 227 #include "llvm/CodeGen/MachineRegisterInfo.h" 228 #include "llvm/CodeGen/RegisterScavenging.h" 229 #include "llvm/CodeGen/TargetInstrInfo.h" 230 #include "llvm/CodeGen/TargetRegisterInfo.h" 231 #include "llvm/CodeGen/TargetSubtargetInfo.h" 232 #include "llvm/CodeGen/WinEHFuncInfo.h" 233 #include "llvm/IR/Attributes.h" 234 #include "llvm/IR/CallingConv.h" 235 #include "llvm/IR/DataLayout.h" 236 #include "llvm/IR/DebugLoc.h" 237 #include "llvm/IR/Function.h" 238 #include "llvm/MC/MCAsmInfo.h" 239 #include "llvm/MC/MCDwarf.h" 240 #include "llvm/Support/CommandLine.h" 
241 #include "llvm/Support/Debug.h" 242 #include "llvm/Support/ErrorHandling.h" 243 #include "llvm/Support/MathExtras.h" 244 #include "llvm/Support/raw_ostream.h" 245 #include "llvm/Target/TargetMachine.h" 246 #include "llvm/Target/TargetOptions.h" 247 #include <cassert> 248 #include <cstdint> 249 #include <iterator> 250 #include <optional> 251 #include <vector> 252 253 using namespace llvm; 254 255 #define DEBUG_TYPE "frame-info" 256 257 static cl::opt<bool> EnableRedZone("aarch64-redzone", 258 cl::desc("enable use of redzone on AArch64"), 259 cl::init(false), cl::Hidden); 260 261 static cl::opt<bool> StackTaggingMergeSetTag( 262 "stack-tagging-merge-settag", 263 cl::desc("merge settag instruction in function epilog"), cl::init(true), 264 cl::Hidden); 265 266 static cl::opt<bool> OrderFrameObjects("aarch64-order-frame-objects", 267 cl::desc("sort stack allocations"), 268 cl::init(true), cl::Hidden); 269 270 cl::opt<bool> EnableHomogeneousPrologEpilog( 271 "homogeneous-prolog-epilog", cl::Hidden, 272 cl::desc("Emit homogeneous prologue and epilogue for the size " 273 "optimization (default = off)")); 274 275 // Stack hazard padding size. 0 = disabled. 276 static cl::opt<unsigned> StackHazardSize("aarch64-stack-hazard-size", 277 cl::init(0), cl::Hidden); 278 // Whether to insert padding into non-streaming functions (for testing). 279 static cl::opt<bool> 280 StackHazardInNonStreaming("aarch64-stack-hazard-in-non-streaming", 281 cl::init(false), cl::Hidden); 282 283 STATISTIC(NumRedZoneFunctions, "Number of functions using red zone"); 284 285 /// Returns how much of the incoming argument stack area (in bytes) we should 286 /// clean up in an epilogue. For the C calling convention this will be 0, for 287 /// guaranteed tail call conventions it can be positive (a normal return or a 288 /// tail call to a function that uses less stack space for arguments) or 289 /// negative (for a tail call to a function that needs more stack space than us 290 /// for arguments). 291 static int64_t getArgumentStackToRestore(MachineFunction &MF, 292 MachineBasicBlock &MBB) { 293 MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr(); 294 AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>(); 295 bool IsTailCallReturn = (MBB.end() != MBBI) 296 ? AArch64InstrInfo::isTailCallReturnInst(*MBBI) 297 : false; 298 299 int64_t ArgumentPopSize = 0; 300 if (IsTailCallReturn) { 301 MachineOperand &StackAdjust = MBBI->getOperand(1); 302 303 // For a tail-call in a callee-pops-arguments environment, some or all of 304 // the stack may actually be in use for the call's arguments, this is 305 // calculated during LowerCall and consumed here... 306 ArgumentPopSize = StackAdjust.getImm(); 307 } else { 308 // ... otherwise the amount to pop is *all* of the argument space, 309 // conveniently stored in the MachineFunctionInfo by 310 // LowerFormalArguments. This will, of course, be zero for the C calling 311 // convention. 312 ArgumentPopSize = AFI->getArgumentStackToRestore(); 313 } 314 315 return ArgumentPopSize; 316 } 317 318 static bool produceCompactUnwindFrame(MachineFunction &MF); 319 static bool needsWinCFI(const MachineFunction &MF); 320 static StackOffset getSVEStackSize(const MachineFunction &MF); 321 static Register findScratchNonCalleeSaveRegister(MachineBasicBlock *MBB); 322 323 /// Returns true if a homogeneous prolog or epilog code can be emitted 324 /// for the size optimization. If possible, a frame helper call is injected. 325 /// When Exit block is given, this check is for epilog. 
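/// (For example, instead of every function carrying its own save sequence,
/// such as
///   stp x22, x21, [sp, #-48]!
///   stp x20, x19, [sp, #16]
///   stp x29, x30, [sp, #32]
/// functions with the same callee-save set can call one shared frame helper
/// that performs the saves. This is a rough illustration; the actual
/// outlining is done by the homogeneous prolog/epilog lowering pass.)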
326 bool AArch64FrameLowering::homogeneousPrologEpilog(
327 MachineFunction &MF, MachineBasicBlock *Exit) const {
328 if (!MF.getFunction().hasMinSize())
329 return false;
330 if (!EnableHomogeneousPrologEpilog)
331 return false;
332 if (EnableRedZone)
333 return false;
334
335 // TODO: Windows is not supported yet.
336 if (needsWinCFI(MF))
337 return false;
338 // TODO: SVE is not supported yet.
339 if (getSVEStackSize(MF))
340 return false;
341
342 // Bail on stack adjustment needed on return for simplicity.
343 const MachineFrameInfo &MFI = MF.getFrameInfo();
344 const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
345 if (MFI.hasVarSizedObjects() || RegInfo->hasStackRealignment(MF))
346 return false;
347 if (Exit && getArgumentStackToRestore(MF, *Exit))
348 return false;
349
350 auto *AFI = MF.getInfo<AArch64FunctionInfo>();
351 if (AFI->hasSwiftAsyncContext() || AFI->hasStreamingModeChanges())
352 return false;
353
354 // If there is an odd number of GPRs before LR and FP in the CSRs list,
355 // they will not be paired into one RegPairInfo, which is incompatible with
356 // the assumption made by the homogeneous prolog epilog pass.
357 const MCPhysReg *CSRegs = MF.getRegInfo().getCalleeSavedRegs();
358 unsigned NumGPRs = 0;
359 for (unsigned I = 0; CSRegs[I]; ++I) {
360 Register Reg = CSRegs[I];
361 if (Reg == AArch64::LR) {
362 assert(CSRegs[I + 1] == AArch64::FP);
363 if (NumGPRs % 2 != 0)
364 return false;
365 break;
366 }
367 if (AArch64::GPR64RegClass.contains(Reg))
368 ++NumGPRs;
369 }
370
371 return true;
372 }
373
374 /// Returns true if CSRs should be paired.
375 bool AArch64FrameLowering::producePairRegisters(MachineFunction &MF) const {
376 return produceCompactUnwindFrame(MF) || homogeneousPrologEpilog(MF);
377 }
378
379 /// This is the biggest offset to the stack pointer we can encode in AArch64
380 /// instructions (without using a separate calculation and a temp register).
381 /// Note that the exceptions here are vector stores/loads, which cannot encode any
382 /// displacements (see estimateRSStackSizeLimit(), isAArch64FrameOffsetLegal()).
383 static const unsigned DefaultSafeSPDisplacement = 255;
384
385 /// Look at each instruction that references stack frames and return the stack
386 /// size limit beyond which some of these instructions will require a scratch
387 /// register during their expansion later.
388 static unsigned estimateRSStackSizeLimit(MachineFunction &MF) {
389 // FIXME: For now, just conservatively guesstimate based on unscaled indexing
390 // range. We'll end up allocating an unnecessary spill slot a lot, but
391 // realistically that's not a big deal at this stage of the game.
392 for (MachineBasicBlock &MBB : MF) {
393 for (MachineInstr &MI : MBB) {
394 if (MI.isDebugInstr() || MI.isPseudo() ||
395 MI.getOpcode() == AArch64::ADDXri ||
396 MI.getOpcode() == AArch64::ADDSXri)
397 continue;
398
399 for (const MachineOperand &MO : MI.operands()) {
400 if (!MO.isFI())
401 continue;
402
403 StackOffset Offset;
404 if (isAArch64FrameOffsetLegal(MI, Offset, nullptr, nullptr, nullptr) ==
405 AArch64FrameOffsetCannotUpdate)
406 return 0;
407 }
408 }
409 }
410 return DefaultSafeSPDisplacement;
411 }
412
413 TargetStackID::Value
414 AArch64FrameLowering::getStackIDForScalableVectors() const {
415 return TargetStackID::ScalableVector;
416 }
417
418 /// Returns the size of the fixed object area (allocated next to sp on entry).
419 /// On Win64 this may include a var args area and an UnwindHelp object for EH.
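/// (A sketch: a Win64 variadic function with 24 bytes of GPR var args and EH
/// funclets would get alignTo(24 + 8, 16) = 32 bytes here, plus any
/// tail-call reserved stack.)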
420 static unsigned getFixedObjectSize(const MachineFunction &MF,
421 const AArch64FunctionInfo *AFI, bool IsWin64,
422 bool IsFunclet) {
423 if (!IsWin64 || IsFunclet) {
424 return AFI->getTailCallReservedStack();
425 } else {
426 if (AFI->getTailCallReservedStack() != 0 &&
427 !MF.getFunction().getAttributes().hasAttrSomewhere(
428 Attribute::SwiftAsync))
429 report_fatal_error("cannot generate ABI-changing tail call for Win64");
430 // Var args are stored here in the primary function.
431 const unsigned VarArgsArea = AFI->getVarArgsGPRSize();
432 // To support EH funclets we allocate an UnwindHelp object.
433 const unsigned UnwindHelpObject = (MF.hasEHFunclets() ? 8 : 0);
434 return AFI->getTailCallReservedStack() +
435 alignTo(VarArgsArea + UnwindHelpObject, 16);
436 }
437 }
438
439 /// Returns the size of the entire SVE stackframe (callee-saves + spills).
440 static StackOffset getSVEStackSize(const MachineFunction &MF) {
441 const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
442 return StackOffset::getScalable((int64_t)AFI->getStackSizeSVE());
443 }
444
445 bool AArch64FrameLowering::canUseRedZone(const MachineFunction &MF) const {
446 if (!EnableRedZone)
447 return false;
448
449 // Don't use the red zone if the function explicitly asks us not to.
450 // This is typically used for kernel code.
451 const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
452 const unsigned RedZoneSize =
453 Subtarget.getTargetLowering()->getRedZoneSize(MF.getFunction());
454 if (!RedZoneSize)
455 return false;
456
457 const MachineFrameInfo &MFI = MF.getFrameInfo();
458 const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
459 uint64_t NumBytes = AFI->getLocalStackSize();
460
461 // If neither NEON nor SVE is available, a COPY from one Q-reg to
462 // another requires a spill -> reload sequence. We can do that
463 // using a pre-decrementing store/post-decrementing load, but
464 // if we do so, we can't use the Red Zone.
465 bool LowerQRegCopyThroughMem = Subtarget.hasFPARMv8() &&
466 !Subtarget.isNeonAvailable() &&
467 !Subtarget.hasSVE();
468
469 return !(MFI.hasCalls() || hasFP(MF) || NumBytes > RedZoneSize ||
470 getSVEStackSize(MF) || LowerQRegCopyThroughMem);
471 }
472
473 /// hasFP - Return true if the specified function should have a dedicated frame
474 /// pointer register.
475 bool AArch64FrameLowering::hasFP(const MachineFunction &MF) const {
476 const MachineFrameInfo &MFI = MF.getFrameInfo();
477 const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
478
479 // Win64 EH requires a frame pointer if funclets are present, as the locals
480 // are accessed off the frame pointer in both the parent function and the
481 // funclets.
482 if (MF.hasEHFunclets())
483 return true;
484 // Retain behavior of always omitting the FP for leaf functions when possible.
485 if (MF.getTarget().Options.DisableFramePointerElim(MF))
486 return true;
487 if (MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken() ||
488 MFI.hasStackMap() || MFI.hasPatchPoint() ||
489 RegInfo->hasStackRealignment(MF))
490 return true;
491 // With large call frames around we may need to use FP to access the scavenging
492 // emergency spill slot.
493 //
494 // Unfortunately some calls to hasFP() like machine verifier ->
495 // getReservedReg() -> hasFP in the middle of global isel are too early
496 // to know the max call frame size. Hopefully conservatively returning "true"
497 // in those cases is fine.
498 // DefaultSafeSPDisplacement is fine as we only emergency spill GP regs. 499 if (!MFI.isMaxCallFrameSizeComputed() || 500 MFI.getMaxCallFrameSize() > DefaultSafeSPDisplacement) 501 return true; 502 503 return false; 504 } 505 506 /// hasReservedCallFrame - Under normal circumstances, when a frame pointer is 507 /// not required, we reserve argument space for call sites in the function 508 /// immediately on entry to the current function. This eliminates the need for 509 /// add/sub sp brackets around call sites. Returns true if the call frame is 510 /// included as part of the stack frame. 511 bool AArch64FrameLowering::hasReservedCallFrame( 512 const MachineFunction &MF) const { 513 // The stack probing code for the dynamically allocated outgoing arguments 514 // area assumes that the stack is probed at the top - either by the prologue 515 // code, which issues a probe if `hasVarSizedObjects` return true, or by the 516 // most recent variable-sized object allocation. Changing the condition here 517 // may need to be followed up by changes to the probe issuing logic. 518 return !MF.getFrameInfo().hasVarSizedObjects(); 519 } 520 521 MachineBasicBlock::iterator AArch64FrameLowering::eliminateCallFramePseudoInstr( 522 MachineFunction &MF, MachineBasicBlock &MBB, 523 MachineBasicBlock::iterator I) const { 524 const AArch64InstrInfo *TII = 525 static_cast<const AArch64InstrInfo *>(MF.getSubtarget().getInstrInfo()); 526 const AArch64TargetLowering *TLI = 527 MF.getSubtarget<AArch64Subtarget>().getTargetLowering(); 528 [[maybe_unused]] MachineFrameInfo &MFI = MF.getFrameInfo(); 529 DebugLoc DL = I->getDebugLoc(); 530 unsigned Opc = I->getOpcode(); 531 bool IsDestroy = Opc == TII->getCallFrameDestroyOpcode(); 532 uint64_t CalleePopAmount = IsDestroy ? I->getOperand(1).getImm() : 0; 533 534 if (!hasReservedCallFrame(MF)) { 535 int64_t Amount = I->getOperand(0).getImm(); 536 Amount = alignTo(Amount, getStackAlign()); 537 if (!IsDestroy) 538 Amount = -Amount; 539 540 // N.b. if CalleePopAmount is valid but zero (i.e. callee would pop, but it 541 // doesn't have to pop anything), then the first operand will be zero too so 542 // this adjustment is a no-op. 543 if (CalleePopAmount == 0) { 544 // FIXME: in-function stack adjustment for calls is limited to 24-bits 545 // because there's no guaranteed temporary register available. 546 // 547 // ADD/SUB (immediate) has only LSL #0 and LSL #12 available. 548 // 1) For offset <= 12-bit, we use LSL #0 549 // 2) For 12-bit <= offset <= 24-bit, we use two instructions. One uses 550 // LSL #0, and the other uses LSL #12. 551 // 552 // Most call frames will be allocated at the start of a function so 553 // this is OK, but it is a limitation that needs dealing with. 554 assert(Amount > -0xffffff && Amount < 0xffffff && "call frame too large"); 555 556 if (TLI->hasInlineStackProbe(MF) && 557 -Amount >= AArch64::StackProbeMaxUnprobedStack) { 558 // When stack probing is enabled, the decrement of SP may need to be 559 // probed. We only need to do this if the call site needs 1024 bytes of 560 // space or more, because a region smaller than that is allowed to be 561 // unprobed at an ABI boundary. We rely on the fact that SP has been 562 // probed exactly at this point, either by the prologue or most recent 563 // dynamic allocation. 
564 assert(MFI.hasVarSizedObjects() &&
565 "non-reserved call frame without var sized objects?");
566 Register ScratchReg =
567 MF.getRegInfo().createVirtualRegister(&AArch64::GPR64RegClass);
568 inlineStackProbeFixed(I, ScratchReg, -Amount, StackOffset::get(0, 0));
569 } else {
570 emitFrameOffset(MBB, I, DL, AArch64::SP, AArch64::SP,
571 StackOffset::getFixed(Amount), TII);
572 }
573 }
574 } else if (CalleePopAmount != 0) {
575 // If the calling convention demands that the callee pops arguments from the
576 // stack, we want to add it back if we have a reserved call frame.
577 assert(CalleePopAmount < 0xffffff && "call frame too large");
578 emitFrameOffset(MBB, I, DL, AArch64::SP, AArch64::SP,
579 StackOffset::getFixed(-(int64_t)CalleePopAmount), TII);
580 }
581 return MBB.erase(I);
582 }
583
584 void AArch64FrameLowering::emitCalleeSavedGPRLocations(
585 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const {
586 MachineFunction &MF = *MBB.getParent();
587 MachineFrameInfo &MFI = MF.getFrameInfo();
588 AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
589 SMEAttrs Attrs(MF.getFunction());
590 bool LocallyStreaming =
591 Attrs.hasStreamingBody() && !Attrs.hasStreamingInterface();
592
593 const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
594 if (CSI.empty())
595 return;
596
597 const TargetSubtargetInfo &STI = MF.getSubtarget();
598 const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
599 const TargetInstrInfo &TII = *STI.getInstrInfo();
600 DebugLoc DL = MBB.findDebugLoc(MBBI);
601
602 for (const auto &Info : CSI) {
603 unsigned FrameIdx = Info.getFrameIdx();
604 if (MFI.getStackID(FrameIdx) == TargetStackID::ScalableVector)
605 continue;
606
607 assert(!Info.isSpilledToReg() && "Spilling to registers not implemented");
608 int64_t DwarfReg = TRI.getDwarfRegNum(Info.getReg(), true);
609 int64_t Offset = MFI.getObjectOffset(FrameIdx) - getOffsetOfLocalArea();
610
611 // The location of VG will be emitted before each streaming-mode change in
612 // the function. Only locally-streaming functions require emitting the
613 // non-streaming VG location here.
614 if ((LocallyStreaming && FrameIdx == AFI->getStreamingVGIdx()) ||
615 (!LocallyStreaming &&
616 DwarfReg == TRI.getDwarfRegNum(AArch64::VG, true)))
617 continue;
618
619 unsigned CFIIndex = MF.addFrameInst(
620 MCCFIInstruction::createOffset(nullptr, DwarfReg, Offset));
621 BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
622 .addCFIIndex(CFIIndex)
623 .setMIFlags(MachineInstr::FrameSetup);
624 }
625 }
626
627 void AArch64FrameLowering::emitCalleeSavedSVELocations(
628 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const {
629 MachineFunction &MF = *MBB.getParent();
630 MachineFrameInfo &MFI = MF.getFrameInfo();
631
632 // Add callee saved registers to move list.
633 const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
634 if (CSI.empty())
635 return;
636
637 const TargetSubtargetInfo &STI = MF.getSubtarget();
638 const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
639 const TargetInstrInfo &TII = *STI.getInstrInfo();
640 DebugLoc DL = MBB.findDebugLoc(MBBI);
641 AArch64FunctionInfo &AFI = *MF.getInfo<AArch64FunctionInfo>();
642
643 for (const auto &Info : CSI) {
644 if (!(MFI.getStackID(Info.getFrameIdx()) == TargetStackID::ScalableVector))
645 continue;
646
647 // Not all unwinders may know about SVE registers, so assume the lowest
648 // common denominator.
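// (Roughly: each saved SVE register is described with a DWARF expression in
// terms of the CFA and VG, via createCFAOffset below, rather than a plain
// .cfi_offset, because the offset is not a compile-time constant.)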
649 assert(!Info.isSpilledToReg() && "Spilling to registers not implemented"); 650 unsigned Reg = Info.getReg(); 651 if (!static_cast<const AArch64RegisterInfo &>(TRI).regNeedsCFI(Reg, Reg)) 652 continue; 653 654 StackOffset Offset = 655 StackOffset::getScalable(MFI.getObjectOffset(Info.getFrameIdx())) - 656 StackOffset::getFixed(AFI.getCalleeSavedStackSize(MFI)); 657 658 unsigned CFIIndex = MF.addFrameInst(createCFAOffset(TRI, Reg, Offset)); 659 BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION)) 660 .addCFIIndex(CFIIndex) 661 .setMIFlags(MachineInstr::FrameSetup); 662 } 663 } 664 665 static void insertCFISameValue(const MCInstrDesc &Desc, MachineFunction &MF, 666 MachineBasicBlock &MBB, 667 MachineBasicBlock::iterator InsertPt, 668 unsigned DwarfReg) { 669 unsigned CFIIndex = 670 MF.addFrameInst(MCCFIInstruction::createSameValue(nullptr, DwarfReg)); 671 BuildMI(MBB, InsertPt, DebugLoc(), Desc).addCFIIndex(CFIIndex); 672 } 673 674 void AArch64FrameLowering::resetCFIToInitialState( 675 MachineBasicBlock &MBB) const { 676 677 MachineFunction &MF = *MBB.getParent(); 678 const auto &Subtarget = MF.getSubtarget<AArch64Subtarget>(); 679 const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); 680 const auto &TRI = 681 static_cast<const AArch64RegisterInfo &>(*Subtarget.getRegisterInfo()); 682 const auto &MFI = *MF.getInfo<AArch64FunctionInfo>(); 683 684 const MCInstrDesc &CFIDesc = TII.get(TargetOpcode::CFI_INSTRUCTION); 685 DebugLoc DL; 686 687 // Reset the CFA to `SP + 0`. 688 MachineBasicBlock::iterator InsertPt = MBB.begin(); 689 unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::cfiDefCfa( 690 nullptr, TRI.getDwarfRegNum(AArch64::SP, true), 0)); 691 BuildMI(MBB, InsertPt, DL, CFIDesc).addCFIIndex(CFIIndex); 692 693 // Flip the RA sign state. 694 if (MFI.shouldSignReturnAddress(MF)) { 695 CFIIndex = MF.addFrameInst(MCCFIInstruction::createNegateRAState(nullptr)); 696 BuildMI(MBB, InsertPt, DL, CFIDesc).addCFIIndex(CFIIndex); 697 } 698 699 // Shadow call stack uses X18, reset it. 700 if (MFI.needsShadowCallStackPrologueEpilogue(MF)) 701 insertCFISameValue(CFIDesc, MF, MBB, InsertPt, 702 TRI.getDwarfRegNum(AArch64::X18, true)); 703 704 // Emit .cfi_same_value for callee-saved registers. 
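// (Roughly: for each callee-saved register that needs CFI this emits a
// directive such as ".cfi_same_value w19", telling the unwinder that the
// register still holds the value it had on function entry.)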
705 const std::vector<CalleeSavedInfo> &CSI = 706 MF.getFrameInfo().getCalleeSavedInfo(); 707 for (const auto &Info : CSI) { 708 unsigned Reg = Info.getReg(); 709 if (!TRI.regNeedsCFI(Reg, Reg)) 710 continue; 711 insertCFISameValue(CFIDesc, MF, MBB, InsertPt, 712 TRI.getDwarfRegNum(Reg, true)); 713 } 714 } 715 716 static void emitCalleeSavedRestores(MachineBasicBlock &MBB, 717 MachineBasicBlock::iterator MBBI, 718 bool SVE) { 719 MachineFunction &MF = *MBB.getParent(); 720 MachineFrameInfo &MFI = MF.getFrameInfo(); 721 722 const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo(); 723 if (CSI.empty()) 724 return; 725 726 const TargetSubtargetInfo &STI = MF.getSubtarget(); 727 const TargetRegisterInfo &TRI = *STI.getRegisterInfo(); 728 const TargetInstrInfo &TII = *STI.getInstrInfo(); 729 DebugLoc DL = MBB.findDebugLoc(MBBI); 730 731 for (const auto &Info : CSI) { 732 if (SVE != 733 (MFI.getStackID(Info.getFrameIdx()) == TargetStackID::ScalableVector)) 734 continue; 735 736 unsigned Reg = Info.getReg(); 737 if (SVE && 738 !static_cast<const AArch64RegisterInfo &>(TRI).regNeedsCFI(Reg, Reg)) 739 continue; 740 741 if (!Info.isRestored()) 742 continue; 743 744 unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createRestore( 745 nullptr, TRI.getDwarfRegNum(Info.getReg(), true))); 746 BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION)) 747 .addCFIIndex(CFIIndex) 748 .setMIFlags(MachineInstr::FrameDestroy); 749 } 750 } 751 752 void AArch64FrameLowering::emitCalleeSavedGPRRestores( 753 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const { 754 emitCalleeSavedRestores(MBB, MBBI, false); 755 } 756 757 void AArch64FrameLowering::emitCalleeSavedSVERestores( 758 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const { 759 emitCalleeSavedRestores(MBB, MBBI, true); 760 } 761 762 // Return the maximum possible number of bytes for `Size` due to the 763 // architectural limit on the size of a SVE register. 764 static int64_t upperBound(StackOffset Size) { 765 static const int64_t MAX_BYTES_PER_SCALABLE_BYTE = 16; 766 return Size.getScalable() * MAX_BYTES_PER_SCALABLE_BYTE + Size.getFixed(); 767 } 768 769 void AArch64FrameLowering::allocateStackSpace( 770 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, 771 int64_t RealignmentPadding, StackOffset AllocSize, bool NeedsWinCFI, 772 bool *HasWinCFI, bool EmitCFI, StackOffset InitialOffset, 773 bool FollowupAllocs) const { 774 775 if (!AllocSize) 776 return; 777 778 DebugLoc DL; 779 MachineFunction &MF = *MBB.getParent(); 780 const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>(); 781 const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); 782 AArch64FunctionInfo &AFI = *MF.getInfo<AArch64FunctionInfo>(); 783 const MachineFrameInfo &MFI = MF.getFrameInfo(); 784 785 const int64_t MaxAlign = MFI.getMaxAlign().value(); 786 const uint64_t AndMask = ~(MaxAlign - 1); 787 788 if (!Subtarget.getTargetLowering()->hasInlineStackProbe(MF)) { 789 Register TargetReg = RealignmentPadding 790 ? 
findScratchNonCalleeSaveRegister(&MBB) 791 : AArch64::SP; 792 // SUB Xd/SP, SP, AllocSize 793 emitFrameOffset(MBB, MBBI, DL, TargetReg, AArch64::SP, -AllocSize, &TII, 794 MachineInstr::FrameSetup, false, NeedsWinCFI, HasWinCFI, 795 EmitCFI, InitialOffset); 796 797 if (RealignmentPadding) { 798 // AND SP, X9, 0b11111...0000 799 BuildMI(MBB, MBBI, DL, TII.get(AArch64::ANDXri), AArch64::SP) 800 .addReg(TargetReg, RegState::Kill) 801 .addImm(AArch64_AM::encodeLogicalImmediate(AndMask, 64)) 802 .setMIFlags(MachineInstr::FrameSetup); 803 AFI.setStackRealigned(true); 804 805 // No need for SEH instructions here; if we're realigning the stack, 806 // we've set a frame pointer and already finished the SEH prologue. 807 assert(!NeedsWinCFI); 808 } 809 return; 810 } 811 812 // 813 // Stack probing allocation. 814 // 815 816 // Fixed length allocation. If we don't need to re-align the stack and don't 817 // have SVE objects, we can use a more efficient sequence for stack probing. 818 if (AllocSize.getScalable() == 0 && RealignmentPadding == 0) { 819 Register ScratchReg = findScratchNonCalleeSaveRegister(&MBB); 820 assert(ScratchReg != AArch64::NoRegister); 821 BuildMI(MBB, MBBI, DL, TII.get(AArch64::PROBED_STACKALLOC)) 822 .addDef(ScratchReg) 823 .addImm(AllocSize.getFixed()) 824 .addImm(InitialOffset.getFixed()) 825 .addImm(InitialOffset.getScalable()); 826 // The fixed allocation may leave unprobed bytes at the top of the 827 // stack. If we have subsequent alocation (e.g. if we have variable-sized 828 // objects), we need to issue an extra probe, so these allocations start in 829 // a known state. 830 if (FollowupAllocs) { 831 // STR XZR, [SP] 832 BuildMI(MBB, MBBI, DL, TII.get(AArch64::STRXui)) 833 .addReg(AArch64::XZR) 834 .addReg(AArch64::SP) 835 .addImm(0) 836 .setMIFlags(MachineInstr::FrameSetup); 837 } 838 839 return; 840 } 841 842 // Variable length allocation. 843 844 // If the (unknown) allocation size cannot exceed the probe size, decrement 845 // the stack pointer right away. 846 int64_t ProbeSize = AFI.getStackProbeSize(); 847 if (upperBound(AllocSize) + RealignmentPadding <= ProbeSize) { 848 Register ScratchReg = RealignmentPadding 849 ? findScratchNonCalleeSaveRegister(&MBB) 850 : AArch64::SP; 851 assert(ScratchReg != AArch64::NoRegister); 852 // SUB Xd, SP, AllocSize 853 emitFrameOffset(MBB, MBBI, DL, ScratchReg, AArch64::SP, -AllocSize, &TII, 854 MachineInstr::FrameSetup, false, NeedsWinCFI, HasWinCFI, 855 EmitCFI, InitialOffset); 856 if (RealignmentPadding) { 857 // AND SP, Xn, 0b11111...0000 858 BuildMI(MBB, MBBI, DL, TII.get(AArch64::ANDXri), AArch64::SP) 859 .addReg(ScratchReg, RegState::Kill) 860 .addImm(AArch64_AM::encodeLogicalImmediate(AndMask, 64)) 861 .setMIFlags(MachineInstr::FrameSetup); 862 AFI.setStackRealigned(true); 863 } 864 if (FollowupAllocs || upperBound(AllocSize) + RealignmentPadding > 865 AArch64::StackProbeMaxUnprobedStack) { 866 // STR XZR, [SP] 867 BuildMI(MBB, MBBI, DL, TII.get(AArch64::STRXui)) 868 .addReg(AArch64::XZR) 869 .addReg(AArch64::SP) 870 .addImm(0) 871 .setMIFlags(MachineInstr::FrameSetup); 872 } 873 return; 874 } 875 876 // Emit a variable-length allocation probing loop. 877 // TODO: As an optimisation, the loop can be "unrolled" into a few parts, 878 // each of them guaranteed to adjust the stack by less than the probe size. 
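// Conceptually, the emitted loop repeatedly performs
//   sub sp, sp, #probe-size
//   str xzr, [sp]
// until SP reaches the target value computed into TargetReg below, then sets
// SP to that target (a sketch; see the expansion of PROBED_STACKALLOC_VAR
// for the exact sequence).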
879 Register TargetReg = findScratchNonCalleeSaveRegister(&MBB); 880 assert(TargetReg != AArch64::NoRegister); 881 // SUB Xd, SP, AllocSize 882 emitFrameOffset(MBB, MBBI, DL, TargetReg, AArch64::SP, -AllocSize, &TII, 883 MachineInstr::FrameSetup, false, NeedsWinCFI, HasWinCFI, 884 EmitCFI, InitialOffset); 885 if (RealignmentPadding) { 886 // AND Xn, Xn, 0b11111...0000 887 BuildMI(MBB, MBBI, DL, TII.get(AArch64::ANDXri), TargetReg) 888 .addReg(TargetReg, RegState::Kill) 889 .addImm(AArch64_AM::encodeLogicalImmediate(AndMask, 64)) 890 .setMIFlags(MachineInstr::FrameSetup); 891 } 892 893 BuildMI(MBB, MBBI, DL, TII.get(AArch64::PROBED_STACKALLOC_VAR)) 894 .addReg(TargetReg); 895 if (EmitCFI) { 896 // Set the CFA register back to SP. 897 unsigned Reg = 898 Subtarget.getRegisterInfo()->getDwarfRegNum(AArch64::SP, true); 899 unsigned CFIIndex = 900 MF.addFrameInst(MCCFIInstruction::createDefCfaRegister(nullptr, Reg)); 901 BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION)) 902 .addCFIIndex(CFIIndex) 903 .setMIFlags(MachineInstr::FrameSetup); 904 } 905 if (RealignmentPadding) 906 AFI.setStackRealigned(true); 907 } 908 909 static MCRegister getRegisterOrZero(MCRegister Reg, bool HasSVE) { 910 switch (Reg.id()) { 911 default: 912 // The called routine is expected to preserve r19-r28 913 // r29 and r30 are used as frame pointer and link register resp. 914 return 0; 915 916 // GPRs 917 #define CASE(n) \ 918 case AArch64::W##n: \ 919 case AArch64::X##n: \ 920 return AArch64::X##n 921 CASE(0); 922 CASE(1); 923 CASE(2); 924 CASE(3); 925 CASE(4); 926 CASE(5); 927 CASE(6); 928 CASE(7); 929 CASE(8); 930 CASE(9); 931 CASE(10); 932 CASE(11); 933 CASE(12); 934 CASE(13); 935 CASE(14); 936 CASE(15); 937 CASE(16); 938 CASE(17); 939 CASE(18); 940 #undef CASE 941 942 // FPRs 943 #define CASE(n) \ 944 case AArch64::B##n: \ 945 case AArch64::H##n: \ 946 case AArch64::S##n: \ 947 case AArch64::D##n: \ 948 case AArch64::Q##n: \ 949 return HasSVE ? AArch64::Z##n : AArch64::Q##n 950 CASE(0); 951 CASE(1); 952 CASE(2); 953 CASE(3); 954 CASE(4); 955 CASE(5); 956 CASE(6); 957 CASE(7); 958 CASE(8); 959 CASE(9); 960 CASE(10); 961 CASE(11); 962 CASE(12); 963 CASE(13); 964 CASE(14); 965 CASE(15); 966 CASE(16); 967 CASE(17); 968 CASE(18); 969 CASE(19); 970 CASE(20); 971 CASE(21); 972 CASE(22); 973 CASE(23); 974 CASE(24); 975 CASE(25); 976 CASE(26); 977 CASE(27); 978 CASE(28); 979 CASE(29); 980 CASE(30); 981 CASE(31); 982 #undef CASE 983 } 984 } 985 986 void AArch64FrameLowering::emitZeroCallUsedRegs(BitVector RegsToZero, 987 MachineBasicBlock &MBB) const { 988 // Insertion point. 989 MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator(); 990 991 // Fake a debug loc. 992 DebugLoc DL; 993 if (MBBI != MBB.end()) 994 DL = MBBI->getDebugLoc(); 995 996 const MachineFunction &MF = *MBB.getParent(); 997 const AArch64Subtarget &STI = MF.getSubtarget<AArch64Subtarget>(); 998 const AArch64RegisterInfo &TRI = *STI.getRegisterInfo(); 999 1000 BitVector GPRsToZero(TRI.getNumRegs()); 1001 BitVector FPRsToZero(TRI.getNumRegs()); 1002 bool HasSVE = STI.hasSVE(); 1003 for (MCRegister Reg : RegsToZero.set_bits()) { 1004 if (TRI.isGeneralPurposeRegister(MF, Reg)) { 1005 // For GPRs, we only care to clear out the 64-bit register. 
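// e.g. a request to zero either W8 or X8 is mapped to X8 below.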
1006 if (MCRegister XReg = getRegisterOrZero(Reg, HasSVE))
1007 GPRsToZero.set(XReg);
1008 } else if (AArch64InstrInfo::isFpOrNEON(Reg)) {
1009 // For FPRs, zero the widest form of the register (Q, or Z when SVE is available).
1010 if (MCRegister XReg = getRegisterOrZero(Reg, HasSVE))
1011 FPRsToZero.set(XReg);
1012 }
1013 }
1014
1015 const AArch64InstrInfo &TII = *STI.getInstrInfo();
1016
1017 // Zero out GPRs.
1018 for (MCRegister Reg : GPRsToZero.set_bits())
1019 TII.buildClearRegister(Reg, MBB, MBBI, DL);
1020
1021 // Zero out FP/vector registers.
1022 for (MCRegister Reg : FPRsToZero.set_bits())
1023 TII.buildClearRegister(Reg, MBB, MBBI, DL);
1024
1025 if (HasSVE) {
1026 for (MCRegister PReg :
1027 {AArch64::P0, AArch64::P1, AArch64::P2, AArch64::P3, AArch64::P4,
1028 AArch64::P5, AArch64::P6, AArch64::P7, AArch64::P8, AArch64::P9,
1029 AArch64::P10, AArch64::P11, AArch64::P12, AArch64::P13, AArch64::P14,
1030 AArch64::P15}) {
1031 if (RegsToZero[PReg])
1032 BuildMI(MBB, MBBI, DL, TII.get(AArch64::PFALSE), PReg);
1033 }
1034 }
1035 }
1036
1037 static void getLiveRegsForEntryMBB(LivePhysRegs &LiveRegs,
1038 const MachineBasicBlock &MBB) {
1039 const MachineFunction *MF = MBB.getParent();
1040 LiveRegs.addLiveIns(MBB);
1041 // Mark callee saved registers as used so we will not choose them.
1042 const MCPhysReg *CSRegs = MF->getRegInfo().getCalleeSavedRegs();
1043 for (unsigned i = 0; CSRegs[i]; ++i)
1044 LiveRegs.addReg(CSRegs[i]);
1045 }
1046
1047 // Find a scratch register that we can use at the start of the prologue to
1048 // re-align the stack pointer. We avoid using callee-save registers since they
1049 // may appear to be free when this is called from canUseAsPrologue (during
1050 // shrink wrapping), but then no longer be free when this is called from
1051 // emitPrologue.
1052 //
1053 // FIXME: This is a bit conservative, since in the above case we could use one
1054 // of the callee-save registers as a scratch temp to re-align the stack pointer,
1055 // but we would then have to make sure that we were in fact saving at least one
1056 // callee-save register in the prologue, which is additional complexity that
1057 // doesn't seem worth the benefit.
1058 static Register findScratchNonCalleeSaveRegister(MachineBasicBlock *MBB) {
1059 MachineFunction *MF = MBB->getParent();
1060
1061 // If MBB is an entry block, use X9 as the scratch register.
1062 // However, preserve_none functions may be using X9 to pass arguments,
1063 // so prefer to pick an available register below in that case.
1064 if (&MF->front() == MBB &&
1065 MF->getFunction().getCallingConv() != CallingConv::PreserveNone)
1066 return AArch64::X9;
1067
1068 const AArch64Subtarget &Subtarget = MF->getSubtarget<AArch64Subtarget>();
1069 const AArch64RegisterInfo &TRI = *Subtarget.getRegisterInfo();
1070 LivePhysRegs LiveRegs(TRI);
1071 getLiveRegsForEntryMBB(LiveRegs, *MBB);
1072
1073 // Prefer X9 since it was historically used for the prologue scratch reg.
1074 const MachineRegisterInfo &MRI = MF->getRegInfo(); 1075 if (LiveRegs.available(MRI, AArch64::X9)) 1076 return AArch64::X9; 1077 1078 for (unsigned Reg : AArch64::GPR64RegClass) { 1079 if (LiveRegs.available(MRI, Reg)) 1080 return Reg; 1081 } 1082 return AArch64::NoRegister; 1083 } 1084 1085 bool AArch64FrameLowering::canUseAsPrologue( 1086 const MachineBasicBlock &MBB) const { 1087 const MachineFunction *MF = MBB.getParent(); 1088 MachineBasicBlock *TmpMBB = const_cast<MachineBasicBlock *>(&MBB); 1089 const AArch64Subtarget &Subtarget = MF->getSubtarget<AArch64Subtarget>(); 1090 const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo(); 1091 const AArch64TargetLowering *TLI = Subtarget.getTargetLowering(); 1092 const AArch64FunctionInfo *AFI = MF->getInfo<AArch64FunctionInfo>(); 1093 1094 if (AFI->hasSwiftAsyncContext()) { 1095 const AArch64RegisterInfo &TRI = *Subtarget.getRegisterInfo(); 1096 const MachineRegisterInfo &MRI = MF->getRegInfo(); 1097 LivePhysRegs LiveRegs(TRI); 1098 getLiveRegsForEntryMBB(LiveRegs, MBB); 1099 // The StoreSwiftAsyncContext clobbers X16 and X17. Make sure they are 1100 // available. 1101 if (!LiveRegs.available(MRI, AArch64::X16) || 1102 !LiveRegs.available(MRI, AArch64::X17)) 1103 return false; 1104 } 1105 1106 // Certain stack probing sequences might clobber flags, then we can't use 1107 // the block as a prologue if the flags register is a live-in. 1108 if (MF->getInfo<AArch64FunctionInfo>()->hasStackProbing() && 1109 MBB.isLiveIn(AArch64::NZCV)) 1110 return false; 1111 1112 // Don't need a scratch register if we're not going to re-align the stack or 1113 // emit stack probes. 1114 if (!RegInfo->hasStackRealignment(*MF) && !TLI->hasInlineStackProbe(*MF)) 1115 return true; 1116 // Otherwise, we can use any block as long as it has a scratch register 1117 // available. 1118 return findScratchNonCalleeSaveRegister(TmpMBB) != AArch64::NoRegister; 1119 } 1120 1121 static bool windowsRequiresStackProbe(MachineFunction &MF, 1122 uint64_t StackSizeInBytes) { 1123 const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>(); 1124 const AArch64FunctionInfo &MFI = *MF.getInfo<AArch64FunctionInfo>(); 1125 // TODO: When implementing stack protectors, take that into account 1126 // for the probe threshold. 1127 return Subtarget.isTargetWindows() && MFI.hasStackProbing() && 1128 StackSizeInBytes >= uint64_t(MFI.getStackProbeSize()); 1129 } 1130 1131 static bool needsWinCFI(const MachineFunction &MF) { 1132 const Function &F = MF.getFunction(); 1133 return MF.getTarget().getMCAsmInfo()->usesWindowsCFI() && 1134 F.needsUnwindTableEntry(); 1135 } 1136 1137 bool AArch64FrameLowering::shouldCombineCSRLocalStackBump( 1138 MachineFunction &MF, uint64_t StackBumpBytes) const { 1139 AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>(); 1140 const MachineFrameInfo &MFI = MF.getFrameInfo(); 1141 const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>(); 1142 const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo(); 1143 if (homogeneousPrologEpilog(MF)) 1144 return false; 1145 1146 if (AFI->getLocalStackSize() == 0) 1147 return false; 1148 1149 // For WinCFI, if optimizing for size, prefer to not combine the stack bump 1150 // (to force a stp with predecrement) to match the packed unwind format, 1151 // provided that there actually are any callee saved registers to merge the 1152 // decrement with. 
1153 // This is potentially marginally slower, but allows using the packed 1154 // unwind format for functions that both have a local area and callee saved 1155 // registers. Using the packed unwind format notably reduces the size of 1156 // the unwind info. 1157 if (needsWinCFI(MF) && AFI->getCalleeSavedStackSize() > 0 && 1158 MF.getFunction().hasOptSize()) 1159 return false; 1160 1161 // 512 is the maximum immediate for stp/ldp that will be used for 1162 // callee-save save/restores 1163 if (StackBumpBytes >= 512 || windowsRequiresStackProbe(MF, StackBumpBytes)) 1164 return false; 1165 1166 if (MFI.hasVarSizedObjects()) 1167 return false; 1168 1169 if (RegInfo->hasStackRealignment(MF)) 1170 return false; 1171 1172 // This isn't strictly necessary, but it simplifies things a bit since the 1173 // current RedZone handling code assumes the SP is adjusted by the 1174 // callee-save save/restore code. 1175 if (canUseRedZone(MF)) 1176 return false; 1177 1178 // When there is an SVE area on the stack, always allocate the 1179 // callee-saves and spills/locals separately. 1180 if (getSVEStackSize(MF)) 1181 return false; 1182 1183 return true; 1184 } 1185 1186 bool AArch64FrameLowering::shouldCombineCSRLocalStackBumpInEpilogue( 1187 MachineBasicBlock &MBB, unsigned StackBumpBytes) const { 1188 if (!shouldCombineCSRLocalStackBump(*MBB.getParent(), StackBumpBytes)) 1189 return false; 1190 1191 if (MBB.empty()) 1192 return true; 1193 1194 // Disable combined SP bump if the last instruction is an MTE tag store. It 1195 // is almost always better to merge SP adjustment into those instructions. 1196 MachineBasicBlock::iterator LastI = MBB.getFirstTerminator(); 1197 MachineBasicBlock::iterator Begin = MBB.begin(); 1198 while (LastI != Begin) { 1199 --LastI; 1200 if (LastI->isTransient()) 1201 continue; 1202 if (!LastI->getFlag(MachineInstr::FrameDestroy)) 1203 break; 1204 } 1205 switch (LastI->getOpcode()) { 1206 case AArch64::STGloop: 1207 case AArch64::STZGloop: 1208 case AArch64::STGi: 1209 case AArch64::STZGi: 1210 case AArch64::ST2Gi: 1211 case AArch64::STZ2Gi: 1212 return false; 1213 default: 1214 return true; 1215 } 1216 llvm_unreachable("unreachable"); 1217 } 1218 1219 // Given a load or a store instruction, generate an appropriate unwinding SEH 1220 // code on Windows. 
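// For example (roughly): a prologue save such as
//   stp x19, x20, [sp, #-32]!
// gets a following SEH_SaveRegP_X pseudo, which is printed as a
// .seh_save_regp_x directive so the Windows unwinder can undo the store.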
1221 static MachineBasicBlock::iterator InsertSEH(MachineBasicBlock::iterator MBBI, 1222 const TargetInstrInfo &TII, 1223 MachineInstr::MIFlag Flag) { 1224 unsigned Opc = MBBI->getOpcode(); 1225 MachineBasicBlock *MBB = MBBI->getParent(); 1226 MachineFunction &MF = *MBB->getParent(); 1227 DebugLoc DL = MBBI->getDebugLoc(); 1228 unsigned ImmIdx = MBBI->getNumOperands() - 1; 1229 int Imm = MBBI->getOperand(ImmIdx).getImm(); 1230 MachineInstrBuilder MIB; 1231 const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>(); 1232 const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo(); 1233 1234 switch (Opc) { 1235 default: 1236 llvm_unreachable("No SEH Opcode for this instruction"); 1237 case AArch64::LDPDpost: 1238 Imm = -Imm; 1239 [[fallthrough]]; 1240 case AArch64::STPDpre: { 1241 unsigned Reg0 = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg()); 1242 unsigned Reg1 = RegInfo->getSEHRegNum(MBBI->getOperand(2).getReg()); 1243 MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFRegP_X)) 1244 .addImm(Reg0) 1245 .addImm(Reg1) 1246 .addImm(Imm * 8) 1247 .setMIFlag(Flag); 1248 break; 1249 } 1250 case AArch64::LDPXpost: 1251 Imm = -Imm; 1252 [[fallthrough]]; 1253 case AArch64::STPXpre: { 1254 Register Reg0 = MBBI->getOperand(1).getReg(); 1255 Register Reg1 = MBBI->getOperand(2).getReg(); 1256 if (Reg0 == AArch64::FP && Reg1 == AArch64::LR) 1257 MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFPLR_X)) 1258 .addImm(Imm * 8) 1259 .setMIFlag(Flag); 1260 else 1261 MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveRegP_X)) 1262 .addImm(RegInfo->getSEHRegNum(Reg0)) 1263 .addImm(RegInfo->getSEHRegNum(Reg1)) 1264 .addImm(Imm * 8) 1265 .setMIFlag(Flag); 1266 break; 1267 } 1268 case AArch64::LDRDpost: 1269 Imm = -Imm; 1270 [[fallthrough]]; 1271 case AArch64::STRDpre: { 1272 unsigned Reg = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg()); 1273 MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFReg_X)) 1274 .addImm(Reg) 1275 .addImm(Imm) 1276 .setMIFlag(Flag); 1277 break; 1278 } 1279 case AArch64::LDRXpost: 1280 Imm = -Imm; 1281 [[fallthrough]]; 1282 case AArch64::STRXpre: { 1283 unsigned Reg = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg()); 1284 MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveReg_X)) 1285 .addImm(Reg) 1286 .addImm(Imm) 1287 .setMIFlag(Flag); 1288 break; 1289 } 1290 case AArch64::STPDi: 1291 case AArch64::LDPDi: { 1292 unsigned Reg0 = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg()); 1293 unsigned Reg1 = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg()); 1294 MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFRegP)) 1295 .addImm(Reg0) 1296 .addImm(Reg1) 1297 .addImm(Imm * 8) 1298 .setMIFlag(Flag); 1299 break; 1300 } 1301 case AArch64::STPXi: 1302 case AArch64::LDPXi: { 1303 Register Reg0 = MBBI->getOperand(0).getReg(); 1304 Register Reg1 = MBBI->getOperand(1).getReg(); 1305 if (Reg0 == AArch64::FP && Reg1 == AArch64::LR) 1306 MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFPLR)) 1307 .addImm(Imm * 8) 1308 .setMIFlag(Flag); 1309 else 1310 MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveRegP)) 1311 .addImm(RegInfo->getSEHRegNum(Reg0)) 1312 .addImm(RegInfo->getSEHRegNum(Reg1)) 1313 .addImm(Imm * 8) 1314 .setMIFlag(Flag); 1315 break; 1316 } 1317 case AArch64::STRXui: 1318 case AArch64::LDRXui: { 1319 int Reg = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg()); 1320 MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveReg)) 1321 .addImm(Reg) 1322 .addImm(Imm * 8) 1323 .setMIFlag(Flag); 1324 break; 1325 } 1326 case AArch64::STRDui: 1327 case AArch64::LDRDui: { 1328 unsigned Reg = 
RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg()); 1329 MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFReg)) 1330 .addImm(Reg) 1331 .addImm(Imm * 8) 1332 .setMIFlag(Flag); 1333 break; 1334 } 1335 case AArch64::STPQi: 1336 case AArch64::LDPQi: { 1337 unsigned Reg0 = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg()); 1338 unsigned Reg1 = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg()); 1339 MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveAnyRegQP)) 1340 .addImm(Reg0) 1341 .addImm(Reg1) 1342 .addImm(Imm * 16) 1343 .setMIFlag(Flag); 1344 break; 1345 } 1346 case AArch64::LDPQpost: 1347 Imm = -Imm; 1348 [[fallthrough]]; 1349 case AArch64::STPQpre: { 1350 unsigned Reg0 = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg()); 1351 unsigned Reg1 = RegInfo->getSEHRegNum(MBBI->getOperand(2).getReg()); 1352 MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveAnyRegQPX)) 1353 .addImm(Reg0) 1354 .addImm(Reg1) 1355 .addImm(Imm * 16) 1356 .setMIFlag(Flag); 1357 break; 1358 } 1359 } 1360 auto I = MBB->insertAfter(MBBI, MIB); 1361 return I; 1362 } 1363 1364 // Fix up the SEH opcode associated with the save/restore instruction. 1365 static void fixupSEHOpcode(MachineBasicBlock::iterator MBBI, 1366 unsigned LocalStackSize) { 1367 MachineOperand *ImmOpnd = nullptr; 1368 unsigned ImmIdx = MBBI->getNumOperands() - 1; 1369 switch (MBBI->getOpcode()) { 1370 default: 1371 llvm_unreachable("Fix the offset in the SEH instruction"); 1372 case AArch64::SEH_SaveFPLR: 1373 case AArch64::SEH_SaveRegP: 1374 case AArch64::SEH_SaveReg: 1375 case AArch64::SEH_SaveFRegP: 1376 case AArch64::SEH_SaveFReg: 1377 case AArch64::SEH_SaveAnyRegQP: 1378 case AArch64::SEH_SaveAnyRegQPX: 1379 ImmOpnd = &MBBI->getOperand(ImmIdx); 1380 break; 1381 } 1382 if (ImmOpnd) 1383 ImmOpnd->setImm(ImmOpnd->getImm() + LocalStackSize); 1384 } 1385 1386 bool requiresGetVGCall(MachineFunction &MF) { 1387 AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>(); 1388 return AFI->hasStreamingModeChanges() && 1389 !MF.getSubtarget<AArch64Subtarget>().hasSVE(); 1390 } 1391 1392 bool isVGInstruction(MachineBasicBlock::iterator MBBI) { 1393 unsigned Opc = MBBI->getOpcode(); 1394 if (Opc == AArch64::CNTD_XPiI || Opc == AArch64::RDSVLI_XI || 1395 Opc == AArch64::UBFMXri) 1396 return true; 1397 1398 if (requiresGetVGCall(*MBBI->getMF())) { 1399 if (Opc == AArch64::ORRXrr) 1400 return true; 1401 1402 if (Opc == AArch64::BL) { 1403 auto Op1 = MBBI->getOperand(0); 1404 return Op1.isSymbol() && 1405 (StringRef(Op1.getSymbolName()) == "__arm_get_current_vg"); 1406 } 1407 } 1408 1409 return false; 1410 } 1411 1412 // Convert callee-save register save/restore instruction to do stack pointer 1413 // decrement/increment to allocate/deallocate the callee-save stack area by 1414 // converting store/load to use pre/post increment version. 1415 static MachineBasicBlock::iterator convertCalleeSaveRestoreToSPPrePostIncDec( 1416 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, 1417 const DebugLoc &DL, const TargetInstrInfo *TII, int CSStackSizeInc, 1418 bool NeedsWinCFI, bool *HasWinCFI, bool EmitCFI, 1419 MachineInstr::MIFlag FrameFlag = MachineInstr::FrameSetup, 1420 int CFAOffset = 0) { 1421 unsigned NewOpc; 1422 1423 // If the function contains streaming mode changes, we expect instructions 1424 // to calculate the value of VG before spilling. For locally-streaming 1425 // functions, we need to do this for both the streaming and non-streaming 1426 // vector length. Move past these instructions if necessary. 
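// (A rough illustration of the conversion done below: with a 16-byte
// callee-save area, the first spill "stp x29, x30, [sp]" becomes
// "stp x29, x30, [sp, #-16]!", folding the SP decrement into the store.)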
1427 MachineFunction &MF = *MBB.getParent(); 1428 AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>(); 1429 if (AFI->hasStreamingModeChanges()) 1430 while (isVGInstruction(MBBI)) 1431 ++MBBI; 1432 1433 switch (MBBI->getOpcode()) { 1434 default: 1435 llvm_unreachable("Unexpected callee-save save/restore opcode!"); 1436 case AArch64::STPXi: 1437 NewOpc = AArch64::STPXpre; 1438 break; 1439 case AArch64::STPDi: 1440 NewOpc = AArch64::STPDpre; 1441 break; 1442 case AArch64::STPQi: 1443 NewOpc = AArch64::STPQpre; 1444 break; 1445 case AArch64::STRXui: 1446 NewOpc = AArch64::STRXpre; 1447 break; 1448 case AArch64::STRDui: 1449 NewOpc = AArch64::STRDpre; 1450 break; 1451 case AArch64::STRQui: 1452 NewOpc = AArch64::STRQpre; 1453 break; 1454 case AArch64::LDPXi: 1455 NewOpc = AArch64::LDPXpost; 1456 break; 1457 case AArch64::LDPDi: 1458 NewOpc = AArch64::LDPDpost; 1459 break; 1460 case AArch64::LDPQi: 1461 NewOpc = AArch64::LDPQpost; 1462 break; 1463 case AArch64::LDRXui: 1464 NewOpc = AArch64::LDRXpost; 1465 break; 1466 case AArch64::LDRDui: 1467 NewOpc = AArch64::LDRDpost; 1468 break; 1469 case AArch64::LDRQui: 1470 NewOpc = AArch64::LDRQpost; 1471 break; 1472 } 1473 // Get rid of the SEH code associated with the old instruction. 1474 if (NeedsWinCFI) { 1475 auto SEH = std::next(MBBI); 1476 if (AArch64InstrInfo::isSEHInstruction(*SEH)) 1477 SEH->eraseFromParent(); 1478 } 1479 1480 TypeSize Scale = TypeSize::getFixed(1), Width = TypeSize::getFixed(0); 1481 int64_t MinOffset, MaxOffset; 1482 bool Success = static_cast<const AArch64InstrInfo *>(TII)->getMemOpInfo( 1483 NewOpc, Scale, Width, MinOffset, MaxOffset); 1484 (void)Success; 1485 assert(Success && "unknown load/store opcode"); 1486 1487 // If the first store isn't right where we want SP then we can't fold the 1488 // update in so create a normal arithmetic instruction instead. 1489 if (MBBI->getOperand(MBBI->getNumOperands() - 1).getImm() != 0 || 1490 CSStackSizeInc < MinOffset || CSStackSizeInc > MaxOffset) { 1491 // If we are destroying the frame, make sure we add the increment after the 1492 // last frame operation. 1493 if (FrameFlag == MachineInstr::FrameDestroy) 1494 ++MBBI; 1495 emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP, 1496 StackOffset::getFixed(CSStackSizeInc), TII, FrameFlag, 1497 false, false, nullptr, EmitCFI, 1498 StackOffset::getFixed(CFAOffset)); 1499 1500 return std::prev(MBBI); 1501 } 1502 1503 MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(NewOpc)); 1504 MIB.addReg(AArch64::SP, RegState::Define); 1505 1506 // Copy all operands other than the immediate offset. 1507 unsigned OpndIdx = 0; 1508 for (unsigned OpndEnd = MBBI->getNumOperands() - 1; OpndIdx < OpndEnd; 1509 ++OpndIdx) 1510 MIB.add(MBBI->getOperand(OpndIdx)); 1511 1512 assert(MBBI->getOperand(OpndIdx).getImm() == 0 && 1513 "Unexpected immediate offset in first/last callee-save save/restore " 1514 "instruction!"); 1515 assert(MBBI->getOperand(OpndIdx - 1).getReg() == AArch64::SP && 1516 "Unexpected base register in callee-save save/restore instruction!"); 1517 assert(CSStackSizeInc % Scale == 0); 1518 MIB.addImm(CSStackSizeInc / (int)Scale); 1519 1520 MIB.setMIFlags(MBBI->getFlags()); 1521 MIB.setMemRefs(MBBI->memoperands()); 1522 1523 // Generate a new SEH code that corresponds to the new instruction. 
1524 if (NeedsWinCFI) { 1525 *HasWinCFI = true; 1526 InsertSEH(*MIB, *TII, FrameFlag); 1527 } 1528 1529 if (EmitCFI) { 1530 unsigned CFIIndex = MF.addFrameInst( 1531 MCCFIInstruction::cfiDefCfaOffset(nullptr, CFAOffset - CSStackSizeInc)); 1532 BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION)) 1533 .addCFIIndex(CFIIndex) 1534 .setMIFlags(FrameFlag); 1535 } 1536 1537 return std::prev(MBB.erase(MBBI)); 1538 } 1539 1540 // Fixup callee-save register save/restore instructions to take into account 1541 // combined SP bump by adding the local stack size to the stack offsets. 1542 static void fixupCalleeSaveRestoreStackOffset(MachineInstr &MI, 1543 uint64_t LocalStackSize, 1544 bool NeedsWinCFI, 1545 bool *HasWinCFI) { 1546 if (AArch64InstrInfo::isSEHInstruction(MI)) 1547 return; 1548 1549 unsigned Opc = MI.getOpcode(); 1550 unsigned Scale; 1551 switch (Opc) { 1552 case AArch64::STPXi: 1553 case AArch64::STRXui: 1554 case AArch64::STPDi: 1555 case AArch64::STRDui: 1556 case AArch64::LDPXi: 1557 case AArch64::LDRXui: 1558 case AArch64::LDPDi: 1559 case AArch64::LDRDui: 1560 Scale = 8; 1561 break; 1562 case AArch64::STPQi: 1563 case AArch64::STRQui: 1564 case AArch64::LDPQi: 1565 case AArch64::LDRQui: 1566 Scale = 16; 1567 break; 1568 default: 1569 llvm_unreachable("Unexpected callee-save save/restore opcode!"); 1570 } 1571 1572 unsigned OffsetIdx = MI.getNumExplicitOperands() - 1; 1573 assert(MI.getOperand(OffsetIdx - 1).getReg() == AArch64::SP && 1574 "Unexpected base register in callee-save save/restore instruction!"); 1575 // Last operand is immediate offset that needs fixing. 1576 MachineOperand &OffsetOpnd = MI.getOperand(OffsetIdx); 1577 // All generated opcodes have scaled offsets. 1578 assert(LocalStackSize % Scale == 0); 1579 OffsetOpnd.setImm(OffsetOpnd.getImm() + LocalStackSize / Scale); 1580 1581 if (NeedsWinCFI) { 1582 *HasWinCFI = true; 1583 auto MBBI = std::next(MachineBasicBlock::iterator(MI)); 1584 assert(MBBI != MI.getParent()->end() && "Expecting a valid instruction"); 1585 assert(AArch64InstrInfo::isSEHInstruction(*MBBI) && 1586 "Expecting a SEH instruction"); 1587 fixupSEHOpcode(MBBI, LocalStackSize); 1588 } 1589 } 1590 1591 static bool isTargetWindows(const MachineFunction &MF) { 1592 return MF.getSubtarget<AArch64Subtarget>().isTargetWindows(); 1593 } 1594 1595 // Convenience function to determine whether I is an SVE callee save. 1596 static bool IsSVECalleeSave(MachineBasicBlock::iterator I) { 1597 switch (I->getOpcode()) { 1598 default: 1599 return false; 1600 case AArch64::PTRUE_C_B: 1601 case AArch64::LD1B_2Z_IMM: 1602 case AArch64::ST1B_2Z_IMM: 1603 case AArch64::STR_ZXI: 1604 case AArch64::STR_PXI: 1605 case AArch64::LDR_ZXI: 1606 case AArch64::LDR_PXI: 1607 return I->getFlag(MachineInstr::FrameSetup) || 1608 I->getFlag(MachineInstr::FrameDestroy); 1609 } 1610 } 1611 1612 static void emitShadowCallStackPrologue(const TargetInstrInfo &TII, 1613 MachineFunction &MF, 1614 MachineBasicBlock &MBB, 1615 MachineBasicBlock::iterator MBBI, 1616 const DebugLoc &DL, bool NeedsWinCFI, 1617 bool NeedsUnwindInfo) { 1618 // Shadow call stack prolog: str x30, [x18], #8 1619 BuildMI(MBB, MBBI, DL, TII.get(AArch64::STRXpost)) 1620 .addReg(AArch64::X18, RegState::Define) 1621 .addReg(AArch64::LR) 1622 .addReg(AArch64::X18) 1623 .addImm(8) 1624 .setMIFlag(MachineInstr::FrameSetup); 1625 1626 // This instruction also makes x18 live-in to the entry block. 
1627 MBB.addLiveIn(AArch64::X18); 1628 1629 if (NeedsWinCFI) 1630 BuildMI(MBB, MBBI, DL, TII.get(AArch64::SEH_Nop)) 1631 .setMIFlag(MachineInstr::FrameSetup); 1632 1633 if (NeedsUnwindInfo) { 1634 // Emit a CFI instruction that causes 8 to be subtracted from the value of 1635 // x18 when unwinding past this frame. 1636 static const char CFIInst[] = { 1637 dwarf::DW_CFA_val_expression, 1638 18, // register 1639 2, // length 1640 static_cast<char>(unsigned(dwarf::DW_OP_breg18)), 1641 static_cast<char>(-8) & 0x7f, // addend (sleb128) 1642 }; 1643 unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createEscape( 1644 nullptr, StringRef(CFIInst, sizeof(CFIInst)))); 1645 BuildMI(MBB, MBBI, DL, TII.get(AArch64::CFI_INSTRUCTION)) 1646 .addCFIIndex(CFIIndex) 1647 .setMIFlag(MachineInstr::FrameSetup); 1648 } 1649 } 1650 1651 static void emitShadowCallStackEpilogue(const TargetInstrInfo &TII, 1652 MachineFunction &MF, 1653 MachineBasicBlock &MBB, 1654 MachineBasicBlock::iterator MBBI, 1655 const DebugLoc &DL) { 1656 // Shadow call stack epilog: ldr x30, [x18, #-8]! 1657 BuildMI(MBB, MBBI, DL, TII.get(AArch64::LDRXpre)) 1658 .addReg(AArch64::X18, RegState::Define) 1659 .addReg(AArch64::LR, RegState::Define) 1660 .addReg(AArch64::X18) 1661 .addImm(-8) 1662 .setMIFlag(MachineInstr::FrameDestroy); 1663 1664 if (MF.getInfo<AArch64FunctionInfo>()->needsAsyncDwarfUnwindInfo(MF)) { 1665 unsigned CFIIndex = 1666 MF.addFrameInst(MCCFIInstruction::createRestore(nullptr, 18)); 1667 BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION)) 1668 .addCFIIndex(CFIIndex) 1669 .setMIFlags(MachineInstr::FrameDestroy); 1670 } 1671 } 1672 1673 // Define the current CFA rule to use the provided FP. 1674 static void emitDefineCFAWithFP(MachineFunction &MF, MachineBasicBlock &MBB, 1675 MachineBasicBlock::iterator MBBI, 1676 const DebugLoc &DL, unsigned FixedObject) { 1677 const AArch64Subtarget &STI = MF.getSubtarget<AArch64Subtarget>(); 1678 const AArch64RegisterInfo *TRI = STI.getRegisterInfo(); 1679 const TargetInstrInfo *TII = STI.getInstrInfo(); 1680 AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>(); 1681 1682 const int OffsetToFirstCalleeSaveFromFP = 1683 AFI->getCalleeSaveBaseToFrameRecordOffset() - 1684 AFI->getCalleeSavedStackSize(); 1685 Register FramePtr = TRI->getFrameRegister(MF); 1686 unsigned Reg = TRI->getDwarfRegNum(FramePtr, true); 1687 unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::cfiDefCfa( 1688 nullptr, Reg, FixedObject - OffsetToFirstCalleeSaveFromFP)); 1689 BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION)) 1690 .addCFIIndex(CFIIndex) 1691 .setMIFlags(MachineInstr::FrameSetup); 1692 } 1693 1694 #ifndef NDEBUG 1695 /// Collect live registers from the end of \p MI's parent up to (including) \p 1696 /// MI in \p LiveRegs. 
static void getLivePhysRegsUpTo(MachineInstr &MI, const TargetRegisterInfo &TRI,
                                LivePhysRegs &LiveRegs) {

  MachineBasicBlock &MBB = *MI.getParent();
  LiveRegs.addLiveOuts(MBB);
  for (const MachineInstr &MI :
       reverse(make_range(MI.getIterator(), MBB.instr_end())))
    LiveRegs.stepBackward(MI);
}
#endif

void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
                                        MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = MBB.begin();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const Function &F = MF.getFunction();
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();

  AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  bool EmitCFI = AFI->needsDwarfUnwindInfo(MF);
  bool EmitAsyncCFI = AFI->needsAsyncDwarfUnwindInfo(MF);
  bool HasFP = hasFP(MF);
  bool NeedsWinCFI = needsWinCFI(MF);
  bool HasWinCFI = false;
  auto Cleanup = make_scope_exit([&]() { MF.setHasWinCFI(HasWinCFI); });

  MachineBasicBlock::iterator End = MBB.end();
#ifndef NDEBUG
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  // Collect live registers from the end of MBB up to the start of the
  // existing frame setup instructions.
  MachineBasicBlock::iterator NonFrameStart = MBB.begin();
  while (NonFrameStart != End &&
         NonFrameStart->getFlag(MachineInstr::FrameSetup))
    ++NonFrameStart;

  LivePhysRegs LiveRegs(*TRI);
  if (NonFrameStart != MBB.end()) {
    getLivePhysRegsUpTo(*NonFrameStart, *TRI, LiveRegs);
    // Ignore registers used for stack management for now.
    LiveRegs.removeReg(AArch64::SP);
    LiveRegs.removeReg(AArch64::X19);
    LiveRegs.removeReg(AArch64::FP);
    LiveRegs.removeReg(AArch64::LR);

    // X0 will be clobbered by a call to __arm_get_current_vg in the prologue.
    // This is necessary to spill VG if required where SVE is unavailable, but
    // X0 is preserved around this call.
    if (requiresGetVGCall(MF))
      LiveRegs.removeReg(AArch64::X0);
  }

  auto VerifyClobberOnExit = make_scope_exit([&]() {
    if (NonFrameStart == MBB.end())
      return;
    // Check if any of the newly added instructions clobber any of the live
    // registers.
    for (MachineInstr &MI :
         make_range(MBB.instr_begin(), NonFrameStart->getIterator())) {
      for (auto &Op : MI.operands())
        if (Op.isReg() && Op.isDef())
          assert(!LiveRegs.contains(Op.getReg()) &&
                 "live register clobbered by inserted prologue instructions");
    }
  });
#endif

  bool IsFunclet = MBB.isEHFuncletEntry();

  // At this point, we're going to decide whether or not the function uses a
  // redzone. In most cases, the function doesn't have a redzone so let's
  // assume that's false and set it to true in the case that there's a redzone.
  AFI->setHasRedZone(false);

  // Debug location must be unknown since the first debug location is used
  // to determine the end of the prologue.
1774 DebugLoc DL; 1775 1776 const auto &MFnI = *MF.getInfo<AArch64FunctionInfo>(); 1777 if (MFnI.needsShadowCallStackPrologueEpilogue(MF)) 1778 emitShadowCallStackPrologue(*TII, MF, MBB, MBBI, DL, NeedsWinCFI, 1779 MFnI.needsDwarfUnwindInfo(MF)); 1780 1781 if (MFnI.shouldSignReturnAddress(MF)) { 1782 BuildMI(MBB, MBBI, DL, TII->get(AArch64::PAUTH_PROLOGUE)) 1783 .setMIFlag(MachineInstr::FrameSetup); 1784 if (NeedsWinCFI) 1785 HasWinCFI = true; // AArch64PointerAuth pass will insert SEH_PACSignLR 1786 } 1787 1788 if (EmitCFI && MFnI.isMTETagged()) { 1789 BuildMI(MBB, MBBI, DL, TII->get(AArch64::EMITMTETAGGED)) 1790 .setMIFlag(MachineInstr::FrameSetup); 1791 } 1792 1793 // We signal the presence of a Swift extended frame to external tools by 1794 // storing FP with 0b0001 in bits 63:60. In normal userland operation a simple 1795 // ORR is sufficient, it is assumed a Swift kernel would initialize the TBI 1796 // bits so that is still true. 1797 if (HasFP && AFI->hasSwiftAsyncContext()) { 1798 switch (MF.getTarget().Options.SwiftAsyncFramePointer) { 1799 case SwiftAsyncFramePointerMode::DeploymentBased: 1800 if (Subtarget.swiftAsyncContextIsDynamicallySet()) { 1801 // The special symbol below is absolute and has a *value* that can be 1802 // combined with the frame pointer to signal an extended frame. 1803 BuildMI(MBB, MBBI, DL, TII->get(AArch64::LOADgot), AArch64::X16) 1804 .addExternalSymbol("swift_async_extendedFramePointerFlags", 1805 AArch64II::MO_GOT); 1806 if (NeedsWinCFI) { 1807 BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_Nop)) 1808 .setMIFlags(MachineInstr::FrameSetup); 1809 HasWinCFI = true; 1810 } 1811 BuildMI(MBB, MBBI, DL, TII->get(AArch64::ORRXrs), AArch64::FP) 1812 .addUse(AArch64::FP) 1813 .addUse(AArch64::X16) 1814 .addImm(Subtarget.isTargetILP32() ? 32 : 0); 1815 if (NeedsWinCFI) { 1816 BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_Nop)) 1817 .setMIFlags(MachineInstr::FrameSetup); 1818 HasWinCFI = true; 1819 } 1820 break; 1821 } 1822 [[fallthrough]]; 1823 1824 case SwiftAsyncFramePointerMode::Always: 1825 // ORR x29, x29, #0x1000_0000_0000_0000 1826 BuildMI(MBB, MBBI, DL, TII->get(AArch64::ORRXri), AArch64::FP) 1827 .addUse(AArch64::FP) 1828 .addImm(0x1100) 1829 .setMIFlag(MachineInstr::FrameSetup); 1830 if (NeedsWinCFI) { 1831 BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_Nop)) 1832 .setMIFlags(MachineInstr::FrameSetup); 1833 HasWinCFI = true; 1834 } 1835 break; 1836 1837 case SwiftAsyncFramePointerMode::Never: 1838 break; 1839 } 1840 } 1841 1842 // All calls are tail calls in GHC calling conv, and functions have no 1843 // prologue/epilogue. 1844 if (MF.getFunction().getCallingConv() == CallingConv::GHC) 1845 return; 1846 1847 // Set tagged base pointer to the requested stack slot. 1848 // Ideally it should match SP value after prologue. 1849 std::optional<int> TBPI = AFI->getTaggedBasePointerIndex(); 1850 if (TBPI) 1851 AFI->setTaggedBasePointerOffset(-MFI.getObjectOffset(*TBPI)); 1852 else 1853 AFI->setTaggedBasePointerOffset(MFI.getStackSize()); 1854 1855 const StackOffset &SVEStackSize = getSVEStackSize(MF); 1856 1857 // getStackSize() includes all the locals in its size calculation. We don't 1858 // include these locals when computing the stack size of a funclet, as they 1859 // are allocated in the parent's stack frame and accessed via the frame 1860 // pointer from the funclet. We only save the callee saved registers in the 1861 // funclet, which are really the callee saved registers of the parent 1862 // function, including the funclet. 
1863 int64_t NumBytes = 1864 IsFunclet ? getWinEHFuncletFrameSize(MF) : MFI.getStackSize(); 1865 if (!AFI->hasStackFrame() && !windowsRequiresStackProbe(MF, NumBytes)) { 1866 assert(!HasFP && "unexpected function without stack frame but with FP"); 1867 assert(!SVEStackSize && 1868 "unexpected function without stack frame but with SVE objects"); 1869 // All of the stack allocation is for locals. 1870 AFI->setLocalStackSize(NumBytes); 1871 if (!NumBytes) 1872 return; 1873 // REDZONE: If the stack size is less than 128 bytes, we don't need 1874 // to actually allocate. 1875 if (canUseRedZone(MF)) { 1876 AFI->setHasRedZone(true); 1877 ++NumRedZoneFunctions; 1878 } else { 1879 emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP, 1880 StackOffset::getFixed(-NumBytes), TII, 1881 MachineInstr::FrameSetup, false, NeedsWinCFI, &HasWinCFI); 1882 if (EmitCFI) { 1883 // Label used to tie together the PROLOG_LABEL and the MachineMoves. 1884 MCSymbol *FrameLabel = MF.getContext().createTempSymbol(); 1885 // Encode the stack size of the leaf function. 1886 unsigned CFIIndex = MF.addFrameInst( 1887 MCCFIInstruction::cfiDefCfaOffset(FrameLabel, NumBytes)); 1888 BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION)) 1889 .addCFIIndex(CFIIndex) 1890 .setMIFlags(MachineInstr::FrameSetup); 1891 } 1892 } 1893 1894 if (NeedsWinCFI) { 1895 HasWinCFI = true; 1896 BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_PrologEnd)) 1897 .setMIFlag(MachineInstr::FrameSetup); 1898 } 1899 1900 return; 1901 } 1902 1903 bool IsWin64 = Subtarget.isCallingConvWin64(F.getCallingConv(), F.isVarArg()); 1904 unsigned FixedObject = getFixedObjectSize(MF, AFI, IsWin64, IsFunclet); 1905 1906 auto PrologueSaveSize = AFI->getCalleeSavedStackSize() + FixedObject; 1907 // All of the remaining stack allocations are for locals. 1908 AFI->setLocalStackSize(NumBytes - PrologueSaveSize); 1909 bool CombineSPBump = shouldCombineCSRLocalStackBump(MF, NumBytes); 1910 bool HomPrologEpilog = homogeneousPrologEpilog(MF); 1911 if (CombineSPBump) { 1912 assert(!SVEStackSize && "Cannot combine SP bump with SVE"); 1913 emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP, 1914 StackOffset::getFixed(-NumBytes), TII, 1915 MachineInstr::FrameSetup, false, NeedsWinCFI, &HasWinCFI, 1916 EmitAsyncCFI); 1917 NumBytes = 0; 1918 } else if (HomPrologEpilog) { 1919 // Stack has been already adjusted. 1920 NumBytes -= PrologueSaveSize; 1921 } else if (PrologueSaveSize != 0) { 1922 MBBI = convertCalleeSaveRestoreToSPPrePostIncDec( 1923 MBB, MBBI, DL, TII, -PrologueSaveSize, NeedsWinCFI, &HasWinCFI, 1924 EmitAsyncCFI); 1925 NumBytes -= PrologueSaveSize; 1926 } 1927 assert(NumBytes >= 0 && "Negative stack allocation size!?"); 1928 1929 // Move past the saves of the callee-saved registers, fixing up the offsets 1930 // and pre-inc if we decided to combine the callee-save and local stack 1931 // pointer bump above. 1932 while (MBBI != End && MBBI->getFlag(MachineInstr::FrameSetup) && 1933 !IsSVECalleeSave(MBBI)) { 1934 // Move past instructions generated to calculate VG 1935 if (AFI->hasStreamingModeChanges()) 1936 while (isVGInstruction(MBBI)) 1937 ++MBBI; 1938 1939 if (CombineSPBump) 1940 fixupCalleeSaveRestoreStackOffset(*MBBI, AFI->getLocalStackSize(), 1941 NeedsWinCFI, &HasWinCFI); 1942 ++MBBI; 1943 } 1944 1945 // For funclets the FP belongs to the containing function. 1946 if (!IsFunclet && HasFP) { 1947 // Only set up FP if we actually need to. 
    int64_t FPOffset = AFI->getCalleeSaveBaseToFrameRecordOffset();

    if (CombineSPBump)
      FPOffset += AFI->getLocalStackSize();

    if (AFI->hasSwiftAsyncContext()) {
      // Before we update the live FP we have to ensure there's a valid (or
      // null) asynchronous context in its slot just before FP in the frame
      // record, so store it now.
      const auto &Attrs = MF.getFunction().getAttributes();
      bool HaveInitialContext = Attrs.hasAttrSomewhere(Attribute::SwiftAsync);
      if (HaveInitialContext)
        MBB.addLiveIn(AArch64::X22);
      Register Reg = HaveInitialContext ? AArch64::X22 : AArch64::XZR;
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::StoreSwiftAsyncContext))
          .addUse(Reg)
          .addUse(AArch64::SP)
          .addImm(FPOffset - 8)
          .setMIFlags(MachineInstr::FrameSetup);
      if (NeedsWinCFI) {
        // WinCFI and arm64e, where StoreSwiftAsyncContext is expanded
        // to multiple instructions, should be mutually-exclusive.
        assert(Subtarget.getTargetTriple().getArchName() != "arm64e");
        BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_Nop))
            .setMIFlags(MachineInstr::FrameSetup);
        HasWinCFI = true;
      }
    }

    if (HomPrologEpilog) {
      auto Prolog = MBBI;
      --Prolog;
      assert(Prolog->getOpcode() == AArch64::HOM_Prolog);
      Prolog->addOperand(MachineOperand::CreateImm(FPOffset));
    } else {
      // Issue "sub fp, sp, FPOffset", or "mov fp, sp" when FPOffset is zero.
      // Note: All stores of callee-saved registers are marked as "FrameSetup".
      // This code marks the instruction(s) that set the FP also.
      emitFrameOffset(MBB, MBBI, DL, AArch64::FP, AArch64::SP,
                      StackOffset::getFixed(FPOffset), TII,
                      MachineInstr::FrameSetup, false, NeedsWinCFI, &HasWinCFI);
      if (NeedsWinCFI && HasWinCFI) {
        BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_PrologEnd))
            .setMIFlag(MachineInstr::FrameSetup);
        // After setting up the FP, the rest of the prolog doesn't need to be
        // included in the SEH unwind info.
        NeedsWinCFI = false;
      }
    }
    if (EmitAsyncCFI)
      emitDefineCFAWithFP(MF, MBB, MBBI, DL, FixedObject);
  }

  // Now emit the moves for whatever callee saved regs we have (including FP,
  // LR if those are saved). Frame instructions for SVE registers are emitted
  // later, after the instructions which actually save the SVE regs.
  if (EmitAsyncCFI)
    emitCalleeSavedGPRLocations(MBB, MBBI);

  // Alignment is required for the parent frame, not the funclet.
  const bool NeedsRealignment =
      NumBytes && !IsFunclet && RegInfo->hasStackRealignment(MF);
  const int64_t RealignmentPadding =
      (NeedsRealignment && MFI.getMaxAlign() > Align(16))
          ? MFI.getMaxAlign().value() - 16
          : 0;

  if (windowsRequiresStackProbe(MF, NumBytes + RealignmentPadding)) {
    uint64_t NumWords = (NumBytes + RealignmentPadding) >> 4;
    if (NeedsWinCFI) {
      HasWinCFI = true;
      // alloc_l can hold at most 256MB, so assume that NumBytes doesn't
      // exceed this amount. We need to move at most 2^24 - 1 into x15.
      // This is at most two instructions, MOVZ followed by MOVK.
      // TODO: Fix to use multiple stack alloc unwind codes for stacks
      // exceeding 256MB in size.
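      // Worked example (hypothetical sizes): a 1 MiB allocation gives
      // NumWords == 0x10000, which is materialized as
      //     movz x15, #0x0000
      //     movk x15, #0x1, lsl #16
      // since the Windows stack probe helper expects the allocation size, in
      // units of 16 bytes, in x15.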
2025 if (NumBytes >= (1 << 28)) 2026 report_fatal_error("Stack size cannot exceed 256MB for stack " 2027 "unwinding purposes"); 2028 2029 uint32_t LowNumWords = NumWords & 0xFFFF; 2030 BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVZXi), AArch64::X15) 2031 .addImm(LowNumWords) 2032 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0)) 2033 .setMIFlag(MachineInstr::FrameSetup); 2034 BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_Nop)) 2035 .setMIFlag(MachineInstr::FrameSetup); 2036 if ((NumWords & 0xFFFF0000) != 0) { 2037 BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVKXi), AArch64::X15) 2038 .addReg(AArch64::X15) 2039 .addImm((NumWords & 0xFFFF0000) >> 16) // High half 2040 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 16)) 2041 .setMIFlag(MachineInstr::FrameSetup); 2042 BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_Nop)) 2043 .setMIFlag(MachineInstr::FrameSetup); 2044 } 2045 } else { 2046 BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVi64imm), AArch64::X15) 2047 .addImm(NumWords) 2048 .setMIFlags(MachineInstr::FrameSetup); 2049 } 2050 2051 const char *ChkStk = Subtarget.getChkStkName(); 2052 switch (MF.getTarget().getCodeModel()) { 2053 case CodeModel::Tiny: 2054 case CodeModel::Small: 2055 case CodeModel::Medium: 2056 case CodeModel::Kernel: 2057 BuildMI(MBB, MBBI, DL, TII->get(AArch64::BL)) 2058 .addExternalSymbol(ChkStk) 2059 .addReg(AArch64::X15, RegState::Implicit) 2060 .addReg(AArch64::X16, RegState::Implicit | RegState::Define | RegState::Dead) 2061 .addReg(AArch64::X17, RegState::Implicit | RegState::Define | RegState::Dead) 2062 .addReg(AArch64::NZCV, RegState::Implicit | RegState::Define | RegState::Dead) 2063 .setMIFlags(MachineInstr::FrameSetup); 2064 if (NeedsWinCFI) { 2065 HasWinCFI = true; 2066 BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_Nop)) 2067 .setMIFlag(MachineInstr::FrameSetup); 2068 } 2069 break; 2070 case CodeModel::Large: 2071 BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVaddrEXT)) 2072 .addReg(AArch64::X16, RegState::Define) 2073 .addExternalSymbol(ChkStk) 2074 .addExternalSymbol(ChkStk) 2075 .setMIFlags(MachineInstr::FrameSetup); 2076 if (NeedsWinCFI) { 2077 HasWinCFI = true; 2078 BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_Nop)) 2079 .setMIFlag(MachineInstr::FrameSetup); 2080 } 2081 2082 BuildMI(MBB, MBBI, DL, TII->get(getBLRCallOpcode(MF))) 2083 .addReg(AArch64::X16, RegState::Kill) 2084 .addReg(AArch64::X15, RegState::Implicit | RegState::Define) 2085 .addReg(AArch64::X16, RegState::Implicit | RegState::Define | RegState::Dead) 2086 .addReg(AArch64::X17, RegState::Implicit | RegState::Define | RegState::Dead) 2087 .addReg(AArch64::NZCV, RegState::Implicit | RegState::Define | RegState::Dead) 2088 .setMIFlags(MachineInstr::FrameSetup); 2089 if (NeedsWinCFI) { 2090 HasWinCFI = true; 2091 BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_Nop)) 2092 .setMIFlag(MachineInstr::FrameSetup); 2093 } 2094 break; 2095 } 2096 2097 BuildMI(MBB, MBBI, DL, TII->get(AArch64::SUBXrx64), AArch64::SP) 2098 .addReg(AArch64::SP, RegState::Kill) 2099 .addReg(AArch64::X15, RegState::Kill) 2100 .addImm(AArch64_AM::getArithExtendImm(AArch64_AM::UXTX, 4)) 2101 .setMIFlags(MachineInstr::FrameSetup); 2102 if (NeedsWinCFI) { 2103 HasWinCFI = true; 2104 BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_StackAlloc)) 2105 .addImm(NumBytes) 2106 .setMIFlag(MachineInstr::FrameSetup); 2107 } 2108 NumBytes = 0; 2109 2110 if (RealignmentPadding > 0) { 2111 if (RealignmentPadding >= 4096) { 2112 BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVi64imm)) 2113 .addReg(AArch64::X16, RegState::Define) 2114 
.addImm(RealignmentPadding) 2115 .setMIFlags(MachineInstr::FrameSetup); 2116 BuildMI(MBB, MBBI, DL, TII->get(AArch64::ADDXrx64), AArch64::X15) 2117 .addReg(AArch64::SP) 2118 .addReg(AArch64::X16, RegState::Kill) 2119 .addImm(AArch64_AM::getArithExtendImm(AArch64_AM::UXTX, 0)) 2120 .setMIFlag(MachineInstr::FrameSetup); 2121 } else { 2122 BuildMI(MBB, MBBI, DL, TII->get(AArch64::ADDXri), AArch64::X15) 2123 .addReg(AArch64::SP) 2124 .addImm(RealignmentPadding) 2125 .addImm(0) 2126 .setMIFlag(MachineInstr::FrameSetup); 2127 } 2128 2129 uint64_t AndMask = ~(MFI.getMaxAlign().value() - 1); 2130 BuildMI(MBB, MBBI, DL, TII->get(AArch64::ANDXri), AArch64::SP) 2131 .addReg(AArch64::X15, RegState::Kill) 2132 .addImm(AArch64_AM::encodeLogicalImmediate(AndMask, 64)); 2133 AFI->setStackRealigned(true); 2134 2135 // No need for SEH instructions here; if we're realigning the stack, 2136 // we've set a frame pointer and already finished the SEH prologue. 2137 assert(!NeedsWinCFI); 2138 } 2139 } 2140 2141 StackOffset SVECalleeSavesSize = {}, SVELocalsSize = SVEStackSize; 2142 MachineBasicBlock::iterator CalleeSavesBegin = MBBI, CalleeSavesEnd = MBBI; 2143 2144 // Process the SVE callee-saves to determine what space needs to be 2145 // allocated. 2146 if (int64_t CalleeSavedSize = AFI->getSVECalleeSavedStackSize()) { 2147 LLVM_DEBUG(dbgs() << "SVECalleeSavedStackSize = " << CalleeSavedSize 2148 << "\n"); 2149 // Find callee save instructions in frame. 2150 CalleeSavesBegin = MBBI; 2151 assert(IsSVECalleeSave(CalleeSavesBegin) && "Unexpected instruction"); 2152 while (IsSVECalleeSave(MBBI) && MBBI != MBB.getFirstTerminator()) 2153 ++MBBI; 2154 CalleeSavesEnd = MBBI; 2155 2156 SVECalleeSavesSize = StackOffset::getScalable(CalleeSavedSize); 2157 SVELocalsSize = SVEStackSize - SVECalleeSavesSize; 2158 } 2159 2160 // Allocate space for the callee saves (if any). 2161 StackOffset CFAOffset = 2162 StackOffset::getFixed((int64_t)MFI.getStackSize() - NumBytes); 2163 StackOffset LocalsSize = SVELocalsSize + StackOffset::getFixed(NumBytes); 2164 allocateStackSpace(MBB, CalleeSavesBegin, 0, SVECalleeSavesSize, false, 2165 nullptr, EmitAsyncCFI && !HasFP, CFAOffset, 2166 MFI.hasVarSizedObjects() || LocalsSize); 2167 CFAOffset += SVECalleeSavesSize; 2168 2169 if (EmitAsyncCFI) 2170 emitCalleeSavedSVELocations(MBB, CalleeSavesEnd); 2171 2172 // Allocate space for the rest of the frame including SVE locals. Align the 2173 // stack as necessary. 2174 assert(!(canUseRedZone(MF) && NeedsRealignment) && 2175 "Cannot use redzone with stack realignment"); 2176 if (!canUseRedZone(MF)) { 2177 // FIXME: in the case of dynamic re-alignment, NumBytes doesn't have 2178 // the correct value here, as NumBytes also includes padding bytes, 2179 // which shouldn't be counted here. 2180 allocateStackSpace(MBB, CalleeSavesEnd, RealignmentPadding, 2181 SVELocalsSize + StackOffset::getFixed(NumBytes), 2182 NeedsWinCFI, &HasWinCFI, EmitAsyncCFI && !HasFP, 2183 CFAOffset, MFI.hasVarSizedObjects()); 2184 } 2185 2186 // If we need a base pointer, set it up here. It's whatever the value of the 2187 // stack pointer is at this point. Any variable size objects will be allocated 2188 // after this, so we can still use the base pointer to reference locals. 2189 // 2190 // FIXME: Clarify FrameSetup flags here. 2191 // Note: Use emitFrameOffset() like above for FP if the FrameSetup flag is 2192 // needed. 2193 // For funclets the BP belongs to the containing function. 
2194 if (!IsFunclet && RegInfo->hasBasePointer(MF)) { 2195 TII->copyPhysReg(MBB, MBBI, DL, RegInfo->getBaseRegister(), AArch64::SP, 2196 false); 2197 if (NeedsWinCFI) { 2198 HasWinCFI = true; 2199 BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_Nop)) 2200 .setMIFlag(MachineInstr::FrameSetup); 2201 } 2202 } 2203 2204 // The very last FrameSetup instruction indicates the end of prologue. Emit a 2205 // SEH opcode indicating the prologue end. 2206 if (NeedsWinCFI && HasWinCFI) { 2207 BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_PrologEnd)) 2208 .setMIFlag(MachineInstr::FrameSetup); 2209 } 2210 2211 // SEH funclets are passed the frame pointer in X1. If the parent 2212 // function uses the base register, then the base register is used 2213 // directly, and is not retrieved from X1. 2214 if (IsFunclet && F.hasPersonalityFn()) { 2215 EHPersonality Per = classifyEHPersonality(F.getPersonalityFn()); 2216 if (isAsynchronousEHPersonality(Per)) { 2217 BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::COPY), AArch64::FP) 2218 .addReg(AArch64::X1) 2219 .setMIFlag(MachineInstr::FrameSetup); 2220 MBB.addLiveIn(AArch64::X1); 2221 } 2222 } 2223 2224 if (EmitCFI && !EmitAsyncCFI) { 2225 if (HasFP) { 2226 emitDefineCFAWithFP(MF, MBB, MBBI, DL, FixedObject); 2227 } else { 2228 StackOffset TotalSize = 2229 SVEStackSize + StackOffset::getFixed((int64_t)MFI.getStackSize()); 2230 unsigned CFIIndex = MF.addFrameInst(createDefCFA( 2231 *RegInfo, /*FrameReg=*/AArch64::SP, /*Reg=*/AArch64::SP, TotalSize, 2232 /*LastAdjustmentWasScalable=*/false)); 2233 BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION)) 2234 .addCFIIndex(CFIIndex) 2235 .setMIFlags(MachineInstr::FrameSetup); 2236 } 2237 emitCalleeSavedGPRLocations(MBB, MBBI); 2238 emitCalleeSavedSVELocations(MBB, MBBI); 2239 } 2240 } 2241 2242 static bool isFuncletReturnInstr(const MachineInstr &MI) { 2243 switch (MI.getOpcode()) { 2244 default: 2245 return false; 2246 case AArch64::CATCHRET: 2247 case AArch64::CLEANUPRET: 2248 return true; 2249 } 2250 } 2251 2252 void AArch64FrameLowering::emitEpilogue(MachineFunction &MF, 2253 MachineBasicBlock &MBB) const { 2254 MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr(); 2255 MachineFrameInfo &MFI = MF.getFrameInfo(); 2256 AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>(); 2257 const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>(); 2258 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 2259 DebugLoc DL; 2260 bool NeedsWinCFI = needsWinCFI(MF); 2261 bool EmitCFI = AFI->needsAsyncDwarfUnwindInfo(MF); 2262 bool HasWinCFI = false; 2263 bool IsFunclet = false; 2264 2265 if (MBB.end() != MBBI) { 2266 DL = MBBI->getDebugLoc(); 2267 IsFunclet = isFuncletReturnInstr(*MBBI); 2268 } 2269 2270 MachineBasicBlock::iterator EpilogStartI = MBB.end(); 2271 2272 auto FinishingTouches = make_scope_exit([&]() { 2273 if (AFI->shouldSignReturnAddress(MF)) { 2274 BuildMI(MBB, MBB.getFirstTerminator(), DL, 2275 TII->get(AArch64::PAUTH_EPILOGUE)) 2276 .setMIFlag(MachineInstr::FrameDestroy); 2277 if (NeedsWinCFI) 2278 HasWinCFI = true; // AArch64PointerAuth pass will insert SEH_PACSignLR 2279 } 2280 if (AFI->needsShadowCallStackPrologueEpilogue(MF)) 2281 emitShadowCallStackEpilogue(*TII, MF, MBB, MBB.getFirstTerminator(), DL); 2282 if (EmitCFI) 2283 emitCalleeSavedGPRRestores(MBB, MBB.getFirstTerminator()); 2284 if (HasWinCFI) { 2285 BuildMI(MBB, MBB.getFirstTerminator(), DL, 2286 TII->get(AArch64::SEH_EpilogEnd)) 2287 .setMIFlag(MachineInstr::FrameDestroy); 2288 if (!MF.hasWinCFI()) 2289 
        MF.setHasWinCFI(true);
    }
    if (NeedsWinCFI) {
      assert(EpilogStartI != MBB.end());
      if (!HasWinCFI)
        MBB.erase(EpilogStartI);
    }
  });

  int64_t NumBytes = IsFunclet ? getWinEHFuncletFrameSize(MF)
                               : MFI.getStackSize();

  // All calls are tail calls in GHC calling conv, and functions have no
  // prologue/epilogue.
  if (MF.getFunction().getCallingConv() == CallingConv::GHC)
    return;

  // How much of the stack used by incoming arguments this function is expected
  // to restore in this particular epilogue.
  int64_t ArgumentStackToRestore = getArgumentStackToRestore(MF, MBB);
  bool IsWin64 = Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv(),
                                              MF.getFunction().isVarArg());
  unsigned FixedObject = getFixedObjectSize(MF, AFI, IsWin64, IsFunclet);

  int64_t AfterCSRPopSize = ArgumentStackToRestore;
  auto PrologueSaveSize = AFI->getCalleeSavedStackSize() + FixedObject;
  // We cannot rely on the local stack size set in emitPrologue if the function
  // has funclets, as funclets have different local stack size requirements,
  // and the current value set in emitPrologue may be that of the containing
  // function.
  if (MF.hasEHFunclets())
    AFI->setLocalStackSize(NumBytes - PrologueSaveSize);
  if (homogeneousPrologEpilog(MF, &MBB)) {
    assert(!NeedsWinCFI);
    auto LastPopI = MBB.getFirstTerminator();
    if (LastPopI != MBB.begin()) {
      auto HomogeneousEpilog = std::prev(LastPopI);
      if (HomogeneousEpilog->getOpcode() == AArch64::HOM_Epilog)
        LastPopI = HomogeneousEpilog;
    }

    // Adjust the local stack.
    emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::SP,
                    StackOffset::getFixed(AFI->getLocalStackSize()), TII,
                    MachineInstr::FrameDestroy, false, NeedsWinCFI, &HasWinCFI);

    // SP has already been adjusted while restoring callee save regs.
    // We've bailed out of the case that adjusts SP for arguments.
    assert(AfterCSRPopSize == 0);
    return;
  }
  bool CombineSPBump = shouldCombineCSRLocalStackBumpInEpilogue(MBB, NumBytes);
  // Assume we can't combine the last pop with the sp restore.

  bool CombineAfterCSRBump = false;
  if (!CombineSPBump && PrologueSaveSize != 0) {
    MachineBasicBlock::iterator Pop = std::prev(MBB.getFirstTerminator());
    while (Pop->getOpcode() == TargetOpcode::CFI_INSTRUCTION ||
           AArch64InstrInfo::isSEHInstruction(*Pop))
      Pop = std::prev(Pop);
    // Converting the last ldp to a post-index ldp is valid only if the last
    // ldp's offset is 0.
    const MachineOperand &OffsetOp = Pop->getOperand(Pop->getNumOperands() - 1);
    // If the offset is 0 and the AfterCSR pop is not actually trying to
    // allocate more stack for arguments (in space that an untimely interrupt
    // may clobber), convert it to a post-index ldp.
    if (OffsetOp.getImm() == 0 && AfterCSRPopSize >= 0) {
      convertCalleeSaveRestoreToSPPrePostIncDec(
          MBB, Pop, DL, TII, PrologueSaveSize, NeedsWinCFI, &HasWinCFI, EmitCFI,
          MachineInstr::FrameDestroy, PrologueSaveSize);
    } else {
      // If not, make sure to emit an add after the last ldp.
      // We're doing this by transferring the size to be restored from the
      // adjustment *before* the CSR pops to the adjustment *after* the CSR
      // pops.
      AfterCSRPopSize += PrologueSaveSize;
      CombineAfterCSRBump = true;
    }
  }

  // Move past the restores of the callee-saved registers.
  // If we plan on combining the sp bump of the local stack size and the callee
  // save stack size, we might need to adjust the CSR save and restore offsets.
  MachineBasicBlock::iterator LastPopI = MBB.getFirstTerminator();
  MachineBasicBlock::iterator Begin = MBB.begin();
  while (LastPopI != Begin) {
    --LastPopI;
    if (!LastPopI->getFlag(MachineInstr::FrameDestroy) ||
        IsSVECalleeSave(LastPopI)) {
      ++LastPopI;
      break;
    } else if (CombineSPBump)
      fixupCalleeSaveRestoreStackOffset(*LastPopI, AFI->getLocalStackSize(),
                                        NeedsWinCFI, &HasWinCFI);
  }

  if (NeedsWinCFI) {
    // Note that there are cases where we insert SEH opcodes in the
    // epilogue when we had no SEH opcodes in the prologue. For
    // example, when there is no stack frame but there are stack
    // arguments. Insert the SEH_EpilogStart and remove it later if we
    // didn't emit any SEH opcodes, to avoid generating WinCFI for
    // functions that don't need it.
    BuildMI(MBB, LastPopI, DL, TII->get(AArch64::SEH_EpilogStart))
        .setMIFlag(MachineInstr::FrameDestroy);
    EpilogStartI = LastPopI;
    --EpilogStartI;
  }

  if (hasFP(MF) && AFI->hasSwiftAsyncContext()) {
    switch (MF.getTarget().Options.SwiftAsyncFramePointer) {
    case SwiftAsyncFramePointerMode::DeploymentBased:
      // Avoid the reload as it is GOT relative, and instead fall back to the
      // hardcoded value below. This allows a mismatch between the OS and
      // application without immediately terminating on the difference.
      [[fallthrough]];
    case SwiftAsyncFramePointerMode::Always:
      // We need to reset FP to its untagged state on return. Bit 60 is
      // currently used to show the presence of an extended frame.

      // BIC x29, x29, #0x1000_0000_0000_0000
      BuildMI(MBB, MBB.getFirstTerminator(), DL, TII->get(AArch64::ANDXri),
              AArch64::FP)
          .addUse(AArch64::FP)
          .addImm(0x10fe)
          .setMIFlag(MachineInstr::FrameDestroy);
      if (NeedsWinCFI) {
        BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_Nop))
            .setMIFlags(MachineInstr::FrameDestroy);
        HasWinCFI = true;
      }
      break;

    case SwiftAsyncFramePointerMode::Never:
      break;
    }
  }

  const StackOffset &SVEStackSize = getSVEStackSize(MF);

  // If there is a single SP update, insert it before the ret and we're done.
  if (CombineSPBump) {
    assert(!SVEStackSize && "Cannot combine SP bump with SVE");

    // When we are about to restore the CSRs, the CFA register is SP again.
    if (EmitCFI && hasFP(MF)) {
      const AArch64RegisterInfo &RegInfo = *Subtarget.getRegisterInfo();
      unsigned Reg = RegInfo.getDwarfRegNum(AArch64::SP, true);
      unsigned CFIIndex =
          MF.addFrameInst(MCCFIInstruction::cfiDefCfa(nullptr, Reg, NumBytes));
      BuildMI(MBB, LastPopI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex)
          .setMIFlags(MachineInstr::FrameDestroy);
    }

    emitFrameOffset(MBB, MBB.getFirstTerminator(), DL, AArch64::SP, AArch64::SP,
                    StackOffset::getFixed(NumBytes + (int64_t)AfterCSRPopSize),
                    TII, MachineInstr::FrameDestroy, false, NeedsWinCFI,
                    &HasWinCFI, EmitCFI, StackOffset::getFixed(NumBytes));
    return;
  }

  NumBytes -= PrologueSaveSize;
  assert(NumBytes >= 0 && "Negative stack allocation size!?");

  // Process the SVE callee-saves to determine what space needs to be
  // deallocated.
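  // Hypothetical sizes, for illustration only: with 32 scalable bytes of SVE
  // stack of which 16 are SVE callee-saves, DeallocateBefore below covers the
  // 16 scalable bytes of SVE locals and DeallocateAfter the 16 scalable bytes
  // of the callee-save area itself.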
  StackOffset DeallocateBefore = {}, DeallocateAfter = SVEStackSize;
  MachineBasicBlock::iterator RestoreBegin = LastPopI, RestoreEnd = LastPopI;
  if (int64_t CalleeSavedSize = AFI->getSVECalleeSavedStackSize()) {
    RestoreBegin = std::prev(RestoreEnd);
    while (RestoreBegin != MBB.begin() &&
           IsSVECalleeSave(std::prev(RestoreBegin)))
      --RestoreBegin;

    assert(IsSVECalleeSave(RestoreBegin) &&
           IsSVECalleeSave(std::prev(RestoreEnd)) && "Unexpected instruction");

    StackOffset CalleeSavedSizeAsOffset =
        StackOffset::getScalable(CalleeSavedSize);
    DeallocateBefore = SVEStackSize - CalleeSavedSizeAsOffset;
    DeallocateAfter = CalleeSavedSizeAsOffset;
  }

  // Deallocate the SVE area.
  if (SVEStackSize) {
    // If we have stack realignment or variable sized objects on the stack,
    // restore the stack pointer from the frame pointer prior to SVE CSR
    // restoration.
    if (AFI->isStackRealigned() || MFI.hasVarSizedObjects()) {
      if (int64_t CalleeSavedSize = AFI->getSVECalleeSavedStackSize()) {
        // Set SP to the start of the SVE callee-save area, from which the
        // callee saves can be reloaded. The code below will deallocate the
        // stack space by moving FP -> SP.
        emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP, AArch64::FP,
                        StackOffset::getScalable(-CalleeSavedSize), TII,
                        MachineInstr::FrameDestroy);
      }
    } else {
      if (AFI->getSVECalleeSavedStackSize()) {
        // Deallocate the non-SVE locals first before we can deallocate (and
        // restore callee saves) from the SVE area.
        emitFrameOffset(
            MBB, RestoreBegin, DL, AArch64::SP, AArch64::SP,
            StackOffset::getFixed(NumBytes), TII, MachineInstr::FrameDestroy,
            false, false, nullptr, EmitCFI && !hasFP(MF),
            SVEStackSize + StackOffset::getFixed(NumBytes + PrologueSaveSize));
        NumBytes = 0;
      }

      emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP, AArch64::SP,
                      DeallocateBefore, TII, MachineInstr::FrameDestroy, false,
                      false, nullptr, EmitCFI && !hasFP(MF),
                      SVEStackSize +
                          StackOffset::getFixed(NumBytes + PrologueSaveSize));

      emitFrameOffset(MBB, RestoreEnd, DL, AArch64::SP, AArch64::SP,
                      DeallocateAfter, TII, MachineInstr::FrameDestroy, false,
                      false, nullptr, EmitCFI && !hasFP(MF),
                      DeallocateAfter +
                          StackOffset::getFixed(NumBytes + PrologueSaveSize));
    }
    if (EmitCFI)
      emitCalleeSavedSVERestores(MBB, RestoreEnd);
  }

  if (!hasFP(MF)) {
    bool RedZone = canUseRedZone(MF);
    // If this was a redzone leaf function, we don't need to restore the
    // stack pointer (but we may need to pop stack args for fastcc).
    if (RedZone && AfterCSRPopSize == 0)
      return;

    // Pop the local variables off the stack. If there are no callee-saved
    // registers, it means we are actually positioned at the terminator and can
    // combine the stack increment for the locals and the stack increment for
    // callee-popped arguments into (possibly) a single instruction and be done.
    bool NoCalleeSaveRestore = PrologueSaveSize == 0;
    int64_t StackRestoreBytes = RedZone ? 0 : NumBytes;
    if (NoCalleeSaveRestore)
      StackRestoreBytes += AfterCSRPopSize;

    emitFrameOffset(
        MBB, LastPopI, DL, AArch64::SP, AArch64::SP,
        StackOffset::getFixed(StackRestoreBytes), TII,
        MachineInstr::FrameDestroy, false, NeedsWinCFI, &HasWinCFI, EmitCFI,
        StackOffset::getFixed((RedZone ?
0 : NumBytes) + PrologueSaveSize)); 2536 2537 // If we were able to combine the local stack pop with the argument pop, 2538 // then we're done. 2539 if (NoCalleeSaveRestore || AfterCSRPopSize == 0) { 2540 return; 2541 } 2542 2543 NumBytes = 0; 2544 } 2545 2546 // Restore the original stack pointer. 2547 // FIXME: Rather than doing the math here, we should instead just use 2548 // non-post-indexed loads for the restores if we aren't actually going to 2549 // be able to save any instructions. 2550 if (!IsFunclet && (MFI.hasVarSizedObjects() || AFI->isStackRealigned())) { 2551 emitFrameOffset( 2552 MBB, LastPopI, DL, AArch64::SP, AArch64::FP, 2553 StackOffset::getFixed(-AFI->getCalleeSaveBaseToFrameRecordOffset()), 2554 TII, MachineInstr::FrameDestroy, false, NeedsWinCFI, &HasWinCFI); 2555 } else if (NumBytes) 2556 emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::SP, 2557 StackOffset::getFixed(NumBytes), TII, 2558 MachineInstr::FrameDestroy, false, NeedsWinCFI, &HasWinCFI); 2559 2560 // When we are about to restore the CSRs, the CFA register is SP again. 2561 if (EmitCFI && hasFP(MF)) { 2562 const AArch64RegisterInfo &RegInfo = *Subtarget.getRegisterInfo(); 2563 unsigned Reg = RegInfo.getDwarfRegNum(AArch64::SP, true); 2564 unsigned CFIIndex = MF.addFrameInst( 2565 MCCFIInstruction::cfiDefCfa(nullptr, Reg, PrologueSaveSize)); 2566 BuildMI(MBB, LastPopI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION)) 2567 .addCFIIndex(CFIIndex) 2568 .setMIFlags(MachineInstr::FrameDestroy); 2569 } 2570 2571 // This must be placed after the callee-save restore code because that code 2572 // assumes the SP is at the same location as it was after the callee-save save 2573 // code in the prologue. 2574 if (AfterCSRPopSize) { 2575 assert(AfterCSRPopSize > 0 && "attempting to reallocate arg stack that an " 2576 "interrupt may have clobbered"); 2577 2578 emitFrameOffset( 2579 MBB, MBB.getFirstTerminator(), DL, AArch64::SP, AArch64::SP, 2580 StackOffset::getFixed(AfterCSRPopSize), TII, MachineInstr::FrameDestroy, 2581 false, NeedsWinCFI, &HasWinCFI, EmitCFI, 2582 StackOffset::getFixed(CombineAfterCSRBump ? PrologueSaveSize : 0)); 2583 } 2584 } 2585 2586 bool AArch64FrameLowering::enableCFIFixup(MachineFunction &MF) const { 2587 return TargetFrameLowering::enableCFIFixup(MF) && 2588 MF.getInfo<AArch64FunctionInfo>()->needsAsyncDwarfUnwindInfo(MF); 2589 } 2590 2591 /// getFrameIndexReference - Provide a base+offset reference to an FI slot for 2592 /// debug info. It's the same as what we use for resolving the code-gen 2593 /// references for now. FIXME: This can go wrong when references are 2594 /// SP-relative and simple call frames aren't used. 2595 StackOffset 2596 AArch64FrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI, 2597 Register &FrameReg) const { 2598 return resolveFrameIndexReference( 2599 MF, FI, FrameReg, 2600 /*PreferFP=*/ 2601 MF.getFunction().hasFnAttribute(Attribute::SanitizeHWAddress) || 2602 MF.getFunction().hasFnAttribute(Attribute::SanitizeMemTag), 2603 /*ForSimm=*/false); 2604 } 2605 2606 StackOffset 2607 AArch64FrameLowering::getFrameIndexReferenceFromSP(const MachineFunction &MF, 2608 int FI) const { 2609 // This function serves to provide a comparable offset from a single reference 2610 // point (the value of SP at function entry) that can be used for analysis, 2611 // e.g. the stack-frame-layout analysis pass. It is not guaranteed to be 2612 // correct for all objects in the presence of VLA-area objects or dynamic 2613 // stack re-alignment. 
2614 2615 const auto &MFI = MF.getFrameInfo(); 2616 2617 int64_t ObjectOffset = MFI.getObjectOffset(FI); 2618 2619 // This is correct in the absence of any SVE stack objects. 2620 StackOffset SVEStackSize = getSVEStackSize(MF); 2621 if (!SVEStackSize) 2622 return StackOffset::getFixed(ObjectOffset - getOffsetOfLocalArea()); 2623 2624 const auto *AFI = MF.getInfo<AArch64FunctionInfo>(); 2625 if (MFI.getStackID(FI) == TargetStackID::ScalableVector) { 2626 return StackOffset::get(-((int64_t)AFI->getCalleeSavedStackSize()), 2627 ObjectOffset); 2628 } 2629 2630 bool IsFixed = MFI.isFixedObjectIndex(FI); 2631 bool IsCSR = 2632 !IsFixed && ObjectOffset >= -((int)AFI->getCalleeSavedStackSize(MFI)); 2633 2634 StackOffset ScalableOffset = {}; 2635 if (!IsFixed && !IsCSR) 2636 ScalableOffset = -SVEStackSize; 2637 2638 return StackOffset::getFixed(ObjectOffset) + ScalableOffset; 2639 } 2640 2641 StackOffset 2642 AArch64FrameLowering::getNonLocalFrameIndexReference(const MachineFunction &MF, 2643 int FI) const { 2644 return StackOffset::getFixed(getSEHFrameIndexOffset(MF, FI)); 2645 } 2646 2647 static StackOffset getFPOffset(const MachineFunction &MF, 2648 int64_t ObjectOffset) { 2649 const auto *AFI = MF.getInfo<AArch64FunctionInfo>(); 2650 const auto &Subtarget = MF.getSubtarget<AArch64Subtarget>(); 2651 const Function &F = MF.getFunction(); 2652 bool IsWin64 = Subtarget.isCallingConvWin64(F.getCallingConv(), F.isVarArg()); 2653 unsigned FixedObject = 2654 getFixedObjectSize(MF, AFI, IsWin64, /*IsFunclet=*/false); 2655 int64_t CalleeSaveSize = AFI->getCalleeSavedStackSize(MF.getFrameInfo()); 2656 int64_t FPAdjust = 2657 CalleeSaveSize - AFI->getCalleeSaveBaseToFrameRecordOffset(); 2658 return StackOffset::getFixed(ObjectOffset + FixedObject + FPAdjust); 2659 } 2660 2661 static StackOffset getStackOffset(const MachineFunction &MF, 2662 int64_t ObjectOffset) { 2663 const auto &MFI = MF.getFrameInfo(); 2664 return StackOffset::getFixed(ObjectOffset + (int64_t)MFI.getStackSize()); 2665 } 2666 2667 // TODO: This function currently does not work for scalable vectors. 2668 int AArch64FrameLowering::getSEHFrameIndexOffset(const MachineFunction &MF, 2669 int FI) const { 2670 const auto *RegInfo = static_cast<const AArch64RegisterInfo *>( 2671 MF.getSubtarget().getRegisterInfo()); 2672 int ObjectOffset = MF.getFrameInfo().getObjectOffset(FI); 2673 return RegInfo->getLocalAddressRegister(MF) == AArch64::FP 2674 ? 
getFPOffset(MF, ObjectOffset).getFixed() 2675 : getStackOffset(MF, ObjectOffset).getFixed(); 2676 } 2677 2678 StackOffset AArch64FrameLowering::resolveFrameIndexReference( 2679 const MachineFunction &MF, int FI, Register &FrameReg, bool PreferFP, 2680 bool ForSimm) const { 2681 const auto &MFI = MF.getFrameInfo(); 2682 int64_t ObjectOffset = MFI.getObjectOffset(FI); 2683 bool isFixed = MFI.isFixedObjectIndex(FI); 2684 bool isSVE = MFI.getStackID(FI) == TargetStackID::ScalableVector; 2685 return resolveFrameOffsetReference(MF, ObjectOffset, isFixed, isSVE, FrameReg, 2686 PreferFP, ForSimm); 2687 } 2688 2689 StackOffset AArch64FrameLowering::resolveFrameOffsetReference( 2690 const MachineFunction &MF, int64_t ObjectOffset, bool isFixed, bool isSVE, 2691 Register &FrameReg, bool PreferFP, bool ForSimm) const { 2692 const auto &MFI = MF.getFrameInfo(); 2693 const auto *RegInfo = static_cast<const AArch64RegisterInfo *>( 2694 MF.getSubtarget().getRegisterInfo()); 2695 const auto *AFI = MF.getInfo<AArch64FunctionInfo>(); 2696 const auto &Subtarget = MF.getSubtarget<AArch64Subtarget>(); 2697 2698 int64_t FPOffset = getFPOffset(MF, ObjectOffset).getFixed(); 2699 int64_t Offset = getStackOffset(MF, ObjectOffset).getFixed(); 2700 bool isCSR = 2701 !isFixed && ObjectOffset >= -((int)AFI->getCalleeSavedStackSize(MFI)); 2702 2703 const StackOffset &SVEStackSize = getSVEStackSize(MF); 2704 2705 // Use frame pointer to reference fixed objects. Use it for locals if 2706 // there are VLAs or a dynamically realigned SP (and thus the SP isn't 2707 // reliable as a base). Make sure useFPForScavengingIndex() does the 2708 // right thing for the emergency spill slot. 2709 bool UseFP = false; 2710 if (AFI->hasStackFrame() && !isSVE) { 2711 // We shouldn't prefer using the FP to access fixed-sized stack objects when 2712 // there are scalable (SVE) objects in between the FP and the fixed-sized 2713 // objects. 2714 PreferFP &= !SVEStackSize; 2715 2716 // Note: Keeping the following as multiple 'if' statements rather than 2717 // merging to a single expression for readability. 2718 // 2719 // Argument access should always use the FP. 2720 if (isFixed) { 2721 UseFP = hasFP(MF); 2722 } else if (isCSR && RegInfo->hasStackRealignment(MF)) { 2723 // References to the CSR area must use FP if we're re-aligning the stack 2724 // since the dynamically-sized alignment padding is between the SP/BP and 2725 // the CSR area. 2726 assert(hasFP(MF) && "Re-aligned stack must have frame pointer"); 2727 UseFP = true; 2728 } else if (hasFP(MF) && !RegInfo->hasStackRealignment(MF)) { 2729 // If the FPOffset is negative and we're producing a signed immediate, we 2730 // have to keep in mind that the available offset range for negative 2731 // offsets is smaller than for positive ones. If an offset is available 2732 // via the FP and the SP, use whichever is closest. 2733 bool FPOffsetFits = !ForSimm || FPOffset >= -256; 2734 PreferFP |= Offset > -FPOffset && !SVEStackSize; 2735 2736 if (MFI.hasVarSizedObjects()) { 2737 // If we have variable sized objects, we can use either FP or BP, as the 2738 // SP offset is unknown. We can use the base pointer if we have one and 2739 // FP is not preferred. If not, we're stuck with using FP. 2740 bool CanUseBP = RegInfo->hasBasePointer(MF); 2741 if (FPOffsetFits && CanUseBP) // Both are ok. Pick the best. 2742 UseFP = PreferFP; 2743 else if (!CanUseBP) // Can't use BP. Forced to use FP. 2744 UseFP = true; 2745 // else we can use BP and FP, but the offset from FP won't fit. 
2746 // That will make us scavenge registers which we can probably avoid by 2747 // using BP. If it won't fit for BP either, we'll scavenge anyway. 2748 } else if (FPOffset >= 0) { 2749 // Use SP or FP, whichever gives us the best chance of the offset 2750 // being in range for direct access. If the FPOffset is positive, 2751 // that'll always be best, as the SP will be even further away. 2752 UseFP = true; 2753 } else if (MF.hasEHFunclets() && !RegInfo->hasBasePointer(MF)) { 2754 // Funclets access the locals contained in the parent's stack frame 2755 // via the frame pointer, so we have to use the FP in the parent 2756 // function. 2757 (void) Subtarget; 2758 assert(Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv(), 2759 MF.getFunction().isVarArg()) && 2760 "Funclets should only be present on Win64"); 2761 UseFP = true; 2762 } else { 2763 // We have the choice between FP and (SP or BP). 2764 if (FPOffsetFits && PreferFP) // If FP is the best fit, use it. 2765 UseFP = true; 2766 } 2767 } 2768 } 2769 2770 assert( 2771 ((isFixed || isCSR) || !RegInfo->hasStackRealignment(MF) || !UseFP) && 2772 "In the presence of dynamic stack pointer realignment, " 2773 "non-argument/CSR objects cannot be accessed through the frame pointer"); 2774 2775 if (isSVE) { 2776 StackOffset FPOffset = 2777 StackOffset::get(-AFI->getCalleeSaveBaseToFrameRecordOffset(), ObjectOffset); 2778 StackOffset SPOffset = 2779 SVEStackSize + 2780 StackOffset::get(MFI.getStackSize() - AFI->getCalleeSavedStackSize(), 2781 ObjectOffset); 2782 // Always use the FP for SVE spills if available and beneficial. 2783 if (hasFP(MF) && (SPOffset.getFixed() || 2784 FPOffset.getScalable() < SPOffset.getScalable() || 2785 RegInfo->hasStackRealignment(MF))) { 2786 FrameReg = RegInfo->getFrameRegister(MF); 2787 return FPOffset; 2788 } 2789 2790 FrameReg = RegInfo->hasBasePointer(MF) ? RegInfo->getBaseRegister() 2791 : (unsigned)AArch64::SP; 2792 return SPOffset; 2793 } 2794 2795 StackOffset ScalableOffset = {}; 2796 if (UseFP && !(isFixed || isCSR)) 2797 ScalableOffset = -SVEStackSize; 2798 if (!UseFP && (isFixed || isCSR)) 2799 ScalableOffset = SVEStackSize; 2800 2801 if (UseFP) { 2802 FrameReg = RegInfo->getFrameRegister(MF); 2803 return StackOffset::getFixed(FPOffset) + ScalableOffset; 2804 } 2805 2806 // Use the base pointer if we have one. 2807 if (RegInfo->hasBasePointer(MF)) 2808 FrameReg = RegInfo->getBaseRegister(); 2809 else { 2810 assert(!MFI.hasVarSizedObjects() && 2811 "Can't use SP when we have var sized objects."); 2812 FrameReg = AArch64::SP; 2813 // If we're using the red zone for this function, the SP won't actually 2814 // be adjusted, so the offsets will be negative. They're also all 2815 // within range of the signed 9-bit immediate instructions. 2816 if (canUseRedZone(MF)) 2817 Offset -= AFI->getLocalStackSize(); 2818 } 2819 2820 return StackOffset::getFixed(Offset) + ScalableOffset; 2821 } 2822 2823 static unsigned getPrologueDeath(MachineFunction &MF, unsigned Reg) { 2824 // Do not set a kill flag on values that are also marked as live-in. This 2825 // happens with the @llvm-returnaddress intrinsic and with arguments passed in 2826 // callee saved registers. 2827 // Omitting the kill flags is conservatively correct even if the live-in 2828 // is not used after all. 
  bool IsLiveIn = MF.getRegInfo().isLiveIn(Reg);
  return getKillRegState(!IsLiveIn);
}

static bool produceCompactUnwindFrame(MachineFunction &MF) {
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  AttributeList Attrs = MF.getFunction().getAttributes();
  return Subtarget.isTargetMachO() &&
         !(Subtarget.getTargetLowering()->supportSwiftError() &&
           Attrs.hasAttrSomewhere(Attribute::SwiftError)) &&
         MF.getFunction().getCallingConv() != CallingConv::SwiftTail;
}

static bool invalidateWindowsRegisterPairing(unsigned Reg1, unsigned Reg2,
                                             bool NeedsWinCFI, bool IsFirst,
                                             const TargetRegisterInfo *TRI) {
  // If we are generating register pairs for a Windows function that requires
  // EH support, then pair consecutive registers only. There are no unwind
  // opcodes for saves/restores of non-consecutive register pairs.
  // The unwind opcodes are save_regp, save_regp_x, save_fregp, save_fregp_x,
  // save_lrpair.
  // https://docs.microsoft.com/en-us/cpp/build/arm64-exception-handling

  if (Reg2 == AArch64::FP)
    return true;
  if (!NeedsWinCFI)
    return false;
  if (TRI->getEncodingValue(Reg2) == TRI->getEncodingValue(Reg1) + 1)
    return false;
  // If pairing a GPR with LR, the pair can be described by the save_lrpair
  // opcode. If this is the first register pair, it would end up with a
  // predecrement, but there's no save_lrpair_x opcode, so we can only do this
  // if LR is paired with something other than the first register.
  // The save_lrpair opcode requires the first register to be an odd one.
  if (Reg1 >= AArch64::X19 && Reg1 <= AArch64::X27 &&
      (Reg1 - AArch64::X19) % 2 == 0 && Reg2 == AArch64::LR && !IsFirst)
    return false;
  return true;
}

/// Returns true if Reg1 and Reg2 cannot be paired using a ldp/stp instruction.
/// WindowsCFI requires that only consecutive registers can be paired.
/// LR and FP need to be allocated together when the frame needs to save
/// the frame-record. This means any other register pairing with LR is invalid.
static bool invalidateRegisterPairing(unsigned Reg1, unsigned Reg2,
                                      bool UsesWinAAPCS, bool NeedsWinCFI,
                                      bool NeedsFrameRecord, bool IsFirst,
                                      const TargetRegisterInfo *TRI) {
  if (UsesWinAAPCS)
    return invalidateWindowsRegisterPairing(Reg1, Reg2, NeedsWinCFI, IsFirst,
                                            TRI);

  // If we need to store the frame record, don't pair any register
  // with LR other than FP.
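  // For example (illustrative), a candidate pair (x21, lr) is rejected here
  // when a frame record is needed, leaving lr to be paired with fp to form
  // the frame record.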
2883 if (NeedsFrameRecord) 2884 return Reg2 == AArch64::LR; 2885 2886 return false; 2887 } 2888 2889 namespace { 2890 2891 struct RegPairInfo { 2892 unsigned Reg1 = AArch64::NoRegister; 2893 unsigned Reg2 = AArch64::NoRegister; 2894 int FrameIdx; 2895 int Offset; 2896 enum RegType { GPR, FPR64, FPR128, PPR, ZPR, VG } Type; 2897 2898 RegPairInfo() = default; 2899 2900 bool isPaired() const { return Reg2 != AArch64::NoRegister; } 2901 2902 unsigned getScale() const { 2903 switch (Type) { 2904 case PPR: 2905 return 2; 2906 case GPR: 2907 case FPR64: 2908 case VG: 2909 return 8; 2910 case ZPR: 2911 case FPR128: 2912 return 16; 2913 } 2914 llvm_unreachable("Unsupported type"); 2915 } 2916 2917 bool isScalable() const { return Type == PPR || Type == ZPR; } 2918 }; 2919 2920 } // end anonymous namespace 2921 2922 unsigned findFreePredicateReg(BitVector &SavedRegs) { 2923 for (unsigned PReg = AArch64::P8; PReg <= AArch64::P15; ++PReg) { 2924 if (SavedRegs.test(PReg)) { 2925 unsigned PNReg = PReg - AArch64::P0 + AArch64::PN0; 2926 return PNReg; 2927 } 2928 } 2929 return AArch64::NoRegister; 2930 } 2931 2932 static void computeCalleeSaveRegisterPairs( 2933 MachineFunction &MF, ArrayRef<CalleeSavedInfo> CSI, 2934 const TargetRegisterInfo *TRI, SmallVectorImpl<RegPairInfo> &RegPairs, 2935 bool NeedsFrameRecord) { 2936 2937 if (CSI.empty()) 2938 return; 2939 2940 bool IsWindows = isTargetWindows(MF); 2941 bool NeedsWinCFI = needsWinCFI(MF); 2942 AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>(); 2943 MachineFrameInfo &MFI = MF.getFrameInfo(); 2944 CallingConv::ID CC = MF.getFunction().getCallingConv(); 2945 unsigned Count = CSI.size(); 2946 (void)CC; 2947 // MachO's compact unwind format relies on all registers being stored in 2948 // pairs. 2949 assert((!produceCompactUnwindFrame(MF) || CC == CallingConv::PreserveMost || 2950 CC == CallingConv::PreserveAll || CC == CallingConv::CXX_FAST_TLS || 2951 CC == CallingConv::Win64 || (Count & 1) == 0) && 2952 "Odd number of callee-saved regs to spill!"); 2953 int ByteOffset = AFI->getCalleeSavedStackSize(); 2954 int StackFillDir = -1; 2955 int RegInc = 1; 2956 unsigned FirstReg = 0; 2957 if (NeedsWinCFI) { 2958 // For WinCFI, fill the stack from the bottom up. 2959 ByteOffset = 0; 2960 StackFillDir = 1; 2961 // As the CSI array is reversed to match PrologEpilogInserter, iterate 2962 // backwards, to pair up registers starting from lower numbered registers. 2963 RegInc = -1; 2964 FirstReg = Count - 1; 2965 } 2966 int ScalableByteOffset = AFI->getSVECalleeSavedStackSize(); 2967 bool NeedGapToAlignStack = AFI->hasCalleeSaveStackFreeSpace(); 2968 Register LastReg = 0; 2969 2970 // When iterating backwards, the loop condition relies on unsigned wraparound. 2971 for (unsigned i = FirstReg; i < Count; i += RegInc) { 2972 RegPairInfo RPI; 2973 RPI.Reg1 = CSI[i].getReg(); 2974 2975 if (AArch64::GPR64RegClass.contains(RPI.Reg1)) 2976 RPI.Type = RegPairInfo::GPR; 2977 else if (AArch64::FPR64RegClass.contains(RPI.Reg1)) 2978 RPI.Type = RegPairInfo::FPR64; 2979 else if (AArch64::FPR128RegClass.contains(RPI.Reg1)) 2980 RPI.Type = RegPairInfo::FPR128; 2981 else if (AArch64::ZPRRegClass.contains(RPI.Reg1)) 2982 RPI.Type = RegPairInfo::ZPR; 2983 else if (AArch64::PPRRegClass.contains(RPI.Reg1)) 2984 RPI.Type = RegPairInfo::PPR; 2985 else if (RPI.Reg1 == AArch64::VG) 2986 RPI.Type = RegPairInfo::VG; 2987 else 2988 llvm_unreachable("Unsupported register class."); 2989 2990 // Add the stack hazard size as we transition from GPR->FPR CSRs. 
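    // Illustrative example with a hypothetical hazard size of 1024 bytes: the
    // first FPR/NEON callee-save that follows the GPR saves bumps ByteOffset
    // by 1024 below, so the FPR saves land on the far side of the hazard
    // padding slot.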
2991 if (AFI->hasStackHazardSlotIndex() && 2992 (!LastReg || !AArch64InstrInfo::isFpOrNEON(LastReg)) && 2993 AArch64InstrInfo::isFpOrNEON(RPI.Reg1)) 2994 ByteOffset += StackFillDir * StackHazardSize; 2995 LastReg = RPI.Reg1; 2996 2997 // Add the next reg to the pair if it is in the same register class. 2998 if (unsigned(i + RegInc) < Count && !AFI->hasStackHazardSlotIndex()) { 2999 Register NextReg = CSI[i + RegInc].getReg(); 3000 bool IsFirst = i == FirstReg; 3001 switch (RPI.Type) { 3002 case RegPairInfo::GPR: 3003 if (AArch64::GPR64RegClass.contains(NextReg) && 3004 !invalidateRegisterPairing(RPI.Reg1, NextReg, IsWindows, 3005 NeedsWinCFI, NeedsFrameRecord, IsFirst, 3006 TRI)) 3007 RPI.Reg2 = NextReg; 3008 break; 3009 case RegPairInfo::FPR64: 3010 if (AArch64::FPR64RegClass.contains(NextReg) && 3011 !invalidateWindowsRegisterPairing(RPI.Reg1, NextReg, NeedsWinCFI, 3012 IsFirst, TRI)) 3013 RPI.Reg2 = NextReg; 3014 break; 3015 case RegPairInfo::FPR128: 3016 if (AArch64::FPR128RegClass.contains(NextReg)) 3017 RPI.Reg2 = NextReg; 3018 break; 3019 case RegPairInfo::PPR: 3020 break; 3021 case RegPairInfo::ZPR: 3022 if (AFI->getPredicateRegForFillSpill() != 0) 3023 if (((RPI.Reg1 - AArch64::Z0) & 1) == 0 && (NextReg == RPI.Reg1 + 1)) 3024 RPI.Reg2 = NextReg; 3025 break; 3026 case RegPairInfo::VG: 3027 break; 3028 } 3029 } 3030 3031 // GPRs and FPRs are saved in pairs of 64-bit regs. We expect the CSI 3032 // list to come in sorted by frame index so that we can issue the store 3033 // pair instructions directly. Assert if we see anything otherwise. 3034 // 3035 // The order of the registers in the list is controlled by 3036 // getCalleeSavedRegs(), so they will always be in-order, as well. 3037 assert((!RPI.isPaired() || 3038 (CSI[i].getFrameIdx() + RegInc == CSI[i + RegInc].getFrameIdx())) && 3039 "Out of order callee saved regs!"); 3040 3041 assert((!RPI.isPaired() || !NeedsFrameRecord || RPI.Reg2 != AArch64::FP || 3042 RPI.Reg1 == AArch64::LR) && 3043 "FrameRecord must be allocated together with LR"); 3044 3045 // Windows AAPCS has FP and LR reversed. 3046 assert((!RPI.isPaired() || !NeedsFrameRecord || RPI.Reg1 != AArch64::FP || 3047 RPI.Reg2 == AArch64::LR) && 3048 "FrameRecord must be allocated together with LR"); 3049 3050 // MachO's compact unwind format relies on all registers being stored in 3051 // adjacent register pairs. 3052 assert((!produceCompactUnwindFrame(MF) || CC == CallingConv::PreserveMost || 3053 CC == CallingConv::PreserveAll || CC == CallingConv::CXX_FAST_TLS || 3054 CC == CallingConv::Win64 || 3055 (RPI.isPaired() && 3056 ((RPI.Reg1 == AArch64::LR && RPI.Reg2 == AArch64::FP) || 3057 RPI.Reg1 + 1 == RPI.Reg2))) && 3058 "Callee-save registers not saved as adjacent register pair!"); 3059 3060 RPI.FrameIdx = CSI[i].getFrameIdx(); 3061 if (NeedsWinCFI && 3062 RPI.isPaired()) // RPI.FrameIdx must be the lower index of the pair 3063 RPI.FrameIdx = CSI[i + RegInc].getFrameIdx(); 3064 int Scale = RPI.getScale(); 3065 3066 int OffsetPre = RPI.isScalable() ? ScalableByteOffset : ByteOffset; 3067 assert(OffsetPre % Scale == 0); 3068 3069 if (RPI.isScalable()) 3070 ScalableByteOffset += StackFillDir * (RPI.isPaired() ? 2 * Scale : Scale); 3071 else 3072 ByteOffset += StackFillDir * (RPI.isPaired() ? 2 * Scale : Scale); 3073 3074 // Swift's async context is directly before FP, so allocate an extra 3075 // 8 bytes for it. 
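    // This grows the (FP, LR) slot from 16 to 24 bytes; further down, the pair
    // is placed 8 bytes into that slot so that the context word directly
    // precedes the saved FP.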
3076 if (NeedsFrameRecord && AFI->hasSwiftAsyncContext() && 3077 ((!IsWindows && RPI.Reg2 == AArch64::FP) || 3078 (IsWindows && RPI.Reg2 == AArch64::LR))) 3079 ByteOffset += StackFillDir * 8; 3080 3081 // Round up size of non-pair to pair size if we need to pad the 3082 // callee-save area to ensure 16-byte alignment. 3083 if (NeedGapToAlignStack && !NeedsWinCFI && !RPI.isScalable() && 3084 RPI.Type != RegPairInfo::FPR128 && !RPI.isPaired() && 3085 ByteOffset % 16 != 0) { 3086 ByteOffset += 8 * StackFillDir; 3087 assert(MFI.getObjectAlign(RPI.FrameIdx) <= Align(16)); 3088 // A stack frame with a gap looks like this, bottom up: 3089 // d9, d8. x21, gap, x20, x19. 3090 // Set extra alignment on the x21 object to create the gap above it. 3091 MFI.setObjectAlignment(RPI.FrameIdx, Align(16)); 3092 NeedGapToAlignStack = false; 3093 } 3094 3095 int OffsetPost = RPI.isScalable() ? ScalableByteOffset : ByteOffset; 3096 assert(OffsetPost % Scale == 0); 3097 // If filling top down (default), we want the offset after incrementing it. 3098 // If filling bottom up (WinCFI) we need the original offset. 3099 int Offset = NeedsWinCFI ? OffsetPre : OffsetPost; 3100 3101 // The FP, LR pair goes 8 bytes into our expanded 24-byte slot so that the 3102 // Swift context can directly precede FP. 3103 if (NeedsFrameRecord && AFI->hasSwiftAsyncContext() && 3104 ((!IsWindows && RPI.Reg2 == AArch64::FP) || 3105 (IsWindows && RPI.Reg2 == AArch64::LR))) 3106 Offset += 8; 3107 RPI.Offset = Offset / Scale; 3108 3109 assert((!RPI.isPaired() || 3110 (!RPI.isScalable() && RPI.Offset >= -64 && RPI.Offset <= 63) || 3111 (RPI.isScalable() && RPI.Offset >= -256 && RPI.Offset <= 255)) && 3112 "Offset out of bounds for LDP/STP immediate"); 3113 3114 // Save the offset to frame record so that the FP register can point to the 3115 // innermost frame record (spilled FP and LR registers). 3116 if (NeedsFrameRecord && 3117 ((!IsWindows && RPI.Reg1 == AArch64::LR && RPI.Reg2 == AArch64::FP) || 3118 (IsWindows && RPI.Reg1 == AArch64::FP && RPI.Reg2 == AArch64::LR))) 3119 AFI->setCalleeSaveBaseToFrameRecordOffset(Offset); 3120 3121 RegPairs.push_back(RPI); 3122 if (RPI.isPaired()) 3123 i += RegInc; 3124 } 3125 if (NeedsWinCFI) { 3126 // If we need an alignment gap in the stack, align the topmost stack 3127 // object. A stack frame with a gap looks like this, bottom up: 3128 // x19, d8. d9, gap. 3129 // Set extra alignment on the topmost stack object (the first element in 3130 // CSI, which goes top down), to create the gap above it. 3131 if (AFI->hasCalleeSaveStackFreeSpace()) 3132 MFI.setObjectAlignment(CSI[0].getFrameIdx(), Align(16)); 3133 // We iterated bottom up over the registers; flip RegPairs back to top 3134 // down order. 3135 std::reverse(RegPairs.begin(), RegPairs.end()); 3136 } 3137 } 3138 3139 bool AArch64FrameLowering::spillCalleeSavedRegisters( 3140 MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, 3141 ArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const { 3142 MachineFunction &MF = *MBB.getParent(); 3143 const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo(); 3144 AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>(); 3145 bool NeedsWinCFI = needsWinCFI(MF); 3146 DebugLoc DL; 3147 SmallVector<RegPairInfo, 8> RegPairs; 3148 3149 computeCalleeSaveRegisterPairs(MF, CSI, TRI, RegPairs, hasFP(MF)); 3150 3151 MachineRegisterInfo &MRI = MF.getRegInfo(); 3152 // Refresh the reserved regs in case there are any potential changes since the 3153 // last freeze. 
3154 MRI.freezeReservedRegs(); 3155 3156 if (homogeneousPrologEpilog(MF)) { 3157 auto MIB = BuildMI(MBB, MI, DL, TII.get(AArch64::HOM_Prolog)) 3158 .setMIFlag(MachineInstr::FrameSetup); 3159 3160 for (auto &RPI : RegPairs) { 3161 MIB.addReg(RPI.Reg1); 3162 MIB.addReg(RPI.Reg2); 3163 3164 // Update register live in. 3165 if (!MRI.isReserved(RPI.Reg1)) 3166 MBB.addLiveIn(RPI.Reg1); 3167 if (RPI.isPaired() && !MRI.isReserved(RPI.Reg2)) 3168 MBB.addLiveIn(RPI.Reg2); 3169 } 3170 return true; 3171 } 3172 bool PTrueCreated = false; 3173 for (const RegPairInfo &RPI : llvm::reverse(RegPairs)) { 3174 unsigned Reg1 = RPI.Reg1; 3175 unsigned Reg2 = RPI.Reg2; 3176 unsigned StrOpc; 3177 3178 // Issue sequence of spills for cs regs. The first spill may be converted 3179 // to a pre-decrement store later by emitPrologue if the callee-save stack 3180 // area allocation can't be combined with the local stack area allocation. 3181 // For example: 3182 // stp x22, x21, [sp, #0] // addImm(+0) 3183 // stp x20, x19, [sp, #16] // addImm(+2) 3184 // stp fp, lr, [sp, #32] // addImm(+4) 3185 // Rationale: This sequence saves uop updates compared to a sequence of 3186 // pre-increment spills like stp xi,xj,[sp,#-16]! 3187 // Note: Similar rationale and sequence for restores in epilog. 3188 unsigned Size; 3189 Align Alignment; 3190 switch (RPI.Type) { 3191 case RegPairInfo::GPR: 3192 StrOpc = RPI.isPaired() ? AArch64::STPXi : AArch64::STRXui; 3193 Size = 8; 3194 Alignment = Align(8); 3195 break; 3196 case RegPairInfo::FPR64: 3197 StrOpc = RPI.isPaired() ? AArch64::STPDi : AArch64::STRDui; 3198 Size = 8; 3199 Alignment = Align(8); 3200 break; 3201 case RegPairInfo::FPR128: 3202 StrOpc = RPI.isPaired() ? AArch64::STPQi : AArch64::STRQui; 3203 Size = 16; 3204 Alignment = Align(16); 3205 break; 3206 case RegPairInfo::ZPR: 3207 StrOpc = RPI.isPaired() ? AArch64::ST1B_2Z_IMM : AArch64::STR_ZXI; 3208 Size = 16; 3209 Alignment = Align(16); 3210 break; 3211 case RegPairInfo::PPR: 3212 StrOpc = AArch64::STR_PXI; 3213 Size = 2; 3214 Alignment = Align(2); 3215 break; 3216 case RegPairInfo::VG: 3217 StrOpc = AArch64::STRXui; 3218 Size = 8; 3219 Alignment = Align(8); 3220 break; 3221 } 3222 3223 unsigned X0Scratch = AArch64::NoRegister; 3224 if (Reg1 == AArch64::VG) { 3225 // Find an available register to store value of VG to. 3226 Reg1 = findScratchNonCalleeSaveRegister(&MBB); 3227 assert(Reg1 != AArch64::NoRegister); 3228 SMEAttrs Attrs(MF.getFunction()); 3229 3230 if (Attrs.hasStreamingBody() && !Attrs.hasStreamingInterface() && 3231 AFI->getStreamingVGIdx() == std::numeric_limits<int>::max()) { 3232 // For locally-streaming functions, we need to store both the streaming 3233 // & non-streaming VG. Spill the streaming value first. 
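        // RDSVLI #1 reads the streaming vector length in bytes; the UBFM
        // below is a logical-shift-right by 3, converting that byte count
        // into the number of 64-bit granules, i.e. the streaming VG value.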
        BuildMI(MBB, MI, DL, TII.get(AArch64::RDSVLI_XI), Reg1)
            .addImm(1)
            .setMIFlag(MachineInstr::FrameSetup);
        BuildMI(MBB, MI, DL, TII.get(AArch64::UBFMXri), Reg1)
            .addReg(Reg1)
            .addImm(3)
            .addImm(63)
            .setMIFlag(MachineInstr::FrameSetup);

        AFI->setStreamingVGIdx(RPI.FrameIdx);
      } else if (MF.getSubtarget<AArch64Subtarget>().hasSVE()) {
        BuildMI(MBB, MI, DL, TII.get(AArch64::CNTD_XPiI), Reg1)
            .addImm(31)
            .addImm(1)
            .setMIFlag(MachineInstr::FrameSetup);
        AFI->setVGIdx(RPI.FrameIdx);
      } else {
        const AArch64Subtarget &STI = MF.getSubtarget<AArch64Subtarget>();
        if (llvm::any_of(
                MBB.liveins(),
                [&STI](const MachineBasicBlock::RegisterMaskPair &LiveIn) {
                  return STI.getRegisterInfo()->isSuperOrSubRegisterEq(
                      AArch64::X0, LiveIn.PhysReg);
                }))
          X0Scratch = Reg1;

        if (X0Scratch != AArch64::NoRegister)
          BuildMI(MBB, MI, DL, TII.get(AArch64::ORRXrr), Reg1)
              .addReg(AArch64::XZR)
              .addReg(AArch64::X0, RegState::Undef)
              .addReg(AArch64::X0, RegState::Implicit)
              .setMIFlag(MachineInstr::FrameSetup);

        const uint32_t *RegMask = TRI->getCallPreservedMask(
            MF,
            CallingConv::AArch64_SME_ABI_Support_Routines_PreserveMost_From_X1);
        BuildMI(MBB, MI, DL, TII.get(AArch64::BL))
            .addExternalSymbol("__arm_get_current_vg")
            .addRegMask(RegMask)
            .addReg(AArch64::X0, RegState::ImplicitDefine)
            .setMIFlag(MachineInstr::FrameSetup);
        Reg1 = AArch64::X0;
        AFI->setVGIdx(RPI.FrameIdx);
      }
    }

    LLVM_DEBUG(dbgs() << "CSR spill: (" << printReg(Reg1, TRI);
               if (RPI.isPaired()) dbgs() << ", " << printReg(Reg2, TRI);
               dbgs() << ") -> fi#(" << RPI.FrameIdx;
               if (RPI.isPaired()) dbgs() << ", " << RPI.FrameIdx + 1;
               dbgs() << ")\n");

    assert((!NeedsWinCFI || !(Reg1 == AArch64::LR && Reg2 == AArch64::FP)) &&
           "Windows unwinding requires a consecutive (FP,LR) pair");
    // Windows unwind codes require consecutive registers if registers are
    // paired. Make the switch here, so that the code below will save (x,x+1)
    // and not (x+1,x).
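    // E.g. for x = x19 the emitted save is "stp x19, x20, [sp, #...]" rather
    // than "stp x20, x19, ...", matching the Windows save_regp encoding.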
    unsigned FrameIdxReg1 = RPI.FrameIdx;
    unsigned FrameIdxReg2 = RPI.FrameIdx + 1;
    if (NeedsWinCFI && RPI.isPaired()) {
      std::swap(Reg1, Reg2);
      std::swap(FrameIdxReg1, FrameIdxReg2);
    }

    if (RPI.isPaired() && RPI.isScalable()) {
      [[maybe_unused]] const AArch64Subtarget &Subtarget =
          MF.getSubtarget<AArch64Subtarget>();
      AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
      unsigned PnReg = AFI->getPredicateRegForFillSpill();
      assert(((Subtarget.hasSVE2p1() || Subtarget.hasSME2()) && PnReg != 0) &&
             "Expects SVE2.1 or SME2 target and a predicate register");
#ifdef EXPENSIVE_CHECKS
      auto IsPPR = [](const RegPairInfo &c) {
        return c.Type == RegPairInfo::PPR;
      };
      auto PPRBegin = std::find_if(RegPairs.begin(), RegPairs.end(), IsPPR);
      auto IsZPR = [](const RegPairInfo &c) {
        return c.Type == RegPairInfo::ZPR;
      };
      auto ZPRBegin = std::find_if(RegPairs.begin(), RegPairs.end(), IsZPR);
      assert(!(PPRBegin < ZPRBegin) &&
             "Expected callee save predicate to be handled first");
#endif
      if (!PTrueCreated) {
        PTrueCreated = true;
        BuildMI(MBB, MI, DL, TII.get(AArch64::PTRUE_C_B), PnReg)
            .setMIFlags(MachineInstr::FrameSetup);
      }
      MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(StrOpc));
      if (!MRI.isReserved(Reg1))
        MBB.addLiveIn(Reg1);
      if (!MRI.isReserved(Reg2))
        MBB.addLiveIn(Reg2);
      MIB.addReg(/*PairRegs*/ AArch64::Z0_Z1 + (RPI.Reg1 - AArch64::Z0));
      MIB.addMemOperand(MF.getMachineMemOperand(
          MachinePointerInfo::getFixedStack(MF, FrameIdxReg2),
          MachineMemOperand::MOStore, Size, Alignment));
      MIB.addReg(PnReg);
      MIB.addReg(AArch64::SP)
          .addImm(RPI.Offset) // [sp, #offset*scale],
                              // where factor*scale is implicit
          .setMIFlag(MachineInstr::FrameSetup);
      MIB.addMemOperand(MF.getMachineMemOperand(
          MachinePointerInfo::getFixedStack(MF, FrameIdxReg1),
          MachineMemOperand::MOStore, Size, Alignment));
      if (NeedsWinCFI)
        InsertSEH(MIB, TII, MachineInstr::FrameSetup);
    } else { // The case where a ZReg pair is not being stored.
      MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(StrOpc));
      if (!MRI.isReserved(Reg1))
        MBB.addLiveIn(Reg1);
      if (RPI.isPaired()) {
        if (!MRI.isReserved(Reg2))
          MBB.addLiveIn(Reg2);
        MIB.addReg(Reg2, getPrologueDeath(MF, Reg2));
        MIB.addMemOperand(MF.getMachineMemOperand(
            MachinePointerInfo::getFixedStack(MF, FrameIdxReg2),
            MachineMemOperand::MOStore, Size, Alignment));
      }
      MIB.addReg(Reg1, getPrologueDeath(MF, Reg1))
          .addReg(AArch64::SP)
          .addImm(RPI.Offset) // [sp, #offset*scale],
                              // where factor*scale is implicit
          .setMIFlag(MachineInstr::FrameSetup);
      MIB.addMemOperand(MF.getMachineMemOperand(
          MachinePointerInfo::getFixedStack(MF, FrameIdxReg1),
          MachineMemOperand::MOStore, Size, Alignment));
      if (NeedsWinCFI)
        InsertSEH(MIB, TII, MachineInstr::FrameSetup);
    }
    // Update the StackIDs of the SVE stack slots.
3365 MachineFrameInfo &MFI = MF.getFrameInfo(); 3366 if (RPI.Type == RegPairInfo::ZPR || RPI.Type == RegPairInfo::PPR) { 3367 MFI.setStackID(FrameIdxReg1, TargetStackID::ScalableVector); 3368 if (RPI.isPaired()) 3369 MFI.setStackID(FrameIdxReg2, TargetStackID::ScalableVector); 3370 } 3371 3372 if (X0Scratch != AArch64::NoRegister) 3373 BuildMI(MBB, MI, DL, TII.get(AArch64::ORRXrr), AArch64::X0) 3374 .addReg(AArch64::XZR) 3375 .addReg(X0Scratch, RegState::Undef) 3376 .addReg(X0Scratch, RegState::Implicit) 3377 .setMIFlag(MachineInstr::FrameSetup); 3378 } 3379 return true; 3380 } 3381 3382 bool AArch64FrameLowering::restoreCalleeSavedRegisters( 3383 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, 3384 MutableArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const { 3385 MachineFunction &MF = *MBB.getParent(); 3386 const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo(); 3387 DebugLoc DL; 3388 SmallVector<RegPairInfo, 8> RegPairs; 3389 bool NeedsWinCFI = needsWinCFI(MF); 3390 3391 if (MBBI != MBB.end()) 3392 DL = MBBI->getDebugLoc(); 3393 3394 computeCalleeSaveRegisterPairs(MF, CSI, TRI, RegPairs, hasFP(MF)); 3395 if (homogeneousPrologEpilog(MF, &MBB)) { 3396 auto MIB = BuildMI(MBB, MBBI, DL, TII.get(AArch64::HOM_Epilog)) 3397 .setMIFlag(MachineInstr::FrameDestroy); 3398 for (auto &RPI : RegPairs) { 3399 MIB.addReg(RPI.Reg1, RegState::Define); 3400 MIB.addReg(RPI.Reg2, RegState::Define); 3401 } 3402 return true; 3403 } 3404 3405 // For performance reasons restore SVE register in increasing order 3406 auto IsPPR = [](const RegPairInfo &c) { return c.Type == RegPairInfo::PPR; }; 3407 auto PPRBegin = std::find_if(RegPairs.begin(), RegPairs.end(), IsPPR); 3408 auto PPREnd = std::find_if_not(PPRBegin, RegPairs.end(), IsPPR); 3409 std::reverse(PPRBegin, PPREnd); 3410 auto IsZPR = [](const RegPairInfo &c) { return c.Type == RegPairInfo::ZPR; }; 3411 auto ZPRBegin = std::find_if(RegPairs.begin(), RegPairs.end(), IsZPR); 3412 auto ZPREnd = std::find_if_not(ZPRBegin, RegPairs.end(), IsZPR); 3413 std::reverse(ZPRBegin, ZPREnd); 3414 3415 bool PTrueCreated = false; 3416 for (const RegPairInfo &RPI : RegPairs) { 3417 unsigned Reg1 = RPI.Reg1; 3418 unsigned Reg2 = RPI.Reg2; 3419 3420 // Issue sequence of restores for cs regs. The last restore may be converted 3421 // to a post-increment load later by emitEpilogue if the callee-save stack 3422 // area allocation can't be combined with the local stack area allocation. 3423 // For example: 3424 // ldp fp, lr, [sp, #32] // addImm(+4) 3425 // ldp x20, x19, [sp, #16] // addImm(+2) 3426 // ldp x22, x21, [sp, #0] // addImm(+0) 3427 // Note: see comment in spillCalleeSavedRegisters() 3428 unsigned LdrOpc; 3429 unsigned Size; 3430 Align Alignment; 3431 switch (RPI.Type) { 3432 case RegPairInfo::GPR: 3433 LdrOpc = RPI.isPaired() ? AArch64::LDPXi : AArch64::LDRXui; 3434 Size = 8; 3435 Alignment = Align(8); 3436 break; 3437 case RegPairInfo::FPR64: 3438 LdrOpc = RPI.isPaired() ? AArch64::LDPDi : AArch64::LDRDui; 3439 Size = 8; 3440 Alignment = Align(8); 3441 break; 3442 case RegPairInfo::FPR128: 3443 LdrOpc = RPI.isPaired() ? AArch64::LDPQi : AArch64::LDRQui; 3444 Size = 16; 3445 Alignment = Align(16); 3446 break; 3447 case RegPairInfo::ZPR: 3448 LdrOpc = RPI.isPaired() ? 
AArch64::LD1B_2Z_IMM : AArch64::LDR_ZXI; 3449 Size = 16; 3450 Alignment = Align(16); 3451 break; 3452 case RegPairInfo::PPR: 3453 LdrOpc = AArch64::LDR_PXI; 3454 Size = 2; 3455 Alignment = Align(2); 3456 break; 3457 case RegPairInfo::VG: 3458 continue; 3459 } 3460 LLVM_DEBUG(dbgs() << "CSR restore: (" << printReg(Reg1, TRI); 3461 if (RPI.isPaired()) dbgs() << ", " << printReg(Reg2, TRI); 3462 dbgs() << ") -> fi#(" << RPI.FrameIdx; 3463 if (RPI.isPaired()) dbgs() << ", " << RPI.FrameIdx + 1; 3464 dbgs() << ")\n"); 3465 3466 // Windows unwind codes require consecutive registers if registers are 3467 // paired. Make the switch here, so that the code below will save (x,x+1) 3468 // and not (x+1,x). 3469 unsigned FrameIdxReg1 = RPI.FrameIdx; 3470 unsigned FrameIdxReg2 = RPI.FrameIdx + 1; 3471 if (NeedsWinCFI && RPI.isPaired()) { 3472 std::swap(Reg1, Reg2); 3473 std::swap(FrameIdxReg1, FrameIdxReg2); 3474 } 3475 3476 AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>(); 3477 if (RPI.isPaired() && RPI.isScalable()) { 3478 [[maybe_unused]] const AArch64Subtarget &Subtarget = 3479 MF.getSubtarget<AArch64Subtarget>(); 3480 unsigned PnReg = AFI->getPredicateRegForFillSpill(); 3481 assert(((Subtarget.hasSVE2p1() || Subtarget.hasSME2()) && PnReg != 0) && 3482 "Expects SVE2.1 or SME2 target and a predicate register"); 3483 #ifdef EXPENSIVE_CHECKS 3484 assert(!(PPRBegin < ZPRBegin) && 3485 "Expected callee save predicate to be handled first"); 3486 #endif 3487 if (!PTrueCreated) { 3488 PTrueCreated = true; 3489 BuildMI(MBB, MBBI, DL, TII.get(AArch64::PTRUE_C_B), PnReg) 3490 .setMIFlags(MachineInstr::FrameDestroy); 3491 } 3492 MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII.get(LdrOpc)); 3493 MIB.addReg(/*PairRegs*/ AArch64::Z0_Z1 + (RPI.Reg1 - AArch64::Z0), 3494 getDefRegState(true)); 3495 MIB.addMemOperand(MF.getMachineMemOperand( 3496 MachinePointerInfo::getFixedStack(MF, FrameIdxReg2), 3497 MachineMemOperand::MOLoad, Size, Alignment)); 3498 MIB.addReg(PnReg); 3499 MIB.addReg(AArch64::SP) 3500 .addImm(RPI.Offset) // [sp, #offset*scale] 3501 // where factor*scale is implicit 3502 .setMIFlag(MachineInstr::FrameDestroy); 3503 MIB.addMemOperand(MF.getMachineMemOperand( 3504 MachinePointerInfo::getFixedStack(MF, FrameIdxReg1), 3505 MachineMemOperand::MOLoad, Size, Alignment)); 3506 if (NeedsWinCFI) 3507 InsertSEH(MIB, TII, MachineInstr::FrameDestroy); 3508 } else { 3509 MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII.get(LdrOpc)); 3510 if (RPI.isPaired()) { 3511 MIB.addReg(Reg2, getDefRegState(true)); 3512 MIB.addMemOperand(MF.getMachineMemOperand( 3513 MachinePointerInfo::getFixedStack(MF, FrameIdxReg2), 3514 MachineMemOperand::MOLoad, Size, Alignment)); 3515 } 3516 MIB.addReg(Reg1, getDefRegState(true)); 3517 MIB.addReg(AArch64::SP) 3518 .addImm(RPI.Offset) // [sp, #offset*scale] 3519 // where factor*scale is implicit 3520 .setMIFlag(MachineInstr::FrameDestroy); 3521 MIB.addMemOperand(MF.getMachineMemOperand( 3522 MachinePointerInfo::getFixedStack(MF, FrameIdxReg1), 3523 MachineMemOperand::MOLoad, Size, Alignment)); 3524 if (NeedsWinCFI) 3525 InsertSEH(MIB, TII, MachineInstr::FrameDestroy); 3526 } 3527 } 3528 return true; 3529 } 3530 3531 // Return the FrameID for a Load/Store instruction by looking at the MMO. 
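// A FixedStackPseudoSourceValue in the MMO yields the frame index directly;
// otherwise the underlying IR value is matched against the frame's alloca
// instructions. Returns std::nullopt if neither identifies a frame object.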
3532 static std::optional<int> getLdStFrameID(const MachineInstr &MI, 3533 const MachineFrameInfo &MFI) { 3534 if (!MI.mayLoadOrStore() || MI.getNumMemOperands() < 1) 3535 return std::nullopt; 3536 3537 MachineMemOperand *MMO = *MI.memoperands_begin(); 3538 auto *PSV = 3539 dyn_cast_or_null<FixedStackPseudoSourceValue>(MMO->getPseudoValue()); 3540 if (PSV) 3541 return std::optional<int>(PSV->getFrameIndex()); 3542 3543 if (MMO->getValue()) { 3544 if (auto *Al = dyn_cast<AllocaInst>(getUnderlyingObject(MMO->getValue()))) { 3545 for (int FI = MFI.getObjectIndexBegin(); FI < MFI.getObjectIndexEnd(); 3546 FI++) 3547 if (MFI.getObjectAllocation(FI) == Al) 3548 return FI; 3549 } 3550 } 3551 3552 return std::nullopt; 3553 } 3554 3555 // Check if a Hazard slot is needed for the current function, and if so create 3556 // one for it. The index is stored in AArch64FunctionInfo->StackHazardSlotIndex, 3557 // which can be used to determine if any hazard padding is needed. 3558 void AArch64FrameLowering::determineStackHazardSlot( 3559 MachineFunction &MF, BitVector &SavedRegs) const { 3560 if (StackHazardSize == 0 || StackHazardSize % 16 != 0 || 3561 MF.getInfo<AArch64FunctionInfo>()->hasStackHazardSlotIndex()) 3562 return; 3563 3564 // Stack hazards are only needed in streaming functions. 3565 SMEAttrs Attrs(MF.getFunction()); 3566 if (!StackHazardInNonStreaming && Attrs.hasNonStreamingInterfaceAndBody()) 3567 return; 3568 3569 MachineFrameInfo &MFI = MF.getFrameInfo(); 3570 3571 // Add a hazard slot if there are any CSR FPR registers, or are any fp-only 3572 // stack objects. 3573 bool HasFPRCSRs = any_of(SavedRegs.set_bits(), [](unsigned Reg) { 3574 return AArch64::FPR64RegClass.contains(Reg) || 3575 AArch64::FPR128RegClass.contains(Reg) || 3576 AArch64::ZPRRegClass.contains(Reg) || 3577 AArch64::PPRRegClass.contains(Reg); 3578 }); 3579 bool HasFPRStackObjects = false; 3580 if (!HasFPRCSRs) { 3581 std::vector<unsigned> FrameObjects(MFI.getObjectIndexEnd()); 3582 for (auto &MBB : MF) { 3583 for (auto &MI : MBB) { 3584 std::optional<int> FI = getLdStFrameID(MI, MFI); 3585 if (FI && *FI >= 0 && *FI < (int)FrameObjects.size()) { 3586 if (MFI.getStackID(*FI) == TargetStackID::ScalableVector || 3587 AArch64InstrInfo::isFpOrNEON(MI)) 3588 FrameObjects[*FI] |= 2; 3589 else 3590 FrameObjects[*FI] |= 1; 3591 } 3592 } 3593 } 3594 HasFPRStackObjects = 3595 any_of(FrameObjects, [](unsigned B) { return (B & 3) == 2; }); 3596 } 3597 3598 if (HasFPRCSRs || HasFPRStackObjects) { 3599 int ID = MFI.CreateStackObject(StackHazardSize, Align(16), false); 3600 LLVM_DEBUG(dbgs() << "Created Hazard slot at " << ID << " size " 3601 << StackHazardSize << "\n"); 3602 MF.getInfo<AArch64FunctionInfo>()->setStackHazardSlotIndex(ID); 3603 } 3604 } 3605 3606 void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF, 3607 BitVector &SavedRegs, 3608 RegScavenger *RS) const { 3609 // All calls are tail calls in GHC calling conv, and functions have no 3610 // prologue/epilogue. 
3611 if (MF.getFunction().getCallingConv() == CallingConv::GHC) 3612 return; 3613 3614 TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS); 3615 const AArch64RegisterInfo *RegInfo = static_cast<const AArch64RegisterInfo *>( 3616 MF.getSubtarget().getRegisterInfo()); 3617 const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>(); 3618 AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>(); 3619 unsigned UnspilledCSGPR = AArch64::NoRegister; 3620 unsigned UnspilledCSGPRPaired = AArch64::NoRegister; 3621 3622 MachineFrameInfo &MFI = MF.getFrameInfo(); 3623 const MCPhysReg *CSRegs = MF.getRegInfo().getCalleeSavedRegs(); 3624 3625 unsigned BasePointerReg = RegInfo->hasBasePointer(MF) 3626 ? RegInfo->getBaseRegister() 3627 : (unsigned)AArch64::NoRegister; 3628 3629 unsigned ExtraCSSpill = 0; 3630 bool HasUnpairedGPR64 = false; 3631 bool HasPairZReg = false; 3632 // Figure out which callee-saved registers to save/restore. 3633 for (unsigned i = 0; CSRegs[i]; ++i) { 3634 const unsigned Reg = CSRegs[i]; 3635 3636 // Add the base pointer register to SavedRegs if it is callee-save. 3637 if (Reg == BasePointerReg) 3638 SavedRegs.set(Reg); 3639 3640 bool RegUsed = SavedRegs.test(Reg); 3641 unsigned PairedReg = AArch64::NoRegister; 3642 const bool RegIsGPR64 = AArch64::GPR64RegClass.contains(Reg); 3643 if (RegIsGPR64 || AArch64::FPR64RegClass.contains(Reg) || 3644 AArch64::FPR128RegClass.contains(Reg)) { 3645 // Compensate for odd numbers of GP CSRs. 3646 // For now, all the known cases of odd number of CSRs are of GPRs. 3647 if (HasUnpairedGPR64) 3648 PairedReg = CSRegs[i % 2 == 0 ? i - 1 : i + 1]; 3649 else 3650 PairedReg = CSRegs[i ^ 1]; 3651 } 3652 3653 // If the function requires all the GP registers to save (SavedRegs), 3654 // and there are an odd number of GP CSRs at the same time (CSRegs), 3655 // PairedReg could be in a different register class from Reg, which would 3656 // lead to a FPR (usually D8) accidentally being marked saved. 3657 if (RegIsGPR64 && !AArch64::GPR64RegClass.contains(PairedReg)) { 3658 PairedReg = AArch64::NoRegister; 3659 HasUnpairedGPR64 = true; 3660 } 3661 assert(PairedReg == AArch64::NoRegister || 3662 AArch64::GPR64RegClass.contains(Reg, PairedReg) || 3663 AArch64::FPR64RegClass.contains(Reg, PairedReg) || 3664 AArch64::FPR128RegClass.contains(Reg, PairedReg)); 3665 3666 if (!RegUsed) { 3667 if (AArch64::GPR64RegClass.contains(Reg) && 3668 !RegInfo->isReservedReg(MF, Reg)) { 3669 UnspilledCSGPR = Reg; 3670 UnspilledCSGPRPaired = PairedReg; 3671 } 3672 continue; 3673 } 3674 3675 // MachO's compact unwind format relies on all registers being stored in 3676 // pairs. 3677 // FIXME: the usual format is actually better if unwinding isn't needed. 3678 if (producePairRegisters(MF) && PairedReg != AArch64::NoRegister && 3679 !SavedRegs.test(PairedReg)) { 3680 SavedRegs.set(PairedReg); 3681 if (AArch64::GPR64RegClass.contains(PairedReg) && 3682 !RegInfo->isReservedReg(MF, PairedReg)) 3683 ExtraCSSpill = PairedReg; 3684 } 3685 // Check if there is a pair of ZRegs, so it can select PReg for spill/fill 3686 HasPairZReg |= (AArch64::ZPRRegClass.contains(Reg, CSRegs[i ^ 1]) && 3687 SavedRegs.test(CSRegs[i ^ 1])); 3688 } 3689 3690 if (HasPairZReg && (Subtarget.hasSVE2p1() || Subtarget.hasSME2())) { 3691 AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>(); 3692 // Find a suitable predicate register for the multi-vector spill/fill 3693 // instructions. 
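    // The paired ST1B/LD1B spill and fill forms take a predicate-as-counter
    // register (pn8-pn15); prefer one whose underlying P register is already
    // in the callee-saved set so that clobbering it is safe.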
3694 unsigned PnReg = findFreePredicateReg(SavedRegs); 3695 if (PnReg != AArch64::NoRegister) 3696 AFI->setPredicateRegForFillSpill(PnReg); 3697 // If no free callee-save has been found assign one. 3698 if (!AFI->getPredicateRegForFillSpill() && 3699 MF.getFunction().getCallingConv() == 3700 CallingConv::AArch64_SVE_VectorCall) { 3701 SavedRegs.set(AArch64::P8); 3702 AFI->setPredicateRegForFillSpill(AArch64::PN8); 3703 } 3704 3705 assert(!RegInfo->isReservedReg(MF, AFI->getPredicateRegForFillSpill()) && 3706 "Predicate cannot be a reserved register"); 3707 } 3708 3709 if (MF.getFunction().getCallingConv() == CallingConv::Win64 && 3710 !Subtarget.isTargetWindows()) { 3711 // For Windows calling convention on a non-windows OS, where X18 is treated 3712 // as reserved, back up X18 when entering non-windows code (marked with the 3713 // Windows calling convention) and restore when returning regardless of 3714 // whether the individual function uses it - it might call other functions 3715 // that clobber it. 3716 SavedRegs.set(AArch64::X18); 3717 } 3718 3719 // Calculates the callee saved stack size. 3720 unsigned CSStackSize = 0; 3721 unsigned SVECSStackSize = 0; 3722 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); 3723 const MachineRegisterInfo &MRI = MF.getRegInfo(); 3724 for (unsigned Reg : SavedRegs.set_bits()) { 3725 auto RegSize = TRI->getRegSizeInBits(Reg, MRI) / 8; 3726 if (AArch64::PPRRegClass.contains(Reg) || 3727 AArch64::ZPRRegClass.contains(Reg)) 3728 SVECSStackSize += RegSize; 3729 else 3730 CSStackSize += RegSize; 3731 } 3732 3733 // Increase the callee-saved stack size if the function has streaming mode 3734 // changes, as we will need to spill the value of the VG register. 3735 // For locally streaming functions, we spill both the streaming and 3736 // non-streaming VG value. 3737 const Function &F = MF.getFunction(); 3738 SMEAttrs Attrs(F); 3739 if (AFI->hasStreamingModeChanges()) { 3740 if (Attrs.hasStreamingBody() && !Attrs.hasStreamingInterface()) 3741 CSStackSize += 16; 3742 else 3743 CSStackSize += 8; 3744 } 3745 3746 // Determine if a Hazard slot should be used, and increase the CSStackSize by 3747 // StackHazardSize if so. 3748 determineStackHazardSlot(MF, SavedRegs); 3749 if (AFI->hasStackHazardSlotIndex()) 3750 CSStackSize += StackHazardSize; 3751 3752 // Save number of saved regs, so we can easily update CSStackSize later. 3753 unsigned NumSavedRegs = SavedRegs.count(); 3754 3755 // The frame record needs to be created by saving the appropriate registers 3756 uint64_t EstimatedStackSize = MFI.estimateStackSize(MF); 3757 if (hasFP(MF) || 3758 windowsRequiresStackProbe(MF, EstimatedStackSize + CSStackSize + 16)) { 3759 SavedRegs.set(AArch64::FP); 3760 SavedRegs.set(AArch64::LR); 3761 } 3762 3763 LLVM_DEBUG({ 3764 dbgs() << "*** determineCalleeSaves\nSaved CSRs:"; 3765 for (unsigned Reg : SavedRegs.set_bits()) 3766 dbgs() << ' ' << printReg(Reg, RegInfo); 3767 dbgs() << "\n"; 3768 }); 3769 3770 // If any callee-saved registers are used, the frame cannot be eliminated. 3771 int64_t SVEStackSize = 3772 alignTo(SVECSStackSize + estimateSVEStackObjectOffsets(MFI), 16); 3773 bool CanEliminateFrame = (SavedRegs.count() == 0) && !SVEStackSize; 3774 3775 // The CSR spill slots have not been allocated yet, so estimateStackSize 3776 // won't include them. 3777 unsigned EstimatedStackSizeLimit = estimateRSStackSizeLimit(MF); 3778 3779 // We may address some of the stack above the canonical frame address, either 3780 // for our own arguments or during a call. 
Include that in calculating whether 3781 // we have complicated addressing concerns. 3782 int64_t CalleeStackUsed = 0; 3783 for (int I = MFI.getObjectIndexBegin(); I != 0; ++I) { 3784 int64_t FixedOff = MFI.getObjectOffset(I); 3785 if (FixedOff > CalleeStackUsed) 3786 CalleeStackUsed = FixedOff; 3787 } 3788 3789 // Conservatively always assume BigStack when there are SVE spills. 3790 bool BigStack = SVEStackSize || (EstimatedStackSize + CSStackSize + 3791 CalleeStackUsed) > EstimatedStackSizeLimit; 3792 if (BigStack || !CanEliminateFrame || RegInfo->cannotEliminateFrame(MF)) 3793 AFI->setHasStackFrame(true); 3794 3795 // Estimate if we might need to scavenge a register at some point in order 3796 // to materialize a stack offset. If so, either spill one additional 3797 // callee-saved register or reserve a special spill slot to facilitate 3798 // register scavenging. If we already spilled an extra callee-saved register 3799 // above to keep the number of spills even, we don't need to do anything else 3800 // here. 3801 if (BigStack) { 3802 if (!ExtraCSSpill && UnspilledCSGPR != AArch64::NoRegister) { 3803 LLVM_DEBUG(dbgs() << "Spilling " << printReg(UnspilledCSGPR, RegInfo) 3804 << " to get a scratch register.\n"); 3805 SavedRegs.set(UnspilledCSGPR); 3806 ExtraCSSpill = UnspilledCSGPR; 3807 3808 // MachO's compact unwind format relies on all registers being stored in 3809 // pairs, so if we need to spill one extra for BigStack, then we need to 3810 // store the pair. 3811 if (producePairRegisters(MF)) { 3812 if (UnspilledCSGPRPaired == AArch64::NoRegister) { 3813 // Failed to make a pair for compact unwind format, revert spilling. 3814 if (produceCompactUnwindFrame(MF)) { 3815 SavedRegs.reset(UnspilledCSGPR); 3816 ExtraCSSpill = AArch64::NoRegister; 3817 } 3818 } else 3819 SavedRegs.set(UnspilledCSGPRPaired); 3820 } 3821 } 3822 3823 // If we didn't find an extra callee-saved register to spill, create 3824 // an emergency spill slot. 3825 if (!ExtraCSSpill || MF.getRegInfo().isPhysRegUsed(ExtraCSSpill)) { 3826 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); 3827 const TargetRegisterClass &RC = AArch64::GPR64RegClass; 3828 unsigned Size = TRI->getSpillSize(RC); 3829 Align Alignment = TRI->getSpillAlign(RC); 3830 int FI = MFI.CreateStackObject(Size, Alignment, false); 3831 RS->addScavengingFrameIndex(FI); 3832 LLVM_DEBUG(dbgs() << "No available CS registers, allocated fi#" << FI 3833 << " as the emergency spill slot.\n"); 3834 } 3835 } 3836 3837 // Adding the size of additional 64bit GPR saves. 3838 CSStackSize += 8 * (SavedRegs.count() - NumSavedRegs); 3839 3840 // A Swift asynchronous context extends the frame record with a pointer 3841 // directly before FP. 3842 if (hasFP(MF) && AFI->hasSwiftAsyncContext()) 3843 CSStackSize += 8; 3844 3845 uint64_t AlignedCSStackSize = alignTo(CSStackSize, 16); 3846 LLVM_DEBUG(dbgs() << "Estimated stack frame size: " 3847 << EstimatedStackSize + AlignedCSStackSize << " bytes.\n"); 3848 3849 assert((!MFI.isCalleeSavedInfoValid() || 3850 AFI->getCalleeSavedStackSize() == AlignedCSStackSize) && 3851 "Should not invalidate callee saved info"); 3852 3853 // Round up to register pair alignment to avoid additional SP adjustment 3854 // instructions. 
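  // For example (illustrative), 11 eight-byte GPR saves give CSStackSize == 88,
  // which is rounded up to AlignedCSStackSize == 96; the 8 spare bytes are
  // recorded below via setCalleeSaveStackHasFreeSpace.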
3855 AFI->setCalleeSavedStackSize(AlignedCSStackSize); 3856 AFI->setCalleeSaveStackHasFreeSpace(AlignedCSStackSize != CSStackSize); 3857 AFI->setSVECalleeSavedStackSize(alignTo(SVECSStackSize, 16)); 3858 } 3859 3860 bool AArch64FrameLowering::assignCalleeSavedSpillSlots( 3861 MachineFunction &MF, const TargetRegisterInfo *RegInfo, 3862 std::vector<CalleeSavedInfo> &CSI, unsigned &MinCSFrameIndex, 3863 unsigned &MaxCSFrameIndex) const { 3864 bool NeedsWinCFI = needsWinCFI(MF); 3865 // To match the canonical windows frame layout, reverse the list of 3866 // callee saved registers to get them laid out by PrologEpilogInserter 3867 // in the right order. (PrologEpilogInserter allocates stack objects top 3868 // down. Windows canonical prologs store higher numbered registers at 3869 // the top, thus have the CSI array start from the highest registers.) 3870 if (NeedsWinCFI) 3871 std::reverse(CSI.begin(), CSI.end()); 3872 3873 if (CSI.empty()) 3874 return true; // Early exit if no callee saved registers are modified! 3875 3876 // Now that we know which registers need to be saved and restored, allocate 3877 // stack slots for them. 3878 MachineFrameInfo &MFI = MF.getFrameInfo(); 3879 auto *AFI = MF.getInfo<AArch64FunctionInfo>(); 3880 3881 bool UsesWinAAPCS = isTargetWindows(MF); 3882 if (UsesWinAAPCS && hasFP(MF) && AFI->hasSwiftAsyncContext()) { 3883 int FrameIdx = MFI.CreateStackObject(8, Align(16), true); 3884 AFI->setSwiftAsyncContextFrameIdx(FrameIdx); 3885 if ((unsigned)FrameIdx < MinCSFrameIndex) 3886 MinCSFrameIndex = FrameIdx; 3887 if ((unsigned)FrameIdx > MaxCSFrameIndex) 3888 MaxCSFrameIndex = FrameIdx; 3889 } 3890 3891 // Insert VG into the list of CSRs, immediately before LR if saved. 3892 if (AFI->hasStreamingModeChanges()) { 3893 std::vector<CalleeSavedInfo> VGSaves; 3894 SMEAttrs Attrs(MF.getFunction()); 3895 3896 auto VGInfo = CalleeSavedInfo(AArch64::VG); 3897 VGInfo.setRestored(false); 3898 VGSaves.push_back(VGInfo); 3899 3900 // Add VG again if the function is locally-streaming, as we will spill two 3901 // values. 3902 if (Attrs.hasStreamingBody() && !Attrs.hasStreamingInterface()) 3903 VGSaves.push_back(VGInfo); 3904 3905 bool InsertBeforeLR = false; 3906 3907 for (unsigned I = 0; I < CSI.size(); I++) 3908 if (CSI[I].getReg() == AArch64::LR) { 3909 InsertBeforeLR = true; 3910 CSI.insert(CSI.begin() + I, VGSaves.begin(), VGSaves.end()); 3911 break; 3912 } 3913 3914 if (!InsertBeforeLR) 3915 CSI.insert(CSI.end(), VGSaves.begin(), VGSaves.end()); 3916 } 3917 3918 Register LastReg = 0; 3919 int HazardSlotIndex = std::numeric_limits<int>::max(); 3920 for (auto &CS : CSI) { 3921 Register Reg = CS.getReg(); 3922 const TargetRegisterClass *RC = RegInfo->getMinimalPhysRegClass(Reg); 3923 3924 // Create a hazard slot as we switch between GPR and FPR CSRs. 
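    // The CSR list keeps GPR and FPR saves grouped, so at most one GPR->FPR
    // transition (and hence one hazard slot) is expected here; the assert
    // below checks this.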
3925 if (AFI->hasStackHazardSlotIndex() && 3926 (!LastReg || !AArch64InstrInfo::isFpOrNEON(LastReg)) && 3927 AArch64InstrInfo::isFpOrNEON(Reg)) { 3928 assert(HazardSlotIndex == std::numeric_limits<int>::max() && 3929 "Unexpected register order for hazard slot"); 3930 HazardSlotIndex = MFI.CreateStackObject(StackHazardSize, Align(8), true); 3931 LLVM_DEBUG(dbgs() << "Created CSR Hazard at slot " << HazardSlotIndex 3932 << "\n"); 3933 AFI->setStackHazardCSRSlotIndex(HazardSlotIndex); 3934 if ((unsigned)HazardSlotIndex < MinCSFrameIndex) 3935 MinCSFrameIndex = HazardSlotIndex; 3936 if ((unsigned)HazardSlotIndex > MaxCSFrameIndex) 3937 MaxCSFrameIndex = HazardSlotIndex; 3938 } 3939 3940 unsigned Size = RegInfo->getSpillSize(*RC); 3941 Align Alignment(RegInfo->getSpillAlign(*RC)); 3942 int FrameIdx = MFI.CreateStackObject(Size, Alignment, true); 3943 CS.setFrameIdx(FrameIdx); 3944 3945 if ((unsigned)FrameIdx < MinCSFrameIndex) 3946 MinCSFrameIndex = FrameIdx; 3947 if ((unsigned)FrameIdx > MaxCSFrameIndex) 3948 MaxCSFrameIndex = FrameIdx; 3949 3950 // Grab 8 bytes below FP for the extended asynchronous frame info. 3951 if (hasFP(MF) && AFI->hasSwiftAsyncContext() && !UsesWinAAPCS && 3952 Reg == AArch64::FP) { 3953 FrameIdx = MFI.CreateStackObject(8, Alignment, true); 3954 AFI->setSwiftAsyncContextFrameIdx(FrameIdx); 3955 if ((unsigned)FrameIdx < MinCSFrameIndex) 3956 MinCSFrameIndex = FrameIdx; 3957 if ((unsigned)FrameIdx > MaxCSFrameIndex) 3958 MaxCSFrameIndex = FrameIdx; 3959 } 3960 LastReg = Reg; 3961 } 3962 3963 // Add hazard slot in the case where no FPR CSRs are present. 3964 if (AFI->hasStackHazardSlotIndex() && 3965 HazardSlotIndex == std::numeric_limits<int>::max()) { 3966 HazardSlotIndex = MFI.CreateStackObject(StackHazardSize, Align(8), true); 3967 LLVM_DEBUG(dbgs() << "Created CSR Hazard at slot " << HazardSlotIndex 3968 << "\n"); 3969 AFI->setStackHazardCSRSlotIndex(HazardSlotIndex); 3970 if ((unsigned)HazardSlotIndex < MinCSFrameIndex) 3971 MinCSFrameIndex = HazardSlotIndex; 3972 if ((unsigned)HazardSlotIndex > MaxCSFrameIndex) 3973 MaxCSFrameIndex = HazardSlotIndex; 3974 } 3975 3976 return true; 3977 } 3978 3979 bool AArch64FrameLowering::enableStackSlotScavenging( 3980 const MachineFunction &MF) const { 3981 const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>(); 3982 // If the function has streaming-mode changes, don't scavenge a 3983 // spillslot in the callee-save area, as that might require an 3984 // 'addvl' in the streaming-mode-changing call-sequence when the 3985 // function doesn't use a FP. 3986 if (AFI->hasStreamingModeChanges() && !hasFP(MF)) 3987 return false; 3988 // Don't allow register salvaging with hazard slots, in case it moves objects 3989 // into the wrong place. 3990 if (AFI->hasStackHazardSlotIndex()) 3991 return false; 3992 return AFI->hasCalleeSaveStackFreeSpace(); 3993 } 3994 3995 /// returns true if there are any SVE callee saves. 
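/// On success, Min and Max are set to the lowest and highest frame indices of
/// the SVE callee-save slots; those slots are expected to be consecutive.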
3996 static bool getSVECalleeSaveSlotRange(const MachineFrameInfo &MFI, 3997 int &Min, int &Max) { 3998 Min = std::numeric_limits<int>::max(); 3999 Max = std::numeric_limits<int>::min(); 4000 4001 if (!MFI.isCalleeSavedInfoValid()) 4002 return false; 4003 4004 const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo(); 4005 for (auto &CS : CSI) { 4006 if (AArch64::ZPRRegClass.contains(CS.getReg()) || 4007 AArch64::PPRRegClass.contains(CS.getReg())) { 4008 assert((Max == std::numeric_limits<int>::min() || 4009 Max + 1 == CS.getFrameIdx()) && 4010 "SVE CalleeSaves are not consecutive"); 4011 4012 Min = std::min(Min, CS.getFrameIdx()); 4013 Max = std::max(Max, CS.getFrameIdx()); 4014 } 4015 } 4016 return Min != std::numeric_limits<int>::max(); 4017 } 4018 4019 // Process all the SVE stack objects and determine offsets for each 4020 // object. If AssignOffsets is true, the offsets get assigned. 4021 // Fills in the first and last callee-saved frame indices into 4022 // Min/MaxCSFrameIndex, respectively. 4023 // Returns the size of the stack. 4024 static int64_t determineSVEStackObjectOffsets(MachineFrameInfo &MFI, 4025 int &MinCSFrameIndex, 4026 int &MaxCSFrameIndex, 4027 bool AssignOffsets) { 4028 #ifndef NDEBUG 4029 // First process all fixed stack objects. 4030 for (int I = MFI.getObjectIndexBegin(); I != 0; ++I) 4031 assert(MFI.getStackID(I) != TargetStackID::ScalableVector && 4032 "SVE vectors should never be passed on the stack by value, only by " 4033 "reference."); 4034 #endif 4035 4036 auto Assign = [&MFI](int FI, int64_t Offset) { 4037 LLVM_DEBUG(dbgs() << "alloc FI(" << FI << ") at SP[" << Offset << "]\n"); 4038 MFI.setObjectOffset(FI, Offset); 4039 }; 4040 4041 int64_t Offset = 0; 4042 4043 // Then process all callee saved slots. 4044 if (getSVECalleeSaveSlotRange(MFI, MinCSFrameIndex, MaxCSFrameIndex)) { 4045 // Assign offsets to the callee save slots. 4046 for (int I = MinCSFrameIndex; I <= MaxCSFrameIndex; ++I) { 4047 Offset += MFI.getObjectSize(I); 4048 Offset = alignTo(Offset, MFI.getObjectAlign(I)); 4049 if (AssignOffsets) 4050 Assign(I, -Offset); 4051 } 4052 } 4053 4054 // Ensure that the Callee-save area is aligned to 16bytes. 4055 Offset = alignTo(Offset, Align(16U)); 4056 4057 // Create a buffer of SVE objects to allocate and sort it. 4058 SmallVector<int, 8> ObjectsToAllocate; 4059 // If we have a stack protector, and we've previously decided that we have SVE 4060 // objects on the stack and thus need it to go in the SVE stack area, then it 4061 // needs to go first. 4062 int StackProtectorFI = -1; 4063 if (MFI.hasStackProtectorIndex()) { 4064 StackProtectorFI = MFI.getStackProtectorIndex(); 4065 if (MFI.getStackID(StackProtectorFI) == TargetStackID::ScalableVector) 4066 ObjectsToAllocate.push_back(StackProtectorFI); 4067 } 4068 for (int I = 0, E = MFI.getObjectIndexEnd(); I != E; ++I) { 4069 unsigned StackID = MFI.getStackID(I); 4070 if (StackID != TargetStackID::ScalableVector) 4071 continue; 4072 if (I == StackProtectorFI) 4073 continue; 4074 if (MaxCSFrameIndex >= I && I >= MinCSFrameIndex) 4075 continue; 4076 if (MFI.isDeadObjectIndex(I)) 4077 continue; 4078 4079 ObjectsToAllocate.push_back(I); 4080 } 4081 4082 // Allocate all SVE locals and spills 4083 for (unsigned FI : ObjectsToAllocate) { 4084 Align Alignment = MFI.getObjectAlign(FI); 4085 // FIXME: Given that the length of SVE vectors is not necessarily a power of 4086 // two, we'd need to align every object dynamically at runtime if the 4087 // alignment is larger than 16. This is not yet supported. 
4088 if (Alignment > Align(16)) 4089 report_fatal_error( 4090 "Alignment of scalable vectors > 16 bytes is not yet supported"); 4091 4092 Offset = alignTo(Offset + MFI.getObjectSize(FI), Alignment); 4093 if (AssignOffsets) 4094 Assign(FI, -Offset); 4095 } 4096 4097 return Offset; 4098 } 4099 4100 int64_t AArch64FrameLowering::estimateSVEStackObjectOffsets( 4101 MachineFrameInfo &MFI) const { 4102 int MinCSFrameIndex, MaxCSFrameIndex; 4103 return determineSVEStackObjectOffsets(MFI, MinCSFrameIndex, MaxCSFrameIndex, false); 4104 } 4105 4106 int64_t AArch64FrameLowering::assignSVEStackObjectOffsets( 4107 MachineFrameInfo &MFI, int &MinCSFrameIndex, int &MaxCSFrameIndex) const { 4108 return determineSVEStackObjectOffsets(MFI, MinCSFrameIndex, MaxCSFrameIndex, 4109 true); 4110 } 4111 4112 void AArch64FrameLowering::processFunctionBeforeFrameFinalized( 4113 MachineFunction &MF, RegScavenger *RS) const { 4114 MachineFrameInfo &MFI = MF.getFrameInfo(); 4115 4116 assert(getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown && 4117 "Upwards growing stack unsupported"); 4118 4119 int MinCSFrameIndex, MaxCSFrameIndex; 4120 int64_t SVEStackSize = 4121 assignSVEStackObjectOffsets(MFI, MinCSFrameIndex, MaxCSFrameIndex); 4122 4123 AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>(); 4124 AFI->setStackSizeSVE(alignTo(SVEStackSize, 16U)); 4125 AFI->setMinMaxSVECSFrameIndex(MinCSFrameIndex, MaxCSFrameIndex); 4126 4127 // If this function isn't doing Win64-style C++ EH, we don't need to do 4128 // anything. 4129 if (!MF.hasEHFunclets()) 4130 return; 4131 const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo(); 4132 WinEHFuncInfo &EHInfo = *MF.getWinEHFuncInfo(); 4133 4134 MachineBasicBlock &MBB = MF.front(); 4135 auto MBBI = MBB.begin(); 4136 while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup)) 4137 ++MBBI; 4138 4139 // Create an UnwindHelp object. 4140 // The UnwindHelp object is allocated at the start of the fixed object area 4141 int64_t FixedObject = 4142 getFixedObjectSize(MF, AFI, /*IsWin64*/ true, /*IsFunclet*/ false); 4143 int UnwindHelpFI = MFI.CreateFixedObject(/*Size*/ 8, 4144 /*SPOffset*/ -FixedObject, 4145 /*IsImmutable=*/false); 4146 EHInfo.UnwindHelpFrameIdx = UnwindHelpFI; 4147 4148 // We need to store -2 into the UnwindHelp object at the start of the 4149 // function. 4150 DebugLoc DL; 4151 RS->enterBasicBlockEnd(MBB); 4152 RS->backward(MBBI); 4153 Register DstReg = RS->FindUnusedReg(&AArch64::GPR64commonRegClass); 4154 assert(DstReg && "There must be a free register after frame setup"); 4155 BuildMI(MBB, MBBI, DL, TII.get(AArch64::MOVi64imm), DstReg).addImm(-2); 4156 BuildMI(MBB, MBBI, DL, TII.get(AArch64::STURXi)) 4157 .addReg(DstReg, getKillRegState(true)) 4158 .addFrameIndex(UnwindHelpFI) 4159 .addImm(0); 4160 } 4161 4162 namespace { 4163 struct TagStoreInstr { 4164 MachineInstr *MI; 4165 int64_t Offset, Size; 4166 explicit TagStoreInstr(MachineInstr *MI, int64_t Offset, int64_t Size) 4167 : MI(MI), Offset(Offset), Size(Size) {} 4168 }; 4169 4170 class TagStoreEdit { 4171 MachineFunction *MF; 4172 MachineBasicBlock *MBB; 4173 MachineRegisterInfo *MRI; 4174 // Tag store instructions that are being replaced. 4175 SmallVector<TagStoreInstr, 8> TagStores; 4176 // Combined memref arguments of the above instructions. 4177 SmallVector<MachineMemOperand *, 8> CombinedMemRefs; 4178 4179 // Replace allocation tags in [FrameReg + FrameRegOffset, FrameReg + 4180 // FrameRegOffset + Size) with the address tag of SP. 
4181 Register FrameReg; 4182 StackOffset FrameRegOffset; 4183 int64_t Size; 4184 // If not std::nullopt, move FrameReg to (FrameReg + FrameRegUpdate) at the 4185 // end. 4186 std::optional<int64_t> FrameRegUpdate; 4187 // MIFlags for any FrameReg updating instructions. 4188 unsigned FrameRegUpdateFlags; 4189 4190 // Use zeroing instruction variants. 4191 bool ZeroData; 4192 DebugLoc DL; 4193 4194 void emitUnrolled(MachineBasicBlock::iterator InsertI); 4195 void emitLoop(MachineBasicBlock::iterator InsertI); 4196 4197 public: 4198 TagStoreEdit(MachineBasicBlock *MBB, bool ZeroData) 4199 : MBB(MBB), ZeroData(ZeroData) { 4200 MF = MBB->getParent(); 4201 MRI = &MF->getRegInfo(); 4202 } 4203 // Add an instruction to be replaced. Instructions must be added in the 4204 // ascending order of Offset, and have to be adjacent. 4205 void addInstruction(TagStoreInstr I) { 4206 assert((TagStores.empty() || 4207 TagStores.back().Offset + TagStores.back().Size == I.Offset) && 4208 "Non-adjacent tag store instructions."); 4209 TagStores.push_back(I); 4210 } 4211 void clear() { TagStores.clear(); } 4212 // Emit equivalent code at the given location, and erase the current set of 4213 // instructions. May skip if the replacement is not profitable. May invalidate 4214 // the input iterator and replace it with a valid one. 4215 void emitCode(MachineBasicBlock::iterator &InsertI, 4216 const AArch64FrameLowering *TFI, bool TryMergeSPUpdate); 4217 }; 4218 4219 void TagStoreEdit::emitUnrolled(MachineBasicBlock::iterator InsertI) { 4220 const AArch64InstrInfo *TII = 4221 MF->getSubtarget<AArch64Subtarget>().getInstrInfo(); 4222 4223 const int64_t kMinOffset = -256 * 16; 4224 const int64_t kMaxOffset = 255 * 16; 4225 4226 Register BaseReg = FrameReg; 4227 int64_t BaseRegOffsetBytes = FrameRegOffset.getFixed(); 4228 if (BaseRegOffsetBytes < kMinOffset || 4229 BaseRegOffsetBytes + (Size - Size % 32) > kMaxOffset || 4230 // BaseReg can be FP, which is not necessarily aligned to 16-bytes. In 4231 // that case, BaseRegOffsetBytes will not be aligned to 16 bytes, which 4232 // is required for the offset of ST2G. 4233 BaseRegOffsetBytes % 16 != 0) { 4234 Register ScratchReg = MRI->createVirtualRegister(&AArch64::GPR64RegClass); 4235 emitFrameOffset(*MBB, InsertI, DL, ScratchReg, BaseReg, 4236 StackOffset::getFixed(BaseRegOffsetBytes), TII); 4237 BaseReg = ScratchReg; 4238 BaseRegOffsetBytes = 0; 4239 } 4240 4241 MachineInstr *LastI = nullptr; 4242 while (Size) { 4243 int64_t InstrSize = (Size > 16) ? 32 : 16; 4244 unsigned Opcode = 4245 InstrSize == 16 4246 ? (ZeroData ? AArch64::STZGi : AArch64::STGi) 4247 : (ZeroData ? AArch64::STZ2Gi : AArch64::ST2Gi); 4248 assert(BaseRegOffsetBytes % 16 == 0); 4249 MachineInstr *I = BuildMI(*MBB, InsertI, DL, TII->get(Opcode)) 4250 .addReg(AArch64::SP) 4251 .addReg(BaseReg) 4252 .addImm(BaseRegOffsetBytes / 16) 4253 .setMemRefs(CombinedMemRefs); 4254 // A store to [BaseReg, #0] should go last for an opportunity to fold the 4255 // final SP adjustment in the epilogue. 4256 if (BaseRegOffsetBytes == 0) 4257 LastI = I; 4258 BaseRegOffsetBytes += InstrSize; 4259 Size -= InstrSize; 4260 } 4261 4262 if (LastI) 4263 MBB->splice(InsertI, MBB, LastI); 4264 } 4265 4266 void TagStoreEdit::emitLoop(MachineBasicBlock::iterator InsertI) { 4267 const AArch64InstrInfo *TII = 4268 MF->getSubtarget<AArch64Subtarget>().getInstrInfo(); 4269 4270 Register BaseReg = FrameRegUpdate 4271 ? 
FrameReg 4272 : MRI->createVirtualRegister(&AArch64::GPR64RegClass); 4273 Register SizeReg = MRI->createVirtualRegister(&AArch64::GPR64RegClass); 4274 4275 emitFrameOffset(*MBB, InsertI, DL, BaseReg, FrameReg, FrameRegOffset, TII); 4276 4277 int64_t LoopSize = Size; 4278 // If the loop size is not a multiple of 32, split off one 16-byte store at 4279 // the end to fold BaseReg update into. 4280 if (FrameRegUpdate && *FrameRegUpdate) 4281 LoopSize -= LoopSize % 32; 4282 MachineInstr *LoopI = BuildMI(*MBB, InsertI, DL, 4283 TII->get(ZeroData ? AArch64::STZGloop_wback 4284 : AArch64::STGloop_wback)) 4285 .addDef(SizeReg) 4286 .addDef(BaseReg) 4287 .addImm(LoopSize) 4288 .addReg(BaseReg) 4289 .setMemRefs(CombinedMemRefs); 4290 if (FrameRegUpdate) 4291 LoopI->setFlags(FrameRegUpdateFlags); 4292 4293 int64_t ExtraBaseRegUpdate = 4294 FrameRegUpdate ? (*FrameRegUpdate - FrameRegOffset.getFixed() - Size) : 0; 4295 if (LoopSize < Size) { 4296 assert(FrameRegUpdate); 4297 assert(Size - LoopSize == 16); 4298 // Tag 16 more bytes at BaseReg and update BaseReg. 4299 BuildMI(*MBB, InsertI, DL, 4300 TII->get(ZeroData ? AArch64::STZGPostIndex : AArch64::STGPostIndex)) 4301 .addDef(BaseReg) 4302 .addReg(BaseReg) 4303 .addReg(BaseReg) 4304 .addImm(1 + ExtraBaseRegUpdate / 16) 4305 .setMemRefs(CombinedMemRefs) 4306 .setMIFlags(FrameRegUpdateFlags); 4307 } else if (ExtraBaseRegUpdate) { 4308 // Update BaseReg. 4309 BuildMI( 4310 *MBB, InsertI, DL, 4311 TII->get(ExtraBaseRegUpdate > 0 ? AArch64::ADDXri : AArch64::SUBXri)) 4312 .addDef(BaseReg) 4313 .addReg(BaseReg) 4314 .addImm(std::abs(ExtraBaseRegUpdate)) 4315 .addImm(0) 4316 .setMIFlags(FrameRegUpdateFlags); 4317 } 4318 } 4319 4320 // Check if *II is a register update that can be merged into STGloop that ends 4321 // at (Reg + Size). RemainingOffset is the required adjustment to Reg after the 4322 // end of the loop. 4323 bool canMergeRegUpdate(MachineBasicBlock::iterator II, unsigned Reg, 4324 int64_t Size, int64_t *TotalOffset) { 4325 MachineInstr &MI = *II; 4326 if ((MI.getOpcode() == AArch64::ADDXri || 4327 MI.getOpcode() == AArch64::SUBXri) && 4328 MI.getOperand(0).getReg() == Reg && MI.getOperand(1).getReg() == Reg) { 4329 unsigned Shift = AArch64_AM::getShiftValue(MI.getOperand(3).getImm()); 4330 int64_t Offset = MI.getOperand(2).getImm() << Shift; 4331 if (MI.getOpcode() == AArch64::SUBXri) 4332 Offset = -Offset; 4333 int64_t AbsPostOffset = std::abs(Offset - Size); 4334 const int64_t kMaxOffset = 4335 0xFFF; // Max encoding for unshifted ADDXri / SUBXri 4336 if (AbsPostOffset <= kMaxOffset && AbsPostOffset % 16 == 0) { 4337 *TotalOffset = Offset; 4338 return true; 4339 } 4340 } 4341 return false; 4342 } 4343 4344 void mergeMemRefs(const SmallVectorImpl<TagStoreInstr> &TSE, 4345 SmallVectorImpl<MachineMemOperand *> &MemRefs) { 4346 MemRefs.clear(); 4347 for (auto &TS : TSE) { 4348 MachineInstr *MI = TS.MI; 4349 // An instruction without memory operands may access anything. Be 4350 // conservative and return an empty list. 
4351 if (MI->memoperands_empty()) { 4352 MemRefs.clear(); 4353 return; 4354 } 4355 MemRefs.append(MI->memoperands_begin(), MI->memoperands_end()); 4356 } 4357 } 4358 4359 void TagStoreEdit::emitCode(MachineBasicBlock::iterator &InsertI, 4360 const AArch64FrameLowering *TFI, 4361 bool TryMergeSPUpdate) { 4362 if (TagStores.empty()) 4363 return; 4364 TagStoreInstr &FirstTagStore = TagStores[0]; 4365 TagStoreInstr &LastTagStore = TagStores[TagStores.size() - 1]; 4366 Size = LastTagStore.Offset - FirstTagStore.Offset + LastTagStore.Size; 4367 DL = TagStores[0].MI->getDebugLoc(); 4368 4369 Register Reg; 4370 FrameRegOffset = TFI->resolveFrameOffsetReference( 4371 *MF, FirstTagStore.Offset, false /*isFixed*/, false /*isSVE*/, Reg, 4372 /*PreferFP=*/false, /*ForSimm=*/true); 4373 FrameReg = Reg; 4374 FrameRegUpdate = std::nullopt; 4375 4376 mergeMemRefs(TagStores, CombinedMemRefs); 4377 4378 LLVM_DEBUG({ 4379 dbgs() << "Replacing adjacent STG instructions:\n"; 4380 for (const auto &Instr : TagStores) { 4381 dbgs() << " " << *Instr.MI; 4382 } 4383 }); 4384 4385 // Size threshold where a loop becomes shorter than a linear sequence of 4386 // tagging instructions. 4387 const int kSetTagLoopThreshold = 176; 4388 if (Size < kSetTagLoopThreshold) { 4389 if (TagStores.size() < 2) 4390 return; 4391 emitUnrolled(InsertI); 4392 } else { 4393 MachineInstr *UpdateInstr = nullptr; 4394 int64_t TotalOffset = 0; 4395 if (TryMergeSPUpdate) { 4396 // See if we can merge base register update into the STGloop. 4397 // This is done in AArch64LoadStoreOptimizer for "normal" stores, 4398 // but STGloop is way too unusual for that, and also it only 4399 // realistically happens in function epilogue. Also, STGloop is expanded 4400 // before that pass. 4401 if (InsertI != MBB->end() && 4402 canMergeRegUpdate(InsertI, FrameReg, FrameRegOffset.getFixed() + Size, 4403 &TotalOffset)) { 4404 UpdateInstr = &*InsertI++; 4405 LLVM_DEBUG(dbgs() << "Folding SP update into loop:\n " 4406 << *UpdateInstr); 4407 } 4408 } 4409 4410 if (!UpdateInstr && TagStores.size() < 2) 4411 return; 4412 4413 if (UpdateInstr) { 4414 FrameRegUpdate = TotalOffset; 4415 FrameRegUpdateFlags = UpdateInstr->getFlags(); 4416 } 4417 emitLoop(InsertI); 4418 if (UpdateInstr) 4419 UpdateInstr->eraseFromParent(); 4420 } 4421 4422 for (auto &TS : TagStores) 4423 TS.MI->eraseFromParent(); 4424 } 4425 4426 bool isMergeableStackTaggingInstruction(MachineInstr &MI, int64_t &Offset, 4427 int64_t &Size, bool &ZeroData) { 4428 MachineFunction &MF = *MI.getParent()->getParent(); 4429 const MachineFrameInfo &MFI = MF.getFrameInfo(); 4430 4431 unsigned Opcode = MI.getOpcode(); 4432 ZeroData = (Opcode == AArch64::STZGloop || Opcode == AArch64::STZGi || 4433 Opcode == AArch64::STZ2Gi); 4434 4435 if (Opcode == AArch64::STGloop || Opcode == AArch64::STZGloop) { 4436 if (!MI.getOperand(0).isDead() || !MI.getOperand(1).isDead()) 4437 return false; 4438 if (!MI.getOperand(2).isImm() || !MI.getOperand(3).isFI()) 4439 return false; 4440 Offset = MFI.getObjectOffset(MI.getOperand(3).getIndex()); 4441 Size = MI.getOperand(2).getImm(); 4442 return true; 4443 } 4444 4445 if (Opcode == AArch64::STGi || Opcode == AArch64::STZGi) 4446 Size = 16; 4447 else if (Opcode == AArch64::ST2Gi || Opcode == AArch64::STZ2Gi) 4448 Size = 32; 4449 else 4450 return false; 4451 4452 if (MI.getOperand(0).getReg() != AArch64::SP || !MI.getOperand(1).isFI()) 4453 return false; 4454 4455 Offset = MFI.getObjectOffset(MI.getOperand(1).getIndex()) + 4456 16 * MI.getOperand(2).getImm(); 4457 return true; 4458 
// Detect a run of memory tagging instructions for adjacent stack frame slots,
// and replace them with a shorter instruction sequence:
// * replace STG + STG with ST2G
// * replace STGloop + STGloop with STGloop
// This code needs to run when stack slot offsets are already known, but before
// FrameIndex operands in STG instructions are eliminated.
MachineBasicBlock::iterator tryMergeAdjacentSTG(MachineBasicBlock::iterator II,
                                                const AArch64FrameLowering *TFI,
                                                RegScavenger *RS) {
  bool FirstZeroData;
  int64_t Size, Offset;
  MachineInstr &MI = *II;
  MachineBasicBlock *MBB = MI.getParent();
  MachineBasicBlock::iterator NextI = ++II;
  if (&MI == &MBB->instr_back())
    return II;
  if (!isMergeableStackTaggingInstruction(MI, Offset, Size, FirstZeroData))
    return II;

  SmallVector<TagStoreInstr, 4> Instrs;
  Instrs.emplace_back(&MI, Offset, Size);

  constexpr int kScanLimit = 10;
  int Count = 0;
  for (MachineBasicBlock::iterator E = MBB->end();
       NextI != E && Count < kScanLimit; ++NextI) {
    MachineInstr &MI = *NextI;
    bool ZeroData;
    int64_t Size, Offset;
    // Collect instructions that update memory tags with a FrameIndex operand
    // and (when applicable) constant size, and whose output registers are dead
    // (the latter is almost always the case in practice). Since these
    // instructions effectively have no inputs or outputs, we are free to skip
    // any non-aliasing instructions in between without tracking used registers.
    if (isMergeableStackTaggingInstruction(MI, Offset, Size, ZeroData)) {
      if (ZeroData != FirstZeroData)
        break;
      Instrs.emplace_back(&MI, Offset, Size);
      continue;
    }

    // Only count non-transient, non-tagging instructions toward the scan
    // limit.
    if (!MI.isTransient())
      ++Count;

    // Just in case, stop before the epilogue code starts.
    if (MI.getFlag(MachineInstr::FrameSetup) ||
        MI.getFlag(MachineInstr::FrameDestroy))
      break;

    // Reject anything that may alias the collected instructions.
    if (MI.mayLoadOrStore() || MI.hasUnmodeledSideEffects())
      break;
  }

  // New code will be inserted after the last tagging instruction we've found.
  MachineBasicBlock::iterator InsertI = Instrs.back().MI;

  // All the gathered stack tag instructions are merged and placed after the
  // last tag store in the list. We must check whether the NZCV flag is live
  // at the insertion point; otherwise NZCV may be clobbered if any STG loops
  // are emitted.

  // FIXME: This bail-out is conservative: the liveness check is performed
  // even when the merged sequence contains no STG loops, in which case the
  // check is unnecessary.
  LivePhysRegs LiveRegs(*(MBB->getParent()->getSubtarget().getRegisterInfo()));
  LiveRegs.addLiveOuts(*MBB);
  for (auto I = MBB->rbegin();; ++I) {
    MachineInstr &MI = *I;
    if (MI == InsertI)
      break;
    LiveRegs.stepBackward(*I);
  }
  InsertI++;
  if (LiveRegs.contains(AArch64::NZCV))
    return InsertI;

  llvm::stable_sort(Instrs,
                    [](const TagStoreInstr &Left, const TagStoreInstr &Right) {
                      return Left.Offset < Right.Offset;
                    });

  // Make sure that we don't have any overlapping stores.
  int64_t CurOffset = Instrs[0].Offset;
  for (auto &Instr : Instrs) {
    if (CurOffset > Instr.Offset)
      return NextI;
    CurOffset = Instr.Offset + Instr.Size;
  }

  // Find contiguous runs of tagged memory and emit shorter instruction
  // sequences for them when possible.
  TagStoreEdit TSE(MBB, FirstZeroData);
  std::optional<int64_t> EndOffset;
  for (auto &Instr : Instrs) {
    if (EndOffset && *EndOffset != Instr.Offset) {
      // Found a gap.
      TSE.emitCode(InsertI, TFI, /*TryMergeSPUpdate = */ false);
      TSE.clear();
    }

    TSE.addInstruction(Instr);
    EndOffset = Instr.Offset + Instr.Size;
  }

  const MachineFunction *MF = MBB->getParent();
  // Multiple FP/SP updates in a loop cannot be described by CFI instructions.
  TSE.emitCode(
      InsertI, TFI, /*TryMergeSPUpdate = */
      !MF->getInfo<AArch64FunctionInfo>()->needsAsyncDwarfUnwindInfo(*MF));

  return InsertI;
}
} // namespace

MachineBasicBlock::iterator emitVGSaveRestore(MachineBasicBlock::iterator II,
                                              const AArch64FrameLowering *TFI) {
  MachineInstr &MI = *II;
  MachineBasicBlock *MBB = MI.getParent();
  MachineFunction *MF = MBB->getParent();

  if (MI.getOpcode() != AArch64::VGSavePseudo &&
      MI.getOpcode() != AArch64::VGRestorePseudo)
    return II;

  SMEAttrs FuncAttrs(MF->getFunction());
  bool LocallyStreaming =
      FuncAttrs.hasStreamingBody() && !FuncAttrs.hasStreamingInterface();
  const AArch64FunctionInfo *AFI = MF->getInfo<AArch64FunctionInfo>();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  const AArch64InstrInfo *TII =
      MF->getSubtarget<AArch64Subtarget>().getInstrInfo();

  int64_t VGFrameIdx =
      LocallyStreaming ? AFI->getStreamingVGIdx() : AFI->getVGIdx();
  assert(VGFrameIdx != std::numeric_limits<int>::max() &&
         "Expected FrameIdx for VG");

  unsigned CFIIndex;
  if (MI.getOpcode() == AArch64::VGSavePseudo) {
    const MachineFrameInfo &MFI = MF->getFrameInfo();
    int64_t Offset =
        MFI.getObjectOffset(VGFrameIdx) - TFI->getOffsetOfLocalArea();
    CFIIndex = MF->addFrameInst(MCCFIInstruction::createOffset(
        nullptr, TRI->getDwarfRegNum(AArch64::VG, true), Offset));
  } else
    CFIIndex = MF->addFrameInst(MCCFIInstruction::createRestore(
        nullptr, TRI->getDwarfRegNum(AArch64::VG, true)));

  MachineInstr *UnwindInst = BuildMI(*MBB, II, II->getDebugLoc(),
                                     TII->get(TargetOpcode::CFI_INSTRUCTION))
                                 .addCFIIndex(CFIIndex);

  MI.eraseFromParent();
  return UnwindInst->getIterator();
}
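// A purely illustrative sketch of what the CFI emitted above typically looks
// like once printed (the spill offset is hypothetical): around a
// streaming-mode change, the two pseudos bracket the call roughly as
//   .cfi_offset vg, -16     // lowered from VGSavePseudo
//   bl   streaming_callee
//   .cfi_restore vg         // lowered from VGRestorePseudo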
void AArch64FrameLowering::processFunctionBeforeFrameIndicesReplaced(
    MachineFunction &MF, RegScavenger *RS = nullptr) const {
  AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  for (auto &BB : MF)
    for (MachineBasicBlock::iterator II = BB.begin(); II != BB.end();) {
      if (AFI->hasStreamingModeChanges())
        II = emitVGSaveRestore(II, this);
      if (StackTaggingMergeSetTag)
        II = tryMergeAdjacentSTG(II, this, RS);
    }
}

/// For Win64 AArch64 EH, the offset to the Unwind object is from the SP
/// before the update. This is easily retrieved as it is exactly the offset
/// that is set in processFunctionBeforeFrameFinalized.
StackOffset AArch64FrameLowering::getFrameIndexReferencePreferSP(
    const MachineFunction &MF, int FI, Register &FrameReg,
    bool IgnoreSPUpdates) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  if (IgnoreSPUpdates) {
    LLVM_DEBUG(dbgs() << "Offset from the SP for " << FI << " is "
                      << MFI.getObjectOffset(FI) << "\n");
    FrameReg = AArch64::SP;
    return StackOffset::getFixed(MFI.getObjectOffset(FI));
  }

  // Go to common code if we cannot provide sp + offset.
  if (MFI.hasVarSizedObjects() ||
      MF.getInfo<AArch64FunctionInfo>()->getStackSizeSVE() ||
      MF.getSubtarget().getRegisterInfo()->hasStackRealignment(MF))
    return getFrameIndexReference(MF, FI, FrameReg);

  FrameReg = AArch64::SP;
  return getStackOffset(MF, MFI.getObjectOffset(FI));
}

/// The parent frame offset (aka dispFrame) is only used on X86_64 to retrieve
/// the parent's frame pointer.
unsigned AArch64FrameLowering::getWinEHParentFrameOffset(
    const MachineFunction &MF) const {
  return 0;
}

/// Funclets only need to account for space for the callee saved registers,
/// as the locals are accounted for in the parent's stack frame.
unsigned AArch64FrameLowering::getWinEHFuncletFrameSize(
    const MachineFunction &MF) const {
  // This is the size of the pushed CSRs.
  unsigned CSSize =
      MF.getInfo<AArch64FunctionInfo>()->getCalleeSavedStackSize();
  // This is the amount of stack a funclet needs to allocate.
  return alignTo(CSSize + MF.getFrameInfo().getMaxCallFrameSize(),
                 getStackAlign());
}

namespace {
struct FrameObject {
  bool IsValid = false;
  // Index of the object in MFI.
  int ObjectIndex = 0;
  // Group ID this object belongs to.
  int GroupIndex = -1;
  // This object should be placed first (closest to SP).
  bool ObjectFirst = false;
  // This object's group (which always contains the object with
  // ObjectFirst==true) should be placed first.
  bool GroupFirst = false;

  // Used to distinguish between FP and GPR accesses. The values are decided so
  // that they sort FPR < Hazard < GPR and they can be or'd together.
  unsigned Accesses = 0;
  enum { AccessFPR = 1, AccessHazard = 2, AccessGPR = 4 };
};

class GroupBuilder {
  SmallVector<int, 8> CurrentMembers;
  int NextGroupIndex = 0;
  std::vector<FrameObject> &Objects;

public:
  GroupBuilder(std::vector<FrameObject> &Objects) : Objects(Objects) {}
  void AddMember(int Index) { CurrentMembers.push_back(Index); }
  void EndCurrentGroup() {
    if (CurrentMembers.size() > 1) {
      // Create a new group with the current member list. This might remove
      // them from their pre-existing groups. That's OK, dealing with
      // overlapping groups is too hard and unlikely to make a difference.
      LLVM_DEBUG(dbgs() << "group:");
      for (int Index : CurrentMembers) {
        Objects[Index].GroupIndex = NextGroupIndex;
        LLVM_DEBUG(dbgs() << " " << Index);
      }
      LLVM_DEBUG(dbgs() << "\n");
      NextGroupIndex++;
    }
    CurrentMembers.clear();
  }
};
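// A purely illustrative example of the resulting order (frame indices and
// access kinds are hypothetical): given objects %stack.0 (GPR-only accesses),
// %stack.1 (FPR-only accesses), and the hazard padding slot, the comparator
// below sorts them as
//   %stack.1 (AccessFPR) < hazard slot (AccessHazard) < %stack.0 (AccessGPR)
// so FPR and GPR accesses end up separated by the hazard padding.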
bool FrameObjectCompare(const FrameObject &A, const FrameObject &B) {
  // Objects at a lower index are closer to FP; objects at a higher index are
  // closer to SP.
  //
  // For consistency in our comparison, all invalid objects are placed
  // at the end. This also allows us to stop walking when we hit the
  // first invalid item after it's all sorted.
  //
  // If we want to include a stack hazard region, order FPR accesses < the
  // hazard object < GPR accesses in order to create a separation between the
  // two. For the Accesses field 1 = FPR, 2 = Hazard Object, 4 = GPR.
  //
  // Otherwise the "first" object goes first (closest to SP), followed by the
  // members of the "first" group.
  //
  // The rest are sorted by the group index to keep the groups together.
  // Higher numbered groups are more likely to be around longer (i.e. untagged
  // in the function epilogue and not at some earlier point). Place them closer
  // to SP.
  //
  // If all else equal, sort by the object index to keep the objects in the
  // original order.
  return std::make_tuple(!A.IsValid, A.Accesses, A.ObjectFirst, A.GroupFirst,
                         A.GroupIndex, A.ObjectIndex) <
         std::make_tuple(!B.IsValid, B.Accesses, B.ObjectFirst, B.GroupFirst,
                         B.GroupIndex, B.ObjectIndex);
}
} // namespace

void AArch64FrameLowering::orderFrameObjects(
    const MachineFunction &MF, SmallVectorImpl<int> &ObjectsToAllocate) const {
  if (!OrderFrameObjects || ObjectsToAllocate.empty())
    return;

  const AArch64FunctionInfo &AFI = *MF.getInfo<AArch64FunctionInfo>();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  std::vector<FrameObject> FrameObjects(MFI.getObjectIndexEnd());
  for (auto &Obj : ObjectsToAllocate) {
    FrameObjects[Obj].IsValid = true;
    FrameObjects[Obj].ObjectIndex = Obj;
  }

  // Identify FPR vs GPR slots for hazards, and stack slots that are tagged at
  // the same time.
  GroupBuilder GB(FrameObjects);
  for (auto &MBB : MF) {
    for (auto &MI : MBB) {
      if (MI.isDebugInstr())
        continue;

      if (AFI.hasStackHazardSlotIndex()) {
        std::optional<int> FI = getLdStFrameID(MI, MFI);
        if (FI && *FI >= 0 && *FI < (int)FrameObjects.size()) {
          if (MFI.getStackID(*FI) == TargetStackID::ScalableVector ||
              AArch64InstrInfo::isFpOrNEON(MI))
            FrameObjects[*FI].Accesses |= FrameObject::AccessFPR;
          else
            FrameObjects[*FI].Accesses |= FrameObject::AccessGPR;
        }
      }

      int OpIndex;
      switch (MI.getOpcode()) {
      case AArch64::STGloop:
      case AArch64::STZGloop:
        OpIndex = 3;
        break;
      case AArch64::STGi:
      case AArch64::STZGi:
      case AArch64::ST2Gi:
      case AArch64::STZ2Gi:
        OpIndex = 1;
        break;
      default:
        OpIndex = -1;
      }

      int TaggedFI = -1;
      if (OpIndex >= 0) {
        const MachineOperand &MO = MI.getOperand(OpIndex);
        if (MO.isFI()) {
          int FI = MO.getIndex();
          if (FI >= 0 && FI < MFI.getObjectIndexEnd() &&
              FrameObjects[FI].IsValid)
            TaggedFI = FI;
        }
      }

      // If this is a stack tagging instruction for a slot that is not part of
      // a group yet, either start a new group or add it to the current one.
      if (TaggedFI >= 0)
        GB.AddMember(TaggedFI);
      else
        GB.EndCurrentGroup();
    }
    // Groups should never span multiple basic blocks.
    GB.EndCurrentGroup();
  }

  if (AFI.hasStackHazardSlotIndex()) {
    FrameObjects[AFI.getStackHazardSlotIndex()].Accesses =
        FrameObject::AccessHazard;
    // If a stack object is unknown or both GPR and FPR, sort it into GPR.
    for (auto &Obj : FrameObjects)
      if (!Obj.Accesses ||
          Obj.Accesses == (FrameObject::AccessGPR | FrameObject::AccessFPR))
        Obj.Accesses = FrameObject::AccessGPR;
  }

  // If the function's tagged base pointer is pinned to a stack slot, we want
  // to put that slot first when possible. This will likely place it at SP + 0,
  // and save one instruction when generating the base pointer because IRG does
  // not allow an immediate offset.
  std::optional<int> TBPI = AFI.getTaggedBasePointerIndex();
  if (TBPI) {
    FrameObjects[*TBPI].ObjectFirst = true;
    FrameObjects[*TBPI].GroupFirst = true;
    int FirstGroupIndex = FrameObjects[*TBPI].GroupIndex;
    if (FirstGroupIndex >= 0)
      for (FrameObject &Object : FrameObjects)
        if (Object.GroupIndex == FirstGroupIndex)
          Object.GroupFirst = true;
  }

  llvm::stable_sort(FrameObjects, FrameObjectCompare);

  int i = 0;
  for (auto &Obj : FrameObjects) {
    // All invalid items are sorted at the end, so it's safe to stop.
    if (!Obj.IsValid)
      break;
    ObjectsToAllocate[i++] = Obj.ObjectIndex;
  }

  LLVM_DEBUG({
    dbgs() << "Final frame order:\n";
    for (auto &Obj : FrameObjects) {
      if (!Obj.IsValid)
        break;
      dbgs() << " " << Obj.ObjectIndex << ": group " << Obj.GroupIndex;
      if (Obj.ObjectFirst)
        dbgs() << ", first";
      if (Obj.GroupFirst)
        dbgs() << ", group-first";
      dbgs() << "\n";
    }
  });
}
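// A purely illustrative example of the debug output above (object indices and
// group numbers are hypothetical): for a function with hazard padding, one
// FPR spill slot, and two slots tagged together, "Final frame order" might
// read
//   1: group -1        (FPR-accessed slot, placed closest to FP)
//   3: group -1        (hazard padding slot)
//   0: group 0         (tagged GPR slot)
//   2: group 0         (tagged GPR slot, placed closest to SP)
// i.e. FPR objects first, then the hazard slot, then GPR/tagged objects, with
// grouped slots kept adjacent.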
/// Emit a loop to decrement SP until it is equal to TargetReg, with probes at
/// least every ProbeSize bytes. Returns an iterator of the first instruction
/// after the loop. The difference between SP and TargetReg must be an exact
/// multiple of ProbeSize.
MachineBasicBlock::iterator
AArch64FrameLowering::inlineStackProbeLoopExactMultiple(
    MachineBasicBlock::iterator MBBI, int64_t ProbeSize,
    Register TargetReg) const {
  MachineBasicBlock &MBB = *MBBI->getParent();
  MachineFunction &MF = *MBB.getParent();
  const AArch64InstrInfo *TII =
      MF.getSubtarget<AArch64Subtarget>().getInstrInfo();
  DebugLoc DL = MBB.findDebugLoc(MBBI);

  MachineFunction::iterator MBBInsertPoint = std::next(MBB.getIterator());
  MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(MBB.getBasicBlock());
  MF.insert(MBBInsertPoint, LoopMBB);
  MachineBasicBlock *ExitMBB = MF.CreateMachineBasicBlock(MBB.getBasicBlock());
  MF.insert(MBBInsertPoint, ExitMBB);

  // SUB SP, SP, #ProbeSize (or equivalent if ProbeSize is not encodable
  // in SUB).
  emitFrameOffset(*LoopMBB, LoopMBB->end(), DL, AArch64::SP, AArch64::SP,
                  StackOffset::getFixed(-ProbeSize), TII,
                  MachineInstr::FrameSetup);
  // STR XZR, [SP]
  BuildMI(*LoopMBB, LoopMBB->end(), DL, TII->get(AArch64::STRXui))
      .addReg(AArch64::XZR)
      .addReg(AArch64::SP)
      .addImm(0)
      .setMIFlags(MachineInstr::FrameSetup);
  // CMP SP, TargetReg
  BuildMI(*LoopMBB, LoopMBB->end(), DL, TII->get(AArch64::SUBSXrx64),
          AArch64::XZR)
      .addReg(AArch64::SP)
      .addReg(TargetReg)
      .addImm(AArch64_AM::getArithExtendImm(AArch64_AM::UXTX, 0))
      .setMIFlags(MachineInstr::FrameSetup);
  // B.CC Loop
  BuildMI(*LoopMBB, LoopMBB->end(), DL, TII->get(AArch64::Bcc))
      .addImm(AArch64CC::NE)
      .addMBB(LoopMBB)
      .setMIFlags(MachineInstr::FrameSetup);

  LoopMBB->addSuccessor(ExitMBB);
  LoopMBB->addSuccessor(LoopMBB);
  // Synthesize the exit MBB.
  ExitMBB->splice(ExitMBB->end(), &MBB, MBBI, MBB.end());
  ExitMBB->transferSuccessorsAndUpdatePHIs(&MBB);
  MBB.addSuccessor(LoopMBB);
  // Update liveins.
  fullyRecomputeLiveIns({ExitMBB, LoopMBB});

  return ExitMBB->begin();
}
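// A purely illustrative sketch of the loop emitted above, assuming a probe
// size of 4096 bytes and x9 as TargetReg (both hypothetical):
//   LoopMBB:
//     sub  sp, sp, #4096
//     str  xzr, [sp]
//     cmp  sp, x9
//     b.ne LoopMBB
// inlineStackProbeFixed below splits a fixed allocation into NumBlocks full
// ProbeSize blocks, handled either by this loop or by an unrolled sequence,
// plus a residual SUB; e.g. a 20000-byte frame with a 4096-byte probe size
// gives NumBlocks = 4 and ResidualSize = 3616.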
void AArch64FrameLowering::inlineStackProbeFixed(
    MachineBasicBlock::iterator MBBI, Register ScratchReg, int64_t FrameSize,
    StackOffset CFAOffset) const {
  MachineBasicBlock *MBB = MBBI->getParent();
  MachineFunction &MF = *MBB->getParent();
  const AArch64InstrInfo *TII =
      MF.getSubtarget<AArch64Subtarget>().getInstrInfo();
  AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  bool EmitAsyncCFI = AFI->needsAsyncDwarfUnwindInfo(MF);
  bool HasFP = hasFP(MF);

  DebugLoc DL;
  int64_t ProbeSize = MF.getInfo<AArch64FunctionInfo>()->getStackProbeSize();
  int64_t NumBlocks = FrameSize / ProbeSize;
  int64_t ResidualSize = FrameSize % ProbeSize;

  LLVM_DEBUG(dbgs() << "Stack probing: total " << FrameSize << " bytes, "
                    << NumBlocks << " blocks of " << ProbeSize
                    << " bytes, plus " << ResidualSize << " bytes\n");

  // Decrement SP by NumBlocks * ProbeSize bytes, with either an unrolled
  // sequence or an ordinary loop.
  if (NumBlocks <= AArch64::StackProbeMaxLoopUnroll) {
    for (int i = 0; i < NumBlocks; ++i) {
      // SUB SP, SP, #ProbeSize (or equivalent if ProbeSize is not
      // encodable in a SUB).
      emitFrameOffset(*MBB, MBBI, DL, AArch64::SP, AArch64::SP,
                      StackOffset::getFixed(-ProbeSize), TII,
                      MachineInstr::FrameSetup, false, false, nullptr,
                      EmitAsyncCFI && !HasFP, CFAOffset);
      CFAOffset += StackOffset::getFixed(ProbeSize);
      // STR XZR, [SP]
      BuildMI(*MBB, MBBI, DL, TII->get(AArch64::STRXui))
          .addReg(AArch64::XZR)
          .addReg(AArch64::SP)
          .addImm(0)
          .setMIFlags(MachineInstr::FrameSetup);
    }
  } else if (NumBlocks != 0) {
    // SUB ScratchReg, SP, #FrameSize (or equivalent if FrameSize is not
    // encodable in ADD). ScratchReg may temporarily become the CFA register.
    emitFrameOffset(*MBB, MBBI, DL, ScratchReg, AArch64::SP,
                    StackOffset::getFixed(-ProbeSize * NumBlocks), TII,
                    MachineInstr::FrameSetup, false, false, nullptr,
                    EmitAsyncCFI && !HasFP, CFAOffset);
    CFAOffset += StackOffset::getFixed(ProbeSize * NumBlocks);
    MBBI = inlineStackProbeLoopExactMultiple(MBBI, ProbeSize, ScratchReg);
    MBB = MBBI->getParent();
    if (EmitAsyncCFI && !HasFP) {
      // Set the CFA register back to SP.
      const AArch64RegisterInfo &RegInfo =
          *MF.getSubtarget<AArch64Subtarget>().getRegisterInfo();
      unsigned Reg = RegInfo.getDwarfRegNum(AArch64::SP, true);
      unsigned CFIIndex =
          MF.addFrameInst(MCCFIInstruction::createDefCfaRegister(nullptr, Reg));
      BuildMI(*MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex)
          .setMIFlags(MachineInstr::FrameSetup);
    }
  }

  if (ResidualSize != 0) {
    // SUB SP, SP, #ResidualSize (or equivalent if ResidualSize is not
    // encodable in SUB).
    emitFrameOffset(*MBB, MBBI, DL, AArch64::SP, AArch64::SP,
                    StackOffset::getFixed(-ResidualSize), TII,
                    MachineInstr::FrameSetup, false, false, nullptr,
                    EmitAsyncCFI && !HasFP, CFAOffset);
    if (ResidualSize > AArch64::StackProbeMaxUnprobedStack) {
      // STR XZR, [SP]
      BuildMI(*MBB, MBBI, DL, TII->get(AArch64::STRXui))
          .addReg(AArch64::XZR)
          .addReg(AArch64::SP)
          .addImm(0)
          .setMIFlags(MachineInstr::FrameSetup);
    }
  }
}

void AArch64FrameLowering::inlineStackProbe(MachineFunction &MF,
                                            MachineBasicBlock &MBB) const {
  // Get the instructions that need to be replaced. We emit at most two of
  // these. Remember them in order to avoid complications coming from the need
  // to traverse the block while potentially creating more blocks.
  SmallVector<MachineInstr *, 4> ToReplace;
  for (MachineInstr &MI : MBB)
    if (MI.getOpcode() == AArch64::PROBED_STACKALLOC ||
        MI.getOpcode() == AArch64::PROBED_STACKALLOC_VAR)
      ToReplace.push_back(&MI);

  for (MachineInstr *MI : ToReplace) {
    if (MI->getOpcode() == AArch64::PROBED_STACKALLOC) {
      Register ScratchReg = MI->getOperand(0).getReg();
      int64_t FrameSize = MI->getOperand(1).getImm();
      StackOffset CFAOffset = StackOffset::get(MI->getOperand(2).getImm(),
                                               MI->getOperand(3).getImm());
      inlineStackProbeFixed(MI->getIterator(), ScratchReg, FrameSize,
                            CFAOffset);
    } else {
      assert(MI->getOpcode() == AArch64::PROBED_STACKALLOC_VAR &&
             "Stack probe pseudo-instruction expected");
      const AArch64InstrInfo *TII =
          MI->getMF()->getSubtarget<AArch64Subtarget>().getInstrInfo();
      Register TargetReg = MI->getOperand(0).getReg();
      (void)TII->probedStackAlloc(MI->getIterator(), TargetReg, true);
    }
    MI->eraseFromParent();
  }
}