1 //=- AArch64MachineFunctionInfo.h - AArch64 machine function info -*- C++ -*-=// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file declares AArch64-specific per-machine-function information. 10 // 11 //===----------------------------------------------------------------------===// 12 13 #ifndef LLVM_LIB_TARGET_AARCH64_AARCH64MACHINEFUNCTIONINFO_H 14 #define LLVM_LIB_TARGET_AARCH64_AARCH64MACHINEFUNCTIONINFO_H 15 16 #include "AArch64Subtarget.h" 17 #include "llvm/ADT/ArrayRef.h" 18 #include "llvm/ADT/SmallPtrSet.h" 19 #include "llvm/ADT/SmallVector.h" 20 #include "llvm/CodeGen/CallingConvLower.h" 21 #include "llvm/CodeGen/MIRYamlMapping.h" 22 #include "llvm/CodeGen/MachineFrameInfo.h" 23 #include "llvm/CodeGen/MachineFunction.h" 24 #include "llvm/IR/Function.h" 25 #include "llvm/MC/MCLinkerOptimizationHint.h" 26 #include "llvm/MC/MCSymbol.h" 27 #include <cassert> 28 #include <optional> 29 30 namespace llvm { 31 32 namespace yaml { 33 struct AArch64FunctionInfo; 34 } // end namespace yaml 35 36 class AArch64Subtarget; 37 class MachineInstr; 38 39 struct TPIDR2Object { 40 int FrameIndex = std::numeric_limits<int>::max(); 41 unsigned Uses = 0; 42 }; 43 44 /// AArch64FunctionInfo - This class is derived from MachineFunctionInfo and 45 /// contains private AArch64-specific information for each MachineFunction. 46 class AArch64FunctionInfo final : public MachineFunctionInfo { 47 /// Number of bytes of arguments this function has on the stack. If the callee 48 /// is expected to restore the argument stack this should be a multiple of 16, 49 /// all usable during a tail call. 
  ///
  /// The alternative would forbid tail call optimisation in some cases: if we
  /// want to transfer control from a function with 8-bytes of stack-argument
  /// space to a function with 16-bytes then misalignment of this value would
  /// make a stack adjustment necessary, which could not be undone by the
  /// callee.
  unsigned BytesInStackArgArea = 0;

  /// The number of bytes to restore to deallocate space for incoming
  /// arguments. Canonically 0 in the C calling convention, but non-zero when
  /// callee is expected to pop the args.
  unsigned ArgumentStackToRestore = 0;

  /// Space just below incoming stack pointer reserved for arguments being
  /// passed on the stack during a tail call. This will be the difference
  /// between the largest tail call argument space needed in this function and
  /// what's already available by reusing space of incoming arguments.
  unsigned TailCallReservedStack = 0;

  /// HasStackFrame - True if this function has a stack frame. Set by
  /// determineCalleeSaves().
  bool HasStackFrame = false;

  /// Amount of stack frame size, not including callee-saved registers.
  uint64_t LocalStackSize = 0;

  /// The start and end frame indices for the SVE callee saves.
  int MinSVECSFrameIndex = 0;
  int MaxSVECSFrameIndex = 0;

  /// Amount of stack frame size used for saving callee-saved registers.
  unsigned CalleeSavedStackSize = 0;
  /// Callee-save size for SVE registers, in 'scalable bytes' (see
  /// setSVECalleeSavedStackSize below).
  unsigned SVECalleeSavedStackSize = 0;
  /// True once CalleeSavedStackSize has been cached via
  /// setCalleeSavedStackSize(); guards getCalleeSavedStackSize().
  bool HasCalleeSavedStackSize = false;

  /// Number of TLS accesses using the special (combinable)
  /// _TLS_MODULE_BASE_ symbol.
  unsigned NumLocalDynamicTLSAccesses = 0;

  /// FrameIndex for start of varargs area for arguments passed on the
  /// stack.
  int VarArgsStackIndex = 0;

  /// Offset of start of varargs area for arguments passed on the stack.
  unsigned VarArgsStackOffset = 0;

  /// FrameIndex for start of varargs area for arguments passed in
  /// general purpose registers.
  int VarArgsGPRIndex = 0;

  /// Size of the varargs area for arguments passed in general purpose
  /// registers.
  unsigned VarArgsGPRSize = 0;

  /// FrameIndex for start of varargs area for arguments passed in
  /// floating-point registers.
  int VarArgsFPRIndex = 0;

  /// Size of the varargs area for arguments passed in floating-point
  /// registers.
  unsigned VarArgsFPRSize = 0;

  /// The stack slots used to add space between FPR and GPR accesses when using
  /// hazard padding. StackHazardCSRSlotIndex is added between GPR and FPR CSRs.
  /// StackHazardSlotIndex is added between (sorted) stack objects.
  /// INT_MAX is the "not allocated" sentinel for both (see
  /// hasStackHazardSlotIndex and the setters' asserts).
  int StackHazardSlotIndex = std::numeric_limits<int>::max();
  int StackHazardCSRSlotIndex = std::numeric_limits<int>::max();

  /// True if this function has a subset of CSRs that is handled explicitly via
  /// copies.
  bool IsSplitCSR = false;

  /// True when the stack gets realigned dynamically because the size of stack
  /// frame is unknown at compile time. e.g., in case of VLAs.
  bool StackRealigned = false;

  /// True when the callee-save stack area has unused gaps that may be used for
  /// other stack allocations.
  bool CalleeSaveStackHasFreeSpace = false;

  /// SRetReturnReg - sret lowering includes returning the value of the
  /// returned struct in a register. This field holds the virtual register into
  /// which the sret argument is passed.
  Register SRetReturnReg;

  /// SVE stack size (for predicates and data vectors) are maintained here
  /// rather than in FrameInfo, as the placement and Stack IDs are target
  /// specific.
  uint64_t StackSizeSVE = 0;

  /// HasCalculatedStackSizeSVE indicates whether StackSizeSVE is valid.
141 bool HasCalculatedStackSizeSVE = false; 142 143 /// Has a value when it is known whether or not the function uses a 144 /// redzone, and no value otherwise. 145 /// Initialized during frame lowering, unless the function has the noredzone 146 /// attribute, in which case it is set to false at construction. 147 std::optional<bool> HasRedZone; 148 149 /// ForwardedMustTailRegParms - A list of virtual and physical registers 150 /// that must be forwarded to every musttail call. 151 SmallVector<ForwardedRegister, 1> ForwardedMustTailRegParms; 152 153 /// FrameIndex for the tagged base pointer. 154 std::optional<int> TaggedBasePointerIndex; 155 156 /// Offset from SP-at-entry to the tagged base pointer. 157 /// Tagged base pointer is set up to point to the first (lowest address) 158 /// tagged stack slot. 159 unsigned TaggedBasePointerOffset; 160 161 /// OutliningStyle denotes, if a function was outined, how it was outlined, 162 /// e.g. Tail Call, Thunk, or Function if none apply. 163 std::optional<std::string> OutliningStyle; 164 165 // Offset from SP-after-callee-saved-spills (i.e. SP-at-entry minus 166 // CalleeSavedStackSize) to the address of the frame record. 167 int CalleeSaveBaseToFrameRecordOffset = 0; 168 169 /// SignReturnAddress is true if PAC-RET is enabled for the function with 170 /// defaults being sign non-leaf functions only, with the B key. 171 bool SignReturnAddress = false; 172 173 /// SignReturnAddressAll modifies the default PAC-RET mode to signing leaf 174 /// functions as well. 175 bool SignReturnAddressAll = false; 176 177 /// SignWithBKey modifies the default PAC-RET mode to signing with the B key. 178 bool SignWithBKey = false; 179 180 /// HasELFSignedGOT is true if the target binary format is ELF and the IR 181 /// module containing the corresponding function has "ptrauth-elf-got" flag 182 /// set to 1. 
  bool HasELFSignedGOT = false;

  /// SigningInstrOffset captures the offset of the PAC-RET signing instruction
  /// within the prologue, so it can be re-used for authentication in the
  /// epilogue when using PC as a second salt (FEAT_PAuth_LR)
  MCSymbol *SignInstrLabel = nullptr;

  /// BranchTargetEnforcement enables placing BTI instructions at potential
  /// indirect branch destinations.
  bool BranchTargetEnforcement = false;

  /// Indicates that SP signing should be diversified with PC as-per PAuthLR.
  /// This is set by -mbranch-protection and will emit NOP instructions unless
  /// the subtarget feature +pauthlr is also used (in which case non-NOP
  /// instructions are emitted).
  bool BranchProtectionPAuthLR = false;

  /// Whether this function has an extended frame record [Ctx, FP, LR]. If so,
  /// bit 60 of the in-memory FP will be 1 to enable other tools to detect the
  /// extended record.
  bool HasSwiftAsyncContext = false;

  /// The stack slot where the Swift asynchronous context is stored.
  /// INT_MAX is the "not allocated" sentinel (checked in
  /// getCalleeSavedStackSize below).
  int SwiftAsyncContextFrameIdx = std::numeric_limits<int>::max();

  /// NOTE(review): presumably true when the function uses MTE stack tagging
  /// (exposed via isMTETagged()) — confirm against the setter's caller.
  bool IsMTETagged = false;

  /// The function has Scalable Vector or Scalable Predicate register argument
  /// or return type
  bool IsSVECC = false;

  /// The frame-index for the TPIDR2 object used for lazy saves.
  TPIDR2Object TPIDR2;

  /// Whether this function changes streaming mode within the function.
  bool HasStreamingModeChanges = false;

  /// True if the function needs unwind information.
  mutable std::optional<bool> NeedsDwarfUnwindInfo;

  /// True if the function needs asynchronous unwind information.
  mutable std::optional<bool> NeedsAsyncDwarfUnwindInfo;

  /// Stack-probe size; 0 means stack probing is disabled (see
  /// hasStackProbing()).
  int64_t StackProbeSize = 0;

  // Holds a register containing pstate.sm. This is set
  // on function entry to record the initial pstate of a function.
230 Register PStateSMReg = MCRegister::NoRegister; 231 232 // Holds a pointer to a buffer that is large enough to represent 233 // all SME ZA state and any additional state required by the 234 // __arm_sme_save/restore support routines. 235 Register SMESaveBufferAddr = MCRegister::NoRegister; 236 237 // true if SMESaveBufferAddr is used. 238 bool SMESaveBufferUsed = false; 239 240 // Has the PNReg used to build PTRUE instruction. 241 // The PTRUE is used for the LD/ST of ZReg pairs in save and restore. 242 unsigned PredicateRegForFillSpill = 0; 243 244 // The stack slots where VG values are stored to. 245 int64_t VGIdx = std::numeric_limits<int>::max(); 246 int64_t StreamingVGIdx = std::numeric_limits<int>::max(); 247 248 public: 249 AArch64FunctionInfo(const Function &F, const AArch64Subtarget *STI); 250 251 MachineFunctionInfo * 252 clone(BumpPtrAllocator &Allocator, MachineFunction &DestMF, 253 const DenseMap<MachineBasicBlock *, MachineBasicBlock *> &Src2DstMBB) 254 const override; 255 256 void setPredicateRegForFillSpill(unsigned Reg) { 257 PredicateRegForFillSpill = Reg; 258 } 259 unsigned getPredicateRegForFillSpill() const { 260 return PredicateRegForFillSpill; 261 } 262 263 Register getSMESaveBufferAddr() const { return SMESaveBufferAddr; }; 264 void setSMESaveBufferAddr(Register Reg) { SMESaveBufferAddr = Reg; }; 265 266 unsigned isSMESaveBufferUsed() const { return SMESaveBufferUsed; }; 267 void setSMESaveBufferUsed(bool Used = true) { SMESaveBufferUsed = Used; }; 268 269 Register getPStateSMReg() const { return PStateSMReg; }; 270 void setPStateSMReg(Register Reg) { PStateSMReg = Reg; }; 271 272 int64_t getVGIdx() const { return VGIdx; }; 273 void setVGIdx(unsigned Idx) { VGIdx = Idx; }; 274 275 int64_t getStreamingVGIdx() const { return StreamingVGIdx; }; 276 void setStreamingVGIdx(unsigned FrameIdx) { StreamingVGIdx = FrameIdx; }; 277 278 bool isSVECC() const { return IsSVECC; }; 279 void setIsSVECC(bool s) { IsSVECC = s; }; 280 281 TPIDR2Object 
&getTPIDR2Obj() { return TPIDR2; } 282 283 void initializeBaseYamlFields(const yaml::AArch64FunctionInfo &YamlMFI); 284 285 unsigned getBytesInStackArgArea() const { return BytesInStackArgArea; } 286 void setBytesInStackArgArea(unsigned bytes) { BytesInStackArgArea = bytes; } 287 288 unsigned getArgumentStackToRestore() const { return ArgumentStackToRestore; } 289 void setArgumentStackToRestore(unsigned bytes) { 290 ArgumentStackToRestore = bytes; 291 } 292 293 unsigned getTailCallReservedStack() const { return TailCallReservedStack; } 294 void setTailCallReservedStack(unsigned bytes) { 295 TailCallReservedStack = bytes; 296 } 297 298 bool hasCalculatedStackSizeSVE() const { return HasCalculatedStackSizeSVE; } 299 300 void setStackSizeSVE(uint64_t S) { 301 HasCalculatedStackSizeSVE = true; 302 StackSizeSVE = S; 303 } 304 305 uint64_t getStackSizeSVE() const { return StackSizeSVE; } 306 307 bool hasStackFrame() const { return HasStackFrame; } 308 void setHasStackFrame(bool s) { HasStackFrame = s; } 309 310 bool isStackRealigned() const { return StackRealigned; } 311 void setStackRealigned(bool s) { StackRealigned = s; } 312 313 bool hasCalleeSaveStackFreeSpace() const { 314 return CalleeSaveStackHasFreeSpace; 315 } 316 void setCalleeSaveStackHasFreeSpace(bool s) { 317 CalleeSaveStackHasFreeSpace = s; 318 } 319 bool isSplitCSR() const { return IsSplitCSR; } 320 void setIsSplitCSR(bool s) { IsSplitCSR = s; } 321 322 void setLocalStackSize(uint64_t Size) { LocalStackSize = Size; } 323 uint64_t getLocalStackSize() const { return LocalStackSize; } 324 325 void setOutliningStyle(const std::string &Style) { OutliningStyle = Style; } 326 std::optional<std::string> getOutliningStyle() const { 327 return OutliningStyle; 328 } 329 330 void setCalleeSavedStackSize(unsigned Size) { 331 CalleeSavedStackSize = Size; 332 HasCalleeSavedStackSize = true; 333 } 334 335 // When CalleeSavedStackSize has not been set (for example when 336 // some MachineIR pass is run in isolation), then 
recalculate 337 // the CalleeSavedStackSize directly from the CalleeSavedInfo. 338 // Note: This information can only be recalculated after PEI 339 // has assigned offsets to the callee save objects. 340 unsigned getCalleeSavedStackSize(const MachineFrameInfo &MFI) const { 341 bool ValidateCalleeSavedStackSize = false; 342 343 #ifndef NDEBUG 344 // Make sure the calculated size derived from the CalleeSavedInfo 345 // equals the cached size that was calculated elsewhere (e.g. in 346 // determineCalleeSaves). 347 ValidateCalleeSavedStackSize = HasCalleeSavedStackSize; 348 #endif 349 350 if (!HasCalleeSavedStackSize || ValidateCalleeSavedStackSize) { 351 assert(MFI.isCalleeSavedInfoValid() && "CalleeSavedInfo not calculated"); 352 if (MFI.getCalleeSavedInfo().empty()) 353 return 0; 354 355 int64_t MinOffset = std::numeric_limits<int64_t>::max(); 356 int64_t MaxOffset = std::numeric_limits<int64_t>::min(); 357 for (const auto &Info : MFI.getCalleeSavedInfo()) { 358 int FrameIdx = Info.getFrameIdx(); 359 if (MFI.getStackID(FrameIdx) != TargetStackID::Default) 360 continue; 361 int64_t Offset = MFI.getObjectOffset(FrameIdx); 362 int64_t ObjSize = MFI.getObjectSize(FrameIdx); 363 MinOffset = std::min<int64_t>(Offset, MinOffset); 364 MaxOffset = std::max<int64_t>(Offset + ObjSize, MaxOffset); 365 } 366 367 if (SwiftAsyncContextFrameIdx != std::numeric_limits<int>::max()) { 368 int64_t Offset = MFI.getObjectOffset(getSwiftAsyncContextFrameIdx()); 369 int64_t ObjSize = MFI.getObjectSize(getSwiftAsyncContextFrameIdx()); 370 MinOffset = std::min<int64_t>(Offset, MinOffset); 371 MaxOffset = std::max<int64_t>(Offset + ObjSize, MaxOffset); 372 } 373 374 if (StackHazardCSRSlotIndex != std::numeric_limits<int>::max()) { 375 int64_t Offset = MFI.getObjectOffset(StackHazardCSRSlotIndex); 376 int64_t ObjSize = MFI.getObjectSize(StackHazardCSRSlotIndex); 377 MinOffset = std::min<int64_t>(Offset, MinOffset); 378 MaxOffset = std::max<int64_t>(Offset + ObjSize, MaxOffset); 379 } 380 381 
unsigned Size = alignTo(MaxOffset - MinOffset, 16); 382 assert((!HasCalleeSavedStackSize || getCalleeSavedStackSize() == Size) && 383 "Invalid size calculated for callee saves"); 384 return Size; 385 } 386 387 return getCalleeSavedStackSize(); 388 } 389 390 unsigned getCalleeSavedStackSize() const { 391 assert(HasCalleeSavedStackSize && 392 "CalleeSavedStackSize has not been calculated"); 393 return CalleeSavedStackSize; 394 } 395 396 // Saves the CalleeSavedStackSize for SVE vectors in 'scalable bytes' 397 void setSVECalleeSavedStackSize(unsigned Size) { 398 SVECalleeSavedStackSize = Size; 399 } 400 unsigned getSVECalleeSavedStackSize() const { 401 return SVECalleeSavedStackSize; 402 } 403 404 void setMinMaxSVECSFrameIndex(int Min, int Max) { 405 MinSVECSFrameIndex = Min; 406 MaxSVECSFrameIndex = Max; 407 } 408 409 int getMinSVECSFrameIndex() const { return MinSVECSFrameIndex; } 410 int getMaxSVECSFrameIndex() const { return MaxSVECSFrameIndex; } 411 412 void incNumLocalDynamicTLSAccesses() { ++NumLocalDynamicTLSAccesses; } 413 unsigned getNumLocalDynamicTLSAccesses() const { 414 return NumLocalDynamicTLSAccesses; 415 } 416 417 std::optional<bool> hasRedZone() const { return HasRedZone; } 418 void setHasRedZone(bool s) { HasRedZone = s; } 419 420 int getVarArgsStackIndex() const { return VarArgsStackIndex; } 421 void setVarArgsStackIndex(int Index) { VarArgsStackIndex = Index; } 422 423 unsigned getVarArgsStackOffset() const { return VarArgsStackOffset; } 424 void setVarArgsStackOffset(unsigned Offset) { VarArgsStackOffset = Offset; } 425 426 int getVarArgsGPRIndex() const { return VarArgsGPRIndex; } 427 void setVarArgsGPRIndex(int Index) { VarArgsGPRIndex = Index; } 428 429 unsigned getVarArgsGPRSize() const { return VarArgsGPRSize; } 430 void setVarArgsGPRSize(unsigned Size) { VarArgsGPRSize = Size; } 431 432 int getVarArgsFPRIndex() const { return VarArgsFPRIndex; } 433 void setVarArgsFPRIndex(int Index) { VarArgsFPRIndex = Index; } 434 435 unsigned 
getVarArgsFPRSize() const { return VarArgsFPRSize; } 436 void setVarArgsFPRSize(unsigned Size) { VarArgsFPRSize = Size; } 437 438 bool hasStackHazardSlotIndex() const { 439 return StackHazardSlotIndex != std::numeric_limits<int>::max(); 440 } 441 int getStackHazardSlotIndex() const { return StackHazardSlotIndex; } 442 void setStackHazardSlotIndex(int Index) { 443 assert(StackHazardSlotIndex == std::numeric_limits<int>::max()); 444 StackHazardSlotIndex = Index; 445 } 446 int getStackHazardCSRSlotIndex() const { return StackHazardCSRSlotIndex; } 447 void setStackHazardCSRSlotIndex(int Index) { 448 assert(StackHazardCSRSlotIndex == std::numeric_limits<int>::max()); 449 StackHazardCSRSlotIndex = Index; 450 } 451 452 unsigned getSRetReturnReg() const { return SRetReturnReg; } 453 void setSRetReturnReg(unsigned Reg) { SRetReturnReg = Reg; } 454 455 unsigned getJumpTableEntrySize(int Idx) const { 456 return JumpTableEntryInfo[Idx].first; 457 } 458 MCSymbol *getJumpTableEntryPCRelSymbol(int Idx) const { 459 return JumpTableEntryInfo[Idx].second; 460 } 461 void setJumpTableEntryInfo(int Idx, unsigned Size, MCSymbol *PCRelSym) { 462 if ((unsigned)Idx >= JumpTableEntryInfo.size()) 463 JumpTableEntryInfo.resize(Idx+1); 464 JumpTableEntryInfo[Idx] = std::make_pair(Size, PCRelSym); 465 } 466 467 using SetOfInstructions = SmallPtrSet<const MachineInstr *, 16>; 468 469 const SetOfInstructions &getLOHRelated() const { return LOHRelated; } 470 471 // Shortcuts for LOH related types. 472 class MILOHDirective { 473 MCLOHType Kind; 474 475 /// Arguments of this directive. Order matters. 
    SmallVector<const MachineInstr *, 3> Args;

  public:
    using LOHArgs = ArrayRef<const MachineInstr *>;

    MILOHDirective(MCLOHType Kind, LOHArgs Args)
        : Kind(Kind), Args(Args.begin(), Args.end()) {
      assert(isValidMCLOHType(Kind) && "Invalid LOH directive type!");
    }

    MCLOHType getKind() const { return Kind; }
    LOHArgs getArgs() const { return Args; }
  };

  using MILOHArgs = MILOHDirective::LOHArgs;
  using MILOHContainer = SmallVector<MILOHDirective, 32>;

  const MILOHContainer &getLOHContainer() const { return LOHContainerSet; }

  /// Add a LOH directive of this @p Kind and this @p Args.
  void addLOHDirective(MCLOHType Kind, MILOHArgs Args) {
    LOHContainerSet.push_back(MILOHDirective(Kind, Args));
    // Track every instruction referenced by any directive (see getLOHRelated).
    LOHRelated.insert(Args.begin(), Args.end());
  }

  SmallVectorImpl<ForwardedRegister> &getForwardedMustTailRegParms() {
    return ForwardedMustTailRegParms;
  }

  std::optional<int> getTaggedBasePointerIndex() const {
    return TaggedBasePointerIndex;
  }
  void setTaggedBasePointerIndex(int Index) { TaggedBasePointerIndex = Index; }

  unsigned getTaggedBasePointerOffset() const {
    return TaggedBasePointerOffset;
  }
  void setTaggedBasePointerOffset(unsigned Offset) {
    TaggedBasePointerOffset = Offset;
  }

  int getCalleeSaveBaseToFrameRecordOffset() const {
    return CalleeSaveBaseToFrameRecordOffset;
  }
  void setCalleeSaveBaseToFrameRecordOffset(int Offset) {
    CalleeSaveBaseToFrameRecordOffset = Offset;
  }

  /// PAC-RET queries; defined out of line (see SignReturnAddress /
  /// SignReturnAddressAll members for the defaults they encode).
  bool shouldSignReturnAddress(const MachineFunction &MF) const;
  bool shouldSignReturnAddress(bool SpillsLR) const;

  bool needsShadowCallStackPrologueEpilogue(MachineFunction &MF) const;

  bool shouldSignWithBKey() const { return SignWithBKey; }

  bool hasELFSignedGOT() const { return HasELFSignedGOT; }

  /// Label of the PAC-RET signing instruction, for re-use during epilogue
  /// authentication (FEAT_PAuth_LR; see SignInstrLabel member).
  MCSymbol *getSigningInstrLabel() const { return SignInstrLabel; }
  void setSigningInstrLabel(MCSymbol *Label) { SignInstrLabel = Label; }

  bool isMTETagged() const { return IsMTETagged; }

  bool branchTargetEnforcement() const { return BranchTargetEnforcement; }

  bool branchProtectionPAuthLR() const { return BranchProtectionPAuthLR; }

  void setHasSwiftAsyncContext(bool HasContext) {
    HasSwiftAsyncContext = HasContext;
  }
  bool hasSwiftAsyncContext() const { return HasSwiftAsyncContext; }

  void setSwiftAsyncContextFrameIdx(int FI) {
    SwiftAsyncContextFrameIdx = FI;
  }
  int getSwiftAsyncContextFrameIdx() const { return SwiftAsyncContextFrameIdx; }

  bool needsDwarfUnwindInfo(const MachineFunction &MF) const;
  bool needsAsyncDwarfUnwindInfo(const MachineFunction &MF) const;

  bool hasStreamingModeChanges() const { return HasStreamingModeChanges; }
  void setHasStreamingModeChanges(bool HasChanges) {
    HasStreamingModeChanges = HasChanges;
  }

  /// Stack probing is enabled iff a non-zero probe size was configured.
  bool hasStackProbing() const { return StackProbeSize != 0; }

  int64_t getStackProbeSize() const { return StackProbeSize; }

private:
  // Hold the lists of LOHs.
  MILOHContainer LOHContainerSet;
  SetOfInstructions LOHRelated;

  // Per-jump-table (entry size, PC-relative symbol) pairs, indexed by
  // jump-table index; grown lazily by setJumpTableEntryInfo.
  SmallVector<std::pair<unsigned, MCSymbol *>, 2> JumpTableEntryInfo;
};

namespace yaml {

/// MIR-serializable mirror of llvm::AArch64FunctionInfo. Currently only the
/// red-zone flag round-trips through MIR.
struct AArch64FunctionInfo final : public yaml::MachineFunctionInfo {
  std::optional<bool> HasRedZone;

  AArch64FunctionInfo() = default;
  AArch64FunctionInfo(const llvm::AArch64FunctionInfo &MFI);

  void mappingImpl(yaml::IO &YamlIO) override;
  ~AArch64FunctionInfo() = default;
};

template <> struct MappingTraits<AArch64FunctionInfo> {
  static void mapping(IO &YamlIO, AArch64FunctionInfo &MFI) {
    YamlIO.mapOptional("hasRedZone", MFI.HasRedZone);
  }
};

} // end namespace yaml

} // end namespace llvm

#endif // LLVM_LIB_TARGET_AARCH64_AARCH64MACHINEFUNCTIONINFO_H