xref: /llvm-project/clang/lib/Basic/Targets/AArch64.cpp (revision 9033e0c2d22c9f247eccea50ae8c975eb3468ac1)
1 //===--- AArch64.cpp - Implement AArch64 target feature support -----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements AArch64 TargetInfo objects.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "AArch64.h"
14 #include "clang/Basic/Diagnostic.h"
15 #include "clang/Basic/LangOptions.h"
16 #include "clang/Basic/TargetBuiltins.h"
17 #include "clang/Basic/TargetInfo.h"
18 #include "llvm/ADT/APSInt.h"
19 #include "llvm/ADT/ArrayRef.h"
20 #include "llvm/ADT/StringExtras.h"
21 #include "llvm/ADT/StringSwitch.h"
22 #include "llvm/TargetParser/AArch64TargetParser.h"
23 #include "llvm/TargetParser/ARMTargetParserCommon.h"
24 #include <optional>
25 
26 using namespace clang;
27 using namespace clang::targets;
28 
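// Illustrative only: each X-macro block below turns a .def entry into one table
// row.  A hypothetical entry
//   BUILTIN(__builtin_neon_example, "V8ScV8Sc", "n")
// expands to
//   {"__builtin_neon_example", "V8ScV8Sc", "n", nullptr, HeaderDesc::NO_HEADER,
//    ALL_LANGUAGES},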
29 static constexpr Builtin::Info BuiltinInfo[] = {
30 #define BUILTIN(ID, TYPE, ATTRS)                                               \
31   {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
32 #define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE)                               \
33   {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
34 #include "clang/Basic/BuiltinsNEON.def"
35 
36 #define BUILTIN(ID, TYPE, ATTRS)                                               \
37   {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
38 #define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE)                               \
39   {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
40 #include "clang/Basic/BuiltinsSVE.def"
41 
42 #define BUILTIN(ID, TYPE, ATTRS)                                               \
43   {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
44 #define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE)                               \
45   {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
46 #include "clang/Basic/BuiltinsSME.def"
47 
48 #define BUILTIN(ID, TYPE, ATTRS)                                               \
49   {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
50 #define LANGBUILTIN(ID, TYPE, ATTRS, LANG)                                     \
51   {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, LANG},
52 #define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE)                               \
53   {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
54 #define TARGET_HEADER_BUILTIN(ID, TYPE, ATTRS, HEADER, LANGS, FEATURE)         \
55   {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::HEADER, LANGS},
56 #include "clang/Basic/BuiltinsAArch64.def"
57 };
58 
59 void AArch64TargetInfo::setArchFeatures() {
60   if (*ArchInfo == llvm::AArch64::ARMV8R) {
61     HasDotProd = true;
62     HasDIT = true;
63     HasFlagM = true;
64     HasRCPC = true;
65     FPU |= NeonMode;
66     HasCCPP = true;
67     HasCRC = true;
68     HasLSE = true;
69     HasRDM = true;
70   } else if (ArchInfo->Version.getMajor() == 8) {
71     if (ArchInfo->Version.getMinor() >= 7u) {
72       HasWFxT = true;
73     }
74     if (ArchInfo->Version.getMinor() >= 6u) {
75       HasBFloat16 = true;
76       HasMatMul = true;
77     }
78     if (ArchInfo->Version.getMinor() >= 5u) {
79       HasAlternativeNZCV = true;
80       HasFRInt3264 = true;
81       HasSSBS = true;
82       HasSB = true;
83       HasPredRes = true;
84       HasBTI = true;
85     }
86     if (ArchInfo->Version.getMinor() >= 4u) {
87       HasDotProd = true;
88       HasDIT = true;
89       HasFlagM = true;
90     }
91     if (ArchInfo->Version.getMinor() >= 3u) {
92       HasRCPC = true;
93       FPU |= NeonMode;
94     }
95     if (ArchInfo->Version.getMinor() >= 2u) {
96       HasCCPP = true;
97     }
98     if (ArchInfo->Version.getMinor() >= 1u) {
99       HasCRC = true;
100       HasLSE = true;
101       HasRDM = true;
102     }
103   } else if (ArchInfo->Version.getMajor() == 9) {
104     if (ArchInfo->Version.getMinor() >= 2u) {
105       HasWFxT = true;
106     }
107     if (ArchInfo->Version.getMinor() >= 1u) {
108       HasBFloat16 = true;
109       HasMatMul = true;
110     }
111     FPU |= SveMode;
112     HasSVE2 = true;
113     HasFullFP16 = true;
114     HasAlternativeNZCV = true;
115     HasFRInt3264 = true;
116     HasSSBS = true;
117     HasSB = true;
118     HasPredRes = true;
119     HasBTI = true;
120     HasDotProd = true;
121     HasDIT = true;
122     HasFlagM = true;
123     HasRCPC = true;
124     FPU |= NeonMode;
125     HasCCPP = true;
126     HasCRC = true;
127     HasLSE = true;
128     HasRDM = true;
129   }
130 }
131 
132 AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple &Triple,
133                                      const TargetOptions &Opts)
134     : TargetInfo(Triple), ABI("aapcs") {
135   if (getTriple().isOSOpenBSD()) {
136     Int64Type = SignedLongLong;
137     IntMaxType = SignedLongLong;
138   } else {
139     if (!getTriple().isOSDarwin() && !getTriple().isOSNetBSD())
140       WCharType = UnsignedInt;
141 
142     Int64Type = SignedLong;
143     IntMaxType = SignedLong;
144   }
145 
146   AddrSpaceMap = &ARM64AddrSpaceMap;
147 
148   // All AArch64 implementations support ARMv8 FP, which makes half a legal type.
149   HasLegalHalfType = true;
150   HalfArgsAndReturns = true;
151   HasFloat16 = true;
152   HasStrictFP = true;
153 
154   if (Triple.isArch64Bit())
155     LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
156   else
157     LongWidth = LongAlign = PointerWidth = PointerAlign = 32;
158 
159   BitIntMaxAlign = 128;
160   MaxVectorAlign = 128;
161   MaxAtomicInlineWidth = 128;
162   MaxAtomicPromoteWidth = 128;
163 
164   LongDoubleWidth = LongDoubleAlign = SuitableAlign = 128;
165   LongDoubleFormat = &llvm::APFloat::IEEEquad();
166 
167   BFloat16Width = BFloat16Align = 16;
168   BFloat16Format = &llvm::APFloat::BFloat();
169 
170   // Make __builtin_ms_va_list available.
171   HasBuiltinMSVaList = true;
172 
173   // Make the SVE types available.  Note that this deliberately doesn't
174   // depend on SveMode, since in principle it should be possible to turn
175   // SVE on and off within a translation unit.  It should also be possible
176   // to compile the global declaration:
177   //
178   // __SVInt8_t *ptr;
179   //
180   // even without SVE.
181   HasAArch64SVETypes = true;
182 
183   // {} in inline assembly are neon specifiers, not assembly variant
184   // specifiers.
185   NoAsmVariants = true;
186 
187   // AAPCS gives rules for bitfields. 7.1.7 says: "The container type
188   // contributes to the alignment of the containing aggregate in the same way
189   // a plain (non bit-field) member of that type would, without exception for
190   // zero-sized or anonymous bit-fields."
191   assert(UseBitFieldTypeAlignment && "bitfields affect type alignment");
192   UseZeroLengthBitfieldAlignment = true;
193 
194   HasUnalignedAccess = true;
195 
196   // AArch64 targets default to using the ARM C++ ABI.
197   TheCXXABI.set(TargetCXXABI::GenericAArch64);
198 
199   if (Triple.getOS() == llvm::Triple::Linux)
200     this->MCountName = "\01_mcount";
201   else if (Triple.getOS() == llvm::Triple::UnknownOS)
202     this->MCountName =
203         Opts.EABIVersion == llvm::EABI::GNU ? "\01_mcount" : "mcount";
204 }
205 
206 StringRef AArch64TargetInfo::getABI() const { return ABI; }
207 
208 bool AArch64TargetInfo::setABI(const std::string &Name) {
209   if (Name != "aapcs" && Name != "aapcs-soft" && Name != "darwinpcs" &&
210       Name != "pauthtest")
211     return false;
212 
213   ABI = Name;
214   return true;
215 }
216 
217 bool AArch64TargetInfo::validateTarget(DiagnosticsEngine &Diags) const {
218   if (hasFeature("fp") && ABI == "aapcs-soft") {
219     // aapcs-soft is not allowed for targets with an FPU, to avoid there being
220       // two incompatible ABIs.
221     Diags.Report(diag::err_target_unsupported_abi_with_fpu) << ABI;
222     return false;
223   }
224   if (getTriple().getEnvironment() == llvm::Triple::PAuthTest &&
225       getTriple().getOS() != llvm::Triple::Linux) {
226     Diags.Report(diag::err_target_unsupported_abi_for_triple)
227         << getTriple().getEnvironmentName() << getTriple().getTriple();
228     return false;
229   }
230   return true;
231 }
232 
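// Illustrative use (hedged): a global register variable such as
//   register unsigned long g asm("x18");
// is only accepted when the register is reserved (x0, x18 on triples that
// reserve it by default, or any xN named by a "reserve-xN" feature); binding a
// 32-bit variable to "x18" would additionally report a size mismatch.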
233 bool AArch64TargetInfo::validateGlobalRegisterVariable(
234     StringRef RegName, unsigned RegSize, bool &HasSizeMismatch) const {
235   if (RegName == "sp") {
236     HasSizeMismatch = RegSize != 64;
237     return true;
238   }
239   if (RegName.starts_with("w"))
240     HasSizeMismatch = RegSize != 32;
241   else if (RegName.starts_with("x"))
242     HasSizeMismatch = RegSize != 64;
243   else
244     return false;
245   StringRef RegNum = RegName.drop_front();
246   // Check if the register is reserved. See also
247   // AArch64TargetLowering::getRegisterByName().
248   return RegNum == "0" ||
249          (RegNum == "18" &&
250           llvm::AArch64::isX18ReservedByDefault(getTriple())) ||
251          getTargetOpts().FeatureMap.lookup(("reserve-x" + RegNum).str());
252 }
253 
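// Illustrative mapping (hedged): a spec such as "pac-ret+leaf+b-key+bti", as
// accepted by -mbranch-protection= and the target attribute, parses to
// return-address signing of all functions with the B key plus branch-target
// enforcement.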
254 bool AArch64TargetInfo::validateBranchProtection(StringRef Spec, StringRef,
255                                                  BranchProtectionInfo &BPI,
256                                                  StringRef &Err) const {
257   llvm::ARM::ParsedBranchProtection PBP;
258   if (!llvm::ARM::parseBranchProtection(Spec, PBP, Err, HasPAuthLR))
259     return false;
260 
261   BPI.SignReturnAddr =
262       llvm::StringSwitch<LangOptions::SignReturnAddressScopeKind>(PBP.Scope)
263           .Case("non-leaf", LangOptions::SignReturnAddressScopeKind::NonLeaf)
264           .Case("all", LangOptions::SignReturnAddressScopeKind::All)
265           .Default(LangOptions::SignReturnAddressScopeKind::None);
266 
267   if (PBP.Key == "a_key")
268     BPI.SignKey = LangOptions::SignReturnAddressKeyKind::AKey;
269   else
270     BPI.SignKey = LangOptions::SignReturnAddressKeyKind::BKey;
271 
272   BPI.BranchTargetEnforcement = PBP.BranchTargetEnforcement;
273   BPI.BranchProtectionPAuthLR = PBP.BranchProtectionPAuthLR;
274   BPI.GuardedControlStack = PBP.GuardedControlStack;
275   return true;
276 }
277 
278 bool AArch64TargetInfo::isValidCPUName(StringRef Name) const {
279   return llvm::AArch64::parseCpu(Name).has_value();
280 }
281 
282 bool AArch64TargetInfo::setCPU(const std::string &Name) {
283   return isValidCPUName(Name);
284 }
285 
286 void AArch64TargetInfo::fillValidCPUList(
287     SmallVectorImpl<StringRef> &Values) const {
288   llvm::AArch64::fillValidCPUArchList(Values);
289 }
290 
291 void AArch64TargetInfo::getTargetDefinesARMV81A(const LangOptions &Opts,
292                                                 MacroBuilder &Builder) const {
293   Builder.defineMacro("__ARM_FEATURE_QRDMX", "1");
294 }
295 
296 void AArch64TargetInfo::getTargetDefinesARMV82A(const LangOptions &Opts,
297                                                 MacroBuilder &Builder) const {
298   // Also include the ARMv8.1 defines
299   getTargetDefinesARMV81A(Opts, Builder);
300 }
301 
302 void AArch64TargetInfo::getTargetDefinesARMV83A(const LangOptions &Opts,
303                                                 MacroBuilder &Builder) const {
304   Builder.defineMacro("__ARM_FEATURE_COMPLEX", "1");
305   Builder.defineMacro("__ARM_FEATURE_JCVT", "1");
306   // Also include the Armv8.2 defines
307   getTargetDefinesARMV82A(Opts, Builder);
308 }
309 
310 void AArch64TargetInfo::getTargetDefinesARMV84A(const LangOptions &Opts,
311                                                 MacroBuilder &Builder) const {
312   // Also include the Armv8.3 defines
313   getTargetDefinesARMV83A(Opts, Builder);
314 }
315 
316 void AArch64TargetInfo::getTargetDefinesARMV85A(const LangOptions &Opts,
317                                                 MacroBuilder &Builder) const {
318   Builder.defineMacro("__ARM_FEATURE_FRINT", "1");
319   // Also include the Armv8.4 defines
320   getTargetDefinesARMV84A(Opts, Builder);
321 }
322 
323 void AArch64TargetInfo::getTargetDefinesARMV86A(const LangOptions &Opts,
324                                                 MacroBuilder &Builder) const {
325   // Also include the Armv8.5 defines
326   // FIXME: Armv8.6 makes the following extensions mandatory:
327   // - __ARM_FEATURE_BF16
328   // - __ARM_FEATURE_MATMUL_INT8
329   // Handle them here.
330   getTargetDefinesARMV85A(Opts, Builder);
331 }
332 
333 void AArch64TargetInfo::getTargetDefinesARMV87A(const LangOptions &Opts,
334                                                 MacroBuilder &Builder) const {
335   // Also include the Armv8.6 defines
336   getTargetDefinesARMV86A(Opts, Builder);
337 }
338 
339 void AArch64TargetInfo::getTargetDefinesARMV88A(const LangOptions &Opts,
340                                                 MacroBuilder &Builder) const {
341   // Also include the Armv8.7 defines
342   getTargetDefinesARMV87A(Opts, Builder);
343 }
344 
345 void AArch64TargetInfo::getTargetDefinesARMV89A(const LangOptions &Opts,
346                                                 MacroBuilder &Builder) const {
347   // Also include the Armv8.8 defines
348   getTargetDefinesARMV88A(Opts, Builder);
349 }
350 
351 void AArch64TargetInfo::getTargetDefinesARMV9A(const LangOptions &Opts,
352                                                MacroBuilder &Builder) const {
353   // Armv9-A maps to Armv8.5-A
354   getTargetDefinesARMV85A(Opts, Builder);
355 }
356 
357 void AArch64TargetInfo::getTargetDefinesARMV91A(const LangOptions &Opts,
358                                                 MacroBuilder &Builder) const {
359   // Armv9.1-A maps to Armv8.6-A
360   getTargetDefinesARMV86A(Opts, Builder);
361 }
362 
363 void AArch64TargetInfo::getTargetDefinesARMV92A(const LangOptions &Opts,
364                                                 MacroBuilder &Builder) const {
365   // Armv9.2-A maps to Armv8.7-A
366   getTargetDefinesARMV87A(Opts, Builder);
367 }
368 
369 void AArch64TargetInfo::getTargetDefinesARMV93A(const LangOptions &Opts,
370                                                 MacroBuilder &Builder) const {
371   // Armv9.3-A maps to Armv8.8-A
372   getTargetDefinesARMV88A(Opts, Builder);
373 }
374 
375 void AArch64TargetInfo::getTargetDefinesARMV94A(const LangOptions &Opts,
376                                                 MacroBuilder &Builder) const {
377   // Armv9.4-A maps to Armv8.9-A
378   getTargetDefinesARMV89A(Opts, Builder);
379 }
380 
381 void AArch64TargetInfo::getTargetDefinesARMV95A(const LangOptions &Opts,
382                                                 MacroBuilder &Builder) const {
383   // Armv9.5-A does not have a v8.* equivalent, but is a superset of v9.4-A.
384   getTargetDefinesARMV94A(Opts, Builder);
385 }
386 
387 void AArch64TargetInfo::getTargetDefinesARMV96A(const LangOptions &Opts,
388                                                 MacroBuilder &Builder) const {
389   // Armv9.6-A does not have a v8.* equivalent, but is a superset of v9.5-A.
390   getTargetDefinesARMV95A(Opts, Builder);
391 }
392 
393 void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
394                                          MacroBuilder &Builder) const {
395   // Target identification.
396   if (getTriple().isWindowsArm64EC()) {
397     // Define the same set of macros as would be defined on x86_64 to ensure that
398     // ARM64EC datatype layouts match those of x86_64 compiled code
399     Builder.defineMacro("__amd64__");
400     Builder.defineMacro("__amd64");
401     Builder.defineMacro("__x86_64");
402     Builder.defineMacro("__x86_64__");
403     Builder.defineMacro("__arm64ec__");
404   } else {
405     Builder.defineMacro("__aarch64__");
406   }
407 
408   // Inline assembly supports AArch64 flag outputs.
409   Builder.defineMacro("__GCC_ASM_FLAG_OUTPUTS__");
410 
411   std::string CodeModel = getTargetOpts().CodeModel;
412   if (CodeModel == "default")
413     CodeModel = "small";
414   for (char &c : CodeModel)
415     c = toupper(c);
416   Builder.defineMacro("__AARCH64_CMODEL_" + CodeModel + "__");
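  // For example (illustrative): -mcmodel=tiny defines __AARCH64_CMODEL_TINY__,
  // and the default small code model defines __AARCH64_CMODEL_SMALL__.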
417 
418   // ACLE predefines. Many can only have one possible value on v8 AArch64.
419   Builder.defineMacro("__ARM_ACLE_VERSION(year, quarter, patch)",
420                       "(100 * (year) + 10 * (quarter) + (patch))");
421 #define ARM_ACLE_VERSION(Y, Q, P) (100 * (Y) + 10 * (Q) + (P))
422   Builder.defineMacro("__ARM_ACLE", Twine(ARM_ACLE_VERSION(2024, 2, 0)));
423   Builder.defineMacro("__FUNCTION_MULTI_VERSIONING_SUPPORT_LEVEL",
424                       Twine(ARM_ACLE_VERSION(2024, 3, 0)));
425 #undef ARM_ACLE_VERSION
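  // Illustrative consumer-side check of the macros defined above:
  //   #if __ARM_ACLE >= __ARM_ACLE_VERSION(2024, 2, 0)
  //   /* rely on ACLE 2024Q2 behaviour */
  //   #endif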
426   Builder.defineMacro("__ARM_ARCH",
427                       std::to_string(ArchInfo->Version.getMajor()));
428   Builder.defineMacro("__ARM_ARCH_PROFILE",
429                       std::string("'") + (char)ArchInfo->Profile + "'");
430 
431   Builder.defineMacro("__ARM_64BIT_STATE", "1");
432   Builder.defineMacro("__ARM_PCS_AAPCS64", "1");
433   Builder.defineMacro("__ARM_ARCH_ISA_A64", "1");
434 
435   Builder.defineMacro("__ARM_FEATURE_CLZ", "1");
436   Builder.defineMacro("__ARM_FEATURE_FMA", "1");
437   Builder.defineMacro("__ARM_FEATURE_LDREX", "0xF");
438   Builder.defineMacro("__ARM_FEATURE_IDIV", "1"); // As specified in ACLE
439   Builder.defineMacro("__ARM_FEATURE_DIV");       // For backwards compatibility
440   Builder.defineMacro("__ARM_FEATURE_NUMERIC_MAXMIN", "1");
441   Builder.defineMacro("__ARM_FEATURE_DIRECTED_ROUNDING", "1");
442 
443   Builder.defineMacro("__ARM_ALIGN_MAX_STACK_PWR", "4");
444 
445   // These macros are set when Clang can parse declarations with these
446   // attributes.
447   Builder.defineMacro("__ARM_STATE_ZA", "1");
448   Builder.defineMacro("__ARM_STATE_ZT0", "1");
449 
450   // 0xe implies support for half, single and double precision operations.
451   if (FPU & FPUMode)
452     Builder.defineMacro("__ARM_FP", "0xE");
453 
454   // PCS specifies this for SysV variants, which is all we support. Other ABIs
455   // may choose __ARM_FP16_FORMAT_ALTERNATIVE.
456   Builder.defineMacro("__ARM_FP16_FORMAT_IEEE", "1");
457   Builder.defineMacro("__ARM_FP16_ARGS", "1");
458 
459   // Clang supports arm_neon_sve_bridge.h
460   Builder.defineMacro("__ARM_NEON_SVE_BRIDGE", "1");
461 
462   if (Opts.UnsafeFPMath)
463     Builder.defineMacro("__ARM_FP_FAST", "1");
464 
465   Builder.defineMacro("__ARM_SIZEOF_WCHAR_T",
466                       Twine(Opts.WCharSize ? Opts.WCharSize : 4));
467 
468   Builder.defineMacro("__ARM_SIZEOF_MINIMAL_ENUM", Opts.ShortEnums ? "1" : "4");
469 
470   if (FPU & NeonMode) {
471     Builder.defineMacro("__ARM_NEON", "1");
472     // 64-bit NEON supports half, single and double precision operations.
473     Builder.defineMacro("__ARM_NEON_FP", "0xE");
474   }
475 
476   if (FPU & SveMode)
477     Builder.defineMacro("__ARM_FEATURE_SVE", "1");
478 
479   if (HasSVE2)
480     Builder.defineMacro("__ARM_FEATURE_SVE2", "1");
481 
482   if (HasSVE2p1)
483     Builder.defineMacro("__ARM_FEATURE_SVE2p1", "1");
484 
485   if (HasSVE2 && HasSVEAES)
486     Builder.defineMacro("__ARM_FEATURE_SVE2_AES", "1");
487 
488   if (HasSVE2 && HasSVEBitPerm)
489     Builder.defineMacro("__ARM_FEATURE_SVE2_BITPERM", "1");
490 
491   if (HasSVE2 && HasSVE2SHA3)
492     Builder.defineMacro("__ARM_FEATURE_SVE2_SHA3", "1");
493 
494   if (HasSVE2 && HasSVE2SM4)
495     Builder.defineMacro("__ARM_FEATURE_SVE2_SM4", "1");
496 
497   if (HasSVEB16B16)
498     Builder.defineMacro("__ARM_FEATURE_SVE_B16B16", "1");
499 
500   if (HasSME) {
501     Builder.defineMacro("__ARM_FEATURE_SME");
502     Builder.defineMacro("__ARM_FEATURE_LOCALLY_STREAMING", "1");
503   }
504 
505   if (HasSME2)
506     Builder.defineMacro("__ARM_FEATURE_SME2", "1");
507 
508   if (HasSME2p1)
509     Builder.defineMacro("__ARM_FEATURE_SME2p1", "1");
510 
511   if (HasSMEF16F16)
512     Builder.defineMacro("__ARM_FEATURE_SME_F16F16", "1");
513 
514   if (HasSMEB16B16)
515     Builder.defineMacro("__ARM_FEATURE_SME_B16B16", "1");
516 
517   if (HasCRC)
518     Builder.defineMacro("__ARM_FEATURE_CRC32", "1");
519 
520   if (HasRCPC3)
521     Builder.defineMacro("__ARM_FEATURE_RCPC", "3");
522   else if (HasRCPC)
523     Builder.defineMacro("__ARM_FEATURE_RCPC", "1");
524 
525   if (HasFMV)
526     Builder.defineMacro("__HAVE_FUNCTION_MULTI_VERSIONING", "1");
527 
528   // __ARM_FEATURE_CRYPTO is deprecated in favor of the finer-grained feature
529   // macros for AES, SHA2, SHA3 and SM4.
530   if (HasAES && HasSHA2)
531     Builder.defineMacro("__ARM_FEATURE_CRYPTO", "1");
532 
533   if (HasAES)
534     Builder.defineMacro("__ARM_FEATURE_AES", "1");
535 
536   if (HasSHA2)
537     Builder.defineMacro("__ARM_FEATURE_SHA2", "1");
538 
539   if (HasSHA3) {
540     Builder.defineMacro("__ARM_FEATURE_SHA3", "1");
541     Builder.defineMacro("__ARM_FEATURE_SHA512", "1");
542   }
543 
544   if (HasSM4) {
545     Builder.defineMacro("__ARM_FEATURE_SM3", "1");
546     Builder.defineMacro("__ARM_FEATURE_SM4", "1");
547   }
548 
549   if (HasPAuth)
550     Builder.defineMacro("__ARM_FEATURE_PAUTH", "1");
551 
552   if (HasPAuthLR)
553     Builder.defineMacro("__ARM_FEATURE_PAUTH_LR", "1");
554 
555   if (HasBTI)
556     Builder.defineMacro("__ARM_FEATURE_BTI", "1");
557 
558   if (HasUnalignedAccess)
559     Builder.defineMacro("__ARM_FEATURE_UNALIGNED", "1");
560 
561   if ((FPU & NeonMode) && HasFullFP16)
562     Builder.defineMacro("__ARM_FEATURE_FP16_VECTOR_ARITHMETIC", "1");
563   if (HasFullFP16)
564    Builder.defineMacro("__ARM_FEATURE_FP16_SCALAR_ARITHMETIC", "1");
565 
566   if (HasDotProd)
567     Builder.defineMacro("__ARM_FEATURE_DOTPROD", "1");
568 
569   if (HasMTE)
570     Builder.defineMacro("__ARM_FEATURE_MEMORY_TAGGING", "1");
571 
572   if (HasTME)
573     Builder.defineMacro("__ARM_FEATURE_TME", "1");
574 
575   if (HasMatMul)
576     Builder.defineMacro("__ARM_FEATURE_MATMUL_INT8", "1");
577 
578   if (HasLSE)
579     Builder.defineMacro("__ARM_FEATURE_ATOMICS", "1");
580 
581   if (HasBFloat16) {
582     Builder.defineMacro("__ARM_FEATURE_BF16", "1");
583     Builder.defineMacro("__ARM_FEATURE_BF16_VECTOR_ARITHMETIC", "1");
584     Builder.defineMacro("__ARM_BF16_FORMAT_ALTERNATIVE", "1");
585     Builder.defineMacro("__ARM_FEATURE_BF16_SCALAR_ARITHMETIC", "1");
586   }
587 
588   if ((FPU & SveMode) && HasBFloat16) {
589     Builder.defineMacro("__ARM_FEATURE_SVE_BF16", "1");
590   }
591 
592   if ((FPU & SveMode) && HasMatmulFP64)
593     Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP64", "1");
594 
595   if ((FPU & SveMode) && HasMatmulFP32)
596     Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP32", "1");
597 
598   if ((FPU & SveMode) && HasMatMul)
599     Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_INT8", "1");
600 
601   if ((FPU & NeonMode) && HasFP16FML)
602     Builder.defineMacro("__ARM_FEATURE_FP16_FML", "1");
603 
604   if (Opts.hasSignReturnAddress()) {
605     // Bitmask:
606     // 0: Protection using the A key
607     // 1: Protection using the B key
608     // 2: Protection including leaf functions
609     // 3: Protection using PC as a diversifier
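    // For example (illustrative): -mbranch-protection=pac-ret+leaf signs with
    // the A key and covers leaf functions, so Value becomes
    // (1 << 0) | (1 << 2) == 5 and __ARM_FEATURE_PAC_DEFAULT is defined to 5.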
610     unsigned Value = 0;
611 
612     if (Opts.isSignReturnAddressWithAKey())
613       Value |= (1 << 0);
614     else
615       Value |= (1 << 1);
616 
617     if (Opts.isSignReturnAddressScopeAll())
618       Value |= (1 << 2);
619 
620     if (Opts.BranchProtectionPAuthLR)
621       Value |= (1 << 3);
622 
623     Builder.defineMacro("__ARM_FEATURE_PAC_DEFAULT", std::to_string(Value));
624   }
625 
626   if (Opts.BranchTargetEnforcement)
627     Builder.defineMacro("__ARM_FEATURE_BTI_DEFAULT", "1");
628 
629   if (Opts.GuardedControlStack)
630     Builder.defineMacro("__ARM_FEATURE_GCS_DEFAULT", "1");
631 
632   if (HasLS64)
633     Builder.defineMacro("__ARM_FEATURE_LS64", "1");
634 
635   if (HasRandGen)
636     Builder.defineMacro("__ARM_FEATURE_RNG", "1");
637 
638   if (HasMOPS)
639     Builder.defineMacro("__ARM_FEATURE_MOPS", "1");
640 
641   if (HasD128)
642     Builder.defineMacro("__ARM_FEATURE_SYSREG128", "1");
643 
644   if (HasGCS)
645     Builder.defineMacro("__ARM_FEATURE_GCS", "1");
646 
647   if (*ArchInfo == llvm::AArch64::ARMV8_1A)
648     getTargetDefinesARMV81A(Opts, Builder);
649   else if (*ArchInfo == llvm::AArch64::ARMV8_2A)
650     getTargetDefinesARMV82A(Opts, Builder);
651   else if (*ArchInfo == llvm::AArch64::ARMV8_3A)
652     getTargetDefinesARMV83A(Opts, Builder);
653   else if (*ArchInfo == llvm::AArch64::ARMV8_4A)
654     getTargetDefinesARMV84A(Opts, Builder);
655   else if (*ArchInfo == llvm::AArch64::ARMV8_5A)
656     getTargetDefinesARMV85A(Opts, Builder);
657   else if (*ArchInfo == llvm::AArch64::ARMV8_6A)
658     getTargetDefinesARMV86A(Opts, Builder);
659   else if (*ArchInfo == llvm::AArch64::ARMV8_7A)
660     getTargetDefinesARMV87A(Opts, Builder);
661   else if (*ArchInfo == llvm::AArch64::ARMV8_8A)
662     getTargetDefinesARMV88A(Opts, Builder);
663   else if (*ArchInfo == llvm::AArch64::ARMV8_9A)
664     getTargetDefinesARMV89A(Opts, Builder);
665   else if (*ArchInfo == llvm::AArch64::ARMV9A)
666     getTargetDefinesARMV9A(Opts, Builder);
667   else if (*ArchInfo == llvm::AArch64::ARMV9_1A)
668     getTargetDefinesARMV91A(Opts, Builder);
669   else if (*ArchInfo == llvm::AArch64::ARMV9_2A)
670     getTargetDefinesARMV92A(Opts, Builder);
671   else if (*ArchInfo == llvm::AArch64::ARMV9_3A)
672     getTargetDefinesARMV93A(Opts, Builder);
673   else if (*ArchInfo == llvm::AArch64::ARMV9_4A)
674     getTargetDefinesARMV94A(Opts, Builder);
675   else if (*ArchInfo == llvm::AArch64::ARMV9_5A)
676     getTargetDefinesARMV95A(Opts, Builder);
677   else if (*ArchInfo == llvm::AArch64::ARMV9_6A)
678     getTargetDefinesARMV96A(Opts, Builder);
679 
680   // All of the __sync_(bool|val)_compare_and_swap_(1|2|4|8|16) builtins work.
681   Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
682   Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
683   Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
684   Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");
685   Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16");
686 
687   // Allow detection of fast FMA support.
688   Builder.defineMacro("__FP_FAST_FMA", "1");
689   Builder.defineMacro("__FP_FAST_FMAF", "1");
690 
691   // C/C++ operators work on both VLS and VLA SVE types
692   if (FPU & SveMode)
693     Builder.defineMacro("__ARM_FEATURE_SVE_VECTOR_OPERATORS", "2");
694 
695   if (Opts.VScaleMin && Opts.VScaleMin == Opts.VScaleMax) {
696     Builder.defineMacro("__ARM_FEATURE_SVE_BITS", Twine(Opts.VScaleMin * 128));
697   }
698 }
699 
700 ArrayRef<Builtin::Info> AArch64TargetInfo::getTargetBuiltins() const {
701   return llvm::ArrayRef(BuiltinInfo, clang::AArch64::LastTSBuiltin -
702                                          Builtin::FirstTSBuiltin);
703 }
704 
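// Illustrative mapping (hedged): -mvscale-min=2 -mvscale-max=2 (for example via
// -msve-vector-bits=256) gives the fixed range {2, 2}; with SVE enabled and no
// explicit bounds, the conservative default below is {1, 16}.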
705 std::optional<std::pair<unsigned, unsigned>>
706 AArch64TargetInfo::getVScaleRange(const LangOptions &LangOpts) const {
707   if (LangOpts.VScaleMin || LangOpts.VScaleMax)
708     return std::pair<unsigned, unsigned>(
709         LangOpts.VScaleMin ? LangOpts.VScaleMin : 1, LangOpts.VScaleMax);
710 
711   if (hasFeature("sve"))
712     return std::pair<unsigned, unsigned>(1, 16);
713 
714   return std::nullopt;
715 }
716 
717 uint64_t AArch64TargetInfo::getFMVPriority(ArrayRef<StringRef> Features) const {
718   return llvm::AArch64::getFMVPriority(Features);
719 }
720 
721 bool AArch64TargetInfo::doesFeatureAffectCodeGen(StringRef Name) const {
722   // FMV extensions which imply no backend features do not affect codegen.
723   if (auto Ext = llvm::AArch64::parseFMVExtension(Name))
724     return Ext->ID.has_value();
725   return false;
726 }
727 
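// Illustrative use (hedged): __builtin_cpu_supports("sve2+bf16") reaches this
// with FeatureStr == "sve2+bf16"; every '+'-separated name must be a known FMV
// extension for the whole string to be accepted.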
728 bool AArch64TargetInfo::validateCpuSupports(StringRef FeatureStr) const {
729   // CPU features might be separated by '+'; extract and check each one.
730   llvm::SmallVector<StringRef, 8> Features;
731   FeatureStr.split(Features, "+");
732   for (auto &Feature : Features)
733     if (!llvm::AArch64::parseFMVExtension(Feature.trim()).has_value())
734       return false;
735   return true;
736 }
737 
738 bool AArch64TargetInfo::hasFeature(StringRef Feature) const {
739   return llvm::StringSwitch<bool>(Feature)
740       .Cases("aarch64", "arm64", "arm", true)
741       .Case("fmv", HasFMV)
742       .Case("fp", FPU & FPUMode)
743       .Cases("neon", "simd", FPU & NeonMode)
744       .Case("jscvt", HasJSCVT)
745       .Case("fcma", HasFCMA)
746       .Case("rng", HasRandGen)
747       .Case("flagm", HasFlagM)
748       .Case("flagm2", HasAlternativeNZCV)
749       .Case("fp16fml", HasFP16FML)
750       .Case("dotprod", HasDotProd)
751       .Case("sm4", HasSM4)
752       .Case("rdm", HasRDM)
753       .Case("lse", HasLSE)
754       .Case("crc", HasCRC)
755       .Case("sha2", HasSHA2)
756       .Case("sha3", HasSHA3)
757       .Cases("aes", "pmull", HasAES)
758       .Cases("fp16", "fullfp16", HasFullFP16)
759       .Case("dit", HasDIT)
760       .Case("dpb", HasCCPP)
761       .Case("dpb2", HasCCDP)
762       .Case("rcpc", HasRCPC)
763       .Case("frintts", HasFRInt3264)
764       .Case("i8mm", HasMatMul)
765       .Case("bf16", HasBFloat16)
766       .Case("sve", FPU & SveMode)
767       .Case("sve-b16b16", HasSVEB16B16)
768       .Case("f32mm", FPU & SveMode && HasMatmulFP32)
769       .Case("f64mm", FPU & SveMode && HasMatmulFP64)
770       .Case("sve2", FPU & SveMode && HasSVE2)
771       .Case("sve-aes", HasSVEAES)
772       .Case("sve-bitperm", HasSVEBitPerm)
773       .Case("sve2-sha3", FPU & SveMode && HasSVE2SHA3)
774       .Case("sve2-sm4", FPU & SveMode && HasSVE2SM4)
775       .Case("sve2p1", FPU & SveMode && HasSVE2p1)
776       .Case("sme", HasSME)
777       .Case("sme2", HasSME2)
778       .Case("sme2p1", HasSME2p1)
779       .Case("sme-f64f64", HasSMEF64F64)
780       .Case("sme-i16i64", HasSMEI16I64)
781       .Case("sme-fa64", HasSMEFA64)
782       .Case("sme-f16f16", HasSMEF16F16)
783       .Case("sme-b16b16", HasSMEB16B16)
784       .Case("memtag", HasMTE)
785       .Case("sb", HasSB)
786       .Case("predres", HasPredRes)
787       .Cases("ssbs", "ssbs2", HasSSBS)
788       .Case("bti", HasBTI)
789       .Cases("ls64", "ls64_v", "ls64_accdata", HasLS64)
790       .Case("wfxt", HasWFxT)
791       .Case("rcpc3", HasRCPC3)
792       .Default(false);
793 }
794 
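// Illustrative (hedged): enabling the sub-arch feature "v8.2a" here also marks
// "v8.1a" and "v8a" as enabled and adds the v8.2-a default extensions such as
// "+crc" and "+lse", because each ArchInfo implies its predecessors.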
795 void AArch64TargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
796                                           StringRef Name, bool Enabled) const {
797   Features[Name] = Enabled;
798   // If the feature is an architecture feature (like v8.2a), add all previous
799   // architecture versions and any dependent target features.
800   const std::optional<llvm::AArch64::ArchInfo> ArchInfo =
801       llvm::AArch64::ArchInfo::findBySubArch(Name);
802 
803   if (!ArchInfo)
804     return; // Not an architecture, nothing more to do.
805 
806   // Disabling an architecture feature does not affect dependent features
807   if (!Enabled)
808     return;
809 
810   for (const auto *OtherArch : llvm::AArch64::ArchInfos)
811     if (ArchInfo->implies(*OtherArch))
812       Features[OtherArch->getSubArch()] = true;
813 
814   // Set any features implied by the architecture
815   std::vector<StringRef> CPUFeats;
816   if (llvm::AArch64::getExtensionFeatures(ArchInfo->DefaultExts, CPUFeats)) {
817     for (auto F : CPUFeats) {
818       assert(F[0] == '+' && "Expected + in target feature!");
819       Features[F.drop_front(1)] = true;
820     }
821   }
822 }
823 
824 bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
825                                              DiagnosticsEngine &Diags) {
826   for (const auto &Feature : Features) {
827     if (Feature == "-fp-armv8")
828       HasNoFP = true;
829     if (Feature == "-neon")
830       HasNoNeon = true;
831     if (Feature == "-sve")
832       HasNoSVE = true;
833 
834     if (Feature == "+neon" || Feature == "+fp-armv8")
835       FPU |= NeonMode;
836     if (Feature == "+jscvt") {
837       HasJSCVT = true;
838       FPU |= NeonMode;
839     }
840     if (Feature == "+fcma") {
841       HasFCMA = true;
842       FPU |= NeonMode;
843     }
844 
845     if (Feature == "+sve") {
846       FPU |= NeonMode;
847       FPU |= SveMode;
848       HasFullFP16 = true;
849     }
850     if (Feature == "+sve2") {
851       FPU |= NeonMode;
852       FPU |= SveMode;
853       HasFullFP16 = true;
854       HasSVE2 = true;
855     }
856     if (Feature == "+sve2p1") {
857       FPU |= NeonMode;
858       FPU |= SveMode;
859       HasFullFP16 = true;
860       HasSVE2 = true;
861       HasSVE2p1 = true;
862     }
863     if (Feature == "+sve-aes") {
864       FPU |= NeonMode;
865       HasFullFP16 = true;
866       HasSVEAES = true;
867     }
868     if (Feature == "+sve2-sha3") {
869       FPU |= NeonMode;
870       FPU |= SveMode;
871       HasFullFP16 = true;
872       HasSVE2 = true;
873       HasSVE2SHA3 = true;
874     }
875     if (Feature == "+sve2-sm4") {
876       FPU |= NeonMode;
877       FPU |= SveMode;
878       HasFullFP16 = true;
879       HasSVE2 = true;
880       HasSVE2SM4 = true;
881     }
882     if (Feature == "+sve-b16b16")
883       HasSVEB16B16 = true;
884     if (Feature == "+sve-bitperm") {
885       FPU |= NeonMode;
886       HasFullFP16 = true;
887       HasSVEBitPerm = true;
888     }
889     if (Feature == "+f32mm") {
890       FPU |= NeonMode;
891       FPU |= SveMode;
892       HasFullFP16 = true;
893       HasMatmulFP32 = true;
894     }
895     if (Feature == "+f64mm") {
896       FPU |= NeonMode;
897       FPU |= SveMode;
898       HasFullFP16 = true;
899       HasMatmulFP64 = true;
900     }
901     if (Feature == "+sme") {
902       HasSME = true;
903       HasBFloat16 = true;
904       HasFullFP16 = true;
905     }
906     if (Feature == "+sme2") {
907       HasSME = true;
908       HasSME2 = true;
909       HasBFloat16 = true;
910       HasFullFP16 = true;
911     }
912     if (Feature == "+sme2p1") {
913       HasSME = true;
914       HasSME2 = true;
915       HasSME2p1 = true;
916       HasBFloat16 = true;
917       HasFullFP16 = true;
918     }
919     if (Feature == "+sme-f64f64") {
920       HasSME = true;
921       HasSMEF64F64 = true;
922       HasBFloat16 = true;
923       HasFullFP16 = true;
924     }
925     if (Feature == "+sme-i16i64") {
926       HasSME = true;
927       HasSMEI16I64 = true;
928       HasBFloat16 = true;
929       HasFullFP16 = true;
930     }
931     if (Feature == "+sme-fa64") {
932       FPU |= NeonMode;
933       FPU |= SveMode;
934       HasSME = true;
935       HasSVE2 = true;
936       HasSMEFA64 = true;
937     }
938     if (Feature == "+sme-f16f16") {
939       HasSME = true;
940       HasSME2 = true;
941       HasBFloat16 = true;
942       HasFullFP16 = true;
943       HasSMEF16F16 = true;
944     }
945     if (Feature == "+sme-b16b16") {
946       HasSME = true;
947       HasSME2 = true;
948       HasBFloat16 = true;
949       HasFullFP16 = true;
950       HasSVEB16B16 = true;
951       HasSMEB16B16 = true;
952     }
953     if (Feature == "+sb")
954       HasSB = true;
955     if (Feature == "+predres")
956       HasPredRes = true;
957     if (Feature == "+ssbs")
958       HasSSBS = true;
959     if (Feature == "+bti")
960       HasBTI = true;
961     if (Feature == "+wfxt")
962       HasWFxT = true;
963     if (Feature == "-fmv")
964       HasFMV = false;
965     if (Feature == "+crc")
966       HasCRC = true;
967     if (Feature == "+rcpc")
968       HasRCPC = true;
969     if (Feature == "+aes") {
970       FPU |= NeonMode;
971       HasAES = true;
972     }
973     if (Feature == "+sha2") {
974       FPU |= NeonMode;
975       HasSHA2 = true;
976     }
977     if (Feature == "+sha3") {
978       FPU |= NeonMode;
979       HasSHA2 = true;
980       HasSHA3 = true;
981     }
982     if (Feature == "+rdm") {
983       FPU |= NeonMode;
984       HasRDM = true;
985     }
986     if (Feature == "+dit")
987       HasDIT = true;
988     if (Feature == "+ccpp")
989       HasCCPP = true;
990     if (Feature == "+ccdp") {
991       HasCCPP = true;
992       HasCCDP = true;
993     }
994     if (Feature == "+fptoint")
995       HasFRInt3264 = true;
996     if (Feature == "+sm4") {
997       FPU |= NeonMode;
998       HasSM4 = true;
999     }
1000     if (Feature == "+strict-align")
1001       HasUnalignedAccess = false;
1002 
1003     // All predecessor archs are added; only the latest is kept for ArchKind.
1004     if (Feature == "+v8a" && ArchInfo->Version < llvm::AArch64::ARMV8A.Version)
1005       ArchInfo = &llvm::AArch64::ARMV8A;
1006     if (Feature == "+v8.1a" &&
1007         ArchInfo->Version < llvm::AArch64::ARMV8_1A.Version)
1008       ArchInfo = &llvm::AArch64::ARMV8_1A;
1009     if (Feature == "+v8.2a" &&
1010         ArchInfo->Version < llvm::AArch64::ARMV8_2A.Version)
1011       ArchInfo = &llvm::AArch64::ARMV8_2A;
1012     if (Feature == "+v8.3a" &&
1013         ArchInfo->Version < llvm::AArch64::ARMV8_3A.Version)
1014       ArchInfo = &llvm::AArch64::ARMV8_3A;
1015     if (Feature == "+v8.4a" &&
1016         ArchInfo->Version < llvm::AArch64::ARMV8_4A.Version)
1017       ArchInfo = &llvm::AArch64::ARMV8_4A;
1018     if (Feature == "+v8.5a" &&
1019         ArchInfo->Version < llvm::AArch64::ARMV8_5A.Version)
1020       ArchInfo = &llvm::AArch64::ARMV8_5A;
1021     if (Feature == "+v8.6a" &&
1022         ArchInfo->Version < llvm::AArch64::ARMV8_6A.Version)
1023       ArchInfo = &llvm::AArch64::ARMV8_6A;
1024     if (Feature == "+v8.7a" &&
1025         ArchInfo->Version < llvm::AArch64::ARMV8_7A.Version)
1026       ArchInfo = &llvm::AArch64::ARMV8_7A;
1027     if (Feature == "+v8.8a" &&
1028         ArchInfo->Version < llvm::AArch64::ARMV8_8A.Version)
1029       ArchInfo = &llvm::AArch64::ARMV8_8A;
1030     if (Feature == "+v8.9a" &&
1031         ArchInfo->Version < llvm::AArch64::ARMV8_9A.Version)
1032       ArchInfo = &llvm::AArch64::ARMV8_9A;
1033     if (Feature == "+v9a" && ArchInfo->Version < llvm::AArch64::ARMV9A.Version)
1034       ArchInfo = &llvm::AArch64::ARMV9A;
1035     if (Feature == "+v9.1a" &&
1036         ArchInfo->Version < llvm::AArch64::ARMV9_1A.Version)
1037       ArchInfo = &llvm::AArch64::ARMV9_1A;
1038     if (Feature == "+v9.2a" &&
1039         ArchInfo->Version < llvm::AArch64::ARMV9_2A.Version)
1040       ArchInfo = &llvm::AArch64::ARMV9_2A;
1041     if (Feature == "+v9.3a" &&
1042         ArchInfo->Version < llvm::AArch64::ARMV9_3A.Version)
1043       ArchInfo = &llvm::AArch64::ARMV9_3A;
1044     if (Feature == "+v9.4a" &&
1045         ArchInfo->Version < llvm::AArch64::ARMV9_4A.Version)
1046       ArchInfo = &llvm::AArch64::ARMV9_4A;
1047     if (Feature == "+v9.5a" &&
1048         ArchInfo->Version < llvm::AArch64::ARMV9_5A.Version)
1049       ArchInfo = &llvm::AArch64::ARMV9_5A;
1050     if (Feature == "+v9.6a" &&
1051         ArchInfo->Version < llvm::AArch64::ARMV9_6A.Version)
1052       ArchInfo = &llvm::AArch64::ARMV9_6A;
1053     if (Feature == "+v8r")
1054       ArchInfo = &llvm::AArch64::ARMV8R;
1055     if (Feature == "+fullfp16") {
1056       FPU |= NeonMode;
1057       HasFullFP16 = true;
1058     }
1059     if (Feature == "+dotprod") {
1060       FPU |= NeonMode;
1061       HasDotProd = true;
1062     }
1063     if (Feature == "+fp16fml") {
1064       FPU |= NeonMode;
1065       HasFullFP16 = true;
1066       HasFP16FML = true;
1067     }
1068     if (Feature == "+mte")
1069       HasMTE = true;
1070     if (Feature == "+tme")
1071       HasTME = true;
1072     if (Feature == "+pauth")
1073       HasPAuth = true;
1074     if (Feature == "+i8mm")
1075       HasMatMul = true;
1076     if (Feature == "+bf16")
1077       HasBFloat16 = true;
1078     if (Feature == "+lse")
1079       HasLSE = true;
1080     if (Feature == "+ls64")
1081       HasLS64 = true;
1082     if (Feature == "+rand")
1083       HasRandGen = true;
1084     if (Feature == "+flagm")
1085       HasFlagM = true;
1086     if (Feature == "+altnzcv") {
1087       HasFlagM = true;
1088       HasAlternativeNZCV = true;
1089     }
1090     if (Feature == "+mops")
1091       HasMOPS = true;
1092     if (Feature == "+d128")
1093       HasD128 = true;
1094     if (Feature == "+gcs")
1095       HasGCS = true;
1096     if (Feature == "+rcpc3")
1097       HasRCPC3 = true;
1098     if (Feature == "+pauth-lr") {
1099       HasPAuthLR = true;
1100       HasPAuth = true;
1101     }
1102   }
1103 
1104   // Check features that are manually disabled by command line options.
1105   // This needs to be checked after architecture-related features are handled,
1106   // making sure they are properly disabled when required.
1107   for (const auto &Feature : Features) {
1108     if (Feature == "-d128")
1109       HasD128 = false;
1110   }
1111 
1112   setDataLayout();
1113   setArchFeatures();
1114 
1115   if (HasNoFP) {
1116     FPU &= ~FPUMode;
1117     FPU &= ~NeonMode;
1118     FPU &= ~SveMode;
1119   }
1120   if (HasNoNeon) {
1121     FPU &= ~NeonMode;
1122     FPU &= ~SveMode;
1123   }
1124   if (HasNoSVE)
1125     FPU &= ~SveMode;
1126 
1127   return true;
1128 }
1129 
1130 // Parse AArch64 Target attributes, which are a comma separated list of:
1131 //  "arch=<arch>" - parsed to features as per -march=..
1132 //  "cpu=<cpu>" - parsed to features as per -mcpu=.., with CPU set to <cpu>
1133 //  "tune=<cpu>" - TuneCPU set to <cpu>
1134 //  "feature", "no-feature" - Add (or remove) feature.
1135 //  "+feature", "+nofeature" - Add (or remove) feature.
1136 //
1137 // A feature may correspond to an Extension (anything with a corresponding
1138 // AEK_), in which case an ExtensionSet is used to parse it and expand its
1139 // dependencies. If the feature does not yield a successful parse then it
1140 // is passed through.
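// For example (illustrative), __attribute__((target("arch=armv8.2-a+sve")))
// adds the Armv8.2-A default features plus "+sve" to Ret.Features, and
// "branch-protection=standard" would be returned in Ret.BranchProtection.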
1141 ParsedTargetAttr AArch64TargetInfo::parseTargetAttr(StringRef Features) const {
1142   ParsedTargetAttr Ret;
1143   if (Features == "default")
1144     return Ret;
1145   SmallVector<StringRef, 1> AttrFeatures;
1146   Features.split(AttrFeatures, ",");
1147   bool FoundArch = false;
1148 
1149   auto SplitAndAddFeatures = [](StringRef FeatString,
1150                                 std::vector<std::string> &Features,
1151                                 llvm::AArch64::ExtensionSet &FeatureBits) {
1152     SmallVector<StringRef, 8> SplitFeatures;
1153     FeatString.split(SplitFeatures, StringRef("+"), -1, false);
1154     for (StringRef Feature : SplitFeatures) {
1155       if (FeatureBits.parseModifier(Feature))
1156         continue;
1157       // Pass through anything that failed to parse so that we can emit
1158       // diagnostics, as well as valid internal feature names.
1159       //
1160       // FIXME: We should consider rejecting internal feature names like
1161       //        neon, v8a, etc.
1162       // FIXME: We should consider emitting diagnostics here.
1163       if (Feature.starts_with("no"))
1164         Features.push_back("-" + Feature.drop_front(2).str());
1165       else
1166         Features.push_back("+" + Feature.str());
1167     }
1168   };
1169 
1170   llvm::AArch64::ExtensionSet FeatureBits;
1171   // Reconstruct the bitset from the command line option features.
1172   FeatureBits.reconstructFromParsedFeatures(getTargetOpts().FeaturesAsWritten,
1173                                             Ret.Features);
1174 
1175   for (auto &Feature : AttrFeatures) {
1176     Feature = Feature.trim();
1177     if (Feature.starts_with("fpmath="))
1178       continue;
1179 
1180     if (Feature.starts_with("branch-protection=")) {
1181       Ret.BranchProtection = Feature.split('=').second.trim();
1182       continue;
1183     }
1184 
1185     if (Feature.starts_with("arch=")) {
1186       if (FoundArch)
1187         Ret.Duplicate = "arch=";
1188       FoundArch = true;
1189       std::pair<StringRef, StringRef> Split =
1190           Feature.split("=").second.trim().split("+");
1191       const llvm::AArch64::ArchInfo *AI = llvm::AArch64::parseArch(Split.first);
1192 
1193       // Parse the architecture version, adding the required features to
1194       // Ret.Features.
1195       if (!AI)
1196         continue;
1197       FeatureBits.addArchDefaults(*AI);
1198       // Add any extra features, after the +
1199       SplitAndAddFeatures(Split.second, Ret.Features, FeatureBits);
1200     } else if (Feature.starts_with("cpu=")) {
1201       if (!Ret.CPU.empty())
1202         Ret.Duplicate = "cpu=";
1203       else {
1204         // Split the cpu string into "cpu=", "cortex-a710" and any remaining
1205         // "+feat" features.
1206         std::pair<StringRef, StringRef> Split =
1207             Feature.split("=").second.trim().split("+");
1208         Ret.CPU = Split.first;
1209         if (auto CpuInfo = llvm::AArch64::parseCpu(Ret.CPU)) {
1210           FeatureBits.addCPUDefaults(*CpuInfo);
1211           SplitAndAddFeatures(Split.second, Ret.Features, FeatureBits);
1212         }
1213       }
1214     } else if (Feature.starts_with("tune=")) {
1215       if (!Ret.Tune.empty())
1216         Ret.Duplicate = "tune=";
1217       else
1218         Ret.Tune = Feature.split("=").second.trim();
1219     } else if (Feature.starts_with("+")) {
1220       SplitAndAddFeatures(Feature, Ret.Features, FeatureBits);
1221     } else {
1222       if (FeatureBits.parseModifier(Feature, /* AllowNoDashForm = */ true))
1223         continue;
1224       // Pass through anything that failed to parse so that we can emit
1225       // diagnostics, as well as valid internal feature names.
1226       //
1227       // FIXME: We should consider rejecting internal feature names like
1228       //        neon, v8a, etc.
1229       // FIXME: We should consider emitting diagnostics here.
1230       if (Feature.starts_with("no-"))
1231         Ret.Features.push_back("-" + Feature.drop_front(3).str());
1232       else
1233         Ret.Features.push_back("+" + Feature.str());
1234     }
1235   }
1236   FeatureBits.toLLVMFeatureList(Ret.Features);
1237   return Ret;
1238 }
1239 
1240 bool AArch64TargetInfo::hasBFloat16Type() const {
1241   return true;
1242 }
1243 
1244 TargetInfo::CallingConvCheckResult
1245 AArch64TargetInfo::checkCallingConvention(CallingConv CC) const {
1246   switch (CC) {
1247   case CC_C:
1248   case CC_Swift:
1249   case CC_SwiftAsync:
1250   case CC_PreserveMost:
1251   case CC_PreserveAll:
1252   case CC_PreserveNone:
1253   case CC_OpenCLKernel:
1254   case CC_AArch64VectorCall:
1255   case CC_AArch64SVEPCS:
1256   case CC_Win64:
1257     return CCCR_OK;
1258   default:
1259     return CCCR_Warning;
1260   }
1261 }
1262 
1263 bool AArch64TargetInfo::isCLZForZeroUndef() const { return false; }
1264 
1265 TargetInfo::BuiltinVaListKind AArch64TargetInfo::getBuiltinVaListKind() const {
1266   return TargetInfo::AArch64ABIBuiltinVaList;
1267 }
1268 
1269 const char *const AArch64TargetInfo::GCCRegNames[] = {
1270     // clang-format off
1271 
1272     // 32-bit Integer registers
1273     "w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", "w8", "w9", "w10", "w11",
1274     "w12", "w13", "w14", "w15", "w16", "w17", "w18", "w19", "w20", "w21", "w22",
1275     "w23", "w24", "w25", "w26", "w27", "w28", "w29", "w30", "wsp",
1276 
1277     // 64-bit Integer registers
1278     "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11",
1279     "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22",
1280     "x23", "x24", "x25", "x26", "x27", "x28", "fp", "lr", "sp",
1281 
1282     // 32-bit floating point registers
1283     "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10", "s11",
1284     "s12", "s13", "s14", "s15", "s16", "s17", "s18", "s19", "s20", "s21", "s22",
1285     "s23", "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
1286 
1287     // 64-bit floating point registers
1288     "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10", "d11",
1289     "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20", "d21", "d22",
1290     "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
1291 
1292     // Neon vector registers
1293     "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11",
1294     "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22",
1295     "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",
1296 
1297     // SVE vector registers
1298     "z0",  "z1",  "z2",  "z3",  "z4",  "z5",  "z6",  "z7",  "z8",  "z9",  "z10",
1299     "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21",
1300     "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31",
1301 
1302     // SVE predicate registers
1303     "p0",  "p1",  "p2",  "p3",  "p4",  "p5",  "p6",  "p7",  "p8",  "p9",  "p10",
1304     "p11", "p12", "p13", "p14", "p15",
1305 
1306     // SVE predicate-as-counter registers
1307     "pn0",  "pn1",  "pn2",  "pn3",  "pn4",  "pn5",  "pn6",  "pn7",  "pn8",
1308     "pn9",  "pn10", "pn11", "pn12", "pn13", "pn14", "pn15",
1309 
1310     // SME registers
1311     "za", "zt0",
1312 
1313     // clang-format on
1314 };
1315 
1316 ArrayRef<const char *> AArch64TargetInfo::getGCCRegNames() const {
1317   return llvm::ArrayRef(GCCRegNames);
1318 }
1319 
1320 const TargetInfo::GCCRegAlias AArch64TargetInfo::GCCRegAliases[] = {
1321     {{"w31"}, "wsp"},
1322     {{"x31"}, "sp"},
1323     // GCC rN registers are aliases of xN registers.
1324     {{"r0"}, "x0"},
1325     {{"r1"}, "x1"},
1326     {{"r2"}, "x2"},
1327     {{"r3"}, "x3"},
1328     {{"r4"}, "x4"},
1329     {{"r5"}, "x5"},
1330     {{"r6"}, "x6"},
1331     {{"r7"}, "x7"},
1332     {{"r8"}, "x8"},
1333     {{"r9"}, "x9"},
1334     {{"r10"}, "x10"},
1335     {{"r11"}, "x11"},
1336     {{"r12"}, "x12"},
1337     {{"r13"}, "x13"},
1338     {{"r14"}, "x14"},
1339     {{"r15"}, "x15"},
1340     {{"r16"}, "x16"},
1341     {{"r17"}, "x17"},
1342     {{"r18"}, "x18"},
1343     {{"r19"}, "x19"},
1344     {{"r20"}, "x20"},
1345     {{"r21"}, "x21"},
1346     {{"r22"}, "x22"},
1347     {{"r23"}, "x23"},
1348     {{"r24"}, "x24"},
1349     {{"r25"}, "x25"},
1350     {{"r26"}, "x26"},
1351     {{"r27"}, "x27"},
1352     {{"r28"}, "x28"},
1353     {{"r29", "x29"}, "fp"},
1354     {{"r30", "x30"}, "lr"},
1355     // The S/D/Q and W/X registers overlap, but aren't really aliases; we
1356     // don't want to substitute one of these for a different-sized one.
1357 };
1358 
1359 ArrayRef<TargetInfo::GCCRegAlias> AArch64TargetInfo::getGCCRegAliases() const {
1360   return llvm::ArrayRef(GCCRegAliases);
1361 }
1362 
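// Flag-output constraints ("=@cc<cond>") let inline assembly return a condition
// flag directly; an illustrative use (x, y and eq are placeholder names):
//   int eq;
//   asm("cmp %w[a], %w[b]" : "=@cceq"(eq) : [a] "r"(x), [b] "r"(y));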
1363 // Returns the length of a "@cc" constraint, or 0 if there is no match.
1364 static unsigned matchAsmCCConstraint(const char *Name) {
1365   constexpr unsigned len = 5;
1366   auto RV = llvm::StringSwitch<unsigned>(Name)
1367                 .Case("@cceq", len)
1368                 .Case("@ccne", len)
1369                 .Case("@cchs", len)
1370                 .Case("@cccs", len)
1371                 .Case("@cccc", len)
1372                 .Case("@cclo", len)
1373                 .Case("@ccmi", len)
1374                 .Case("@ccpl", len)
1375                 .Case("@ccvs", len)
1376                 .Case("@ccvc", len)
1377                 .Case("@cchi", len)
1378                 .Case("@ccls", len)
1379                 .Case("@ccge", len)
1380                 .Case("@cclt", len)
1381                 .Case("@ccgt", len)
1382                 .Case("@ccle", len)
1383                 .Default(0);
1384   return RV;
1385 }
1386 
1387 std::string
1388 AArch64TargetInfo::convertConstraint(const char *&Constraint) const {
1389   std::string R;
1390   switch (*Constraint) {
1391   case 'U': // Three-character constraint; add "@3" hint for later parsing.
1392     R = std::string("@3") + std::string(Constraint, 3);
1393     Constraint += 2;
1394     break;
1395   case '@':
1396     if (const unsigned Len = matchAsmCCConstraint(Constraint)) {
1397       std::string Converted = "{" + std::string(Constraint, Len) + "}";
1398       Constraint += Len - 1;
1399       return Converted;
1400     }
1401     return std::string(1, *Constraint);
1402   default:
1403     R = TargetInfo::convertConstraint(Constraint);
1404     break;
1405   }
1406   return R;
1407 }
1408 
1409 bool AArch64TargetInfo::validateAsmConstraint(
1410     const char *&Name, TargetInfo::ConstraintInfo &Info) const {
1411   switch (*Name) {
1412   default:
1413     return false;
1414   case 'w': // Floating point and SIMD registers (V0-V31)
1415     Info.setAllowsRegister();
1416     return true;
1417   case 'I': // Constant that can be used with an ADD instruction
1418   case 'J': // Constant that can be used with a SUB instruction
1419   case 'K': // Constant that can be used with a 32-bit logical instruction
1420   case 'L': // Constant that can be used with a 64-bit logical instruction
1421   case 'M': // Constant that can be used as a 32-bit MOV immediate
1422   case 'N': // Constant that can be used as a 64-bit MOV immediate
1423   case 'Y': // Floating point constant zero
1424   case 'Z': // Integer constant zero
1425     return true;
1426   case 'Q': // A memory reference with base register and no offset
1427     Info.setAllowsMemory();
1428     return true;
1429   case 'S': // A symbolic address
1430     Info.setAllowsRegister();
1431     return true;
1432   case 'U':
1433     if (Name[1] == 'p' &&
1434         (Name[2] == 'l' || Name[2] == 'a' || Name[2] == 'h')) {
1435       // SVE predicate registers ("Upa"=P0-15, "Upl"=P0-P7, "Uph"=P8-P15)
1436       Info.setAllowsRegister();
1437       Name += 2;
1438       return true;
1439     }
1440     if (Name[1] == 'c' && (Name[2] == 'i' || Name[2] == 'j')) {
1441       // General-purpose registers ("Uci"=w8-w11, "Ucj"=w12-w15)
1442       Info.setAllowsRegister();
1443       Name += 2;
1444       return true;
1445     }
1446     // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes.
1447     // Utf: A memory address suitable for ldp/stp in TF mode.
1448     // Usa: An absolute symbolic address.
1449     // Ush: The high part (bits 32:12) of a pc-relative symbolic address.
1450 
1451     // Better to return an error saying that it's an unrecognised constraint
1452     // even if this is a valid constraint in gcc.
1453     return false;
1454   case 'z': // Zero register, wzr or xzr
1455     Info.setAllowsRegister();
1456     return true;
1457   case 'x': // Floating point and SIMD registers (V0-V15)
1458     Info.setAllowsRegister();
1459     return true;
1460   case 'y': // SVE registers (V0-V7)
1461     Info.setAllowsRegister();
1462     return true;
1463   case '@':
1464     // CC condition
1465     if (const unsigned Len = matchAsmCCConstraint(Name)) {
1466       Name += Len - 1;
1467       Info.setAllowsRegister();
1468       return true;
1469     }
1470   }
1471   return false;
1472 }
1473 
1474 bool AArch64TargetInfo::validateConstraintModifier(
1475     StringRef Constraint, char Modifier, unsigned Size,
1476     std::string &SuggestedModifier) const {
1477   // Strip off constraint modifiers.
1478   Constraint = Constraint.ltrim("=+&");
1479 
1480   switch (Constraint[0]) {
1481   default:
1482     return true;
1483   case 'z':
1484   case 'r': {
1485     switch (Modifier) {
1486     case 'x':
1487     case 'w':
1488       // For now assume that the person knows what they're
1489       // doing with the modifier.
1490       return true;
1491     default:
1492       // By default an 'r' constraint will be in the 'x'
1493       // registers.
1494       if (Size == 64)
1495         return true;
1496 
1497       if (Size == 512)
1498         return HasLS64;
1499 
1500       SuggestedModifier = "w";
1501       return false;
1502     }
1503   }
1504   }
1505 }
1506 
1507 std::string_view AArch64TargetInfo::getClobbers() const { return ""; }
1508 
1509 int AArch64TargetInfo::getEHDataRegisterNumber(unsigned RegNo) const {
1510   if (RegNo == 0)
1511     return 0;
1512   if (RegNo == 1)
1513     return 1;
1514   return -1;
1515 }
1516 
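// Keys 0-3 select the IA, IB, DA and DB keys respectively; an illustrative call
// that reaches this check (p is a placeholder pointer):
//   void *q = __builtin_ptrauth_sign_unauthenticated(p, /*key=*/2, /*disc=*/0);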
1517 bool AArch64TargetInfo::validatePointerAuthKey(
1518     const llvm::APSInt &value) const {
1519   return 0 <= value && value <= 3;
1520 }
1521 
bool AArch64TargetInfo::hasInt128Type() const { return true; }

AArch64leTargetInfo::AArch64leTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}

void AArch64leTargetInfo::setDataLayout() {
  if (getTriple().isOSBinFormatMachO()) {
    if (getTriple().isArch32Bit())
      resetDataLayout("e-m:o-p:32:32-p270:32:32-p271:32:32-p272:64:64-i64:64-"
                      "i128:128-n32:64-S128-Fn32",
                      "_");
    else
      resetDataLayout("e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-"
                      "n32:64-S128-Fn32",
                      "_");
  } else
    resetDataLayout("e-m:e-p270:32:32-p271:32:32-p272:64:64-i8:8:32-i16:16:32-"
                    "i64:64-i128:128-n32:64-S128-Fn32");
}

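// Rough gloss of the strings above (hedged summary; the LLVM data layout
// reference is authoritative): "e" marks little-endian, "m:o" selects Mach-O
// name mangling while "m:e" selects ELF mangling, "i64:64" and "i128:128"
// give the ABI alignments of those integer types, "n32:64" lists the native
// integer widths, and "S128" requests 128-bit stack alignment.
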
void AArch64leTargetInfo::getTargetDefines(const LangOptions &Opts,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro("__AARCH64EL__");
  AArch64TargetInfo::getTargetDefines(Opts, Builder);
}

AArch64beTargetInfo::AArch64beTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}

void AArch64beTargetInfo::getTargetDefines(const LangOptions &Opts,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro("__AARCH64EB__");
  Builder.defineMacro("__AARCH_BIG_ENDIAN");
  Builder.defineMacro("__ARM_BIG_ENDIAN");
  AArch64TargetInfo::getTargetDefines(Opts, Builder);
}

void AArch64beTargetInfo::setDataLayout() {
  assert(!getTriple().isOSBinFormatMachO());
  resetDataLayout("E-m:e-p270:32:32-p271:32:32-p272:64:64-i8:8:32-i16:16:32-"
                  "i64:64-i128:128-n32:64-S128-Fn32");
}

WindowsARM64TargetInfo::WindowsARM64TargetInfo(const llvm::Triple &Triple,
                                               const TargetOptions &Opts)
    : WindowsTargetInfo<AArch64leTargetInfo>(Triple, Opts), Triple(Triple) {

  // This is an LLP64 platform.
  // int:4, long:4, long long:8, long double:8.
  IntWidth = IntAlign = 32;
  LongWidth = LongAlign = 32;
  DoubleAlign = LongLongAlign = 64;
  LongDoubleWidth = LongDoubleAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();
  IntMaxType = SignedLongLong;
  Int64Type = SignedLongLong;
  SizeType = UnsignedLongLong;
  PtrDiffType = SignedLongLong;
  IntPtrType = SignedLongLong;
}

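// Illustrative consequence of the LLP64 settings above (hypothetical
// translation unit built for aarch64-pc-windows-msvc):
//
//   static_assert(sizeof(long) == 4, "long stays 32-bit under LLP64");
//   static_assert(sizeof(long long) == 8, "");
//   static_assert(sizeof(void *) == 8, "");
//   static_assert(sizeof(long double) == 8, "long double is plain double");
//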
void WindowsARM64TargetInfo::setDataLayout() {
  resetDataLayout(Triple.isOSBinFormatMachO()
                      ? "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:"
                        "128-n32:64-S128-Fn32"
                      : "e-m:w-p270:32:32-p271:32:32-p272:64:64-p:64:64-i32:32-"
                        "i64:64-i128:128-n32:64-S128-Fn32",
                  Triple.isOSBinFormatMachO() ? "_" : "");
}

TargetInfo::BuiltinVaListKind
WindowsARM64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::CharPtrBuiltinVaList;
}

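// Note (descriptive): CharPtrBuiltinVaList means va_list is a plain 'char *'
// here, rather than the AAPCS64 struct-based va_list used for ELF AArch64.
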
TargetInfo::CallingConvCheckResult
WindowsARM64TargetInfo::checkCallingConvention(CallingConv CC) const {
  switch (CC) {
  case CC_X86VectorCall:
    if (getTriple().isWindowsArm64EC())
      return CCCR_OK;
    return CCCR_Ignore;
  case CC_X86StdCall:
  case CC_X86ThisCall:
  case CC_X86FastCall:
    return CCCR_Ignore;
  case CC_C:
  case CC_OpenCLKernel:
  case CC_PreserveMost:
  case CC_PreserveAll:
  case CC_PreserveNone:
  case CC_Swift:
  case CC_SwiftAsync:
  case CC_Win64:
    return CCCR_OK;
  default:
    return CCCR_Warning;
  }
}

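// Example (illustrative): the only x86 convention given special treatment is
// __vectorcall, which is accepted when targeting Arm64EC and otherwise
// ignored, like __stdcall, __thiscall and __fastcall.
//
//   void __vectorcall blend(double a, double b);  // OK on arm64ec-windows-msvc
//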
MicrosoftARM64TargetInfo::MicrosoftARM64TargetInfo(const llvm::Triple &Triple,
                                                   const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  TheCXXABI.set(TargetCXXABI::Microsoft);
}

void MicrosoftARM64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  WindowsARM64TargetInfo::getTargetDefines(Opts, Builder);
  if (getTriple().isWindowsArm64EC()) {
    Builder.defineMacro("_M_X64", "100");
    Builder.defineMacro("_M_AMD64", "100");
    Builder.defineMacro("_M_ARM64EC", "1");
  } else {
    Builder.defineMacro("_M_ARM64", "1");
  }
}

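// Example (illustrative): user code can distinguish the two Windows flavours
// with the macros defined above.
//
//   #if defined(_M_ARM64EC)
//     // Arm64EC: _M_X64 and _M_AMD64 are also visible for x64 compatibility.
//   #elif defined(_M_ARM64)
//     // Native Windows on Arm.
//   #endif
//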
TargetInfo::CallingConvKind
MicrosoftARM64TargetInfo::getCallingConvKind(bool ClangABICompat4) const {
  return CCK_MicrosoftWin64;
}

unsigned MicrosoftARM64TargetInfo::getMinGlobalAlign(uint64_t TypeSize,
                                                     bool HasNonWeakDef) const {
  unsigned Align =
      WindowsARM64TargetInfo::getMinGlobalAlign(TypeSize, HasNonWeakDef);

  // MSVC does size-based alignment for arm64, following the alignment section
  // of the document below; replicate that here to keep alignment consistent
  // with object files compiled by MSVC.
  // https://docs.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions
  if (TypeSize >= 512) {              // TypeSize >= 64 bytes
    Align = std::max(Align, 128u);    // align type at least 16 bytes
  } else if (TypeSize >= 64) {        // TypeSize >= 8 bytes
    Align = std::max(Align, 64u);     // align type at least 8 bytes
  } else if (TypeSize >= 16) {        // TypeSize >= 2 bytes
    Align = std::max(Align, 32u);     // align type at least 4 bytes
  }
  return Align;
}

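// Example (illustrative): how the size thresholds above play out for a few
// global definitions.
//
//   char small[3];   //  24 bits -> at least  4-byte (32-bit) alignment
//   double d;        //  64 bits -> at least  8-byte (64-bit) alignment
//   char big[64];    // 512 bits -> at least 16-byte (128-bit) alignment
//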
MinGWARM64TargetInfo::MinGWARM64TargetInfo(const llvm::Triple &Triple,
                                           const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  TheCXXABI.set(TargetCXXABI::GenericAArch64);
}

AppleMachOAArch64TargetInfo::AppleMachOAArch64TargetInfo(
    const llvm::Triple &Triple, const TargetOptions &Opts)
    : AppleMachOTargetInfo<AArch64leTargetInfo>(Triple, Opts) {}

DarwinAArch64TargetInfo::DarwinAArch64TargetInfo(const llvm::Triple &Triple,
                                                 const TargetOptions &Opts)
    : DarwinTargetInfo<AArch64leTargetInfo>(Triple, Opts) {
  Int64Type = SignedLongLong;
  if (getTriple().isArch32Bit())
    IntMaxType = SignedLongLong;

  WCharType = SignedInt;
  UseSignedCharForObjCBool = false;

  LongDoubleWidth = LongDoubleAlign = SuitableAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();

  UseZeroLengthBitfieldAlignment = false;

  if (getTriple().isArch32Bit()) {
    UseBitFieldTypeAlignment = false;
    ZeroLengthBitfieldBoundary = 32;
    UseZeroLengthBitfieldAlignment = true;
    TheCXXABI.set(TargetCXXABI::WatchOS);
  } else
    TheCXXABI.set(TargetCXXABI::AppleARM64);
}

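// Illustrative consequence of the Darwin settings above (hypothetical
// translation unit built for arm64-apple-darwin):
//
//   static_assert(sizeof(long double) == 8, "long double is IEEE double");
//   static_assert(sizeof(wchar_t) == 4, "wchar_t is a signed 32-bit int");
//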
void clang::targets::getAppleMachOAArch64Defines(MacroBuilder &Builder,
                                                 const LangOptions &Opts,
                                                 const llvm::Triple &Triple) {
  Builder.defineMacro("__AARCH64_SIMD__");
  if (Triple.isArch32Bit())
    Builder.defineMacro("__ARM64_ARCH_8_32__");
  else
    Builder.defineMacro("__ARM64_ARCH_8__");
  Builder.defineMacro("__ARM_NEON__");
  Builder.defineMacro("__REGISTER_PREFIX__", "");
  Builder.defineMacro("__arm64", "1");
  Builder.defineMacro("__arm64__", "1");

  if (Triple.isArm64e())
    Builder.defineMacro("__arm64e__", "1");
}

void AppleMachOAArch64TargetInfo::getOSDefines(const LangOptions &Opts,
                                               const llvm::Triple &Triple,
                                               MacroBuilder &Builder) const {
  getAppleMachOAArch64Defines(Builder, Opts, Triple);
  AppleMachOTargetInfo<AArch64leTargetInfo>::getOSDefines(Opts, Triple,
                                                          Builder);
}

void DarwinAArch64TargetInfo::getOSDefines(const LangOptions &Opts,
                                           const llvm::Triple &Triple,
                                           MacroBuilder &Builder) const {
  getAppleMachOAArch64Defines(Builder, Opts, Triple);
  DarwinTargetInfo<AArch64leTargetInfo>::getOSDefines(Opts, Triple, Builder);
}

TargetInfo::BuiltinVaListKind
DarwinAArch64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::CharPtrBuiltinVaList;
}