xref: /freebsd-src/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp (revision 46c59ea9b61755455ff6bf9f3e7b834e1af634ea)
1 //===--- AArch64.cpp - Implement AArch64 target feature support -----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements AArch64 TargetInfo objects.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "AArch64.h"
14 #include "clang/Basic/LangOptions.h"
15 #include "clang/Basic/TargetBuiltins.h"
16 #include "clang/Basic/TargetInfo.h"
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/StringExtras.h"
19 #include "llvm/ADT/StringSwitch.h"
20 #include "llvm/TargetParser/AArch64TargetParser.h"
21 #include "llvm/TargetParser/ARMTargetParserCommon.h"
22 #include <optional>
23 
24 using namespace clang;
25 using namespace clang::targets;
26 
// Descriptor table for every AArch64 target-specific builtin.  It is
// assembled from the x-macro .def files in order: NEON, SVE, SME, then the
// remaining core AArch64 builtins.  getTargetBuiltins() exposes this table,
// sized by clang::AArch64::LastTSBuiltin - Builtin::FirstTSBuiltin, so the
// include order here must stay in sync with the builtin ID enumeration.
static constexpr Builtin::Info BuiltinInfo[] = {
#define BUILTIN(ID, TYPE, ATTRS)                                               \
  {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE)                               \
  {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#include "clang/Basic/BuiltinsNEON.def"

// The BUILTIN/TARGET_BUILTIN macros are redefined before each include; the
// .def files are expected to #undef them at the end of each expansion.
#define BUILTIN(ID, TYPE, ATTRS)                                               \
  {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE)                               \
  {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#include "clang/Basic/BuiltinsSVE.def"

#define BUILTIN(ID, TYPE, ATTRS)                                               \
  {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE)                               \
  {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#include "clang/Basic/BuiltinsSME.def"

// The core AArch64 set additionally has language-restricted and
// header-dependent builtins.
#define BUILTIN(ID, TYPE, ATTRS)                                               \
  {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define LANGBUILTIN(ID, TYPE, ATTRS, LANG)                                     \
  {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, LANG},
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE)                               \
  {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define TARGET_HEADER_BUILTIN(ID, TYPE, ATTRS, HEADER, LANGS, FEATURE)         \
  {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::HEADER, LANGS},
#include "clang/Basic/BuiltinsAArch64.def"
};
56 
57 void AArch64TargetInfo::setArchFeatures() {
58   if (*ArchInfo == llvm::AArch64::ARMV8R) {
59     HasDotProd = true;
60     HasDIT = true;
61     HasFlagM = true;
62     HasRCPC = true;
63     FPU |= NeonMode;
64     HasCCPP = true;
65     HasCRC = true;
66     HasLSE = true;
67     HasRDM = true;
68   } else if (ArchInfo->Version.getMajor() == 8) {
69     if (ArchInfo->Version.getMinor() >= 7u) {
70       HasWFxT = true;
71     }
72     if (ArchInfo->Version.getMinor() >= 6u) {
73       HasBFloat16 = true;
74       HasMatMul = true;
75     }
76     if (ArchInfo->Version.getMinor() >= 5u) {
77       HasAlternativeNZCV = true;
78       HasFRInt3264 = true;
79       HasSSBS = true;
80       HasSB = true;
81       HasPredRes = true;
82       HasBTI = true;
83     }
84     if (ArchInfo->Version.getMinor() >= 4u) {
85       HasDotProd = true;
86       HasDIT = true;
87       HasFlagM = true;
88     }
89     if (ArchInfo->Version.getMinor() >= 3u) {
90       HasRCPC = true;
91       FPU |= NeonMode;
92     }
93     if (ArchInfo->Version.getMinor() >= 2u) {
94       HasCCPP = true;
95     }
96     if (ArchInfo->Version.getMinor() >= 1u) {
97       HasCRC = true;
98       HasLSE = true;
99       HasRDM = true;
100     }
101   } else if (ArchInfo->Version.getMajor() == 9) {
102     if (ArchInfo->Version.getMinor() >= 2u) {
103       HasWFxT = true;
104     }
105     if (ArchInfo->Version.getMinor() >= 1u) {
106       HasBFloat16 = true;
107       HasMatMul = true;
108     }
109     FPU |= SveMode;
110     HasSVE2 = true;
111     HasFullFP16 = true;
112     HasAlternativeNZCV = true;
113     HasFRInt3264 = true;
114     HasSSBS = true;
115     HasSB = true;
116     HasPredRes = true;
117     HasBTI = true;
118     HasDotProd = true;
119     HasDIT = true;
120     HasFlagM = true;
121     HasRCPC = true;
122     FPU |= NeonMode;
123     HasCCPP = true;
124     HasCRC = true;
125     HasLSE = true;
126     HasRDM = true;
127   }
128 }
129 
130 AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple &Triple,
131                                      const TargetOptions &Opts)
132     : TargetInfo(Triple), ABI("aapcs") {
133   if (getTriple().isOSOpenBSD()) {
134     Int64Type = SignedLongLong;
135     IntMaxType = SignedLongLong;
136   } else {
137     if (!getTriple().isOSDarwin() && !getTriple().isOSNetBSD())
138       WCharType = UnsignedInt;
139 
140     Int64Type = SignedLong;
141     IntMaxType = SignedLong;
142   }
143 
144   // All AArch64 implementations support ARMv8 FP, which makes half a legal type.
145   HasLegalHalfType = true;
146   HalfArgsAndReturns = true;
147   HasFloat16 = true;
148   HasStrictFP = true;
149 
150   if (Triple.isArch64Bit())
151     LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
152   else
153     LongWidth = LongAlign = PointerWidth = PointerAlign = 32;
154 
155   MaxVectorAlign = 128;
156   MaxAtomicInlineWidth = 128;
157   MaxAtomicPromoteWidth = 128;
158 
159   LongDoubleWidth = LongDoubleAlign = SuitableAlign = 128;
160   LongDoubleFormat = &llvm::APFloat::IEEEquad();
161 
162   BFloat16Width = BFloat16Align = 16;
163   BFloat16Format = &llvm::APFloat::BFloat();
164 
165   // Make __builtin_ms_va_list available.
166   HasBuiltinMSVaList = true;
167 
168   // Make the SVE types available.  Note that this deliberately doesn't
169   // depend on SveMode, since in principle it should be possible to turn
170   // SVE on and off within a translation unit.  It should also be possible
171   // to compile the global declaration:
172   //
173   // __SVInt8_t *ptr;
174   //
175   // even without SVE.
176   HasAArch64SVETypes = true;
177 
178   // {} in inline assembly are neon specifiers, not assembly variant
179   // specifiers.
180   NoAsmVariants = true;
181 
182   // AAPCS gives rules for bitfields. 7.1.7 says: "The container type
183   // contributes to the alignment of the containing aggregate in the same way
184   // a plain (non bit-field) member of that type would, without exception for
185   // zero-sized or anonymous bit-fields."
186   assert(UseBitFieldTypeAlignment && "bitfields affect type alignment");
187   UseZeroLengthBitfieldAlignment = true;
188 
189   // AArch64 targets default to using the ARM C++ ABI.
190   TheCXXABI.set(TargetCXXABI::GenericAArch64);
191 
192   if (Triple.getOS() == llvm::Triple::Linux)
193     this->MCountName = "\01_mcount";
194   else if (Triple.getOS() == llvm::Triple::UnknownOS)
195     this->MCountName =
196         Opts.EABIVersion == llvm::EABI::GNU ? "\01_mcount" : "mcount";
197 }
198 
199 StringRef AArch64TargetInfo::getABI() const { return ABI; }
200 
201 bool AArch64TargetInfo::setABI(const std::string &Name) {
202   if (Name != "aapcs" && Name != "darwinpcs")
203     return false;
204 
205   ABI = Name;
206   return true;
207 }
208 
209 bool AArch64TargetInfo::validateBranchProtection(StringRef Spec, StringRef,
210                                                  BranchProtectionInfo &BPI,
211                                                  StringRef &Err) const {
212   llvm::ARM::ParsedBranchProtection PBP;
213   if (!llvm::ARM::parseBranchProtection(Spec, PBP, Err))
214     return false;
215 
216   BPI.SignReturnAddr =
217       llvm::StringSwitch<LangOptions::SignReturnAddressScopeKind>(PBP.Scope)
218           .Case("non-leaf", LangOptions::SignReturnAddressScopeKind::NonLeaf)
219           .Case("all", LangOptions::SignReturnAddressScopeKind::All)
220           .Default(LangOptions::SignReturnAddressScopeKind::None);
221 
222   if (PBP.Key == "a_key")
223     BPI.SignKey = LangOptions::SignReturnAddressKeyKind::AKey;
224   else
225     BPI.SignKey = LangOptions::SignReturnAddressKeyKind::BKey;
226 
227   BPI.BranchTargetEnforcement = PBP.BranchTargetEnforcement;
228   BPI.BranchProtectionPAuthLR = PBP.BranchProtectionPAuthLR;
229   BPI.GuardedControlStack = PBP.GuardedControlStack;
230   return true;
231 }
232 
233 bool AArch64TargetInfo::isValidCPUName(StringRef Name) const {
234   return Name == "generic" || llvm::AArch64::parseCpu(Name);
235 }
236 
bool AArch64TargetInfo::setCPU(const std::string &Name) {
  // No per-CPU state is cached here; accepting the CPU is pure validation.
  return isValidCPUName(Name);
}
240 
void AArch64TargetInfo::fillValidCPUList(
    SmallVectorImpl<StringRef> &Values) const {
  // Delegate to the target parser's CPU/arch list.
  llvm::AArch64::fillValidCPUArchList(Values);
}
245 
void AArch64TargetInfo::getTargetDefinesARMV81A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv8.1-A: advertise the QRDMX (rounding doubling multiply-accumulate)
  // ACLE feature macro.
  Builder.defineMacro("__ARM_FEATURE_QRDMX", "1");
}
250 
void AArch64TargetInfo::getTargetDefinesARMV82A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv8.2-A adds no predefines of its own here; include the ARMv8.1 set.
  getTargetDefinesARMV81A(Opts, Builder);
}
256 
void AArch64TargetInfo::getTargetDefinesARMV83A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv8.3-A: complex-number intrinsics, JavaScript FP conversion, and
  // pointer authentication.
  Builder.defineMacro("__ARM_FEATURE_COMPLEX", "1");
  Builder.defineMacro("__ARM_FEATURE_JCVT", "1");
  Builder.defineMacro("__ARM_FEATURE_PAUTH", "1");
  // Also include the Armv8.2 defines
  getTargetDefinesARMV82A(Opts, Builder);
}
265 
void AArch64TargetInfo::getTargetDefinesARMV84A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv8.4-A adds no predefines of its own here; include the Armv8.3 set.
  getTargetDefinesARMV83A(Opts, Builder);
}
271 
void AArch64TargetInfo::getTargetDefinesARMV85A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv8.5-A: FRINT32/FRINT64 rounding and branch target identification.
  Builder.defineMacro("__ARM_FEATURE_FRINT", "1");
  Builder.defineMacro("__ARM_FEATURE_BTI", "1");
  // Also include the Armv8.4 defines
  getTargetDefinesARMV84A(Opts, Builder);
}
279 
void AArch64TargetInfo::getTargetDefinesARMV86A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv8.6-A currently defines nothing of its own here (see FIXME below);
  // include the Armv8.5 set.
  // FIXME: Armv8.6 makes the following extensions mandatory:
  // - __ARM_FEATURE_BF16
  // - __ARM_FEATURE_MATMUL_INT8
  // Handle them here.
  getTargetDefinesARMV85A(Opts, Builder);
}
289 
void AArch64TargetInfo::getTargetDefinesARMV87A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv8.7-A adds no predefines of its own here; include the Armv8.6 set.
  getTargetDefinesARMV86A(Opts, Builder);
}
295 
void AArch64TargetInfo::getTargetDefinesARMV88A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv8.8-A adds no predefines of its own here; include the Armv8.7 set.
  getTargetDefinesARMV87A(Opts, Builder);
}
301 
void AArch64TargetInfo::getTargetDefinesARMV89A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv8.9-A adds no predefines of its own here; include the Armv8.8 set.
  getTargetDefinesARMV88A(Opts, Builder);
}
307 
void AArch64TargetInfo::getTargetDefinesARMV9A(const LangOptions &Opts,
                                               MacroBuilder &Builder) const {
  // For these predefines, Armv9-A maps to Armv8.5-A.
  getTargetDefinesARMV85A(Opts, Builder);
}
313 
void AArch64TargetInfo::getTargetDefinesARMV91A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // For these predefines, Armv9.1-A maps to Armv8.6-A.
  getTargetDefinesARMV86A(Opts, Builder);
}
319 
void AArch64TargetInfo::getTargetDefinesARMV92A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // For these predefines, Armv9.2-A maps to Armv8.7-A.
  getTargetDefinesARMV87A(Opts, Builder);
}
325 
void AArch64TargetInfo::getTargetDefinesARMV93A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // For these predefines, Armv9.3-A maps to Armv8.8-A.
  getTargetDefinesARMV88A(Opts, Builder);
}
331 
void AArch64TargetInfo::getTargetDefinesARMV94A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // For these predefines, Armv9.4-A maps to Armv8.9-A.
  getTargetDefinesARMV89A(Opts, Builder);
}
337 
void AArch64TargetInfo::getTargetDefinesARMV95A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.5-A does not have a v8.* equivalent, but is a superset of v9.4-A.
  getTargetDefinesARMV94A(Opts, Builder);
}
343 
344 void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
345                                          MacroBuilder &Builder) const {
346   // Target identification.
347   if (getTriple().isWindowsArm64EC()) {
348     // Define the same set of macros as would be defined on x86_64 to ensure that
349     // ARM64EC datatype layouts match those of x86_64 compiled code
350     Builder.defineMacro("__amd64__");
351     Builder.defineMacro("__amd64");
352     Builder.defineMacro("__x86_64");
353     Builder.defineMacro("__x86_64__");
354     Builder.defineMacro("__arm64ec__");
355   } else {
356     Builder.defineMacro("__aarch64__");
357   }
358 
359   // Inline assembly supports AArch64 flag outputs.
360   Builder.defineMacro("__GCC_ASM_FLAG_OUTPUTS__");
361 
362   std::string CodeModel = getTargetOpts().CodeModel;
363   if (CodeModel == "default")
364     CodeModel = "small";
365   for (char &c : CodeModel)
366     c = toupper(c);
367   Builder.defineMacro("__AARCH64_CMODEL_" + CodeModel + "__");
368 
369   // ACLE predefines. Many can only have one possible value on v8 AArch64.
370   Builder.defineMacro("__ARM_ACLE", "200");
371   Builder.defineMacro("__ARM_ARCH",
372                       std::to_string(ArchInfo->Version.getMajor()));
373   Builder.defineMacro("__ARM_ARCH_PROFILE",
374                       std::string("'") + (char)ArchInfo->Profile + "'");
375 
376   Builder.defineMacro("__ARM_64BIT_STATE", "1");
377   Builder.defineMacro("__ARM_PCS_AAPCS64", "1");
378   Builder.defineMacro("__ARM_ARCH_ISA_A64", "1");
379 
380   Builder.defineMacro("__ARM_FEATURE_CLZ", "1");
381   Builder.defineMacro("__ARM_FEATURE_FMA", "1");
382   Builder.defineMacro("__ARM_FEATURE_LDREX", "0xF");
383   Builder.defineMacro("__ARM_FEATURE_IDIV", "1"); // As specified in ACLE
384   Builder.defineMacro("__ARM_FEATURE_DIV");       // For backwards compatibility
385   Builder.defineMacro("__ARM_FEATURE_NUMERIC_MAXMIN", "1");
386   Builder.defineMacro("__ARM_FEATURE_DIRECTED_ROUNDING", "1");
387 
388   Builder.defineMacro("__ARM_ALIGN_MAX_STACK_PWR", "4");
389 
390   // 0xe implies support for half, single and double precision operations.
391   if (FPU & FPUMode)
392     Builder.defineMacro("__ARM_FP", "0xE");
393 
394   // PCS specifies this for SysV variants, which is all we support. Other ABIs
395   // may choose __ARM_FP16_FORMAT_ALTERNATIVE.
396   Builder.defineMacro("__ARM_FP16_FORMAT_IEEE", "1");
397   Builder.defineMacro("__ARM_FP16_ARGS", "1");
398 
399   if (Opts.UnsafeFPMath)
400     Builder.defineMacro("__ARM_FP_FAST", "1");
401 
402   Builder.defineMacro("__ARM_SIZEOF_WCHAR_T",
403                       Twine(Opts.WCharSize ? Opts.WCharSize : 4));
404 
405   Builder.defineMacro("__ARM_SIZEOF_MINIMAL_ENUM", Opts.ShortEnums ? "1" : "4");
406 
407   if (FPU & NeonMode) {
408     Builder.defineMacro("__ARM_NEON", "1");
409     // 64-bit NEON supports half, single and double precision operations.
410     Builder.defineMacro("__ARM_NEON_FP", "0xE");
411   }
412 
413   if (FPU & SveMode)
414     Builder.defineMacro("__ARM_FEATURE_SVE", "1");
415 
416   if ((FPU & NeonMode) && (FPU & SveMode))
417     Builder.defineMacro("__ARM_NEON_SVE_BRIDGE", "1");
418 
419   if (HasSVE2)
420     Builder.defineMacro("__ARM_FEATURE_SVE2", "1");
421 
422   if (HasSVE2 && HasSVE2AES)
423     Builder.defineMacro("__ARM_FEATURE_SVE2_AES", "1");
424 
425   if (HasSVE2 && HasSVE2BitPerm)
426     Builder.defineMacro("__ARM_FEATURE_SVE2_BITPERM", "1");
427 
428   if (HasSVE2 && HasSVE2SHA3)
429     Builder.defineMacro("__ARM_FEATURE_SVE2_SHA3", "1");
430 
431   if (HasSVE2 && HasSVE2SM4)
432     Builder.defineMacro("__ARM_FEATURE_SVE2_SM4", "1");
433 
434   if (HasCRC)
435     Builder.defineMacro("__ARM_FEATURE_CRC32", "1");
436 
437   if (HasRCPC3)
438     Builder.defineMacro("__ARM_FEATURE_RCPC", "3");
439   else if (HasRCPC)
440     Builder.defineMacro("__ARM_FEATURE_RCPC", "1");
441 
442   if (HasFMV)
443     Builder.defineMacro("__HAVE_FUNCTION_MULTI_VERSIONING", "1");
444 
445   // The __ARM_FEATURE_CRYPTO is deprecated in favor of finer grained feature
446   // macros for AES, SHA2, SHA3 and SM4
447   if (HasAES && HasSHA2)
448     Builder.defineMacro("__ARM_FEATURE_CRYPTO", "1");
449 
450   if (HasAES)
451     Builder.defineMacro("__ARM_FEATURE_AES", "1");
452 
453   if (HasSHA2)
454     Builder.defineMacro("__ARM_FEATURE_SHA2", "1");
455 
456   if (HasSHA3) {
457     Builder.defineMacro("__ARM_FEATURE_SHA3", "1");
458     Builder.defineMacro("__ARM_FEATURE_SHA512", "1");
459   }
460 
461   if (HasSM4) {
462     Builder.defineMacro("__ARM_FEATURE_SM3", "1");
463     Builder.defineMacro("__ARM_FEATURE_SM4", "1");
464   }
465 
466   if (HasPAuth)
467     Builder.defineMacro("__ARM_FEATURE_PAUTH", "1");
468 
469   if (HasUnaligned)
470     Builder.defineMacro("__ARM_FEATURE_UNALIGNED", "1");
471 
472   if ((FPU & NeonMode) && HasFullFP16)
473     Builder.defineMacro("__ARM_FEATURE_FP16_VECTOR_ARITHMETIC", "1");
474   if (HasFullFP16)
475    Builder.defineMacro("__ARM_FEATURE_FP16_SCALAR_ARITHMETIC", "1");
476 
477   if (HasDotProd)
478     Builder.defineMacro("__ARM_FEATURE_DOTPROD", "1");
479 
480   if (HasMTE)
481     Builder.defineMacro("__ARM_FEATURE_MEMORY_TAGGING", "1");
482 
483   if (HasTME)
484     Builder.defineMacro("__ARM_FEATURE_TME", "1");
485 
486   if (HasMatMul)
487     Builder.defineMacro("__ARM_FEATURE_MATMUL_INT8", "1");
488 
489   if (HasLSE)
490     Builder.defineMacro("__ARM_FEATURE_ATOMICS", "1");
491 
492   if (HasBFloat16) {
493     Builder.defineMacro("__ARM_FEATURE_BF16", "1");
494     Builder.defineMacro("__ARM_FEATURE_BF16_VECTOR_ARITHMETIC", "1");
495     Builder.defineMacro("__ARM_BF16_FORMAT_ALTERNATIVE", "1");
496     Builder.defineMacro("__ARM_FEATURE_BF16_SCALAR_ARITHMETIC", "1");
497   }
498 
499   if ((FPU & SveMode) && HasBFloat16) {
500     Builder.defineMacro("__ARM_FEATURE_SVE_BF16", "1");
501   }
502 
503   if ((FPU & SveMode) && HasMatmulFP64)
504     Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP64", "1");
505 
506   if ((FPU & SveMode) && HasMatmulFP32)
507     Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP32", "1");
508 
509   if ((FPU & SveMode) && HasMatMul)
510     Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_INT8", "1");
511 
512   if ((FPU & NeonMode) && HasFP16FML)
513     Builder.defineMacro("__ARM_FEATURE_FP16_FML", "1");
514 
515   if (Opts.hasSignReturnAddress()) {
516     // Bitmask:
517     // 0: Protection using the A key
518     // 1: Protection using the B key
519     // 2: Protection including leaf functions
520     unsigned Value = 0;
521 
522     if (Opts.isSignReturnAddressWithAKey())
523       Value |= (1 << 0);
524     else
525       Value |= (1 << 1);
526 
527     if (Opts.isSignReturnAddressScopeAll())
528       Value |= (1 << 2);
529 
530     Builder.defineMacro("__ARM_FEATURE_PAC_DEFAULT", std::to_string(Value));
531   }
532 
533   if (Opts.BranchTargetEnforcement)
534     Builder.defineMacro("__ARM_FEATURE_BTI_DEFAULT", "1");
535 
536   if (Opts.GuardedControlStack)
537     Builder.defineMacro("__ARM_FEATURE_GCS_DEFAULT", "1");
538 
539   if (HasLS64)
540     Builder.defineMacro("__ARM_FEATURE_LS64", "1");
541 
542   if (HasRandGen)
543     Builder.defineMacro("__ARM_FEATURE_RNG", "1");
544 
545   if (HasMOPS)
546     Builder.defineMacro("__ARM_FEATURE_MOPS", "1");
547 
548   if (HasD128)
549     Builder.defineMacro("__ARM_FEATURE_SYSREG128", "1");
550 
551   if (HasGCS)
552     Builder.defineMacro("__ARM_FEATURE_GCS", "1");
553 
554   if (*ArchInfo == llvm::AArch64::ARMV8_1A)
555     getTargetDefinesARMV81A(Opts, Builder);
556   else if (*ArchInfo == llvm::AArch64::ARMV8_2A)
557     getTargetDefinesARMV82A(Opts, Builder);
558   else if (*ArchInfo == llvm::AArch64::ARMV8_3A)
559     getTargetDefinesARMV83A(Opts, Builder);
560   else if (*ArchInfo == llvm::AArch64::ARMV8_4A)
561     getTargetDefinesARMV84A(Opts, Builder);
562   else if (*ArchInfo == llvm::AArch64::ARMV8_5A)
563     getTargetDefinesARMV85A(Opts, Builder);
564   else if (*ArchInfo == llvm::AArch64::ARMV8_6A)
565     getTargetDefinesARMV86A(Opts, Builder);
566   else if (*ArchInfo == llvm::AArch64::ARMV8_7A)
567     getTargetDefinesARMV87A(Opts, Builder);
568   else if (*ArchInfo == llvm::AArch64::ARMV8_8A)
569     getTargetDefinesARMV88A(Opts, Builder);
570   else if (*ArchInfo == llvm::AArch64::ARMV8_9A)
571     getTargetDefinesARMV89A(Opts, Builder);
572   else if (*ArchInfo == llvm::AArch64::ARMV9A)
573     getTargetDefinesARMV9A(Opts, Builder);
574   else if (*ArchInfo == llvm::AArch64::ARMV9_1A)
575     getTargetDefinesARMV91A(Opts, Builder);
576   else if (*ArchInfo == llvm::AArch64::ARMV9_2A)
577     getTargetDefinesARMV92A(Opts, Builder);
578   else if (*ArchInfo == llvm::AArch64::ARMV9_3A)
579     getTargetDefinesARMV93A(Opts, Builder);
580   else if (*ArchInfo == llvm::AArch64::ARMV9_4A)
581     getTargetDefinesARMV94A(Opts, Builder);
582   else if (*ArchInfo == llvm::AArch64::ARMV9_5A)
583     getTargetDefinesARMV95A(Opts, Builder);
584 
585   // All of the __sync_(bool|val)_compare_and_swap_(1|2|4|8|16) builtins work.
586   Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
587   Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
588   Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
589   Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");
590   Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16");
591 
592   // Allow detection of fast FMA support.
593   Builder.defineMacro("__FP_FAST_FMA", "1");
594   Builder.defineMacro("__FP_FAST_FMAF", "1");
595 
596   // C/C++ operators work on both VLS and VLA SVE types
597   if (FPU & SveMode)
598     Builder.defineMacro("__ARM_FEATURE_SVE_VECTOR_OPERATORS", "2");
599 
600   if (Opts.VScaleMin && Opts.VScaleMin == Opts.VScaleMax) {
601     Builder.defineMacro("__ARM_FEATURE_SVE_BITS", Twine(Opts.VScaleMin * 128));
602   }
603 }
604 
ArrayRef<Builtin::Info> AArch64TargetInfo::getTargetBuiltins() const {
  // Expose the BuiltinInfo table; its length is the number of AArch64
  // target-specific builtin IDs.
  return llvm::ArrayRef(BuiltinInfo, clang::AArch64::LastTSBuiltin -
                                         Builtin::FirstTSBuiltin);
}
609 
610 std::optional<std::pair<unsigned, unsigned>>
611 AArch64TargetInfo::getVScaleRange(const LangOptions &LangOpts) const {
612   if (LangOpts.VScaleMin || LangOpts.VScaleMax)
613     return std::pair<unsigned, unsigned>(
614         LangOpts.VScaleMin ? LangOpts.VScaleMin : 1, LangOpts.VScaleMax);
615 
616   if (hasFeature("sve"))
617     return std::pair<unsigned, unsigned>(1, 16);
618 
619   return std::nullopt;
620 }
621 
622 unsigned AArch64TargetInfo::multiVersionSortPriority(StringRef Name) const {
623   if (Name == "default")
624     return 0;
625   for (const auto &E : llvm::AArch64::Extensions)
626     if (Name == E.Name)
627       return E.FmvPriority;
628   return 0;
629 }
630 
unsigned AArch64TargetInfo::multiVersionFeatureCost() const {
  // Take the maximum priority as the per-feature cost, so more features win.
  return llvm::AArch64::ExtensionInfo::MaxFMVPriority;
}
635 
636 bool AArch64TargetInfo::doesFeatureAffectCodeGen(StringRef Name) const {
637   auto F = llvm::find_if(llvm::AArch64::Extensions, [&](const auto &E) {
638     return Name == E.Name && !E.DependentFeatures.empty();
639   });
640   return F != std::end(llvm::AArch64::Extensions);
641 }
642 
643 StringRef AArch64TargetInfo::getFeatureDependencies(StringRef Name) const {
644   auto F = llvm::find_if(llvm::AArch64::Extensions,
645                          [&](const auto &E) { return Name == E.Name; });
646   return F != std::end(llvm::AArch64::Extensions) ? F->DependentFeatures
647                                                   : StringRef();
648 }
649 
650 bool AArch64TargetInfo::validateCpuSupports(StringRef FeatureStr) const {
651   for (const auto &E : llvm::AArch64::Extensions)
652     if (FeatureStr == E.Name)
653       return true;
654   return false;
655 }
656 
bool AArch64TargetInfo::hasFeature(StringRef Feature) const {
  // Query whether a named (sub)target feature is enabled, based on the state
  // accumulated by setArchFeatures()/handleTargetFeatures().  Several
  // spellings map onto one flag (e.g. "aes"/"pmull", "memtag"/"memtag2").
  return llvm::StringSwitch<bool>(Feature)
      .Cases("aarch64", "arm64", "arm", true)
      .Case("fmv", HasFMV)
      .Cases("neon", "fp", "simd", FPU & NeonMode)
      .Case("jscvt", HasJSCVT)
      .Case("fcma", HasFCMA)
      .Case("rng", HasRandGen)
      .Case("flagm", HasFlagM)
      .Case("flagm2", HasAlternativeNZCV)
      .Case("fp16fml", HasFP16FML)
      .Case("dotprod", HasDotProd)
      .Case("sm4", HasSM4)
      .Case("rdm", HasRDM)
      .Case("lse", HasLSE)
      .Case("crc", HasCRC)
      .Case("sha2", HasSHA2)
      .Case("sha3", HasSHA3)
      .Cases("aes", "pmull", HasAES)
      .Cases("fp16", "fullfp16", HasFullFP16)
      .Case("dit", HasDIT)
      .Case("dpb", HasCCPP)
      .Case("dpb2", HasCCDP)
      .Case("rcpc", HasRCPC)
      .Case("frintts", HasFRInt3264)
      .Case("i8mm", HasMatMul)
      .Case("bf16", HasBFloat16)
      // SVE-dependent features additionally require the SVE FPU mode bit.
      .Case("sve", FPU & SveMode)
      .Case("sve-bf16", FPU & SveMode && HasBFloat16)
      .Case("sve-i8mm", FPU & SveMode && HasMatMul)
      .Case("f32mm", FPU & SveMode && HasMatmulFP32)
      .Case("f64mm", FPU & SveMode && HasMatmulFP64)
      .Case("sve2", FPU & SveMode && HasSVE2)
      .Case("sve2-pmull128", FPU & SveMode && HasSVE2AES)
      .Case("sve2-bitperm", FPU & SveMode && HasSVE2BitPerm)
      .Case("sve2-sha3", FPU & SveMode && HasSVE2SHA3)
      .Case("sve2-sm4", FPU & SveMode && HasSVE2SM4)
      .Case("sme", HasSME)
      .Case("sme-f64f64", HasSMEF64F64)
      .Case("sme-i16i64", HasSMEI16I64)
      .Case("sme-fa64", HasSMEFA64)
      .Cases("memtag", "memtag2", HasMTE)
      .Case("sb", HasSB)
      .Case("predres", HasPredRes)
      .Cases("ssbs", "ssbs2", HasSSBS)
      .Case("bti", HasBTI)
      .Cases("ls64", "ls64_v", "ls64_accdata", HasLS64)
      .Case("wfxt", HasWFxT)
      .Case("rcpc3", HasRCPC3)
      .Default(false);
}
708 
709 void AArch64TargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
710                                           StringRef Name, bool Enabled) const {
711   Features[Name] = Enabled;
712   // If the feature is an architecture feature (like v8.2a), add all previous
713   // architecture versions and any dependant target features.
714   const std::optional<llvm::AArch64::ArchInfo> ArchInfo =
715       llvm::AArch64::ArchInfo::findBySubArch(Name);
716 
717   if (!ArchInfo)
718     return; // Not an architecture, nothing more to do.
719 
720   // Disabling an architecture feature does not affect dependent features
721   if (!Enabled)
722     return;
723 
724   for (const auto *OtherArch : llvm::AArch64::ArchInfos)
725     if (ArchInfo->implies(*OtherArch))
726       Features[OtherArch->getSubArch()] = true;
727 
728   // Set any features implied by the architecture
729   std::vector<StringRef> CPUFeats;
730   if (llvm::AArch64::getExtensionFeatures(ArchInfo->DefaultExts, CPUFeats)) {
731     for (auto F : CPUFeats) {
732       assert(F[0] == '+' && "Expected + in target feature!");
733       Features[F.drop_front(1)] = true;
734     }
735   }
736 }
737 
738 bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
739                                              DiagnosticsEngine &Diags) {
740   for (const auto &Feature : Features) {
741     if (Feature == "-fp-armv8")
742       HasNoFP = true;
743     if (Feature == "-neon")
744       HasNoNeon = true;
745     if (Feature == "-sve")
746       HasNoSVE = true;
747 
748     if (Feature == "+neon" || Feature == "+fp-armv8")
749       FPU |= NeonMode;
750     if (Feature == "+jscvt") {
751       HasJSCVT = true;
752       FPU |= NeonMode;
753     }
754     if (Feature == "+fcma") {
755       HasFCMA = true;
756       FPU |= NeonMode;
757     }
758 
759     if (Feature == "+sve") {
760       FPU |= NeonMode;
761       FPU |= SveMode;
762       HasFullFP16 = true;
763     }
764     if (Feature == "+sve2") {
765       FPU |= NeonMode;
766       FPU |= SveMode;
767       HasFullFP16 = true;
768       HasSVE2 = true;
769     }
770     if (Feature == "+sve2-aes") {
771       FPU |= NeonMode;
772       FPU |= SveMode;
773       HasFullFP16 = true;
774       HasSVE2 = true;
775       HasSVE2AES = true;
776     }
777     if (Feature == "+sve2-sha3") {
778       FPU |= NeonMode;
779       FPU |= SveMode;
780       HasFullFP16 = true;
781       HasSVE2 = true;
782       HasSVE2SHA3 = true;
783     }
784     if (Feature == "+sve2-sm4") {
785       FPU |= NeonMode;
786       FPU |= SveMode;
787       HasFullFP16 = true;
788       HasSVE2 = true;
789       HasSVE2SM4 = true;
790     }
791     if (Feature == "+sve2-bitperm") {
792       FPU |= NeonMode;
793       FPU |= SveMode;
794       HasFullFP16 = true;
795       HasSVE2 = true;
796       HasSVE2BitPerm = true;
797     }
798     if (Feature == "+f32mm") {
799       FPU |= NeonMode;
800       FPU |= SveMode;
801       HasFullFP16 = true;
802       HasMatmulFP32 = true;
803     }
804     if (Feature == "+f64mm") {
805       FPU |= NeonMode;
806       FPU |= SveMode;
807       HasFullFP16 = true;
808       HasMatmulFP64 = true;
809     }
810     if (Feature == "+sme") {
811       HasSME = true;
812       HasBFloat16 = true;
813       HasFullFP16 = true;
814     }
815     if (Feature == "+sme-f64f64") {
816       HasSME = true;
817       HasSMEF64F64 = true;
818       HasBFloat16 = true;
819       HasFullFP16 = true;
820     }
821     if (Feature == "+sme-i16i64") {
822       HasSME = true;
823       HasSMEI16I64 = true;
824       HasBFloat16 = true;
825       HasFullFP16 = true;
826     }
827     if (Feature == "+sme-fa64") {
828       FPU |= NeonMode;
829       FPU |= SveMode;
830       HasSME = true;
831       HasSVE2 = true;
832       HasSMEFA64 = true;
833     }
834     if (Feature == "+sb")
835       HasSB = true;
836     if (Feature == "+predres")
837       HasPredRes = true;
838     if (Feature == "+ssbs")
839       HasSSBS = true;
840     if (Feature == "+bti")
841       HasBTI = true;
842     if (Feature == "+wfxt")
843       HasWFxT = true;
844     if (Feature == "-fmv")
845       HasFMV = false;
846     if (Feature == "+crc")
847       HasCRC = true;
848     if (Feature == "+rcpc")
849       HasRCPC = true;
850     if (Feature == "+aes") {
851       FPU |= NeonMode;
852       HasAES = true;
853     }
854     if (Feature == "+sha2") {
855       FPU |= NeonMode;
856       HasSHA2 = true;
857     }
858     if (Feature == "+sha3") {
859       FPU |= NeonMode;
860       HasSHA2 = true;
861       HasSHA3 = true;
862     }
863     if (Feature == "+rdm") {
864       FPU |= NeonMode;
865       HasRDM = true;
866     }
867     if (Feature == "+dit")
868       HasDIT = true;
869     if (Feature == "+cccp")
870       HasCCPP = true;
871     if (Feature == "+ccdp") {
872       HasCCPP = true;
873       HasCCDP = true;
874     }
875     if (Feature == "+fptoint")
876       HasFRInt3264 = true;
877     if (Feature == "+sm4") {
878       FPU |= NeonMode;
879       HasSM4 = true;
880     }
881     if (Feature == "+strict-align")
882       HasUnaligned = false;
883     // All predecessor archs are added but select the latest one for ArchKind.
884     if (Feature == "+v8a" && ArchInfo->Version < llvm::AArch64::ARMV8A.Version)
885       ArchInfo = &llvm::AArch64::ARMV8A;
886     if (Feature == "+v8.1a" &&
887         ArchInfo->Version < llvm::AArch64::ARMV8_1A.Version)
888       ArchInfo = &llvm::AArch64::ARMV8_1A;
889     if (Feature == "+v8.2a" &&
890         ArchInfo->Version < llvm::AArch64::ARMV8_2A.Version)
891       ArchInfo = &llvm::AArch64::ARMV8_2A;
892     if (Feature == "+v8.3a" &&
893         ArchInfo->Version < llvm::AArch64::ARMV8_3A.Version)
894       ArchInfo = &llvm::AArch64::ARMV8_3A;
895     if (Feature == "+v8.4a" &&
896         ArchInfo->Version < llvm::AArch64::ARMV8_4A.Version)
897       ArchInfo = &llvm::AArch64::ARMV8_4A;
898     if (Feature == "+v8.5a" &&
899         ArchInfo->Version < llvm::AArch64::ARMV8_5A.Version)
900       ArchInfo = &llvm::AArch64::ARMV8_5A;
901     if (Feature == "+v8.6a" &&
902         ArchInfo->Version < llvm::AArch64::ARMV8_6A.Version)
903       ArchInfo = &llvm::AArch64::ARMV8_6A;
904     if (Feature == "+v8.7a" &&
905         ArchInfo->Version < llvm::AArch64::ARMV8_7A.Version)
906       ArchInfo = &llvm::AArch64::ARMV8_7A;
907     if (Feature == "+v8.8a" &&
908         ArchInfo->Version < llvm::AArch64::ARMV8_8A.Version)
909       ArchInfo = &llvm::AArch64::ARMV8_8A;
910     if (Feature == "+v8.9a" &&
911         ArchInfo->Version < llvm::AArch64::ARMV8_9A.Version)
912       ArchInfo = &llvm::AArch64::ARMV8_9A;
913     if (Feature == "+v9a" && ArchInfo->Version < llvm::AArch64::ARMV9A.Version)
914       ArchInfo = &llvm::AArch64::ARMV9A;
915     if (Feature == "+v9.1a" &&
916         ArchInfo->Version < llvm::AArch64::ARMV9_1A.Version)
917       ArchInfo = &llvm::AArch64::ARMV9_1A;
918     if (Feature == "+v9.2a" &&
919         ArchInfo->Version < llvm::AArch64::ARMV9_2A.Version)
920       ArchInfo = &llvm::AArch64::ARMV9_2A;
921     if (Feature == "+v9.3a" &&
922         ArchInfo->Version < llvm::AArch64::ARMV9_3A.Version)
923       ArchInfo = &llvm::AArch64::ARMV9_3A;
924     if (Feature == "+v9.4a" &&
925         ArchInfo->Version < llvm::AArch64::ARMV9_4A.Version)
926       ArchInfo = &llvm::AArch64::ARMV9_4A;
927     if (Feature == "+v9.5a" &&
928         ArchInfo->Version < llvm::AArch64::ARMV9_5A.Version)
929       ArchInfo = &llvm::AArch64::ARMV9_5A;
930     if (Feature == "+v8r")
931       ArchInfo = &llvm::AArch64::ARMV8R;
932     if (Feature == "+fullfp16") {
933       FPU |= NeonMode;
934       HasFullFP16 = true;
935     }
936     if (Feature == "+dotprod") {
937       FPU |= NeonMode;
938       HasDotProd = true;
939     }
940     if (Feature == "+fp16fml") {
941       FPU |= NeonMode;
942       HasFullFP16 = true;
943       HasFP16FML = true;
944     }
945     if (Feature == "+mte")
946       HasMTE = true;
947     if (Feature == "+tme")
948       HasTME = true;
949     if (Feature == "+pauth")
950       HasPAuth = true;
951     if (Feature == "+i8mm")
952       HasMatMul = true;
953     if (Feature == "+bf16")
954       HasBFloat16 = true;
955     if (Feature == "+lse")
956       HasLSE = true;
957     if (Feature == "+ls64")
958       HasLS64 = true;
959     if (Feature == "+rand")
960       HasRandGen = true;
961     if (Feature == "+flagm")
962       HasFlagM = true;
963     if (Feature == "+altnzcv") {
964       HasFlagM = true;
965       HasAlternativeNZCV = true;
966     }
967     if (Feature == "+mops")
968       HasMOPS = true;
969     if (Feature == "+d128")
970       HasD128 = true;
971     if (Feature == "+gcs")
972       HasGCS = true;
973     if (Feature == "+rcpc3")
974       HasRCPC3 = true;
975   }
976 
977   // Check features that are manually disabled by command line options.
978   // This needs to be checked after architecture-related features are handled,
979   // making sure they are properly disabled when required.
980   for (const auto &Feature : Features) {
981     if (Feature == "-d128")
982       HasD128 = false;
983   }
984 
985   setDataLayout();
986   setArchFeatures();
987 
988   if (HasNoFP) {
989     FPU &= ~FPUMode;
990     FPU &= ~NeonMode;
991     FPU &= ~SveMode;
992   }
993   if (HasNoNeon) {
994     FPU &= ~NeonMode;
995     FPU &= ~SveMode;
996   }
997   if (HasNoSVE)
998     FPU &= ~SveMode;
999 
1000   return true;
1001 }
1002 
// Build the final feature map for this target from the CPU name and the
// explicit feature vector. CPU-implied features and dependent features are
// expanded first so that explicit '+/-' entries processed later can override
// them; the combined list is handed to the generic TargetInfo implementation.
bool AArch64TargetInfo::initFeatureMap(
    llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags, StringRef CPU,
    const std::vector<std::string> &FeaturesVec) const {
  std::vector<std::string> UpdatedFeaturesVec;
  // Parse the CPU and add any implied features.
  std::optional<llvm::AArch64::CpuInfo> CpuInfo = llvm::AArch64::parseCpu(CPU);
  if (CpuInfo) {
    auto Exts = CpuInfo->getImpliedExtensions();
    std::vector<StringRef> CPUFeats;
    llvm::AArch64::getExtensionFeatures(Exts, CPUFeats);
    for (auto F : CPUFeats) {
      assert((F[0] == '+' || F[0] == '-') && "Expected +/- in target feature!");
      UpdatedFeaturesVec.push_back(F.str());
    }
  }

  // Process target and dependent features. This is done in two loops collecting
  // them into UpdatedFeaturesVec: first to add dependent '+'features, second to
  // add target '+/-'features that can later disable some of features added on
  // the first loop. Function Multi Versioning features begin with '?'.
  for (const auto &Feature : FeaturesVec)
    if (((Feature[0] == '?' || Feature[0] == '+')) &&
        AArch64TargetInfo::doesFeatureAffectCodeGen(Feature.substr(1))) {
      // Dependencies come back as a comma-separated feature string; split and
      // append each one individually.
      StringRef DepFeatures =
          AArch64TargetInfo::getFeatureDependencies(Feature.substr(1));
      SmallVector<StringRef, 1> AttrFeatures;
      DepFeatures.split(AttrFeatures, ",");
      for (auto F : AttrFeatures)
        UpdatedFeaturesVec.push_back(F.str());
    }
  for (const auto &Feature : FeaturesVec)
    if (Feature[0] != '?') {
      // Canonicalize recognized '+'extensions to their backend feature name;
      // unrecognized names are passed through unchanged.
      std::string UpdatedFeature = Feature;
      if (Feature[0] == '+') {
        std::optional<llvm::AArch64::ExtensionInfo> Extension =
          llvm::AArch64::parseArchExtension(Feature.substr(1));
        if (Extension)
          UpdatedFeature = Extension->Feature.str();
      }
      UpdatedFeaturesVec.push_back(UpdatedFeature);
    }

  return TargetInfo::initFeatureMap(Features, Diags, CPU, UpdatedFeaturesVec);
}
1047 
// Parse AArch64 Target attributes, which are a comma separated list of:
//  "arch=<arch>" - parsed to features as per -march=..
//  "cpu=<cpu>" - parsed to features as per -mcpu=.., with CPU set to <cpu>
//  "tune=<cpu>" - TuneCPU set to <cpu>
//  "feature", "no-feature" - Add (or remove) feature.
//  "+feature", "+nofeature" - Add (or remove) feature.
ParsedTargetAttr AArch64TargetInfo::parseTargetAttr(StringRef Features) const {
  ParsedTargetAttr Ret;
  if (Features == "default")
    return Ret;
  SmallVector<StringRef, 1> AttrFeatures;
  Features.split(AttrFeatures, ",");
  bool FoundArch = false;

  // Split a "+feat1+feat2" suffix (as found after "arch=" or "cpu=") and
  // append each piece to Features, translating through getArchExtFeature
  // when the name is recognized.
  auto SplitAndAddFeatures = [](StringRef FeatString,
                                std::vector<std::string> &Features) {
    SmallVector<StringRef, 8> SplitFeatures;
    FeatString.split(SplitFeatures, StringRef("+"), -1, false);
    for (StringRef Feature : SplitFeatures) {
      StringRef FeatureName = llvm::AArch64::getArchExtFeature(Feature);
      if (!FeatureName.empty())
        Features.push_back(FeatureName.str());
      else
        // Pushing the original feature string to give a sema error later on
        // when they get checked.
        if (Feature.starts_with("no"))
          Features.push_back("-" + Feature.drop_front(2).str());
        else
          Features.push_back("+" + Feature.str());
    }
  };

  for (auto &Feature : AttrFeatures) {
    Feature = Feature.trim();
    // "fpmath=" is accepted but has no effect on AArch64.
    if (Feature.starts_with("fpmath="))
      continue;

    if (Feature.starts_with("branch-protection=")) {
      Ret.BranchProtection = Feature.split('=').second.trim();
      continue;
    }

    if (Feature.starts_with("arch=")) {
      // Only one "arch=" is allowed; a repeat is reported via Ret.Duplicate.
      if (FoundArch)
        Ret.Duplicate = "arch=";
      FoundArch = true;
      std::pair<StringRef, StringRef> Split =
          Feature.split("=").second.trim().split("+");
      const std::optional<llvm::AArch64::ArchInfo> AI =
          llvm::AArch64::parseArch(Split.first);

      // Parse the architecture version, adding the required features to
      // Ret.Features.
      if (!AI)
        continue;
      Ret.Features.push_back(AI->ArchFeature.str());
      // Add any extra features, after the +
      SplitAndAddFeatures(Split.second, Ret.Features);
    } else if (Feature.starts_with("cpu=")) {
      if (!Ret.CPU.empty())
        Ret.Duplicate = "cpu=";
      else {
        // Split the cpu string into "cpu=", "cortex-a710" and any remaining
        // "+feat" features.
        std::pair<StringRef, StringRef> Split =
            Feature.split("=").second.trim().split("+");
        Ret.CPU = Split.first;
        SplitAndAddFeatures(Split.second, Ret.Features);
      }
    } else if (Feature.starts_with("tune=")) {
      if (!Ret.Tune.empty())
        Ret.Duplicate = "tune=";
      else
        Ret.Tune = Feature.split("=").second.trim();
    } else if (Feature.starts_with("+")) {
      SplitAndAddFeatures(Feature, Ret.Features);
    } else if (Feature.starts_with("no-")) {
      // "no-feat": translate to the backend name when recognized, otherwise
      // forward the raw name (minus "no-") so Sema can reject it later.
      StringRef FeatureName =
          llvm::AArch64::getArchExtFeature(Feature.split("-").second);
      if (!FeatureName.empty())
        Ret.Features.push_back("-" + FeatureName.drop_front(1).str());
      else
        Ret.Features.push_back("-" + Feature.split("-").second.str());
    } else {
      // Try parsing the string to the internal target feature name. If it is
      // invalid, add the original string (which could already be an internal
      // name). These should be checked later by isValidFeatureName.
      StringRef FeatureName = llvm::AArch64::getArchExtFeature(Feature);
      if (!FeatureName.empty())
        Ret.Features.push_back(FeatureName.str());
      else
        Ret.Features.push_back("+" + Feature.str());
    }
  }
  return Ret;
}
1144 
1145 bool AArch64TargetInfo::hasBFloat16Type() const {
1146   return true;
1147 }
1148 
1149 TargetInfo::CallingConvCheckResult
1150 AArch64TargetInfo::checkCallingConvention(CallingConv CC) const {
1151   switch (CC) {
1152   case CC_C:
1153   case CC_Swift:
1154   case CC_SwiftAsync:
1155   case CC_PreserveMost:
1156   case CC_PreserveAll:
1157   case CC_OpenCLKernel:
1158   case CC_AArch64VectorCall:
1159   case CC_AArch64SVEPCS:
1160   case CC_Win64:
1161     return CCCR_OK;
1162   default:
1163     return CCCR_Warning;
1164   }
1165 }
1166 
// __builtin_clz and friends are well-defined for a zero input on AArch64.
bool AArch64TargetInfo::isCLZForZeroUndef() const { return false; }
1168 
// AArch64 (AAPCS64) uses a struct-based va_list.
TargetInfo::BuiltinVaListKind AArch64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::AArch64ABIBuiltinVaList;
}
1172 
// Register names accepted in GCC-style inline assembly (asm("..." : "=r"...))
// and in register variables.
const char *const AArch64TargetInfo::GCCRegNames[] = {
    // 32-bit Integer registers
    "w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", "w8", "w9", "w10", "w11",
    "w12", "w13", "w14", "w15", "w16", "w17", "w18", "w19", "w20", "w21", "w22",
    "w23", "w24", "w25", "w26", "w27", "w28", "w29", "w30", "wsp",

    // 64-bit Integer registers
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11",
    "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22",
    "x23", "x24", "x25", "x26", "x27", "x28", "fp", "lr", "sp",

    // 32-bit floating point registers
    "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10", "s11",
    "s12", "s13", "s14", "s15", "s16", "s17", "s18", "s19", "s20", "s21", "s22",
    "s23", "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",

    // 64-bit floating point registers
    "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10", "d11",
    "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20", "d21", "d22",
    "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",

    // Neon vector registers
    "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11",
    "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22",
    "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",

    // SVE vector registers
    "z0",  "z1",  "z2",  "z3",  "z4",  "z5",  "z6",  "z7",  "z8",  "z9",  "z10",
    "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21",
    "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31",

    // SVE predicate registers
    "p0",  "p1",  "p2",  "p3",  "p4",  "p5",  "p6",  "p7",  "p8",  "p9",  "p10",
    "p11", "p12", "p13", "p14", "p15",

    // SVE predicate-as-counter registers
    "pn0",  "pn1",  "pn2",  "pn3",  "pn4",  "pn5",  "pn6",  "pn7",  "pn8",
    "pn9",  "pn10", "pn11", "pn12", "pn13", "pn14", "pn15"
};
1212 
// Expose the GCC-style register-name table above.
ArrayRef<const char *> AArch64TargetInfo::getGCCRegNames() const {
  return llvm::ArrayRef(GCCRegNames);
}
1216 
// Alternate spellings accepted for registers in inline assembly; each alias
// list maps to the canonical name in GCCRegNames.
const TargetInfo::GCCRegAlias AArch64TargetInfo::GCCRegAliases[] = {
    {{"w31"}, "wsp"},
    {{"x31"}, "sp"},
    // GCC rN registers are aliases of xN registers.
    {{"r0"}, "x0"},
    {{"r1"}, "x1"},
    {{"r2"}, "x2"},
    {{"r3"}, "x3"},
    {{"r4"}, "x4"},
    {{"r5"}, "x5"},
    {{"r6"}, "x6"},
    {{"r7"}, "x7"},
    {{"r8"}, "x8"},
    {{"r9"}, "x9"},
    {{"r10"}, "x10"},
    {{"r11"}, "x11"},
    {{"r12"}, "x12"},
    {{"r13"}, "x13"},
    {{"r14"}, "x14"},
    {{"r15"}, "x15"},
    {{"r16"}, "x16"},
    {{"r17"}, "x17"},
    {{"r18"}, "x18"},
    {{"r19"}, "x19"},
    {{"r20"}, "x20"},
    {{"r21"}, "x21"},
    {{"r22"}, "x22"},
    {{"r23"}, "x23"},
    {{"r24"}, "x24"},
    {{"r25"}, "x25"},
    {{"r26"}, "x26"},
    {{"r27"}, "x27"},
    {{"r28"}, "x28"},
    {{"r29", "x29"}, "fp"},
    {{"r30", "x30"}, "lr"},
    // The S/D/Q and W/X registers overlap, but aren't really aliases; we
    // don't want to substitute one of these for a different-sized one.
};
1255 
// Expose the register-alias table above.
ArrayRef<TargetInfo::GCCRegAlias> AArch64TargetInfo::getGCCRegAliases() const {
  return llvm::ArrayRef(GCCRegAliases);
}
1259 
1260 // Returns the length of cc constraint.
1261 static unsigned matchAsmCCConstraint(const char *Name) {
1262   constexpr unsigned len = 5;
1263   auto RV = llvm::StringSwitch<unsigned>(Name)
1264                 .Case("@cceq", len)
1265                 .Case("@ccne", len)
1266                 .Case("@cchs", len)
1267                 .Case("@cccs", len)
1268                 .Case("@cccc", len)
1269                 .Case("@cclo", len)
1270                 .Case("@ccmi", len)
1271                 .Case("@ccpl", len)
1272                 .Case("@ccvs", len)
1273                 .Case("@ccvc", len)
1274                 .Case("@cchi", len)
1275                 .Case("@ccls", len)
1276                 .Case("@ccge", len)
1277                 .Case("@cclt", len)
1278                 .Case("@ccgt", len)
1279                 .Case("@ccle", len)
1280                 .Default(0);
1281   return RV;
1282 }
1283 
// Rewrite a target-specific inline-asm constraint into the form LLVM expects.
// 'U*' three-character constraints gain an "@3" length prefix; "@cc*"
// condition constraints become "{@cc..}". The caller advances Constraint by
// one after this returns, hence the "length - 1" pointer adjustments.
std::string
AArch64TargetInfo::convertConstraint(const char *&Constraint) const {
  std::string R;
  switch (*Constraint) {
  case 'U': // Three-character constraint; add "@3" hint for later parsing.
    R = std::string("@3") + std::string(Constraint, 3);
    Constraint += 2;
    break;
  case '@':
    if (const unsigned Len = matchAsmCCConstraint(Constraint)) {
      std::string Converted = "{" + std::string(Constraint, Len) + "}";
      Constraint += Len - 1;
      return Converted;
    }
    // Unrecognized '@' constraint: pass the single character through.
    return std::string(1, *Constraint);
  default:
    R = TargetInfo::convertConstraint(Constraint);
    break;
  }
  return R;
}
1305 
// Validate a single inline-asm constraint letter (or multi-character 'U'/'@'
// constraint), recording in Info whether it allows a register or memory
// operand. Returns false for constraints this target does not recognize.
bool AArch64TargetInfo::validateAsmConstraint(
    const char *&Name, TargetInfo::ConstraintInfo &Info) const {
  switch (*Name) {
  default:
    return false;
  case 'w': // Floating point and SIMD registers (V0-V31)
    Info.setAllowsRegister();
    return true;
  case 'I': // Constant that can be used with an ADD instruction
  case 'J': // Constant that can be used with a SUB instruction
  case 'K': // Constant that can be used with a 32-bit logical instruction
  case 'L': // Constant that can be used with a 64-bit logical instruction
  case 'M': // Constant that can be used as a 32-bit MOV immediate
  case 'N': // Constant that can be used as a 64-bit MOV immediate
  case 'Y': // Floating point constant zero
  case 'Z': // Integer constant zero
    return true;
  case 'Q': // A memory reference with base register and no offset
    Info.setAllowsMemory();
    return true;
  case 'S': // A symbolic address
    Info.setAllowsRegister();
    return true;
  case 'U':
    if (Name[1] == 'p' &&
        (Name[2] == 'l' || Name[2] == 'a' || Name[2] == 'h')) {
      // SVE predicate registers ("Upa"=P0-15, "Upl"=P0-P7, "Uph"=P8-P15)
      Info.setAllowsRegister();
      Name += 2;
      return true;
    }
    if (Name[1] == 'c' && (Name[2] == 'i' || Name[2] == 'j')) {
      // Gpr registers ("Uci"=w8-11, "Ucj"=w12-15)
      Info.setAllowsRegister();
      Name += 2;
      return true;
    }
    // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes.
    // Utf: A memory address suitable for ldp/stp in TF mode.
    // Usa: An absolute symbolic address.
    // Ush: The high part (bits 32:12) of a pc-relative symbolic address.

    // Better to return an error saying that it's an unrecognised constraint
    // even if this is a valid constraint in gcc.
    return false;
  case 'z': // Zero register, wzr or xzr
    Info.setAllowsRegister();
    return true;
  case 'x': // Floating point and SIMD registers (V0-V15)
    Info.setAllowsRegister();
    return true;
  case 'y': // SVE registers (V0-V7)
    Info.setAllowsRegister();
    return true;
  case '@':
    // CC condition
    if (const unsigned Len = matchAsmCCConstraint(Name)) {
      Name += Len - 1;
      Info.setAllowsRegister();
      return true;
    }
    // Unmatched '@' deliberately falls out of the switch to the final
    // "return false" below.
  }
  return false;
}
1370 
1371 bool AArch64TargetInfo::validateConstraintModifier(
1372     StringRef Constraint, char Modifier, unsigned Size,
1373     std::string &SuggestedModifier) const {
1374   // Strip off constraint modifiers.
1375   Constraint = Constraint.ltrim("=+&");
1376 
1377   switch (Constraint[0]) {
1378   default:
1379     return true;
1380   case 'z':
1381   case 'r': {
1382     switch (Modifier) {
1383     case 'x':
1384     case 'w':
1385       // For now assume that the person knows what they're
1386       // doing with the modifier.
1387       return true;
1388     default:
1389       // By default an 'r' constraint will be in the 'x'
1390       // registers.
1391       if (Size == 64)
1392         return true;
1393 
1394       if (Size == 512)
1395         return HasLS64;
1396 
1397       SuggestedModifier = "w";
1398       return false;
1399     }
1400   }
1401   }
1402 }
1403 
// No registers are implicitly clobbered by AArch64 inline assembly.
std::string_view AArch64TargetInfo::getClobbers() const { return ""; }
1405 
1406 int AArch64TargetInfo::getEHDataRegisterNumber(unsigned RegNo) const {
1407   if (RegNo == 0)
1408     return 0;
1409   if (RegNo == 1)
1410     return 1;
1411   return -1;
1412 }
1413 
// __int128 is supported on AArch64.
bool AArch64TargetInfo::hasInt128Type() const { return true; }
1415 
// Little-endian AArch64 target; all configuration comes from the base class.
AArch64leTargetInfo::AArch64leTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}
1419 
1420 void AArch64leTargetInfo::setDataLayout() {
1421   if (getTriple().isOSBinFormatMachO()) {
1422     if(getTriple().isArch32Bit())
1423       resetDataLayout("e-m:o-p:32:32-i64:64-i128:128-n32:64-S128", "_");
1424     else
1425       resetDataLayout("e-m:o-i64:64-i128:128-n32:64-S128", "_");
1426   } else
1427     resetDataLayout("e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
1428 }
1429 
// Little-endian macro set: __AARCH64EL__ plus the common AArch64 defines.
void AArch64leTargetInfo::getTargetDefines(const LangOptions &Opts,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro("__AARCH64EL__");
  AArch64TargetInfo::getTargetDefines(Opts, Builder);
}
1435 
// Big-endian AArch64 target; all configuration comes from the base class.
AArch64beTargetInfo::AArch64beTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}
1439 
// Big-endian macro set: endianness markers plus the common AArch64 defines.
void AArch64beTargetInfo::getTargetDefines(const LangOptions &Opts,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro("__AARCH64EB__");
  Builder.defineMacro("__AARCH_BIG_ENDIAN");
  Builder.defineMacro("__ARM_BIG_ENDIAN");
  AArch64TargetInfo::getTargetDefines(Opts, Builder);
}
1447 
// Big-endian data layout; Mach-O has no big-endian AArch64 variant.
void AArch64beTargetInfo::setDataLayout() {
  assert(!getTriple().isOSBinFormatMachO());
  resetDataLayout("E-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
}
1452 
// Windows-on-ARM64 target: little-endian AArch64 with LLP64 type widths.
WindowsARM64TargetInfo::WindowsARM64TargetInfo(const llvm::Triple &Triple,
                                               const TargetOptions &Opts)
    : WindowsTargetInfo<AArch64leTargetInfo>(Triple, Opts), Triple(Triple) {

  // This is an LLP64 platform.
  // int:4, long:4, long long:8, long double:8.
  IntWidth = IntAlign = 32;
  LongWidth = LongAlign = 32;
  DoubleAlign = LongLongAlign = 64;
  LongDoubleWidth = LongDoubleAlign = 64;
  // long double is plain IEEE double on Windows.
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();
  // 64-bit integer-ish types are all "long long" flavors under LLP64.
  IntMaxType = SignedLongLong;
  Int64Type = SignedLongLong;
  SizeType = UnsignedLongLong;
  PtrDiffType = SignedLongLong;
  IntPtrType = SignedLongLong;
}
1470 
1471 void WindowsARM64TargetInfo::setDataLayout() {
1472   resetDataLayout(Triple.isOSBinFormatMachO()
1473                       ? "e-m:o-i64:64-i128:128-n32:64-S128"
1474                       : "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128",
1475                   Triple.isOSBinFormatMachO() ? "_" : "");
1476 }
1477 
// Windows on ARM64 uses a plain char* va_list.
TargetInfo::BuiltinVaListKind
WindowsARM64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::CharPtrBuiltinVaList;
}
1482 
1483 TargetInfo::CallingConvCheckResult
1484 WindowsARM64TargetInfo::checkCallingConvention(CallingConv CC) const {
1485   switch (CC) {
1486   case CC_X86StdCall:
1487   case CC_X86ThisCall:
1488   case CC_X86FastCall:
1489   case CC_X86VectorCall:
1490     return CCCR_Ignore;
1491   case CC_C:
1492   case CC_OpenCLKernel:
1493   case CC_PreserveMost:
1494   case CC_PreserveAll:
1495   case CC_Swift:
1496   case CC_SwiftAsync:
1497   case CC_Win64:
1498     return CCCR_OK;
1499   default:
1500     return CCCR_Warning;
1501   }
1502 }
1503 
// MSVC-environment ARM64 target: Windows ARM64 with the Microsoft C++ ABI.
MicrosoftARM64TargetInfo::MicrosoftARM64TargetInfo(const llvm::Triple &Triple,
                                                   const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  TheCXXABI.set(TargetCXXABI::Microsoft);
}
1509 
// MSVC machine macros: Arm64EC triples present themselves as x64
// (_M_X64/_M_AMD64) plus _M_ARM64EC; native ARM64 defines _M_ARM64.
void MicrosoftARM64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  WindowsARM64TargetInfo::getTargetDefines(Opts, Builder);
  if (getTriple().isWindowsArm64EC()) {
    Builder.defineMacro("_M_X64", "100");
    Builder.defineMacro("_M_AMD64", "100");
    Builder.defineMacro("_M_ARM64EC", "1");
  } else {
    Builder.defineMacro("_M_ARM64", "1");
  }
}
1521 
// The MSVC Win64 convention applies regardless of the ABI-compat setting.
TargetInfo::CallingConvKind
MicrosoftARM64TargetInfo::getCallingConvKind(bool ClangABICompat4) const {
  return CCK_MicrosoftWin64;
}
1526 
// Minimum alignment (in bits) for a global of the given size (in bits),
// matching MSVC's size-based bumping for ARM64.
unsigned MicrosoftARM64TargetInfo::getMinGlobalAlign(uint64_t TypeSize) const {
  unsigned Align = WindowsARM64TargetInfo::getMinGlobalAlign(TypeSize);

  // MSVC does size based alignment for arm64 based on alignment section in
  // below document, replicate that to keep alignment consistent with object
  // files compiled by MSVC.
  // https://docs.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions
  if (TypeSize >= 512) {              // TypeSize >= 64 bytes
    Align = std::max(Align, 128u);    // align type at least 16 bytes
  } else if (TypeSize >= 64) {        // TypeSize >= 8 bytes
    Align = std::max(Align, 64u);     // align type at least 8 bytes
  } else if (TypeSize >= 16) {        // TypeSize >= 2 bytes
    Align = std::max(Align, 32u);     // align type at least 4 bytes
  }
  return Align;
}
1543 
// MinGW ARM64 target: Windows ARM64 but with the Itanium-style generic
// AArch64 C++ ABI instead of the Microsoft one.
MinGWARM64TargetInfo::MinGWARM64TargetInfo(const llvm::Triple &Triple,
                                           const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  TheCXXABI.set(TargetCXXABI::GenericAArch64);
}
1549 
// Darwin (macOS/iOS/watchOS) AArch64 target. 32-bit triples here are
// arm64_32 (watchOS) and get extra bit-field layout tweaks and the WatchOS
// C++ ABI; 64-bit triples use the AppleARM64 ABI.
DarwinAArch64TargetInfo::DarwinAArch64TargetInfo(const llvm::Triple &Triple,
                                                 const TargetOptions &Opts)
    : DarwinTargetInfo<AArch64leTargetInfo>(Triple, Opts) {
  Int64Type = SignedLongLong;
  if (getTriple().isArch32Bit())
    IntMaxType = SignedLongLong;

  WCharType = SignedInt;
  // Darwin's BOOL is unsigned here, unlike other Objective-C targets.
  UseSignedCharForObjCBool = false;

  // long double is plain IEEE double on Darwin AArch64.
  LongDoubleWidth = LongDoubleAlign = SuitableAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();

  UseZeroLengthBitfieldAlignment = false;

  if (getTriple().isArch32Bit()) {
    // arm64_32 keeps 32-bit ARM bit-field layout rules.
    UseBitFieldTypeAlignment = false;
    ZeroLengthBitfieldBoundary = 32;
    UseZeroLengthBitfieldAlignment = true;
    TheCXXABI.set(TargetCXXABI::WatchOS);
  } else
    TheCXXABI.set(TargetCXXABI::AppleARM64);
}
1573 
// Darwin-specific predefined macros for AArch64, plus the common Darwin
// platform defines.
void DarwinAArch64TargetInfo::getOSDefines(const LangOptions &Opts,
                                           const llvm::Triple &Triple,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro("__AARCH64_SIMD__");
  // 32-bit triples are arm64_32; they advertise the 8.32 architecture name.
  if (Triple.isArch32Bit())
    Builder.defineMacro("__ARM64_ARCH_8_32__");
  else
    Builder.defineMacro("__ARM64_ARCH_8__");
  Builder.defineMacro("__ARM_NEON__");
  Builder.defineMacro("__REGISTER_PREFIX__", "");
  Builder.defineMacro("__arm64", "1");
  Builder.defineMacro("__arm64__", "1");

  // Pointer-authenticating arm64e variant.
  if (Triple.isArm64e())
    Builder.defineMacro("__arm64e__", "1");

  getDarwinDefines(Builder, Opts, Triple, PlatformName, PlatformMinVersion);
}
1592 
// Darwin AArch64 uses a plain char* va_list rather than the AAPCS64 struct.
TargetInfo::BuiltinVaListKind
DarwinAArch64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::CharPtrBuiltinVaList;
}
1597 
// 64-bit RenderScript is aarch64: reuse the little-endian AArch64 target with
// the triple's architecture forced to "aarch64".
RenderScript64TargetInfo::RenderScript64TargetInfo(const llvm::Triple &Triple,
                                                   const TargetOptions &Opts)
    : AArch64leTargetInfo(llvm::Triple("aarch64", Triple.getVendorName(),
                                       Triple.getOSName(),
                                       Triple.getEnvironmentName()),
                          Opts) {
  IsRenderScriptTarget = true;
}
1607 
// RenderScript macro set: __RENDERSCRIPT__ plus the standard little-endian
// AArch64 defines.
void RenderScript64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__RENDERSCRIPT__");
  AArch64leTargetInfo::getTargetDefines(Opts, Builder);
}
1613