xref: /freebsd-src/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp (revision 5e801ac66d24704442eba426ed13c3effb8a34e7)
1 //===--- AArch64.cpp - Implement AArch64 target feature support -----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements AArch64 TargetInfo objects.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "AArch64.h"
14 #include "clang/Basic/LangOptions.h"
15 #include "clang/Basic/TargetBuiltins.h"
16 #include "clang/Basic/TargetInfo.h"
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/StringExtras.h"
19 #include "llvm/ADT/StringSwitch.h"
20 #include "llvm/Support/AArch64TargetParser.h"
21 
22 using namespace clang;
23 using namespace clang::targets;
24 
// Table describing every AArch64 target builtin.  It is assembled from three
// .def files (NEON, SVE, core AArch64); the same files generate the builtin
// ID enumeration, so the include order below must stay in sync with that
// enumeration — do not reorder the includes.
const Builtin::Info AArch64TargetInfo::BuiltinInfo[] = {
#define BUILTIN(ID, TYPE, ATTRS)                                               \
   {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
#include "clang/Basic/BuiltinsNEON.def"

#define BUILTIN(ID, TYPE, ATTRS)                                               \
   {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
#include "clang/Basic/BuiltinsSVE.def"

// Core AArch64 builtins additionally support language-restricted entries and
// builtins that require a specific header/feature.
#define BUILTIN(ID, TYPE, ATTRS)                                               \
   {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
#define LANGBUILTIN(ID, TYPE, ATTRS, LANG)                                     \
  {#ID, TYPE, ATTRS, nullptr, LANG, nullptr},
#define TARGET_HEADER_BUILTIN(ID, TYPE, ATTRS, HEADER, LANGS, FEATURE)         \
  {#ID, TYPE, ATTRS, HEADER, LANGS, FEATURE},
#include "clang/Basic/BuiltinsAArch64.def"
};
42 
43 static StringRef getArchVersionString(llvm::AArch64::ArchKind Kind) {
44   switch (Kind) {
45   case llvm::AArch64::ArchKind::ARMV9A:
46   case llvm::AArch64::ArchKind::ARMV9_1A:
47   case llvm::AArch64::ArchKind::ARMV9_2A:
48     return "9";
49   default:
50     return "8";
51   }
52 }
53 
54 StringRef AArch64TargetInfo::getArchProfile() const {
55   switch (ArchKind) {
56   case llvm::AArch64::ArchKind::ARMV8R:
57     return "R";
58   default:
59     return "A";
60   }
61 }
62 
// Configure the baseline type layout, ABI defaults, and feature flags shared
// by all AArch64 targets.  OS-specific subclasses adjust on top of this.
AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple &Triple,
                                     const TargetOptions &Opts)
    : TargetInfo(Triple), ABI("aapcs") {
  // OpenBSD uses "long long" for int64_t/intmax_t; all other OSes here use
  // "long", and give wchar_t unsigned int except on Darwin and NetBSD.
  if (getTriple().isOSOpenBSD()) {
    Int64Type = SignedLongLong;
    IntMaxType = SignedLongLong;
  } else {
    if (!getTriple().isOSDarwin() && !getTriple().isOSNetBSD())
      WCharType = UnsignedInt;

    Int64Type = SignedLong;
    IntMaxType = SignedLong;
  }

  // All AArch64 implementations support ARMv8 FP, which makes half a legal type.
  HasLegalHalfType = true;
  HasFloat16 = true;

  // ILP32 variants (e.g. arm64_32 triples) keep 32-bit long/pointer.
  if (Triple.isArch64Bit())
    LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
  else
    LongWidth = LongAlign = PointerWidth = PointerAlign = 32;

  MaxVectorAlign = 128;
  MaxAtomicInlineWidth = 128;
  MaxAtomicPromoteWidth = 128;

  // long double is IEEE 128-bit quad precision on AArch64.
  LongDoubleWidth = LongDoubleAlign = SuitableAlign = 128;
  LongDoubleFormat = &llvm::APFloat::IEEEquad();

  BFloat16Width = BFloat16Align = 16;
  BFloat16Format = &llvm::APFloat::BFloat();

  // Make __builtin_ms_va_list available.
  HasBuiltinMSVaList = true;

  // Make the SVE types available.  Note that this deliberately doesn't
  // depend on SveMode, since in principle it should be possible to turn
  // SVE on and off within a translation unit.  It should also be possible
  // to compile the global declaration:
  //
  // __SVInt8_t *ptr;
  //
  // even without SVE.
  HasAArch64SVETypes = true;

  // {} in inline assembly are neon specifiers, not assembly variant
  // specifiers.
  NoAsmVariants = true;

  // AAPCS gives rules for bitfields. 7.1.7 says: "The container type
  // contributes to the alignment of the containing aggregate in the same way
  // a plain (non bit-field) member of that type would, without exception for
  // zero-sized or anonymous bit-fields."
  assert(UseBitFieldTypeAlignment && "bitfields affect type alignment");
  UseZeroLengthBitfieldAlignment = true;

  // AArch64 targets default to using the ARM C++ ABI.
  TheCXXABI.set(TargetCXXABI::GenericAArch64);

  // Profiling entry symbol differs per OS/EABI convention.
  if (Triple.getOS() == llvm::Triple::Linux)
    this->MCountName = "\01_mcount";
  else if (Triple.getOS() == llvm::Triple::UnknownOS)
    this->MCountName =
        Opts.EABIVersion == llvm::EABI::GNU ? "\01_mcount" : "mcount";
}
129 
130 StringRef AArch64TargetInfo::getABI() const { return ABI; }
131 
132 bool AArch64TargetInfo::setABI(const std::string &Name) {
133   if (Name != "aapcs" && Name != "darwinpcs")
134     return false;
135 
136   ABI = Name;
137   return true;
138 }
139 
140 bool AArch64TargetInfo::validateBranchProtection(StringRef Spec,
141                                                  BranchProtectionInfo &BPI,
142                                                  StringRef &Err) const {
143   llvm::AArch64::ParsedBranchProtection PBP;
144   if (!llvm::AArch64::parseBranchProtection(Spec, PBP, Err))
145     return false;
146 
147   BPI.SignReturnAddr =
148       llvm::StringSwitch<LangOptions::SignReturnAddressScopeKind>(PBP.Scope)
149           .Case("non-leaf", LangOptions::SignReturnAddressScopeKind::NonLeaf)
150           .Case("all", LangOptions::SignReturnAddressScopeKind::All)
151           .Default(LangOptions::SignReturnAddressScopeKind::None);
152 
153   if (PBP.Key == "a_key")
154     BPI.SignKey = LangOptions::SignReturnAddressKeyKind::AKey;
155   else
156     BPI.SignKey = LangOptions::SignReturnAddressKeyKind::BKey;
157 
158   BPI.BranchTargetEnforcement = PBP.BranchTargetEnforcement;
159   return true;
160 }
161 
162 bool AArch64TargetInfo::isValidCPUName(StringRef Name) const {
163   return Name == "generic" ||
164          llvm::AArch64::parseCPUArch(Name) != llvm::AArch64::ArchKind::INVALID;
165 }
166 
// Accept the CPU selection iff the name is recognized; no additional state
// is recorded here beyond validation.
bool AArch64TargetInfo::setCPU(const std::string &Name) {
  return isValidCPUName(Name);
}
170 
// Populate Values with every CPU name the AArch64 target parser knows,
// for -mcpu=? style diagnostics.
void AArch64TargetInfo::fillValidCPUList(
    SmallVectorImpl<StringRef> &Values) const {
  llvm::AArch64::fillValidCPUArchList(Values);
}
175 
176 void AArch64TargetInfo::getTargetDefinesARMV81A(const LangOptions &Opts,
177                                                 MacroBuilder &Builder) const {
178   Builder.defineMacro("__ARM_FEATURE_QRDMX", "1");
179   Builder.defineMacro("__ARM_FEATURE_ATOMICS", "1");
180   Builder.defineMacro("__ARM_FEATURE_CRC32", "1");
181 }
182 
183 void AArch64TargetInfo::getTargetDefinesARMV82A(const LangOptions &Opts,
184                                                 MacroBuilder &Builder) const {
185   // Also include the ARMv8.1 defines
186   getTargetDefinesARMV81A(Opts, Builder);
187 }
188 
189 void AArch64TargetInfo::getTargetDefinesARMV83A(const LangOptions &Opts,
190                                                 MacroBuilder &Builder) const {
191   Builder.defineMacro("__ARM_FEATURE_COMPLEX", "1");
192   Builder.defineMacro("__ARM_FEATURE_JCVT", "1");
193   // Also include the Armv8.2 defines
194   getTargetDefinesARMV82A(Opts, Builder);
195 }
196 
197 void AArch64TargetInfo::getTargetDefinesARMV84A(const LangOptions &Opts,
198                                                 MacroBuilder &Builder) const {
199   // Also include the Armv8.3 defines
200   getTargetDefinesARMV83A(Opts, Builder);
201 }
202 
203 void AArch64TargetInfo::getTargetDefinesARMV85A(const LangOptions &Opts,
204                                                 MacroBuilder &Builder) const {
205   Builder.defineMacro("__ARM_FEATURE_FRINT", "1");
206   // Also include the Armv8.4 defines
207   getTargetDefinesARMV84A(Opts, Builder);
208 }
209 
210 void AArch64TargetInfo::getTargetDefinesARMV86A(const LangOptions &Opts,
211                                                 MacroBuilder &Builder) const {
212   // Also include the Armv8.5 defines
213   // FIXME: Armv8.6 makes the following extensions mandatory:
214   // - __ARM_FEATURE_BF16
215   // - __ARM_FEATURE_MATMUL_INT8
216   // Handle them here.
217   getTargetDefinesARMV85A(Opts, Builder);
218 }
219 
220 void AArch64TargetInfo::getTargetDefinesARMV87A(const LangOptions &Opts,
221                                                 MacroBuilder &Builder) const {
222   // Also include the Armv8.6 defines
223   getTargetDefinesARMV86A(Opts, Builder);
224 }
225 
226 void AArch64TargetInfo::getTargetDefinesARMV9A(const LangOptions &Opts,
227                                                MacroBuilder &Builder) const {
228   // Armv9-A maps to Armv8.5-A
229   getTargetDefinesARMV85A(Opts, Builder);
230 }
231 
232 void AArch64TargetInfo::getTargetDefinesARMV91A(const LangOptions &Opts,
233                                                 MacroBuilder &Builder) const {
234   // Armv9.1-A maps to Armv8.6-A
235   getTargetDefinesARMV86A(Opts, Builder);
236 }
237 
238 void AArch64TargetInfo::getTargetDefinesARMV92A(const LangOptions &Opts,
239                                                 MacroBuilder &Builder) const {
240   // Armv9.2-A maps to Armv8.7-A
241   getTargetDefinesARMV87A(Opts, Builder);
242 }
243 
// Emit all AArch64 predefined macros: target identification, ACLE feature
// macros keyed off the flags computed in handleTargetFeatures(), and the
// architecture-version-specific sets via the getTargetDefinesARMV* chain.
void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                         MacroBuilder &Builder) const {
  // Target identification.
  Builder.defineMacro("__aarch64__");
  // For bare-metal.
  if (getTriple().getOS() == llvm::Triple::UnknownOS &&
      getTriple().isOSBinFormatELF())
    Builder.defineMacro("__ELF__");

  // Target properties.
  if (!getTriple().isOSWindows() && getTriple().isArch64Bit()) {
    Builder.defineMacro("_LP64");
    Builder.defineMacro("__LP64__");
  }

  // Expose the code model as __AARCH64_CMODEL_<NAME>__ (upper-cased);
  // "default" is treated as the small model.
  std::string CodeModel = getTargetOpts().CodeModel;
  if (CodeModel == "default")
    CodeModel = "small";
  for (char &c : CodeModel)
    c = toupper(c);
  Builder.defineMacro("__AARCH64_CMODEL_" + CodeModel + "__");

  // ACLE predefines. Many can only have one possible value on v8 AArch64.
  Builder.defineMacro("__ARM_ACLE", "200");
  Builder.defineMacro("__ARM_ARCH", getArchVersionString(ArchKind));
  Builder.defineMacro("__ARM_ARCH_PROFILE", "'" + getArchProfile() + "'");

  Builder.defineMacro("__ARM_64BIT_STATE", "1");
  Builder.defineMacro("__ARM_PCS_AAPCS64", "1");
  Builder.defineMacro("__ARM_ARCH_ISA_A64", "1");

  Builder.defineMacro("__ARM_FEATURE_CLZ", "1");
  Builder.defineMacro("__ARM_FEATURE_FMA", "1");
  Builder.defineMacro("__ARM_FEATURE_LDREX", "0xF");
  Builder.defineMacro("__ARM_FEATURE_IDIV", "1"); // As specified in ACLE
  Builder.defineMacro("__ARM_FEATURE_DIV");       // For backwards compatibility
  Builder.defineMacro("__ARM_FEATURE_NUMERIC_MAXMIN", "1");
  Builder.defineMacro("__ARM_FEATURE_DIRECTED_ROUNDING", "1");

  Builder.defineMacro("__ARM_ALIGN_MAX_STACK_PWR", "4");

  // 0xe implies support for half, single and double precision operations.
  Builder.defineMacro("__ARM_FP", "0xE");

  // PCS specifies this for SysV variants, which is all we support. Other ABIs
  // may choose __ARM_FP16_FORMAT_ALTERNATIVE.
  Builder.defineMacro("__ARM_FP16_FORMAT_IEEE", "1");
  Builder.defineMacro("__ARM_FP16_ARGS", "1");

  if (Opts.UnsafeFPMath)
    Builder.defineMacro("__ARM_FP_FAST", "1");

  // Report a 4-byte wchar_t when no explicit size was requested.
  Builder.defineMacro("__ARM_SIZEOF_WCHAR_T",
                      Twine(Opts.WCharSize ? Opts.WCharSize : 4));

  Builder.defineMacro("__ARM_SIZEOF_MINIMAL_ENUM", Opts.ShortEnums ? "1" : "4");

  if (FPU & NeonMode) {
    Builder.defineMacro("__ARM_NEON", "1");
    // 64-bit NEON supports half, single and double precision operations.
    Builder.defineMacro("__ARM_NEON_FP", "0xE");
  }

  if (FPU & SveMode)
    Builder.defineMacro("__ARM_FEATURE_SVE", "1");

  if (HasSVE2)
    Builder.defineMacro("__ARM_FEATURE_SVE2", "1");

  // The SVE2 sub-extensions are only reported together with SVE2 itself.
  if (HasSVE2 && HasSVE2AES)
    Builder.defineMacro("__ARM_FEATURE_SVE2_AES", "1");

  if (HasSVE2 && HasSVE2BitPerm)
    Builder.defineMacro("__ARM_FEATURE_SVE2_BITPERM", "1");

  if (HasSVE2 && HasSVE2SHA3)
    Builder.defineMacro("__ARM_FEATURE_SVE2_SHA3", "1");

  if (HasSVE2 && HasSVE2SM4)
    Builder.defineMacro("__ARM_FEATURE_SVE2_SM4", "1");

  if (HasCRC)
    Builder.defineMacro("__ARM_FEATURE_CRC32", "1");

  // The __ARM_FEATURE_CRYPTO is deprecated in favor of finer grained feature
  // macros for AES, SHA2, SHA3 and SM4
  if (HasAES && HasSHA2)
    Builder.defineMacro("__ARM_FEATURE_CRYPTO", "1");

  if (HasAES)
    Builder.defineMacro("__ARM_FEATURE_AES", "1");

  if (HasSHA2)
    Builder.defineMacro("__ARM_FEATURE_SHA2", "1");

  if (HasSHA3) {
    Builder.defineMacro("__ARM_FEATURE_SHA3", "1");
    Builder.defineMacro("__ARM_FEATURE_SHA512", "1");
  }

  if (HasSM4) {
    Builder.defineMacro("__ARM_FEATURE_SM3", "1");
    Builder.defineMacro("__ARM_FEATURE_SM4", "1");
  }

  if (HasUnaligned)
    Builder.defineMacro("__ARM_FEATURE_UNALIGNED", "1");

  if ((FPU & NeonMode) && HasFullFP16)
    Builder.defineMacro("__ARM_FEATURE_FP16_VECTOR_ARITHMETIC", "1");
  if (HasFullFP16)
   Builder.defineMacro("__ARM_FEATURE_FP16_SCALAR_ARITHMETIC", "1");

  if (HasDotProd)
    Builder.defineMacro("__ARM_FEATURE_DOTPROD", "1");

  if (HasMTE)
    Builder.defineMacro("__ARM_FEATURE_MEMORY_TAGGING", "1");

  if (HasTME)
    Builder.defineMacro("__ARM_FEATURE_TME", "1");

  if (HasMatMul)
    Builder.defineMacro("__ARM_FEATURE_MATMUL_INT8", "1");

  if (HasLSE)
    Builder.defineMacro("__ARM_FEATURE_ATOMICS", "1");

  if (HasBFloat16) {
    Builder.defineMacro("__ARM_FEATURE_BF16", "1");
    Builder.defineMacro("__ARM_FEATURE_BF16_VECTOR_ARITHMETIC", "1");
    Builder.defineMacro("__ARM_BF16_FORMAT_ALTERNATIVE", "1");
    Builder.defineMacro("__ARM_FEATURE_BF16_SCALAR_ARITHMETIC", "1");
  }

  if ((FPU & SveMode) && HasBFloat16) {
    Builder.defineMacro("__ARM_FEATURE_SVE_BF16", "1");
  }

  if ((FPU & SveMode) && HasMatmulFP64)
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP64", "1");

  if ((FPU & SveMode) && HasMatmulFP32)
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP32", "1");

  if ((FPU & SveMode) && HasMatMul)
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_INT8", "1");

  if ((FPU & NeonMode) && HasFP16FML)
    Builder.defineMacro("__ARM_FEATURE_FP16_FML", "1");

  if (Opts.hasSignReturnAddress()) {
    // Bitmask:
    // 0: Protection using the A key
    // 1: Protection using the B key
    // 2: Protection including leaf functions
    unsigned Value = 0;

    if (Opts.isSignReturnAddressWithAKey())
      Value |= (1 << 0);
    else
      Value |= (1 << 1);

    if (Opts.isSignReturnAddressScopeAll())
      Value |= (1 << 2);

    Builder.defineMacro("__ARM_FEATURE_PAC_DEFAULT", std::to_string(Value));
  }

  if (Opts.BranchTargetEnforcement)
    Builder.defineMacro("__ARM_FEATURE_BTI_DEFAULT", "1");

  if (HasLS64)
    Builder.defineMacro("__ARM_FEATURE_LS64", "1");

  if (HasRandGen)
    Builder.defineMacro("__ARM_FEATURE_RNG", "1");

  // Architecture-version-specific predefines; each handler chains down to
  // the versions it is based on.
  switch (ArchKind) {
  default:
    break;
  case llvm::AArch64::ArchKind::ARMV8_1A:
    getTargetDefinesARMV81A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_2A:
    getTargetDefinesARMV82A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_3A:
    getTargetDefinesARMV83A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_4A:
    getTargetDefinesARMV84A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_5A:
    getTargetDefinesARMV85A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_6A:
    getTargetDefinesARMV86A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_7A:
    getTargetDefinesARMV87A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV9A:
    getTargetDefinesARMV9A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV9_1A:
    getTargetDefinesARMV91A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV9_2A:
    getTargetDefinesARMV92A(Opts, Builder);
    break;
  }

  // All of the __sync_(bool|val)_compare_and_swap_(1|2|4|8) builtins work.
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");

  // A fixed vscale (min == max) pins the SVE vector length, enabling the
  // fixed-length SVE vector operators (128 bits per unit of vscale).
  if (Opts.VScaleMin && Opts.VScaleMin == Opts.VScaleMax) {
    Builder.defineMacro("__ARM_FEATURE_SVE_BITS", Twine(Opts.VScaleMin * 128));
    Builder.defineMacro("__ARM_FEATURE_SVE_VECTOR_OPERATORS");
  }
}
468 
// Return the target-specific builtin table; its length is the span of the
// AArch64 builtin ID range.
ArrayRef<Builtin::Info> AArch64TargetInfo::getTargetBuiltins() const {
  return llvm::makeArrayRef(BuiltinInfo, clang::AArch64::LastTSBuiltin -
                                             Builtin::FirstTSBuiltin);
}
473 
474 Optional<std::pair<unsigned, unsigned>>
475 AArch64TargetInfo::getVScaleRange(const LangOptions &LangOpts) const {
476   if (LangOpts.VScaleMin || LangOpts.VScaleMax)
477     return std::pair<unsigned, unsigned>(LangOpts.VScaleMin,
478                                          LangOpts.VScaleMax);
479   if (hasFeature("sve"))
480     return std::pair<unsigned, unsigned>(0, 16);
481   return None;
482 }
483 
484 bool AArch64TargetInfo::hasFeature(StringRef Feature) const {
485   return Feature == "aarch64" || Feature == "arm64" || Feature == "arm" ||
486          (Feature == "neon" && (FPU & NeonMode)) ||
487          ((Feature == "sve" || Feature == "sve2" || Feature == "sve2-bitperm" ||
488            Feature == "sve2-aes" || Feature == "sve2-sha3" ||
489            Feature == "sve2-sm4" || Feature == "f64mm" || Feature == "f32mm" ||
490            Feature == "i8mm" || Feature == "bf16") &&
491           (FPU & SveMode)) ||
492          (Feature == "ls64" && HasLS64);
493 }
494 
// Translate the driver's "+feature" strings into the boolean flags and the
// ArchKind that the rest of this class (notably getTargetDefines) consumes.
// All flags are reset first so stale state from a previous call cannot leak.
bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
                                             DiagnosticsEngine &Diags) {
  FPU = FPUMode;
  HasCRC = false;
  HasCrypto = false;
  HasAES = false;
  HasSHA2 = false;
  HasSHA3 = false;
  HasSM4 = false;
  HasUnaligned = true;  // unaligned access is on unless +strict-align appears
  HasFullFP16 = false;
  HasDotProd = false;
  HasFP16FML = false;
  HasMTE = false;
  HasTME = false;
  HasLS64 = false;
  HasRandGen = false;
  HasMatMul = false;
  HasBFloat16 = false;
  HasSVE2 = false;
  HasSVE2AES = false;
  HasSVE2SHA3 = false;
  HasSVE2SM4 = false;
  HasSVE2BitPerm = false;
  HasMatmulFP64 = false;
  HasMatmulFP32 = false;
  HasLSE = false;

  ArchKind = llvm::AArch64::ArchKind::INVALID;

  for (const auto &Feature : Features) {
    if (Feature == "+neon")
      FPU |= NeonMode;
    // Every SVE/SVE2 feature implies SVE mode and full FP16 support; the
    // SVE2 sub-extensions additionally imply SVE2 itself.
    if (Feature == "+sve") {
      FPU |= SveMode;
      HasFullFP16 = 1;
    }
    if (Feature == "+sve2") {
      FPU |= SveMode;
      HasFullFP16 = 1;
      HasSVE2 = 1;
    }
    if (Feature == "+sve2-aes") {
      FPU |= SveMode;
      HasFullFP16 = 1;
      HasSVE2 = 1;
      HasSVE2AES = 1;
    }
    if (Feature == "+sve2-sha3") {
      FPU |= SveMode;
      HasFullFP16 = 1;
      HasSVE2 = 1;
      HasSVE2SHA3 = 1;
    }
    if (Feature == "+sve2-sm4") {
      FPU |= SveMode;
      HasFullFP16 = 1;
      HasSVE2 = 1;
      HasSVE2SM4 = 1;
    }
    if (Feature == "+sve2-bitperm") {
      FPU |= SveMode;
      HasFullFP16 = 1;
      HasSVE2 = 1;
      HasSVE2BitPerm = 1;
    }
    if (Feature == "+f32mm") {
      FPU |= SveMode;
      HasMatmulFP32 = true;
    }
    if (Feature == "+f64mm") {
      FPU |= SveMode;
      HasMatmulFP64 = true;
    }
    if (Feature == "+crc")
      HasCRC = true;
    if (Feature == "+crypto")
      HasCrypto = true;
    if (Feature == "+aes")
      HasAES = true;
    if (Feature == "+sha2")
      HasSHA2 = true;
    // SHA3 implies SHA2.
    if (Feature == "+sha3") {
      HasSHA2 = true;
      HasSHA3 = true;
    }
    if (Feature == "+sm4")
      HasSM4 = true;
    if (Feature == "+strict-align")
      HasUnaligned = false;
    // Architecture selection: the last "+vN.Ma" feature seen wins.
    if (Feature == "+v8a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8A;
    if (Feature == "+v8.1a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8_1A;
    if (Feature == "+v8.2a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8_2A;
    if (Feature == "+v8.3a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8_3A;
    if (Feature == "+v8.4a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8_4A;
    if (Feature == "+v8.5a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8_5A;
    if (Feature == "+v8.6a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8_6A;
    if (Feature == "+v8.7a")
      ArchKind = llvm::AArch64::ArchKind::ARMV8_7A;
    if (Feature == "+v9a")
      ArchKind = llvm::AArch64::ArchKind::ARMV9A;
    if (Feature == "+v9.1a")
      ArchKind = llvm::AArch64::ArchKind::ARMV9_1A;
    if (Feature == "+v9.2a")
      ArchKind = llvm::AArch64::ArchKind::ARMV9_2A;
    if (Feature == "+v8r")
      ArchKind = llvm::AArch64::ArchKind::ARMV8R;
    if (Feature == "+fullfp16")
      HasFullFP16 = true;
    if (Feature == "+dotprod")
      HasDotProd = true;
    if (Feature == "+fp16fml")
      HasFP16FML = true;
    if (Feature == "+mte")
      HasMTE = true;
    if (Feature == "+tme")
      HasTME = true;
    if (Feature == "+pauth")
      HasPAuth = true;
    if (Feature == "+i8mm")
      HasMatMul = true;
    if (Feature == "+bf16")
      HasBFloat16 = true;
    if (Feature == "+lse")
      HasLSE = true;
    if (Feature == "+ls64")
      HasLS64 = true;
    if (Feature == "+rand")
      HasRandGen = true;
    if (Feature == "+flagm")
      HasFlagM = true;
  }

  // The data layout may depend on the features processed above.
  setDataLayout();

  return true;
}
639 
640 TargetInfo::CallingConvCheckResult
641 AArch64TargetInfo::checkCallingConvention(CallingConv CC) const {
642   switch (CC) {
643   case CC_C:
644   case CC_Swift:
645   case CC_SwiftAsync:
646   case CC_PreserveMost:
647   case CC_PreserveAll:
648   case CC_OpenCLKernel:
649   case CC_AArch64VectorCall:
650   case CC_Win64:
651     return CCCR_OK;
652   default:
653     return CCCR_Warning;
654   }
655 }
656 
657 bool AArch64TargetInfo::isCLZForZeroUndef() const { return false; }
658 
659 TargetInfo::BuiltinVaListKind AArch64TargetInfo::getBuiltinVaListKind() const {
660   return TargetInfo::AArch64ABIBuiltinVaList;
661 }
662 
// Register names recognized in GCC-style inline assembly, grouped by
// register class.
const char *const AArch64TargetInfo::GCCRegNames[] = {
    // 32-bit Integer registers
    "w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", "w8", "w9", "w10", "w11",
    "w12", "w13", "w14", "w15", "w16", "w17", "w18", "w19", "w20", "w21", "w22",
    "w23", "w24", "w25", "w26", "w27", "w28", "w29", "w30", "wsp",

    // 64-bit Integer registers
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11",
    "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22",
    "x23", "x24", "x25", "x26", "x27", "x28", "fp", "lr", "sp",

    // 32-bit floating point registers
    "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10", "s11",
    "s12", "s13", "s14", "s15", "s16", "s17", "s18", "s19", "s20", "s21", "s22",
    "s23", "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",

    // 64-bit floating point registers
    "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10", "d11",
    "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20", "d21", "d22",
    "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",

    // Neon vector registers
    "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11",
    "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22",
    "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",

    // SVE vector registers
    "z0",  "z1",  "z2",  "z3",  "z4",  "z5",  "z6",  "z7",  "z8",  "z9",  "z10",
    "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21",
    "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31",

    // SVE predicate registers
    "p0",  "p1",  "p2",  "p3",  "p4",  "p5",  "p6",  "p7",  "p8",  "p9",  "p10",
    "p11", "p12", "p13", "p14", "p15"
};
698 
699 ArrayRef<const char *> AArch64TargetInfo::getGCCRegNames() const {
700   return llvm::makeArrayRef(GCCRegNames);
701 }
702 
// Alternate register spellings accepted in inline assembly, each mapped to
// the canonical name from GCCRegNames.
const TargetInfo::GCCRegAlias AArch64TargetInfo::GCCRegAliases[] = {
    {{"w31"}, "wsp"},
    {{"x31"}, "sp"},
    // GCC rN registers are aliases of xN registers.
    {{"r0"}, "x0"},
    {{"r1"}, "x1"},
    {{"r2"}, "x2"},
    {{"r3"}, "x3"},
    {{"r4"}, "x4"},
    {{"r5"}, "x5"},
    {{"r6"}, "x6"},
    {{"r7"}, "x7"},
    {{"r8"}, "x8"},
    {{"r9"}, "x9"},
    {{"r10"}, "x10"},
    {{"r11"}, "x11"},
    {{"r12"}, "x12"},
    {{"r13"}, "x13"},
    {{"r14"}, "x14"},
    {{"r15"}, "x15"},
    {{"r16"}, "x16"},
    {{"r17"}, "x17"},
    {{"r18"}, "x18"},
    {{"r19"}, "x19"},
    {{"r20"}, "x20"},
    {{"r21"}, "x21"},
    {{"r22"}, "x22"},
    {{"r23"}, "x23"},
    {{"r24"}, "x24"},
    {{"r25"}, "x25"},
    {{"r26"}, "x26"},
    {{"r27"}, "x27"},
    {{"r28"}, "x28"},
    {{"r29", "x29"}, "fp"},
    {{"r30", "x30"}, "lr"},
    // The S/D/Q and W/X registers overlap, but aren't really aliases; we
    // don't want to substitute one of these for a different-sized one.
};
741 
742 ArrayRef<TargetInfo::GCCRegAlias> AArch64TargetInfo::getGCCRegAliases() const {
743   return llvm::makeArrayRef(GCCRegAliases);
744 }
745 
746 bool AArch64TargetInfo::validateAsmConstraint(
747     const char *&Name, TargetInfo::ConstraintInfo &Info) const {
748   switch (*Name) {
749   default:
750     return false;
751   case 'w': // Floating point and SIMD registers (V0-V31)
752     Info.setAllowsRegister();
753     return true;
754   case 'I': // Constant that can be used with an ADD instruction
755   case 'J': // Constant that can be used with a SUB instruction
756   case 'K': // Constant that can be used with a 32-bit logical instruction
757   case 'L': // Constant that can be used with a 64-bit logical instruction
758   case 'M': // Constant that can be used as a 32-bit MOV immediate
759   case 'N': // Constant that can be used as a 64-bit MOV immediate
760   case 'Y': // Floating point constant zero
761   case 'Z': // Integer constant zero
762     return true;
763   case 'Q': // A memory reference with base register and no offset
764     Info.setAllowsMemory();
765     return true;
766   case 'S': // A symbolic address
767     Info.setAllowsRegister();
768     return true;
769   case 'U':
770     if (Name[1] == 'p' && (Name[2] == 'l' || Name[2] == 'a')) {
771       // SVE predicate registers ("Upa"=P0-15, "Upl"=P0-P7)
772       Info.setAllowsRegister();
773       Name += 2;
774       return true;
775     }
776     // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes.
777     // Utf: A memory address suitable for ldp/stp in TF mode.
778     // Usa: An absolute symbolic address.
779     // Ush: The high part (bits 32:12) of a pc-relative symbolic address.
780 
781     // Better to return an error saying that it's an unrecognised constraint
782     // even if this is a valid constraint in gcc.
783     return false;
784   case 'z': // Zero register, wzr or xzr
785     Info.setAllowsRegister();
786     return true;
787   case 'x': // Floating point and SIMD registers (V0-V15)
788     Info.setAllowsRegister();
789     return true;
790   case 'y': // SVE registers (V0-V7)
791     Info.setAllowsRegister();
792     return true;
793   }
794   return false;
795 }
796 
// Check whether an inline-asm operand modifier is sensible for the given
// constraint and operand size; on rejection, SuggestedModifier is filled
// with the modifier the user probably wanted.
bool AArch64TargetInfo::validateConstraintModifier(
    StringRef Constraint, char Modifier, unsigned Size,
    std::string &SuggestedModifier) const {
  // Strip off constraint modifiers.
  while (Constraint[0] == '=' || Constraint[0] == '+' || Constraint[0] == '&')
    Constraint = Constraint.substr(1);

  switch (Constraint[0]) {
  default:
    // Non-register constraints: accept any modifier.
    return true;
  case 'z':
  case 'r': {
    switch (Modifier) {
    case 'x':
    case 'w':
      // For now assume that the person knows what they're
      // doing with the modifier.
      return true;
    default:
      // By default an 'r' constraint will be in the 'x'
      // registers.
      if (Size == 64)
        return true;

      // 512-bit operands are only valid with the LS64 extension.
      if (Size == 512)
        return HasLS64;

      // Smaller operands belong in the 32-bit 'w' registers.
      SuggestedModifier = "w";
      return false;
    }
  }
  }
}
830 
831 const char *AArch64TargetInfo::getClobbers() const { return ""; }
832 
833 int AArch64TargetInfo::getEHDataRegisterNumber(unsigned RegNo) const {
834   if (RegNo == 0)
835     return 0;
836   if (RegNo == 1)
837     return 1;
838   return -1;
839 }
840 
841 bool AArch64TargetInfo::hasInt128Type() const { return true; }
842 
// Little-endian AArch64: all configuration is inherited from the base class;
// endianness shows up only in setDataLayout() and getTargetDefines().
AArch64leTargetInfo::AArch64leTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}
846 
847 void AArch64leTargetInfo::setDataLayout() {
848   if (getTriple().isOSBinFormatMachO()) {
849     if(getTriple().isArch32Bit())
850       resetDataLayout("e-m:o-p:32:32-i64:64-i128:128-n32:64-S128", "_");
851     else
852       resetDataLayout("e-m:o-i64:64-i128:128-n32:64-S128", "_");
853   } else
854     resetDataLayout("e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
855 }
856 
857 void AArch64leTargetInfo::getTargetDefines(const LangOptions &Opts,
858                                            MacroBuilder &Builder) const {
859   Builder.defineMacro("__AARCH64EL__");
860   AArch64TargetInfo::getTargetDefines(Opts, Builder);
861 }
862 
// Big-endian AArch64: all configuration is inherited from the base class;
// endianness shows up only in setDataLayout() and getTargetDefines().
AArch64beTargetInfo::AArch64beTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}
866 
867 void AArch64beTargetInfo::getTargetDefines(const LangOptions &Opts,
868                                            MacroBuilder &Builder) const {
869   Builder.defineMacro("__AARCH64EB__");
870   Builder.defineMacro("__AARCH_BIG_ENDIAN");
871   Builder.defineMacro("__ARM_BIG_ENDIAN");
872   AArch64TargetInfo::getTargetDefines(Opts, Builder);
873 }
874 
// Big-endian data layout (leading "E").  The assertion documents that
// Mach-O is never combined with big-endian AArch64 here.
void AArch64beTargetInfo::setDataLayout() {
  assert(!getTriple().isOSBinFormatMachO());
  resetDataLayout("E-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
}
879 
// Common base for Windows-on-ARM64 targets (MSVC and MinGW flavors).
// Overrides the type model inherited from AArch64leTargetInfo with the
// Windows LLP64 conventions.
WindowsARM64TargetInfo::WindowsARM64TargetInfo(const llvm::Triple &Triple,
                                               const TargetOptions &Opts)
    : WindowsTargetInfo<AArch64leTargetInfo>(Triple, Opts), Triple(Triple) {

  // This is an LLP64 platform.
  // int:4, long:4, long long:8, long double:8.
  IntWidth = IntAlign = 32;
  LongWidth = LongAlign = 32;
  DoubleAlign = LongLongAlign = 64;
  // No extended-precision long double on Windows: plain IEEE double.
  LongDoubleWidth = LongDoubleAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();
  // With 32-bit long, all pointer-sized/64-bit typedefs map to long long.
  IntMaxType = SignedLongLong;
  Int64Type = SignedLongLong;
  SizeType = UnsignedLongLong;
  PtrDiffType = SignedLongLong;
  IntPtrType = SignedLongLong;
}
897 
898 void WindowsARM64TargetInfo::setDataLayout() {
899   resetDataLayout(Triple.isOSBinFormatMachO()
900                       ? "e-m:o-i64:64-i128:128-n32:64-S128"
901                       : "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128",
902                   Triple.isOSBinFormatMachO() ? "_" : "");
903 }
904 
// Windows on ARM64 uses a plain char* va_list rather than the AAPCS
// aggregate form.
TargetInfo::BuiltinVaListKind
WindowsARM64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::CharPtrBuiltinVaList;
}
909 
910 TargetInfo::CallingConvCheckResult
911 WindowsARM64TargetInfo::checkCallingConvention(CallingConv CC) const {
912   switch (CC) {
913   case CC_X86StdCall:
914   case CC_X86ThisCall:
915   case CC_X86FastCall:
916   case CC_X86VectorCall:
917     return CCCR_Ignore;
918   case CC_C:
919   case CC_OpenCLKernel:
920   case CC_PreserveMost:
921   case CC_PreserveAll:
922   case CC_Swift:
923   case CC_SwiftAsync:
924   case CC_Win64:
925     return CCCR_OK;
926   default:
927     return CCCR_Warning;
928   }
929 }
930 
// MSVC-compatible ARM64 target: Windows base configuration plus the
// Microsoft C++ ABI.
MicrosoftARM64TargetInfo::MicrosoftARM64TargetInfo(const llvm::Triple &Triple,
                                                   const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  TheCXXABI.set(TargetCXXABI::Microsoft);
}
936 
// Emit the common Windows/AArch64 predefines, then the MSVC architecture
// macro _M_ARM64.
void MicrosoftARM64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  WindowsARM64TargetInfo::getTargetDefines(Opts, Builder);
  Builder.defineMacro("_M_ARM64", "1");
}
942 
// MSVC-compatible ARM64 always uses the Microsoft Win64 calling-convention
// rules; the Clang-ABI-compat setting is deliberately ignored here.
TargetInfo::CallingConvKind
MicrosoftARM64TargetInfo::getCallingConvKind(bool ClangABICompat4) const {
  return CCK_MicrosoftWin64;
}
947 
948 unsigned MicrosoftARM64TargetInfo::getMinGlobalAlign(uint64_t TypeSize) const {
949   unsigned Align = WindowsARM64TargetInfo::getMinGlobalAlign(TypeSize);
950 
951   // MSVC does size based alignment for arm64 based on alignment section in
952   // below document, replicate that to keep alignment consistent with object
953   // files compiled by MSVC.
954   // https://docs.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions
955   if (TypeSize >= 512) {              // TypeSize >= 64 bytes
956     Align = std::max(Align, 128u);    // align type at least 16 bytes
957   } else if (TypeSize >= 64) {        // TypeSize >= 8 bytes
958     Align = std::max(Align, 64u);     // align type at least 8 butes
959   } else if (TypeSize >= 16) {        // TypeSize >= 2 bytes
960     Align = std::max(Align, 32u);     // align type at least 4 bytes
961   }
962   return Align;
963 }
964 
// MinGW-flavored ARM64 target: Windows base configuration but with the
// generic (Itanium-family) AArch64 C++ ABI instead of the Microsoft one.
MinGWARM64TargetInfo::MinGWARM64TargetInfo(const llvm::Triple &Triple,
                                           const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  TheCXXABI.set(TargetCXXABI::GenericAArch64);
}
970 
// Darwin (macOS/iOS/watchOS) AArch64 target.  Adjusts the type model and
// C++ ABI on top of the generic little-endian AArch64 configuration.
DarwinAArch64TargetInfo::DarwinAArch64TargetInfo(const llvm::Triple &Triple,
                                                 const TargetOptions &Opts)
    : DarwinTargetInfo<AArch64leTargetInfo>(Triple, Opts) {
  Int64Type = SignedLongLong;
  // Even with 32-bit pointers, intmax_t stays 64-bit.
  if (getTriple().isArch32Bit())
    IntMaxType = SignedLongLong;

  WCharType = SignedInt;
  UseSignedCharForObjCBool = false;

  // Darwin AArch64 has no extended long double: it is IEEE double.
  LongDoubleWidth = LongDoubleAlign = SuitableAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();

  UseZeroLengthBitfieldAlignment = false;

  if (getTriple().isArch32Bit()) {
    // 32-bit pointer variant uses legacy ARM bitfield layout rules and the
    // WatchOS C++ ABI.  NOTE(review): this assumes a 32-bit Darwin AArch64
    // triple is always arm64_32/watchOS — confirm against driver triple
    // handling.
    UseBitFieldTypeAlignment = false;
    ZeroLengthBitfieldBoundary = 32;
    UseZeroLengthBitfieldAlignment = true;
    TheCXXABI.set(TargetCXXABI::WatchOS);
  } else
    TheCXXABI.set(TargetCXXABI::AppleARM64);
}
994 
995 void DarwinAArch64TargetInfo::getOSDefines(const LangOptions &Opts,
996                                            const llvm::Triple &Triple,
997                                            MacroBuilder &Builder) const {
998   Builder.defineMacro("__AARCH64_SIMD__");
999   if (Triple.isArch32Bit())
1000     Builder.defineMacro("__ARM64_ARCH_8_32__");
1001   else
1002     Builder.defineMacro("__ARM64_ARCH_8__");
1003   Builder.defineMacro("__ARM_NEON__");
1004   Builder.defineMacro("__LITTLE_ENDIAN__");
1005   Builder.defineMacro("__REGISTER_PREFIX__", "");
1006   Builder.defineMacro("__arm64", "1");
1007   Builder.defineMacro("__arm64__", "1");
1008 
1009   if (Triple.isArm64e())
1010     Builder.defineMacro("__arm64e__", "1");
1011 
1012   getDarwinDefines(Builder, Opts, Triple, PlatformName, PlatformMinVersion);
1013 }
1014 
// Darwin AArch64 uses a plain char* va_list rather than the AAPCS
// aggregate form.
TargetInfo::BuiltinVaListKind
DarwinAArch64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::CharPtrBuiltinVaList;
}
1019 
// 64-bit RenderScript is aarch64: force the architecture component of the
// triple to "aarch64" while preserving the incoming vendor, OS, and
// environment.
RenderScript64TargetInfo::RenderScript64TargetInfo(const llvm::Triple &Triple,
                                                   const TargetOptions &Opts)
    : AArch64leTargetInfo(llvm::Triple("aarch64", Triple.getVendorName(),
                                       Triple.getOSName(),
                                       Triple.getEnvironmentName()),
                          Opts) {
  IsRenderScriptTarget = true;
}
1029 
// Emit the RenderScript marker macro, then the regular little-endian
// AArch64 predefines.
void RenderScript64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__RENDERSCRIPT__");
  AArch64leTargetInfo::getTargetDefines(Opts, Builder);
}
1035