xref: /llvm-project/clang/lib/Sema/SemaARM.cpp (revision a7f4044bd01919df2bf2204d203ee0378e2e9fb2)
1 //===------ SemaARM.cpp ---------- ARM target-specific routines -----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 //  This file implements semantic analysis functions specific to ARM.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "clang/Sema/SemaARM.h"
14 #include "clang/Basic/DiagnosticSema.h"
15 #include "clang/Basic/TargetBuiltins.h"
16 #include "clang/Basic/TargetInfo.h"
17 #include "clang/Sema/Initialization.h"
18 #include "clang/Sema/ParsedAttr.h"
19 #include "clang/Sema/Sema.h"
20 
21 namespace clang {
22 
23 SemaARM::SemaARM(Sema &S) : SemaBase(S) {}
24 
25 /// BuiltinARMMemoryTaggingCall - Handle calls of memory tagging extensions
26 bool SemaARM::BuiltinARMMemoryTaggingCall(unsigned BuiltinID,
27                                           CallExpr *TheCall) {
28   ASTContext &Context = getASTContext();
29 
30   if (BuiltinID == AArch64::BI__builtin_arm_irg) {
31     if (SemaRef.checkArgCount(TheCall, 2))
32       return true;
33     Expr *Arg0 = TheCall->getArg(0);
34     Expr *Arg1 = TheCall->getArg(1);
35 
36     ExprResult FirstArg = SemaRef.DefaultFunctionArrayLvalueConversion(Arg0);
37     if (FirstArg.isInvalid())
38       return true;
39     QualType FirstArgType = FirstArg.get()->getType();
40     if (!FirstArgType->isAnyPointerType())
41       return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
42              << "first" << FirstArgType << Arg0->getSourceRange();
43     TheCall->setArg(0, FirstArg.get());
44 
45     ExprResult SecArg = SemaRef.DefaultLvalueConversion(Arg1);
46     if (SecArg.isInvalid())
47       return true;
48     QualType SecArgType = SecArg.get()->getType();
49     if (!SecArgType->isIntegerType())
50       return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer)
51              << "second" << SecArgType << Arg1->getSourceRange();
52 
53     // Derive the return type from the pointer argument.
54     TheCall->setType(FirstArgType);
55     return false;
56   }
57 
58   if (BuiltinID == AArch64::BI__builtin_arm_addg) {
59     if (SemaRef.checkArgCount(TheCall, 2))
60       return true;
61 
62     Expr *Arg0 = TheCall->getArg(0);
63     ExprResult FirstArg = SemaRef.DefaultFunctionArrayLvalueConversion(Arg0);
64     if (FirstArg.isInvalid())
65       return true;
66     QualType FirstArgType = FirstArg.get()->getType();
67     if (!FirstArgType->isAnyPointerType())
68       return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
69              << "first" << FirstArgType << Arg0->getSourceRange();
70     TheCall->setArg(0, FirstArg.get());
71 
72     // Derive the return type from the pointer argument.
73     TheCall->setType(FirstArgType);
74 
75     // Second arg must be a constant in the range [0,15].
76     return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 15);
77   }
78 
79   if (BuiltinID == AArch64::BI__builtin_arm_gmi) {
80     if (SemaRef.checkArgCount(TheCall, 2))
81       return true;
82     Expr *Arg0 = TheCall->getArg(0);
83     Expr *Arg1 = TheCall->getArg(1);
84 
85     ExprResult FirstArg = SemaRef.DefaultFunctionArrayLvalueConversion(Arg0);
86     if (FirstArg.isInvalid())
87       return true;
88     QualType FirstArgType = FirstArg.get()->getType();
89     if (!FirstArgType->isAnyPointerType())
90       return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
91              << "first" << FirstArgType << Arg0->getSourceRange();
92 
93     QualType SecArgType = Arg1->getType();
94     if (!SecArgType->isIntegerType())
95       return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer)
96              << "second" << SecArgType << Arg1->getSourceRange();
97     TheCall->setType(Context.IntTy);
98     return false;
99   }
100 
101   if (BuiltinID == AArch64::BI__builtin_arm_ldg ||
102       BuiltinID == AArch64::BI__builtin_arm_stg) {
103     if (SemaRef.checkArgCount(TheCall, 1))
104       return true;
105     Expr *Arg0 = TheCall->getArg(0);
106     ExprResult FirstArg = SemaRef.DefaultFunctionArrayLvalueConversion(Arg0);
107     if (FirstArg.isInvalid())
108       return true;
109 
110     QualType FirstArgType = FirstArg.get()->getType();
111     if (!FirstArgType->isAnyPointerType())
112       return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
113              << "first" << FirstArgType << Arg0->getSourceRange();
114     TheCall->setArg(0, FirstArg.get());
115 
116     // Derive the return type from the pointer argument.
117     if (BuiltinID == AArch64::BI__builtin_arm_ldg)
118       TheCall->setType(FirstArgType);
119     return false;
120   }
121 
122   if (BuiltinID == AArch64::BI__builtin_arm_subp) {
123     Expr *ArgA = TheCall->getArg(0);
124     Expr *ArgB = TheCall->getArg(1);
125 
126     ExprResult ArgExprA = SemaRef.DefaultFunctionArrayLvalueConversion(ArgA);
127     ExprResult ArgExprB = SemaRef.DefaultFunctionArrayLvalueConversion(ArgB);
128 
129     if (ArgExprA.isInvalid() || ArgExprB.isInvalid())
130       return true;
131 
132     QualType ArgTypeA = ArgExprA.get()->getType();
133     QualType ArgTypeB = ArgExprB.get()->getType();
134 
135     auto isNull = [&](Expr *E) -> bool {
136       return E->isNullPointerConstant(Context,
137                                       Expr::NPC_ValueDependentIsNotNull);
138     };
139 
140     // Each argument should be either a pointer or a null pointer constant.
141     if (!ArgTypeA->isAnyPointerType() && !isNull(ArgA))
142       return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer)
143              << "first" << ArgTypeA << ArgA->getSourceRange();
144 
145     if (!ArgTypeB->isAnyPointerType() && !isNull(ArgB))
146       return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer)
147              << "second" << ArgTypeB << ArgB->getSourceRange();
148 
149     // Ensure the pointee types are compatible.
150     if (ArgTypeA->isAnyPointerType() && !isNull(ArgA) &&
151         ArgTypeB->isAnyPointerType() && !isNull(ArgB)) {
152       QualType pointeeA = ArgTypeA->getPointeeType();
153       QualType pointeeB = ArgTypeB->getPointeeType();
154       if (!Context.typesAreCompatible(
155               Context.getCanonicalType(pointeeA).getUnqualifiedType(),
156               Context.getCanonicalType(pointeeB).getUnqualifiedType())) {
157         return Diag(TheCall->getBeginLoc(),
158                     diag::err_typecheck_sub_ptr_compatible)
159                << ArgTypeA << ArgTypeB << ArgA->getSourceRange()
160                << ArgB->getSourceRange();
161       }
162     }
163 
164     // At least one argument should be a pointer type.
165     if (!ArgTypeA->isAnyPointerType() && !ArgTypeB->isAnyPointerType())
166       return Diag(TheCall->getBeginLoc(), diag::err_memtag_any2arg_pointer)
167              << ArgTypeA << ArgTypeB << ArgA->getSourceRange();
168 
169     if (isNull(ArgA)) // adopt type of the other pointer
170       ArgExprA =
171           SemaRef.ImpCastExprToType(ArgExprA.get(), ArgTypeB, CK_NullToPointer);
172 
173     if (isNull(ArgB))
174       ArgExprB =
175           SemaRef.ImpCastExprToType(ArgExprB.get(), ArgTypeA, CK_NullToPointer);
176 
177     TheCall->setArg(0, ArgExprA.get());
178     TheCall->setArg(1, ArgExprB.get());
179     TheCall->setType(Context.LongLongTy);
180     return false;
181   }
182   assert(false && "Unhandled ARM MTE intrinsic");
183   return true;
184 }
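
// Illustrative sketch (not part of the checker): user code that exercises the
// checks above, assuming an AArch64 target with MTE enabled (e.g. compiled
// with -march=armv8.5-a+memtag). Variable names are hypothetical; argument and
// result types follow directly from the validation logic above.
//
//   int *p = ...;
//   int *tagged = __builtin_arm_irg(p, 0);        // pointer + integer mask; result
//                                                 // has the pointer argument's type
//   int *next = __builtin_arm_addg(tagged, 4);    // second arg: constant in [0,15]
//   int excluded = __builtin_arm_gmi(tagged, 0);  // pointer + integer; returns int
//   __builtin_arm_stg(tagged);                    // single pointer argument
//   int *loaded = __builtin_arm_ldg(tagged);      // result keeps the pointer type
//   long long d = __builtin_arm_subp(tagged, p);  // two pointers (or null); long long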
185 
186 /// BuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr
187 /// TheCall is an ARM/AArch64 special register string literal.
188 bool SemaARM::BuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
189                                    int ArgNum, unsigned ExpectedFieldNum,
190                                    bool AllowName) {
191   bool IsARMBuiltin = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
192                       BuiltinID == ARM::BI__builtin_arm_wsr64 ||
193                       BuiltinID == ARM::BI__builtin_arm_rsr ||
194                       BuiltinID == ARM::BI__builtin_arm_rsrp ||
195                       BuiltinID == ARM::BI__builtin_arm_wsr ||
196                       BuiltinID == ARM::BI__builtin_arm_wsrp;
197   bool IsAArch64Builtin = BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
198                           BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
199                           BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
200                           BuiltinID == AArch64::BI__builtin_arm_wsr128 ||
201                           BuiltinID == AArch64::BI__builtin_arm_rsr ||
202                           BuiltinID == AArch64::BI__builtin_arm_rsrp ||
203                           BuiltinID == AArch64::BI__builtin_arm_wsr ||
204                           BuiltinID == AArch64::BI__builtin_arm_wsrp;
205   assert((IsARMBuiltin || IsAArch64Builtin) && "Unexpected ARM builtin.");
206 
207   // We can't check the value of a dependent argument.
208   Expr *Arg = TheCall->getArg(ArgNum);
209   if (Arg->isTypeDependent() || Arg->isValueDependent())
210     return false;
211 
212   // Check if the argument is a string literal.
213   if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
214     return Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
215            << Arg->getSourceRange();
216 
217   // Check the type of special register given.
218   StringRef Reg = cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
219   SmallVector<StringRef, 6> Fields;
220   Reg.split(Fields, ":");
221 
222   if (Fields.size() != ExpectedFieldNum && !(AllowName && Fields.size() == 1))
223     return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg)
224            << Arg->getSourceRange();
225 
226   // If the string is the name of a register then we cannot check that it is
227   // valid here, but if the string is of one of the forms described in ACLE then
228   // we can check that the supplied fields are integers within the valid
229   // ranges.
230   if (Fields.size() > 1) {
231     bool FiveFields = Fields.size() == 5;
232 
233     bool ValidString = true;
234     if (IsARMBuiltin) {
235       ValidString &= Fields[0].starts_with_insensitive("cp") ||
236                      Fields[0].starts_with_insensitive("p");
237       if (ValidString)
238         Fields[0] = Fields[0].drop_front(
239             Fields[0].starts_with_insensitive("cp") ? 2 : 1);
240 
241       ValidString &= Fields[2].starts_with_insensitive("c");
242       if (ValidString)
243         Fields[2] = Fields[2].drop_front(1);
244 
245       if (FiveFields) {
246         ValidString &= Fields[3].starts_with_insensitive("c");
247         if (ValidString)
248           Fields[3] = Fields[3].drop_front(1);
249       }
250     }
251 
252     SmallVector<int, 5> Ranges;
253     if (FiveFields)
254       Ranges.append({IsAArch64Builtin ? 1 : 15, 7, 15, 15, 7});
255     else
256       Ranges.append({15, 7, 15});
257 
258     for (unsigned i = 0; i < Fields.size(); ++i) {
259       int IntField;
260       ValidString &= !Fields[i].getAsInteger(10, IntField);
261       ValidString &= (IntField >= 0 && IntField <= Ranges[i]);
262     }
263 
264     if (!ValidString)
265       return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg)
266              << Arg->getSourceRange();
267   } else if (IsAArch64Builtin && Fields.size() == 1) {
268     // This code validates writes to PSTATE registers.
269 
270     // Not a write.
271     if (TheCall->getNumArgs() != 2)
272       return false;
273 
274     // The 128-bit system register accesses do not touch PSTATE.
275     if (BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
276         BuiltinID == AArch64::BI__builtin_arm_wsr128)
277       return false;
278 
279     // These are the named PSTATE accesses using "MSR (immediate)" instructions,
280     // along with the upper limit on the immediates allowed.
281     auto MaxLimit = llvm::StringSwitch<std::optional<unsigned>>(Reg)
282                         .CaseLower("spsel", 15)
283                         .CaseLower("daifclr", 15)
284                         .CaseLower("daifset", 15)
285                         .CaseLower("pan", 15)
286                         .CaseLower("uao", 15)
287                         .CaseLower("dit", 15)
288                         .CaseLower("ssbs", 15)
289                         .CaseLower("tco", 15)
290                         .CaseLower("allint", 1)
291                         .CaseLower("pm", 1)
292                         .Default(std::nullopt);
293 
294     // If this is not a named PSTATE, just continue without validating, as this
295     // will be lowered to an "MSR (register)" instruction directly.
296     if (!MaxLimit)
297       return false;
298 
299     // Here we only allow constants in the range for that pstate, as required by
300     // the ACLE.
301     //
302     // While clang also accepts the names of system registers in its ACLE
303     // intrinsics, we disallow that for the PSTATE names used in MSR (immediate),
304     // because the value written via a register is different from the value used
305     // as an immediate to have the same effect. E.g., for `msr tco, x0`, it is
306     // bit 25 of register x0 that is written into PSTATE.TCO, but with
307     // `msr tco, #imm`, it is bit 0 of the immediate that is written into PSTATE.TCO.
308     //
309     // If a programmer wants to codegen the MSR (register) form of `msr tco,
310     // xN`, they can still do so by specifying the register using five
311     // colon-separated numbers in a string.
312     return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, *MaxLimit);
313   }
314 
315   return false;
316 }
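
// Illustrative sketch: argument strings accepted by the check above for the
// AArch64 builtins. The register names and encodings are examples only.
//
//   (void)__builtin_arm_rsr64("1:3:13:0:2");  // "o0:op1:CRn:CRm:op2" form; every
//                                             // field is range-checked above
//   (void)__builtin_arm_rsr64("tpidr_el0");   // a plain name is accepted without
//                                             // further validation (AllowName)
//   __builtin_arm_wsr("daifclr", 0x2);        // named PSTATE write; the constant
//                                             // must be within that PSTATE's limit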
317 
318 /// getNeonEltType - Return the QualType corresponding to the elements of
319 /// the vector type specified by the NeonTypeFlags.  This is used to check
320 /// the pointer arguments for Neon load/store intrinsics.
321 static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context,
322                                bool IsPolyUnsigned, bool IsInt64Long) {
323   switch (Flags.getEltType()) {
324   case NeonTypeFlags::Int8:
325     return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy;
326   case NeonTypeFlags::Int16:
327     return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy;
328   case NeonTypeFlags::Int32:
329     return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy;
330   case NeonTypeFlags::Int64:
331     if (IsInt64Long)
332       return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy;
333     else
334       return Flags.isUnsigned() ? Context.UnsignedLongLongTy
335                                 : Context.LongLongTy;
336   case NeonTypeFlags::Poly8:
337     return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy;
338   case NeonTypeFlags::Poly16:
339     return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy;
340   case NeonTypeFlags::Poly64:
341     if (IsInt64Long)
342       return Context.UnsignedLongTy;
343     else
344       return Context.UnsignedLongLongTy;
345   case NeonTypeFlags::Poly128:
346     break;
347   case NeonTypeFlags::Float16:
348     return Context.HalfTy;
349   case NeonTypeFlags::Float32:
350     return Context.FloatTy;
351   case NeonTypeFlags::Float64:
352     return Context.DoubleTy;
353   case NeonTypeFlags::BFloat16:
354     return Context.BFloat16Ty;
355   case NeonTypeFlags::MFloat8:
356     return Context.MFloat8Ty;
357   }
358   llvm_unreachable("Invalid NeonTypeFlag!");
359 }
360 
361 enum ArmSMEState : unsigned {
362   ArmNoState = 0,
363 
364   ArmInZA = 0b01,
365   ArmOutZA = 0b10,
366   ArmInOutZA = 0b11,
367   ArmZAMask = 0b11,
368 
369   ArmInZT0 = 0b01 << 2,
370   ArmOutZT0 = 0b10 << 2,
371   ArmInOutZT0 = 0b11 << 2,
372   ArmZT0Mask = 0b11 << 2
373 };
374 
375 bool SemaARM::CheckImmediateArg(CallExpr *TheCall, unsigned CheckTy,
376                                 unsigned ArgIdx, unsigned EltBitWidth,
377                                 unsigned ContainerBitWidth) {
378   // Function that checks whether the operand (ArgIdx) is an immediate
379   // that is one of a given set of values.
380   auto CheckImmediateInSet = [&](std::initializer_list<int64_t> Set,
381                                  int ErrDiag) -> bool {
382     // We can't check the value of a dependent argument.
383     Expr *Arg = TheCall->getArg(ArgIdx);
384     if (Arg->isTypeDependent() || Arg->isValueDependent())
385       return false;
386 
387     // Check constant-ness first.
388     llvm::APSInt Imm;
389     if (SemaRef.BuiltinConstantArg(TheCall, ArgIdx, Imm))
390       return true;
391 
392     if (std::find(Set.begin(), Set.end(), Imm.getSExtValue()) == Set.end())
393       return Diag(TheCall->getBeginLoc(), ErrDiag) << Arg->getSourceRange();
394     return false;
395   };
396 
397   switch ((ImmCheckType)CheckTy) {
398   case ImmCheckType::ImmCheck0_31:
399     if (SemaRef.BuiltinConstantArgRange(TheCall, ArgIdx, 0, 31))
400       return true;
401     break;
402   case ImmCheckType::ImmCheck0_13:
403     if (SemaRef.BuiltinConstantArgRange(TheCall, ArgIdx, 0, 13))
404       return true;
405     break;
406   case ImmCheckType::ImmCheck0_63:
407     if (SemaRef.BuiltinConstantArgRange(TheCall, ArgIdx, 0, 63))
408       return true;
409     break;
410   case ImmCheckType::ImmCheck1_16:
411     if (SemaRef.BuiltinConstantArgRange(TheCall, ArgIdx, 1, 16))
412       return true;
413     break;
414   case ImmCheckType::ImmCheck0_7:
415     if (SemaRef.BuiltinConstantArgRange(TheCall, ArgIdx, 0, 7))
416       return true;
417     break;
418   case ImmCheckType::ImmCheck1_1:
419     if (SemaRef.BuiltinConstantArgRange(TheCall, ArgIdx, 1, 1))
420       return true;
421     break;
422   case ImmCheckType::ImmCheck1_3:
423     if (SemaRef.BuiltinConstantArgRange(TheCall, ArgIdx, 1, 3))
424       return true;
425     break;
426   case ImmCheckType::ImmCheck1_7:
427     if (SemaRef.BuiltinConstantArgRange(TheCall, ArgIdx, 1, 7))
428       return true;
429     break;
430   case ImmCheckType::ImmCheckExtract:
431     if (SemaRef.BuiltinConstantArgRange(TheCall, ArgIdx, 0,
432                                         (2048 / EltBitWidth) - 1))
433       return true;
434     break;
435   case ImmCheckType::ImmCheckCvt:
436   case ImmCheckType::ImmCheckShiftRight:
437     if (SemaRef.BuiltinConstantArgRange(TheCall, ArgIdx, 1, EltBitWidth))
438       return true;
439     break;
440   case ImmCheckType::ImmCheckShiftRightNarrow:
441     if (SemaRef.BuiltinConstantArgRange(TheCall, ArgIdx, 1, EltBitWidth / 2))
442       return true;
443     break;
444   case ImmCheckType::ImmCheckShiftLeft:
445     if (SemaRef.BuiltinConstantArgRange(TheCall, ArgIdx, 0, EltBitWidth - 1))
446       return true;
447     break;
448   case ImmCheckType::ImmCheckLaneIndex:
449     if (SemaRef.BuiltinConstantArgRange(TheCall, ArgIdx, 0,
450                                         (ContainerBitWidth / EltBitWidth) - 1))
451       return true;
452     break;
453   case ImmCheckType::ImmCheckLaneIndexCompRotate:
454     if (SemaRef.BuiltinConstantArgRange(
455             TheCall, ArgIdx, 0, (ContainerBitWidth / (2 * EltBitWidth)) - 1))
456       return true;
457     break;
458   case ImmCheckType::ImmCheckLaneIndexDot:
459     if (SemaRef.BuiltinConstantArgRange(
460             TheCall, ArgIdx, 0, (ContainerBitWidth / (4 * EltBitWidth)) - 1))
461       return true;
462     break;
463   case ImmCheckType::ImmCheckComplexRot90_270:
464     if (CheckImmediateInSet({90, 270}, diag::err_rotation_argument_to_cadd))
465       return true;
466     break;
467   case ImmCheckType::ImmCheckComplexRotAll90:
468     if (CheckImmediateInSet({0, 90, 180, 270},
469                             diag::err_rotation_argument_to_cmla))
470       return true;
471     break;
472   case ImmCheckType::ImmCheck0_1:
473     if (SemaRef.BuiltinConstantArgRange(TheCall, ArgIdx, 0, 1))
474       return true;
475     break;
476   case ImmCheckType::ImmCheck0_2:
477     if (SemaRef.BuiltinConstantArgRange(TheCall, ArgIdx, 0, 2))
478       return true;
479     break;
480   case ImmCheckType::ImmCheck0_3:
481     if (SemaRef.BuiltinConstantArgRange(TheCall, ArgIdx, 0, 3))
482       return true;
483     break;
484   case ImmCheckType::ImmCheck0_0:
485     if (SemaRef.BuiltinConstantArgRange(TheCall, ArgIdx, 0, 0))
486       return true;
487     break;
488   case ImmCheckType::ImmCheck0_15:
489     if (SemaRef.BuiltinConstantArgRange(TheCall, ArgIdx, 0, 15))
490       return true;
491     break;
492   case ImmCheckType::ImmCheck0_255:
493     if (SemaRef.BuiltinConstantArgRange(TheCall, ArgIdx, 0, 255))
494       return true;
495     break;
496   case ImmCheckType::ImmCheck1_32:
497     if (SemaRef.BuiltinConstantArgRange(TheCall, ArgIdx, 1, 32))
498       return true;
499     break;
500   case ImmCheckType::ImmCheck1_64:
501     if (SemaRef.BuiltinConstantArgRange(TheCall, ArgIdx, 1, 64))
502       return true;
503     break;
504   case ImmCheckType::ImmCheck2_4_Mul2:
505     if (SemaRef.BuiltinConstantArgRange(TheCall, ArgIdx, 2, 4) ||
506         SemaRef.BuiltinConstantArgMultiple(TheCall, ArgIdx, 2))
507       return true;
508     break;
509   }
510   return false;
511 }
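
// Illustrative sketch of how two of the check kinds above surface in user code
// (intrinsic names from arm_sve.h; the limits are those implied by the formulas
// for the stated element widths).
//
//   svuint8_t r = svext_u8(a, b, 255);               // ImmCheckExtract, 8-bit elements:
//                                                    //   valid range [0, 2048/8 - 1]
//   svfloat32_t c = svcmla_lane_f32(x, y, z, 1, 90); // lane: [0, 128/(2*32) - 1];
//                                                    // rotation: one of {0, 90, 180, 270}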
512 
513 bool SemaARM::PerformNeonImmChecks(
514     CallExpr *TheCall,
515     SmallVectorImpl<std::tuple<int, int, int, int>> &ImmChecks,
516     int OverloadType) {
517   bool HasError = false;
518 
519   for (const auto &I : ImmChecks) {
520     auto [ArgIdx, CheckTy, ElementBitWidth, VecBitWidth] = I;
521 
522     if (OverloadType >= 0)
523       ElementBitWidth = NeonTypeFlags(OverloadType).getEltSizeInBits();
524 
525     HasError |= CheckImmediateArg(TheCall, CheckTy, ArgIdx, ElementBitWidth,
526                                   VecBitWidth);
527   }
528 
529   return HasError;
530 }
531 
532 bool SemaARM::PerformSVEImmChecks(
533     CallExpr *TheCall, SmallVectorImpl<std::tuple<int, int, int>> &ImmChecks) {
534   bool HasError = false;
535 
536   for (const auto &I : ImmChecks) {
537     auto [ArgIdx, CheckTy, ElementBitWidth] = I;
538     HasError |=
539         CheckImmediateArg(TheCall, CheckTy, ArgIdx, ElementBitWidth, 128);
540   }
541 
542   return HasError;
543 }
544 
545 SemaARM::ArmStreamingType getArmStreamingFnType(const FunctionDecl *FD) {
546   if (FD->hasAttr<ArmLocallyStreamingAttr>())
547     return SemaARM::ArmStreaming;
548   if (const Type *Ty = FD->getType().getTypePtrOrNull()) {
549     if (const auto *FPT = Ty->getAs<FunctionProtoType>()) {
550       if (FPT->getAArch64SMEAttributes() &
551           FunctionType::SME_PStateSMEnabledMask)
552         return SemaARM::ArmStreaming;
553       if (FPT->getAArch64SMEAttributes() &
554           FunctionType::SME_PStateSMCompatibleMask)
555         return SemaARM::ArmStreamingCompatible;
556     }
557   }
558   return SemaARM::ArmNonStreaming;
559 }
560 
561 static bool checkArmStreamingBuiltin(Sema &S, CallExpr *TheCall,
562                                      const FunctionDecl *FD,
563                                      SemaARM::ArmStreamingType BuiltinType,
564                                      unsigned BuiltinID) {
565   SemaARM::ArmStreamingType FnType = getArmStreamingFnType(FD);
566 
567   // Check if the intrinsic is available in the right mode, i.e.
568   // * When compiling for SME only, the caller must be in streaming mode.
569   // * When compiling for SVE only, the caller must be in non-streaming mode.
570   // * When compiling for both SVE and SME, the caller can be in either mode.
571   if (BuiltinType == SemaARM::VerifyRuntimeMode) {
572     llvm::StringMap<bool> CallerFeatureMapWithoutSVE;
573     S.Context.getFunctionFeatureMap(CallerFeatureMapWithoutSVE, FD);
574     CallerFeatureMapWithoutSVE["sve"] = false;
575 
576     // Avoid emitting diagnostics for a function that can never compile.
577     if (FnType == SemaARM::ArmStreaming && !CallerFeatureMapWithoutSVE["sme"])
578       return false;
579 
580     llvm::StringMap<bool> CallerFeatureMapWithoutSME;
581     S.Context.getFunctionFeatureMap(CallerFeatureMapWithoutSME, FD);
582     CallerFeatureMapWithoutSME["sme"] = false;
583 
584     // We know the builtin requires either some combination of SVE flags, or
585     // some combination of SME flags, but we need to figure out which part
586     // of the required features is satisfied by the target features.
587     //
588     // For a builtin with target guard 'sve2p1|sme2', if we compile with
589     // '+sve2p1,+sme', then we know that it satisfies the 'sve2p1' part if we
590     // evaluate the features for '+sve2p1,+sme,+nosme'.
591     //
592     // Similarly, if we compile with '+sve2,+sme2', then we know it satisfies
593     // the 'sme2' part if we evaluate the features for '+sve2,+sme2,+nosve'.
594     StringRef BuiltinTargetGuards(
595         S.Context.BuiltinInfo.getRequiredFeatures(BuiltinID));
596     bool SatisfiesSVE = Builtin::evaluateRequiredTargetFeatures(
597         BuiltinTargetGuards, CallerFeatureMapWithoutSME);
598     bool SatisfiesSME = Builtin::evaluateRequiredTargetFeatures(
599         BuiltinTargetGuards, CallerFeatureMapWithoutSVE);
600 
601     if ((SatisfiesSVE && SatisfiesSME) ||
602         (SatisfiesSVE && FnType == SemaARM::ArmStreamingCompatible))
603       return false;
604     else if (SatisfiesSVE)
605       BuiltinType = SemaARM::ArmNonStreaming;
606     else if (SatisfiesSME)
607       BuiltinType = SemaARM::ArmStreaming;
608     else
609       // This should be diagnosed by CodeGen
610       return false;
611   }
612 
613   if (FnType != SemaARM::ArmNonStreaming &&
614       BuiltinType == SemaARM::ArmNonStreaming)
615     S.Diag(TheCall->getBeginLoc(), diag::err_attribute_arm_sm_incompat_builtin)
616         << TheCall->getSourceRange() << "non-streaming";
617   else if (FnType != SemaARM::ArmStreaming &&
618            BuiltinType == SemaARM::ArmStreaming)
619     S.Diag(TheCall->getBeginLoc(), diag::err_attribute_arm_sm_incompat_builtin)
620         << TheCall->getSourceRange() << "streaming";
621   else
622     return false;
623 
624   return true;
625 }
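
// Illustrative sketch of a mismatch diagnosed by the check above: a builtin
// that is only available in non-streaming mode (e.g. a NEON intrinsic from
// arm_neon.h) used inside a streaming function.
//
//   float32x4_t f(float32x4_t a, float32x4_t b) __arm_streaming {
//     return vaddq_f32(a, b);  // err_attribute_arm_sm_incompat_builtin:
//                              // "non-streaming" builtin in a streaming caller
//   }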
626 
627 static ArmSMEState getSMEState(unsigned BuiltinID) {
628   switch (BuiltinID) {
629   default:
630     return ArmNoState;
631 #define GET_SME_BUILTIN_GET_STATE
632 #include "clang/Basic/arm_sme_builtins_za_state.inc"
633 #undef GET_SME_BUILTIN_GET_STATE
634   }
635 }
636 
637 bool SemaARM::CheckSMEBuiltinFunctionCall(unsigned BuiltinID,
638                                           CallExpr *TheCall) {
639   if (const FunctionDecl *FD = SemaRef.getCurFunctionDecl()) {
640     std::optional<ArmStreamingType> BuiltinType;
641 
642     switch (BuiltinID) {
643 #define GET_SME_STREAMING_ATTRS
644 #include "clang/Basic/arm_sme_streaming_attrs.inc"
645 #undef GET_SME_STREAMING_ATTRS
646     }
647 
648     if (BuiltinType &&
649         checkArmStreamingBuiltin(SemaRef, TheCall, FD, *BuiltinType, BuiltinID))
650       return true;
651 
652     if ((getSMEState(BuiltinID) & ArmZAMask) && !hasArmZAState(FD))
653       Diag(TheCall->getBeginLoc(),
654            diag::warn_attribute_arm_za_builtin_no_za_state)
655           << TheCall->getSourceRange();
656 
657     if ((getSMEState(BuiltinID) & ArmZT0Mask) && !hasArmZT0State(FD))
658       Diag(TheCall->getBeginLoc(),
659            diag::warn_attribute_arm_zt0_builtin_no_zt0_state)
660           << TheCall->getSourceRange();
661   }
662 
663   // Range check SME intrinsics that take immediate values.
664   SmallVector<std::tuple<int, int, int>, 3> ImmChecks;
665 
666   switch (BuiltinID) {
667   default:
668     return false;
669 #define GET_SME_IMMEDIATE_CHECK
670 #include "clang/Basic/arm_sme_sema_rangechecks.inc"
671 #undef GET_SME_IMMEDIATE_CHECK
672   }
673 
674   return PerformSVEImmChecks(TheCall, ImmChecks);
675 }
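
// Illustrative sketch of the ZA-state warning above: an SME builtin that uses
// ZA called from a function that declares no ZA state (assuming arm_sme.h and
// an SME-enabled target).
//
//   void reset(void) {
//     svzero_za();  // warn_attribute_arm_za_builtin_no_za_state; declaring the
//                   // function __arm_inout("za") (or __arm_new("za")) fixes it
//   }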
676 
677 bool SemaARM::CheckSVEBuiltinFunctionCall(unsigned BuiltinID,
678                                           CallExpr *TheCall) {
679   if (const FunctionDecl *FD = SemaRef.getCurFunctionDecl()) {
680     std::optional<ArmStreamingType> BuiltinType;
681 
682     switch (BuiltinID) {
683 #define GET_SVE_STREAMING_ATTRS
684 #include "clang/Basic/arm_sve_streaming_attrs.inc"
685 #undef GET_SVE_STREAMING_ATTRS
686     }
687     if (BuiltinType &&
688         checkArmStreamingBuiltin(SemaRef, TheCall, FD, *BuiltinType, BuiltinID))
689       return true;
690   }
691   // Range check SVE intrinsics that take immediate values.
692   SmallVector<std::tuple<int, int, int>, 3> ImmChecks;
693 
694   switch (BuiltinID) {
695   default:
696     return false;
697 #define GET_SVE_IMMEDIATE_CHECK
698 #include "clang/Basic/arm_sve_sema_rangechecks.inc"
699 #undef GET_SVE_IMMEDIATE_CHECK
700   }
701 
702   return PerformSVEImmChecks(TheCall, ImmChecks);
703 }
704 
705 bool SemaARM::CheckNeonBuiltinFunctionCall(const TargetInfo &TI,
706                                            unsigned BuiltinID,
707                                            CallExpr *TheCall) {
708   if (const FunctionDecl *FD = SemaRef.getCurFunctionDecl()) {
709 
710     switch (BuiltinID) {
711     default:
712       break;
713 #define GET_NEON_BUILTINS
714 #define TARGET_BUILTIN(id, ...) case NEON::BI##id:
715 #define BUILTIN(id, ...) case NEON::BI##id:
716 #include "clang/Basic/arm_neon.inc"
717       if (checkArmStreamingBuiltin(SemaRef, TheCall, FD, ArmNonStreaming,
718                                    BuiltinID))
719         return true;
720       break;
721 #undef TARGET_BUILTIN
722 #undef BUILTIN
723 #undef GET_NEON_BUILTINS
724     }
725   }
726 
727   llvm::APSInt Result;
728   uint64_t mask = 0;
729   int TV = -1;
730   int PtrArgNum = -1;
731   bool HasConstPtr = false;
732   switch (BuiltinID) {
733 #define GET_NEON_OVERLOAD_CHECK
734 #include "clang/Basic/arm_fp16.inc"
735 #include "clang/Basic/arm_neon.inc"
736 #undef GET_NEON_OVERLOAD_CHECK
737   }
738 
739   // For NEON intrinsics which are overloaded on vector element type, validate
740   // the immediate which specifies which variant to emit.
741   unsigned ImmArg = TheCall->getNumArgs() - 1;
742   if (mask) {
743     if (SemaRef.BuiltinConstantArg(TheCall, ImmArg, Result))
744       return true;
745 
746     TV = Result.getLimitedValue(64);
747     if ((TV > 63) || (mask & (1ULL << TV)) == 0)
748       return Diag(TheCall->getBeginLoc(), diag::err_invalid_neon_type_code)
749              << TheCall->getArg(ImmArg)->getSourceRange();
750   }
751 
752   if (PtrArgNum >= 0) {
753     // Check that pointer arguments have the specified type.
754     Expr *Arg = TheCall->getArg(PtrArgNum);
755     if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg))
756       Arg = ICE->getSubExpr();
757     ExprResult RHS = SemaRef.DefaultFunctionArrayLvalueConversion(Arg);
758     QualType RHSTy = RHS.get()->getType();
759 
760     llvm::Triple::ArchType Arch = TI.getTriple().getArch();
761     bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 ||
762                           Arch == llvm::Triple::aarch64_32 ||
763                           Arch == llvm::Triple::aarch64_be;
764     bool IsInt64Long = TI.getInt64Type() == TargetInfo::SignedLong;
765     QualType EltTy = getNeonEltType(NeonTypeFlags(TV), getASTContext(),
766                                     IsPolyUnsigned, IsInt64Long);
767     if (HasConstPtr)
768       EltTy = EltTy.withConst();
769     QualType LHSTy = getASTContext().getPointerType(EltTy);
770     Sema::AssignConvertType ConvTy;
771     ConvTy = SemaRef.CheckSingleAssignmentConstraints(LHSTy, RHS);
772     if (RHS.isInvalid())
773       return true;
774     if (SemaRef.DiagnoseAssignmentResult(ConvTy, Arg->getBeginLoc(), LHSTy,
775                                          RHSTy, RHS.get(),
776                                          AssignmentAction::Assigning))
777       return true;
778   }
779 
780   // For NEON intrinsics which take an immediate value as part of the
781   // instruction, range check them here.
782   SmallVector<std::tuple<int, int, int, int>, 2> ImmChecks;
783   switch (BuiltinID) {
784   default:
785     return false;
786 #define GET_NEON_IMMEDIATE_CHECK
787 #include "clang/Basic/arm_fp16.inc"
788 #include "clang/Basic/arm_neon.inc"
789 #undef GET_NEON_IMMEDIATE_CHECK
790   }
791 
792   return PerformNeonImmChecks(TheCall, ImmChecks, TV);
793 }
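
// Illustrative sketch of the lane-index checks applied above: for a 128-bit
// vector of 32-bit elements the valid lane range is [0, 3].
//
//   float32x4_t v = ...;
//   float ok  = vgetq_lane_f32(v, 3);  // accepted
//   float bad = vgetq_lane_f32(v, 4);  // rejected by the immediate range check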
794 
795 bool SemaARM::CheckMVEBuiltinFunctionCall(unsigned BuiltinID,
796                                           CallExpr *TheCall) {
797   switch (BuiltinID) {
798   default:
799     return false;
800 #include "clang/Basic/arm_mve_builtin_sema.inc"
801   }
802 }
803 
804 bool SemaARM::CheckCDEBuiltinFunctionCall(const TargetInfo &TI,
805                                           unsigned BuiltinID,
806                                           CallExpr *TheCall) {
807   bool Err = false;
808   switch (BuiltinID) {
809   default:
810     return false;
811 #include "clang/Basic/arm_cde_builtin_sema.inc"
812   }
813 
814   if (Err)
815     return true;
816 
817   return CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), /*WantCDE*/ true);
818 }
819 
820 bool SemaARM::CheckARMCoprocessorImmediate(const TargetInfo &TI,
821                                            const Expr *CoprocArg,
822                                            bool WantCDE) {
823   ASTContext &Context = getASTContext();
824   if (SemaRef.isConstantEvaluatedContext())
825     return false;
826 
827   // We can't check the value of a dependent argument.
828   if (CoprocArg->isTypeDependent() || CoprocArg->isValueDependent())
829     return false;
830 
831   llvm::APSInt CoprocNoAP = *CoprocArg->getIntegerConstantExpr(Context);
832   int64_t CoprocNo = CoprocNoAP.getExtValue();
833   assert(CoprocNo >= 0 && "Coprocessor immediate must be non-negative");
834 
835   uint32_t CDECoprocMask = TI.getARMCDECoprocMask();
836   bool IsCDECoproc = CoprocNo <= 7 && (CDECoprocMask & (1 << CoprocNo));
837 
838   if (IsCDECoproc != WantCDE)
839     return Diag(CoprocArg->getBeginLoc(), diag::err_arm_invalid_coproc)
840            << (int)CoprocNo << (int)WantCDE << CoprocArg->getSourceRange();
841 
842   return false;
843 }
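
// Illustrative sketch: assuming coprocessor 0 has been reserved for CDE
// (e.g. -march=armv8.1-m.main+cdecp0), the check above rejects its use in the
// generic coprocessor intrinsics, and conversely rejects non-CDE coprocessor
// numbers in the CDE intrinsics.
//
//   __builtin_arm_mcr(0, 0, value, 0, 0, 0);  // error: coprocessor 0 is
//                                             // configured for CDE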
844 
845 bool SemaARM::CheckARMBuiltinExclusiveCall(unsigned BuiltinID,
846                                            CallExpr *TheCall,
847                                            unsigned MaxWidth) {
848   assert((BuiltinID == ARM::BI__builtin_arm_ldrex ||
849           BuiltinID == ARM::BI__builtin_arm_ldaex ||
850           BuiltinID == ARM::BI__builtin_arm_strex ||
851           BuiltinID == ARM::BI__builtin_arm_stlex ||
852           BuiltinID == AArch64::BI__builtin_arm_ldrex ||
853           BuiltinID == AArch64::BI__builtin_arm_ldaex ||
854           BuiltinID == AArch64::BI__builtin_arm_strex ||
855           BuiltinID == AArch64::BI__builtin_arm_stlex) &&
856          "unexpected ARM builtin");
857   bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex ||
858                  BuiltinID == ARM::BI__builtin_arm_ldaex ||
859                  BuiltinID == AArch64::BI__builtin_arm_ldrex ||
860                  BuiltinID == AArch64::BI__builtin_arm_ldaex;
861 
862   ASTContext &Context = getASTContext();
863   DeclRefExpr *DRE =
864       cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
865 
866   // Ensure that we have the proper number of arguments.
867   if (SemaRef.checkArgCount(TheCall, IsLdrex ? 1 : 2))
868     return true;
869 
870   // Inspect the pointer argument of the atomic builtin.  This should always be
871   // a pointer type, whose element is an integral scalar or pointer type.
872   // Because it is a pointer type, we don't have to worry about any implicit
873   // casts here.
874   Expr *PointerArg = TheCall->getArg(IsLdrex ? 0 : 1);
875   ExprResult PointerArgRes =
876       SemaRef.DefaultFunctionArrayLvalueConversion(PointerArg);
877   if (PointerArgRes.isInvalid())
878     return true;
879   PointerArg = PointerArgRes.get();
880 
881   const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
882   if (!pointerType) {
883     Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
884         << PointerArg->getType() << 0 << PointerArg->getSourceRange();
885     return true;
886   }
887 
888   // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next
889   // task is to insert the appropriate casts into the AST. First work out just
890   // what the appropriate type is.
891   QualType ValType = pointerType->getPointeeType();
892   QualType AddrType = ValType.getUnqualifiedType().withVolatile();
893   if (IsLdrex)
894     AddrType.addConst();
895 
896   // Issue a warning if the cast is dodgy.
897   CastKind CastNeeded = CK_NoOp;
898   if (!AddrType.isAtLeastAsQualifiedAs(ValType, getASTContext())) {
899     CastNeeded = CK_BitCast;
900     Diag(DRE->getBeginLoc(), diag::ext_typecheck_convert_discards_qualifiers)
901         << PointerArg->getType() << Context.getPointerType(AddrType)
902         << AssignmentAction::Passing << PointerArg->getSourceRange();
903   }
904 
905   // Finally, do the cast and replace the argument with the corrected version.
906   AddrType = Context.getPointerType(AddrType);
907   PointerArgRes = SemaRef.ImpCastExprToType(PointerArg, AddrType, CastNeeded);
908   if (PointerArgRes.isInvalid())
909     return true;
910   PointerArg = PointerArgRes.get();
911 
912   TheCall->setArg(IsLdrex ? 0 : 1, PointerArg);
913 
914   // In general, we allow ints, floats and pointers to be loaded and stored.
915   if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
916       !ValType->isBlockPointerType() && !ValType->isFloatingType()) {
917     Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intfltptr)
918         << PointerArg->getType() << 0 << PointerArg->getSourceRange();
919     return true;
920   }
921 
922   // But ARM doesn't have instructions to deal with 128-bit versions.
923   if (Context.getTypeSize(ValType) > MaxWidth) {
924     assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate");
925     Diag(DRE->getBeginLoc(), diag::err_atomic_exclusive_builtin_pointer_size)
926         << PointerArg->getType() << PointerArg->getSourceRange();
927     return true;
928   }
929 
930   switch (ValType.getObjCLifetime()) {
931   case Qualifiers::OCL_None:
932   case Qualifiers::OCL_ExplicitNone:
933     // okay
934     break;
935 
936   case Qualifiers::OCL_Weak:
937   case Qualifiers::OCL_Strong:
938   case Qualifiers::OCL_Autoreleasing:
939     Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership)
940         << ValType << PointerArg->getSourceRange();
941     return true;
942   }
943 
944   if (IsLdrex) {
945     TheCall->setType(ValType);
946     return false;
947   }
948 
949   // Initialize the argument to be stored.
950   ExprResult ValArg = TheCall->getArg(0);
951   InitializedEntity Entity = InitializedEntity::InitializeParameter(
952       Context, ValType, /*consume*/ false);
953   ValArg = SemaRef.PerformCopyInitialization(Entity, SourceLocation(), ValArg);
954   if (ValArg.isInvalid())
955     return true;
956   TheCall->setArg(0, ValArg.get());
957 
958   // __builtin_arm_strex always returns an int. It's marked as such in the .def,
959   // but the custom checker bypasses all default analysis.
960   TheCall->setType(Context.IntTy);
961   return false;
962 }
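
// Illustrative sketch of what the rewriting above means for a caller: the
// pointee type becomes the result type of ldrex/ldaex, and strex/stlex take
// the value first, the pointer second, and always return an int status.
//
//   volatile int *lock = ...;
//   int old = __builtin_arm_ldrex(lock);
//   int failed = __builtin_arm_strex(old + 1, lock);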
963 
964 bool SemaARM::CheckARMBuiltinFunctionCall(const TargetInfo &TI,
965                                           unsigned BuiltinID,
966                                           CallExpr *TheCall) {
967   if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
968       BuiltinID == ARM::BI__builtin_arm_ldaex ||
969       BuiltinID == ARM::BI__builtin_arm_strex ||
970       BuiltinID == ARM::BI__builtin_arm_stlex) {
971     return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64);
972   }
973 
974   if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
975     return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 1) ||
976            SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 1);
977   }
978 
979   if (BuiltinID == ARM::BI__builtin_arm_rsr64 ||
980       BuiltinID == ARM::BI__builtin_arm_wsr64)
981     return BuiltinARMSpecialReg(BuiltinID, TheCall, 0, 3, false);
982 
983   if (BuiltinID == ARM::BI__builtin_arm_rsr ||
984       BuiltinID == ARM::BI__builtin_arm_rsrp ||
985       BuiltinID == ARM::BI__builtin_arm_wsr ||
986       BuiltinID == ARM::BI__builtin_arm_wsrp)
987     return BuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
988 
989   if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
990     return true;
991   if (CheckMVEBuiltinFunctionCall(BuiltinID, TheCall))
992     return true;
993   if (CheckCDEBuiltinFunctionCall(TI, BuiltinID, TheCall))
994     return true;
995 
996   // For intrinsics which take an immediate value as part of the instruction,
997   // range check them here.
998   // FIXME: VFP Intrinsics should error if VFP not present.
999   switch (BuiltinID) {
1000   default:
1001     return false;
1002   case ARM::BI__builtin_arm_ssat:
1003     return SemaRef.BuiltinConstantArgRange(TheCall, 1, 1, 32);
1004   case ARM::BI__builtin_arm_usat:
1005     return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 31);
1006   case ARM::BI__builtin_arm_ssat16:
1007     return SemaRef.BuiltinConstantArgRange(TheCall, 1, 1, 16);
1008   case ARM::BI__builtin_arm_usat16:
1009     return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 15);
1010   case ARM::BI__builtin_arm_vcvtr_f:
1011   case ARM::BI__builtin_arm_vcvtr_d:
1012     return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 1);
1013   case ARM::BI__builtin_arm_dmb:
1014   case ARM::BI__builtin_arm_dsb:
1015   case ARM::BI__builtin_arm_isb:
1016   case ARM::BI__builtin_arm_dbg:
1017     return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 15);
1018   case ARM::BI__builtin_arm_cdp:
1019   case ARM::BI__builtin_arm_cdp2:
1020   case ARM::BI__builtin_arm_mcr:
1021   case ARM::BI__builtin_arm_mcr2:
1022   case ARM::BI__builtin_arm_mrc:
1023   case ARM::BI__builtin_arm_mrc2:
1024   case ARM::BI__builtin_arm_mcrr:
1025   case ARM::BI__builtin_arm_mcrr2:
1026   case ARM::BI__builtin_arm_mrrc:
1027   case ARM::BI__builtin_arm_mrrc2:
1028   case ARM::BI__builtin_arm_ldc:
1029   case ARM::BI__builtin_arm_ldcl:
1030   case ARM::BI__builtin_arm_ldc2:
1031   case ARM::BI__builtin_arm_ldc2l:
1032   case ARM::BI__builtin_arm_stc:
1033   case ARM::BI__builtin_arm_stcl:
1034   case ARM::BI__builtin_arm_stc2:
1035   case ARM::BI__builtin_arm_stc2l:
1036     return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 15) ||
1037            CheckARMCoprocessorImmediate(TI, TheCall->getArg(0),
1038                                         /*WantCDE*/ false);
1039   }
1040 }
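
// Illustrative sketch of the constant ranges enforced above for a few of the
// 32-bit ARM builtins.
//
//   int a = __builtin_arm_ssat(x, 32);  // accepted: range is [1, 32]
//   int b = __builtin_arm_usat(x, 32);  // rejected: range is [0, 31]
//   __builtin_arm_dmb(15);              // accepted: barrier option in [0, 15]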
1041 
1042 bool SemaARM::CheckAArch64BuiltinFunctionCall(const TargetInfo &TI,
1043                                               unsigned BuiltinID,
1044                                               CallExpr *TheCall) {
1045   if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
1046       BuiltinID == AArch64::BI__builtin_arm_ldaex ||
1047       BuiltinID == AArch64::BI__builtin_arm_strex ||
1048       BuiltinID == AArch64::BI__builtin_arm_stlex) {
1049     return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128);
1050   }
1051 
1052   if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
1053     return SemaRef.BuiltinConstantArgRange(TheCall, 1, 0, 1) ||
1054            SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 3) ||
1055            SemaRef.BuiltinConstantArgRange(TheCall, 3, 0, 1) ||
1056            SemaRef.BuiltinConstantArgRange(TheCall, 4, 0, 1);
1057   }
1058 
1059   if (BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
1060       BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
1061       BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
1062       BuiltinID == AArch64::BI__builtin_arm_wsr128)
1063     return BuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
1064 
1065   // Memory Tagging Extensions (MTE) Intrinsics
1066   if (BuiltinID == AArch64::BI__builtin_arm_irg ||
1067       BuiltinID == AArch64::BI__builtin_arm_addg ||
1068       BuiltinID == AArch64::BI__builtin_arm_gmi ||
1069       BuiltinID == AArch64::BI__builtin_arm_ldg ||
1070       BuiltinID == AArch64::BI__builtin_arm_stg ||
1071       BuiltinID == AArch64::BI__builtin_arm_subp) {
1072     return BuiltinARMMemoryTaggingCall(BuiltinID, TheCall);
1073   }
1074 
1075   if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
1076       BuiltinID == AArch64::BI__builtin_arm_rsrp ||
1077       BuiltinID == AArch64::BI__builtin_arm_wsr ||
1078       BuiltinID == AArch64::BI__builtin_arm_wsrp)
1079     return BuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
1080 
1081   // Only check the valid encoding range. Any constant in this range would be
1082   // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw
1083   // an exception for incorrect registers. This matches MSVC behavior.
1084   if (BuiltinID == AArch64::BI_ReadStatusReg ||
1085       BuiltinID == AArch64::BI_WriteStatusReg)
1086     return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 0x7fff);
1087 
1088   if (BuiltinID == AArch64::BI__getReg)
1089     return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 31);
1090 
1091   if (BuiltinID == AArch64::BI__break)
1092     return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 0xffff);
1093 
1094   if (BuiltinID == AArch64::BI__hlt)
1095     return SemaRef.BuiltinConstantArgRange(TheCall, 0, 0, 0xffff);
1096 
1097   if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
1098     return true;
1099 
1100   if (CheckSVEBuiltinFunctionCall(BuiltinID, TheCall))
1101     return true;
1102 
1103   if (CheckSMEBuiltinFunctionCall(BuiltinID, TheCall))
1104     return true;
1105 
1106   // For intrinsics which take an immediate value as part of the instruction,
1107   // range check them here.
1108   unsigned i = 0, l = 0, u = 0;
1109   switch (BuiltinID) {
1110   default: return false;
1111   case AArch64::BI__builtin_arm_dmb:
1112   case AArch64::BI__builtin_arm_dsb:
1113   case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break;
1114   case AArch64::BI__builtin_arm_tcancel: l = 0; u = 65535; break;
1115   }
1116 
1117   return SemaRef.BuiltinConstantArgRange(TheCall, i, l, u + l);
1118 }
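
// Illustrative sketch of the MSVC-compatible status-register path above: any
// constant encoding in [0, 0x7fff] is accepted and mapped to a register of the
// form S<op0>_<op1>_C<n>_C<m>_<op2>; the value shown is an arbitrary in-range
// encoding, not a specific register.
//
//   unsigned long long v = _ReadStatusReg(0x5e82);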
1119 
1120 namespace {
1121 struct IntrinToName {
1122   uint32_t Id;
1123   int32_t FullName;
1124   int32_t ShortName;
1125 };
1126 } // unnamed namespace
1127 
1128 static bool BuiltinAliasValid(unsigned BuiltinID, StringRef AliasName,
1129                               ArrayRef<IntrinToName> Map,
1130                               const char *IntrinNames) {
1131   AliasName.consume_front("__arm_");
1132   const IntrinToName *It =
1133       llvm::lower_bound(Map, BuiltinID, [](const IntrinToName &L, unsigned Id) {
1134         return L.Id < Id;
1135       });
1136   if (It == Map.end() || It->Id != BuiltinID)
1137     return false;
1138   StringRef FullName(&IntrinNames[It->FullName]);
1139   if (AliasName == FullName)
1140     return true;
1141   if (It->ShortName == -1)
1142     return false;
1143   StringRef ShortName(&IntrinNames[It->ShortName]);
1144   return AliasName == ShortName;
1145 }
1146 
1147 bool SemaARM::MveAliasValid(unsigned BuiltinID, StringRef AliasName) {
1148 #include "clang/Basic/arm_mve_builtin_aliases.inc"
1149   // The included file defines:
1150   // - ArrayRef<IntrinToName> Map
1151   // - const char IntrinNames[]
1152   return BuiltinAliasValid(BuiltinID, AliasName, Map, IntrinNames);
1153 }
1154 
1155 bool SemaARM::CdeAliasValid(unsigned BuiltinID, StringRef AliasName) {
1156 #include "clang/Basic/arm_cde_builtin_aliases.inc"
1157   return BuiltinAliasValid(BuiltinID, AliasName, Map, IntrinNames);
1158 }
1159 
1160 bool SemaARM::SveAliasValid(unsigned BuiltinID, StringRef AliasName) {
1161   if (getASTContext().BuiltinInfo.isAuxBuiltinID(BuiltinID))
1162     BuiltinID = getASTContext().BuiltinInfo.getAuxBuiltinID(BuiltinID);
1163   return BuiltinID >= AArch64::FirstSVEBuiltin &&
1164          BuiltinID <= AArch64::LastSVEBuiltin;
1165 }
1166 
1167 bool SemaARM::SmeAliasValid(unsigned BuiltinID, StringRef AliasName) {
1168   if (getASTContext().BuiltinInfo.isAuxBuiltinID(BuiltinID))
1169     BuiltinID = getASTContext().BuiltinInfo.getAuxBuiltinID(BuiltinID);
1170   return BuiltinID >= AArch64::FirstSMEBuiltin &&
1171          BuiltinID <= AArch64::LastSMEBuiltin;
1172 }
1173 
1174 void SemaARM::handleBuiltinAliasAttr(Decl *D, const ParsedAttr &AL) {
1175   ASTContext &Context = getASTContext();
1176   if (!AL.isArgIdent(0)) {
1177     Diag(AL.getLoc(), diag::err_attribute_argument_n_type)
1178         << AL << 1 << AANT_ArgumentIdentifier;
1179     return;
1180   }
1181 
1182   IdentifierInfo *Ident = AL.getArgAsIdent(0)->Ident;
1183   unsigned BuiltinID = Ident->getBuiltinID();
1184   StringRef AliasName = cast<FunctionDecl>(D)->getIdentifier()->getName();
1185 
1186   bool IsAArch64 = Context.getTargetInfo().getTriple().isAArch64();
1187   if ((IsAArch64 && !SveAliasValid(BuiltinID, AliasName) &&
1188        !SmeAliasValid(BuiltinID, AliasName)) ||
1189       (!IsAArch64 && !MveAliasValid(BuiltinID, AliasName) &&
1190        !CdeAliasValid(BuiltinID, AliasName))) {
1191     Diag(AL.getLoc(), diag::err_attribute_arm_builtin_alias);
1192     return;
1193   }
1194 
1195   D->addAttr(::new (Context) ArmBuiltinAliasAttr(Context, AL, Ident));
1196 }
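
// Illustrative sketch of how the alias attribute validated above is used by
// the generated headers (the builtin name here is a stand-in, not an exact
// spelling from arm_mve.h):
//
//   static __inline__
//   __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u32)))
//   uint32x4_t vaddq(uint32x4_t a, uint32x4_t b);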
1197 
1198 static bool checkNewAttrMutualExclusion(
1199     Sema &S, const ParsedAttr &AL, const FunctionProtoType *FPT,
1200     FunctionType::ArmStateValue CurrentState, StringRef StateName) {
1201   auto CheckForIncompatibleAttr =
1202       [&](FunctionType::ArmStateValue IncompatibleState,
1203           StringRef IncompatibleStateName) {
1204         if (CurrentState == IncompatibleState) {
1205           S.Diag(AL.getLoc(), diag::err_attributes_are_not_compatible)
1206               << (std::string("'__arm_new(\"") + StateName.str() + "\")'")
1207               << (std::string("'") + IncompatibleStateName.str() + "(\"" +
1208                   StateName.str() + "\")'")
1209               << true;
1210           AL.setInvalid();
1211         }
1212       };
1213 
1214   CheckForIncompatibleAttr(FunctionType::ARM_In, "__arm_in");
1215   CheckForIncompatibleAttr(FunctionType::ARM_Out, "__arm_out");
1216   CheckForIncompatibleAttr(FunctionType::ARM_InOut, "__arm_inout");
1217   CheckForIncompatibleAttr(FunctionType::ARM_Preserves, "__arm_preserves");
1218   return AL.isInvalid();
1219 }
1220 
1221 void SemaARM::handleNewAttr(Decl *D, const ParsedAttr &AL) {
1222   if (!AL.getNumArgs()) {
1223     Diag(AL.getLoc(), diag::err_missing_arm_state) << AL;
1224     AL.setInvalid();
1225     return;
1226   }
1227 
1228   std::vector<StringRef> NewState;
1229   if (const auto *ExistingAttr = D->getAttr<ArmNewAttr>()) {
1230     for (StringRef S : ExistingAttr->newArgs())
1231       NewState.push_back(S);
1232   }
1233 
1234   bool HasZA = false;
1235   bool HasZT0 = false;
1236   for (unsigned I = 0, E = AL.getNumArgs(); I != E; ++I) {
1237     StringRef StateName;
1238     SourceLocation LiteralLoc;
1239     if (!SemaRef.checkStringLiteralArgumentAttr(AL, I, StateName, &LiteralLoc))
1240       return;
1241 
1242     if (StateName == "za")
1243       HasZA = true;
1244     else if (StateName == "zt0")
1245       HasZT0 = true;
1246     else {
1247       Diag(LiteralLoc, diag::err_unknown_arm_state) << StateName;
1248       AL.setInvalid();
1249       return;
1250     }
1251 
1252     if (!llvm::is_contained(NewState, StateName)) // Avoid adding duplicates.
1253       NewState.push_back(StateName);
1254   }
1255 
1256   if (auto *FPT = dyn_cast<FunctionProtoType>(D->getFunctionType())) {
1257     FunctionType::ArmStateValue ZAState =
1258         FunctionType::getArmZAState(FPT->getAArch64SMEAttributes());
1259     if (HasZA && ZAState != FunctionType::ARM_None &&
1260         checkNewAttrMutualExclusion(SemaRef, AL, FPT, ZAState, "za"))
1261       return;
1262     FunctionType::ArmStateValue ZT0State =
1263         FunctionType::getArmZT0State(FPT->getAArch64SMEAttributes());
1264     if (HasZT0 && ZT0State != FunctionType::ARM_None &&
1265         checkNewAttrMutualExclusion(SemaRef, AL, FPT, ZT0State, "zt0"))
1266       return;
1267   }
1268 
1269   D->dropAttr<ArmNewAttr>();
1270   D->addAttr(::new (getASTContext()) ArmNewAttr(
1271       getASTContext(), AL, NewState.data(), NewState.size()));
1272 }
1273 
1274 void SemaARM::handleCmseNSEntryAttr(Decl *D, const ParsedAttr &AL) {
1275   if (getLangOpts().CPlusPlus && !D->getDeclContext()->isExternCContext()) {
1276     Diag(AL.getLoc(), diag::err_attribute_not_clinkage) << AL;
1277     return;
1278   }
1279 
1280   const auto *FD = cast<FunctionDecl>(D);
1281   if (!FD->isExternallyVisible()) {
1282     Diag(AL.getLoc(), diag::warn_attribute_cmse_entry_static);
1283     return;
1284   }
1285 
1286   D->addAttr(::new (getASTContext()) CmseNSEntryAttr(getASTContext(), AL));
1287 }
1288 
1289 void SemaARM::handleInterruptAttr(Decl *D, const ParsedAttr &AL) {
1290   // Check the attribute arguments.
1291   if (AL.getNumArgs() > 1) {
1292     Diag(AL.getLoc(), diag::err_attribute_too_many_arguments) << AL << 1;
1293     return;
1294   }
1295 
1296   StringRef Str;
1297   SourceLocation ArgLoc;
1298 
1299   if (AL.getNumArgs() == 0)
1300     Str = "";
1301   else if (!SemaRef.checkStringLiteralArgumentAttr(AL, 0, Str, &ArgLoc))
1302     return;
1303 
1304   ARMInterruptAttr::InterruptType Kind;
1305   if (!ARMInterruptAttr::ConvertStrToInterruptType(Str, Kind)) {
1306     Diag(AL.getLoc(), diag::warn_attribute_type_not_supported)
1307         << AL << Str << ArgLoc;
1308     return;
1309   }
1310 
1311   const TargetInfo &TI = getASTContext().getTargetInfo();
1312   if (TI.hasFeature("vfp"))
1313     Diag(D->getLocation(), diag::warn_arm_interrupt_vfp_clobber);
1314 
1315   D->addAttr(::new (getASTContext())
1316                  ARMInterruptAttr(getASTContext(), AL, Kind));
1317 }
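
// Illustrative sketch of the attribute handled above; the string must name one
// of the supported interrupt kinds (or be empty), otherwise the attribute is
// ignored with a warning.
//
//   __attribute__((interrupt("IRQ"))) void irq_handler(void);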
1318 
1319 // Check if the function definition uses any AArch64 SME features without
1320 // having the '+sme' feature enabled, and warn the user if an SME locally
1321 // streaming function returns or takes arguments with VL-based types.
1322 void SemaARM::CheckSMEFunctionDefAttributes(const FunctionDecl *FD) {
1323   const auto *Attr = FD->getAttr<ArmNewAttr>();
1324   bool UsesSM = FD->hasAttr<ArmLocallyStreamingAttr>();
1325   bool UsesZA = Attr && Attr->isNewZA();
1326   bool UsesZT0 = Attr && Attr->isNewZT0();
1327 
1328   if (UsesZA || UsesZT0) {
1329     if (const auto *FPT = FD->getType()->getAs<FunctionProtoType>()) {
1330       FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
1331       if (EPI.AArch64SMEAttributes & FunctionType::SME_AgnosticZAStateMask)
1332         Diag(FD->getLocation(), diag::err_sme_unsupported_agnostic_new);
1333     }
1334   }
1335 
1336   if (FD->hasAttr<ArmLocallyStreamingAttr>()) {
1337     if (FD->getReturnType()->isSizelessVectorType())
1338       Diag(FD->getLocation(),
1339            diag::warn_sme_locally_streaming_has_vl_args_returns)
1340           << /*IsArg=*/false;
1341     if (llvm::any_of(FD->parameters(), [](ParmVarDecl *P) {
1342           return P->getOriginalType()->isSizelessVectorType();
1343         }))
1344       Diag(FD->getLocation(),
1345            diag::warn_sme_locally_streaming_has_vl_args_returns)
1346           << /*IsArg=*/true;
1347   }
1348   if (const auto *FPT = FD->getType()->getAs<FunctionProtoType>()) {
1349     FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
1350     UsesSM |= EPI.AArch64SMEAttributes & FunctionType::SME_PStateSMEnabledMask;
1351     UsesZA |= FunctionType::getArmZAState(EPI.AArch64SMEAttributes) !=
1352               FunctionType::ARM_None;
1353     UsesZT0 |= FunctionType::getArmZT0State(EPI.AArch64SMEAttributes) !=
1354                FunctionType::ARM_None;
1355   }
1356 
1357   ASTContext &Context = getASTContext();
1358   if (UsesSM || UsesZA) {
1359     llvm::StringMap<bool> FeatureMap;
1360     Context.getFunctionFeatureMap(FeatureMap, FD);
1361     if (!FeatureMap.contains("sme")) {
1362       if (UsesSM)
1363         Diag(FD->getLocation(),
1364              diag::err_sme_definition_using_sm_in_non_sme_target);
1365       else
1366         Diag(FD->getLocation(),
1367              diag::err_sme_definition_using_za_in_non_sme_target);
1368     }
1369   }
1370   if (UsesZT0) {
1371     llvm::StringMap<bool> FeatureMap;
1372     Context.getFunctionFeatureMap(FeatureMap, FD);
1373     if (!FeatureMap.contains("sme2")) {
1374       Diag(FD->getLocation(),
1375            diag::err_sme_definition_using_zt0_in_non_sme2_target);
1376     }
1377   }
1378 }
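
// Illustrative sketch of a definition that triggers the locally-streaming
// warning above, because it takes and returns VL-dependent (sizeless vector)
// types (assuming arm_sve.h and an SME-enabled target):
//
//   __arm_locally_streaming svint32_t double_it(svint32_t v) {
//     return svadd_x(svptrue_b32(), v, v);
//   }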
1379 
1380 } // namespace clang
1381