//===--- InterpBuiltin.cpp - Interpreter for the constexpr VM ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "../ExprConstShared.h"
#include "Boolean.h"
#include "Compiler.h"
#include "EvalEmitter.h"
#include "Interp.h"
#include "InterpBuiltinBitCast.h"
#include "PrimType.h"
#include "clang/AST/OSLog.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/SipHash.h"

namespace clang {
namespace interp {

static unsigned callArgSize(const InterpState &S, const CallExpr *C) {
  unsigned O = 0;

  for (const Expr *E : C->arguments()) {
    O += align(primSize(*S.getContext().classify(E)));
  }

  return O;
}

template <typename T>
static T getParam(const InterpFrame *Frame, unsigned Index) {
  assert(Frame->getFunction()->getNumParams() > Index);
  unsigned Offset = Frame->getFunction()->getParamOffset(Index);
  return Frame->getParam<T>(Offset);
}

static APSInt getAPSIntParam(const InterpFrame *Frame, unsigned Index) {
  APSInt R;
  unsigned Offset = Frame->getFunction()->getParamOffset(Index);
  INT_TYPE_SWITCH(Frame->getFunction()->getParamType(Index),
                  R = Frame->getParam<T>(Offset).toAPSInt());
  return R;
}

static PrimType getIntPrimType(const InterpState &S) {
  const TargetInfo &TI = S.getASTContext().getTargetInfo();
  unsigned IntWidth = TI.getIntWidth();

  if (IntWidth == 32)
    return PT_Sint32;
  else if (IntWidth == 16)
    return PT_Sint16;
  llvm_unreachable("Int isn't 16 or 32 bit?");
}

static PrimType getLongPrimType(const InterpState &S) {
  const TargetInfo &TI = S.getASTContext().getTargetInfo();
  unsigned LongWidth = TI.getLongWidth();

  if (LongWidth == 64)
    return PT_Sint64;
  else if (LongWidth == 32)
    return PT_Sint32;
  else if (LongWidth == 16)
    return PT_Sint16;
  llvm_unreachable("long isn't 16, 32 or 64 bit?");
}

/// Peek an integer value from the stack into an APSInt.
static APSInt peekToAPSInt(InterpStack &Stk, PrimType T, size_t Offset = 0) {
  if (Offset == 0)
    Offset = align(primSize(T));

  APSInt R;
  INT_TYPE_SWITCH(T, R = Stk.peek<T>(Offset).toAPSInt());

  return R;
}
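
// Hedged usage sketch for peekToAPSInt above (not part of the original
// source): with a single 32-bit int on top of the stack,
//   APSInt V = peekToAPSInt(S.Stk, PT_Sint32);
// reads that value at the default offset align(primSize(PT_Sint32)), i.e.
// directly below the stack top.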

/// Pushes \p Val on the stack as the type given by \p QT.
static void pushInteger(InterpState &S, const APSInt &Val, QualType QT) {
  assert(QT->isSignedIntegerOrEnumerationType() ||
         QT->isUnsignedIntegerOrEnumerationType());
  std::optional<PrimType> T = S.getContext().classify(QT);
  assert(T);

  unsigned BitWidth = S.getASTContext().getTypeSize(QT);
  if (QT->isSignedIntegerOrEnumerationType()) {
    int64_t V = Val.getSExtValue();
    INT_TYPE_SWITCH(*T, { S.Stk.push<T>(T::from(V, BitWidth)); });
  } else {
    assert(QT->isUnsignedIntegerOrEnumerationType());
    uint64_t V = Val.getZExtValue();
    INT_TYPE_SWITCH(*T, { S.Stk.push<T>(T::from(V, BitWidth)); });
  }
}

template <typename T>
static void pushInteger(InterpState &S, T Val, QualType QT) {
  if constexpr (std::is_same_v<T, APInt>)
    pushInteger(S, APSInt(Val, !std::is_signed_v<T>), QT);
  else if constexpr (std::is_same_v<T, APSInt>)
    pushInteger(S, Val, QT);
  else
    pushInteger(S,
                APSInt(APInt(sizeof(T) * 8, static_cast<uint64_t>(Val),
                             std::is_signed_v<T>),
                       !std::is_signed_v<T>),
                QT);
}

static void assignInteger(Pointer &Dest, PrimType ValueT, const APSInt &Value) {
  INT_TYPE_SWITCH_NO_BOOL(
      ValueT, { Dest.deref<T>() = T::from(static_cast<T>(Value)); });
}

static bool retPrimValue(InterpState &S, CodePtr OpPC,
                         std::optional<PrimType> &T) {
  if (!T)
    return RetVoid(S, OpPC);

#define RET_CASE(X)                                                            \
  case X:                                                                      \
    return Ret<X>(S, OpPC);
  switch (*T) {
    RET_CASE(PT_Ptr);
    RET_CASE(PT_FnPtr);
    RET_CASE(PT_Float);
    RET_CASE(PT_Bool);
    RET_CASE(PT_Sint8);
    RET_CASE(PT_Uint8);
    RET_CASE(PT_Sint16);
    RET_CASE(PT_Uint16);
    RET_CASE(PT_Sint32);
    RET_CASE(PT_Uint32);
    RET_CASE(PT_Sint64);
    RET_CASE(PT_Uint64);
    RET_CASE(PT_IntAP);
    RET_CASE(PT_IntAPS);
  default:
    llvm_unreachable("Unsupported return type for builtin function");
  }
#undef RET_CASE
}

static void diagnoseNonConstexprBuiltin(InterpState &S, CodePtr OpPC,
                                        unsigned ID) {
  auto Loc = S.Current->getSource(OpPC);
  if (S.getLangOpts().CPlusPlus11)
    S.CCEDiag(Loc, diag::note_constexpr_invalid_function)
        << /*isConstexpr=*/0 << /*isConstructor=*/0
        << ("'" + S.getASTContext().BuiltinInfo.getName(ID) + "'").str();
  else
    S.CCEDiag(Loc, diag::note_invalid_subexpr_in_const_expr);
}

static bool interp__builtin_is_constant_evaluated(InterpState &S, CodePtr OpPC,
                                                  const InterpFrame *Frame,
                                                  const CallExpr *Call) {
  unsigned Depth = S.Current->getDepth();
  auto isStdCall = [](const FunctionDecl *F) -> bool {
    return F && F->isInStdNamespace() && F->getIdentifier() &&
           F->getIdentifier()->isStr("is_constant_evaluated");
  };
  const InterpFrame *Caller = Frame->Caller;
  // The current frame is the one for __builtin_is_constant_evaluated.
  // The one above that, potentially the one for std::is_constant_evaluated().
  if (S.inConstantContext() && !S.checkingPotentialConstantExpression() &&
      S.getEvalStatus().Diag &&
      (Depth == 1 || (Depth == 2 && isStdCall(Caller->getCallee())))) {
    if (Caller->Caller && isStdCall(Caller->getCallee())) {
      const Expr *E = Caller->Caller->getExpr(Caller->getRetPC());
      S.report(E->getExprLoc(),
               diag::warn_is_constant_evaluated_always_true_constexpr)
          << "std::is_constant_evaluated" << E->getSourceRange();
    } else {
      const Expr *E = Frame->Caller->getExpr(Frame->getRetPC());
      S.report(E->getExprLoc(),
               diag::warn_is_constant_evaluated_always_true_constexpr)
          << "__builtin_is_constant_evaluated" << E->getSourceRange();
    }
  }

  S.Stk.push<Boolean>(Boolean::from(S.inConstantContext()));
  return true;
}

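// Hedged folding examples for interp__builtin_strcmp below (not part of the
// original source); in a constant context both of these fold:
//   static_assert(__builtin_strcmp("abc", "abd") < 0);
//   static_assert(__builtin_strncmp("abc", "abd", 2) == 0); // limit stops early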
static bool interp__builtin_strcmp(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const Function *Func, const CallExpr *Call) {
  unsigned ID = Func->getBuiltinID();
  const Pointer &A = getParam<Pointer>(Frame, 0);
  const Pointer &B = getParam<Pointer>(Frame, 1);

  if (ID == Builtin::BIstrcmp || ID == Builtin::BIstrncmp)
    diagnoseNonConstexprBuiltin(S, OpPC, ID);

  uint64_t Limit = ~static_cast<uint64_t>(0);
  if (ID == Builtin::BIstrncmp || ID == Builtin::BI__builtin_strncmp)
    Limit = peekToAPSInt(S.Stk, *S.getContext().classify(Call->getArg(2)))
                .getZExtValue();

  if (Limit == 0) {
    pushInteger(S, 0, Call->getType());
    return true;
  }

  if (!CheckLive(S, OpPC, A, AK_Read) || !CheckLive(S, OpPC, B, AK_Read))
    return false;

  if (A.isDummy() || B.isDummy())
    return false;

  assert(A.getFieldDesc()->isPrimitiveArray());
  assert(B.getFieldDesc()->isPrimitiveArray());

  unsigned IndexA = A.getIndex();
  unsigned IndexB = B.getIndex();
  int32_t Result = 0;
  uint64_t Steps = 0;
  for (;; ++IndexA, ++IndexB, ++Steps) {
    if (Steps >= Limit)
      break;
    const Pointer &PA = A.atIndex(IndexA);
    const Pointer &PB = B.atIndex(IndexB);
    if (!CheckRange(S, OpPC, PA, AK_Read) ||
        !CheckRange(S, OpPC, PB, AK_Read)) {
      return false;
    }
    uint8_t CA = PA.deref<uint8_t>();
    uint8_t CB = PB.deref<uint8_t>();

    if (CA > CB) {
      Result = 1;
      break;
    } else if (CA < CB) {
      Result = -1;
      break;
    }
    if (CA == 0 || CB == 0)
      break;
  }

  pushInteger(S, Result, Call->getType());
  return true;
}

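// Hedged folding examples for interp__builtin_strlen below (not part of the
// original source):
//   static_assert(__builtin_strlen("hello") == 5);
//   static_assert(__builtin_wcslen(L"ab") == 2);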
static bool interp__builtin_strlen(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const Function *Func, const CallExpr *Call) {
  unsigned ID = Func->getBuiltinID();
  const Pointer &StrPtr = getParam<Pointer>(Frame, 0);

  if (ID == Builtin::BIstrlen || ID == Builtin::BIwcslen)
    diagnoseNonConstexprBuiltin(S, OpPC, ID);

  if (!CheckArray(S, OpPC, StrPtr))
    return false;

  if (!CheckLive(S, OpPC, StrPtr, AK_Read))
    return false;

  if (!CheckDummy(S, OpPC, StrPtr, AK_Read))
    return false;

  assert(StrPtr.getFieldDesc()->isPrimitiveArray());
  unsigned ElemSize = StrPtr.getFieldDesc()->getElemSize();

  if (ID == Builtin::BI__builtin_wcslen || ID == Builtin::BIwcslen) {
    [[maybe_unused]] const ASTContext &AC = S.getASTContext();
    assert(ElemSize == AC.getTypeSizeInChars(AC.getWCharType()).getQuantity());
  }

  size_t Len = 0;
  for (size_t I = StrPtr.getIndex();; ++I, ++Len) {
    const Pointer &ElemPtr = StrPtr.atIndex(I);

    if (!CheckRange(S, OpPC, ElemPtr, AK_Read))
      return false;

    uint32_t Val;
    switch (ElemSize) {
    case 1:
      Val = ElemPtr.deref<uint8_t>();
      break;
    case 2:
      Val = ElemPtr.deref<uint16_t>();
      break;
    case 4:
      Val = ElemPtr.deref<uint32_t>();
      break;
    default:
      llvm_unreachable("Unsupported char size");
    }
    if (Val == 0)
      break;
  }

  pushInteger(S, Len, Call->getType());

  return true;
}

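// Hedged folding examples for interp__builtin_nan below (not part of the
// original source): on an IEEE 754-2008 target,
//   __builtin_nan("")    // quiet NaN, zero payload
//   __builtin_nans("")   // signaling NaN
//   __builtin_nan("0x7") // quiet NaN with payload bits 0x7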
static bool interp__builtin_nan(InterpState &S, CodePtr OpPC,
                                const InterpFrame *Frame, const Function *F,
                                bool Signaling) {
  const Pointer &Arg = getParam<Pointer>(Frame, 0);

  if (!CheckLoad(S, OpPC, Arg))
    return false;

  assert(Arg.getFieldDesc()->isPrimitiveArray());

  // Convert the given string to an integer using StringRef's API.
  llvm::APInt Fill;
  std::string Str;
  assert(Arg.getNumElems() >= 1);
  for (unsigned I = 0;; ++I) {
    const Pointer &Elem = Arg.atIndex(I);

    if (!CheckLoad(S, OpPC, Elem))
      return false;

    if (Elem.deref<int8_t>() == 0)
      break;

    Str += Elem.deref<char>();
  }

  // Treat empty strings as if they were zero.
  if (Str.empty())
    Fill = llvm::APInt(32, 0);
  else if (StringRef(Str).getAsInteger(0, Fill))
    return false;

  const llvm::fltSemantics &TargetSemantics =
      S.getASTContext().getFloatTypeSemantics(F->getDecl()->getReturnType());

  Floating Result;
  if (S.getASTContext().getTargetInfo().isNan2008()) {
    if (Signaling)
      Result = Floating(
          llvm::APFloat::getSNaN(TargetSemantics, /*Negative=*/false, &Fill));
    else
      Result = Floating(
          llvm::APFloat::getQNaN(TargetSemantics, /*Negative=*/false, &Fill));
  } else {
    // Prior to IEEE 754-2008, architectures were allowed to choose whether
    // the first bit of their significand was set for qNaN or sNaN. MIPS chose
    // a different encoding to what became a standard in 2008, and for pre-
    // 2008 revisions, MIPS interpreted sNaN-2008 as qNaN and qNaN-2008 as
    // sNaN. This is now known as "legacy NaN" encoding.
    if (Signaling)
      Result = Floating(
          llvm::APFloat::getQNaN(TargetSemantics, /*Negative=*/false, &Fill));
    else
      Result = Floating(
          llvm::APFloat::getSNaN(TargetSemantics, /*Negative=*/false, &Fill));
  }

  S.Stk.push<Floating>(Result);
  return true;
}

static bool interp__builtin_inf(InterpState &S, CodePtr OpPC,
                                const InterpFrame *Frame, const Function *F) {
  const llvm::fltSemantics &TargetSemantics =
      S.getASTContext().getFloatTypeSemantics(F->getDecl()->getReturnType());

  S.Stk.push<Floating>(Floating::getInf(TargetSemantics));
  return true;
}

static bool interp__builtin_copysign(InterpState &S, CodePtr OpPC,
                                     const InterpFrame *Frame,
                                     const Function *F) {
  const Floating &Arg1 = getParam<Floating>(Frame, 0);
  const Floating &Arg2 = getParam<Floating>(Frame, 1);

  APFloat Copy = Arg1.getAPFloat();
  Copy.copySign(Arg2.getAPFloat());
  S.Stk.push<Floating>(Floating(Copy));

  return true;
}

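// Hedged folding examples for interp__builtin_fmin/fmax below (not part of
// the original source):
//   __builtin_fmin(__builtin_nan(""), 1.0) == 1.0  // NaN operand is dropped
//   __builtin_fmin(+0.0, -0.0) == -0.0             // negative zero wins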
static bool interp__builtin_fmin(InterpState &S, CodePtr OpPC,
                                 const InterpFrame *Frame, const Function *F,
                                 bool IsNumBuiltin) {
  const Floating &LHS = getParam<Floating>(Frame, 0);
  const Floating &RHS = getParam<Floating>(Frame, 1);

  Floating Result;

  if (IsNumBuiltin) {
    Result = llvm::minimumnum(LHS.getAPFloat(), RHS.getAPFloat());
  } else {
    // When comparing zeroes, return -0.0 if one of the zeroes is negative.
    if (LHS.isZero() && RHS.isZero() && RHS.isNegative())
      Result = RHS;
    else if (LHS.isNan() || RHS < LHS)
      Result = RHS;
    else
      Result = LHS;
  }

  S.Stk.push<Floating>(Result);
  return true;
}

static bool interp__builtin_fmax(InterpState &S, CodePtr OpPC,
                                 const InterpFrame *Frame, const Function *Func,
                                 bool IsNumBuiltin) {
  const Floating &LHS = getParam<Floating>(Frame, 0);
  const Floating &RHS = getParam<Floating>(Frame, 1);

  Floating Result;

  if (IsNumBuiltin) {
    Result = llvm::maximumnum(LHS.getAPFloat(), RHS.getAPFloat());
  } else {
    // When comparing zeroes, return +0.0 if one of the zeroes is positive.
    if (LHS.isZero() && RHS.isZero() && LHS.isNegative())
      Result = RHS;
    else if (LHS.isNan() || RHS > LHS)
      Result = RHS;
    else
      Result = LHS;
  }

  S.Stk.push<Floating>(Result);
  return true;
}

/// Defined as __builtin_isnan(...), to accommodate the fact that it can
/// take a float, double, long double, etc.
/// But for us, that's all a Floating anyway.
static bool interp__builtin_isnan(InterpState &S, CodePtr OpPC,
                                  const InterpFrame *Frame, const Function *F,
                                  const CallExpr *Call) {
  const Floating &Arg = S.Stk.peek<Floating>();

  pushInteger(S, Arg.isNan(), Call->getType());
  return true;
}

static bool interp__builtin_issignaling(InterpState &S, CodePtr OpPC,
                                        const InterpFrame *Frame,
                                        const Function *F,
                                        const CallExpr *Call) {
  const Floating &Arg = S.Stk.peek<Floating>();

  pushInteger(S, Arg.isSignaling(), Call->getType());
  return true;
}

static bool interp__builtin_isinf(InterpState &S, CodePtr OpPC,
                                  const InterpFrame *Frame, const Function *F,
                                  bool CheckSign, const CallExpr *Call) {
  const Floating &Arg = S.Stk.peek<Floating>();
  bool IsInf = Arg.isInf();

  if (CheckSign)
    pushInteger(S, IsInf ? (Arg.isNegative() ? -1 : 1) : 0, Call->getType());
  else
    pushInteger(S, Arg.isInf(), Call->getType());
  return true;
}

static bool interp__builtin_isfinite(InterpState &S, CodePtr OpPC,
                                     const InterpFrame *Frame,
                                     const Function *F, const CallExpr *Call) {
  const Floating &Arg = S.Stk.peek<Floating>();

  pushInteger(S, Arg.isFinite(), Call->getType());
  return true;
}

static bool interp__builtin_isnormal(InterpState &S, CodePtr OpPC,
                                     const InterpFrame *Frame,
                                     const Function *F, const CallExpr *Call) {
  const Floating &Arg = S.Stk.peek<Floating>();

  pushInteger(S, Arg.isNormal(), Call->getType());
  return true;
}

static bool interp__builtin_issubnormal(InterpState &S, CodePtr OpPC,
                                        const InterpFrame *Frame,
                                        const Function *F,
                                        const CallExpr *Call) {
  const Floating &Arg = S.Stk.peek<Floating>();

  pushInteger(S, Arg.isDenormal(), Call->getType());
  return true;
}

static bool interp__builtin_iszero(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame, const Function *F,
                                   const CallExpr *Call) {
  const Floating &Arg = S.Stk.peek<Floating>();

  pushInteger(S, Arg.isZero(), Call->getType());
  return true;
}

static bool interp__builtin_signbit(InterpState &S, CodePtr OpPC,
                                    const InterpFrame *Frame, const Function *F,
                                    const CallExpr *Call) {
  const Floating &Arg = S.Stk.peek<Floating>();

  pushInteger(S, Arg.isNegative(), Call->getType());
  return true;
}

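// Hedged folding examples for interp_floating_comparison below (not part of
// the original source):
//   __builtin_isunordered(__builtin_nan(""), 1.0) == 1
//   __builtin_isless(__builtin_nan(""), 1.0) == 0 // unordered compares false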
static bool interp_floating_comparison(InterpState &S, CodePtr OpPC,
                                       const InterpFrame *Frame,
                                       const Function *F,
                                       const CallExpr *Call) {
  const Floating &RHS = S.Stk.peek<Floating>();
  const Floating &LHS = S.Stk.peek<Floating>(align(2u * primSize(PT_Float)));
  unsigned ID = F->getBuiltinID();

  pushInteger(
      S,
      [&] {
        switch (ID) {
        case Builtin::BI__builtin_isgreater:
          return LHS > RHS;
        case Builtin::BI__builtin_isgreaterequal:
          return LHS >= RHS;
        case Builtin::BI__builtin_isless:
          return LHS < RHS;
        case Builtin::BI__builtin_islessequal:
          return LHS <= RHS;
        case Builtin::BI__builtin_islessgreater: {
          ComparisonCategoryResult cmp = LHS.compare(RHS);
          return cmp == ComparisonCategoryResult::Less ||
                 cmp == ComparisonCategoryResult::Greater;
        }
        case Builtin::BI__builtin_isunordered:
          return LHS.compare(RHS) == ComparisonCategoryResult::Unordered;
        default:
          llvm_unreachable("Unexpected builtin ID: Should be a floating point "
                           "comparison function");
        }
      }(),
      Call->getType());
  return true;
}

/// First parameter to __builtin_isfpclass is the floating value, the
/// second one is an integral value.
static bool interp__builtin_isfpclass(InterpState &S, CodePtr OpPC,
                                      const InterpFrame *Frame,
                                      const Function *Func,
                                      const CallExpr *Call) {
  PrimType FPClassArgT = *S.getContext().classify(Call->getArg(1)->getType());
  APSInt FPClassArg = peekToAPSInt(S.Stk, FPClassArgT);
  const Floating &F =
      S.Stk.peek<Floating>(align(primSize(FPClassArgT) + primSize(PT_Float)));

  int32_t Result =
      static_cast<int32_t>((F.classify() & FPClassArg).getZExtValue());
  pushInteger(S, Result, Call->getType());

  return true;
}

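// Hedged folding example for interp__builtin_fpclassify below (not part of
// the original source): the call returns whichever of the first five
// arguments matches the class of the sixth, e.g.
//   __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL,
//                        FP_ZERO, 1.0) == FP_NORMAL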
/// Five int values followed by one floating value.
static bool interp__builtin_fpclassify(InterpState &S, CodePtr OpPC,
                                       const InterpFrame *Frame,
                                       const Function *Func,
                                       const CallExpr *Call) {
  const Floating &Val = S.Stk.peek<Floating>();

  unsigned Index;
  switch (Val.getCategory()) {
  case APFloat::fcNaN:
    Index = 0;
    break;
  case APFloat::fcInfinity:
    Index = 1;
    break;
  case APFloat::fcNormal:
    Index = Val.isDenormal() ? 3 : 2;
    break;
  case APFloat::fcZero:
    Index = 4;
    break;
  }

  // The last argument is first on the stack.
  assert(Index <= 4);
  unsigned IntSize = primSize(getIntPrimType(S));
  unsigned Offset =
      align(primSize(PT_Float)) + ((1 + (4 - Index)) * align(IntSize));

  APSInt I = peekToAPSInt(S.Stk, getIntPrimType(S), Offset);
  pushInteger(S, I, Call->getType());
  return true;
}

// The C standard says "fabs raises no floating-point exceptions,
// even if x is a signaling NaN. The returned value is independent of
// the current rounding direction mode."  Therefore constant folding can
// proceed without regard to the floating point settings.
// Reference, WG14 N2478 F.10.4.3
static bool interp__builtin_fabs(InterpState &S, CodePtr OpPC,
                                 const InterpFrame *Frame,
                                 const Function *Func) {
  const Floating &Val = getParam<Floating>(Frame, 0);

  S.Stk.push<Floating>(Floating::abs(Val));
  return true;
}

static bool interp__builtin_abs(InterpState &S, CodePtr OpPC,
                                const InterpFrame *Frame, const Function *Func,
                                const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Val = peekToAPSInt(S.Stk, ArgT);
  if (Val ==
      APSInt(APInt::getSignedMinValue(Val.getBitWidth()), /*IsUnsigned=*/false))
    return false;
  if (Val.isNegative())
    Val.negate();
  pushInteger(S, Val, Call->getType());
  return true;
}

static bool interp__builtin_popcount(InterpState &S, CodePtr OpPC,
                                     const InterpFrame *Frame,
                                     const Function *Func,
                                     const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Val = peekToAPSInt(S.Stk, ArgT);
  pushInteger(S, Val.popcount(), Call->getType());
  return true;
}

static bool interp__builtin_parity(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const Function *Func, const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Val = peekToAPSInt(S.Stk, ArgT);
  pushInteger(S, Val.popcount() % 2, Call->getType());
  return true;
}

static bool interp__builtin_clrsb(InterpState &S, CodePtr OpPC,
                                  const InterpFrame *Frame,
                                  const Function *Func, const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Val = peekToAPSInt(S.Stk, ArgT);
  pushInteger(S, Val.getBitWidth() - Val.getSignificantBits(), Call->getType());
  return true;
}

static bool interp__builtin_bitreverse(InterpState &S, CodePtr OpPC,
                                       const InterpFrame *Frame,
                                       const Function *Func,
                                       const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Val = peekToAPSInt(S.Stk, ArgT);
  pushInteger(S, Val.reverseBits(), Call->getType());
  return true;
}

static bool interp__builtin_classify_type(InterpState &S, CodePtr OpPC,
                                          const InterpFrame *Frame,
                                          const Function *Func,
                                          const CallExpr *Call) {
  // This is an unevaluated call, so there are no arguments on the stack.
  assert(Call->getNumArgs() == 1);
  const Expr *Arg = Call->getArg(0);

  GCCTypeClass ResultClass =
      EvaluateBuiltinClassifyType(Arg->getType(), S.getLangOpts());
  int32_t ReturnVal = static_cast<int32_t>(ResultClass);
  pushInteger(S, ReturnVal, Call->getType());
  return true;
}

// __builtin_expect(long, long)
// __builtin_expect_with_probability(long, long, double)
static bool interp__builtin_expect(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const Function *Func, const CallExpr *Call) {
  // The return value is simply the value of the first parameter.
  // We ignore the probability.
  unsigned NumArgs = Call->getNumArgs();
  assert(NumArgs == 2 || NumArgs == 3);

  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  unsigned Offset = align(primSize(getLongPrimType(S))) * 2;
  if (NumArgs == 3)
    Offset += align(primSize(PT_Float));

  APSInt Val = peekToAPSInt(S.Stk, ArgT, Offset);
  pushInteger(S, Val, Call->getType());
  return true;
}

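// Hedged folding examples for the rotate builtins below (not part of the
// original source); the amount is reduced modulo the bit width:
//   __builtin_rotateleft8(0x01, 5) == 0x20
//   __builtin_rotateright8(0x01, 1) == 0x80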
/// rotateleft(value, amount)
static bool interp__builtin_rotate(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const Function *Func, const CallExpr *Call,
                                   bool Right) {
  PrimType AmountT = *S.getContext().classify(Call->getArg(1)->getType());
  PrimType ValueT = *S.getContext().classify(Call->getArg(0)->getType());

  APSInt Amount = peekToAPSInt(S.Stk, AmountT);
  APSInt Value = peekToAPSInt(
      S.Stk, ValueT, align(primSize(AmountT)) + align(primSize(ValueT)));

  APSInt Result;
  if (Right)
    Result = APSInt(Value.rotr(Amount.urem(Value.getBitWidth())),
                    /*IsUnsigned=*/true);
  else // Left.
    Result = APSInt(Value.rotl(Amount.urem(Value.getBitWidth())),
                    /*IsUnsigned=*/true);

  pushInteger(S, Result, Call->getType());
  return true;
}

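// Hedged folding example for interp__builtin_ffs below (not part of the
// original source): __builtin_ffs(0) == 0 and __builtin_ffs(8) == 4, i.e.
// the 1-based index of the lowest set bit.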
static bool interp__builtin_ffs(InterpState &S, CodePtr OpPC,
                                const InterpFrame *Frame, const Function *Func,
                                const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Value = peekToAPSInt(S.Stk, ArgT);

  uint64_t N = Value.countr_zero();
  pushInteger(S, N == Value.getBitWidth() ? 0 : N + 1, Call->getType());
  return true;
}

static bool interp__builtin_addressof(InterpState &S, CodePtr OpPC,
                                      const InterpFrame *Frame,
                                      const Function *Func,
                                      const CallExpr *Call) {
  assert(Call->getArg(0)->isLValue());
  PrimType PtrT = S.getContext().classify(Call->getArg(0)).value_or(PT_Ptr);

  if (PtrT == PT_FnPtr) {
    const FunctionPointer &Arg = S.Stk.peek<FunctionPointer>();
    S.Stk.push<FunctionPointer>(Arg);
  } else if (PtrT == PT_Ptr) {
    const Pointer &Arg = S.Stk.peek<Pointer>();
    S.Stk.push<Pointer>(Arg);
  } else {
    assert(false && "Unsupported pointer type passed to __builtin_addressof()");
  }
  return true;
}

static bool interp__builtin_move(InterpState &S, CodePtr OpPC,
                                 const InterpFrame *Frame, const Function *Func,
                                 const CallExpr *Call) {
  PrimType ArgT = S.getContext().classify(Call->getArg(0)).value_or(PT_Ptr);

  TYPE_SWITCH(ArgT, const T &Arg = S.Stk.peek<T>(); S.Stk.push<T>(Arg););

  return Func->getDecl()->isConstexpr();
}

static bool interp__builtin_eh_return_data_regno(InterpState &S, CodePtr OpPC,
                                                 const InterpFrame *Frame,
                                                 const Function *Func,
                                                 const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Arg = peekToAPSInt(S.Stk, ArgT);

  int Result = S.getASTContext().getTargetInfo().getEHDataRegisterNumber(
      Arg.getZExtValue());
  pushInteger(S, Result, Call->getType());
  return true;
}

/// Just takes the first Argument to the call and puts it on the stack.
static bool noopPointer(InterpState &S, CodePtr OpPC, const InterpFrame *Frame,
                        const Function *Func, const CallExpr *Call) {
  const Pointer &Arg = S.Stk.peek<Pointer>();
  S.Stk.push<Pointer>(Arg);
  return true;
}

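// Hedged folding example for the overflow builtins below (not part of the
// original source):
//   int R;
//   __builtin_add_overflow(INT_MAX, 1, &R) // yields true, R == INT_MIN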
// Two integral values followed by a pointer (lhs, rhs, resultOut)
static bool interp__builtin_overflowop(InterpState &S, CodePtr OpPC,
                                       const InterpFrame *Frame,
                                       const Function *Func,
                                       const CallExpr *Call) {
  Pointer &ResultPtr = S.Stk.peek<Pointer>();
  if (ResultPtr.isDummy())
    return false;

  unsigned BuiltinOp = Func->getBuiltinID();
  PrimType RHST = *S.getContext().classify(Call->getArg(1)->getType());
  PrimType LHST = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt RHS = peekToAPSInt(S.Stk, RHST,
                            align(primSize(PT_Ptr)) + align(primSize(RHST)));
  APSInt LHS = peekToAPSInt(S.Stk, LHST,
                            align(primSize(PT_Ptr)) + align(primSize(RHST)) +
                                align(primSize(LHST)));
  QualType ResultType = Call->getArg(2)->getType()->getPointeeType();
  PrimType ResultT = *S.getContext().classify(ResultType);
  bool Overflow;

  APSInt Result;
  if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
      BuiltinOp == Builtin::BI__builtin_sub_overflow ||
      BuiltinOp == Builtin::BI__builtin_mul_overflow) {
    bool IsSigned = LHS.isSigned() || RHS.isSigned() ||
                    ResultType->isSignedIntegerOrEnumerationType();
    bool AllSigned = LHS.isSigned() && RHS.isSigned() &&
                     ResultType->isSignedIntegerOrEnumerationType();
    uint64_t LHSSize = LHS.getBitWidth();
    uint64_t RHSSize = RHS.getBitWidth();
    uint64_t ResultSize = S.getASTContext().getTypeSize(ResultType);
    uint64_t MaxBits = std::max(std::max(LHSSize, RHSSize), ResultSize);

    // Add an additional bit if the signedness isn't uniformly agreed to. We
    // could do this ONLY if there is a signed and an unsigned that both have
    // MaxBits, but the code to check that is pretty nasty.  The issue will be
    // caught in the shrink-to-result later anyway.
    if (IsSigned && !AllSigned)
      ++MaxBits;

    LHS = APSInt(LHS.extOrTrunc(MaxBits), !IsSigned);
    RHS = APSInt(RHS.extOrTrunc(MaxBits), !IsSigned);
    Result = APSInt(MaxBits, !IsSigned);
  }

  // Find largest int.
  switch (BuiltinOp) {
  default:
    llvm_unreachable("Invalid value for BuiltinOp");
  case Builtin::BI__builtin_add_overflow:
  case Builtin::BI__builtin_sadd_overflow:
  case Builtin::BI__builtin_saddl_overflow:
  case Builtin::BI__builtin_saddll_overflow:
  case Builtin::BI__builtin_uadd_overflow:
  case Builtin::BI__builtin_uaddl_overflow:
  case Builtin::BI__builtin_uaddll_overflow:
    Result = LHS.isSigned() ? LHS.sadd_ov(RHS, Overflow)
                            : LHS.uadd_ov(RHS, Overflow);
    break;
  case Builtin::BI__builtin_sub_overflow:
  case Builtin::BI__builtin_ssub_overflow:
  case Builtin::BI__builtin_ssubl_overflow:
  case Builtin::BI__builtin_ssubll_overflow:
  case Builtin::BI__builtin_usub_overflow:
  case Builtin::BI__builtin_usubl_overflow:
  case Builtin::BI__builtin_usubll_overflow:
    Result = LHS.isSigned() ? LHS.ssub_ov(RHS, Overflow)
                            : LHS.usub_ov(RHS, Overflow);
    break;
  case Builtin::BI__builtin_mul_overflow:
  case Builtin::BI__builtin_smul_overflow:
  case Builtin::BI__builtin_smull_overflow:
  case Builtin::BI__builtin_smulll_overflow:
  case Builtin::BI__builtin_umul_overflow:
  case Builtin::BI__builtin_umull_overflow:
  case Builtin::BI__builtin_umulll_overflow:
    Result = LHS.isSigned() ? LHS.smul_ov(RHS, Overflow)
                            : LHS.umul_ov(RHS, Overflow);
    break;
  }

  // In the case where multiple sizes are allowed, truncate and see if
  // the values are the same.
  if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
      BuiltinOp == Builtin::BI__builtin_sub_overflow ||
      BuiltinOp == Builtin::BI__builtin_mul_overflow) {
    // APSInt doesn't have a TruncOrSelf, so we use extOrTrunc instead,
    // since it will give us the behavior of a TruncOrSelf in the case where
    // its parameter <= its size.  We previously set Result to be at least the
    // type-size of the result, so getTypeSize(ResultType) <=
    // Result.getBitWidth() here.
    APSInt Temp = Result.extOrTrunc(S.getASTContext().getTypeSize(ResultType));
    Temp.setIsSigned(ResultType->isSignedIntegerOrEnumerationType());

    if (!APSInt::isSameValue(Temp, Result))
      Overflow = true;
    Result = Temp;
  }

  // Write Result to ResultPtr and put Overflow on the stack.
  assignInteger(ResultPtr, ResultT, Result);
  ResultPtr.initialize();
  assert(Func->getDecl()->getReturnType()->isBooleanType());
  S.Stk.push<Boolean>(Overflow);
  return true;
}

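// Hedged folding example for interp__builtin_carryop below (not part of the
// original source):
//   unsigned CarryOut;
//   unsigned Sum = __builtin_addc(UINT_MAX, 1u, 0u, &CarryOut);
//   // Sum == 0, CarryOut == 1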
/// Three integral values followed by a pointer (lhs, rhs, carry, carryOut).
static bool interp__builtin_carryop(InterpState &S, CodePtr OpPC,
                                    const InterpFrame *Frame,
                                    const Function *Func,
                                    const CallExpr *Call) {
  unsigned BuiltinOp = Func->getBuiltinID();
  PrimType LHST = *S.getContext().classify(Call->getArg(0)->getType());
  PrimType RHST = *S.getContext().classify(Call->getArg(1)->getType());
  PrimType CarryT = *S.getContext().classify(Call->getArg(2)->getType());
  APSInt RHS = peekToAPSInt(S.Stk, RHST,
                            align(primSize(PT_Ptr)) + align(primSize(CarryT)) +
                                align(primSize(RHST)));
  APSInt LHS =
      peekToAPSInt(S.Stk, LHST,
                   align(primSize(PT_Ptr)) + align(primSize(RHST)) +
                       align(primSize(CarryT)) + align(primSize(LHST)));
  APSInt CarryIn = peekToAPSInt(
      S.Stk, LHST, align(primSize(PT_Ptr)) + align(primSize(CarryT)));
  APSInt CarryOut;

  APSInt Result;
  // Copy the number of bits and sign.
  Result = LHS;
  CarryOut = LHS;

  bool FirstOverflowed = false;
  bool SecondOverflowed = false;
  switch (BuiltinOp) {
  default:
    llvm_unreachable("Invalid value for BuiltinOp");
  case Builtin::BI__builtin_addcb:
  case Builtin::BI__builtin_addcs:
  case Builtin::BI__builtin_addc:
  case Builtin::BI__builtin_addcl:
  case Builtin::BI__builtin_addcll:
    Result =
        LHS.uadd_ov(RHS, FirstOverflowed).uadd_ov(CarryIn, SecondOverflowed);
    break;
  case Builtin::BI__builtin_subcb:
  case Builtin::BI__builtin_subcs:
  case Builtin::BI__builtin_subc:
  case Builtin::BI__builtin_subcl:
  case Builtin::BI__builtin_subcll:
    Result =
        LHS.usub_ov(RHS, FirstOverflowed).usub_ov(CarryIn, SecondOverflowed);
    break;
  }
  // It is possible for both overflows to happen but CGBuiltin uses an OR so
  // this is consistent.
  CarryOut = (uint64_t)(FirstOverflowed | SecondOverflowed);

  Pointer &CarryOutPtr = S.Stk.peek<Pointer>();
  QualType CarryOutType = Call->getArg(3)->getType()->getPointeeType();
  PrimType CarryOutT = *S.getContext().classify(CarryOutType);
  assignInteger(CarryOutPtr, CarryOutT, CarryOut);
  CarryOutPtr.initialize();

  assert(Call->getType() == Call->getArg(0)->getType());
  pushInteger(S, Result, Call->getType());
  return true;
}

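// Hedged folding examples for interp__builtin_clz below (not part of the
// original source), assuming 32-bit int:
//   __builtin_clz(1) == 31
//   __builtin_clzg(0u, 32) == 32 // fallback argument used for zero
//   __lzcnt(0) == 32             // MS intrinsic: defined for zero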
static bool interp__builtin_clz(InterpState &S, CodePtr OpPC,
                                const InterpFrame *Frame, const Function *Func,
                                const CallExpr *Call) {
  unsigned CallSize = callArgSize(S, Call);
  unsigned BuiltinOp = Func->getBuiltinID();
  PrimType ValT = *S.getContext().classify(Call->getArg(0));
  const APSInt &Val = peekToAPSInt(S.Stk, ValT, CallSize);

  // When the argument is 0, the result of GCC builtins is undefined, whereas
  // for Microsoft intrinsics, the result is the bit-width of the argument.
  bool ZeroIsUndefined = BuiltinOp != Builtin::BI__lzcnt16 &&
                         BuiltinOp != Builtin::BI__lzcnt &&
                         BuiltinOp != Builtin::BI__lzcnt64;

  if (Val == 0) {
    if (Func->getBuiltinID() == Builtin::BI__builtin_clzg &&
        Call->getNumArgs() == 2) {
      // We have a fallback parameter.
      PrimType FallbackT = *S.getContext().classify(Call->getArg(1));
      const APSInt &Fallback = peekToAPSInt(S.Stk, FallbackT);
      pushInteger(S, Fallback, Call->getType());
      return true;
    }

    if (ZeroIsUndefined)
      return false;
  }

  pushInteger(S, Val.countl_zero(), Call->getType());
  return true;
}

static bool interp__builtin_ctz(InterpState &S, CodePtr OpPC,
                                const InterpFrame *Frame, const Function *Func,
                                const CallExpr *Call) {
  unsigned CallSize = callArgSize(S, Call);
  PrimType ValT = *S.getContext().classify(Call->getArg(0));
  const APSInt &Val = peekToAPSInt(S.Stk, ValT, CallSize);

  if (Val == 0) {
    if (Func->getBuiltinID() == Builtin::BI__builtin_ctzg &&
        Call->getNumArgs() == 2) {
      // We have a fallback parameter.
      PrimType FallbackT = *S.getContext().classify(Call->getArg(1));
      const APSInt &Fallback = peekToAPSInt(S.Stk, FallbackT);
      pushInteger(S, Fallback, Call->getType());
      return true;
    }
    return false;
  }

  pushInteger(S, Val.countr_zero(), Call->getType());
  return true;
}

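// Hedged folding example for interp__builtin_bswap below (not part of the
// original source): __builtin_bswap32(0x12345678) == 0x78563412.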
static bool interp__builtin_bswap(InterpState &S, CodePtr OpPC,
                                  const InterpFrame *Frame,
                                  const Function *Func, const CallExpr *Call) {
  PrimType ReturnT = *S.getContext().classify(Call->getType());
  PrimType ValT = *S.getContext().classify(Call->getArg(0));
  const APSInt &Val = peekToAPSInt(S.Stk, ValT);
  assert(Val.getActiveBits() <= 64);

  INT_TYPE_SWITCH(ReturnT,
                  { S.Stk.push<T>(T::from(Val.byteSwap().getZExtValue())); });
  return true;
}

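// Hedged folding example for the lock-free builtins below (not part of the
// original source): on typical targets
//   __atomic_always_lock_free(sizeof(int), nullptr)
// folds to true, since the size is a power of two within the inline atomic
// width and the null pointer imposes no extra alignment constraint.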
/// bool __atomic_always_lock_free(size_t, void const volatile*)
/// bool __atomic_is_lock_free(size_t, void const volatile*)
/// bool __c11_atomic_is_lock_free(size_t)
static bool interp__builtin_atomic_lock_free(InterpState &S, CodePtr OpPC,
                                             const InterpFrame *Frame,
                                             const Function *Func,
                                             const CallExpr *Call) {
  unsigned BuiltinOp = Func->getBuiltinID();

  PrimType ValT = *S.getContext().classify(Call->getArg(0));
  unsigned SizeValOffset = 0;
  if (BuiltinOp != Builtin::BI__c11_atomic_is_lock_free)
    SizeValOffset = align(primSize(ValT)) + align(primSize(PT_Ptr));
  const APSInt &SizeVal = peekToAPSInt(S.Stk, ValT, SizeValOffset);

  auto returnBool = [&S](bool Value) -> bool {
    S.Stk.push<Boolean>(Value);
    return true;
  };

  // For __atomic_is_lock_free(sizeof(_Atomic(T))), if the size is a power
  // of two less than or equal to the maximum inline atomic width, we know it
  // is lock-free.  If the size isn't a power of two, or greater than the
  // maximum alignment where we promote atomics, we know it is not lock-free
  // (at least not in the sense of atomic_is_lock_free).  Otherwise,
  // the answer can only be determined at runtime; for example, 16-byte
  // atomics have lock-free implementations on some, but not all,
  // x86-64 processors.

  // Check power-of-two.
  CharUnits Size = CharUnits::fromQuantity(SizeVal.getZExtValue());
  if (Size.isPowerOfTwo()) {
    // Check against inlining width.
    unsigned InlineWidthBits =
        S.getASTContext().getTargetInfo().getMaxAtomicInlineWidth();
    if (Size <= S.getASTContext().toCharUnitsFromBits(InlineWidthBits)) {
      // OK, we will inline appropriately-aligned operations of this size,
      // and _Atomic(T) is appropriately-aligned.
      if (BuiltinOp == Builtin::BI__c11_atomic_is_lock_free ||
          Size == CharUnits::One())
        return returnBool(true);

      // Same for null pointers.
      assert(BuiltinOp != Builtin::BI__c11_atomic_is_lock_free);
      const Pointer &Ptr = S.Stk.peek<Pointer>();
      if (Ptr.isZero())
        return returnBool(true);

      if (Ptr.isIntegralPointer()) {
        uint64_t IntVal = Ptr.getIntegerRepresentation();
        if (APSInt(APInt(64, IntVal, false), true).isAligned(Size.getAsAlign()))
          return returnBool(true);
      }

      const Expr *PtrArg = Call->getArg(1);
      // Otherwise, check the type's alignment against Size.
      if (const auto *ICE = dyn_cast<ImplicitCastExpr>(PtrArg)) {
        // Drop the potential implicit-cast to 'const volatile void*', getting
        // the underlying type.
        if (ICE->getCastKind() == CK_BitCast)
          PtrArg = ICE->getSubExpr();
      }

      if (auto PtrTy = PtrArg->getType()->getAs<PointerType>()) {
        QualType PointeeType = PtrTy->getPointeeType();
        if (!PointeeType->isIncompleteType() &&
            S.getASTContext().getTypeAlignInChars(PointeeType) >= Size) {
          // OK, we will inline operations on this object.
          return returnBool(true);
        }
      }
    }
  }

  if (BuiltinOp == Builtin::BI__atomic_always_lock_free)
    return returnBool(false);

  return false;
}

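// Hedged folding example for interp__builtin_complex below (not part of the
// original source): __builtin_complex(1.0, 2.0) yields a _Complex double
// with real part 1.0 and imaginary part 2.0.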
/// __builtin_complex(float A, float B);
static bool interp__builtin_complex(InterpState &S, CodePtr OpPC,
                                    const InterpFrame *Frame,
                                    const Function *Func,
                                    const CallExpr *Call) {
  const Floating &Arg2 = S.Stk.peek<Floating>();
  const Floating &Arg1 = S.Stk.peek<Floating>(align(primSize(PT_Float)) * 2);
  Pointer &Result = S.Stk.peek<Pointer>(align(primSize(PT_Float)) * 2 +
                                        align(primSize(PT_Ptr)));

  Result.atIndex(0).deref<Floating>() = Arg1;
  Result.atIndex(0).initialize();
  Result.atIndex(1).deref<Floating>() = Arg2;
  Result.atIndex(1).initialize();
  Result.initialize();

  return true;
}

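// Hedged folding examples for the alignment builtins below (not part of the
// original source), taking the integer path:
//   __builtin_align_up(7, 8) == 8
//   __builtin_align_down(7, 8) == 0
//   __builtin_is_aligned(8, 8)  // true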
/// __builtin_is_aligned()
/// __builtin_align_up()
/// __builtin_align_down()
/// The first parameter is either an integer or a pointer.
/// The second parameter is the requested alignment as an integer.
static bool interp__builtin_is_aligned_up_down(InterpState &S, CodePtr OpPC,
                                               const InterpFrame *Frame,
                                               const Function *Func,
                                               const CallExpr *Call) {
  unsigned BuiltinOp = Func->getBuiltinID();
  unsigned CallSize = callArgSize(S, Call);

  PrimType AlignmentT = *S.Ctx.classify(Call->getArg(1));
  const APSInt &Alignment = peekToAPSInt(S.Stk, AlignmentT);

  if (Alignment < 0 || !Alignment.isPowerOf2()) {
    S.FFDiag(Call, diag::note_constexpr_invalid_alignment) << Alignment;
    return false;
  }
  unsigned SrcWidth = S.getASTContext().getIntWidth(Call->getArg(0)->getType());
  APSInt MaxValue(APInt::getOneBitSet(SrcWidth, SrcWidth - 1));
  if (APSInt::compareValues(Alignment, MaxValue) > 0) {
    S.FFDiag(Call, diag::note_constexpr_alignment_too_big)
        << MaxValue << Call->getArg(0)->getType() << Alignment;
    return false;
  }

  // The first parameter is either an integer or a pointer (but not a function
  // pointer).
  PrimType FirstArgT = *S.Ctx.classify(Call->getArg(0));

  if (isIntegralType(FirstArgT)) {
    const APSInt &Src = peekToAPSInt(S.Stk, FirstArgT, CallSize);
    APSInt Align = Alignment.extOrTrunc(Src.getBitWidth());
    if (BuiltinOp == Builtin::BI__builtin_align_up) {
      APSInt AlignedVal =
          APSInt((Src + (Align - 1)) & ~(Align - 1), Src.isUnsigned());
      pushInteger(S, AlignedVal, Call->getType());
    } else if (BuiltinOp == Builtin::BI__builtin_align_down) {
      APSInt AlignedVal = APSInt(Src & ~(Align - 1), Src.isUnsigned());
      pushInteger(S, AlignedVal, Call->getType());
    } else {
      assert(*S.Ctx.classify(Call->getType()) == PT_Bool);
      S.Stk.push<Boolean>((Src & (Align - 1)) == 0);
    }
    return true;
  }

  assert(FirstArgT == PT_Ptr);
  const Pointer &Ptr = S.Stk.peek<Pointer>(CallSize);

  unsigned PtrOffset = Ptr.getByteOffset();
  PtrOffset = Ptr.getIndex();
  CharUnits BaseAlignment =
      S.getASTContext().getDeclAlign(Ptr.getDeclDesc()->asValueDecl());
  CharUnits PtrAlign =
      BaseAlignment.alignmentAtOffset(CharUnits::fromQuantity(PtrOffset));

  if (BuiltinOp == Builtin::BI__builtin_is_aligned) {
    if (PtrAlign.getQuantity() >= Alignment) {
      S.Stk.push<Boolean>(true);
      return true;
    }
    // If the alignment is not known to be sufficient, some cases could still
    // be aligned at run time. However, if the requested alignment is less than
    // or equal to the base alignment and the offset is not aligned, we know
    // that the run-time value can never be aligned.
    if (BaseAlignment.getQuantity() >= Alignment &&
        PtrAlign.getQuantity() < Alignment) {
      S.Stk.push<Boolean>(false);
      return true;
    }

    S.FFDiag(Call->getArg(0), diag::note_constexpr_alignment_compute)
        << Alignment;
    return false;
  }

  assert(BuiltinOp == Builtin::BI__builtin_align_down ||
         BuiltinOp == Builtin::BI__builtin_align_up);

  // For align_up/align_down, we can return the same value if the alignment
  // is known to be greater or equal to the requested value.
  if (PtrAlign.getQuantity() >= Alignment) {
    S.Stk.push<Pointer>(Ptr);
    return true;
  }

  // The alignment could be greater than the minimum at run-time, so we cannot
  // infer much about the resulting pointer value. One case is possible:
  // For `_Alignas(32) char buf[N]; __builtin_align_down(&buf[idx], 32)` we
  // can infer the correct index if the requested alignment is smaller than
  // the base alignment so we can perform the computation on the offset.
  if (BaseAlignment.getQuantity() >= Alignment) {
    assert(Alignment.getBitWidth() <= 64 &&
           "Cannot handle > 64-bit address-space");
    uint64_t Alignment64 = Alignment.getZExtValue();
    CharUnits NewOffset =
        CharUnits::fromQuantity(BuiltinOp == Builtin::BI__builtin_align_down
                                    ? llvm::alignDown(PtrOffset, Alignment64)
                                    : llvm::alignTo(PtrOffset, Alignment64));

    S.Stk.push<Pointer>(Ptr.atIndex(NewOffset.getQuantity()));
    return true;
  }

  // Otherwise, we cannot constant-evaluate the result.
  S.FFDiag(Call->getArg(0), diag::note_constexpr_alignment_adjust) << Alignment;
  return false;
}

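// Hedged folding example for interp__builtin_assume_aligned below (not part
// of the original source): __builtin_assume_aligned(P, 16) folds to P when
// 16-byte alignment is provable, and otherwise rejects the evaluation with
// one of the insufficient-alignment notes.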
/// __builtin_assume_aligned(Ptr, Alignment[, ExtraOffset])
static bool interp__builtin_assume_aligned(InterpState &S, CodePtr OpPC,
                                           const InterpFrame *Frame,
                                           const Function *Func,
                                           const CallExpr *Call) {
  assert(Call->getNumArgs() == 2 || Call->getNumArgs() == 3);

  // Might be called with function pointers in C.
  std::optional<PrimType> PtrT = S.Ctx.classify(Call->getArg(0));
  if (PtrT != PT_Ptr)
    return false;

  unsigned ArgSize = callArgSize(S, Call);
  const Pointer &Ptr = S.Stk.peek<Pointer>(ArgSize);
  std::optional<APSInt> ExtraOffset;
  APSInt Alignment;
  if (Call->getNumArgs() == 2) {
    Alignment = peekToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(1)));
  } else {
    PrimType AlignmentT = *S.Ctx.classify(Call->getArg(1));
    PrimType ExtraOffsetT = *S.Ctx.classify(Call->getArg(2));
    Alignment = peekToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(1)),
                             align(primSize(AlignmentT)) +
                                 align(primSize(ExtraOffsetT)));
    ExtraOffset = peekToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(2)));
  }

  CharUnits Align = CharUnits::fromQuantity(Alignment.getZExtValue());

  // If there is a base object, then it must have the correct alignment.
  if (Ptr.isBlockPointer()) {
    CharUnits BaseAlignment;
    if (const auto *VD = Ptr.getDeclDesc()->asValueDecl())
      BaseAlignment = S.getASTContext().getDeclAlign(VD);
    else if (const auto *E = Ptr.getDeclDesc()->asExpr())
      BaseAlignment = GetAlignOfExpr(S.getASTContext(), E, UETT_AlignOf);

    if (BaseAlignment < Align) {
      S.CCEDiag(Call->getArg(0),
                diag::note_constexpr_baa_insufficient_alignment)
          << 0 << BaseAlignment.getQuantity() << Align.getQuantity();
      return false;
    }
  }

  APValue AV = Ptr.toAPValue(S.getASTContext());
  CharUnits AVOffset = AV.getLValueOffset();
  if (ExtraOffset)
    AVOffset -= CharUnits::fromQuantity(ExtraOffset->getZExtValue());
  if (AVOffset.alignTo(Align) != AVOffset) {
    if (Ptr.isBlockPointer())
      S.CCEDiag(Call->getArg(0),
                diag::note_constexpr_baa_insufficient_alignment)
          << 1 << AVOffset.getQuantity() << Align.getQuantity();
    else
      S.CCEDiag(Call->getArg(0),
                diag::note_constexpr_baa_value_insufficient_alignment)
          << AVOffset.getQuantity() << Align.getQuantity();
    return false;
  }

  S.Stk.push<Pointer>(Ptr);
  return true;
}

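// Hedged folding example for interp__builtin_ia32_bextr below (not part of
// the original source): the low control byte is the start bit and the next
// byte is the length, so extracting bits [15:8] gives
//   __builtin_ia32_bextr_u32(0x12345678, 0x0808) == 0x56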
1311 static bool interp__builtin_ia32_bextr(InterpState &S, CodePtr OpPC,
1312                                        const InterpFrame *Frame,
1313                                        const Function *Func,
1314                                        const CallExpr *Call) {
1315   if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() ||
1316       !Call->getArg(1)->getType()->isIntegerType())
1317     return false;
1318 
1319   PrimType ValT = *S.Ctx.classify(Call->getArg(0));
1320   PrimType IndexT = *S.Ctx.classify(Call->getArg(1));
1321   APSInt Val = peekToAPSInt(S.Stk, ValT,
1322                             align(primSize(ValT)) + align(primSize(IndexT)));
1323   APSInt Index = peekToAPSInt(S.Stk, IndexT);
1324 
1325   unsigned BitWidth = Val.getBitWidth();
1326   uint64_t Shift = Index.extractBitsAsZExtValue(8, 0);
1327   uint64_t Length = Index.extractBitsAsZExtValue(8, 8);
1328   Length = Length > BitWidth ? BitWidth : Length;
1329 
1330   // Handle out of bounds cases.
1331   if (Length == 0 || Shift >= BitWidth) {
1332     pushInteger(S, 0, Call->getType());
1333     return true;
1334   }
1335 
1336   uint64_t Result = Val.getZExtValue() >> Shift;
1337   Result &= llvm::maskTrailingOnes<uint64_t>(Length);
1338   pushInteger(S, Result, Call->getType());
1339   return true;
1340 }
1341 
1342 static bool interp__builtin_ia32_bzhi(InterpState &S, CodePtr OpPC,
1343                                       const InterpFrame *Frame,
1344                                       const Function *Func,
1345                                       const CallExpr *Call) {
1346   QualType CallType = Call->getType();
1347   if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() ||
1348       !Call->getArg(1)->getType()->isIntegerType() ||
1349       !CallType->isIntegerType())
1350     return false;
1351 
1352   PrimType ValT = *S.Ctx.classify(Call->getArg(0));
1353   PrimType IndexT = *S.Ctx.classify(Call->getArg(1));
1354 
1355   APSInt Val = peekToAPSInt(S.Stk, ValT,
1356                             align(primSize(ValT)) + align(primSize(IndexT)));
1357   APSInt Idx = peekToAPSInt(S.Stk, IndexT);
1358 
1359   unsigned BitWidth = Val.getBitWidth();
1360   uint64_t Index = Idx.extractBitsAsZExtValue(8, 0);
1361 
1362   if (Index < BitWidth)
1363     Val.clearHighBits(BitWidth - Index);
1364 
1365   pushInteger(S, Val, CallType);
1366   return true;
1367 }
1368 
1369 static bool interp__builtin_ia32_lzcnt(InterpState &S, CodePtr OpPC,
1370                                        const InterpFrame *Frame,
1371                                        const Function *Func,
1372                                        const CallExpr *Call) {
1373   QualType CallType = Call->getType();
1374   if (!CallType->isIntegerType() ||
1375       !Call->getArg(0)->getType()->isIntegerType())
1376     return false;
1377 
1378   APSInt Val = peekToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(0)));
1379   pushInteger(S, Val.countLeadingZeros(), CallType);
1380   return true;
1381 }
1382 
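/// Implements __builtin_ia32_tzcnt_u{16,32,64}. Unlike __builtin_ctz, a zero
/// input is well-defined and yields the bit width, e.g.
/// __builtin_ia32_tzcnt_u32(0) == 32 and __builtin_ia32_tzcnt_u32(0x10000)
/// == 16.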
1383 static bool interp__builtin_ia32_tzcnt(InterpState &S, CodePtr OpPC,
1384                                        const InterpFrame *Frame,
1385                                        const Function *Func,
1386                                        const CallExpr *Call) {
1387   QualType CallType = Call->getType();
1388   if (!CallType->isIntegerType() ||
1389       !Call->getArg(0)->getType()->isIntegerType())
1390     return false;
1391 
1392   APSInt Val = peekToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(0)));
1393   pushInteger(S, Val.countTrailingZeros(), CallType);
1394   return true;
1395 }
1396 
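/// Implements the BMI2 parallel bit deposit builtins
/// (__builtin_ia32_pdep_{si,di}): successive low bits of the value are
/// deposited at the set-bit positions of the mask. E.g.
/// __builtin_ia32_pdep_si(0b1011, 0b11010) == 0b01010.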
1397 static bool interp__builtin_ia32_pdep(InterpState &S, CodePtr OpPC,
1398                                       const InterpFrame *Frame,
1399                                       const Function *Func,
1400                                       const CallExpr *Call) {
1401   if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() ||
1402       !Call->getArg(1)->getType()->isIntegerType())
1403     return false;
1404 
1405   PrimType ValT = *S.Ctx.classify(Call->getArg(0));
1406   PrimType MaskT = *S.Ctx.classify(Call->getArg(1));
1407 
1408   APSInt Val =
1409       peekToAPSInt(S.Stk, ValT, align(primSize(ValT)) + align(primSize(MaskT)));
1410   APSInt Mask = peekToAPSInt(S.Stk, MaskT);
1411 
1412   unsigned BitWidth = Val.getBitWidth();
1413   APInt Result = APInt::getZero(BitWidth);
1414   for (unsigned I = 0, P = 0; I != BitWidth; ++I) {
1415     if (Mask[I])
1416       Result.setBitVal(I, Val[P++]);
1417   }
1418   pushInteger(S, Result, Call->getType());
1419   return true;
1420 }
1421 
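/// Implements the BMI2 parallel bit extract builtins
/// (__builtin_ia32_pext_{si,di}): the bits of the value selected by the mask
/// are packed into the low bits of the result. E.g.
/// __builtin_ia32_pext_si(0b10010, 0b11010) == 0b101.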
1422 static bool interp__builtin_ia32_pext(InterpState &S, CodePtr OpPC,
1423                                       const InterpFrame *Frame,
1424                                       const Function *Func,
1425                                       const CallExpr *Call) {
1426   if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() ||
1427       !Call->getArg(1)->getType()->isIntegerType())
1428     return false;
1429 
1430   PrimType ValT = *S.Ctx.classify(Call->getArg(0));
1431   PrimType MaskT = *S.Ctx.classify(Call->getArg(1));
1432 
1433   APSInt Val =
1434       peekToAPSInt(S.Stk, ValT, align(primSize(ValT)) + align(primSize(MaskT)));
1435   APSInt Mask = peekToAPSInt(S.Stk, MaskT);
1436 
1437   unsigned BitWidth = Val.getBitWidth();
1438   APInt Result = APInt::getZero(BitWidth);
1439   for (unsigned I = 0, P = 0; I != BitWidth; ++I) {
1440     if (Mask[I])
1441       Result.setBitVal(P++, Val[I]);
1442   }
1443   pushInteger(S, Result, Call->getType());
1444   return true;
1445 }
1446 
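/// Implements __builtin_ia32_addcarryx_u{32,64} and
/// __builtin_ia32_subborrow_u{32,64}: the sum (or difference) including the
/// incoming carry/borrow is stored through the fourth, pointer-typed
/// argument, and the outgoing carry/borrow bit is returned. E.g. adding
/// 0xFFFFFFFF + 0 with a carry-in of 1 stores 0 and returns 1.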
1447 static bool interp__builtin_ia32_addcarry_subborrow(InterpState &S,
1448                                                     CodePtr OpPC,
1449                                                     const InterpFrame *Frame,
1450                                                     const Function *Func,
1451                                                     const CallExpr *Call) {
1452   if (Call->getNumArgs() != 4 || !Call->getArg(0)->getType()->isIntegerType() ||
1453       !Call->getArg(1)->getType()->isIntegerType() ||
1454       !Call->getArg(2)->getType()->isIntegerType())
1455     return false;
1456 
1457   unsigned BuiltinOp = Func->getBuiltinID();
1458   APSInt CarryIn = getAPSIntParam(Frame, 0);
1459   APSInt LHS = getAPSIntParam(Frame, 1);
1460   APSInt RHS = getAPSIntParam(Frame, 2);
1461 
1462   bool IsAdd = BuiltinOp == clang::X86::BI__builtin_ia32_addcarryx_u32 ||
1463                BuiltinOp == clang::X86::BI__builtin_ia32_addcarryx_u64;
1464 
1465   unsigned BitWidth = LHS.getBitWidth();
1466   unsigned CarryInBit = CarryIn.ugt(0) ? 1 : 0;
1467   APInt ExResult =
1468       IsAdd ? (LHS.zext(BitWidth + 1) + (RHS.zext(BitWidth + 1) + CarryInBit))
1469             : (LHS.zext(BitWidth + 1) - (RHS.zext(BitWidth + 1) + CarryInBit));
1470 
1471   APInt Result = ExResult.extractBits(BitWidth, 0);
1472   APSInt CarryOut =
1473       APSInt(ExResult.extractBits(1, BitWidth), /*IsUnsigned=*/true);
1474 
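  // The truncated sum/difference goes through the out-pointer argument; the
  // extracted carry/borrow bit is the call's return value.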
1475   Pointer &CarryOutPtr = S.Stk.peek<Pointer>();
1476   QualType CarryOutType = Call->getArg(3)->getType()->getPointeeType();
1477   PrimType CarryOutT = *S.getContext().classify(CarryOutType);
1478   assignInteger(CarryOutPtr, CarryOutT, APSInt(Result, true));
1479 
1480   pushInteger(S, CarryOut, Call->getType());
1481 
1482   return true;
1483 }
1484 
1485 static bool interp__builtin_os_log_format_buffer_size(InterpState &S,
1486                                                       CodePtr OpPC,
1487                                                       const InterpFrame *Frame,
1488                                                       const Function *Func,
1489                                                       const CallExpr *Call) {
1490   analyze_os_log::OSLogBufferLayout Layout;
1491   analyze_os_log::computeOSLogBufferLayout(S.getASTContext(), Call, Layout);
1492   pushInteger(S, Layout.size().getQuantity(), Call->getType());
1493   return true;
1494 }
1495 
1496 static bool interp__builtin_ptrauth_string_discriminator(
1497     InterpState &S, CodePtr OpPC, const InterpFrame *Frame,
1498     const Function *Func, const CallExpr *Call) {
1499   const auto &Ptr = S.Stk.peek<Pointer>();
1500   assert(Ptr.getFieldDesc()->isPrimitiveArray());
1501 
1502   StringRef R(&Ptr.deref<char>(), Ptr.getFieldDesc()->getNumElems() - 1);
1503   uint64_t Result = getPointerAuthStableSipHash(R);
1504   pushInteger(S, Result, Call->getType());
1505   return true;
1506 }
1507 
1508 // FIXME: This implementation is not complete.
1509 // The Compiler instance we create cannot access the current stack frame, local
1510 // variables, function parameters, etc. We also need protection from
1511 // side-effects, fatal errors, etc.
1512 static bool interp__builtin_constant_p(InterpState &S, CodePtr OpPC,
1513                                        const InterpFrame *Frame,
1514                                        const Function *Func,
1515                                        const CallExpr *Call) {
1516   const Expr *Arg = Call->getArg(0);
1517   QualType ArgType = Arg->getType();
1518 
1519   auto returnInt = [&S, Call](bool Value) -> bool {
1520     pushInteger(S, Value, Call->getType());
1521     return true;
1522   };
1523 
1524   // __builtin_constant_p always has one operand. The rules which gcc follows
1525   // are not precisely documented, but are as follows:
1526   //
1527   //  - If the operand is of integral, floating, complex or enumeration type,
1528   //    and can be folded to a known value of that type, it returns 1.
1529   //  - If the operand can be folded to a pointer to the first character
1530   //    of a string literal (or such a pointer cast to an integral type)
1531   //    or to a null pointer or an integer cast to a pointer, it returns 1.
1532   //
1533   // Otherwise, it returns 0.
1534   //
1535   // FIXME: GCC also intends to return 1 for literals of aggregate types, but
1536   // its support for this did not work prior to GCC 9 and is not yet well
1537   // understood.
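  //
  // For example, __builtin_constant_p(12 + 30) and __builtin_constant_p("f")
  // both evaluate to 1, while __builtin_constant_p(E) for an expression E
  // that reads a non-constexpr global evaluates to 0.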
1538   if (ArgType->isIntegralOrEnumerationType() || ArgType->isFloatingType() ||
1539       ArgType->isAnyComplexType() || ArgType->isPointerType() ||
1540       ArgType->isNullPtrType()) {
1541     InterpStack Stk;
1542     Compiler<EvalEmitter> C(S.Ctx, S.P, S, Stk);
1543     auto Res = C.interpretExpr(Arg, /*ConvertResultToRValue=*/Arg->isGLValue());
1544     if (Res.isInvalid()) {
1545       C.cleanup();
1546       Stk.clear();
           // A failed evaluation means the operand is not constant enough.
           return returnInt(false);
1547     }
1548 
1549     if (!Res.isInvalid() && !Res.empty()) {
1550       const APValue &LV = Res.toAPValue();
1551       if (LV.isLValue()) {
1552         APValue::LValueBase Base = LV.getLValueBase();
1553         if (Base.isNull()) {
1554           // A null base is acceptable.
1555           return returnInt(true);
1556         } else if (const auto *E = Base.dyn_cast<const Expr *>()) {
1557           if (!isa<StringLiteral>(E))
1558             return returnInt(false);
1559           return returnInt(LV.getLValueOffset().isZero());
1560         } else if (Base.is<TypeInfoLValue>()) {
1561           // Surprisingly, GCC considers __builtin_constant_p(&typeid(int)) to
1562           // evaluate to true.
1563           return returnInt(true);
1564         } else {
1565           // Any other base is not constant enough for GCC.
1566           return returnInt(false);
1567         }
1568       }
1569     }
1570 
1571     // Otherwise, any constant value is good enough.
1572     return returnInt(true);
1573   }
1574 
1575   return returnInt(false);
1576 }
1577 
1578 static bool interp__builtin_operator_new(InterpState &S, CodePtr OpPC,
1579                                          const InterpFrame *Frame,
1580                                          const Function *Func,
1581                                          const CallExpr *Call) {
1582   // A call to __operator_new is only valid within std::allocator<T>::allocate.
1583   // Walk up the call stack to find the appropriate caller and get the
1584   // element type from it.
1585   QualType ElemType;
1586 
1587   for (const InterpFrame *F = Frame; F; F = F->Caller) {
1588     const Function *Func = F->getFunction();
1589     if (!Func)
1590       continue;
1591     const auto *MD = dyn_cast_if_present<CXXMethodDecl>(Func->getDecl());
1592     if (!MD)
1593       continue;
1594     const IdentifierInfo *FnII = MD->getIdentifier();
1595     if (!FnII || !FnII->isStr("allocate"))
1596       continue;
1597 
1598     const auto *CTSD =
1599         dyn_cast<ClassTemplateSpecializationDecl>(MD->getParent());
1600     if (!CTSD)
1601       continue;
1602 
1603     const IdentifierInfo *ClassII = CTSD->getIdentifier();
1604     const TemplateArgumentList &TAL = CTSD->getTemplateArgs();
1605     if (CTSD->isInStdNamespace() && ClassII && ClassII->isStr("allocator") &&
1606         TAL.size() >= 1 && TAL[0].getKind() == TemplateArgument::Type) {
1607       ElemType = TAL[0].getAsType();
1608       break;
1609     }
1610   }
1611 
1612   if (ElemType.isNull()) {
1613     S.FFDiag(Call, S.getLangOpts().CPlusPlus20
1614                        ? diag::note_constexpr_new_untyped
1615                        : diag::note_constexpr_new);
1616     return false;
1617   }
1618 
1619   if (ElemType->isIncompleteType() || ElemType->isFunctionType()) {
1620     S.FFDiag(Call, diag::note_constexpr_new_not_complete_object_type)
1621         << (ElemType->isIncompleteType() ? 0 : 1) << ElemType;
1622     return false;
1623   }
1624 
1625   APSInt Bytes = peekToAPSInt(S.Stk, *S.getContext().classify(Call->getArg(0)));
1626   CharUnits ElemSize = S.getASTContext().getTypeSizeInChars(ElemType);
1627   assert(!ElemSize.isZero());
1628   // Divide the number of bytes by sizeof(ElemType), so we get the number of
1629   // elements we should allocate.
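  // E.g. a request of 24 bytes with a 4-byte element type allocates six
  // elements; a request of 25 bytes leaves a remainder and is rejected below.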
1630   APInt NumElems, Remainder;
1631   APInt ElemSizeAP(Bytes.getBitWidth(), ElemSize.getQuantity());
1632   APInt::udivrem(Bytes, ElemSizeAP, NumElems, Remainder);
1633   if (Remainder != 0) {
1634     // This likely indicates a bug in the implementation of 'std::allocator'.
1635     S.FFDiag(Call, diag::note_constexpr_operator_new_bad_size)
1636         << Bytes << APSInt(ElemSizeAP, true) << ElemType;
1637     return false;
1638   }
1639 
1640   // NB: This is the same check we use in CheckArraySize().
1641   if (NumElems.getActiveBits() >
1642           ConstantArrayType::getMaxSizeBits(S.getASTContext()) ||
1643       NumElems.ugt(Descriptor::MaxArrayElemBytes / ElemSize.getQuantity())) {
1644     // FIXME: NoThrow check?
1645     const SourceInfo &Loc = S.Current->getSource(OpPC);
1646     S.FFDiag(Loc, diag::note_constexpr_new_too_large)
1647         << NumElems.getZExtValue();
1648     return false;
1649   }
1650 
1651   std::optional<PrimType> ElemT = S.getContext().classify(ElemType);
1652   DynamicAllocator &Allocator = S.getAllocator();
1653   if (ElemT) {
1654     if (NumElems.ule(1)) {
1655       const Descriptor *Desc =
1656           S.P.createDescriptor(Call, *ElemT, Descriptor::InlineDescMD,
1657                                /*IsConst=*/false, /*IsTemporary=*/false,
1658                                /*IsMutable=*/false);
1659       Block *B = Allocator.allocate(Desc, S.getContext().getEvalID(),
1660                                     DynamicAllocator::Form::Operator);
1661       assert(B);
1662 
1663       S.Stk.push<Pointer>(B);
1664       return true;
1665     }
1666     assert(NumElems.ugt(1));
1667 
1668     Block *B =
1669         Allocator.allocate(Call, *ElemT, NumElems.getZExtValue(),
1670                            S.Ctx.getEvalID(), DynamicAllocator::Form::Operator);
1671     assert(B);
1672     S.Stk.push<Pointer>(B);
1673     return true;
1674   }
1675 
1676   assert(!ElemT);
1677   // Structs etc.
1678   const Descriptor *Desc = S.P.createDescriptor(
1679       Call, ElemType.getTypePtr(), Descriptor::InlineDescMD,
1680       /*IsConst=*/false, /*IsTemporary=*/false, /*IsMutable=*/false,
1681       /*Init=*/nullptr);
1682 
1683   if (NumElems.ule(1)) {
1684     Block *B = Allocator.allocate(Desc, S.getContext().getEvalID(),
1685                                   DynamicAllocator::Form::Operator);
1686     assert(B);
1687     S.Stk.push<Pointer>(B);
1688     return true;
1689   }
1690 
1691   Block *B =
1692       Allocator.allocate(Desc, NumElems.getZExtValue(), S.Ctx.getEvalID(),
1693                          DynamicAllocator::Form::Operator);
1694   assert(B);
1695   S.Stk.push<Pointer>(B);
1696   return true;
1697 }
1698 
1699 static bool interp__builtin_operator_delete(InterpState &S, CodePtr OpPC,
1700                                             const InterpFrame *Frame,
1701                                             const Function *Func,
1702                                             const CallExpr *Call) {
1703   const Expr *Source = nullptr;
1704   const Block *BlockToDelete = nullptr;
1705 
1706   {
1707     const Pointer &Ptr = S.Stk.peek<Pointer>();
1708 
1709     if (Ptr.isZero()) {
1710       S.CCEDiag(Call, diag::note_constexpr_deallocate_null);
1711       return true;
1712     }
1713 
1714     Source = Ptr.getDeclDesc()->asExpr();
1715     BlockToDelete = Ptr.block();
1716   }
1717   assert(BlockToDelete);
1718 
1719   DynamicAllocator &Allocator = S.getAllocator();
1720   const Descriptor *BlockDesc = BlockToDelete->getDescriptor();
1721   std::optional<DynamicAllocator::Form> AllocForm =
1722       Allocator.getAllocationForm(Source);
1723 
1724   if (!Allocator.deallocate(Source, BlockToDelete, S)) {
1725     // Nothing has been deallocated; this must be a double-delete.
1726     const SourceInfo &Loc = S.Current->getSource(OpPC);
1727     S.FFDiag(Loc, diag::note_constexpr_double_delete);
1728     return false;
1729   }
1730   assert(AllocForm);
1731 
1732   return CheckNewDeleteForms(
1733       S, OpPC, *AllocForm, DynamicAllocator::Form::Operator, BlockDesc, Source);
1734 }
1735 
1736 static bool interp__builtin_arithmetic_fence(InterpState &S, CodePtr OpPC,
1737                                              const InterpFrame *Frame,
1738                                              const Function *Func,
1739                                              const CallExpr *Call) {
1740   const Floating &Arg0 = S.Stk.peek<Floating>();
1741   S.Stk.push<Floating>(Arg0);
1742   return true;
1743 }
1744 
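/// Implements the horizontal __builtin_reduce_{add,mul,and,or,xor} builtins
/// by folding the vector elements left to right, diagnosing overflow in the
/// add and mul reductions. E.g. __builtin_reduce_add over {1, 2, 3, 4}
/// yields 10.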
1745 static bool interp__builtin_vector_reduce(InterpState &S, CodePtr OpPC,
1746                                           const InterpFrame *Frame,
1747                                           const Function *Func,
1748                                           const CallExpr *Call) {
1749   const Pointer &Arg = S.Stk.peek<Pointer>();
1750   assert(Arg.getFieldDesc()->isPrimitiveArray());
1751 
1752   unsigned ID = Func->getBuiltinID();
1753   QualType ElemType = Arg.getFieldDesc()->getElemQualType();
1754   assert(Call->getType() == ElemType);
1755   PrimType ElemT = *S.getContext().classify(ElemType);
1756   unsigned NumElems = Arg.getNumElems();
1757 
1758   INT_TYPE_SWITCH_NO_BOOL(ElemT, {
1759     T Result = Arg.atIndex(0).deref<T>();
1760     unsigned BitWidth = Result.bitWidth();
1761     for (unsigned I = 1; I != NumElems; ++I) {
1762       T Elem = Arg.atIndex(I).deref<T>();
1763       T PrevResult = Result;
1764 
1765       if (ID == Builtin::BI__builtin_reduce_add) {
1766         if (T::add(Result, Elem, BitWidth, &Result)) {
1767           unsigned OverflowBits = BitWidth + 1;
1768           (void)handleOverflow(S, OpPC,
1769                                (PrevResult.toAPSInt(OverflowBits) +
1770                                 Elem.toAPSInt(OverflowBits)));
1771           return false;
1772         }
1773       } else if (ID == Builtin::BI__builtin_reduce_mul) {
1774         if (T::mul(Result, Elem, BitWidth, &Result)) {
1775           unsigned OverflowBits = BitWidth * 2;
1776           (void)handleOverflow(S, OpPC,
1777                                (PrevResult.toAPSInt(OverflowBits) *
1778                                 Elem.toAPSInt(OverflowBits)));
1779           return false;
1780         }
1781 
1782       } else if (ID == Builtin::BI__builtin_reduce_and) {
1783         (void)T::bitAnd(Result, Elem, BitWidth, &Result);
1784       } else if (ID == Builtin::BI__builtin_reduce_or) {
1785         (void)T::bitOr(Result, Elem, BitWidth, &Result);
1786       } else if (ID == Builtin::BI__builtin_reduce_xor) {
1787         (void)T::bitXor(Result, Elem, BitWidth, &Result);
1788       } else {
1789         llvm_unreachable("Unhandled vector reduce builtin");
1790       }
1791     }
1792     pushInteger(S, Result.toAPSInt(), Call->getType());
1793   });
1794 
1795   return true;
1796 }
1797 
1798 /// Can be called with an integer or vector as the first and only parameter.
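/// Scalars get their popcount pushed directly; for vectors the count is
/// computed per element, e.g. {1, 3, 7, 15} maps to {1, 2, 3, 4}.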
1799 static bool interp__builtin_elementwise_popcount(InterpState &S, CodePtr OpPC,
1800                                                  const InterpFrame *Frame,
1801                                                  const Function *Func,
1802                                                  const CallExpr *Call) {
1803   assert(Call->getNumArgs() == 1);
1804   if (Call->getArg(0)->getType()->isIntegerType()) {
1805     PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
1806     APSInt Val = peekToAPSInt(S.Stk, ArgT);
1807     pushInteger(S, Val.popcount(), Call->getType());
1808     return true;
1809   }
1810   // Otherwise, the argument must be a vector.
1811   assert(Call->getArg(0)->getType()->isVectorType());
1812   const Pointer &Arg = S.Stk.peek<Pointer>();
1813   assert(Arg.getFieldDesc()->isPrimitiveArray());
1814   const Pointer &Dst = S.Stk.peek<Pointer>(primSize(PT_Ptr) * 2);
1815   assert(Dst.getFieldDesc()->isPrimitiveArray());
1816   assert(Arg.getFieldDesc()->getNumElems() ==
1817          Dst.getFieldDesc()->getNumElems());
1818 
1819   QualType ElemType = Arg.getFieldDesc()->getElemQualType();
1820   PrimType ElemT = *S.getContext().classify(ElemType);
1821   unsigned NumElems = Arg.getNumElems();
1822 
1823   // FIXME: Reading from uninitialized vector elements?
1824   for (unsigned I = 0; I != NumElems; ++I) {
1825     INT_TYPE_SWITCH_NO_BOOL(ElemT, {
1826       Dst.atIndex(I).deref<T>() =
1827           T::from(Arg.atIndex(I).deref<T>().toAPSInt().popcount());
1828       Dst.atIndex(I).initialize();
1829     });
1830   }
1831 
1832   return true;
1833 }
1834 
1835 static bool interp__builtin_memcpy(InterpState &S, CodePtr OpPC,
1836                                    const InterpFrame *Frame,
1837                                    const Function *Func, const CallExpr *Call) {
1838   assert(Call->getNumArgs() == 3);
1839   unsigned ID = Func->getBuiltinID();
1840   Pointer DestPtr = getParam<Pointer>(Frame, 0);
1841   const ASTContext &ASTCtx = S.getASTContext();
1842   const Pointer &SrcPtr = getParam<Pointer>(Frame, 1);
1843   const APSInt &Size =
1844       peekToAPSInt(S.Stk, *S.getContext().classify(Call->getArg(2)));
1845   assert(!Size.isSigned() && "memcpy and friends take an unsigned size");
1846 
1847   if (ID == Builtin::BImemcpy || ID == Builtin::BImemmove)
1848     diagnoseNonConstexprBuiltin(S, OpPC, ID);
1849 
1850   bool Move = (ID == Builtin::BI__builtin_memmove || ID == Builtin::BImemmove);
1851 
1852   // If the size is zero, we treat this as always being a valid no-op.
1853   if (Size.isZero()) {
1854     S.Stk.push<Pointer>(DestPtr);
1855     return true;
1856   }
1857 
1858   if (SrcPtr.isZero() || DestPtr.isZero()) {
1859     Pointer DiagPtr = (SrcPtr.isZero() ? SrcPtr : DestPtr);
1860     S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_null)
1861         << /*IsMove=*/Move << /*IsWchar=*/false << !SrcPtr.isZero()
1862         << DiagPtr.toDiagnosticString(ASTCtx);
1863     return false;
1864   }
1865 
1866   // Can't read from dummy pointers.
1867   if (DestPtr.isDummy() || SrcPtr.isDummy())
1868     return false;
1869 
1870   QualType DestElemType;
1871   size_t RemainingDestElems;
1872   if (DestPtr.getFieldDesc()->isArray()) {
1873     DestElemType = DestPtr.getFieldDesc()->getElemQualType();
1874     RemainingDestElems = (DestPtr.getNumElems() - DestPtr.getIndex());
1875   } else {
1876     DestElemType = DestPtr.getType();
1877     RemainingDestElems = 1;
1878   }
1879   unsigned DestElemSize = ASTCtx.getTypeSizeInChars(DestElemType).getQuantity();
1880 
1881   if (Size.urem(DestElemSize) != 0) {
1882     S.FFDiag(S.Current->getSource(OpPC),
1883              diag::note_constexpr_memcpy_unsupported)
1884         << Move << /*IsWchar=*/false << 0 << DestElemType << Size
1885         << DestElemSize;
1886     return false;
1887   }
1888 
1889   QualType SrcElemType;
1890   size_t RemainingSrcElems;
1891   if (SrcPtr.getFieldDesc()->isArray()) {
1892     SrcElemType = SrcPtr.getFieldDesc()->getElemQualType();
1893     RemainingSrcElems = (SrcPtr.getNumElems() - SrcPtr.getIndex());
1894   } else {
1895     SrcElemType = SrcPtr.getType();
1896     RemainingSrcElems = 1;
1897   }
1898   unsigned SrcElemSize = ASTCtx.getTypeSizeInChars(SrcElemType).getQuantity();
1899 
1900   if (!ASTCtx.hasSameUnqualifiedType(DestElemType, SrcElemType)) {
1901     S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_type_pun)
1902         << Move << SrcElemType << DestElemType;
1903     return false;
1904   }
1905 
1906   // Check if we have enough elements to read from and write to.
1907   size_t RemainingDestBytes = RemainingDestElems * DestElemSize;
1908   size_t RemainingSrcBytes = RemainingSrcElems * SrcElemSize;
1909   if (Size.ugt(RemainingDestBytes) || Size.ugt(RemainingSrcBytes)) {
1910     APInt N = Size.udiv(DestElemSize);
1911     S.FFDiag(S.Current->getSource(OpPC),
1912              diag::note_constexpr_memcpy_unsupported)
1913         << Move << /*IsWchar=*/false << (Size.ugt(RemainingSrcBytes) ? 1 : 2)
1914         << DestElemType << toString(N, 10, /*Signed=*/false);
1915     return false;
1916   }
1917 
1918   // Check for overlapping memory regions.
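  // (memmove explicitly permits overlap; for memcpy, e.g. an 8-byte copy
  // from byte offset 0 to byte offset 4 of the same block is diagnosed.)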
1919   if (!Move && Pointer::pointToSameBlock(SrcPtr, DestPtr)) {
1920     unsigned SrcIndex = SrcPtr.getIndex() * SrcPtr.elemSize();
1921     unsigned DstIndex = DestPtr.getIndex() * DestPtr.elemSize();
1922     unsigned N = Size.getZExtValue();
1923 
1924     if ((SrcIndex <= DstIndex && (SrcIndex + N) > DstIndex) ||
1925         (DstIndex <= SrcIndex && (DstIndex + N) > SrcIndex)) {
1926       S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_overlap)
1927           << /*IsWChar=*/false;
1928       return false;
1929     }
1930   }
1931 
1932   assert(Size.getZExtValue() % DestElemSize == 0);
1933   if (!DoMemcpy(S, OpPC, SrcPtr, DestPtr, Bytes(Size.getZExtValue()).toBits()))
1934     return false;
1935 
1936   S.Stk.push<Pointer>(DestPtr);
1937   return true;
1938 }
1939 
1940 /// Determine if T is a character type for which we guarantee that
1941 /// sizeof(T) == 1.
1942 static bool isOneByteCharacterType(QualType T) {
1943   return T->isCharType() || T->isChar8Type();
1944 }
1945 
1946 static bool interp__builtin_memcmp(InterpState &S, CodePtr OpPC,
1947                                    const InterpFrame *Frame,
1948                                    const Function *Func, const CallExpr *Call) {
1949   assert(Call->getNumArgs() == 3);
1950   unsigned ID = Func->getBuiltinID();
1951   const Pointer &PtrA = getParam<Pointer>(Frame, 0);
1952   const Pointer &PtrB = getParam<Pointer>(Frame, 1);
1953   const APSInt &Size =
1954       peekToAPSInt(S.Stk, *S.getContext().classify(Call->getArg(2)));
1955 
1956   if (ID == Builtin::BImemcmp || ID == Builtin::BIbcmp ||
1957       ID == Builtin::BIwmemcmp)
1958     diagnoseNonConstexprBuiltin(S, OpPC, ID);
1959 
1960   if (Size.isZero()) {
1961     pushInteger(S, 0, Call->getType());
1962     return true;
1963   }
1964 
1965   bool IsWide =
1966       (ID == Builtin::BIwmemcmp || ID == Builtin::BI__builtin_wmemcmp);
1967 
1968   const ASTContext &ASTCtx = S.getASTContext();
1969   // FIXME: This is an arbitrary limitation inherited from the current
1970   // constant evaluator. We could remove it.
1971   if (!IsWide && (!isOneByteCharacterType(PtrA.getType()) ||
1972                   !isOneByteCharacterType(PtrB.getType()))) {
1973     S.FFDiag(S.Current->getSource(OpPC),
1974              diag::note_constexpr_memcmp_unsupported)
1975         << ("'" + ASTCtx.BuiltinInfo.getName(ID) + "'").str() << PtrA.getType()
1976         << PtrB.getType();
1977     return false;
1978   }
1979 
1980   if (PtrA.isDummy() || PtrB.isDummy())
1981     return false;
1982 
1983   // Now, read both pointers to a buffer and compare those.
1984   BitcastBuffer BufferA(
1985       Bits(ASTCtx.getTypeSize(PtrA.getFieldDesc()->getType())));
1986   readPointerToBuffer(S.getContext(), PtrA, BufferA, false);
1987   // FIXME: The swapping here is UNDOING something we do when reading the
1988   // data into the buffer.
1989   if (ASTCtx.getTargetInfo().isBigEndian())
1990     swapBytes(BufferA.Data.get(), BufferA.byteSize().getQuantity());
1991 
1992   BitcastBuffer BufferB(
1993       Bits(ASTCtx.getTypeSize(PtrB.getFieldDesc()->getType())));
1994   readPointerToBuffer(S.getContext(), PtrB, BufferB, false);
1995   // FIXME: The swapping here is UNDOING something we do when reading the
1996   // data into the buffer.
1997   if (ASTCtx.getTargetInfo().isBigEndian())
1998     swapBytes(BufferB.Data.get(), BufferB.byteSize().getQuantity());
1999 
2000   size_t MinBufferSize = std::min(BufferA.byteSize().getQuantity(),
2001                                   BufferB.byteSize().getQuantity());
2002 
2003   unsigned ElemSize = 1;
2004   if (IsWide)
2005     ElemSize = ASTCtx.getTypeSizeInChars(ASTCtx.getWCharType()).getQuantity();
2006   // The Size given for the wide variants is in wide-char units. Convert it
2007   // to bytes.
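  // E.g. a Size of 2 with a 4-byte wchar_t compares 8 bytes.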
2008   size_t ByteSize = Size.getZExtValue() * ElemSize;
2009   size_t CmpSize = std::min(MinBufferSize, ByteSize);
2010 
2011   for (size_t I = 0; I != CmpSize; I += ElemSize) {
2012     if (IsWide) {
2013       INT_TYPE_SWITCH(*S.getContext().classify(ASTCtx.getWCharType()), {
2014         T A = *reinterpret_cast<T *>(BufferA.Data.get() + I);
2015         T B = *reinterpret_cast<T *>(BufferB.Data.get() + I);
2016         if (A < B) {
2017           pushInteger(S, -1, Call->getType());
2018           return true;
2019         } else if (A > B) {
2020           pushInteger(S, 1, Call->getType());
2021           return true;
2022         }
2023       });
2024     } else {
2025       std::byte A = BufferA.Data[I];
2026       std::byte B = BufferB.Data[I];
2027 
2028       if (A < B) {
2029         pushInteger(S, -1, Call->getType());
2030         return true;
2031       } else if (A > B) {
2032         pushInteger(S, 1, Call->getType());
2033         return true;
2034       }
2035     }
2036   }
2037 
2038   // We compared CmpSize bytes above. If the limiting factor was the Size
2039   // passed, we're done and the result is equality (0).
2040   if (ByteSize <= CmpSize) {
2041     pushInteger(S, 0, Call->getType());
2042     return true;
2043   }
2044 
2045   // However, if we read all the available bytes but were instructed to read
2046   // even more, diagnose this as a "read of dereferenced one-past-the-end
2047   // pointer". This is what would happen if we called CheckRead() on every array
2048   // element.
2049   S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_access_past_end)
2050       << AK_Read << S.Current->getRange(OpPC);
2051   return false;
2052 }
2053 
2054 bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const Function *F,
2055                       const CallExpr *Call, uint32_t BuiltinID) {
2056   const InterpFrame *Frame = S.Current;
2057 
2058   std::optional<PrimType> ReturnT = S.getContext().classify(Call);
2059 
2060   switch (BuiltinID) {
2061   case Builtin::BI__builtin_is_constant_evaluated:
2062     if (!interp__builtin_is_constant_evaluated(S, OpPC, Frame, Call))
2063       return false;
2064     break;
2065   case Builtin::BI__builtin_assume:
2066   case Builtin::BI__assume:
2067     break;
2068   case Builtin::BI__builtin_strcmp:
2069   case Builtin::BIstrcmp:
2070   case Builtin::BI__builtin_strncmp:
2071   case Builtin::BIstrncmp:
2072     if (!interp__builtin_strcmp(S, OpPC, Frame, F, Call))
2073       return false;
2074     break;
2075   case Builtin::BI__builtin_strlen:
2076   case Builtin::BIstrlen:
2077   case Builtin::BI__builtin_wcslen:
2078   case Builtin::BIwcslen:
2079     if (!interp__builtin_strlen(S, OpPC, Frame, F, Call))
2080       return false;
2081     break;
2082   case Builtin::BI__builtin_nan:
2083   case Builtin::BI__builtin_nanf:
2084   case Builtin::BI__builtin_nanl:
2085   case Builtin::BI__builtin_nanf16:
2086   case Builtin::BI__builtin_nanf128:
2087     if (!interp__builtin_nan(S, OpPC, Frame, F, /*Signaling=*/false))
2088       return false;
2089     break;
2090   case Builtin::BI__builtin_nans:
2091   case Builtin::BI__builtin_nansf:
2092   case Builtin::BI__builtin_nansl:
2093   case Builtin::BI__builtin_nansf16:
2094   case Builtin::BI__builtin_nansf128:
2095     if (!interp__builtin_nan(S, OpPC, Frame, F, /*Signaling=*/true))
2096       return false;
2097     break;
2098 
2099   case Builtin::BI__builtin_huge_val:
2100   case Builtin::BI__builtin_huge_valf:
2101   case Builtin::BI__builtin_huge_vall:
2102   case Builtin::BI__builtin_huge_valf16:
2103   case Builtin::BI__builtin_huge_valf128:
2104   case Builtin::BI__builtin_inf:
2105   case Builtin::BI__builtin_inff:
2106   case Builtin::BI__builtin_infl:
2107   case Builtin::BI__builtin_inff16:
2108   case Builtin::BI__builtin_inff128:
2109     if (!interp__builtin_inf(S, OpPC, Frame, F))
2110       return false;
2111     break;
2112   case Builtin::BI__builtin_copysign:
2113   case Builtin::BI__builtin_copysignf:
2114   case Builtin::BI__builtin_copysignl:
2115   case Builtin::BI__builtin_copysignf128:
2116     if (!interp__builtin_copysign(S, OpPC, Frame, F))
2117       return false;
2118     break;
2119 
2120   case Builtin::BI__builtin_fmin:
2121   case Builtin::BI__builtin_fminf:
2122   case Builtin::BI__builtin_fminl:
2123   case Builtin::BI__builtin_fminf16:
2124   case Builtin::BI__builtin_fminf128:
2125     if (!interp__builtin_fmin(S, OpPC, Frame, F, /*IsNumBuiltin=*/false))
2126       return false;
2127     break;
2128 
2129   case Builtin::BI__builtin_fminimum_num:
2130   case Builtin::BI__builtin_fminimum_numf:
2131   case Builtin::BI__builtin_fminimum_numl:
2132   case Builtin::BI__builtin_fminimum_numf16:
2133   case Builtin::BI__builtin_fminimum_numf128:
2134     if (!interp__builtin_fmin(S, OpPC, Frame, F, /*IsNumBuiltin=*/true))
2135       return false;
2136     break;
2137 
2138   case Builtin::BI__builtin_fmax:
2139   case Builtin::BI__builtin_fmaxf:
2140   case Builtin::BI__builtin_fmaxl:
2141   case Builtin::BI__builtin_fmaxf16:
2142   case Builtin::BI__builtin_fmaxf128:
2143     if (!interp__builtin_fmax(S, OpPC, Frame, F, /*IsNumBuiltin=*/false))
2144       return false;
2145     break;
2146 
2147   case Builtin::BI__builtin_fmaximum_num:
2148   case Builtin::BI__builtin_fmaximum_numf:
2149   case Builtin::BI__builtin_fmaximum_numl:
2150   case Builtin::BI__builtin_fmaximum_numf16:
2151   case Builtin::BI__builtin_fmaximum_numf128:
2152     if (!interp__builtin_fmax(S, OpPC, Frame, F, /*IsNumBuiltin=*/true))
2153       return false;
2154     break;
2155 
2156   case Builtin::BI__builtin_isnan:
2157     if (!interp__builtin_isnan(S, OpPC, Frame, F, Call))
2158       return false;
2159     break;
2160   case Builtin::BI__builtin_issignaling:
2161     if (!interp__builtin_issignaling(S, OpPC, Frame, F, Call))
2162       return false;
2163     break;
2164 
2165   case Builtin::BI__builtin_isinf:
2166     if (!interp__builtin_isinf(S, OpPC, Frame, F, /*Sign=*/false, Call))
2167       return false;
2168     break;
2169 
2170   case Builtin::BI__builtin_isinf_sign:
2171     if (!interp__builtin_isinf(S, OpPC, Frame, F, /*Sign=*/true, Call))
2172       return false;
2173     break;
2174 
2175   case Builtin::BI__builtin_isfinite:
2176     if (!interp__builtin_isfinite(S, OpPC, Frame, F, Call))
2177       return false;
2178     break;
2179   case Builtin::BI__builtin_isnormal:
2180     if (!interp__builtin_isnormal(S, OpPC, Frame, F, Call))
2181       return false;
2182     break;
2183   case Builtin::BI__builtin_issubnormal:
2184     if (!interp__builtin_issubnormal(S, OpPC, Frame, F, Call))
2185       return false;
2186     break;
2187   case Builtin::BI__builtin_iszero:
2188     if (!interp__builtin_iszero(S, OpPC, Frame, F, Call))
2189       return false;
2190     break;
2191   case Builtin::BI__builtin_signbit:
2192   case Builtin::BI__builtin_signbitf:
2193   case Builtin::BI__builtin_signbitl:
2194     if (!interp__builtin_signbit(S, OpPC, Frame, F, Call))
2195       return false;
2196     break;
2197   case Builtin::BI__builtin_isgreater:
2198   case Builtin::BI__builtin_isgreaterequal:
2199   case Builtin::BI__builtin_isless:
2200   case Builtin::BI__builtin_islessequal:
2201   case Builtin::BI__builtin_islessgreater:
2202   case Builtin::BI__builtin_isunordered:
2203     if (!interp_floating_comparison(S, OpPC, Frame, F, Call))
2204       return false;
2205     break;
2206   case Builtin::BI__builtin_isfpclass:
2207     if (!interp__builtin_isfpclass(S, OpPC, Frame, F, Call))
2208       return false;
2209     break;
2210   case Builtin::BI__builtin_fpclassify:
2211     if (!interp__builtin_fpclassify(S, OpPC, Frame, F, Call))
2212       return false;
2213     break;
2214 
2215   case Builtin::BI__builtin_fabs:
2216   case Builtin::BI__builtin_fabsf:
2217   case Builtin::BI__builtin_fabsl:
2218   case Builtin::BI__builtin_fabsf128:
2219     if (!interp__builtin_fabs(S, OpPC, Frame, F))
2220       return false;
2221     break;
2222 
2223   case Builtin::BI__builtin_abs:
2224   case Builtin::BI__builtin_labs:
2225   case Builtin::BI__builtin_llabs:
2226     if (!interp__builtin_abs(S, OpPC, Frame, F, Call))
2227       return false;
2228     break;
2229 
2230   case Builtin::BI__builtin_popcount:
2231   case Builtin::BI__builtin_popcountl:
2232   case Builtin::BI__builtin_popcountll:
2233   case Builtin::BI__builtin_popcountg:
2234   case Builtin::BI__popcnt16: // Microsoft variants of popcount
2235   case Builtin::BI__popcnt:
2236   case Builtin::BI__popcnt64:
2237     if (!interp__builtin_popcount(S, OpPC, Frame, F, Call))
2238       return false;
2239     break;
2240 
2241   case Builtin::BI__builtin_parity:
2242   case Builtin::BI__builtin_parityl:
2243   case Builtin::BI__builtin_parityll:
2244     if (!interp__builtin_parity(S, OpPC, Frame, F, Call))
2245       return false;
2246     break;
2247 
2248   case Builtin::BI__builtin_clrsb:
2249   case Builtin::BI__builtin_clrsbl:
2250   case Builtin::BI__builtin_clrsbll:
2251     if (!interp__builtin_clrsb(S, OpPC, Frame, F, Call))
2252       return false;
2253     break;
2254 
2255   case Builtin::BI__builtin_bitreverse8:
2256   case Builtin::BI__builtin_bitreverse16:
2257   case Builtin::BI__builtin_bitreverse32:
2258   case Builtin::BI__builtin_bitreverse64:
2259     if (!interp__builtin_bitreverse(S, OpPC, Frame, F, Call))
2260       return false;
2261     break;
2262 
2263   case Builtin::BI__builtin_classify_type:
2264     if (!interp__builtin_classify_type(S, OpPC, Frame, F, Call))
2265       return false;
2266     break;
2267 
2268   case Builtin::BI__builtin_expect:
2269   case Builtin::BI__builtin_expect_with_probability:
2270     if (!interp__builtin_expect(S, OpPC, Frame, F, Call))
2271       return false;
2272     break;
2273 
2274   case Builtin::BI__builtin_rotateleft8:
2275   case Builtin::BI__builtin_rotateleft16:
2276   case Builtin::BI__builtin_rotateleft32:
2277   case Builtin::BI__builtin_rotateleft64:
2278   case Builtin::BI_rotl8: // Microsoft variants of rotate left
2279   case Builtin::BI_rotl16:
2280   case Builtin::BI_rotl:
2281   case Builtin::BI_lrotl:
2282   case Builtin::BI_rotl64:
2283     if (!interp__builtin_rotate(S, OpPC, Frame, F, Call, /*Right=*/false))
2284       return false;
2285     break;
2286 
2287   case Builtin::BI__builtin_rotateright8:
2288   case Builtin::BI__builtin_rotateright16:
2289   case Builtin::BI__builtin_rotateright32:
2290   case Builtin::BI__builtin_rotateright64:
2291   case Builtin::BI_rotr8: // Microsoft variants of rotate right
2292   case Builtin::BI_rotr16:
2293   case Builtin::BI_rotr:
2294   case Builtin::BI_lrotr:
2295   case Builtin::BI_rotr64:
2296     if (!interp__builtin_rotate(S, OpPC, Frame, F, Call, /*Right=*/true))
2297       return false;
2298     break;
2299 
2300   case Builtin::BI__builtin_ffs:
2301   case Builtin::BI__builtin_ffsl:
2302   case Builtin::BI__builtin_ffsll:
2303     if (!interp__builtin_ffs(S, OpPC, Frame, F, Call))
2304       return false;
2305     break;
2306   case Builtin::BIaddressof:
2307   case Builtin::BI__addressof:
2308   case Builtin::BI__builtin_addressof:
2309     if (!interp__builtin_addressof(S, OpPC, Frame, F, Call))
2310       return false;
2311     break;
2312 
2313   case Builtin::BIas_const:
2314   case Builtin::BIforward:
2315   case Builtin::BIforward_like:
2316   case Builtin::BImove:
2317   case Builtin::BImove_if_noexcept:
2318     if (!interp__builtin_move(S, OpPC, Frame, F, Call))
2319       return false;
2320     break;
2321 
2322   case Builtin::BI__builtin_eh_return_data_regno:
2323     if (!interp__builtin_eh_return_data_regno(S, OpPC, Frame, F, Call))
2324       return false;
2325     break;
2326 
2327   case Builtin::BI__builtin_launder:
2328     if (!noopPointer(S, OpPC, Frame, F, Call))
2329       return false;
2330     break;
2331 
2332   case Builtin::BI__builtin_add_overflow:
2333   case Builtin::BI__builtin_sub_overflow:
2334   case Builtin::BI__builtin_mul_overflow:
2335   case Builtin::BI__builtin_sadd_overflow:
2336   case Builtin::BI__builtin_uadd_overflow:
2337   case Builtin::BI__builtin_uaddl_overflow:
2338   case Builtin::BI__builtin_uaddll_overflow:
2339   case Builtin::BI__builtin_usub_overflow:
2340   case Builtin::BI__builtin_usubl_overflow:
2341   case Builtin::BI__builtin_usubll_overflow:
2342   case Builtin::BI__builtin_umul_overflow:
2343   case Builtin::BI__builtin_umull_overflow:
2344   case Builtin::BI__builtin_umulll_overflow:
2345   case Builtin::BI__builtin_saddl_overflow:
2346   case Builtin::BI__builtin_saddll_overflow:
2347   case Builtin::BI__builtin_ssub_overflow:
2348   case Builtin::BI__builtin_ssubl_overflow:
2349   case Builtin::BI__builtin_ssubll_overflow:
2350   case Builtin::BI__builtin_smul_overflow:
2351   case Builtin::BI__builtin_smull_overflow:
2352   case Builtin::BI__builtin_smulll_overflow:
2353     if (!interp__builtin_overflowop(S, OpPC, Frame, F, Call))
2354       return false;
2355     break;
2356 
2357   case Builtin::BI__builtin_addcb:
2358   case Builtin::BI__builtin_addcs:
2359   case Builtin::BI__builtin_addc:
2360   case Builtin::BI__builtin_addcl:
2361   case Builtin::BI__builtin_addcll:
2362   case Builtin::BI__builtin_subcb:
2363   case Builtin::BI__builtin_subcs:
2364   case Builtin::BI__builtin_subc:
2365   case Builtin::BI__builtin_subcl:
2366   case Builtin::BI__builtin_subcll:
2367     if (!interp__builtin_carryop(S, OpPC, Frame, F, Call))
2368       return false;
2369     break;
2370 
2371   case Builtin::BI__builtin_clz:
2372   case Builtin::BI__builtin_clzl:
2373   case Builtin::BI__builtin_clzll:
2374   case Builtin::BI__builtin_clzs:
2375   case Builtin::BI__builtin_clzg:
2376   case Builtin::BI__lzcnt16: // Microsoft variants of count leading-zeroes
2377   case Builtin::BI__lzcnt:
2378   case Builtin::BI__lzcnt64:
2379     if (!interp__builtin_clz(S, OpPC, Frame, F, Call))
2380       return false;
2381     break;
2382 
2383   case Builtin::BI__builtin_ctz:
2384   case Builtin::BI__builtin_ctzl:
2385   case Builtin::BI__builtin_ctzll:
2386   case Builtin::BI__builtin_ctzs:
2387   case Builtin::BI__builtin_ctzg:
2388     if (!interp__builtin_ctz(S, OpPC, Frame, F, Call))
2389       return false;
2390     break;
2391 
2392   case Builtin::BI__builtin_bswap16:
2393   case Builtin::BI__builtin_bswap32:
2394   case Builtin::BI__builtin_bswap64:
2395     if (!interp__builtin_bswap(S, OpPC, Frame, F, Call))
2396       return false;
2397     break;
2398 
2399   case Builtin::BI__atomic_always_lock_free:
2400   case Builtin::BI__atomic_is_lock_free:
2401   case Builtin::BI__c11_atomic_is_lock_free:
2402     if (!interp__builtin_atomic_lock_free(S, OpPC, Frame, F, Call))
2403       return false;
2404     break;
2405 
2406   case Builtin::BI__builtin_complex:
2407     if (!interp__builtin_complex(S, OpPC, Frame, F, Call))
2408       return false;
2409     break;
2410 
2411   case Builtin::BI__builtin_is_aligned:
2412   case Builtin::BI__builtin_align_up:
2413   case Builtin::BI__builtin_align_down:
2414     if (!interp__builtin_is_aligned_up_down(S, OpPC, Frame, F, Call))
2415       return false;
2416     break;
2417 
2418   case Builtin::BI__builtin_assume_aligned:
2419     if (!interp__builtin_assume_aligned(S, OpPC, Frame, F, Call))
2420       return false;
2421     break;
2422 
2423   case clang::X86::BI__builtin_ia32_bextr_u32:
2424   case clang::X86::BI__builtin_ia32_bextr_u64:
2425   case clang::X86::BI__builtin_ia32_bextri_u32:
2426   case clang::X86::BI__builtin_ia32_bextri_u64:
2427     if (!interp__builtin_ia32_bextr(S, OpPC, Frame, F, Call))
2428       return false;
2429     break;
2430 
2431   case clang::X86::BI__builtin_ia32_bzhi_si:
2432   case clang::X86::BI__builtin_ia32_bzhi_di:
2433     if (!interp__builtin_ia32_bzhi(S, OpPC, Frame, F, Call))
2434       return false;
2435     break;
2436 
2437   case clang::X86::BI__builtin_ia32_lzcnt_u16:
2438   case clang::X86::BI__builtin_ia32_lzcnt_u32:
2439   case clang::X86::BI__builtin_ia32_lzcnt_u64:
2440     if (!interp__builtin_ia32_lzcnt(S, OpPC, Frame, F, Call))
2441       return false;
2442     break;
2443 
2444   case clang::X86::BI__builtin_ia32_tzcnt_u16:
2445   case clang::X86::BI__builtin_ia32_tzcnt_u32:
2446   case clang::X86::BI__builtin_ia32_tzcnt_u64:
2447     if (!interp__builtin_ia32_tzcnt(S, OpPC, Frame, F, Call))
2448       return false;
2449     break;
2450 
2451   case clang::X86::BI__builtin_ia32_pdep_si:
2452   case clang::X86::BI__builtin_ia32_pdep_di:
2453     if (!interp__builtin_ia32_pdep(S, OpPC, Frame, F, Call))
2454       return false;
2455     break;
2456 
2457   case clang::X86::BI__builtin_ia32_pext_si:
2458   case clang::X86::BI__builtin_ia32_pext_di:
2459     if (!interp__builtin_ia32_pext(S, OpPC, Frame, F, Call))
2460       return false;
2461     break;
2462 
2463   case clang::X86::BI__builtin_ia32_addcarryx_u32:
2464   case clang::X86::BI__builtin_ia32_addcarryx_u64:
2465   case clang::X86::BI__builtin_ia32_subborrow_u32:
2466   case clang::X86::BI__builtin_ia32_subborrow_u64:
2467     if (!interp__builtin_ia32_addcarry_subborrow(S, OpPC, Frame, F, Call))
2468       return false;
2469     break;
2470 
2471   case Builtin::BI__builtin_os_log_format_buffer_size:
2472     if (!interp__builtin_os_log_format_buffer_size(S, OpPC, Frame, F, Call))
2473       return false;
2474     break;
2475 
2476   case Builtin::BI__builtin_ptrauth_string_discriminator:
2477     if (!interp__builtin_ptrauth_string_discriminator(S, OpPC, Frame, F, Call))
2478       return false;
2479     break;
2480 
2481   case Builtin::BI__builtin_constant_p:
2482     if (!interp__builtin_constant_p(S, OpPC, Frame, F, Call))
2483       return false;
2484     break;
2485 
2486   case Builtin::BI__noop:
2487     pushInteger(S, 0, Call->getType());
2488     break;
2489 
2490   case Builtin::BI__builtin_operator_new:
2491     if (!interp__builtin_operator_new(S, OpPC, Frame, F, Call))
2492       return false;
2493     break;
2494 
2495   case Builtin::BI__builtin_operator_delete:
2496     if (!interp__builtin_operator_delete(S, OpPC, Frame, F, Call))
2497       return false;
2498     break;
2499 
2500   case Builtin::BI__arithmetic_fence:
2501     if (!interp__builtin_arithmetic_fence(S, OpPC, Frame, F, Call))
2502       return false;
2503     break;
2504 
2505   case Builtin::BI__builtin_reduce_add:
2506   case Builtin::BI__builtin_reduce_mul:
2507   case Builtin::BI__builtin_reduce_and:
2508   case Builtin::BI__builtin_reduce_or:
2509   case Builtin::BI__builtin_reduce_xor:
2510     if (!interp__builtin_vector_reduce(S, OpPC, Frame, F, Call))
2511       return false;
2512     break;
2513 
2514   case Builtin::BI__builtin_elementwise_popcount:
2515     if (!interp__builtin_elementwise_popcount(S, OpPC, Frame, F, Call))
2516       return false;
2517     break;
2518 
2519   case Builtin::BI__builtin_memcpy:
2520   case Builtin::BImemcpy:
2521   case Builtin::BI__builtin_memmove:
2522   case Builtin::BImemmove:
2523     if (!interp__builtin_memcpy(S, OpPC, Frame, F, Call))
2524       return false;
2525     break;
2526 
2527   case Builtin::BI__builtin_memcmp:
2528   case Builtin::BImemcmp:
2529   case Builtin::BI__builtin_bcmp:
2530   case Builtin::BIbcmp:
2531   case Builtin::BI__builtin_wmemcmp:
2532   case Builtin::BIwmemcmp:
2533     if (!interp__builtin_memcmp(S, OpPC, Frame, F, Call))
2534       return false;
2535     break;
2536 
2537   default:
2538     S.FFDiag(S.Current->getLocation(OpPC),
2539              diag::note_invalid_subexpr_in_const_expr)
2540         << S.Current->getRange(OpPC);
2541 
2542     return false;
2543   }
2544 
2545   return retPrimValue(S, OpPC, ReturnT);
2546 }
2547 
2548 bool InterpretOffsetOf(InterpState &S, CodePtr OpPC, const OffsetOfExpr *E,
2549                        llvm::ArrayRef<int64_t> ArrayIndices,
2550                        int64_t &IntResult) {
2551   CharUnits Result;
2552   unsigned N = E->getNumComponents();
2553   assert(N > 0);
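  // E.g. offsetof(S, Arr[2].F) visits the components Field(Arr), Array(2)
  // and Field(F) in order, accumulating their offsets into Result.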
2554 
2555   unsigned ArrayIndex = 0;
2556   QualType CurrentType = E->getTypeSourceInfo()->getType();
2557   for (unsigned I = 0; I != N; ++I) {
2558     const OffsetOfNode &Node = E->getComponent(I);
2559     switch (Node.getKind()) {
2560     case OffsetOfNode::Field: {
2561       const FieldDecl *MemberDecl = Node.getField();
2562       const RecordType *RT = CurrentType->getAs<RecordType>();
2563       if (!RT)
2564         return false;
2565       const RecordDecl *RD = RT->getDecl();
2566       if (RD->isInvalidDecl())
2567         return false;
2568       const ASTRecordLayout &RL = S.getASTContext().getASTRecordLayout(RD);
2569       unsigned FieldIndex = MemberDecl->getFieldIndex();
2570       assert(FieldIndex < RL.getFieldCount() && "offsetof field in wrong type");
2571       Result +=
2572           S.getASTContext().toCharUnitsFromBits(RL.getFieldOffset(FieldIndex));
2573       CurrentType = MemberDecl->getType().getNonReferenceType();
2574       break;
2575     }
2576     case OffsetOfNode::Array: {
2577       // When generating bytecode, we put all the index expressions as Sint64 on
2578       // the stack.
2579       int64_t Index = ArrayIndices[ArrayIndex];
2580       const ArrayType *AT = S.getASTContext().getAsArrayType(CurrentType);
2581       if (!AT)
2582         return false;
2583       CurrentType = AT->getElementType();
2584       CharUnits ElementSize = S.getASTContext().getTypeSizeInChars(CurrentType);
2585       Result += Index * ElementSize;
2586       ++ArrayIndex;
2587       break;
2588     }
2589     case OffsetOfNode::Base: {
2590       const CXXBaseSpecifier *BaseSpec = Node.getBase();
2591       if (BaseSpec->isVirtual())
2592         return false;
2593 
2594       // Find the layout of the class whose base we are looking into.
2595       const RecordType *RT = CurrentType->getAs<RecordType>();
2596       if (!RT)
2597         return false;
2598       const RecordDecl *RD = RT->getDecl();
2599       if (RD->isInvalidDecl())
2600         return false;
2601       const ASTRecordLayout &RL = S.getASTContext().getASTRecordLayout(RD);
2602 
2603       // Find the base class itself.
2604       CurrentType = BaseSpec->getType();
2605       const RecordType *BaseRT = CurrentType->getAs<RecordType>();
2606       if (!BaseRT)
2607         return false;
2608 
2609       // Add the offset to the base.
2610       Result += RL.getBaseClassOffset(cast<CXXRecordDecl>(BaseRT->getDecl()));
2611       break;
2612     }
2613     case OffsetOfNode::Identifier:
2614       llvm_unreachable("Dependent OffsetOfExpr?");
2615     }
2616   }
2617 
2618   IntResult = Result.getQuantity();
2619 
2620   return true;
2621 }
2622 
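/// Used to materialize a three-way comparison result: the comparison
/// category types (e.g. std::strong_ordering) wrap a single integer-like
/// field, which is set to IntValue here.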
2623 bool SetThreeWayComparisonField(InterpState &S, CodePtr OpPC,
2624                                 const Pointer &Ptr, const APSInt &IntValue) {
2625 
2626   const Record *R = Ptr.getRecord();
2627   assert(R);
2628   assert(R->getNumFields() == 1);
2629 
2630   unsigned FieldOffset = R->getField(0u)->Offset;
2631   const Pointer &FieldPtr = Ptr.atField(FieldOffset);
2632   PrimType FieldT = *S.getContext().classify(FieldPtr.getType());
2633 
2634   INT_TYPE_SWITCH(FieldT,
2635                   FieldPtr.deref<T>() = T::from(IntValue.getSExtValue()));
2636   FieldPtr.initialize();
2637   return true;
2638 }
2639 
2640 static bool copyComposite(InterpState &S, CodePtr OpPC, const Pointer &Src,
2641                           Pointer &Dest, bool Activate);
2642 static bool copyRecord(InterpState &S, CodePtr OpPC, const Pointer &Src,
2643                        Pointer &Dest, bool Activate = false) {
2644   [[maybe_unused]] const Descriptor *SrcDesc = Src.getFieldDesc();
2645   const Descriptor *DestDesc = Dest.getFieldDesc();
2646 
2647   auto copyField = [&](const Record::Field &F, bool Activate) -> bool {
2648     Pointer DestField = Dest.atField(F.Offset);
2649     if (std::optional<PrimType> FT = S.Ctx.classify(F.Decl->getType())) {
2650       TYPE_SWITCH(*FT, {
2651         DestField.deref<T>() = Src.atField(F.Offset).deref<T>();
2652         if (Src.atField(F.Offset).isInitialized())
2653           DestField.initialize();
2654         if (Activate)
2655           DestField.activate();
2656       });
2657       return true;
2658     }
2659     // Composite field.
2660     return copyComposite(S, OpPC, Src.atField(F.Offset), DestField, Activate);
2661   };
2662 
2663   assert(SrcDesc->isRecord());
2664   assert(SrcDesc->ElemRecord == DestDesc->ElemRecord);
2665   const Record *R = DestDesc->ElemRecord;
2666   for (const Record::Field &F : R->fields()) {
2667     if (R->isUnion()) {
2668       // For unions, only copy the active field.
2669       const Pointer &SrcField = Src.atField(F.Offset);
2670       if (SrcField.isActive()) {
2671         if (!copyField(F, /*Activate=*/true))
2672           return false;
2673       }
2674     } else {
2675       if (!copyField(F, Activate))
2676         return false;
2677     }
2678   }
2679 
2680   for (const Record::Base &B : R->bases()) {
2681     Pointer DestBase = Dest.atField(B.Offset);
2682     if (!copyRecord(S, OpPC, Src.atField(B.Offset), DestBase, Activate))
2683       return false;
2684   }
2685 
2686   Dest.initialize();
2687   return true;
2688 }
2689 
2690 static bool copyComposite(InterpState &S, CodePtr OpPC, const Pointer &Src,
2691                           Pointer &Dest, bool Activate = false) {
2692   assert(Src.isLive() && Dest.isLive());
2693 
2694   [[maybe_unused]] const Descriptor *SrcDesc = Src.getFieldDesc();
2695   const Descriptor *DestDesc = Dest.getFieldDesc();
2696 
2697   assert(!DestDesc->isPrimitive() && !SrcDesc->isPrimitive());
2698 
2699   if (DestDesc->isPrimitiveArray()) {
2700     assert(SrcDesc->isPrimitiveArray());
2701     assert(SrcDesc->getNumElems() == DestDesc->getNumElems());
2702     PrimType ET = DestDesc->getPrimType();
2703     for (unsigned I = 0, N = DestDesc->getNumElems(); I != N; ++I) {
2704       Pointer DestElem = Dest.atIndex(I);
2705       TYPE_SWITCH(ET, {
2706         DestElem.deref<T>() = Src.atIndex(I).deref<T>();
2707         DestElem.initialize();
2708       });
2709     }
2710     return true;
2711   }
2712 
2713   if (DestDesc->isRecord())
2714     return copyRecord(S, OpPC, Src, Dest, Activate);
2715   return Invalid(S, OpPC);
2716 }
2717 
2718 bool DoMemcpy(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest) {
2719   return copyComposite(S, OpPC, Src, Dest);
2720 }
2721 
2722 } // namespace interp
2723 } // namespace clang
2724