//===--- InterpBuiltin.cpp - Interpreter for the constexpr VM ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "../ExprConstShared.h"
#include "Boolean.h"
#include "Compiler.h"
#include "EvalEmitter.h"
#include "Interp.h"
#include "InterpBuiltinBitCast.h"
#include "PrimType.h"
#include "clang/AST/OSLog.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/Support/SipHash.h"

namespace clang {
namespace interp {

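/// Returns the aligned size, in bytes, that the arguments of \p C occupy on
/// the interpreter stack.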
static unsigned callArgSize(const InterpState &S, const CallExpr *C) {
  unsigned O = 0;

  for (const Expr *E : C->arguments()) {
    O += align(primSize(*S.getContext().classify(E)));
  }

  return O;
}

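/// Returns the value of the parameter at \p Index in \p Frame as a \p T.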
template <typename T>
static T getParam(const InterpFrame *Frame, unsigned Index) {
  assert(Frame->getFunction()->getNumParams() > Index);
  unsigned Offset = Frame->getFunction()->getParamOffset(Index);
  return Frame->getParam<T>(Offset);
}

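/// Returns the integer parameter at \p Index in \p Frame, widened to an
/// APSInt regardless of its primitive type.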
static APSInt getAPSIntParam(const InterpFrame *Frame, unsigned Index) {
  APSInt R;
  unsigned Offset = Frame->getFunction()->getParamOffset(Index);
  INT_TYPE_SWITCH(Frame->getFunction()->getParamType(Index),
                  R = Frame->getParam<T>(Offset).toAPSInt());
  return R;
}

static PrimType getIntPrimType(const InterpState &S) {
  const TargetInfo &TI = S.getASTContext().getTargetInfo();
  unsigned IntWidth = TI.getIntWidth();

  if (IntWidth == 32)
    return PT_Sint32;
  else if (IntWidth == 16)
    return PT_Sint16;
  llvm_unreachable("Int isn't 16 or 32 bit?");
}

static PrimType getLongPrimType(const InterpState &S) {
  const TargetInfo &TI = S.getASTContext().getTargetInfo();
  unsigned LongWidth = TI.getLongWidth();

  if (LongWidth == 64)
    return PT_Sint64;
  else if (LongWidth == 32)
    return PT_Sint32;
  else if (LongWidth == 16)
    return PT_Sint16;
  llvm_unreachable("long isn't 16, 32 or 64 bit?");
}

/// Peek an integer value from the stack into an APSInt.
static APSInt peekToAPSInt(InterpStack &Stk, PrimType T, size_t Offset = 0) {
  if (Offset == 0)
    Offset = align(primSize(T));

  APSInt R;
  INT_TYPE_SWITCH(T, R = Stk.peek<T>(Offset).toAPSInt());

  return R;
}

/// Pushes \p Val on the stack as the type given by \p QT.
static void pushInteger(InterpState &S, const APSInt &Val, QualType QT) {
  assert(QT->isSignedIntegerOrEnumerationType() ||
         QT->isUnsignedIntegerOrEnumerationType());
  std::optional<PrimType> T = S.getContext().classify(QT);
  assert(T);

  unsigned BitWidth = S.getASTContext().getTypeSize(QT);
  if (QT->isSignedIntegerOrEnumerationType()) {
    int64_t V = Val.getSExtValue();
    INT_TYPE_SWITCH(*T, { S.Stk.push<T>(T::from(V, BitWidth)); });
  } else {
    assert(QT->isUnsignedIntegerOrEnumerationType());
    uint64_t V = Val.getZExtValue();
    INT_TYPE_SWITCH(*T, { S.Stk.push<T>(T::from(V, BitWidth)); });
  }
}

template <typename T>
static void pushInteger(InterpState &S, T Val, QualType QT) {
  if constexpr (std::is_same_v<T, APInt>)
    pushInteger(S, APSInt(Val, !std::is_signed_v<T>), QT);
  else if constexpr (std::is_same_v<T, APSInt>)
    pushInteger(S, Val, QT);
  else
    pushInteger(S,
                APSInt(APInt(sizeof(T) * 8, static_cast<uint64_t>(Val),
                             std::is_signed_v<T>),
                       !std::is_signed_v<T>),
                QT);
}

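/// Stores \p Value into \p Dest, converting it to the primitive type
/// \p ValueT first.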
static void assignInteger(Pointer &Dest, PrimType ValueT, const APSInt &Value) {
  INT_TYPE_SWITCH_NO_BOOL(
      ValueT, { Dest.deref<T>() = T::from(static_cast<T>(Value)); });
}

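/// Returns the builtin's result of primitive type \p T from the stack, or
/// returns void if the builtin produces no value.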
static bool retPrimValue(InterpState &S, CodePtr OpPC,
                         std::optional<PrimType> &T) {
  if (!T)
    return RetVoid(S, OpPC);

#define RET_CASE(X)                                                            \
  case X:                                                                      \
    return Ret<X>(S, OpPC);
  switch (*T) {
    RET_CASE(PT_Ptr);
    RET_CASE(PT_FnPtr);
    RET_CASE(PT_Float);
    RET_CASE(PT_Bool);
    RET_CASE(PT_Sint8);
    RET_CASE(PT_Uint8);
    RET_CASE(PT_Sint16);
    RET_CASE(PT_Uint16);
    RET_CASE(PT_Sint32);
    RET_CASE(PT_Uint32);
    RET_CASE(PT_Sint64);
    RET_CASE(PT_Uint64);
    RET_CASE(PT_IntAP);
    RET_CASE(PT_IntAPS);
  default:
    llvm_unreachable("Unsupported return type for builtin function");
  }
#undef RET_CASE
}

static void diagnoseNonConstexprBuiltin(InterpState &S, CodePtr OpPC,
                                        unsigned ID) {
  auto Loc = S.Current->getSource(OpPC);
  if (S.getLangOpts().CPlusPlus11)
    S.CCEDiag(Loc, diag::note_constexpr_invalid_function)
        << /*isConstexpr=*/0 << /*isConstructor=*/0
        << ("'" + S.getASTContext().BuiltinInfo.getName(ID) + "'").str();
  else
    S.CCEDiag(Loc, diag::note_invalid_subexpr_in_const_expr);
}

static bool interp__builtin_is_constant_evaluated(InterpState &S, CodePtr OpPC,
                                                  const InterpFrame *Frame,
                                                  const CallExpr *Call) {
  unsigned Depth = S.Current->getDepth();
  auto isStdCall = [](const FunctionDecl *F) -> bool {
    return F && F->isInStdNamespace() && F->getIdentifier() &&
           F->getIdentifier()->isStr("is_constant_evaluated");
  };
  const InterpFrame *Caller = Frame->Caller;
  // The current frame is the one for __builtin_is_constant_evaluated.
  // The one above that, potentially the one for std::is_constant_evaluated().
  if (S.inConstantContext() && !S.checkingPotentialConstantExpression() &&
      S.getEvalStatus().Diag &&
      (Depth == 1 || (Depth == 2 && isStdCall(Caller->getCallee())))) {
    if (Caller->Caller && isStdCall(Caller->getCallee())) {
      const Expr *E = Caller->Caller->getExpr(Caller->getRetPC());
      S.report(E->getExprLoc(),
               diag::warn_is_constant_evaluated_always_true_constexpr)
          << "std::is_constant_evaluated" << E->getSourceRange();
    } else {
      const Expr *E = Frame->Caller->getExpr(Frame->getRetPC());
      S.report(E->getExprLoc(),
               diag::warn_is_constant_evaluated_always_true_constexpr)
          << "__builtin_is_constant_evaluated" << E->getSourceRange();
    }
  }

  S.Stk.push<Boolean>(Boolean::from(S.inConstantContext()));
  return true;
}

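/// Interprets strcmp()/__builtin_strcmp(). The result is normalized to
/// -1, 0 or 1, e.g. strcmp("abc", "abd") evaluates to -1.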
static bool interp__builtin_strcmp(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const Function *Func, const CallExpr *Call) {
  unsigned ID = Func->getBuiltinID();
  const Pointer &A = getParam<Pointer>(Frame, 0);
  const Pointer &B = getParam<Pointer>(Frame, 1);

  if (ID == Builtin::BIstrcmp)
    diagnoseNonConstexprBuiltin(S, OpPC, ID);

  if (!CheckLive(S, OpPC, A, AK_Read) || !CheckLive(S, OpPC, B, AK_Read))
    return false;

  if (A.isDummy() || B.isDummy())
    return false;

  assert(A.getFieldDesc()->isPrimitiveArray());
  assert(B.getFieldDesc()->isPrimitiveArray());

  unsigned IndexA = A.getIndex();
  unsigned IndexB = B.getIndex();
  int32_t Result = 0;
  for (;; ++IndexA, ++IndexB) {
    const Pointer &PA = A.atIndex(IndexA);
    const Pointer &PB = B.atIndex(IndexB);
    if (!CheckRange(S, OpPC, PA, AK_Read) ||
        !CheckRange(S, OpPC, PB, AK_Read)) {
      return false;
    }
    uint8_t CA = PA.deref<uint8_t>();
    uint8_t CB = PB.deref<uint8_t>();

    if (CA > CB) {
      Result = 1;
      break;
    } else if (CA < CB) {
      Result = -1;
      break;
    }
    if (CA == 0 || CB == 0)
      break;
  }

  pushInteger(S, Result, Call->getType());
  return true;
}

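/// Interprets strlen()/__builtin_strlen() by scanning the array for the
/// terminating null character.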
static bool interp__builtin_strlen(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const Function *Func, const CallExpr *Call) {
  unsigned ID = Func->getBuiltinID();
  const Pointer &StrPtr = getParam<Pointer>(Frame, 0);

  if (ID == Builtin::BIstrlen)
    diagnoseNonConstexprBuiltin(S, OpPC, ID);

  if (!CheckArray(S, OpPC, StrPtr))
    return false;

  if (!CheckLive(S, OpPC, StrPtr, AK_Read))
    return false;

  if (!CheckDummy(S, OpPC, StrPtr, AK_Read))
    return false;

  assert(StrPtr.getFieldDesc()->isPrimitiveArray());

  size_t Len = 0;
  for (size_t I = StrPtr.getIndex();; ++I, ++Len) {
    const Pointer &ElemPtr = StrPtr.atIndex(I);

    if (!CheckRange(S, OpPC, ElemPtr, AK_Read))
      return false;

    uint8_t Val = ElemPtr.deref<uint8_t>();
    if (Val == 0)
      break;
  }

  pushInteger(S, Len, Call->getType());

  return true;
}

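/// Interprets __builtin_nan/__builtin_nans and friends; the string argument
/// is parsed as an integer and used as the NaN payload.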
static bool interp__builtin_nan(InterpState &S, CodePtr OpPC,
                                const InterpFrame *Frame, const Function *F,
                                bool Signaling) {
  const Pointer &Arg = getParam<Pointer>(Frame, 0);

  if (!CheckLoad(S, OpPC, Arg))
    return false;

  assert(Arg.getFieldDesc()->isPrimitiveArray());

  // Convert the given string to an integer using StringRef's API.
  llvm::APInt Fill;
  std::string Str;
  assert(Arg.getNumElems() >= 1);
  for (unsigned I = 0;; ++I) {
    const Pointer &Elem = Arg.atIndex(I);

    if (!CheckLoad(S, OpPC, Elem))
      return false;

    if (Elem.deref<int8_t>() == 0)
      break;

    Str += Elem.deref<char>();
  }

  // Treat empty strings as if they were zero.
  if (Str.empty())
    Fill = llvm::APInt(32, 0);
  else if (StringRef(Str).getAsInteger(0, Fill))
    return false;

  const llvm::fltSemantics &TargetSemantics =
      S.getASTContext().getFloatTypeSemantics(F->getDecl()->getReturnType());

  Floating Result;
  if (S.getASTContext().getTargetInfo().isNan2008()) {
    if (Signaling)
      Result = Floating(
          llvm::APFloat::getSNaN(TargetSemantics, /*Negative=*/false, &Fill));
    else
      Result = Floating(
          llvm::APFloat::getQNaN(TargetSemantics, /*Negative=*/false, &Fill));
  } else {
    // Prior to IEEE 754-2008, architectures were allowed to choose whether
    // the first bit of their significand was set for qNaN or sNaN. MIPS chose
    // a different encoding to what became a standard in 2008, and for pre-
    // 2008 revisions, MIPS interpreted sNaN-2008 as qNaN and qNaN-2008 as
    // sNaN. This is now known as "legacy NaN" encoding.
    if (Signaling)
      Result = Floating(
          llvm::APFloat::getQNaN(TargetSemantics, /*Negative=*/false, &Fill));
    else
      Result = Floating(
          llvm::APFloat::getSNaN(TargetSemantics, /*Negative=*/false, &Fill));
  }

  S.Stk.push<Floating>(Result);
  return true;
}

static bool interp__builtin_inf(InterpState &S, CodePtr OpPC,
                                const InterpFrame *Frame, const Function *F) {
  const llvm::fltSemantics &TargetSemantics =
      S.getASTContext().getFloatTypeSemantics(F->getDecl()->getReturnType());

  S.Stk.push<Floating>(Floating::getInf(TargetSemantics));
  return true;
}

static bool interp__builtin_copysign(InterpState &S, CodePtr OpPC,
                                     const InterpFrame *Frame,
                                     const Function *F) {
  const Floating &Arg1 = getParam<Floating>(Frame, 0);
  const Floating &Arg2 = getParam<Floating>(Frame, 1);

  APFloat Copy = Arg1.getAPFloat();
  Copy.copySign(Arg2.getAPFloat());
  S.Stk.push<Floating>(Floating(Copy));

  return true;
}

static bool interp__builtin_fmin(InterpState &S, CodePtr OpPC,
                                 const InterpFrame *Frame, const Function *F,
                                 bool IsNumBuiltin) {
  const Floating &LHS = getParam<Floating>(Frame, 0);
  const Floating &RHS = getParam<Floating>(Frame, 1);

  Floating Result;

  if (IsNumBuiltin) {
    Result = llvm::minimumnum(LHS.getAPFloat(), RHS.getAPFloat());
  } else {
    // When comparing zeroes, return -0.0 if one of the zeroes is negative.
    if (LHS.isZero() && RHS.isZero() && RHS.isNegative())
      Result = RHS;
    else if (LHS.isNan() || RHS < LHS)
      Result = RHS;
    else
      Result = LHS;
  }

  S.Stk.push<Floating>(Result);
  return true;
}

static bool interp__builtin_fmax(InterpState &S, CodePtr OpPC,
                                 const InterpFrame *Frame, const Function *Func,
                                 bool IsNumBuiltin) {
  const Floating &LHS = getParam<Floating>(Frame, 0);
  const Floating &RHS = getParam<Floating>(Frame, 1);

  Floating Result;

  if (IsNumBuiltin) {
    Result = llvm::maximumnum(LHS.getAPFloat(), RHS.getAPFloat());
  } else {
    // When comparing zeroes, return +0.0 if one of the zeroes is positive.
    if (LHS.isZero() && RHS.isZero() && LHS.isNegative())
      Result = RHS;
    else if (LHS.isNan() || RHS > LHS)
      Result = RHS;
    else
      Result = LHS;
  }

  S.Stk.push<Floating>(Result);
  return true;
}

/// Defined as __builtin_isnan(...), to accommodate the fact that it can
/// take a float, double, long double, etc.
/// But for us, that's all a Floating anyway.
static bool interp__builtin_isnan(InterpState &S, CodePtr OpPC,
                                  const InterpFrame *Frame, const Function *F,
                                  const CallExpr *Call) {
  const Floating &Arg = S.Stk.peek<Floating>();

  pushInteger(S, Arg.isNan(), Call->getType());
  return true;
}

static bool interp__builtin_issignaling(InterpState &S, CodePtr OpPC,
                                        const InterpFrame *Frame,
                                        const Function *F,
                                        const CallExpr *Call) {
  const Floating &Arg = S.Stk.peek<Floating>();

  pushInteger(S, Arg.isSignaling(), Call->getType());
  return true;
}

static bool interp__builtin_isinf(InterpState &S, CodePtr OpPC,
                                  const InterpFrame *Frame, const Function *F,
                                  bool CheckSign, const CallExpr *Call) {
  const Floating &Arg = S.Stk.peek<Floating>();
  bool IsInf = Arg.isInf();

  if (CheckSign)
    pushInteger(S, IsInf ? (Arg.isNegative() ? -1 : 1) : 0, Call->getType());
  else
    pushInteger(S, IsInf, Call->getType());
  return true;
}

static bool interp__builtin_isfinite(InterpState &S, CodePtr OpPC,
                                     const InterpFrame *Frame,
                                     const Function *F, const CallExpr *Call) {
  const Floating &Arg = S.Stk.peek<Floating>();

  pushInteger(S, Arg.isFinite(), Call->getType());
  return true;
}

static bool interp__builtin_isnormal(InterpState &S, CodePtr OpPC,
                                     const InterpFrame *Frame,
                                     const Function *F, const CallExpr *Call) {
  const Floating &Arg = S.Stk.peek<Floating>();

  pushInteger(S, Arg.isNormal(), Call->getType());
  return true;
}

static bool interp__builtin_issubnormal(InterpState &S, CodePtr OpPC,
                                        const InterpFrame *Frame,
                                        const Function *F,
                                        const CallExpr *Call) {
  const Floating &Arg = S.Stk.peek<Floating>();

  pushInteger(S, Arg.isDenormal(), Call->getType());
  return true;
}

static bool interp__builtin_iszero(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame, const Function *F,
                                   const CallExpr *Call) {
  const Floating &Arg = S.Stk.peek<Floating>();

  pushInteger(S, Arg.isZero(), Call->getType());
  return true;
}

static bool interp__builtin_signbit(InterpState &S, CodePtr OpPC,
                                    const InterpFrame *Frame, const Function *F,
                                    const CallExpr *Call) {
  const Floating &Arg = S.Stk.peek<Floating>();

  pushInteger(S, Arg.isNegative(), Call->getType());
  return true;
}

static bool interp_floating_comparison(InterpState &S, CodePtr OpPC,
                                       const InterpFrame *Frame,
                                       const Function *F,
                                       const CallExpr *Call) {
  const Floating &RHS = S.Stk.peek<Floating>();
  const Floating &LHS = S.Stk.peek<Floating>(align(2u * primSize(PT_Float)));
  unsigned ID = F->getBuiltinID();

  pushInteger(
      S,
      [&] {
        switch (ID) {
        case Builtin::BI__builtin_isgreater:
          return LHS > RHS;
        case Builtin::BI__builtin_isgreaterequal:
          return LHS >= RHS;
        case Builtin::BI__builtin_isless:
          return LHS < RHS;
        case Builtin::BI__builtin_islessequal:
          return LHS <= RHS;
        case Builtin::BI__builtin_islessgreater: {
          ComparisonCategoryResult cmp = LHS.compare(RHS);
          return cmp == ComparisonCategoryResult::Less ||
                 cmp == ComparisonCategoryResult::Greater;
        }
        case Builtin::BI__builtin_isunordered:
          return LHS.compare(RHS) == ComparisonCategoryResult::Unordered;
        default:
          llvm_unreachable("Unexpected builtin ID: Should be a floating point "
                           "comparison function");
        }
      }(),
      Call->getType());
  return true;
}

/// First parameter to __builtin_isfpclass is the floating value, the
/// second one is an integral value.
static bool interp__builtin_isfpclass(InterpState &S, CodePtr OpPC,
                                      const InterpFrame *Frame,
                                      const Function *Func,
                                      const CallExpr *Call) {
  PrimType FPClassArgT = *S.getContext().classify(Call->getArg(1)->getType());
  APSInt FPClassArg = peekToAPSInt(S.Stk, FPClassArgT);
  const Floating &F =
      S.Stk.peek<Floating>(align(primSize(FPClassArgT) + primSize(PT_Float)));

  int32_t Result =
      static_cast<int32_t>((F.classify() & FPClassArg).getZExtValue());
  pushInteger(S, Result, Call->getType());

  return true;
}

/// Five int values followed by one floating value.
static bool interp__builtin_fpclassify(InterpState &S, CodePtr OpPC,
                                       const InterpFrame *Frame,
                                       const Function *Func,
                                       const CallExpr *Call) {
  const Floating &Val = S.Stk.peek<Floating>();

  unsigned Index;
  switch (Val.getCategory()) {
  case APFloat::fcNaN:
    Index = 0;
    break;
  case APFloat::fcInfinity:
    Index = 1;
    break;
  case APFloat::fcNormal:
    Index = Val.isDenormal() ? 3 : 2;
    break;
  case APFloat::fcZero:
    Index = 4;
    break;
  }

  // The last argument is first on the stack.
  assert(Index <= 4);
  unsigned IntSize = primSize(getIntPrimType(S));
  unsigned Offset =
      align(primSize(PT_Float)) + ((1 + (4 - Index)) * align(IntSize));

  APSInt I = peekToAPSInt(S.Stk, getIntPrimType(S), Offset);
  pushInteger(S, I, Call->getType());
  return true;
}

// The C standard says "fabs raises no floating-point exceptions,
// even if x is a signaling NaN. The returned value is independent of
// the current rounding direction mode."  Therefore constant folding can
// proceed without regard to the floating point settings.
// Reference, WG14 N2478 F.10.4.3
static bool interp__builtin_fabs(InterpState &S, CodePtr OpPC,
                                 const InterpFrame *Frame,
                                 const Function *Func) {
  const Floating &Val = getParam<Floating>(Frame, 0);

  S.Stk.push<Floating>(Floating::abs(Val));
  return true;
}

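/// Interprets __builtin_abs and friends. The minimum signed value has no
/// representable absolute value, so that case is rejected as non-constant.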
static bool interp__builtin_abs(InterpState &S, CodePtr OpPC,
                                const InterpFrame *Frame, const Function *Func,
                                const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Val = peekToAPSInt(S.Stk, ArgT);
  if (Val ==
      APSInt(APInt::getSignedMinValue(Val.getBitWidth()), /*IsUnsigned=*/false))
    return false;
  if (Val.isNegative())
    Val.negate();
  pushInteger(S, Val, Call->getType());
  return true;
}

static bool interp__builtin_popcount(InterpState &S, CodePtr OpPC,
                                     const InterpFrame *Frame,
                                     const Function *Func,
                                     const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Val = peekToAPSInt(S.Stk, ArgT);
  pushInteger(S, Val.popcount(), Call->getType());
  return true;
}

static bool interp__builtin_parity(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const Function *Func, const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Val = peekToAPSInt(S.Stk, ArgT);
  pushInteger(S, Val.popcount() % 2, Call->getType());
  return true;
}

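/// Interprets __builtin_clrsb: the number of leading redundant sign bits,
/// e.g. __builtin_clrsb(0) == 31 for a 32-bit int.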
static bool interp__builtin_clrsb(InterpState &S, CodePtr OpPC,
                                  const InterpFrame *Frame,
                                  const Function *Func, const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Val = peekToAPSInt(S.Stk, ArgT);
  pushInteger(S, Val.getBitWidth() - Val.getSignificantBits(), Call->getType());
  return true;
}

static bool interp__builtin_bitreverse(InterpState &S, CodePtr OpPC,
                                       const InterpFrame *Frame,
                                       const Function *Func,
                                       const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Val = peekToAPSInt(S.Stk, ArgT);
  pushInteger(S, Val.reverseBits(), Call->getType());
  return true;
}

static bool interp__builtin_classify_type(InterpState &S, CodePtr OpPC,
                                          const InterpFrame *Frame,
                                          const Function *Func,
                                          const CallExpr *Call) {
  // This is an unevaluated call, so there are no arguments on the stack.
  assert(Call->getNumArgs() == 1);
  const Expr *Arg = Call->getArg(0);

  GCCTypeClass ResultClass =
      EvaluateBuiltinClassifyType(Arg->getType(), S.getLangOpts());
  int32_t ReturnVal = static_cast<int32_t>(ResultClass);
  pushInteger(S, ReturnVal, Call->getType());
  return true;
}

// __builtin_expect(long, long)
// __builtin_expect_with_probability(long, long, double)
static bool interp__builtin_expect(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const Function *Func, const CallExpr *Call) {
  // The return value is simply the value of the first parameter.
  // We ignore the probability.
  unsigned NumArgs = Call->getNumArgs();
  assert(NumArgs == 2 || NumArgs == 3);

  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  unsigned Offset = align(primSize(getLongPrimType(S))) * 2;
  if (NumArgs == 3)
    Offset += align(primSize(PT_Float));

  APSInt Val = peekToAPSInt(S.Stk, ArgT, Offset);
  pushInteger(S, Val, Call->getType());
  return true;
}

/// rotateleft(value, amount)
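/// The amount is reduced modulo the bit width, e.g.
/// __builtin_rotateleft8(0x12, 4) == 0x21.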
static bool interp__builtin_rotate(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const Function *Func, const CallExpr *Call,
                                   bool Right) {
  PrimType AmountT = *S.getContext().classify(Call->getArg(1)->getType());
  PrimType ValueT = *S.getContext().classify(Call->getArg(0)->getType());

  APSInt Amount = peekToAPSInt(S.Stk, AmountT);
  APSInt Value = peekToAPSInt(
      S.Stk, ValueT, align(primSize(AmountT)) + align(primSize(ValueT)));

  APSInt Result;
  if (Right)
    Result = APSInt(Value.rotr(Amount.urem(Value.getBitWidth())),
                    /*IsUnsigned=*/true);
  else // Left.
    Result = APSInt(Value.rotl(Amount.urem(Value.getBitWidth())),
                    /*IsUnsigned=*/true);

  pushInteger(S, Result, Call->getType());
  return true;
}

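/// Interprets __builtin_ffs: the 1-based index of the least significant set
/// bit, or 0 for a zero input, e.g. __builtin_ffs(8) == 4.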
static bool interp__builtin_ffs(InterpState &S, CodePtr OpPC,
                                const InterpFrame *Frame, const Function *Func,
                                const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Value = peekToAPSInt(S.Stk, ArgT);

  uint64_t N = Value.countr_zero();
  pushInteger(S, N == Value.getBitWidth() ? 0 : N + 1, Call->getType());
  return true;
}

static bool interp__builtin_addressof(InterpState &S, CodePtr OpPC,
                                      const InterpFrame *Frame,
                                      const Function *Func,
                                      const CallExpr *Call) {
  assert(Call->getArg(0)->isLValue());
  PrimType PtrT = S.getContext().classify(Call->getArg(0)).value_or(PT_Ptr);

  if (PtrT == PT_FnPtr) {
    const FunctionPointer &Arg = S.Stk.peek<FunctionPointer>();
    S.Stk.push<FunctionPointer>(Arg);
  } else if (PtrT == PT_Ptr) {
    const Pointer &Arg = S.Stk.peek<Pointer>();
    S.Stk.push<Pointer>(Arg);
  } else {
    assert(false && "Unsupported pointer type passed to __builtin_addressof()");
  }
  return true;
}

static bool interp__builtin_move(InterpState &S, CodePtr OpPC,
                                 const InterpFrame *Frame, const Function *Func,
                                 const CallExpr *Call) {

  PrimType ArgT = S.getContext().classify(Call->getArg(0)).value_or(PT_Ptr);

  TYPE_SWITCH(ArgT, const T &Arg = S.Stk.peek<T>(); S.Stk.push<T>(Arg););

  return Func->getDecl()->isConstexpr();
}

static bool interp__builtin_eh_return_data_regno(InterpState &S, CodePtr OpPC,
                                                 const InterpFrame *Frame,
                                                 const Function *Func,
                                                 const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Arg = peekToAPSInt(S.Stk, ArgT);

  int Result = S.getASTContext().getTargetInfo().getEHDataRegisterNumber(
      Arg.getZExtValue());
  pushInteger(S, Result, Call->getType());
  return true;
}

/// Just takes the first Argument to the call and puts it on the stack.
static bool noopPointer(InterpState &S, CodePtr OpPC, const InterpFrame *Frame,
                        const Function *Func, const CallExpr *Call) {
  const Pointer &Arg = S.Stk.peek<Pointer>();
  S.Stk.push<Pointer>(Arg);
  return true;
}

// Two integral values followed by a pointer (lhs, rhs, resultOut)
static bool interp__builtin_overflowop(InterpState &S, CodePtr OpPC,
                                       const InterpFrame *Frame,
                                       const Function *Func,
                                       const CallExpr *Call) {
  Pointer &ResultPtr = S.Stk.peek<Pointer>();
  if (ResultPtr.isDummy())
    return false;

  unsigned BuiltinOp = Func->getBuiltinID();
  PrimType RHST = *S.getContext().classify(Call->getArg(1)->getType());
  PrimType LHST = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt RHS = peekToAPSInt(S.Stk, RHST,
                            align(primSize(PT_Ptr)) + align(primSize(RHST)));
  APSInt LHS = peekToAPSInt(S.Stk, LHST,
                            align(primSize(PT_Ptr)) + align(primSize(RHST)) +
                                align(primSize(LHST)));
  QualType ResultType = Call->getArg(2)->getType()->getPointeeType();
  PrimType ResultT = *S.getContext().classify(ResultType);
  bool Overflow;

  APSInt Result;
  if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
      BuiltinOp == Builtin::BI__builtin_sub_overflow ||
      BuiltinOp == Builtin::BI__builtin_mul_overflow) {
    bool IsSigned = LHS.isSigned() || RHS.isSigned() ||
                    ResultType->isSignedIntegerOrEnumerationType();
    bool AllSigned = LHS.isSigned() && RHS.isSigned() &&
                     ResultType->isSignedIntegerOrEnumerationType();
    uint64_t LHSSize = LHS.getBitWidth();
    uint64_t RHSSize = RHS.getBitWidth();
    uint64_t ResultSize = S.getASTContext().getTypeSize(ResultType);
    uint64_t MaxBits = std::max(std::max(LHSSize, RHSSize), ResultSize);

    // Add an additional bit if the signedness isn't uniformly agreed to. We
    // could do this ONLY if there is a signed and an unsigned that both have
    // MaxBits, but the code to check that is pretty nasty.  The issue will be
    // caught in the shrink-to-result later anyway.
    if (IsSigned && !AllSigned)
      ++MaxBits;

    LHS = APSInt(LHS.extOrTrunc(MaxBits), !IsSigned);
    RHS = APSInt(RHS.extOrTrunc(MaxBits), !IsSigned);
    Result = APSInt(MaxBits, !IsSigned);
  }

  // Perform the operation.
  switch (BuiltinOp) {
  default:
    llvm_unreachable("Invalid value for BuiltinOp");
  case Builtin::BI__builtin_add_overflow:
  case Builtin::BI__builtin_sadd_overflow:
  case Builtin::BI__builtin_saddl_overflow:
  case Builtin::BI__builtin_saddll_overflow:
  case Builtin::BI__builtin_uadd_overflow:
  case Builtin::BI__builtin_uaddl_overflow:
  case Builtin::BI__builtin_uaddll_overflow:
    Result = LHS.isSigned() ? LHS.sadd_ov(RHS, Overflow)
                            : LHS.uadd_ov(RHS, Overflow);
    break;
  case Builtin::BI__builtin_sub_overflow:
  case Builtin::BI__builtin_ssub_overflow:
  case Builtin::BI__builtin_ssubl_overflow:
  case Builtin::BI__builtin_ssubll_overflow:
  case Builtin::BI__builtin_usub_overflow:
  case Builtin::BI__builtin_usubl_overflow:
  case Builtin::BI__builtin_usubll_overflow:
    Result = LHS.isSigned() ? LHS.ssub_ov(RHS, Overflow)
                            : LHS.usub_ov(RHS, Overflow);
    break;
  case Builtin::BI__builtin_mul_overflow:
  case Builtin::BI__builtin_smul_overflow:
  case Builtin::BI__builtin_smull_overflow:
  case Builtin::BI__builtin_smulll_overflow:
  case Builtin::BI__builtin_umul_overflow:
  case Builtin::BI__builtin_umull_overflow:
  case Builtin::BI__builtin_umulll_overflow:
    Result = LHS.isSigned() ? LHS.smul_ov(RHS, Overflow)
                            : LHS.umul_ov(RHS, Overflow);
    break;
  }

  // In the case where multiple sizes are allowed, truncate and see if
  // the values are the same.
  if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
      BuiltinOp == Builtin::BI__builtin_sub_overflow ||
      BuiltinOp == Builtin::BI__builtin_mul_overflow) {
    // APSInt doesn't have a TruncOrSelf, so we use extOrTrunc instead,
    // since it will give us the behavior of a TruncOrSelf in the case where
    // its parameter <= its size.  We previously set Result to be at least the
    // type-size of the result, so getTypeSize(ResultType) <=
    // Result.getBitWidth() here.
    APSInt Temp = Result.extOrTrunc(S.getASTContext().getTypeSize(ResultType));
    Temp.setIsSigned(ResultType->isSignedIntegerOrEnumerationType());

    if (!APSInt::isSameValue(Temp, Result))
      Overflow = true;
    Result = Temp;
  }

  // Write Result to ResultPtr and put Overflow on the stack.
  assignInteger(ResultPtr, ResultT, Result);
  ResultPtr.initialize();
  assert(Func->getDecl()->getReturnType()->isBooleanType());
  S.Stk.push<Boolean>(Overflow);
  return true;
}

/// Three integral values followed by a pointer (lhs, rhs, carry, carryOut).
static bool interp__builtin_carryop(InterpState &S, CodePtr OpPC,
                                    const InterpFrame *Frame,
                                    const Function *Func,
                                    const CallExpr *Call) {
  unsigned BuiltinOp = Func->getBuiltinID();
  PrimType LHST = *S.getContext().classify(Call->getArg(0)->getType());
  PrimType RHST = *S.getContext().classify(Call->getArg(1)->getType());
  PrimType CarryT = *S.getContext().classify(Call->getArg(2)->getType());
  APSInt RHS = peekToAPSInt(S.Stk, RHST,
                            align(primSize(PT_Ptr)) + align(primSize(CarryT)) +
                                align(primSize(RHST)));
  APSInt LHS =
      peekToAPSInt(S.Stk, LHST,
                   align(primSize(PT_Ptr)) + align(primSize(RHST)) +
                       align(primSize(CarryT)) + align(primSize(LHST)));
  APSInt CarryIn = peekToAPSInt(
      S.Stk, LHST, align(primSize(PT_Ptr)) + align(primSize(CarryT)));
  APSInt CarryOut;

  APSInt Result;
  // Copy the number of bits and sign.
  Result = LHS;
  CarryOut = LHS;

  bool FirstOverflowed = false;
  bool SecondOverflowed = false;
  switch (BuiltinOp) {
  default:
    llvm_unreachable("Invalid value for BuiltinOp");
  case Builtin::BI__builtin_addcb:
  case Builtin::BI__builtin_addcs:
  case Builtin::BI__builtin_addc:
  case Builtin::BI__builtin_addcl:
  case Builtin::BI__builtin_addcll:
    Result =
        LHS.uadd_ov(RHS, FirstOverflowed).uadd_ov(CarryIn, SecondOverflowed);
    break;
  case Builtin::BI__builtin_subcb:
  case Builtin::BI__builtin_subcs:
  case Builtin::BI__builtin_subc:
  case Builtin::BI__builtin_subcl:
  case Builtin::BI__builtin_subcll:
    Result =
        LHS.usub_ov(RHS, FirstOverflowed).usub_ov(CarryIn, SecondOverflowed);
    break;
  }
  // Both overflows can happen, but CGBuiltin combines them with an OR, so
  // this is consistent.
  CarryOut = (uint64_t)(FirstOverflowed | SecondOverflowed);

  Pointer &CarryOutPtr = S.Stk.peek<Pointer>();
  QualType CarryOutType = Call->getArg(3)->getType()->getPointeeType();
  PrimType CarryOutT = *S.getContext().classify(CarryOutType);
  assignInteger(CarryOutPtr, CarryOutT, CarryOut);
  CarryOutPtr.initialize();

  assert(Call->getType() == Call->getArg(0)->getType());
  pushInteger(S, Result, Call->getType());
  return true;
}

static bool interp__builtin_clz(InterpState &S, CodePtr OpPC,
                                const InterpFrame *Frame, const Function *Func,
                                const CallExpr *Call) {
  unsigned CallSize = callArgSize(S, Call);
  unsigned BuiltinOp = Func->getBuiltinID();
  PrimType ValT = *S.getContext().classify(Call->getArg(0));
  const APSInt &Val = peekToAPSInt(S.Stk, ValT, CallSize);

  // When the argument is 0, the result of GCC builtins is undefined, whereas
  // for Microsoft intrinsics, the result is the bit-width of the argument.
  bool ZeroIsUndefined = BuiltinOp != Builtin::BI__lzcnt16 &&
                         BuiltinOp != Builtin::BI__lzcnt &&
                         BuiltinOp != Builtin::BI__lzcnt64;

  if (Val == 0) {
    if (Func->getBuiltinID() == Builtin::BI__builtin_clzg &&
        Call->getNumArgs() == 2) {
      // We have a fallback parameter.
      PrimType FallbackT = *S.getContext().classify(Call->getArg(1));
      const APSInt &Fallback = peekToAPSInt(S.Stk, FallbackT);
      pushInteger(S, Fallback, Call->getType());
      return true;
    }

    if (ZeroIsUndefined)
      return false;
  }

  pushInteger(S, Val.countl_zero(), Call->getType());
  return true;
}

static bool interp__builtin_ctz(InterpState &S, CodePtr OpPC,
                                const InterpFrame *Frame, const Function *Func,
                                const CallExpr *Call) {
  unsigned CallSize = callArgSize(S, Call);
  PrimType ValT = *S.getContext().classify(Call->getArg(0));
  const APSInt &Val = peekToAPSInt(S.Stk, ValT, CallSize);

  if (Val == 0) {
    if (Func->getBuiltinID() == Builtin::BI__builtin_ctzg &&
        Call->getNumArgs() == 2) {
      // We have a fallback parameter.
      PrimType FallbackT = *S.getContext().classify(Call->getArg(1));
      const APSInt &Fallback = peekToAPSInt(S.Stk, FallbackT);
      pushInteger(S, Fallback, Call->getType());
      return true;
    }
    return false;
  }

  pushInteger(S, Val.countr_zero(), Call->getType());
  return true;
}

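/// Interprets __builtin_bswap16/32/64, e.g.
/// __builtin_bswap32(0x12345678) == 0x78563412.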
static bool interp__builtin_bswap(InterpState &S, CodePtr OpPC,
                                  const InterpFrame *Frame,
                                  const Function *Func, const CallExpr *Call) {
  PrimType ReturnT = *S.getContext().classify(Call->getType());
  PrimType ValT = *S.getContext().classify(Call->getArg(0));
  const APSInt &Val = peekToAPSInt(S.Stk, ValT);
  assert(Val.getActiveBits() <= 64);

  INT_TYPE_SWITCH(ReturnT,
                  { S.Stk.push<T>(T::from(Val.byteSwap().getZExtValue())); });
  return true;
}

/// bool __atomic_always_lock_free(size_t, void const volatile*)
/// bool __atomic_is_lock_free(size_t, void const volatile*)
/// bool __c11_atomic_is_lock_free(size_t)
static bool interp__builtin_atomic_lock_free(InterpState &S, CodePtr OpPC,
                                             const InterpFrame *Frame,
                                             const Function *Func,
                                             const CallExpr *Call) {
  unsigned BuiltinOp = Func->getBuiltinID();

  PrimType ValT = *S.getContext().classify(Call->getArg(0));
  unsigned SizeValOffset = 0;
  if (BuiltinOp != Builtin::BI__c11_atomic_is_lock_free)
    SizeValOffset = align(primSize(ValT)) + align(primSize(PT_Ptr));
  const APSInt &SizeVal = peekToAPSInt(S.Stk, ValT, SizeValOffset);

  auto returnBool = [&S](bool Value) -> bool {
    S.Stk.push<Boolean>(Value);
    return true;
  };

  // For __atomic_is_lock_free(sizeof(_Atomic(T))), if the size is a power
  // of two less than or equal to the maximum inline atomic width, we know it
  // is lock-free.  If the size isn't a power of two, or greater than the
  // maximum alignment where we promote atomics, we know it is not lock-free
  // (at least not in the sense of atomic_is_lock_free).  Otherwise,
  // the answer can only be determined at runtime; for example, 16-byte
  // atomics have lock-free implementations on some, but not all,
  // x86-64 processors.

  // Check power-of-two.
  CharUnits Size = CharUnits::fromQuantity(SizeVal.getZExtValue());
  if (Size.isPowerOfTwo()) {
    // Check against inlining width.
    unsigned InlineWidthBits =
        S.getASTContext().getTargetInfo().getMaxAtomicInlineWidth();
    if (Size <= S.getASTContext().toCharUnitsFromBits(InlineWidthBits)) {

      // OK, we will inline appropriately-aligned operations of this size,
      // and _Atomic(T) is appropriately-aligned.
      if (BuiltinOp == Builtin::BI__c11_atomic_is_lock_free ||
          Size == CharUnits::One())
        return returnBool(true);

      // Same for null pointers.
      assert(BuiltinOp != Builtin::BI__c11_atomic_is_lock_free);
      const Pointer &Ptr = S.Stk.peek<Pointer>();
      if (Ptr.isZero())
        return returnBool(true);

      if (Ptr.isIntegralPointer()) {
        uint64_t IntVal = Ptr.getIntegerRepresentation();
        if (APSInt(APInt(64, IntVal, false), true).isAligned(Size.getAsAlign()))
          return returnBool(true);
      }

      const Expr *PtrArg = Call->getArg(1);
      // Otherwise, check the type's alignment against Size.
      if (const auto *ICE = dyn_cast<ImplicitCastExpr>(PtrArg)) {
        // Drop the potential implicit-cast to 'const volatile void*', getting
        // the underlying type.
        if (ICE->getCastKind() == CK_BitCast)
          PtrArg = ICE->getSubExpr();
      }

      if (auto PtrTy = PtrArg->getType()->getAs<PointerType>()) {
        QualType PointeeType = PtrTy->getPointeeType();
        if (!PointeeType->isIncompleteType() &&
            S.getASTContext().getTypeAlignInChars(PointeeType) >= Size) {
          // OK, we will inline operations on this object.
          return returnBool(true);
        }
      }
    }
  }

  if (BuiltinOp == Builtin::BI__atomic_always_lock_free)
    return returnBool(false);

  return false;
}

/// __builtin_complex(float A, float B);
static bool interp__builtin_complex(InterpState &S, CodePtr OpPC,
                                    const InterpFrame *Frame,
                                    const Function *Func,
                                    const CallExpr *Call) {
  const Floating &Arg2 = S.Stk.peek<Floating>();
  const Floating &Arg1 = S.Stk.peek<Floating>(align(primSize(PT_Float)) * 2);
  Pointer &Result = S.Stk.peek<Pointer>(align(primSize(PT_Float)) * 2 +
                                        align(primSize(PT_Ptr)));

  Result.atIndex(0).deref<Floating>() = Arg1;
  Result.atIndex(0).initialize();
  Result.atIndex(1).deref<Floating>() = Arg2;
  Result.atIndex(1).initialize();
  Result.initialize();

  return true;
}

/// __builtin_is_aligned()
/// __builtin_align_up()
/// __builtin_align_down()
/// The first parameter is either an integer or a pointer.
/// The second parameter is the requested alignment as an integer.
static bool interp__builtin_is_aligned_up_down(InterpState &S, CodePtr OpPC,
                                               const InterpFrame *Frame,
                                               const Function *Func,
                                               const CallExpr *Call) {
  unsigned BuiltinOp = Func->getBuiltinID();
  unsigned CallSize = callArgSize(S, Call);

  PrimType AlignmentT = *S.Ctx.classify(Call->getArg(1));
  const APSInt &Alignment = peekToAPSInt(S.Stk, AlignmentT);

  if (Alignment < 0 || !Alignment.isPowerOf2()) {
    S.FFDiag(Call, diag::note_constexpr_invalid_alignment) << Alignment;
    return false;
  }
  unsigned SrcWidth = S.getASTContext().getIntWidth(Call->getArg(0)->getType());
  APSInt MaxValue(APInt::getOneBitSet(SrcWidth, SrcWidth - 1));
  if (APSInt::compareValues(Alignment, MaxValue) > 0) {
    S.FFDiag(Call, diag::note_constexpr_alignment_too_big)
        << MaxValue << Call->getArg(0)->getType() << Alignment;
    return false;
  }

  // The first parameter is either an integer or a pointer (but not a function
  // pointer).
  PrimType FirstArgT = *S.Ctx.classify(Call->getArg(0));

  if (isIntegralType(FirstArgT)) {
    const APSInt &Src = peekToAPSInt(S.Stk, FirstArgT, CallSize);
    APSInt Align = Alignment.extOrTrunc(Src.getBitWidth());
    if (BuiltinOp == Builtin::BI__builtin_align_up) {
      APSInt AlignedVal =
          APSInt((Src + (Align - 1)) & ~(Align - 1), Src.isUnsigned());
      pushInteger(S, AlignedVal, Call->getType());
    } else if (BuiltinOp == Builtin::BI__builtin_align_down) {
      APSInt AlignedVal = APSInt(Src & ~(Align - 1), Src.isUnsigned());
      pushInteger(S, AlignedVal, Call->getType());
    } else {
      assert(*S.Ctx.classify(Call->getType()) == PT_Bool);
      S.Stk.push<Boolean>((Src & (Align - 1)) == 0);
    }
    return true;
  }

  assert(FirstArgT == PT_Ptr);
  const Pointer &Ptr = S.Stk.peek<Pointer>(CallSize);

  unsigned PtrOffset = Ptr.getIndex();
  CharUnits BaseAlignment =
      S.getASTContext().getDeclAlign(Ptr.getDeclDesc()->asValueDecl());
  CharUnits PtrAlign =
      BaseAlignment.alignmentAtOffset(CharUnits::fromQuantity(PtrOffset));

  if (BuiltinOp == Builtin::BI__builtin_is_aligned) {
    if (PtrAlign.getQuantity() >= Alignment) {
      S.Stk.push<Boolean>(true);
      return true;
    }
    // If the alignment is not known to be sufficient, some cases could still
    // be aligned at run time. However, if the requested alignment is less or
    // equal to the base alignment and the offset is not aligned, we know that
    // the run-time value can never be aligned.
    if (BaseAlignment.getQuantity() >= Alignment &&
        PtrAlign.getQuantity() < Alignment) {
      S.Stk.push<Boolean>(false);
      return true;
    }

    S.FFDiag(Call->getArg(0), diag::note_constexpr_alignment_compute)
        << Alignment;
    return false;
  }

  assert(BuiltinOp == Builtin::BI__builtin_align_down ||
         BuiltinOp == Builtin::BI__builtin_align_up);

  // For align_up/align_down, we can return the same value if the alignment
  // is known to be greater or equal to the requested value.
  if (PtrAlign.getQuantity() >= Alignment) {
    S.Stk.push<Pointer>(Ptr);
    return true;
  }

  // The alignment could be greater than the minimum at run-time, so we cannot
  // infer much about the resulting pointer value. One case is possible:
  // For `_Alignas(32) char buf[N]; __builtin_align_down(&buf[idx], 32)` we
  // can infer the correct index if the requested alignment is smaller than
  // the base alignment so we can perform the computation on the offset.
  if (BaseAlignment.getQuantity() >= Alignment) {
    assert(Alignment.getBitWidth() <= 64 &&
           "Cannot handle > 64-bit address-space");
    uint64_t Alignment64 = Alignment.getZExtValue();
    CharUnits NewOffset =
        CharUnits::fromQuantity(BuiltinOp == Builtin::BI__builtin_align_down
                                    ? llvm::alignDown(PtrOffset, Alignment64)
                                    : llvm::alignTo(PtrOffset, Alignment64));

    S.Stk.push<Pointer>(Ptr.atIndex(NewOffset.getQuantity()));
    return true;
  }

  // Otherwise, we cannot constant-evaluate the result.
  S.FFDiag(Call->getArg(0), diag::note_constexpr_alignment_adjust) << Alignment;
  return false;
}

/// __builtin_assume_aligned(Ptr, Alignment[, ExtraOffset])
static bool interp__builtin_assume_aligned(InterpState &S, CodePtr OpPC,
                                           const InterpFrame *Frame,
                                           const Function *Func,
                                           const CallExpr *Call) {
  assert(Call->getNumArgs() == 2 || Call->getNumArgs() == 3);

  // Might be called with function pointers in C.
  std::optional<PrimType> PtrT = S.Ctx.classify(Call->getArg(0));
  if (PtrT != PT_Ptr)
    return false;

  unsigned ArgSize = callArgSize(S, Call);
  const Pointer &Ptr = S.Stk.peek<Pointer>(ArgSize);
  std::optional<APSInt> ExtraOffset;
  APSInt Alignment;
  if (Call->getNumArgs() == 2) {
    Alignment = peekToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(1)));
  } else {
    PrimType AlignmentT = *S.Ctx.classify(Call->getArg(1));
    PrimType ExtraOffsetT = *S.Ctx.classify(Call->getArg(2));
    Alignment = peekToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(1)),
                             align(primSize(AlignmentT)) +
                                 align(primSize(ExtraOffsetT)));
    ExtraOffset = peekToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(2)));
  }

  CharUnits Align = CharUnits::fromQuantity(Alignment.getZExtValue());

  // If there is a base object, then it must have the correct alignment.
  if (Ptr.isBlockPointer()) {
    CharUnits BaseAlignment;
    if (const auto *VD = Ptr.getDeclDesc()->asValueDecl())
      BaseAlignment = S.getASTContext().getDeclAlign(VD);
    else if (const auto *E = Ptr.getDeclDesc()->asExpr())
      BaseAlignment = GetAlignOfExpr(S.getASTContext(), E, UETT_AlignOf);

    if (BaseAlignment < Align) {
      S.CCEDiag(Call->getArg(0),
                diag::note_constexpr_baa_insufficient_alignment)
          << 0 << BaseAlignment.getQuantity() << Align.getQuantity();
      return false;
    }
  }

  APValue AV = Ptr.toAPValue(S.getASTContext());
  CharUnits AVOffset = AV.getLValueOffset();
  if (ExtraOffset)
    AVOffset -= CharUnits::fromQuantity(ExtraOffset->getZExtValue());
  if (AVOffset.alignTo(Align) != AVOffset) {
    if (Ptr.isBlockPointer())
      S.CCEDiag(Call->getArg(0),
                diag::note_constexpr_baa_insufficient_alignment)
          << 1 << AVOffset.getQuantity() << Align.getQuantity();
    else
      S.CCEDiag(Call->getArg(0),
                diag::note_constexpr_baa_value_insufficient_alignment)
          << AVOffset.getQuantity() << Align.getQuantity();
    return false;
  }

  S.Stk.push<Pointer>(Ptr);
  return true;
}

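/// Interprets __builtin_ia32_bextr*: extracts Length bits starting at Shift,
/// where the second argument encodes Shift in bits 0-7 and Length in bits
/// 8-15, e.g. bextr(0x12345678, 0x0808) == 0x56.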
static bool interp__builtin_ia32_bextr(InterpState &S, CodePtr OpPC,
                                       const InterpFrame *Frame,
                                       const Function *Func,
                                       const CallExpr *Call) {
  if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() ||
      !Call->getArg(1)->getType()->isIntegerType())
    return false;

  PrimType ValT = *S.Ctx.classify(Call->getArg(0));
  PrimType IndexT = *S.Ctx.classify(Call->getArg(1));
  APSInt Val = peekToAPSInt(S.Stk, ValT,
                            align(primSize(ValT)) + align(primSize(IndexT)));
  APSInt Index = peekToAPSInt(S.Stk, IndexT);

  unsigned BitWidth = Val.getBitWidth();
  uint64_t Shift = Index.extractBitsAsZExtValue(8, 0);
  uint64_t Length = Index.extractBitsAsZExtValue(8, 8);
  Length = Length > BitWidth ? BitWidth : Length;

  // Handle out of bounds cases.
  if (Length == 0 || Shift >= BitWidth) {
    pushInteger(S, 0, Call->getType());
    return true;
  }

  uint64_t Result = Val.getZExtValue() >> Shift;
  Result &= llvm::maskTrailingOnes<uint64_t>(Length);
  pushInteger(S, Result, Call->getType());
  return true;
}

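/// Interprets __builtin_ia32_bzhi*: zeroes all bits at positions >= Index,
/// e.g. bzhi(0xFFFFFFFF, 8) == 0xFF.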
1307 
1308 static bool interp__builtin_ia32_bzhi(InterpState &S, CodePtr OpPC,
1309                                       const InterpFrame *Frame,
1310                                       const Function *Func,
1311                                       const CallExpr *Call) {
1312   QualType CallType = Call->getType();
1313   if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() ||
1314       !Call->getArg(1)->getType()->isIntegerType() ||
1315       !CallType->isIntegerType())
1316     return false;
1317 
1318   PrimType ValT = *S.Ctx.classify(Call->getArg(0));
1319   PrimType IndexT = *S.Ctx.classify(Call->getArg(1));
1320 
1321   APSInt Val = peekToAPSInt(S.Stk, ValT,
1322                             align(primSize(ValT)) + align(primSize(IndexT)));
1323   APSInt Idx = peekToAPSInt(S.Stk, IndexT);
1324 
1325   unsigned BitWidth = Val.getBitWidth();
1326   uint64_t Index = Idx.extractBitsAsZExtValue(8, 0);
1327 
1328   if (Index < BitWidth)
1329     Val.clearHighBits(BitWidth - Index);
1330 
1331   pushInteger(S, Val, CallType);
1332   return true;
1333 }
1334 
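/// Interprets __builtin_ia32_lzcnt*; unlike __builtin_clz, a zero input is
/// well-defined and yields the bit width.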
static bool interp__builtin_ia32_lzcnt(InterpState &S, CodePtr OpPC,
                                       const InterpFrame *Frame,
                                       const Function *Func,
                                       const CallExpr *Call) {
  QualType CallType = Call->getType();
  if (!CallType->isIntegerType() ||
      !Call->getArg(0)->getType()->isIntegerType())
    return false;

  APSInt Val = peekToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(0)));
  pushInteger(S, Val.countLeadingZeros(), CallType);
  return true;
}

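/// Interprets __builtin_ia32_tzcnt*; unlike __builtin_ctz, a zero input is
/// well-defined and yields the bit width.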
static bool interp__builtin_ia32_tzcnt(InterpState &S, CodePtr OpPC,
                                       const InterpFrame *Frame,
                                       const Function *Func,
                                       const CallExpr *Call) {
  QualType CallType = Call->getType();
  if (!CallType->isIntegerType() ||
      !Call->getArg(0)->getType()->isIntegerType())
    return false;

  APSInt Val = peekToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(0)));
  pushInteger(S, Val.countTrailingZeros(), CallType);
  return true;
}

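/// Interprets __builtin_ia32_pdep*: deposits the low bits of Val at the bit
/// positions set in Mask, e.g. pdep(0b101, 0b11010) == 0b10010.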
1363 static bool interp__builtin_ia32_pdep(InterpState &S, CodePtr OpPC,
1364                                       const InterpFrame *Frame,
1365                                       const Function *Func,
1366                                       const CallExpr *Call) {
1367   if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() ||
1368       !Call->getArg(1)->getType()->isIntegerType())
1369     return false;
1370 
1371   PrimType ValT = *S.Ctx.classify(Call->getArg(0));
1372   PrimType MaskT = *S.Ctx.classify(Call->getArg(1));
1373 
1374   APSInt Val =
1375       peekToAPSInt(S.Stk, ValT, align(primSize(ValT)) + align(primSize(MaskT)));
1376   APSInt Mask = peekToAPSInt(S.Stk, MaskT);
1377 
1378   unsigned BitWidth = Val.getBitWidth();
1379   APInt Result = APInt::getZero(BitWidth);
1380   for (unsigned I = 0, P = 0; I != BitWidth; ++I) {
1381     if (Mask[I])
1382       Result.setBitVal(I, Val[P++]);
1383   }
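  // Illustrative example: __builtin_ia32_pdep_si(0b101, 0b11010) deposits the
  // low bits of the value into the set positions of the mask, giving 0b10010.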
1384   pushInteger(S, Result, Call->getType());
1385   return true;
1386 }
1387 
1388 static bool interp__builtin_ia32_pext(InterpState &S, CodePtr OpPC,
1389                                       const InterpFrame *Frame,
1390                                       const Function *Func,
1391                                       const CallExpr *Call) {
1392   if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() ||
1393       !Call->getArg(1)->getType()->isIntegerType())
1394     return false;
1395 
1396   PrimType ValT = *S.Ctx.classify(Call->getArg(0));
1397   PrimType MaskT = *S.Ctx.classify(Call->getArg(1));
1398 
1399   APSInt Val =
1400       peekToAPSInt(S.Stk, ValT, align(primSize(ValT)) + align(primSize(MaskT)));
1401   APSInt Mask = peekToAPSInt(S.Stk, MaskT);
1402 
1403   unsigned BitWidth = Val.getBitWidth();
1404   APInt Result = APInt::getZero(BitWidth);
1405   for (unsigned I = 0, P = 0; I != BitWidth; ++I) {
1406     if (Mask[I])
1407       Result.setBitVal(P++, Val[I]);
1408   }
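  // Illustrative example: __builtin_ia32_pext_si(0b10010, 0b11010) extracts
  // the bits at the set positions of the mask into the low bits, giving 0b101.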
1409   pushInteger(S, Result, Call->getType());
1410   return true;
1411 }
1412 
1413 static bool interp__builtin_ia32_addcarry_subborrow(InterpState &S,
1414                                                     CodePtr OpPC,
1415                                                     const InterpFrame *Frame,
1416                                                     const Function *Func,
1417                                                     const CallExpr *Call) {
1418   if (Call->getNumArgs() != 4 || !Call->getArg(0)->getType()->isIntegerType() ||
1419       !Call->getArg(1)->getType()->isIntegerType() ||
1420       !Call->getArg(2)->getType()->isIntegerType())
1421     return false;
1422 
1423   unsigned BuiltinOp = Func->getBuiltinID();
1424   APSInt CarryIn = getAPSIntParam(Frame, 0);
1425   APSInt LHS = getAPSIntParam(Frame, 1);
1426   APSInt RHS = getAPSIntParam(Frame, 2);
1427 
1428   bool IsAdd = BuiltinOp == clang::X86::BI__builtin_ia32_addcarryx_u32 ||
1429                BuiltinOp == clang::X86::BI__builtin_ia32_addcarryx_u64;
1430 
1431   unsigned BitWidth = LHS.getBitWidth();
1432   unsigned CarryInBit = CarryIn.ugt(0) ? 1 : 0;
1433   APInt ExResult =
1434       IsAdd ? (LHS.zext(BitWidth + 1) + (RHS.zext(BitWidth + 1) + CarryInBit))
1435             : (LHS.zext(BitWidth + 1) - (RHS.zext(BitWidth + 1) + CarryInBit));
1436 
1437   APInt Result = ExResult.extractBits(BitWidth, 0);
1438   APSInt CarryOut =
1439       APSInt(ExResult.extractBits(1, BitWidth), /*IsUnsigned=*/true);
1440 
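  // The arithmetic result is written through the out-pointer (argument 3);
  // the builtin's return value is the carry/borrow-out bit. Illustrative
  // example: __builtin_ia32_addcarryx_u32(1, 0xFFFFFFFF, 0, &Out) stores 0
  // and returns 1.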
1441   Pointer &CarryOutPtr = S.Stk.peek<Pointer>();
1442   QualType CarryOutType = Call->getArg(3)->getType()->getPointeeType();
1443   PrimType CarryOutT = *S.getContext().classify(CarryOutType);
1444   assignInteger(CarryOutPtr, CarryOutT, APSInt(Result, /*IsUnsigned=*/true));
1445 
1446   pushInteger(S, CarryOut, Call->getType());
1447 
1448   return true;
1449 }
1450 
1451 static bool interp__builtin_os_log_format_buffer_size(InterpState &S,
1452                                                       CodePtr OpPC,
1453                                                       const InterpFrame *Frame,
1454                                                       const Function *Func,
1455                                                       const CallExpr *Call) {
1456   analyze_os_log::OSLogBufferLayout Layout;
1457   analyze_os_log::computeOSLogBufferLayout(S.getASTContext(), Call, Layout);
1458   pushInteger(S, Layout.size().getQuantity(), Call->getType());
1459   return true;
1460 }
1461 
1462 static bool interp__builtin_ptrauth_string_discriminator(
1463     InterpState &S, CodePtr OpPC, const InterpFrame *Frame,
1464     const Function *Func, const CallExpr *Call) {
1465   const auto &Ptr = S.Stk.peek<Pointer>();
1466   assert(Ptr.getFieldDesc()->isPrimitiveArray());
1467 
1468   StringRef R(&Ptr.deref<char>(), Ptr.getFieldDesc()->getNumElems() - 1);
1469   uint64_t Result = getPointerAuthStableSipHash(R);
1470   pushInteger(S, Result, Call->getType());
1471   return true;
1472 }
1473 
1474 // FIXME: This implementation is not complete.
1475 // The Compiler instance we create cannot access the current stack frame, local
1476 // variables, function parameters, etc. We also need protection from
1477 // side-effects, fatal errors, etc.
1478 static bool interp__builtin_constant_p(InterpState &S, CodePtr OpPC,
1479                                        const InterpFrame *Frame,
1480                                        const Function *Func,
1481                                        const CallExpr *Call) {
1482   const Expr *Arg = Call->getArg(0);
1483   QualType ArgType = Arg->getType();
1484 
1485   auto returnInt = [&S, Call](bool Value) -> bool {
1486     pushInteger(S, Value, Call->getType());
1487     return true;
1488   };
1489 
1490   // __builtin_constant_p always has one operand. The rules which gcc follows
1491   // are not precisely documented, but are as follows:
1492   //
1493   //  - If the operand is of integral, floating, complex or enumeration type,
1494   //    and can be folded to a known value of that type, it returns 1.
1495   //  - If the operand can be folded to a pointer to the first character
1496   //    of a string literal (or such a pointer cast to an integral type)
1497   //    or to a null pointer or an integer cast to a pointer, it returns 1.
1498   //
1499   // Otherwise, it returns 0.
1500   //
1501   // FIXME: GCC also intends to return 1 for literals of aggregate types, but
1502   // its support for this did not work prior to GCC 9 and is not yet well
1503   // understood.
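  //
  // Illustrative examples: __builtin_constant_p(3 * 4) and
  // __builtin_constant_p("foo") both evaluate to 1, while passing a
  // reference to a non-constant variable evaluates to 0.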
1504   if (ArgType->isIntegralOrEnumerationType() || ArgType->isFloatingType() ||
1505       ArgType->isAnyComplexType() || ArgType->isPointerType() ||
1506       ArgType->isNullPtrType()) {
1507     InterpStack Stk;
1508     Compiler<EvalEmitter> C(S.Ctx, S.P, S, Stk);
1509     auto Res = C.interpretExpr(Arg, /*ConvertResultToRValue=*/Arg->isGLValue());
1510     if (Res.isInvalid()) {
1511       C.cleanup();
1512       Stk.clear();
1513     }
1514 
1515     if (!Res.isInvalid() && !Res.empty()) {
1516       const APValue &LV = Res.toAPValue();
1517       if (LV.isLValue()) {
1518         APValue::LValueBase Base = LV.getLValueBase();
1519         if (Base.isNull()) {
1520           // A null base is acceptable.
1521           return returnInt(true);
1522         } else if (const auto *E = Base.dyn_cast<const Expr *>()) {
1523           if (!isa<StringLiteral>(E))
1524             return returnInt(false);
1525           return returnInt(LV.getLValueOffset().isZero());
1526         } else if (Base.is<TypeInfoLValue>()) {
1527           // Surprisingly, GCC considers __builtin_constant_p(&typeid(int)) to
1528           // evaluate to true.
1529           return returnInt(true);
1530         } else {
1531           // Any other base is not constant enough for GCC.
1532           return returnInt(false);
1533         }
1534       }
1535     }
1536 
1537     // Otherwise, any constant value is good enough.
1538     return returnInt(true);
1539   }
1540 
1541   return returnInt(false);
1542 }
1543 
1544 static bool interp__builtin_operator_new(InterpState &S, CodePtr OpPC,
1545                                          const InterpFrame *Frame,
1546                                          const Function *Func,
1547                                          const CallExpr *Call) {
1548   // A call to __builtin_operator_new is only valid within
1549   // std::allocator<T>::allocate. Walk up the call stack to find the
1550   // appropriate caller and get the element type from it.
1551   QualType ElemType;
1552 
1553   for (const InterpFrame *F = Frame; F; F = F->Caller) {
1554     const Function *Func = F->getFunction();
1555     if (!Func)
1556       continue;
1557     const auto *MD = dyn_cast_if_present<CXXMethodDecl>(Func->getDecl());
1558     if (!MD)
1559       continue;
1560     const IdentifierInfo *FnII = MD->getIdentifier();
1561     if (!FnII || !FnII->isStr("allocate"))
1562       continue;
1563 
1564     const auto *CTSD =
1565         dyn_cast<ClassTemplateSpecializationDecl>(MD->getParent());
1566     if (!CTSD)
1567       continue;
1568 
1569     const IdentifierInfo *ClassII = CTSD->getIdentifier();
1570     const TemplateArgumentList &TAL = CTSD->getTemplateArgs();
1571     if (CTSD->isInStdNamespace() && ClassII && ClassII->isStr("allocator") &&
1572         TAL.size() >= 1 && TAL[0].getKind() == TemplateArgument::Type) {
1573       ElemType = TAL[0].getAsType();
1574       break;
1575     }
1576   }
1577 
1578   if (ElemType.isNull()) {
1579     S.FFDiag(Call, S.getLangOpts().CPlusPlus20
1580                        ? diag::note_constexpr_new_untyped
1581                        : diag::note_constexpr_new);
1582     return false;
1583   }
1584 
1585   if (ElemType->isIncompleteType() || ElemType->isFunctionType()) {
1586     S.FFDiag(Call, diag::note_constexpr_new_not_complete_object_type)
1587         << (ElemType->isIncompleteType() ? 0 : 1) << ElemType;
1588     return false;
1589   }
1590 
1591   APSInt Bytes = peekToAPSInt(S.Stk, *S.getContext().classify(Call->getArg(0)));
1592   CharUnits ElemSize = S.getASTContext().getTypeSizeInChars(ElemType);
1593   assert(!ElemSize.isZero());
1594   // Divide the number of bytes by sizeof(ElemType), so we get the number of
1595   // elements we should allocate.
1596   APInt NumElems, Remainder;
1597   APInt ElemSizeAP(Bytes.getBitWidth(), ElemSize.getQuantity());
1598   APInt::udivrem(Bytes, ElemSizeAP, NumElems, Remainder);
1599   if (Remainder != 0) {
1600     // This likely indicates a bug in the implementation of 'std::allocator'.
1601     S.FFDiag(Call, diag::note_constexpr_operator_new_bad_size)
1602         << Bytes << APSInt(ElemSizeAP, true) << ElemType;
1603     return false;
1604   }
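  // Illustrative example: std::allocator<int>::allocate(5) reaches this point
  // with Bytes == 5 * sizeof(int); assuming a 4-byte int, NumElems == 5 and
  // Remainder == 0.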
1605 
1606   // NB: This is the same check we use in CheckArraySize().
1607   if (NumElems.getActiveBits() >
1608           ConstantArrayType::getMaxSizeBits(S.getASTContext()) ||
1609       NumElems.ugt(Descriptor::MaxArrayElemBytes / ElemSize.getQuantity())) {
1610     // FIXME: NoThrow check?
1611     const SourceInfo &Loc = S.Current->getSource(OpPC);
1612     S.FFDiag(Loc, diag::note_constexpr_new_too_large)
1613         << NumElems.getZExtValue();
1614     return false;
1615   }
1616 
1617   std::optional<PrimType> ElemT = S.getContext().classify(ElemType);
1618   DynamicAllocator &Allocator = S.getAllocator();
1619   if (ElemT) {
1620     if (NumElems.ule(1)) {
1621       const Descriptor *Desc =
1622           S.P.createDescriptor(Call, *ElemT, Descriptor::InlineDescMD,
1623                                /*IsConst=*/false, /*IsTemporary=*/false,
1624                                /*IsMutable=*/false);
1625       Block *B = Allocator.allocate(Desc, S.getContext().getEvalID(),
1626                                     DynamicAllocator::Form::Operator);
1627       assert(B);
1628 
1629       S.Stk.push<Pointer>(B);
1630       return true;
1631     }
1632     assert(NumElems.ugt(1));
1633 
1634     Block *B =
1635         Allocator.allocate(Call, *ElemT, NumElems.getZExtValue(),
1636                            S.Ctx.getEvalID(), DynamicAllocator::Form::Operator);
1637     assert(B);
1638     S.Stk.push<Pointer>(B);
1639     return true;
1640   }
1641 
1642   assert(!ElemT);
1643   // Structs etc.
1644   const Descriptor *Desc = S.P.createDescriptor(
1645       Call, ElemType.getTypePtr(), Descriptor::InlineDescMD,
1646       /*IsConst=*/false, /*IsTemporary=*/false, /*IsMutable=*/false,
1647       /*Init=*/nullptr);
1648 
1649   if (NumElems.ule(1)) {
1650     Block *B = Allocator.allocate(Desc, S.getContext().getEvalID(),
1651                                   DynamicAllocator::Form::Operator);
1652     assert(B);
1653     S.Stk.push<Pointer>(B);
1654     return true;
1655   }
1656 
1657   Block *B =
1658       Allocator.allocate(Desc, NumElems.getZExtValue(), S.Ctx.getEvalID(),
1659                          DynamicAllocator::Form::Operator);
1660   assert(B);
1661   S.Stk.push<Pointer>(B);
1662   return true;
1663 }
1664 
1665 static bool interp__builtin_operator_delete(InterpState &S, CodePtr OpPC,
1666                                             const InterpFrame *Frame,
1667                                             const Function *Func,
1668                                             const CallExpr *Call) {
1669   const Expr *Source = nullptr;
1670   const Block *BlockToDelete = nullptr;
1671 
1672   {
1673     const Pointer &Ptr = S.Stk.peek<Pointer>();
1674 
1675     if (Ptr.isZero()) {
1676       S.CCEDiag(Call, diag::note_constexpr_deallocate_null);
1677       return true;
1678     }
1679 
1680     Source = Ptr.getDeclDesc()->asExpr();
1681     BlockToDelete = Ptr.block();
1682   }
1683   assert(BlockToDelete);
1684 
1685   DynamicAllocator &Allocator = S.getAllocator();
1686   const Descriptor *BlockDesc = BlockToDelete->getDescriptor();
1687   std::optional<DynamicAllocator::Form> AllocForm =
1688       Allocator.getAllocationForm(Source);
1689 
1690   if (!Allocator.deallocate(Source, BlockToDelete, S)) {
1691     // Nothing has been deallocated, so this must be a double-delete.
1692     const SourceInfo &Loc = S.Current->getSource(OpPC);
1693     S.FFDiag(Loc, diag::note_constexpr_double_delete);
1694     return false;
1695   }
1696   assert(AllocForm);
1697 
1698   return CheckNewDeleteForms(
1699       S, OpPC, *AllocForm, DynamicAllocator::Form::Operator, BlockDesc, Source);
1700 }
1701 
1702 static bool interp__builtin_arithmetic_fence(InterpState &S, CodePtr OpPC,
1703                                              const InterpFrame *Frame,
1704                                              const Function *Func,
1705                                              const CallExpr *Call) {
1706   const Floating &Arg0 = S.Stk.peek<Floating>();
1707   S.Stk.push<Floating>(Arg0);
1708   return true;
1709 }
1710 
1711 static bool interp__builtin_vector_reduce(InterpState &S, CodePtr OpPC,
1712                                           const InterpFrame *Frame,
1713                                           const Function *Func,
1714                                           const CallExpr *Call) {
1715   const Pointer &Arg = S.Stk.peek<Pointer>();
1716   assert(Arg.getFieldDesc()->isPrimitiveArray());
1717 
1718   unsigned ID = Func->getBuiltinID();
1719   QualType ElemType = Arg.getFieldDesc()->getElemQualType();
1720   assert(Call->getType() == ElemType);
1721   PrimType ElemT = *S.getContext().classify(ElemType);
1722   unsigned NumElems = Arg.getNumElems();
1723 
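  // Illustrative example: for a vector type declared with
  // __attribute__((vector_size(16))) over int, __builtin_reduce_add on
  // {1, 2, 3, 4} folds to 10.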
1724   INT_TYPE_SWITCH_NO_BOOL(ElemT, {
1725     T Result = Arg.atIndex(0).deref<T>();
1726     unsigned BitWidth = Result.bitWidth();
1727     for (unsigned I = 1; I != NumElems; ++I) {
1728       T Elem = Arg.atIndex(I).deref<T>();
1729       T PrevResult = Result;
1730 
1731       if (ID == Builtin::BI__builtin_reduce_add) {
1732         if (T::add(Result, Elem, BitWidth, &Result)) {
1733           unsigned OverflowBits = BitWidth + 1;
1734           (void)handleOverflow(S, OpPC,
1735                                (PrevResult.toAPSInt(OverflowBits) +
1736                                 Elem.toAPSInt(OverflowBits)));
1737           return false;
1738         }
1739       } else if (ID == Builtin::BI__builtin_reduce_mul) {
1740         if (T::mul(Result, Elem, BitWidth, &Result)) {
1741           unsigned OverflowBits = BitWidth * 2;
1742           (void)handleOverflow(S, OpPC,
1743                                (PrevResult.toAPSInt(OverflowBits) *
1744                                 Elem.toAPSInt(OverflowBits)));
1745           return false;
1746         }
1747 
1748       } else if (ID == Builtin::BI__builtin_reduce_and) {
1749         (void)T::bitAnd(Result, Elem, BitWidth, &Result);
1750       } else if (ID == Builtin::BI__builtin_reduce_or) {
1751         (void)T::bitOr(Result, Elem, BitWidth, &Result);
1752       } else if (ID == Builtin::BI__builtin_reduce_xor) {
1753         (void)T::bitXor(Result, Elem, BitWidth, &Result);
1754       } else {
1755         llvm_unreachable("Unhandled vector reduce builtin");
1756       }
1757     }
1758     pushInteger(S, Result.toAPSInt(), Call->getType());
1759   });
1760 
1761   return true;
1762 }
1763 
1764 /// Can be called with an integer or vector as the first and only parameter.
1765 static bool interp__builtin_elementwise_popcount(InterpState &S, CodePtr OpPC,
1766                                                  const InterpFrame *Frame,
1767                                                  const Function *Func,
1768                                                  const CallExpr *Call) {
1769   assert(Call->getNumArgs() == 1);
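  // Illustrative example: __builtin_elementwise_popcount(7u) evaluates to 3;
  // for a vector argument, the population count is taken lane-wise.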
1770   if (Call->getArg(0)->getType()->isIntegerType()) {
1771     PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
1772     APSInt Val = peekToAPSInt(S.Stk, ArgT);
1773     pushInteger(S, Val.popcount(), Call->getType());
1774     return true;
1775   }
1776   // Otherwise, the argument must be a vector.
1777   assert(Call->getArg(0)->getType()->isVectorType());
1778   const Pointer &Arg = S.Stk.peek<Pointer>();
1779   assert(Arg.getFieldDesc()->isPrimitiveArray());
1780   const Pointer &Dst = S.Stk.peek<Pointer>(primSize(PT_Ptr) * 2);
1781   assert(Dst.getFieldDesc()->isPrimitiveArray());
1782   assert(Arg.getFieldDesc()->getNumElems() ==
1783          Dst.getFieldDesc()->getNumElems());
1784 
1785   QualType ElemType = Arg.getFieldDesc()->getElemQualType();
1786   PrimType ElemT = *S.getContext().classify(ElemType);
1787   unsigned NumElems = Arg.getNumElems();
1788 
1789   // FIXME: Reading from uninitialized vector elements?
1790   for (unsigned I = 0; I != NumElems; ++I) {
1791     INT_TYPE_SWITCH_NO_BOOL(ElemT, {
1792       Dst.atIndex(I).deref<T>() =
1793           T::from(Arg.atIndex(I).deref<T>().toAPSInt().popcount());
1794       Dst.atIndex(I).initialize();
1795     });
1796   }
1797 
1798   return true;
1799 }

1800 static bool interp__builtin_memcpy(InterpState &S, CodePtr OpPC,
1801                                    const InterpFrame *Frame,
1802                                    const Function *Func, const CallExpr *Call) {
1803   assert(Call->getNumArgs() == 3);
1804   unsigned ID = Func->getBuiltinID();
1805   Pointer DestPtr = getParam<Pointer>(Frame, 0);
1806   const Pointer &SrcPtr = getParam<Pointer>(Frame, 1);
1807   const APSInt &Size =
1808       peekToAPSInt(S.Stk, *S.getContext().classify(Call->getArg(2)));
1809   assert(!Size.isSigned() && "memcpy and friends take an unsigned size");
1810 
1811   if (ID == Builtin::BImemcpy || ID == Builtin::BImemmove)
1812     diagnoseNonConstexprBuiltin(S, OpPC, ID);
1813 
1814   bool Move = (ID == Builtin::BI__builtin_memmove || ID == Builtin::BImemmove);
1815 
1816   // If the size is zero, we treat this as always being a valid no-op.
1817   if (Size.isZero()) {
1818     S.Stk.push<Pointer>(DestPtr);
1819     return true;
1820   }
1821 
1822   if (SrcPtr.isZero() || DestPtr.isZero()) {
1823     Pointer DiagPtr = (SrcPtr.isZero() ? SrcPtr : DestPtr);
1824     S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_null)
1825         << /*IsMove=*/Move << /*IsWchar=*/false << !SrcPtr.isZero()
1826         << DiagPtr.toDiagnosticString(S.getASTContext());
1827     return false;
1828   }
1829 
1830   // As a last resort, reject dummy pointers.
1831   if (DestPtr.isDummy() || SrcPtr.isDummy())
1832     return false;
1833 
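  // The copy itself is modeled as a bit-cast of the source object into the
  // destination; only a zero Size is special-cased above.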
1834   if (!DoBitCastPtr(S, OpPC, SrcPtr, DestPtr))
1835     return false;
1836 
1837   S.Stk.push<Pointer>(DestPtr);
1838   return true;
1839 }
1840 
1841 bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const Function *F,
1842                       const CallExpr *Call, uint32_t BuiltinID) {
1843   const InterpFrame *Frame = S.Current;
1844 
1845   std::optional<PrimType> ReturnT = S.getContext().classify(Call);
1846 
1847   switch (BuiltinID) {
1848   case Builtin::BI__builtin_is_constant_evaluated:
1849     if (!interp__builtin_is_constant_evaluated(S, OpPC, Frame, Call))
1850       return false;
1851     break;
1852   case Builtin::BI__builtin_assume:
1853   case Builtin::BI__assume:
1854     break;
1855   case Builtin::BI__builtin_strcmp:
1856   case Builtin::BIstrcmp:
1857     if (!interp__builtin_strcmp(S, OpPC, Frame, F, Call))
1858       return false;
1859     break;
1860   case Builtin::BI__builtin_strlen:
1861   case Builtin::BIstrlen:
1862     if (!interp__builtin_strlen(S, OpPC, Frame, F, Call))
1863       return false;
1864     break;
1865   case Builtin::BI__builtin_nan:
1866   case Builtin::BI__builtin_nanf:
1867   case Builtin::BI__builtin_nanl:
1868   case Builtin::BI__builtin_nanf16:
1869   case Builtin::BI__builtin_nanf128:
1870     if (!interp__builtin_nan(S, OpPC, Frame, F, /*Signaling=*/false))
1871       return false;
1872     break;
1873   case Builtin::BI__builtin_nans:
1874   case Builtin::BI__builtin_nansf:
1875   case Builtin::BI__builtin_nansl:
1876   case Builtin::BI__builtin_nansf16:
1877   case Builtin::BI__builtin_nansf128:
1878     if (!interp__builtin_nan(S, OpPC, Frame, F, /*Signaling=*/true))
1879       return false;
1880     break;
1881 
1882   case Builtin::BI__builtin_huge_val:
1883   case Builtin::BI__builtin_huge_valf:
1884   case Builtin::BI__builtin_huge_vall:
1885   case Builtin::BI__builtin_huge_valf16:
1886   case Builtin::BI__builtin_huge_valf128:
1887   case Builtin::BI__builtin_inf:
1888   case Builtin::BI__builtin_inff:
1889   case Builtin::BI__builtin_infl:
1890   case Builtin::BI__builtin_inff16:
1891   case Builtin::BI__builtin_inff128:
1892     if (!interp__builtin_inf(S, OpPC, Frame, F))
1893       return false;
1894     break;
1895   case Builtin::BI__builtin_copysign:
1896   case Builtin::BI__builtin_copysignf:
1897   case Builtin::BI__builtin_copysignl:
1898   case Builtin::BI__builtin_copysignf128:
1899     if (!interp__builtin_copysign(S, OpPC, Frame, F))
1900       return false;
1901     break;
1902 
1903   case Builtin::BI__builtin_fmin:
1904   case Builtin::BI__builtin_fminf:
1905   case Builtin::BI__builtin_fminl:
1906   case Builtin::BI__builtin_fminf16:
1907   case Builtin::BI__builtin_fminf128:
1908     if (!interp__builtin_fmin(S, OpPC, Frame, F, /*IsNumBuiltin=*/false))
1909       return false;
1910     break;
1911 
1912   case Builtin::BI__builtin_fminimum_num:
1913   case Builtin::BI__builtin_fminimum_numf:
1914   case Builtin::BI__builtin_fminimum_numl:
1915   case Builtin::BI__builtin_fminimum_numf16:
1916   case Builtin::BI__builtin_fminimum_numf128:
1917     if (!interp__builtin_fmin(S, OpPC, Frame, F, /*IsNumBuiltin=*/true))
1918       return false;
1919     break;
1920 
1921   case Builtin::BI__builtin_fmax:
1922   case Builtin::BI__builtin_fmaxf:
1923   case Builtin::BI__builtin_fmaxl:
1924   case Builtin::BI__builtin_fmaxf16:
1925   case Builtin::BI__builtin_fmaxf128:
1926     if (!interp__builtin_fmax(S, OpPC, Frame, F, /*IsNumBuiltin=*/false))
1927       return false;
1928     break;
1929 
1930   case Builtin::BI__builtin_fmaximum_num:
1931   case Builtin::BI__builtin_fmaximum_numf:
1932   case Builtin::BI__builtin_fmaximum_numl:
1933   case Builtin::BI__builtin_fmaximum_numf16:
1934   case Builtin::BI__builtin_fmaximum_numf128:
1935     if (!interp__builtin_fmax(S, OpPC, Frame, F, /*IsNumBuiltin=*/true))
1936       return false;
1937     break;
1938 
1939   case Builtin::BI__builtin_isnan:
1940     if (!interp__builtin_isnan(S, OpPC, Frame, F, Call))
1941       return false;
1942     break;
1943   case Builtin::BI__builtin_issignaling:
1944     if (!interp__builtin_issignaling(S, OpPC, Frame, F, Call))
1945       return false;
1946     break;
1947 
1948   case Builtin::BI__builtin_isinf:
1949     if (!interp__builtin_isinf(S, OpPC, Frame, F, /*Sign=*/false, Call))
1950       return false;
1951     break;
1952 
1953   case Builtin::BI__builtin_isinf_sign:
1954     if (!interp__builtin_isinf(S, OpPC, Frame, F, /*Sign=*/true, Call))
1955       return false;
1956     break;
1957 
1958   case Builtin::BI__builtin_isfinite:
1959     if (!interp__builtin_isfinite(S, OpPC, Frame, F, Call))
1960       return false;
1961     break;
1962   case Builtin::BI__builtin_isnormal:
1963     if (!interp__builtin_isnormal(S, OpPC, Frame, F, Call))
1964       return false;
1965     break;
1966   case Builtin::BI__builtin_issubnormal:
1967     if (!interp__builtin_issubnormal(S, OpPC, Frame, F, Call))
1968       return false;
1969     break;
1970   case Builtin::BI__builtin_iszero:
1971     if (!interp__builtin_iszero(S, OpPC, Frame, F, Call))
1972       return false;
1973     break;
1974   case Builtin::BI__builtin_signbit:
1975   case Builtin::BI__builtin_signbitf:
1976   case Builtin::BI__builtin_signbitl:
1977     if (!interp__builtin_signbit(S, OpPC, Frame, F, Call))
1978       return false;
1979     break;
1980   case Builtin::BI__builtin_isgreater:
1981   case Builtin::BI__builtin_isgreaterequal:
1982   case Builtin::BI__builtin_isless:
1983   case Builtin::BI__builtin_islessequal:
1984   case Builtin::BI__builtin_islessgreater:
1985   case Builtin::BI__builtin_isunordered:
1986     if (!interp_floating_comparison(S, OpPC, Frame, F, Call))
1987       return false;
1988     break;
1989   case Builtin::BI__builtin_isfpclass:
1990     if (!interp__builtin_isfpclass(S, OpPC, Frame, F, Call))
1991       return false;
1992     break;
1993   case Builtin::BI__builtin_fpclassify:
1994     if (!interp__builtin_fpclassify(S, OpPC, Frame, F, Call))
1995       return false;
1996     break;
1997 
1998   case Builtin::BI__builtin_fabs:
1999   case Builtin::BI__builtin_fabsf:
2000   case Builtin::BI__builtin_fabsl:
2001   case Builtin::BI__builtin_fabsf128:
2002     if (!interp__builtin_fabs(S, OpPC, Frame, F))
2003       return false;
2004     break;
2005 
2006   case Builtin::BI__builtin_abs:
2007   case Builtin::BI__builtin_labs:
2008   case Builtin::BI__builtin_llabs:
2009     if (!interp__builtin_abs(S, OpPC, Frame, F, Call))
2010       return false;
2011     break;
2012 
2013   case Builtin::BI__builtin_popcount:
2014   case Builtin::BI__builtin_popcountl:
2015   case Builtin::BI__builtin_popcountll:
2016   case Builtin::BI__builtin_popcountg:
2017   case Builtin::BI__popcnt16: // Microsoft variants of popcount
2018   case Builtin::BI__popcnt:
2019   case Builtin::BI__popcnt64:
2020     if (!interp__builtin_popcount(S, OpPC, Frame, F, Call))
2021       return false;
2022     break;
2023 
2024   case Builtin::BI__builtin_parity:
2025   case Builtin::BI__builtin_parityl:
2026   case Builtin::BI__builtin_parityll:
2027     if (!interp__builtin_parity(S, OpPC, Frame, F, Call))
2028       return false;
2029     break;
2030 
2031   case Builtin::BI__builtin_clrsb:
2032   case Builtin::BI__builtin_clrsbl:
2033   case Builtin::BI__builtin_clrsbll:
2034     if (!interp__builtin_clrsb(S, OpPC, Frame, F, Call))
2035       return false;
2036     break;
2037 
2038   case Builtin::BI__builtin_bitreverse8:
2039   case Builtin::BI__builtin_bitreverse16:
2040   case Builtin::BI__builtin_bitreverse32:
2041   case Builtin::BI__builtin_bitreverse64:
2042     if (!interp__builtin_bitreverse(S, OpPC, Frame, F, Call))
2043       return false;
2044     break;
2045 
2046   case Builtin::BI__builtin_classify_type:
2047     if (!interp__builtin_classify_type(S, OpPC, Frame, F, Call))
2048       return false;
2049     break;
2050 
2051   case Builtin::BI__builtin_expect:
2052   case Builtin::BI__builtin_expect_with_probability:
2053     if (!interp__builtin_expect(S, OpPC, Frame, F, Call))
2054       return false;
2055     break;
2056 
2057   case Builtin::BI__builtin_rotateleft8:
2058   case Builtin::BI__builtin_rotateleft16:
2059   case Builtin::BI__builtin_rotateleft32:
2060   case Builtin::BI__builtin_rotateleft64:
2061   case Builtin::BI_rotl8: // Microsoft variants of rotate left
2062   case Builtin::BI_rotl16:
2063   case Builtin::BI_rotl:
2064   case Builtin::BI_lrotl:
2065   case Builtin::BI_rotl64:
2066     if (!interp__builtin_rotate(S, OpPC, Frame, F, Call, /*Right=*/false))
2067       return false;
2068     break;
2069 
2070   case Builtin::BI__builtin_rotateright8:
2071   case Builtin::BI__builtin_rotateright16:
2072   case Builtin::BI__builtin_rotateright32:
2073   case Builtin::BI__builtin_rotateright64:
2074   case Builtin::BI_rotr8: // Microsoft variants of rotate right
2075   case Builtin::BI_rotr16:
2076   case Builtin::BI_rotr:
2077   case Builtin::BI_lrotr:
2078   case Builtin::BI_rotr64:
2079     if (!interp__builtin_rotate(S, OpPC, Frame, F, Call, /*Right=*/true))
2080       return false;
2081     break;
2082 
2083   case Builtin::BI__builtin_ffs:
2084   case Builtin::BI__builtin_ffsl:
2085   case Builtin::BI__builtin_ffsll:
2086     if (!interp__builtin_ffs(S, OpPC, Frame, F, Call))
2087       return false;
2088     break;
2089   case Builtin::BIaddressof:
2090   case Builtin::BI__addressof:
2091   case Builtin::BI__builtin_addressof:
2092     if (!interp__builtin_addressof(S, OpPC, Frame, F, Call))
2093       return false;
2094     break;
2095 
2096   case Builtin::BIas_const:
2097   case Builtin::BIforward:
2098   case Builtin::BIforward_like:
2099   case Builtin::BImove:
2100   case Builtin::BImove_if_noexcept:
2101     if (!interp__builtin_move(S, OpPC, Frame, F, Call))
2102       return false;
2103     break;
2104 
2105   case Builtin::BI__builtin_eh_return_data_regno:
2106     if (!interp__builtin_eh_return_data_regno(S, OpPC, Frame, F, Call))
2107       return false;
2108     break;
2109 
2110   case Builtin::BI__builtin_launder:
2111     if (!noopPointer(S, OpPC, Frame, F, Call))
2112       return false;
2113     break;
2114 
2115   case Builtin::BI__builtin_add_overflow:
2116   case Builtin::BI__builtin_sub_overflow:
2117   case Builtin::BI__builtin_mul_overflow:
2118   case Builtin::BI__builtin_sadd_overflow:
2119   case Builtin::BI__builtin_uadd_overflow:
2120   case Builtin::BI__builtin_uaddl_overflow:
2121   case Builtin::BI__builtin_uaddll_overflow:
2122   case Builtin::BI__builtin_usub_overflow:
2123   case Builtin::BI__builtin_usubl_overflow:
2124   case Builtin::BI__builtin_usubll_overflow:
2125   case Builtin::BI__builtin_umul_overflow:
2126   case Builtin::BI__builtin_umull_overflow:
2127   case Builtin::BI__builtin_umulll_overflow:
2128   case Builtin::BI__builtin_saddl_overflow:
2129   case Builtin::BI__builtin_saddll_overflow:
2130   case Builtin::BI__builtin_ssub_overflow:
2131   case Builtin::BI__builtin_ssubl_overflow:
2132   case Builtin::BI__builtin_ssubll_overflow:
2133   case Builtin::BI__builtin_smul_overflow:
2134   case Builtin::BI__builtin_smull_overflow:
2135   case Builtin::BI__builtin_smulll_overflow:
2136     if (!interp__builtin_overflowop(S, OpPC, Frame, F, Call))
2137       return false;
2138     break;
2139 
2140   case Builtin::BI__builtin_addcb:
2141   case Builtin::BI__builtin_addcs:
2142   case Builtin::BI__builtin_addc:
2143   case Builtin::BI__builtin_addcl:
2144   case Builtin::BI__builtin_addcll:
2145   case Builtin::BI__builtin_subcb:
2146   case Builtin::BI__builtin_subcs:
2147   case Builtin::BI__builtin_subc:
2148   case Builtin::BI__builtin_subcl:
2149   case Builtin::BI__builtin_subcll:
2150     if (!interp__builtin_carryop(S, OpPC, Frame, F, Call))
2151       return false;
2152     break;
2153 
2154   case Builtin::BI__builtin_clz:
2155   case Builtin::BI__builtin_clzl:
2156   case Builtin::BI__builtin_clzll:
2157   case Builtin::BI__builtin_clzs:
2158   case Builtin::BI__builtin_clzg:
2159   case Builtin::BI__lzcnt16: // Microsoft variants of count leading zeros
2160   case Builtin::BI__lzcnt:
2161   case Builtin::BI__lzcnt64:
2162     if (!interp__builtin_clz(S, OpPC, Frame, F, Call))
2163       return false;
2164     break;
2165 
2166   case Builtin::BI__builtin_ctz:
2167   case Builtin::BI__builtin_ctzl:
2168   case Builtin::BI__builtin_ctzll:
2169   case Builtin::BI__builtin_ctzs:
2170   case Builtin::BI__builtin_ctzg:
2171     if (!interp__builtin_ctz(S, OpPC, Frame, F, Call))
2172       return false;
2173     break;
2174 
2175   case Builtin::BI__builtin_bswap16:
2176   case Builtin::BI__builtin_bswap32:
2177   case Builtin::BI__builtin_bswap64:
2178     if (!interp__builtin_bswap(S, OpPC, Frame, F, Call))
2179       return false;
2180     break;
2181 
2182   case Builtin::BI__atomic_always_lock_free:
2183   case Builtin::BI__atomic_is_lock_free:
2184   case Builtin::BI__c11_atomic_is_lock_free:
2185     if (!interp__builtin_atomic_lock_free(S, OpPC, Frame, F, Call))
2186       return false;
2187     break;
2188 
2189   case Builtin::BI__builtin_complex:
2190     if (!interp__builtin_complex(S, OpPC, Frame, F, Call))
2191       return false;
2192     break;
2193 
2194   case Builtin::BI__builtin_is_aligned:
2195   case Builtin::BI__builtin_align_up:
2196   case Builtin::BI__builtin_align_down:
2197     if (!interp__builtin_is_aligned_up_down(S, OpPC, Frame, F, Call))
2198       return false;
2199     break;
2200 
2201   case Builtin::BI__builtin_assume_aligned:
2202     if (!interp__builtin_assume_aligned(S, OpPC, Frame, F, Call))
2203       return false;
2204     break;
2205 
2206   case clang::X86::BI__builtin_ia32_bextr_u32:
2207   case clang::X86::BI__builtin_ia32_bextr_u64:
2208   case clang::X86::BI__builtin_ia32_bextri_u32:
2209   case clang::X86::BI__builtin_ia32_bextri_u64:
2210     if (!interp__builtin_ia32_bextr(S, OpPC, Frame, F, Call))
2211       return false;
2212     break;
2213 
2214   case clang::X86::BI__builtin_ia32_bzhi_si:
2215   case clang::X86::BI__builtin_ia32_bzhi_di:
2216     if (!interp__builtin_ia32_bzhi(S, OpPC, Frame, F, Call))
2217       return false;
2218     break;
2219 
2220   case clang::X86::BI__builtin_ia32_lzcnt_u16:
2221   case clang::X86::BI__builtin_ia32_lzcnt_u32:
2222   case clang::X86::BI__builtin_ia32_lzcnt_u64:
2223     if (!interp__builtin_ia32_lzcnt(S, OpPC, Frame, F, Call))
2224       return false;
2225     break;
2226 
2227   case clang::X86::BI__builtin_ia32_tzcnt_u16:
2228   case clang::X86::BI__builtin_ia32_tzcnt_u32:
2229   case clang::X86::BI__builtin_ia32_tzcnt_u64:
2230     if (!interp__builtin_ia32_tzcnt(S, OpPC, Frame, F, Call))
2231       return false;
2232     break;
2233 
2234   case clang::X86::BI__builtin_ia32_pdep_si:
2235   case clang::X86::BI__builtin_ia32_pdep_di:
2236     if (!interp__builtin_ia32_pdep(S, OpPC, Frame, F, Call))
2237       return false;
2238     break;
2239 
2240   case clang::X86::BI__builtin_ia32_pext_si:
2241   case clang::X86::BI__builtin_ia32_pext_di:
2242     if (!interp__builtin_ia32_pext(S, OpPC, Frame, F, Call))
2243       return false;
2244     break;
2245 
2246   case clang::X86::BI__builtin_ia32_addcarryx_u32:
2247   case clang::X86::BI__builtin_ia32_addcarryx_u64:
2248   case clang::X86::BI__builtin_ia32_subborrow_u32:
2249   case clang::X86::BI__builtin_ia32_subborrow_u64:
2250     if (!interp__builtin_ia32_addcarry_subborrow(S, OpPC, Frame, F, Call))
2251       return false;
2252     break;
2253 
2254   case Builtin::BI__builtin_os_log_format_buffer_size:
2255     if (!interp__builtin_os_log_format_buffer_size(S, OpPC, Frame, F, Call))
2256       return false;
2257     break;
2258 
2259   case Builtin::BI__builtin_ptrauth_string_discriminator:
2260     if (!interp__builtin_ptrauth_string_discriminator(S, OpPC, Frame, F, Call))
2261       return false;
2262     break;
2263 
2264   case Builtin::BI__builtin_constant_p:
2265     if (!interp__builtin_constant_p(S, OpPC, Frame, F, Call))
2266       return false;
2267     break;
2268 
2269   case Builtin::BI__noop:
2270     pushInteger(S, 0, Call->getType());
2271     break;
2272 
2273   case Builtin::BI__builtin_operator_new:
2274     if (!interp__builtin_operator_new(S, OpPC, Frame, F, Call))
2275       return false;
2276     break;
2277 
2278   case Builtin::BI__builtin_operator_delete:
2279     if (!interp__builtin_operator_delete(S, OpPC, Frame, F, Call))
2280       return false;
2281     break;
2282 
2283   case Builtin::BI__arithmetic_fence:
2284     if (!interp__builtin_arithmetic_fence(S, OpPC, Frame, F, Call))
2285       return false;
2286     break;
2287 
2288   case Builtin::BI__builtin_reduce_add:
2289   case Builtin::BI__builtin_reduce_mul:
2290   case Builtin::BI__builtin_reduce_and:
2291   case Builtin::BI__builtin_reduce_or:
2292   case Builtin::BI__builtin_reduce_xor:
2293     if (!interp__builtin_vector_reduce(S, OpPC, Frame, F, Call))
2294       return false;
2295     break;
2296 
2297   case Builtin::BI__builtin_elementwise_popcount:
2298     if (!interp__builtin_elementwise_popcount(S, OpPC, Frame, F, Call))
2299       return false;
2300     break;
2301 
2302   case Builtin::BI__builtin_memcpy:
2303   case Builtin::BImemcpy:
2304   case Builtin::BI__builtin_memmove:
2305   case Builtin::BImemmove:
2306     if (!interp__builtin_memcpy(S, OpPC, Frame, F, Call))
2307       return false;
2308     break;
2309 
2310   default:
2311     S.FFDiag(S.Current->getLocation(OpPC),
2312              diag::note_invalid_subexpr_in_const_expr)
2313         << S.Current->getRange(OpPC);
2314 
2315     return false;
2316   }
2317 
2318   return retPrimValue(S, OpPC, ReturnT);
2319 }
2320 
2321 bool InterpretOffsetOf(InterpState &S, CodePtr OpPC, const OffsetOfExpr *E,
2322                        llvm::ArrayRef<int64_t> ArrayIndices,
2323                        int64_t &IntResult) {
2324   CharUnits Result;
2325   unsigned N = E->getNumComponents();
2326   assert(N > 0);
2327 
2328   unsigned ArrayIndex = 0;
2329   QualType CurrentType = E->getTypeSourceInfo()->getType();
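  // Illustrative example: for 'struct S { int A; int B[4]; };',
  // offsetof(S, B[2]) visits a Field node (the offset of B) followed by an
  // Array node contributing 2 * sizeof(int).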
2330   for (unsigned I = 0; I != N; ++I) {
2331     const OffsetOfNode &Node = E->getComponent(I);
2332     switch (Node.getKind()) {
2333     case OffsetOfNode::Field: {
2334       const FieldDecl *MemberDecl = Node.getField();
2335       const RecordType *RT = CurrentType->getAs<RecordType>();
2336       if (!RT)
2337         return false;
2338       const RecordDecl *RD = RT->getDecl();
2339       if (RD->isInvalidDecl())
2340         return false;
2341       const ASTRecordLayout &RL = S.getASTContext().getASTRecordLayout(RD);
2342       unsigned FieldIndex = MemberDecl->getFieldIndex();
2343       assert(FieldIndex < RL.getFieldCount() && "offsetof field in wrong type");
2344       Result +=
2345           S.getASTContext().toCharUnitsFromBits(RL.getFieldOffset(FieldIndex));
2346       CurrentType = MemberDecl->getType().getNonReferenceType();
2347       break;
2348     }
2349     case OffsetOfNode::Array: {
2350     // When generating bytecode, we push all the index expressions onto the
2351     // stack as Sint64.
2352       int64_t Index = ArrayIndices[ArrayIndex];
2353       const ArrayType *AT = S.getASTContext().getAsArrayType(CurrentType);
2354       if (!AT)
2355         return false;
2356       CurrentType = AT->getElementType();
2357       CharUnits ElementSize = S.getASTContext().getTypeSizeInChars(CurrentType);
2358       Result += Index * ElementSize;
2359       ++ArrayIndex;
2360       break;
2361     }
2362     case OffsetOfNode::Base: {
2363       const CXXBaseSpecifier *BaseSpec = Node.getBase();
2364       if (BaseSpec->isVirtual())
2365         return false;
2366 
2367       // Find the layout of the class whose base we are looking into.
2368       const RecordType *RT = CurrentType->getAs<RecordType>();
2369       if (!RT)
2370         return false;
2371       const RecordDecl *RD = RT->getDecl();
2372       if (RD->isInvalidDecl())
2373         return false;
2374       const ASTRecordLayout &RL = S.getASTContext().getASTRecordLayout(RD);
2375 
2376       // Find the base class itself.
2377       CurrentType = BaseSpec->getType();
2378       const RecordType *BaseRT = CurrentType->getAs<RecordType>();
2379       if (!BaseRT)
2380         return false;
2381 
2382       // Add the offset to the base.
2383       Result += RL.getBaseClassOffset(cast<CXXRecordDecl>(BaseRT->getDecl()));
2384       break;
2385     }
2386     case OffsetOfNode::Identifier:
2387       llvm_unreachable("Dependent OffsetOfExpr?");
2388     }
2389   }
2390 
2391   IntResult = Result.getQuantity();
2392 
2393   return true;
2394 }
2395 
2396 bool SetThreeWayComparisonField(InterpState &S, CodePtr OpPC,
2397                                 const Pointer &Ptr, const APSInt &IntValue) {
2399   const Record *R = Ptr.getRecord();
2400   assert(R);
2401   assert(R->getNumFields() == 1);
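  // Comparison category types (e.g. std::strong_ordering) wrap a single
  // integral member; the comparison result is written into that field.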
2402 
2403   unsigned FieldOffset = R->getField(0u)->Offset;
2404   const Pointer &FieldPtr = Ptr.atField(FieldOffset);
2405   PrimType FieldT = *S.getContext().classify(FieldPtr.getType());
2406 
2407   INT_TYPE_SWITCH(FieldT,
2408                   FieldPtr.deref<T>() = T::from(IntValue.getSExtValue()));
2409   FieldPtr.initialize();
2410   return true;
2411 }
2412 
2413 static bool copyComposite(InterpState &S, CodePtr OpPC, const Pointer &Src,
2414                           Pointer &Dest, bool Activate);
2415 static bool copyRecord(InterpState &S, CodePtr OpPC, const Pointer &Src,
2416                        Pointer &Dest, bool Activate = false) {
2417   [[maybe_unused]] const Descriptor *SrcDesc = Src.getFieldDesc();
2418   const Descriptor *DestDesc = Dest.getFieldDesc();
2419 
2420   auto copyField = [&](const Record::Field &F, bool Activate) -> bool {
2421     Pointer DestField = Dest.atField(F.Offset);
2422     if (std::optional<PrimType> FT = S.Ctx.classify(F.Decl->getType())) {
2423       TYPE_SWITCH(*FT, {
2424         DestField.deref<T>() = Src.atField(F.Offset).deref<T>();
2425         if (Src.atField(F.Offset).isInitialized())
2426           DestField.initialize();
2427         if (Activate)
2428           DestField.activate();
2429       });
2430       return true;
2431     }
2432     // Composite field.
2433     return copyComposite(S, OpPC, Src.atField(F.Offset), DestField, Activate);
2434   };
2435 
2436   assert(SrcDesc->isRecord());
2437   assert(SrcDesc->ElemRecord == DestDesc->ElemRecord);
2438   const Record *R = DestDesc->ElemRecord;
2439   for (const Record::Field &F : R->fields()) {
2440     if (R->isUnion()) {
2441       // For unions, only copy the active field.
2442       const Pointer &SrcField = Src.atField(F.Offset);
2443       if (SrcField.isActive()) {
2444         if (!copyField(F, /*Activate=*/true))
2445           return false;
2446       }
2447     } else {
2448       if (!copyField(F, Activate))
2449         return false;
2450     }
2451   }
2452 
2453   for (const Record::Base &B : R->bases()) {
2454     Pointer DestBase = Dest.atField(B.Offset);
2455     if (!copyRecord(S, OpPC, Src.atField(B.Offset), DestBase, Activate))
2456       return false;
2457   }
2458 
2459   Dest.initialize();
2460   return true;
2461 }
2462 
2463 static bool copyComposite(InterpState &S, CodePtr OpPC, const Pointer &Src,
2464                           Pointer &Dest, bool Activate = false) {
2465   assert(Src.isLive() && Dest.isLive());
2466 
2467   [[maybe_unused]] const Descriptor *SrcDesc = Src.getFieldDesc();
2468   const Descriptor *DestDesc = Dest.getFieldDesc();
2469 
2470   assert(!DestDesc->isPrimitive() && !SrcDesc->isPrimitive());
2471 
2472   if (DestDesc->isPrimitiveArray()) {
2473     assert(SrcDesc->isPrimitiveArray());
2474     assert(SrcDesc->getNumElems() == DestDesc->getNumElems());
2475     PrimType ET = DestDesc->getPrimType();
2476     for (unsigned I = 0, N = DestDesc->getNumElems(); I != N; ++I) {
2477       Pointer DestElem = Dest.atIndex(I);
2478       TYPE_SWITCH(ET, {
2479         DestElem.deref<T>() = Src.atIndex(I).deref<T>();
2480         DestElem.initialize();
2481       });
2482     }
2483     return true;
2484   }
2485 
2486   if (DestDesc->isRecord())
2487     return copyRecord(S, OpPC, Src, Dest, Activate);
2488   return Invalid(S, OpPC);
2489 }
2490 
2491 bool DoMemcpy(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest) {
2492   return copyComposite(S, OpPC, Src, Dest);
2493 }
2494 
2495 } // namespace interp
2496 } // namespace clang
2497