//===--- InterpBuiltin.cpp - Interpreter for the constexpr VM ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "../ExprConstShared.h"
#include "Boolean.h"
#include "Compiler.h"
#include "EvalEmitter.h"
#include "Interp.h"
#include "InterpBuiltinBitCast.h"
#include "PrimType.h"
#include "clang/AST/OSLog.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/Support/SipHash.h"

namespace clang {
namespace interp {

static unsigned callArgSize(const InterpState &S, const CallExpr *C) {
  unsigned O = 0;

  for (const Expr *E : C->arguments()) {
    O += align(primSize(*S.getContext().classify(E)));
  }

  return O;
}

template <typename T>
static T getParam(const InterpFrame *Frame, unsigned Index) {
  assert(Frame->getFunction()->getNumParams() > Index);
  unsigned Offset = Frame->getFunction()->getParamOffset(Index);
  return Frame->getParam<T>(Offset);
}

static APSInt getAPSIntParam(const InterpFrame *Frame, unsigned Index) {
  APSInt R;
  unsigned Offset = Frame->getFunction()->getParamOffset(Index);
  INT_TYPE_SWITCH(Frame->getFunction()->getParamType(Index),
                  R = Frame->getParam<T>(Offset).toAPSInt());
  return R;
}

static PrimType getIntPrimType(const InterpState &S) {
  const TargetInfo &TI = S.getASTContext().getTargetInfo();
  unsigned IntWidth = TI.getIntWidth();

  if (IntWidth == 32)
    return PT_Sint32;
  else if (IntWidth == 16)
    return PT_Sint16;
  llvm_unreachable("Int isn't 16 or 32 bit?");
}

static PrimType getLongPrimType(const InterpState &S) {
  const TargetInfo &TI = S.getASTContext().getTargetInfo();
  unsigned LongWidth = TI.getLongWidth();

  if (LongWidth == 64)
    return PT_Sint64;
  else if (LongWidth == 32)
    return PT_Sint32;
  else if (LongWidth == 16)
    return PT_Sint16;
  llvm_unreachable("long isn't 16, 32 or 64 bit?");
}

/// Peek an integer value from the stack into an APSInt.
static APSInt peekToAPSInt(InterpStack &Stk, PrimType T, size_t Offset = 0) {
  if (Offset == 0)
    Offset = align(primSize(T));

  APSInt R;
  INT_TYPE_SWITCH(T, R = Stk.peek<T>(Offset).toAPSInt());

  return R;
}
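// Illustrative sketch, not interpreter code: arguments are pushed left to
// right, so the last argument ends up closest to the stack top. Peeking at an
// earlier argument therefore has to skip the later ones, e.g. for a
// two-argument builtin taking (Value, Amount):
//   APSInt Amount = peekToAPSInt(Stk, AmountT);
//   APSInt Value = peekToAPSInt(
//       Stk, ValueT, align(primSize(AmountT)) + align(primSize(ValueT)));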
/// Pushes \p Val on the stack as the type given by \p QT.
static void pushInteger(InterpState &S, const APSInt &Val, QualType QT) {
  assert(QT->isSignedIntegerOrEnumerationType() ||
         QT->isUnsignedIntegerOrEnumerationType());
  std::optional<PrimType> T = S.getContext().classify(QT);
  assert(T);

  unsigned BitWidth = S.getASTContext().getTypeSize(QT);
  if (QT->isSignedIntegerOrEnumerationType()) {
    int64_t V = Val.getSExtValue();
    INT_TYPE_SWITCH(*T, { S.Stk.push<T>(T::from(V, BitWidth)); });
  } else {
    assert(QT->isUnsignedIntegerOrEnumerationType());
    uint64_t V = Val.getZExtValue();
    INT_TYPE_SWITCH(*T, { S.Stk.push<T>(T::from(V, BitWidth)); });
  }
}

template <typename T>
static void pushInteger(InterpState &S, T Val, QualType QT) {
  if constexpr (std::is_same_v<T, APInt>)
    pushInteger(S, APSInt(Val, !std::is_signed_v<T>), QT);
  else if constexpr (std::is_same_v<T, APSInt>)
    pushInteger(S, Val, QT);
  else
    pushInteger(S,
                APSInt(APInt(sizeof(T) * 8, static_cast<uint64_t>(Val),
                             std::is_signed_v<T>),
                       !std::is_signed_v<T>),
                QT);
}

static void assignInteger(Pointer &Dest, PrimType ValueT, const APSInt &Value) {
  INT_TYPE_SWITCH_NO_BOOL(
      ValueT, { Dest.deref<T>() = T::from(static_cast<T>(Value)); });
}

static bool retPrimValue(InterpState &S, CodePtr OpPC,
                         std::optional<PrimType> &T) {
  if (!T)
    return RetVoid(S, OpPC);

#define RET_CASE(X)                                                            \
  case X:                                                                      \
    return Ret<X>(S, OpPC);
  switch (*T) {
    RET_CASE(PT_Ptr);
    RET_CASE(PT_FnPtr);
    RET_CASE(PT_Float);
    RET_CASE(PT_Bool);
    RET_CASE(PT_Sint8);
    RET_CASE(PT_Uint8);
    RET_CASE(PT_Sint16);
    RET_CASE(PT_Uint16);
    RET_CASE(PT_Sint32);
    RET_CASE(PT_Uint32);
    RET_CASE(PT_Sint64);
    RET_CASE(PT_Uint64);
    RET_CASE(PT_IntAP);
    RET_CASE(PT_IntAPS);
  default:
    llvm_unreachable("Unsupported return type for builtin function");
  }
#undef RET_CASE
}

static void diagnoseNonConstexprBuiltin(InterpState &S, CodePtr OpPC,
                                        unsigned ID) {
  auto Loc = S.Current->getSource(OpPC);
  if (S.getLangOpts().CPlusPlus11)
    S.CCEDiag(Loc, diag::note_constexpr_invalid_function)
        << /*isConstexpr=*/0 << /*isConstructor=*/0
        << ("'" + S.getASTContext().BuiltinInfo.getName(ID) + "'").str();
  else
    S.CCEDiag(Loc, diag::note_invalid_subexpr_in_const_expr);
}
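// For illustration only: a typical handler computes a host value or APSInt
// and materializes it with the return type of the call expression, e.g.
//   pushInteger(S, Val.popcount(), Call->getType());
// retPrimValue() then dispatches the matching Ret<> opcode when the builtin
// frame returns.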
static bool interp__builtin_is_constant_evaluated(InterpState &S, CodePtr OpPC,
                                                  const InterpFrame *Frame,
                                                  const CallExpr *Call) {
  unsigned Depth = S.Current->getDepth();
  auto isStdCall = [](const FunctionDecl *F) -> bool {
    return F && F->isInStdNamespace() && F->getIdentifier() &&
           F->getIdentifier()->isStr("is_constant_evaluated");
  };
  const InterpFrame *Caller = Frame->Caller;
  // The current frame is the one for __builtin_is_constant_evaluated.
  // The one above that, potentially the one for std::is_constant_evaluated().
  if (S.inConstantContext() && !S.checkingPotentialConstantExpression() &&
      S.getEvalStatus().Diag &&
      (Depth == 1 || (Depth == 2 && isStdCall(Caller->getCallee())))) {
    if (Caller->Caller && isStdCall(Caller->getCallee())) {
      const Expr *E = Caller->Caller->getExpr(Caller->getRetPC());
      S.report(E->getExprLoc(),
               diag::warn_is_constant_evaluated_always_true_constexpr)
          << "std::is_constant_evaluated" << E->getSourceRange();
    } else {
      const Expr *E = Frame->Caller->getExpr(Frame->getRetPC());
      S.report(E->getExprLoc(),
               diag::warn_is_constant_evaluated_always_true_constexpr)
          << "__builtin_is_constant_evaluated" << E->getSourceRange();
    }
  }

  S.Stk.push<Boolean>(Boolean::from(S.inConstantContext()));
  return true;
}

static bool interp__builtin_strcmp(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const Function *Func, const CallExpr *Call) {
  unsigned ID = Func->getBuiltinID();
  const Pointer &A = getParam<Pointer>(Frame, 0);
  const Pointer &B = getParam<Pointer>(Frame, 1);

  if (ID == Builtin::BIstrcmp)
    diagnoseNonConstexprBuiltin(S, OpPC, ID);

  if (!CheckLive(S, OpPC, A, AK_Read) || !CheckLive(S, OpPC, B, AK_Read))
    return false;

  if (A.isDummy() || B.isDummy())
    return false;

  assert(A.getFieldDesc()->isPrimitiveArray());
  assert(B.getFieldDesc()->isPrimitiveArray());

  unsigned IndexA = A.getIndex();
  unsigned IndexB = B.getIndex();
  int32_t Result = 0;
  for (;; ++IndexA, ++IndexB) {
    const Pointer &PA = A.atIndex(IndexA);
    const Pointer &PB = B.atIndex(IndexB);
    if (!CheckRange(S, OpPC, PA, AK_Read) ||
        !CheckRange(S, OpPC, PB, AK_Read)) {
      return false;
    }
    uint8_t CA = PA.deref<uint8_t>();
    uint8_t CB = PB.deref<uint8_t>();

    if (CA > CB) {
      Result = 1;
      break;
    } else if (CA < CB) {
      Result = -1;
      break;
    }
    if (CA == 0 || CB == 0)
      break;
  }

  pushInteger(S, Result, Call->getType());
  return true;
}

static bool interp__builtin_strlen(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const Function *Func, const CallExpr *Call) {
  unsigned ID = Func->getBuiltinID();
  const Pointer &StrPtr = getParam<Pointer>(Frame, 0);

  if (ID == Builtin::BIstrlen || ID == Builtin::BIwcslen)
    diagnoseNonConstexprBuiltin(S, OpPC, ID);

  if (!CheckArray(S, OpPC, StrPtr))
    return false;

  if (!CheckLive(S, OpPC, StrPtr, AK_Read))
    return false;

  if (!CheckDummy(S, OpPC, StrPtr, AK_Read))
    return false;

  assert(StrPtr.getFieldDesc()->isPrimitiveArray());
  unsigned ElemSize = StrPtr.getFieldDesc()->getElemSize();

  if (ID == Builtin::BI__builtin_wcslen || ID == Builtin::BIwcslen) {
    const ASTContext &AC = S.getASTContext();
    assert(ElemSize == AC.getTypeSizeInChars(AC.getWCharType()).getQuantity());
  }

  size_t Len = 0;
  for (size_t I = StrPtr.getIndex();; ++I, ++Len) {
    const Pointer &ElemPtr = StrPtr.atIndex(I);

    if (!CheckRange(S, OpPC, ElemPtr, AK_Read))
      return false;

    uint32_t Val;
    switch (ElemSize) {
    case 1:
      Val = ElemPtr.deref<uint8_t>();
      break;
    case 2:
      Val = ElemPtr.deref<uint16_t>();
      break;
    case 4:
      Val = ElemPtr.deref<uint32_t>();
      break;
    default:
      llvm_unreachable("Unsupported char size");
    }
    if (Val == 0)
      break;
  }

  pushInteger(S, Len, Call->getType());

  return true;
}

static bool interp__builtin_nan(InterpState &S, CodePtr OpPC,
                                const InterpFrame *Frame, const Function *F,
                                bool Signaling) {
  const Pointer &Arg = getParam<Pointer>(Frame, 0);

  if (!CheckLoad(S, OpPC, Arg))
    return false;

  assert(Arg.getFieldDesc()->isPrimitiveArray());

  // Convert the given string to an integer using StringRef's API.
  llvm::APInt Fill;
  std::string Str;
  assert(Arg.getNumElems() >= 1);
  for (unsigned I = 0;; ++I) {
    const Pointer &Elem = Arg.atIndex(I);

    if (!CheckLoad(S, OpPC, Elem))
      return false;

    if (Elem.deref<int8_t>() == 0)
      break;

    Str += Elem.deref<char>();
  }

  // Treat empty strings as if they were zero.
  if (Str.empty())
    Fill = llvm::APInt(32, 0);
  else if (StringRef(Str).getAsInteger(0, Fill))
    return false;

  const llvm::fltSemantics &TargetSemantics =
      S.getASTContext().getFloatTypeSemantics(F->getDecl()->getReturnType());

  Floating Result;
  if (S.getASTContext().getTargetInfo().isNan2008()) {
    if (Signaling)
      Result = Floating(
          llvm::APFloat::getSNaN(TargetSemantics, /*Negative=*/false, &Fill));
    else
      Result = Floating(
          llvm::APFloat::getQNaN(TargetSemantics, /*Negative=*/false, &Fill));
  } else {
    // Prior to IEEE 754-2008, architectures were allowed to choose whether
    // the first bit of their significand was set for qNaN or sNaN. MIPS chose
    // a different encoding to what became a standard in 2008, and for pre-
    // 2008 revisions, MIPS interpreted sNaN-2008 as qNaN and qNaN-2008 as
    // sNaN. This is now known as "legacy NaN" encoding.
    if (Signaling)
      Result = Floating(
          llvm::APFloat::getQNaN(TargetSemantics, /*Negative=*/false, &Fill));
    else
      Result = Floating(
          llvm::APFloat::getSNaN(TargetSemantics, /*Negative=*/false, &Fill));
  }

  S.Stk.push<Floating>(Result);
  return true;
}

static bool interp__builtin_inf(InterpState &S, CodePtr OpPC,
                                const InterpFrame *Frame, const Function *F) {
  const llvm::fltSemantics &TargetSemantics =
      S.getASTContext().getFloatTypeSemantics(F->getDecl()->getReturnType());

  S.Stk.push<Floating>(Floating::getInf(TargetSemantics));
  return true;
}

static bool interp__builtin_copysign(InterpState &S, CodePtr OpPC,
                                     const InterpFrame *Frame,
                                     const Function *F) {
  const Floating &Arg1 = getParam<Floating>(Frame, 0);
  const Floating &Arg2 = getParam<Floating>(Frame, 1);

  APFloat Copy = Arg1.getAPFloat();
  Copy.copySign(Arg2.getAPFloat());
  S.Stk.push<Floating>(Floating(Copy));

  return true;
}
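// Illustrative only: copysign transfers just the sign bit, so in a constant
// expression
//   static_assert(__builtin_copysign(3.0, -0.0) == -3.0);
// holds; the magnitude always comes from the first operand.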
static bool interp__builtin_fmin(InterpState &S, CodePtr OpPC,
                                 const InterpFrame *Frame, const Function *F,
                                 bool IsNumBuiltin) {
  const Floating &LHS = getParam<Floating>(Frame, 0);
  const Floating &RHS = getParam<Floating>(Frame, 1);

  Floating Result;

  if (IsNumBuiltin) {
    Result = llvm::minimumnum(LHS.getAPFloat(), RHS.getAPFloat());
  } else {
    // When comparing zeroes, return -0.0 if one of the zeroes is negative.
    if (LHS.isZero() && RHS.isZero() && RHS.isNegative())
      Result = RHS;
    else if (LHS.isNan() || RHS < LHS)
      Result = RHS;
    else
      Result = LHS;
  }

  S.Stk.push<Floating>(Result);
  return true;
}

static bool interp__builtin_fmax(InterpState &S, CodePtr OpPC,
                                 const InterpFrame *Frame, const Function *Func,
                                 bool IsNumBuiltin) {
  const Floating &LHS = getParam<Floating>(Frame, 0);
  const Floating &RHS = getParam<Floating>(Frame, 1);

  Floating Result;

  if (IsNumBuiltin) {
    Result = llvm::maximumnum(LHS.getAPFloat(), RHS.getAPFloat());
  } else {
    // When comparing zeroes, return +0.0 if one of the zeroes is positive.
    if (LHS.isZero() && RHS.isZero() && LHS.isNegative())
      Result = RHS;
    else if (LHS.isNan() || RHS > LHS)
      Result = RHS;
    else
      Result = LHS;
  }

  S.Stk.push<Floating>(Result);
  return true;
}

/// Defined as __builtin_isnan(...), to accommodate the fact that it can
/// take a float, double, long double, etc.
/// But for us, that's all a Floating anyway.
static bool interp__builtin_isnan(InterpState &S, CodePtr OpPC,
                                  const InterpFrame *Frame, const Function *F,
                                  const CallExpr *Call) {
  const Floating &Arg = S.Stk.peek<Floating>();

  pushInteger(S, Arg.isNan(), Call->getType());
  return true;
}

static bool interp__builtin_issignaling(InterpState &S, CodePtr OpPC,
                                        const InterpFrame *Frame,
                                        const Function *F,
                                        const CallExpr *Call) {
  const Floating &Arg = S.Stk.peek<Floating>();

  pushInteger(S, Arg.isSignaling(), Call->getType());
  return true;
}

static bool interp__builtin_isinf(InterpState &S, CodePtr OpPC,
                                  const InterpFrame *Frame, const Function *F,
                                  bool CheckSign, const CallExpr *Call) {
  const Floating &Arg = S.Stk.peek<Floating>();
  bool IsInf = Arg.isInf();

  if (CheckSign)
    pushInteger(S, IsInf ? (Arg.isNegative() ? -1 : 1) : 0, Call->getType());
  else
    pushInteger(S, Arg.isInf(), Call->getType());
  return true;
}

static bool interp__builtin_isfinite(InterpState &S, CodePtr OpPC,
                                     const InterpFrame *Frame,
                                     const Function *F, const CallExpr *Call) {
  const Floating &Arg = S.Stk.peek<Floating>();

  pushInteger(S, Arg.isFinite(), Call->getType());
  return true;
}

static bool interp__builtin_isnormal(InterpState &S, CodePtr OpPC,
                                     const InterpFrame *Frame,
                                     const Function *F, const CallExpr *Call) {
  const Floating &Arg = S.Stk.peek<Floating>();

  pushInteger(S, Arg.isNormal(), Call->getType());
  return true;
}

static bool interp__builtin_issubnormal(InterpState &S, CodePtr OpPC,
                                        const InterpFrame *Frame,
                                        const Function *F,
                                        const CallExpr *Call) {
  const Floating &Arg = S.Stk.peek<Floating>();

  pushInteger(S, Arg.isDenormal(), Call->getType());
  return true;
}

static bool interp__builtin_iszero(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame, const Function *F,
                                   const CallExpr *Call) {
  const Floating &Arg = S.Stk.peek<Floating>();

  pushInteger(S, Arg.isZero(), Call->getType());
  return true;
}

static bool interp__builtin_signbit(InterpState &S, CodePtr OpPC,
                                    const InterpFrame *Frame, const Function *F,
                                    const CallExpr *Call) {
  const Floating &Arg = S.Stk.peek<Floating>();

  pushInteger(S, Arg.isNegative(), Call->getType());
  return true;
}

static bool interp_floating_comparison(InterpState &S, CodePtr OpPC,
                                       const InterpFrame *Frame,
                                       const Function *F,
                                       const CallExpr *Call) {
  const Floating &RHS = S.Stk.peek<Floating>();
  const Floating &LHS = S.Stk.peek<Floating>(align(2u * primSize(PT_Float)));
  unsigned ID = F->getBuiltinID();

  pushInteger(
      S,
      [&] {
        switch (ID) {
        case Builtin::BI__builtin_isgreater:
          return LHS > RHS;
        case Builtin::BI__builtin_isgreaterequal:
          return LHS >= RHS;
        case Builtin::BI__builtin_isless:
          return LHS < RHS;
        case Builtin::BI__builtin_islessequal:
          return LHS <= RHS;
        case Builtin::BI__builtin_islessgreater: {
          ComparisonCategoryResult cmp = LHS.compare(RHS);
          return cmp == ComparisonCategoryResult::Less ||
                 cmp == ComparisonCategoryResult::Greater;
        }
        case Builtin::BI__builtin_isunordered:
          return LHS.compare(RHS) == ComparisonCategoryResult::Unordered;
        default:
          llvm_unreachable("Unexpected builtin ID: Should be a floating point "
                           "comparison function");
        }
      }(),
      Call->getType());
  return true;
}

/// First parameter to __builtin_isfpclass is the floating value, the
/// second one is an integral value.
static bool interp__builtin_isfpclass(InterpState &S, CodePtr OpPC,
                                      const InterpFrame *Frame,
                                      const Function *Func,
                                      const CallExpr *Call) {
  PrimType FPClassArgT = *S.getContext().classify(Call->getArg(1)->getType());
  APSInt FPClassArg = peekToAPSInt(S.Stk, FPClassArgT);
  const Floating &F =
      S.Stk.peek<Floating>(align(primSize(FPClassArgT) + primSize(PT_Float)));

  int32_t Result =
      static_cast<int32_t>((F.classify() & FPClassArg).getZExtValue());
  pushInteger(S, Result, Call->getType());

  return true;
}
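// For illustration (macro names assumed, not defined in this file): the
// second operand is a mask of FPClassTest bits, e.g.
//   __builtin_isfpclass(F, __FPCLASS_POSZERO | __FPCLASS_NEGZERO)
// yields a non-zero value iff F is a positive or negative zero.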
/// Five int values followed by one floating value.
static bool interp__builtin_fpclassify(InterpState &S, CodePtr OpPC,
                                       const InterpFrame *Frame,
                                       const Function *Func,
                                       const CallExpr *Call) {
  const Floating &Val = S.Stk.peek<Floating>();

  unsigned Index;
  switch (Val.getCategory()) {
  case APFloat::fcNaN:
    Index = 0;
    break;
  case APFloat::fcInfinity:
    Index = 1;
    break;
  case APFloat::fcNormal:
    Index = Val.isDenormal() ? 3 : 2;
    break;
  case APFloat::fcZero:
    Index = 4;
    break;
  }

  // The last argument is first on the stack.
  assert(Index <= 4);
  unsigned IntSize = primSize(getIntPrimType(S));
  unsigned Offset =
      align(primSize(PT_Float)) + ((1 + (4 - Index)) * align(IntSize));

  APSInt I = peekToAPSInt(S.Stk, getIntPrimType(S), Offset);
  pushInteger(S, I, Call->getType());
  return true;
}

// The C standard says "fabs raises no floating-point exceptions,
// even if x is a signaling NaN. The returned value is independent of
// the current rounding direction mode." Therefore constant folding can
// proceed without regard to the floating point settings.
// Reference, WG14 N2478 F.10.4.3
static bool interp__builtin_fabs(InterpState &S, CodePtr OpPC,
                                 const InterpFrame *Frame,
                                 const Function *Func) {
  const Floating &Val = getParam<Floating>(Frame, 0);

  S.Stk.push<Floating>(Floating::abs(Val));
  return true;
}

static bool interp__builtin_abs(InterpState &S, CodePtr OpPC,
                                const InterpFrame *Frame, const Function *Func,
                                const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Val = peekToAPSInt(S.Stk, ArgT);
  if (Val ==
      APSInt(APInt::getSignedMinValue(Val.getBitWidth()), /*IsUnsigned=*/false))
    return false;
  if (Val.isNegative())
    Val.negate();
  pushInteger(S, Val, Call->getType());
  return true;
}

static bool interp__builtin_popcount(InterpState &S, CodePtr OpPC,
                                     const InterpFrame *Frame,
                                     const Function *Func,
                                     const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Val = peekToAPSInt(S.Stk, ArgT);
  pushInteger(S, Val.popcount(), Call->getType());
  return true;
}

static bool interp__builtin_parity(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const Function *Func, const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Val = peekToAPSInt(S.Stk, ArgT);
  pushInteger(S, Val.popcount() % 2, Call->getType());
  return true;
}

static bool interp__builtin_clrsb(InterpState &S, CodePtr OpPC,
                                  const InterpFrame *Frame,
                                  const Function *Func, const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Val = peekToAPSInt(S.Stk, ArgT);
  pushInteger(S, Val.getBitWidth() - Val.getSignificantBits(), Call->getType());
  return true;
}

static bool interp__builtin_bitreverse(InterpState &S, CodePtr OpPC,
                                       const InterpFrame *Frame,
                                       const Function *Func,
                                       const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Val = peekToAPSInt(S.Stk, ArgT);
  pushInteger(S, Val.reverseBits(), Call->getType());
  return true;
}

static bool interp__builtin_classify_type(InterpState &S, CodePtr OpPC,
                                          const InterpFrame *Frame,
                                          const Function *Func,
                                          const CallExpr *Call) {
  // This is an unevaluated call, so there are no arguments on the stack.
  assert(Call->getNumArgs() == 1);
  const Expr *Arg = Call->getArg(0);

  GCCTypeClass ResultClass =
      EvaluateBuiltinClassifyType(Arg->getType(), S.getLangOpts());
  int32_t ReturnVal = static_cast<int32_t>(ResultClass);
  pushInteger(S, ReturnVal, Call->getType());
  return true;
}

// __builtin_expect(long, long)
// __builtin_expect_with_probability(long, long, double)
static bool interp__builtin_expect(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const Function *Func, const CallExpr *Call) {
  // The return value is simply the value of the first parameter.
  // We ignore the probability.
  unsigned NumArgs = Call->getNumArgs();
  assert(NumArgs == 2 || NumArgs == 3);

  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  unsigned Offset = align(primSize(getLongPrimType(S))) * 2;
  if (NumArgs == 3)
    Offset += align(primSize(PT_Float));

  APSInt Val = peekToAPSInt(S.Stk, ArgT, Offset);
  pushInteger(S, Val, Call->getType());
  return true;
}

/// rotateleft(value, amount)
static bool interp__builtin_rotate(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const Function *Func, const CallExpr *Call,
                                   bool Right) {
  PrimType AmountT = *S.getContext().classify(Call->getArg(1)->getType());
  PrimType ValueT = *S.getContext().classify(Call->getArg(0)->getType());

  APSInt Amount = peekToAPSInt(S.Stk, AmountT);
  APSInt Value = peekToAPSInt(
      S.Stk, ValueT, align(primSize(AmountT)) + align(primSize(ValueT)));

  APSInt Result;
  if (Right)
    Result = APSInt(Value.rotr(Amount.urem(Value.getBitWidth())),
                    /*IsUnsigned=*/true);
  else // Left.
    Result = APSInt(Value.rotl(Amount.urem(Value.getBitWidth())),
                    /*IsUnsigned=*/true);

  pushInteger(S, Result, Call->getType());
  return true;
}

static bool interp__builtin_ffs(InterpState &S, CodePtr OpPC,
                                const InterpFrame *Frame, const Function *Func,
                                const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Value = peekToAPSInt(S.Stk, ArgT);

  uint64_t N = Value.countr_zero();
  pushInteger(S, N == Value.getBitWidth() ? 0 : N + 1, Call->getType());
  return true;
}

static bool interp__builtin_addressof(InterpState &S, CodePtr OpPC,
                                      const InterpFrame *Frame,
                                      const Function *Func,
                                      const CallExpr *Call) {
  assert(Call->getArg(0)->isLValue());
  PrimType PtrT = S.getContext().classify(Call->getArg(0)).value_or(PT_Ptr);

  if (PtrT == PT_FnPtr) {
    const FunctionPointer &Arg = S.Stk.peek<FunctionPointer>();
    S.Stk.push<FunctionPointer>(Arg);
  } else if (PtrT == PT_Ptr) {
    const Pointer &Arg = S.Stk.peek<Pointer>();
    S.Stk.push<Pointer>(Arg);
  } else {
    assert(false && "Unsupported pointer type passed to __builtin_addressof()");
  }
  return true;
}

static bool interp__builtin_move(InterpState &S, CodePtr OpPC,
                                 const InterpFrame *Frame, const Function *Func,
                                 const CallExpr *Call) {
  PrimType ArgT = S.getContext().classify(Call->getArg(0)).value_or(PT_Ptr);

  TYPE_SWITCH(ArgT, const T &Arg = S.Stk.peek<T>(); S.Stk.push<T>(Arg););

  return Func->getDecl()->isConstexpr();
}

static bool interp__builtin_eh_return_data_regno(InterpState &S, CodePtr OpPC,
                                                 const InterpFrame *Frame,
                                                 const Function *Func,
                                                 const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Arg = peekToAPSInt(S.Stk, ArgT);

  int Result = S.getASTContext().getTargetInfo().getEHDataRegisterNumber(
      Arg.getZExtValue());
  pushInteger(S, Result, Call->getType());
  return true;
}

/// Just takes the first Argument to the call and puts it on the stack.
static bool noopPointer(InterpState &S, CodePtr OpPC, const InterpFrame *Frame,
                        const Function *Func, const CallExpr *Call) {
  const Pointer &Arg = S.Stk.peek<Pointer>();
  S.Stk.push<Pointer>(Arg);
  return true;
}

// Two integral values followed by a pointer (lhs, rhs, resultOut)
static bool interp__builtin_overflowop(InterpState &S, CodePtr OpPC,
                                       const InterpFrame *Frame,
                                       const Function *Func,
                                       const CallExpr *Call) {
  Pointer &ResultPtr = S.Stk.peek<Pointer>();
  if (ResultPtr.isDummy())
    return false;

  unsigned BuiltinOp = Func->getBuiltinID();
  PrimType RHST = *S.getContext().classify(Call->getArg(1)->getType());
  PrimType LHST = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt RHS = peekToAPSInt(S.Stk, RHST,
                            align(primSize(PT_Ptr)) + align(primSize(RHST)));
  APSInt LHS = peekToAPSInt(S.Stk, LHST,
                            align(primSize(PT_Ptr)) + align(primSize(RHST)) +
                                align(primSize(LHST)));
  QualType ResultType = Call->getArg(2)->getType()->getPointeeType();
  PrimType ResultT = *S.getContext().classify(ResultType);
  bool Overflow;

  APSInt Result;
  if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
      BuiltinOp == Builtin::BI__builtin_sub_overflow ||
      BuiltinOp == Builtin::BI__builtin_mul_overflow) {
    bool IsSigned = LHS.isSigned() || RHS.isSigned() ||
                    ResultType->isSignedIntegerOrEnumerationType();
    bool AllSigned = LHS.isSigned() && RHS.isSigned() &&
                     ResultType->isSignedIntegerOrEnumerationType();
    uint64_t LHSSize = LHS.getBitWidth();
    uint64_t RHSSize = RHS.getBitWidth();
    uint64_t ResultSize = S.getASTContext().getTypeSize(ResultType);
    uint64_t MaxBits = std::max(std::max(LHSSize, RHSSize), ResultSize);

    // Add an additional bit if the signedness isn't uniformly agreed to. We
    // could do this ONLY if there is a signed and an unsigned that both have
    // MaxBits, but the code to check that is pretty nasty. The issue will be
    // caught in the shrink-to-result later anyway.
    if (IsSigned && !AllSigned)
      ++MaxBits;

    LHS = APSInt(LHS.extOrTrunc(MaxBits), !IsSigned);
    RHS = APSInt(RHS.extOrTrunc(MaxBits), !IsSigned);
    Result = APSInt(MaxBits, !IsSigned);
  }

  // Find largest int.
  switch (BuiltinOp) {
  default:
    llvm_unreachable("Invalid value for BuiltinOp");
  case Builtin::BI__builtin_add_overflow:
  case Builtin::BI__builtin_sadd_overflow:
  case Builtin::BI__builtin_saddl_overflow:
  case Builtin::BI__builtin_saddll_overflow:
  case Builtin::BI__builtin_uadd_overflow:
  case Builtin::BI__builtin_uaddl_overflow:
  case Builtin::BI__builtin_uaddll_overflow:
    Result = LHS.isSigned() ? LHS.sadd_ov(RHS, Overflow)
                            : LHS.uadd_ov(RHS, Overflow);
    break;
  case Builtin::BI__builtin_sub_overflow:
  case Builtin::BI__builtin_ssub_overflow:
  case Builtin::BI__builtin_ssubl_overflow:
  case Builtin::BI__builtin_ssubll_overflow:
  case Builtin::BI__builtin_usub_overflow:
  case Builtin::BI__builtin_usubl_overflow:
  case Builtin::BI__builtin_usubll_overflow:
    Result = LHS.isSigned() ? LHS.ssub_ov(RHS, Overflow)
                            : LHS.usub_ov(RHS, Overflow);
    break;
  case Builtin::BI__builtin_mul_overflow:
  case Builtin::BI__builtin_smul_overflow:
  case Builtin::BI__builtin_smull_overflow:
  case Builtin::BI__builtin_smulll_overflow:
  case Builtin::BI__builtin_umul_overflow:
  case Builtin::BI__builtin_umull_overflow:
  case Builtin::BI__builtin_umulll_overflow:
    Result = LHS.isSigned() ? LHS.smul_ov(RHS, Overflow)
                            : LHS.umul_ov(RHS, Overflow);
    break;
  }

  // In the case where multiple sizes are allowed, truncate and see if
  // the values are the same.
  if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
      BuiltinOp == Builtin::BI__builtin_sub_overflow ||
      BuiltinOp == Builtin::BI__builtin_mul_overflow) {
    // APSInt doesn't have a TruncOrSelf, so we use extOrTrunc instead,
    // since it will give us the behavior of a TruncOrSelf in the case where
    // its parameter <= its size. We previously set Result to be at least the
    // type-size of the result, so getTypeSize(ResultType) <=
    // Result.getBitWidth() here behaves like TruncOrSelf.
    APSInt Temp = Result.extOrTrunc(S.getASTContext().getTypeSize(ResultType));
    Temp.setIsSigned(ResultType->isSignedIntegerOrEnumerationType());

    if (!APSInt::isSameValue(Temp, Result))
      Overflow = true;
    Result = Temp;
  }

  // Write Result to ResultPtr and put Overflow on the stack.
  assignInteger(ResultPtr, ResultT, Result);
  ResultPtr.initialize();
  assert(Func->getDecl()->getReturnType()->isBooleanType());
  S.Stk.push<Boolean>(Overflow);
  return true;
}
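// Illustrative only: the generic overflow builtins widen both operands,
// compute exactly, then shrink to the result type, e.g.
//   int R;
//   bool O = __builtin_add_overflow(INT_MAX, 1, &R); // O == true, R wraps
// In the interpreter, the wrapped value is written through ResultPtr and the
// overflow flag is the pushed Boolean.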
/// Three integral values followed by a pointer (lhs, rhs, carry, carryOut).
static bool interp__builtin_carryop(InterpState &S, CodePtr OpPC,
                                    const InterpFrame *Frame,
                                    const Function *Func,
                                    const CallExpr *Call) {
  unsigned BuiltinOp = Func->getBuiltinID();
  PrimType LHST = *S.getContext().classify(Call->getArg(0)->getType());
  PrimType RHST = *S.getContext().classify(Call->getArg(1)->getType());
  PrimType CarryT = *S.getContext().classify(Call->getArg(2)->getType());
  APSInt RHS = peekToAPSInt(S.Stk, RHST,
                            align(primSize(PT_Ptr)) + align(primSize(CarryT)) +
                                align(primSize(RHST)));
  APSInt LHS =
      peekToAPSInt(S.Stk, LHST,
                   align(primSize(PT_Ptr)) + align(primSize(RHST)) +
                       align(primSize(CarryT)) + align(primSize(LHST)));
  APSInt CarryIn = peekToAPSInt(
      S.Stk, LHST, align(primSize(PT_Ptr)) + align(primSize(CarryT)));
  APSInt CarryOut;

  APSInt Result;
  // Copy the number of bits and sign.
  Result = LHS;
  CarryOut = LHS;

  bool FirstOverflowed = false;
  bool SecondOverflowed = false;
  switch (BuiltinOp) {
  default:
    llvm_unreachable("Invalid value for BuiltinOp");
  case Builtin::BI__builtin_addcb:
  case Builtin::BI__builtin_addcs:
  case Builtin::BI__builtin_addc:
  case Builtin::BI__builtin_addcl:
  case Builtin::BI__builtin_addcll:
    Result =
        LHS.uadd_ov(RHS, FirstOverflowed).uadd_ov(CarryIn, SecondOverflowed);
    break;
  case Builtin::BI__builtin_subcb:
  case Builtin::BI__builtin_subcs:
  case Builtin::BI__builtin_subc:
  case Builtin::BI__builtin_subcl:
  case Builtin::BI__builtin_subcll:
    Result =
        LHS.usub_ov(RHS, FirstOverflowed).usub_ov(CarryIn, SecondOverflowed);
    break;
  }
  // It is possible for both overflows to happen but CGBuiltin uses an OR so
  // this is consistent.
  CarryOut = (uint64_t)(FirstOverflowed | SecondOverflowed);

  Pointer &CarryOutPtr = S.Stk.peek<Pointer>();
  QualType CarryOutType = Call->getArg(3)->getType()->getPointeeType();
  PrimType CarryOutT = *S.getContext().classify(CarryOutType);
  assignInteger(CarryOutPtr, CarryOutT, CarryOut);
  CarryOutPtr.initialize();

  assert(Call->getType() == Call->getArg(0)->getType());
  pushInteger(S, Result, Call->getType());
  return true;
}
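// For illustration: the carry builtins chain two overflow checks, e.g.
//   unsigned C;
//   __builtin_addc(0xFFFFFFFFu, 0u, 1u, &C) == 0u  // with C == 1
// since here it is the carry-in, not the addition itself, that overflows.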
static bool interp__builtin_clz(InterpState &S, CodePtr OpPC,
                                const InterpFrame *Frame, const Function *Func,
                                const CallExpr *Call) {
  unsigned CallSize = callArgSize(S, Call);
  unsigned BuiltinOp = Func->getBuiltinID();
  PrimType ValT = *S.getContext().classify(Call->getArg(0));
  const APSInt &Val = peekToAPSInt(S.Stk, ValT, CallSize);

  // When the argument is 0, the result of GCC builtins is undefined, whereas
  // for Microsoft intrinsics, the result is the bit-width of the argument.
  bool ZeroIsUndefined = BuiltinOp != Builtin::BI__lzcnt16 &&
                         BuiltinOp != Builtin::BI__lzcnt &&
                         BuiltinOp != Builtin::BI__lzcnt64;

  if (Val == 0) {
    if (Func->getBuiltinID() == Builtin::BI__builtin_clzg &&
        Call->getNumArgs() == 2) {
      // We have a fallback parameter.
      PrimType FallbackT = *S.getContext().classify(Call->getArg(1));
      const APSInt &Fallback = peekToAPSInt(S.Stk, FallbackT);
      pushInteger(S, Fallback, Call->getType());
      return true;
    }

    if (ZeroIsUndefined)
      return false;
  }

  pushInteger(S, Val.countl_zero(), Call->getType());
  return true;
}

static bool interp__builtin_ctz(InterpState &S, CodePtr OpPC,
                                const InterpFrame *Frame, const Function *Func,
                                const CallExpr *Call) {
  unsigned CallSize = callArgSize(S, Call);
  PrimType ValT = *S.getContext().classify(Call->getArg(0));
  const APSInt &Val = peekToAPSInt(S.Stk, ValT, CallSize);

  if (Val == 0) {
    if (Func->getBuiltinID() == Builtin::BI__builtin_ctzg &&
        Call->getNumArgs() == 2) {
      // We have a fallback parameter.
      PrimType FallbackT = *S.getContext().classify(Call->getArg(1));
      const APSInt &Fallback = peekToAPSInt(S.Stk, FallbackT);
      pushInteger(S, Fallback, Call->getType());
      return true;
    }
    return false;
  }

  pushInteger(S, Val.countr_zero(), Call->getType());
  return true;
}

static bool interp__builtin_bswap(InterpState &S, CodePtr OpPC,
                                  const InterpFrame *Frame,
                                  const Function *Func, const CallExpr *Call) {
  PrimType ReturnT = *S.getContext().classify(Call->getType());
  PrimType ValT = *S.getContext().classify(Call->getArg(0));
  const APSInt &Val = peekToAPSInt(S.Stk, ValT);
  assert(Val.getActiveBits() <= 64);

  INT_TYPE_SWITCH(ReturnT,
                  { S.Stk.push<T>(T::from(Val.byteSwap().getZExtValue())); });
  return true;
}

/// bool __atomic_always_lock_free(size_t, void const volatile*)
/// bool __atomic_is_lock_free(size_t, void const volatile*)
/// bool __c11_atomic_is_lock_free(size_t)
static bool interp__builtin_atomic_lock_free(InterpState &S, CodePtr OpPC,
                                             const InterpFrame *Frame,
                                             const Function *Func,
                                             const CallExpr *Call) {
  unsigned BuiltinOp = Func->getBuiltinID();

  PrimType ValT = *S.getContext().classify(Call->getArg(0));
  unsigned SizeValOffset = 0;
  if (BuiltinOp != Builtin::BI__c11_atomic_is_lock_free)
    SizeValOffset = align(primSize(ValT)) + align(primSize(PT_Ptr));
  const APSInt &SizeVal = peekToAPSInt(S.Stk, ValT, SizeValOffset);

  auto returnBool = [&S](bool Value) -> bool {
    S.Stk.push<Boolean>(Value);
    return true;
  };

  // For __atomic_is_lock_free(sizeof(_Atomic(T))), if the size is a power
  // of two less than or equal to the maximum inline atomic width, we know it
  // is lock-free. If the size isn't a power of two, or greater than the
  // maximum alignment where we promote atomics, we know it is not lock-free
  // (at least not in the sense of atomic_is_lock_free). Otherwise,
  // the answer can only be determined at runtime; for example, 16-byte
  // atomics have lock-free implementations on some, but not all,
  // x86-64 processors.

  // Check power-of-two.
  CharUnits Size = CharUnits::fromQuantity(SizeVal.getZExtValue());
  if (Size.isPowerOfTwo()) {
    // Check against inlining width.
    unsigned InlineWidthBits =
        S.getASTContext().getTargetInfo().getMaxAtomicInlineWidth();
    if (Size <= S.getASTContext().toCharUnitsFromBits(InlineWidthBits)) {

      // OK, we will inline appropriately-aligned operations of this size,
      // and _Atomic(T) is appropriately-aligned.
      if (BuiltinOp == Builtin::BI__c11_atomic_is_lock_free ||
          Size == CharUnits::One())
        return returnBool(true);

      // Same for null pointers.
      assert(BuiltinOp != Builtin::BI__c11_atomic_is_lock_free);
      const Pointer &Ptr = S.Stk.peek<Pointer>();
      if (Ptr.isZero())
        return returnBool(true);

      if (Ptr.isIntegralPointer()) {
        uint64_t IntVal = Ptr.getIntegerRepresentation();
        if (APSInt(APInt(64, IntVal, false), true).isAligned(Size.getAsAlign()))
          return returnBool(true);
      }

      // Otherwise, check the type's alignment against Size.
      const Expr *PtrArg = Call->getArg(1);
      if (const auto *ICE = dyn_cast<ImplicitCastExpr>(PtrArg)) {
        // Drop the potential implicit-cast to 'const volatile void*', getting
        // the underlying type.
        if (ICE->getCastKind() == CK_BitCast)
          PtrArg = ICE->getSubExpr();
      }

      if (auto PtrTy = PtrArg->getType()->getAs<PointerType>()) {
        QualType PointeeType = PtrTy->getPointeeType();
        if (!PointeeType->isIncompleteType() &&
            S.getASTContext().getTypeAlignInChars(PointeeType) >= Size) {
          // OK, we will inline operations on this object.
          return returnBool(true);
        }
      }
    }
  }

  if (BuiltinOp == Builtin::BI__atomic_always_lock_free)
    return returnBool(false);

  return false;
}
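// Illustrative only: with a power-of-two size within the target's inline
// atomic width, the answer is statically known, e.g. on typical targets
//   __atomic_always_lock_free(sizeof(int), nullptr)  // true
// while over-sized or under-aligned cases degrade to a runtime question and
// therefore fail to constant-evaluate here.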
/// __builtin_complex(float A, float B);
static bool interp__builtin_complex(InterpState &S, CodePtr OpPC,
                                    const InterpFrame *Frame,
                                    const Function *Func,
                                    const CallExpr *Call) {
  const Floating &Arg2 = S.Stk.peek<Floating>();
  const Floating &Arg1 = S.Stk.peek<Floating>(align(primSize(PT_Float)) * 2);
  Pointer &Result = S.Stk.peek<Pointer>(align(primSize(PT_Float)) * 2 +
                                        align(primSize(PT_Ptr)));

  Result.atIndex(0).deref<Floating>() = Arg1;
  Result.atIndex(0).initialize();
  Result.atIndex(1).deref<Floating>() = Arg2;
  Result.atIndex(1).initialize();
  Result.initialize();

  return true;
}
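// For illustration: __builtin_complex assembles the real and imaginary parts
// exactly, which matters for signed zeros, e.g.
//   _Complex double Z = __builtin_complex(0.0, -0.0);
// a negative-zero imaginary part that arithmetic on 0.0 and -0.0*I need not
// preserve.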
/// __builtin_is_aligned()
/// __builtin_align_up()
/// __builtin_align_down()
/// The first parameter is either an integer or a pointer.
/// The second parameter is the requested alignment as an integer.
static bool interp__builtin_is_aligned_up_down(InterpState &S, CodePtr OpPC,
                                               const InterpFrame *Frame,
                                               const Function *Func,
                                               const CallExpr *Call) {
  unsigned BuiltinOp = Func->getBuiltinID();
  unsigned CallSize = callArgSize(S, Call);

  PrimType AlignmentT = *S.Ctx.classify(Call->getArg(1));
  const APSInt &Alignment = peekToAPSInt(S.Stk, AlignmentT);

  if (Alignment < 0 || !Alignment.isPowerOf2()) {
    S.FFDiag(Call, diag::note_constexpr_invalid_alignment) << Alignment;
    return false;
  }
  unsigned SrcWidth = S.getASTContext().getIntWidth(Call->getArg(0)->getType());
  APSInt MaxValue(APInt::getOneBitSet(SrcWidth, SrcWidth - 1));
  if (APSInt::compareValues(Alignment, MaxValue) > 0) {
    S.FFDiag(Call, diag::note_constexpr_alignment_too_big)
        << MaxValue << Call->getArg(0)->getType() << Alignment;
    return false;
  }

  // The first parameter is either an integer or a pointer (but not a function
  // pointer).
  PrimType FirstArgT = *S.Ctx.classify(Call->getArg(0));

  if (isIntegralType(FirstArgT)) {
    const APSInt &Src = peekToAPSInt(S.Stk, FirstArgT, CallSize);
    APSInt Align = Alignment.extOrTrunc(Src.getBitWidth());
    if (BuiltinOp == Builtin::BI__builtin_align_up) {
      APSInt AlignedVal =
          APSInt((Src + (Align - 1)) & ~(Align - 1), Src.isUnsigned());
      pushInteger(S, AlignedVal, Call->getType());
    } else if (BuiltinOp == Builtin::BI__builtin_align_down) {
      APSInt AlignedVal = APSInt(Src & ~(Align - 1), Src.isUnsigned());
      pushInteger(S, AlignedVal, Call->getType());
    } else {
      assert(*S.Ctx.classify(Call->getType()) == PT_Bool);
      S.Stk.push<Boolean>((Src & (Align - 1)) == 0);
    }
    return true;
  }

  assert(FirstArgT == PT_Ptr);
  const Pointer &Ptr = S.Stk.peek<Pointer>(CallSize);

  unsigned PtrOffset = Ptr.getIndex();
  CharUnits BaseAlignment =
      S.getASTContext().getDeclAlign(Ptr.getDeclDesc()->asValueDecl());
  CharUnits PtrAlign =
      BaseAlignment.alignmentAtOffset(CharUnits::fromQuantity(PtrOffset));

  if (BuiltinOp == Builtin::BI__builtin_is_aligned) {
    if (PtrAlign.getQuantity() >= Alignment) {
      S.Stk.push<Boolean>(true);
      return true;
    }
    // If the alignment is not known to be sufficient, some cases could still
    // be aligned at run time. However, if the requested alignment is less or
    // equal to the base alignment and the offset is not aligned, we know that
    // the run-time value can never be aligned.
    if (BaseAlignment.getQuantity() >= Alignment &&
        PtrAlign.getQuantity() < Alignment) {
      S.Stk.push<Boolean>(false);
      return true;
    }

    S.FFDiag(Call->getArg(0), diag::note_constexpr_alignment_compute)
        << Alignment;
    return false;
  }

  assert(BuiltinOp == Builtin::BI__builtin_align_down ||
         BuiltinOp == Builtin::BI__builtin_align_up);

  // For align_up/align_down, we can return the same value if the alignment
  // is known to be greater or equal to the requested value.
  if (PtrAlign.getQuantity() >= Alignment) {
    S.Stk.push<Pointer>(Ptr);
    return true;
  }

  // The alignment could be greater than the minimum at run-time, so we cannot
  // infer much about the resulting pointer value. One case is possible:
  // For `_Alignas(32) char buf[N]; __builtin_align_down(&buf[idx], 32)` we
  // can infer the correct index if the requested alignment is smaller than
  // the base alignment so we can perform the computation on the offset.
  if (BaseAlignment.getQuantity() >= Alignment) {
    assert(Alignment.getBitWidth() <= 64 &&
           "Cannot handle > 64-bit address-space");
    uint64_t Alignment64 = Alignment.getZExtValue();
    CharUnits NewOffset =
        CharUnits::fromQuantity(BuiltinOp == Builtin::BI__builtin_align_down
                                    ? llvm::alignDown(PtrOffset, Alignment64)
                                    : llvm::alignTo(PtrOffset, Alignment64));

    S.Stk.push<Pointer>(Ptr.atIndex(NewOffset.getQuantity()));
    return true;
  }

  // Otherwise, we cannot constant-evaluate the result.
  S.FFDiag(Call->getArg(0), diag::note_constexpr_alignment_adjust) << Alignment;
  return false;
}
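// Illustrative only, using the same mask arithmetic as above:
//   __builtin_align_up(13, 8) == 16   // (13 + 7) & ~7
//   __builtin_align_down(13, 8) == 8  // 13 & ~7
//   __builtin_is_aligned(16, 8)       // true: (16 & 7) == 0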
/// __builtin_assume_aligned(Ptr, Alignment[, ExtraOffset])
static bool interp__builtin_assume_aligned(InterpState &S, CodePtr OpPC,
                                           const InterpFrame *Frame,
                                           const Function *Func,
                                           const CallExpr *Call) {
  assert(Call->getNumArgs() == 2 || Call->getNumArgs() == 3);

  // Might be called with function pointers in C.
  std::optional<PrimType> PtrT = S.Ctx.classify(Call->getArg(0));
  if (PtrT != PT_Ptr)
    return false;

  unsigned ArgSize = callArgSize(S, Call);
  const Pointer &Ptr = S.Stk.peek<Pointer>(ArgSize);
  std::optional<APSInt> ExtraOffset;
  APSInt Alignment;
  if (Call->getNumArgs() == 2) {
    Alignment = peekToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(1)));
  } else {
    PrimType AlignmentT = *S.Ctx.classify(Call->getArg(1));
    PrimType ExtraOffsetT = *S.Ctx.classify(Call->getArg(2));
    Alignment = peekToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(1)),
                             align(primSize(AlignmentT)) +
                                 align(primSize(ExtraOffsetT)));
    ExtraOffset = peekToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(2)));
  }

  CharUnits Align = CharUnits::fromQuantity(Alignment.getZExtValue());

  // If there is a base object, then it must have the correct alignment.
  if (Ptr.isBlockPointer()) {
    CharUnits BaseAlignment;
    if (const auto *VD = Ptr.getDeclDesc()->asValueDecl())
      BaseAlignment = S.getASTContext().getDeclAlign(VD);
    else if (const auto *E = Ptr.getDeclDesc()->asExpr())
      BaseAlignment = GetAlignOfExpr(S.getASTContext(), E, UETT_AlignOf);

    if (BaseAlignment < Align) {
      S.CCEDiag(Call->getArg(0),
                diag::note_constexpr_baa_insufficient_alignment)
          << 0 << BaseAlignment.getQuantity() << Align.getQuantity();
      return false;
    }
  }

  APValue AV = Ptr.toAPValue(S.getASTContext());
  CharUnits AVOffset = AV.getLValueOffset();
  if (ExtraOffset)
    AVOffset -= CharUnits::fromQuantity(ExtraOffset->getZExtValue());
  if (AVOffset.alignTo(Align) != AVOffset) {
    if (Ptr.isBlockPointer())
      S.CCEDiag(Call->getArg(0),
                diag::note_constexpr_baa_insufficient_alignment)
          << 1 << AVOffset.getQuantity() << Align.getQuantity();
    else
      S.CCEDiag(Call->getArg(0),
                diag::note_constexpr_baa_value_insufficient_alignment)
          << AVOffset.getQuantity() << Align.getQuantity();
    return false;
  }

  S.Stk.push<Pointer>(Ptr);
  return true;
}

static bool interp__builtin_ia32_bextr(InterpState &S, CodePtr OpPC,
                                       const InterpFrame *Frame,
                                       const Function *Func,
                                       const CallExpr *Call) {
  if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() ||
      !Call->getArg(1)->getType()->isIntegerType())
    return false;

  PrimType ValT = *S.Ctx.classify(Call->getArg(0));
  PrimType IndexT = *S.Ctx.classify(Call->getArg(1));
  APSInt Val = peekToAPSInt(S.Stk, ValT,
                            align(primSize(ValT)) + align(primSize(IndexT)));
  APSInt Index = peekToAPSInt(S.Stk, IndexT);

  unsigned BitWidth = Val.getBitWidth();
  uint64_t Shift = Index.extractBitsAsZExtValue(8, 0);
  uint64_t Length = Index.extractBitsAsZExtValue(8, 8);
  Length = Length > BitWidth ? BitWidth : Length;

  // Handle out of bounds cases.
  if (Length == 0 || Shift >= BitWidth) {
    pushInteger(S, 0, Call->getType());
    return true;
  }

  uint64_t Result = Val.getZExtValue() >> Shift;
  Result &= llvm::maskTrailingOnes<uint64_t>(Length);
  pushInteger(S, Result, Call->getType());
  return true;
}
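// For illustration: the control operand packs the start bit in bits [7:0]
// and the field length in bits [15:8], e.g. start = 4, length = 8:
//   __builtin_ia32_bextr_u32(0x0FF0u, 0x0804u) == 0xFFu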
static bool interp__builtin_ia32_bzhi(InterpState &S, CodePtr OpPC,
                                      const InterpFrame *Frame,
                                      const Function *Func,
                                      const CallExpr *Call) {
  QualType CallType = Call->getType();
  if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() ||
      !Call->getArg(1)->getType()->isIntegerType() ||
      !CallType->isIntegerType())
    return false;

  PrimType ValT = *S.Ctx.classify(Call->getArg(0));
  PrimType IndexT = *S.Ctx.classify(Call->getArg(1));

  APSInt Val = peekToAPSInt(S.Stk, ValT,
                            align(primSize(ValT)) + align(primSize(IndexT)));
  APSInt Idx = peekToAPSInt(S.Stk, IndexT);

  unsigned BitWidth = Val.getBitWidth();
  uint64_t Index = Idx.extractBitsAsZExtValue(8, 0);

  if (Index < BitWidth)
    Val.clearHighBits(BitWidth - Index);

  pushInteger(S, Val, CallType);
  return true;
}

static bool interp__builtin_ia32_lzcnt(InterpState &S, CodePtr OpPC,
                                       const InterpFrame *Frame,
                                       const Function *Func,
                                       const CallExpr *Call) {
  QualType CallType = Call->getType();
  if (!CallType->isIntegerType() ||
      !Call->getArg(0)->getType()->isIntegerType())
    return false;

  APSInt Val = peekToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(0)));
  pushInteger(S, Val.countLeadingZeros(), CallType);
  return true;
}

static bool interp__builtin_ia32_tzcnt(InterpState &S, CodePtr OpPC,
                                       const InterpFrame *Frame,
                                       const Function *Func,
                                       const CallExpr *Call) {
  QualType CallType = Call->getType();
  if (!CallType->isIntegerType() ||
      !Call->getArg(0)->getType()->isIntegerType())
    return false;

  APSInt Val = peekToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(0)));
  pushInteger(S, Val.countTrailingZeros(), CallType);
  return true;
}

static bool interp__builtin_ia32_pdep(InterpState &S, CodePtr OpPC,
                                      const InterpFrame *Frame,
                                      const Function *Func,
                                      const CallExpr *Call) {
  if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() ||
      !Call->getArg(1)->getType()->isIntegerType())
    return false;

  PrimType ValT = *S.Ctx.classify(Call->getArg(0));
  PrimType MaskT = *S.Ctx.classify(Call->getArg(1));

  APSInt Val =
      peekToAPSInt(S.Stk, ValT, align(primSize(ValT)) + align(primSize(MaskT)));
  APSInt Mask = peekToAPSInt(S.Stk, MaskT);

  unsigned BitWidth = Val.getBitWidth();
  APInt Result = APInt::getZero(BitWidth);
  for (unsigned I = 0, P = 0; I != BitWidth; ++I) {
    if (Mask[I])
      Result.setBitVal(I, Val[P++]);
  }
  pushInteger(S, Result, Call->getType());
  return true;
}
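// Illustrative only: pdep scatters the low bits of the value into the set
// positions of the mask; pext (below) performs the inverse gather, e.g.
//   __builtin_ia32_pdep_si(0b101u, 0b11010u) == 0b10010u
//   __builtin_ia32_pext_si(0b10010u, 0b11010u) == 0b101u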
static bool interp__builtin_ia32_pext(InterpState &S, CodePtr OpPC,
                                      const InterpFrame *Frame,
                                      const Function *Func,
                                      const CallExpr *Call) {
  if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() ||
      !Call->getArg(1)->getType()->isIntegerType())
    return false;

  PrimType ValT = *S.Ctx.classify(Call->getArg(0));
  PrimType MaskT = *S.Ctx.classify(Call->getArg(1));

  APSInt Val =
      peekToAPSInt(S.Stk, ValT, align(primSize(ValT)) + align(primSize(MaskT)));
  APSInt Mask = peekToAPSInt(S.Stk, MaskT);

  unsigned BitWidth = Val.getBitWidth();
  APInt Result = APInt::getZero(BitWidth);
  for (unsigned I = 0, P = 0; I != BitWidth; ++I) {
    if (Mask[I])
      Result.setBitVal(P++, Val[I]);
  }
  pushInteger(S, Result, Call->getType());
  return true;
}

static bool interp__builtin_ia32_addcarry_subborrow(InterpState &S,
                                                    CodePtr OpPC,
                                                    const InterpFrame *Frame,
                                                    const Function *Func,
                                                    const CallExpr *Call) {
  if (Call->getNumArgs() != 4 || !Call->getArg(0)->getType()->isIntegerType() ||
      !Call->getArg(1)->getType()->isIntegerType() ||
      !Call->getArg(2)->getType()->isIntegerType())
    return false;

  unsigned BuiltinOp = Func->getBuiltinID();
  APSInt CarryIn = getAPSIntParam(Frame, 0);
  APSInt LHS = getAPSIntParam(Frame, 1);
  APSInt RHS = getAPSIntParam(Frame, 2);

  bool IsAdd = BuiltinOp == clang::X86::BI__builtin_ia32_addcarryx_u32 ||
               BuiltinOp == clang::X86::BI__builtin_ia32_addcarryx_u64;

  unsigned BitWidth = LHS.getBitWidth();
  unsigned CarryInBit = CarryIn.ugt(0) ? 1 : 0;
  APInt ExResult =
      IsAdd ? (LHS.zext(BitWidth + 1) + (RHS.zext(BitWidth + 1) + CarryInBit))
            : (LHS.zext(BitWidth + 1) - (RHS.zext(BitWidth + 1) + CarryInBit));

  APInt Result = ExResult.extractBits(BitWidth, 0);
  APSInt CarryOut =
      APSInt(ExResult.extractBits(1, BitWidth), /*IsUnsigned=*/true);

  Pointer &CarryOutPtr = S.Stk.peek<Pointer>();
  QualType CarryOutType = Call->getArg(3)->getType()->getPointeeType();
  PrimType CarryOutT = *S.getContext().classify(CarryOutType);
  assignInteger(CarryOutPtr, CarryOutT, APSInt(Result, true));

  pushInteger(S, CarryOut, Call->getType());

  return true;
}

static bool interp__builtin_os_log_format_buffer_size(InterpState &S,
                                                      CodePtr OpPC,
                                                      const InterpFrame *Frame,
                                                      const Function *Func,
                                                      const CallExpr *Call) {
  analyze_os_log::OSLogBufferLayout Layout;
  analyze_os_log::computeOSLogBufferLayout(S.getASTContext(), Call, Layout);
  pushInteger(S, Layout.size().getQuantity(), Call->getType());
  return true;
}

static bool interp__builtin_ptrauth_string_discriminator(
    InterpState &S, CodePtr OpPC, const InterpFrame *Frame,
    const Function *Func, const CallExpr *Call) {
  const auto &Ptr = S.Stk.peek<Pointer>();
  assert(Ptr.getFieldDesc()->isPrimitiveArray());

  StringRef R(&Ptr.deref<char>(), Ptr.getFieldDesc()->getNumElems() - 1);
  uint64_t Result = getPointerAuthStableSipHash(R);
  pushInteger(S, Result, Call->getType());
  return true;
}
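// For illustration (semantics assumed from the SipHash helper above): the
// result is a stable discriminator derived from the literal's contents, so
//   constexpr auto D = __builtin_ptrauth_string_discriminator("vtable");
// folds to the same value on every host.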
// FIXME: This implementation is not complete.
// The Compiler instance we create cannot access the current stack frame, local
// variables, function parameters, etc. We also need protection from
// side-effects, fatal errors, etc.
static bool interp__builtin_constant_p(InterpState &S, CodePtr OpPC,
                                       const InterpFrame *Frame,
                                       const Function *Func,
                                       const CallExpr *Call) {
  const Expr *Arg = Call->getArg(0);
  QualType ArgType = Arg->getType();

  auto returnInt = [&S, Call](bool Value) -> bool {
    pushInteger(S, Value, Call->getType());
    return true;
  };

  // __builtin_constant_p always has one operand. The rules which gcc follows
  // are not precisely documented, but are as follows:
  //
  //  - If the operand is of integral, floating, complex or enumeration type,
  //    and can be folded to a known value of that type, it returns 1.
  //  - If the operand can be folded to a pointer to the first character
  //    of a string literal (or such a pointer cast to an integral type)
  //    or to a null pointer or an integer cast to a pointer, it returns 1.
  //
  // Otherwise, it returns 0.
  //
  // FIXME: GCC also intends to return 1 for literals of aggregate types, but
  // its support for this did not work prior to GCC 9 and is not yet well
  // understood.
  if (ArgType->isIntegralOrEnumerationType() || ArgType->isFloatingType() ||
      ArgType->isAnyComplexType() || ArgType->isPointerType() ||
      ArgType->isNullPtrType()) {
    InterpStack Stk;
    Compiler<EvalEmitter> C(S.Ctx, S.P, S, Stk);
    auto Res = C.interpretExpr(Arg, /*ConvertResultToRValue=*/Arg->isGLValue());
    if (Res.isInvalid()) {
      C.cleanup();
      Stk.clear();
    }

    if (!Res.isInvalid() && !Res.empty()) {
      const APValue &LV = Res.toAPValue();
      if (LV.isLValue()) {
        APValue::LValueBase Base = LV.getLValueBase();
        if (Base.isNull()) {
          // A null base is acceptable.
          return returnInt(true);
        } else if (const auto *E = Base.dyn_cast<const Expr *>()) {
          if (!isa<StringLiteral>(E))
            return returnInt(false);
          return returnInt(LV.getLValueOffset().isZero());
        } else if (Base.is<TypeInfoLValue>()) {
          // Surprisingly, GCC considers __builtin_constant_p(&typeid(int)) to
          // evaluate to true.
          return returnInt(true);
        } else {
          // Any other base is not constant enough for GCC.
          return returnInt(false);
        }
      }
    }

    // Otherwise, any constant value is good enough.
    return returnInt(true);
  }

  return returnInt(false);
}
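// Illustrative only:
//   __builtin_constant_p(42)    // 1: integer constant
//   __builtin_constant_p("abc") // 1: pointer to the start of a string literal
// whereas the address of an ordinary object folds to 0.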

static bool interp__builtin_operator_new(InterpState &S, CodePtr OpPC,
                                         const InterpFrame *Frame,
                                         const Function *Func,
                                         const CallExpr *Call) {
  // A call to __builtin_operator_new is only valid within
  // std::allocator<T>::allocate. Walk up the call stack to find the
  // appropriate caller and get the element type from it.
  QualType ElemType;

  for (const InterpFrame *F = Frame; F; F = F->Caller) {
    const Function *Func = F->getFunction();
    if (!Func)
      continue;
    const auto *MD = dyn_cast_if_present<CXXMethodDecl>(Func->getDecl());
    if (!MD)
      continue;
    const IdentifierInfo *FnII = MD->getIdentifier();
    if (!FnII || !FnII->isStr("allocate"))
      continue;

    const auto *CTSD =
        dyn_cast<ClassTemplateSpecializationDecl>(MD->getParent());
    if (!CTSD)
      continue;

    const IdentifierInfo *ClassII = CTSD->getIdentifier();
    const TemplateArgumentList &TAL = CTSD->getTemplateArgs();
    if (CTSD->isInStdNamespace() && ClassII && ClassII->isStr("allocator") &&
        TAL.size() >= 1 && TAL[0].getKind() == TemplateArgument::Type) {
      ElemType = TAL[0].getAsType();
      break;
    }
  }

  if (ElemType.isNull()) {
    S.FFDiag(Call, S.getLangOpts().CPlusPlus20
                       ? diag::note_constexpr_new_untyped
                       : diag::note_constexpr_new);
    return false;
  }

  if (ElemType->isIncompleteType() || ElemType->isFunctionType()) {
    S.FFDiag(Call, diag::note_constexpr_new_not_complete_object_type)
        << (ElemType->isIncompleteType() ? 0 : 1) << ElemType;
    return false;
  }

  APSInt Bytes =
      peekToAPSInt(S.Stk, *S.getContext().classify(Call->getArg(0)));
  CharUnits ElemSize = S.getASTContext().getTypeSizeInChars(ElemType);
  assert(!ElemSize.isZero());
  // Divide the number of bytes by sizeof(ElemType), so we get the number of
  // elements we should allocate.
  APInt NumElems, Remainder;
  APInt ElemSizeAP(Bytes.getBitWidth(), ElemSize.getQuantity());
  APInt::udivrem(Bytes, ElemSizeAP, NumElems, Remainder);
  if (Remainder != 0) {
    // This likely indicates a bug in the implementation of 'std::allocator'.
    S.FFDiag(Call, diag::note_constexpr_operator_new_bad_size)
        << Bytes << APSInt(ElemSizeAP, true) << ElemType;
    return false;
  }

  // NB: The same check we're using in CheckArraySize()
  if (NumElems.getActiveBits() >
          ConstantArrayType::getMaxSizeBits(S.getASTContext()) ||
      NumElems.ugt(Descriptor::MaxArrayElemBytes / ElemSize.getQuantity())) {
    // FIXME: NoThrow check?
    const SourceInfo &Loc = S.Current->getSource(OpPC);
    S.FFDiag(Loc, diag::note_constexpr_new_too_large)
        << NumElems.getZExtValue();
    return false;
  }

  std::optional<PrimType> ElemT = S.getContext().classify(ElemType);
  DynamicAllocator &Allocator = S.getAllocator();
  if (ElemT) {
    if (NumElems.ule(1)) {
      const Descriptor *Desc =
          S.P.createDescriptor(Call, *ElemT, Descriptor::InlineDescMD,
                               /*IsConst=*/false, /*IsTemporary=*/false,
                               /*IsMutable=*/false);
      Block *B = Allocator.allocate(Desc, S.getContext().getEvalID(),
                                    DynamicAllocator::Form::Operator);
      assert(B);

      S.Stk.push<Pointer>(B);
      return true;
    }
    assert(NumElems.ugt(1));

    Block *B =
        Allocator.allocate(Call, *ElemT, NumElems.getZExtValue(),
                           S.Ctx.getEvalID(), DynamicAllocator::Form::Operator);
    assert(B);
    S.Stk.push<Pointer>(B);
    return true;
  }

  assert(!ElemT);
  // Structs etc.
  const Descriptor *Desc = S.P.createDescriptor(
      Call, ElemType.getTypePtr(), Descriptor::InlineDescMD,
      /*IsConst=*/false, /*IsTemporary=*/false, /*IsMutable=*/false,
      /*Init=*/nullptr);

  if (NumElems.ule(1)) {
    Block *B = Allocator.allocate(Desc, S.getContext().getEvalID(),
                                  DynamicAllocator::Form::Operator);
    assert(B);
    S.Stk.push<Pointer>(B);
    return true;
  }

  Block *B =
      Allocator.allocate(Desc, NumElems.getZExtValue(), S.Ctx.getEvalID(),
                         DynamicAllocator::Form::Operator);
  assert(B);
  S.Stk.push<Pointer>(B);
  return true;
}
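// Worked example of the size math above (a sketch, assuming a 4-byte int):
// a call reached via std::allocator<int>::allocate(5) passes Bytes = 20, so
// with ElemSize = 4 we get NumElems = 5 and Remainder = 0, and a five-element
// array block is allocated.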

static bool interp__builtin_operator_delete(InterpState &S, CodePtr OpPC,
                                            const InterpFrame *Frame,
                                            const Function *Func,
                                            const CallExpr *Call) {
  const Expr *Source = nullptr;
  const Block *BlockToDelete = nullptr;

  {
    const Pointer &Ptr = S.Stk.peek<Pointer>();

    if (Ptr.isZero()) {
      S.CCEDiag(Call, diag::note_constexpr_deallocate_null);
      return true;
    }

    Source = Ptr.getDeclDesc()->asExpr();
    BlockToDelete = Ptr.block();
  }
  assert(BlockToDelete);

  DynamicAllocator &Allocator = S.getAllocator();
  const Descriptor *BlockDesc = BlockToDelete->getDescriptor();
  std::optional<DynamicAllocator::Form> AllocForm =
      Allocator.getAllocationForm(Source);

  if (!Allocator.deallocate(Source, BlockToDelete, S)) {
    // Nothing has been deallocated; this must be a double-delete.
    const SourceInfo &Loc = S.Current->getSource(OpPC);
    S.FFDiag(Loc, diag::note_constexpr_double_delete);
    return false;
  }
  assert(AllocForm);

  return CheckNewDeleteForms(
      S, OpPC, *AllocForm, DynamicAllocator::Form::Operator, BlockDesc, Source);
}

static bool interp__builtin_arithmetic_fence(InterpState &S, CodePtr OpPC,
                                             const InterpFrame *Frame,
                                             const Function *Func,
                                             const CallExpr *Call) {
  const Floating &Arg0 = S.Stk.peek<Floating>();
  S.Stk.push<Floating>(Arg0);
  return true;
}

static bool interp__builtin_vector_reduce(InterpState &S, CodePtr OpPC,
                                          const InterpFrame *Frame,
                                          const Function *Func,
                                          const CallExpr *Call) {
  const Pointer &Arg = S.Stk.peek<Pointer>();
  assert(Arg.getFieldDesc()->isPrimitiveArray());

  unsigned ID = Func->getBuiltinID();
  QualType ElemType = Arg.getFieldDesc()->getElemQualType();
  assert(Call->getType() == ElemType);
  PrimType ElemT = *S.getContext().classify(ElemType);
  unsigned NumElems = Arg.getNumElems();

  INT_TYPE_SWITCH_NO_BOOL(ElemT, {
    T Result = Arg.atIndex(0).deref<T>();
    unsigned BitWidth = Result.bitWidth();
    for (unsigned I = 1; I != NumElems; ++I) {
      T Elem = Arg.atIndex(I).deref<T>();
      T PrevResult = Result;

      if (ID == Builtin::BI__builtin_reduce_add) {
        if (T::add(Result, Elem, BitWidth, &Result)) {
          unsigned OverflowBits = BitWidth + 1;
          (void)handleOverflow(S, OpPC,
                               (PrevResult.toAPSInt(OverflowBits) +
                                Elem.toAPSInt(OverflowBits)));
          return false;
        }
      } else if (ID == Builtin::BI__builtin_reduce_mul) {
        if (T::mul(Result, Elem, BitWidth, &Result)) {
          unsigned OverflowBits = BitWidth * 2;
          (void)handleOverflow(S, OpPC,
                               (PrevResult.toAPSInt(OverflowBits) *
                                Elem.toAPSInt(OverflowBits)));
          return false;
        }
      } else if (ID == Builtin::BI__builtin_reduce_and) {
        (void)T::bitAnd(Result, Elem, BitWidth, &Result);
      } else if (ID == Builtin::BI__builtin_reduce_or) {
        (void)T::bitOr(Result, Elem, BitWidth, &Result);
      } else if (ID == Builtin::BI__builtin_reduce_xor) {
        (void)T::bitXor(Result, Elem, BitWidth, &Result);
      } else {
        llvm_unreachable("Unhandled vector reduce builtin");
      }
    }
    pushInteger(S, Result.toAPSInt(), Call->getType());
  });

  return true;
}
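// For instance (a sketch): __builtin_reduce_add over a vector holding
// {1, 2, 3, 4} folds to 10, while a signed element-wise sum or product that
// wraps is routed through handleOverflow() with the widened exact value and
// rejected as non-constant.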

/// Can be called with an integer or vector as the first and only parameter.
static bool interp__builtin_elementwise_popcount(InterpState &S, CodePtr OpPC,
                                                 const InterpFrame *Frame,
                                                 const Function *Func,
                                                 const CallExpr *Call) {
  assert(Call->getNumArgs() == 1);
  if (Call->getArg(0)->getType()->isIntegerType()) {
    PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
    APSInt Val = peekToAPSInt(S.Stk, ArgT);
    pushInteger(S, Val.popcount(), Call->getType());
    return true;
  }
  // Otherwise, the argument must be a vector.
  assert(Call->getArg(0)->getType()->isVectorType());
  const Pointer &Arg = S.Stk.peek<Pointer>();
  assert(Arg.getFieldDesc()->isPrimitiveArray());
  const Pointer &Dst = S.Stk.peek<Pointer>(primSize(PT_Ptr) * 2);
  assert(Dst.getFieldDesc()->isPrimitiveArray());
  assert(Arg.getFieldDesc()->getNumElems() ==
         Dst.getFieldDesc()->getNumElems());

  QualType ElemType = Arg.getFieldDesc()->getElemQualType();
  PrimType ElemT = *S.getContext().classify(ElemType);
  unsigned NumElems = Arg.getNumElems();

  // FIXME: Reading from uninitialized vector elements?
  for (unsigned I = 0; I != NumElems; ++I) {
    INT_TYPE_SWITCH_NO_BOOL(ElemT, {
      Dst.atIndex(I).deref<T>() =
          T::from(Arg.atIndex(I).deref<T>().toAPSInt().popcount());
      Dst.atIndex(I).initialize();
    });
  }

  return true;
}
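// Illustration (a sketch): the scalar form folds
// __builtin_elementwise_popcount(0xF0u) to 4, and the vector form writes each
// element's popcount into the corresponding lane of the destination vector.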

static bool interp__builtin_memcpy(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const Function *Func, const CallExpr *Call) {
  assert(Call->getNumArgs() == 3);
  unsigned ID = Func->getBuiltinID();
  Pointer DestPtr = getParam<Pointer>(Frame, 0);
  const Pointer &SrcPtr = getParam<Pointer>(Frame, 1);
  const APSInt &Size =
      peekToAPSInt(S.Stk, *S.getContext().classify(Call->getArg(2)));
  assert(!Size.isSigned() && "memcpy and friends take an unsigned size");

  if (ID == Builtin::BImemcpy || ID == Builtin::BImemmove)
    diagnoseNonConstexprBuiltin(S, OpPC, ID);

  bool Move = (ID == Builtin::BI__builtin_memmove || ID == Builtin::BImemmove);

  // If the size is zero, we treat this as always being a valid no-op.
  if (Size.isZero()) {
    S.Stk.push<Pointer>(DestPtr);
    return true;
  }

  if (SrcPtr.isZero() || DestPtr.isZero()) {
    Pointer DiagPtr = (SrcPtr.isZero() ? SrcPtr : DestPtr);
    S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_null)
        << /*IsMove=*/Move << /*IsWchar=*/false << !SrcPtr.isZero()
        << DiagPtr.toDiagnosticString(S.getASTContext());
    return false;
  }

  // As a last resort, reject dummy pointers.
  if (DestPtr.isDummy() || SrcPtr.isDummy())
    return false;

  if (!DoBitCastPtr(S, OpPC, SrcPtr, DestPtr, Size.getZExtValue()))
    return false;

  S.Stk.push<Pointer>(DestPtr);
  return true;
}
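// Typical constant-evaluated use (a sketch): copying sizeof(int) bytes
// between two int objects goes through DoBitCastPtr() above, a zero-byte copy
// is a no-op that just returns the destination pointer, and a null source or
// destination is diagnosed via note_constexpr_memcpy_null.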

bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const Function *F,
                      const CallExpr *Call, uint32_t BuiltinID) {
  const InterpFrame *Frame = S.Current;

  std::optional<PrimType> ReturnT = S.getContext().classify(Call);

  switch (BuiltinID) {
  case Builtin::BI__builtin_is_constant_evaluated:
    if (!interp__builtin_is_constant_evaluated(S, OpPC, Frame, Call))
      return false;
    break;
  case Builtin::BI__builtin_assume:
  case Builtin::BI__assume:
    break;
  case Builtin::BI__builtin_strcmp:
  case Builtin::BIstrcmp:
    if (!interp__builtin_strcmp(S, OpPC, Frame, F, Call))
      return false;
    break;
  case Builtin::BI__builtin_strlen:
  case Builtin::BIstrlen:
  case Builtin::BI__builtin_wcslen:
  case Builtin::BIwcslen:
    if (!interp__builtin_strlen(S, OpPC, Frame, F, Call))
      return false;
    break;
  case Builtin::BI__builtin_nan:
  case Builtin::BI__builtin_nanf:
  case Builtin::BI__builtin_nanl:
  case Builtin::BI__builtin_nanf16:
  case Builtin::BI__builtin_nanf128:
    if (!interp__builtin_nan(S, OpPC, Frame, F, /*Signaling=*/false))
      return false;
    break;
  case Builtin::BI__builtin_nans:
  case Builtin::BI__builtin_nansf:
  case Builtin::BI__builtin_nansl:
  case Builtin::BI__builtin_nansf16:
  case Builtin::BI__builtin_nansf128:
    if (!interp__builtin_nan(S, OpPC, Frame, F, /*Signaling=*/true))
      return false;
    break;

  case Builtin::BI__builtin_huge_val:
  case Builtin::BI__builtin_huge_valf:
  case Builtin::BI__builtin_huge_vall:
  case Builtin::BI__builtin_huge_valf16:
  case Builtin::BI__builtin_huge_valf128:
  case Builtin::BI__builtin_inf:
  case Builtin::BI__builtin_inff:
  case Builtin::BI__builtin_infl:
  case Builtin::BI__builtin_inff16:
  case Builtin::BI__builtin_inff128:
    if (!interp__builtin_inf(S, OpPC, Frame, F))
      return false;
    break;
  case Builtin::BI__builtin_copysign:
  case Builtin::BI__builtin_copysignf:
  case Builtin::BI__builtin_copysignl:
  case Builtin::BI__builtin_copysignf128:
    if (!interp__builtin_copysign(S, OpPC, Frame, F))
      return false;
    break;

  case Builtin::BI__builtin_fmin:
  case Builtin::BI__builtin_fminf:
  case Builtin::BI__builtin_fminl:
  case Builtin::BI__builtin_fminf16:
  case Builtin::BI__builtin_fminf128:
    if (!interp__builtin_fmin(S, OpPC, Frame, F, /*IsNumBuiltin=*/false))
      return false;
    break;

  case Builtin::BI__builtin_fminimum_num:
  case Builtin::BI__builtin_fminimum_numf:
  case Builtin::BI__builtin_fminimum_numl:
  case Builtin::BI__builtin_fminimum_numf16:
  case Builtin::BI__builtin_fminimum_numf128:
    if (!interp__builtin_fmin(S, OpPC, Frame, F, /*IsNumBuiltin=*/true))
      return false;
    break;

  case Builtin::BI__builtin_fmax:
  case Builtin::BI__builtin_fmaxf:
  case Builtin::BI__builtin_fmaxl:
  case Builtin::BI__builtin_fmaxf16:
  case Builtin::BI__builtin_fmaxf128:
    if (!interp__builtin_fmax(S, OpPC, Frame, F, /*IsNumBuiltin=*/false))
      return false;
    break;

  case Builtin::BI__builtin_fmaximum_num:
  case Builtin::BI__builtin_fmaximum_numf:
  case Builtin::BI__builtin_fmaximum_numl:
  case Builtin::BI__builtin_fmaximum_numf16:
  case Builtin::BI__builtin_fmaximum_numf128:
    if (!interp__builtin_fmax(S, OpPC, Frame, F, /*IsNumBuiltin=*/true))
      return false;
    break;

  case Builtin::BI__builtin_isnan:
    if (!interp__builtin_isnan(S, OpPC, Frame, F, Call))
      return false;
    break;
  case Builtin::BI__builtin_issignaling:
    if (!interp__builtin_issignaling(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_isinf:
    if (!interp__builtin_isinf(S, OpPC, Frame, F, /*Sign=*/false, Call))
      return false;
    break;

  case Builtin::BI__builtin_isinf_sign:
    if (!interp__builtin_isinf(S, OpPC, Frame, F, /*Sign=*/true, Call))
      return false;
    break;

  case Builtin::BI__builtin_isfinite:
    if (!interp__builtin_isfinite(S, OpPC, Frame, F, Call))
      return false;
    break;
  case Builtin::BI__builtin_isnormal:
    if (!interp__builtin_isnormal(S, OpPC, Frame, F, Call))
      return false;
    break;
  case Builtin::BI__builtin_issubnormal:
    if (!interp__builtin_issubnormal(S, OpPC, Frame, F, Call))
      return false;
    break;
  case Builtin::BI__builtin_iszero:
    if (!interp__builtin_iszero(S, OpPC, Frame, F, Call))
      return false;
    break;
  case Builtin::BI__builtin_signbit:
  case Builtin::BI__builtin_signbitf:
  case Builtin::BI__builtin_signbitl:
    if (!interp__builtin_signbit(S, OpPC, Frame, F, Call))
      return false;
    break;
  case Builtin::BI__builtin_isgreater:
  case Builtin::BI__builtin_isgreaterequal:
  case Builtin::BI__builtin_isless:
  case Builtin::BI__builtin_islessequal:
  case Builtin::BI__builtin_islessgreater:
  case Builtin::BI__builtin_isunordered:
    if (!interp_floating_comparison(S, OpPC, Frame, F, Call))
      return false;
    break;
  case Builtin::BI__builtin_isfpclass:
    if (!interp__builtin_isfpclass(S, OpPC, Frame, F, Call))
      return false;
    break;
  case Builtin::BI__builtin_fpclassify:
    if (!interp__builtin_fpclassify(S, OpPC, Frame, F, Call))
      return false;
    break;
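  // Note (a sketch): the classification builtins above fold on the evaluated
  // Floating operand, e.g. __builtin_isnan(__builtin_nan("")) evaluates to 1
  // and __builtin_isinf(1.0) to 0 during constant evaluation.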

  case Builtin::BI__builtin_fabs:
  case Builtin::BI__builtin_fabsf:
  case Builtin::BI__builtin_fabsl:
  case Builtin::BI__builtin_fabsf128:
    if (!interp__builtin_fabs(S, OpPC, Frame, F))
      return false;
    break;

  case Builtin::BI__builtin_abs:
  case Builtin::BI__builtin_labs:
  case Builtin::BI__builtin_llabs:
    if (!interp__builtin_abs(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_popcount:
  case Builtin::BI__builtin_popcountl:
  case Builtin::BI__builtin_popcountll:
  case Builtin::BI__builtin_popcountg:
  case Builtin::BI__popcnt16: // Microsoft variants of popcount
  case Builtin::BI__popcnt:
  case Builtin::BI__popcnt64:
    if (!interp__builtin_popcount(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_parity:
  case Builtin::BI__builtin_parityl:
  case Builtin::BI__builtin_parityll:
    if (!interp__builtin_parity(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_clrsb:
  case Builtin::BI__builtin_clrsbl:
  case Builtin::BI__builtin_clrsbll:
    if (!interp__builtin_clrsb(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_bitreverse8:
  case Builtin::BI__builtin_bitreverse16:
  case Builtin::BI__builtin_bitreverse32:
  case Builtin::BI__builtin_bitreverse64:
    if (!interp__builtin_bitreverse(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_classify_type:
    if (!interp__builtin_classify_type(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_expect:
  case Builtin::BI__builtin_expect_with_probability:
    if (!interp__builtin_expect(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_rotateleft8:
  case Builtin::BI__builtin_rotateleft16:
  case Builtin::BI__builtin_rotateleft32:
  case Builtin::BI__builtin_rotateleft64:
  case Builtin::BI_rotl8: // Microsoft variants of rotate left
  case Builtin::BI_rotl16:
  case Builtin::BI_rotl:
  case Builtin::BI_lrotl:
  case Builtin::BI_rotl64:
    if (!interp__builtin_rotate(S, OpPC, Frame, F, Call, /*Right=*/false))
      return false;
    break;

  case Builtin::BI__builtin_rotateright8:
  case Builtin::BI__builtin_rotateright16:
  case Builtin::BI__builtin_rotateright32:
  case Builtin::BI__builtin_rotateright64:
  case Builtin::BI_rotr8: // Microsoft variants of rotate right
  case Builtin::BI_rotr16:
  case Builtin::BI_rotr:
  case Builtin::BI_lrotr:
  case Builtin::BI_rotr64:
    if (!interp__builtin_rotate(S, OpPC, Frame, F, Call, /*Right=*/true))
      return false;
    break;

  case Builtin::BI__builtin_ffs:
  case Builtin::BI__builtin_ffsl:
  case Builtin::BI__builtin_ffsll:
    if (!interp__builtin_ffs(S, OpPC, Frame, F, Call))
      return false;
    break;
  case Builtin::BIaddressof:
  case Builtin::BI__addressof:
  case Builtin::BI__builtin_addressof:
    if (!interp__builtin_addressof(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BIas_const:
  case Builtin::BIforward:
  case Builtin::BIforward_like:
  case Builtin::BImove:
  case Builtin::BImove_if_noexcept:
    if (!interp__builtin_move(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_eh_return_data_regno:
    if (!interp__builtin_eh_return_data_regno(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_launder:
    if (!noopPointer(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_add_overflow:
  case Builtin::BI__builtin_sub_overflow:
  case Builtin::BI__builtin_mul_overflow:
  case Builtin::BI__builtin_sadd_overflow:
  case Builtin::BI__builtin_uadd_overflow:
  case Builtin::BI__builtin_uaddl_overflow:
  case Builtin::BI__builtin_uaddll_overflow:
  case Builtin::BI__builtin_usub_overflow:
  case Builtin::BI__builtin_usubl_overflow:
  case Builtin::BI__builtin_usubll_overflow:
  case Builtin::BI__builtin_umul_overflow:
  case Builtin::BI__builtin_umull_overflow:
  case Builtin::BI__builtin_umulll_overflow:
  case Builtin::BI__builtin_saddl_overflow:
  case Builtin::BI__builtin_saddll_overflow:
  case Builtin::BI__builtin_ssub_overflow:
  case Builtin::BI__builtin_ssubl_overflow:
  case Builtin::BI__builtin_ssubll_overflow:
  case Builtin::BI__builtin_smul_overflow:
  case Builtin::BI__builtin_smull_overflow:
  case Builtin::BI__builtin_smulll_overflow:
    if (!interp__builtin_overflowop(S, OpPC, Frame, F, Call))
      return false;
    break;
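  // Example for the overflow builtins (a sketch): in a constant expression,
  // `int R; bool O = __builtin_add_overflow(INT_MAX, 1, &R);` yields O == true
  // with the wrapped two's-complement value stored in R.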

  case Builtin::BI__builtin_addcb:
  case Builtin::BI__builtin_addcs:
  case Builtin::BI__builtin_addc:
  case Builtin::BI__builtin_addcl:
  case Builtin::BI__builtin_addcll:
  case Builtin::BI__builtin_subcb:
  case Builtin::BI__builtin_subcs:
  case Builtin::BI__builtin_subc:
  case Builtin::BI__builtin_subcl:
  case Builtin::BI__builtin_subcll:
    if (!interp__builtin_carryop(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_clz:
  case Builtin::BI__builtin_clzl:
  case Builtin::BI__builtin_clzll:
  case Builtin::BI__builtin_clzs:
  case Builtin::BI__builtin_clzg:
  case Builtin::BI__lzcnt16: // Microsoft variants of count leading-zeroes
  case Builtin::BI__lzcnt:
  case Builtin::BI__lzcnt64:
    if (!interp__builtin_clz(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_ctz:
  case Builtin::BI__builtin_ctzl:
  case Builtin::BI__builtin_ctzll:
  case Builtin::BI__builtin_ctzs:
  case Builtin::BI__builtin_ctzg:
    if (!interp__builtin_ctz(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_bswap16:
  case Builtin::BI__builtin_bswap32:
  case Builtin::BI__builtin_bswap64:
    if (!interp__builtin_bswap(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__atomic_always_lock_free:
  case Builtin::BI__atomic_is_lock_free:
  case Builtin::BI__c11_atomic_is_lock_free:
    if (!interp__builtin_atomic_lock_free(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_complex:
    if (!interp__builtin_complex(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_is_aligned:
  case Builtin::BI__builtin_align_up:
  case Builtin::BI__builtin_align_down:
    if (!interp__builtin_is_aligned_up_down(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_assume_aligned:
    if (!interp__builtin_assume_aligned(S, OpPC, Frame, F, Call))
      return false;
    break;

  case clang::X86::BI__builtin_ia32_bextr_u32:
  case clang::X86::BI__builtin_ia32_bextr_u64:
  case clang::X86::BI__builtin_ia32_bextri_u32:
  case clang::X86::BI__builtin_ia32_bextri_u64:
    if (!interp__builtin_ia32_bextr(S, OpPC, Frame, F, Call))
      return false;
    break;

  case clang::X86::BI__builtin_ia32_bzhi_si:
  case clang::X86::BI__builtin_ia32_bzhi_di:
    if (!interp__builtin_ia32_bzhi(S, OpPC, Frame, F, Call))
      return false;
    break;

  case clang::X86::BI__builtin_ia32_lzcnt_u16:
  case clang::X86::BI__builtin_ia32_lzcnt_u32:
  case clang::X86::BI__builtin_ia32_lzcnt_u64:
    if (!interp__builtin_ia32_lzcnt(S, OpPC, Frame, F, Call))
      return false;
    break;

  case clang::X86::BI__builtin_ia32_tzcnt_u16:
  case clang::X86::BI__builtin_ia32_tzcnt_u32:
  case clang::X86::BI__builtin_ia32_tzcnt_u64:
    if (!interp__builtin_ia32_tzcnt(S, OpPC, Frame, F, Call))
      return false;
    break;

  case clang::X86::BI__builtin_ia32_pdep_si:
  case clang::X86::BI__builtin_ia32_pdep_di:
    if (!interp__builtin_ia32_pdep(S, OpPC, Frame, F, Call))
      return false;
    break;

  case clang::X86::BI__builtin_ia32_pext_si:
  case clang::X86::BI__builtin_ia32_pext_di:
    if (!interp__builtin_ia32_pext(S, OpPC, Frame, F, Call))
      return false;
    break;
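  // Sketch of the BMI2 semantics handled above: pext gathers the value bits
  // selected by the mask into the low bits (e.g. pext(0b1010, 0b1010) ==
  // 0b11), and pdep scatters low bits back out to the mask positions.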

  case clang::X86::BI__builtin_ia32_addcarryx_u32:
  case clang::X86::BI__builtin_ia32_addcarryx_u64:
  case clang::X86::BI__builtin_ia32_subborrow_u32:
  case clang::X86::BI__builtin_ia32_subborrow_u64:
    if (!interp__builtin_ia32_addcarry_subborrow(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_os_log_format_buffer_size:
    if (!interp__builtin_os_log_format_buffer_size(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_ptrauth_string_discriminator:
    if (!interp__builtin_ptrauth_string_discriminator(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_constant_p:
    if (!interp__builtin_constant_p(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__noop:
    pushInteger(S, 0, Call->getType());
    break;

  case Builtin::BI__builtin_operator_new:
    if (!interp__builtin_operator_new(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_operator_delete:
    if (!interp__builtin_operator_delete(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__arithmetic_fence:
    if (!interp__builtin_arithmetic_fence(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_reduce_add:
  case Builtin::BI__builtin_reduce_mul:
  case Builtin::BI__builtin_reduce_and:
  case Builtin::BI__builtin_reduce_or:
  case Builtin::BI__builtin_reduce_xor:
    if (!interp__builtin_vector_reduce(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_elementwise_popcount:
    if (!interp__builtin_elementwise_popcount(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_memcpy:
  case Builtin::BImemcpy:
  case Builtin::BI__builtin_memmove:
  case Builtin::BImemmove:
    if (!interp__builtin_memcpy(S, OpPC, Frame, F, Call))
      return false;
    break;

  default:
    S.FFDiag(S.Current->getLocation(OpPC),
             diag::note_invalid_subexpr_in_const_expr)
        << S.Current->getRange(OpPC);

    return false;
  }

  return retPrimValue(S, OpPC, ReturnT);
}
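// Illustration for the offsetof evaluation below (a sketch, assuming a
// typical layout with 4-byte int alignment): for `struct S { char C; int I; }`,
// __builtin_offsetof(S, I) walks a single Field node and folds to 4.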

bool InterpretOffsetOf(InterpState &S, CodePtr OpPC, const OffsetOfExpr *E,
                       llvm::ArrayRef<int64_t> ArrayIndices,
                       int64_t &IntResult) {
  CharUnits Result;
  unsigned N = E->getNumComponents();
  assert(N > 0);

  unsigned ArrayIndex = 0;
  QualType CurrentType = E->getTypeSourceInfo()->getType();
  for (unsigned I = 0; I != N; ++I) {
    const OffsetOfNode &Node = E->getComponent(I);
    switch (Node.getKind()) {
    case OffsetOfNode::Field: {
      const FieldDecl *MemberDecl = Node.getField();
      const RecordType *RT = CurrentType->getAs<RecordType>();
      if (!RT)
        return false;
      const RecordDecl *RD = RT->getDecl();
      if (RD->isInvalidDecl())
        return false;
      const ASTRecordLayout &RL = S.getASTContext().getASTRecordLayout(RD);
      unsigned FieldIndex = MemberDecl->getFieldIndex();
      assert(FieldIndex < RL.getFieldCount() && "offsetof field in wrong type");
      Result +=
          S.getASTContext().toCharUnitsFromBits(RL.getFieldOffset(FieldIndex));
      CurrentType = MemberDecl->getType().getNonReferenceType();
      break;
    }
    case OffsetOfNode::Array: {
      // When generating bytecode, we put all the index expressions as Sint64
      // on the stack.
      int64_t Index = ArrayIndices[ArrayIndex];
      const ArrayType *AT = S.getASTContext().getAsArrayType(CurrentType);
      if (!AT)
        return false;
      CurrentType = AT->getElementType();
      CharUnits ElementSize = S.getASTContext().getTypeSizeInChars(CurrentType);
      Result += Index * ElementSize;
      ++ArrayIndex;
      break;
    }
    case OffsetOfNode::Base: {
      const CXXBaseSpecifier *BaseSpec = Node.getBase();
      if (BaseSpec->isVirtual())
        return false;

      // Find the layout of the class whose base we are looking into.
      const RecordType *RT = CurrentType->getAs<RecordType>();
      if (!RT)
        return false;
      const RecordDecl *RD = RT->getDecl();
      if (RD->isInvalidDecl())
        return false;
      const ASTRecordLayout &RL = S.getASTContext().getASTRecordLayout(RD);

      // Find the base class itself.
      CurrentType = BaseSpec->getType();
      const RecordType *BaseRT = CurrentType->getAs<RecordType>();
      if (!BaseRT)
        return false;

      // Add the offset to the base.
      Result += RL.getBaseClassOffset(cast<CXXRecordDecl>(BaseRT->getDecl()));
      break;
    }
    case OffsetOfNode::Identifier:
      llvm_unreachable("Dependent OffsetOfExpr?");
    }
  }

  IntResult = Result.getQuantity();

  return true;
}

bool SetThreeWayComparisonField(InterpState &S, CodePtr OpPC,
                                const Pointer &Ptr, const APSInt &IntValue) {
  const Record *R = Ptr.getRecord();
  assert(R);
  assert(R->getNumFields() == 1);

  unsigned FieldOffset = R->getField(0u)->Offset;
  const Pointer &FieldPtr = Ptr.atField(FieldOffset);
  PrimType FieldT = *S.getContext().classify(FieldPtr.getType());

  INT_TYPE_SWITCH(FieldT,
                  FieldPtr.deref<T>() = T::from(IntValue.getSExtValue()));
  FieldPtr.initialize();
  return true;
}
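// Note on the record-copy helpers below (a sketch): for unions they copy only
// the currently active field and activate it in the destination, so copying
// a `union { int A; float B; }` with A active leaves B inactive in the copy.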

static bool copyComposite(InterpState &S, CodePtr OpPC, const Pointer &Src,
                          Pointer &Dest, bool Activate);
static bool copyRecord(InterpState &S, CodePtr OpPC, const Pointer &Src,
                       Pointer &Dest, bool Activate = false) {
  [[maybe_unused]] const Descriptor *SrcDesc = Src.getFieldDesc();
  const Descriptor *DestDesc = Dest.getFieldDesc();

  auto copyField = [&](const Record::Field &F, bool Activate) -> bool {
    Pointer DestField = Dest.atField(F.Offset);
    if (std::optional<PrimType> FT = S.Ctx.classify(F.Decl->getType())) {
      TYPE_SWITCH(*FT, {
        DestField.deref<T>() = Src.atField(F.Offset).deref<T>();
        if (Src.atField(F.Offset).isInitialized())
          DestField.initialize();
        if (Activate)
          DestField.activate();
      });
      return true;
    }
    // Composite field.
    return copyComposite(S, OpPC, Src.atField(F.Offset), DestField, Activate);
  };

  assert(SrcDesc->isRecord());
  assert(SrcDesc->ElemRecord == DestDesc->ElemRecord);
  const Record *R = DestDesc->ElemRecord;
  for (const Record::Field &F : R->fields()) {
    if (R->isUnion()) {
      // For unions, only copy the active field.
      const Pointer &SrcField = Src.atField(F.Offset);
      if (SrcField.isActive()) {
        if (!copyField(F, /*Activate=*/true))
          return false;
      }
    } else {
      if (!copyField(F, Activate))
        return false;
    }
  }

  for (const Record::Base &B : R->bases()) {
    Pointer DestBase = Dest.atField(B.Offset);
    if (!copyRecord(S, OpPC, Src.atField(B.Offset), DestBase, Activate))
      return false;
  }

  Dest.initialize();
  return true;
}

static bool copyComposite(InterpState &S, CodePtr OpPC, const Pointer &Src,
                          Pointer &Dest, bool Activate = false) {
  assert(Src.isLive() && Dest.isLive());

  [[maybe_unused]] const Descriptor *SrcDesc = Src.getFieldDesc();
  const Descriptor *DestDesc = Dest.getFieldDesc();

  assert(!DestDesc->isPrimitive() && !SrcDesc->isPrimitive());

  if (DestDesc->isPrimitiveArray()) {
    assert(SrcDesc->isPrimitiveArray());
    assert(SrcDesc->getNumElems() == DestDesc->getNumElems());
    PrimType ET = DestDesc->getPrimType();
    for (unsigned I = 0, N = DestDesc->getNumElems(); I != N; ++I) {
      Pointer DestElem = Dest.atIndex(I);
      TYPE_SWITCH(ET, {
        DestElem.deref<T>() = Src.atIndex(I).deref<T>();
        DestElem.initialize();
      });
    }
    return true;
  }

  if (DestDesc->isRecord())
    return copyRecord(S, OpPC, Src, Dest, Activate);
  return Invalid(S, OpPC);
}

bool DoMemcpy(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest) {
  return copyComposite(S, OpPC, Src, Dest);
}

} // namespace interp
} // namespace clang