//===--- InterpBuiltin.cpp - Interpreter for the constexpr VM ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "../ExprConstShared.h"
#include "Boolean.h"
#include "Compiler.h"
#include "EvalEmitter.h"
#include "Interp.h"
#include "InterpBuiltinBitCast.h"
#include "PrimType.h"
#include "clang/AST/OSLog.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/Support/SipHash.h"

namespace clang {
namespace interp {

static unsigned callArgSize(const InterpState &S, const CallExpr *C) {
  unsigned O = 0;

  for (const Expr *E : C->arguments()) {
    O += align(primSize(*S.getContext().classify(E)));
  }

  return O;
}

template <typename T>
static T getParam(const InterpFrame *Frame, unsigned Index) {
  assert(Frame->getFunction()->getNumParams() > Index);
  unsigned Offset = Frame->getFunction()->getParamOffset(Index);
  return Frame->getParam<T>(Offset);
}

static APSInt getAPSIntParam(const InterpFrame *Frame, unsigned Index) {
  APSInt R;
  unsigned Offset = Frame->getFunction()->getParamOffset(Index);
  INT_TYPE_SWITCH(Frame->getFunction()->getParamType(Index),
                  R = Frame->getParam<T>(Offset).toAPSInt());
  return R;
}

static PrimType getIntPrimType(const InterpState &S) {
  const TargetInfo &TI = S.getASTContext().getTargetInfo();
  unsigned IntWidth = TI.getIntWidth();

  if (IntWidth == 32)
    return PT_Sint32;
  else if (IntWidth == 16)
    return PT_Sint16;
  llvm_unreachable("Int isn't 16 or 32 bit?");
}

static PrimType getLongPrimType(const InterpState &S) {
  const TargetInfo &TI = S.getASTContext().getTargetInfo();
  unsigned LongWidth = TI.getLongWidth();

  if (LongWidth == 64)
    return PT_Sint64;
  else if (LongWidth == 32)
    return PT_Sint32;
  else if (LongWidth == 16)
    return PT_Sint16;
  llvm_unreachable("long isn't 16, 32 or 64 bit?");
}

/// Peek an integer value from the stack into an APSInt.
static APSInt peekToAPSInt(InterpStack &Stk, PrimType T, size_t Offset = 0) {
  if (Offset == 0)
    Offset = align(primSize(T));

  APSInt R;
  INT_TYPE_SWITCH(T, R = Stk.peek<T>(Offset).toAPSInt());

  return R;
}

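// Worked example of the peek-offset convention used throughout this file
// (illustrative only): for a two-argument builtin whose evaluated arguments
// sit on the stack as [... Arg0, Arg1 <top>], the topmost argument is peeked
// at its own aligned size and the one below it at the accumulated sizes:
//   APSInt A1 = peekToAPSInt(Stk, T1); // defaults to align(primSize(T1))
//   APSInt A0 = peekToAPSInt(Stk, T0,
//                            align(primSize(T0)) + align(primSize(T1)));
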
/// Pushes \p Val on the stack as the type given by \p QT.
static void pushInteger(InterpState &S, const APSInt &Val, QualType QT) {
  assert(QT->isSignedIntegerOrEnumerationType() ||
         QT->isUnsignedIntegerOrEnumerationType());
  std::optional<PrimType> T = S.getContext().classify(QT);
  assert(T);

  unsigned BitWidth = S.getASTContext().getTypeSize(QT);
  if (QT->isSignedIntegerOrEnumerationType()) {
    int64_t V = Val.getSExtValue();
    INT_TYPE_SWITCH(*T, { S.Stk.push<T>(T::from(V, BitWidth)); });
  } else {
    assert(QT->isUnsignedIntegerOrEnumerationType());
    uint64_t V = Val.getZExtValue();
    INT_TYPE_SWITCH(*T, { S.Stk.push<T>(T::from(V, BitWidth)); });
  }
}

template <typename T>
static void pushInteger(InterpState &S, T Val, QualType QT) {
  if constexpr (std::is_same_v<T, APInt>)
    pushInteger(S, APSInt(Val, !std::is_signed_v<T>), QT);
  else if constexpr (std::is_same_v<T, APSInt>)
    pushInteger(S, Val, QT);
  else
    pushInteger(S,
                APSInt(APInt(sizeof(T) * 8, static_cast<uint64_t>(Val),
                             std::is_signed_v<T>),
                       !std::is_signed_v<T>),
                QT);
}

static void assignInteger(Pointer &Dest, PrimType ValueT, const APSInt &Value) {
  INT_TYPE_SWITCH_NO_BOOL(
      ValueT, { Dest.deref<T>() = T::from(static_cast<T>(Value)); });
}

static bool retPrimValue(InterpState &S, CodePtr OpPC,
                         std::optional<PrimType> &T) {
  if (!T)
    return RetVoid(S, OpPC);

#define RET_CASE(X)                                                            \
  case X:                                                                      \
    return Ret<X>(S, OpPC);
  switch (*T) {
    RET_CASE(PT_Ptr);
    RET_CASE(PT_FnPtr);
    RET_CASE(PT_Float);
    RET_CASE(PT_Bool);
    RET_CASE(PT_Sint8);
    RET_CASE(PT_Uint8);
    RET_CASE(PT_Sint16);
    RET_CASE(PT_Uint16);
    RET_CASE(PT_Sint32);
    RET_CASE(PT_Uint32);
    RET_CASE(PT_Sint64);
    RET_CASE(PT_Uint64);
    RET_CASE(PT_IntAP);
    RET_CASE(PT_IntAPS);
  default:
    llvm_unreachable("Unsupported return type for builtin function");
  }
#undef RET_CASE
}

static void diagnoseNonConstexprBuiltin(InterpState &S, CodePtr OpPC,
                                        unsigned ID) {
  auto Loc = S.Current->getSource(OpPC);
  if (S.getLangOpts().CPlusPlus11)
    S.CCEDiag(Loc, diag::note_constexpr_invalid_function)
        << /*isConstexpr=*/0 << /*isConstructor=*/0
        << ("'" + S.getASTContext().BuiltinInfo.getName(ID) + "'").str();
  else
    S.CCEDiag(Loc, diag::note_invalid_subexpr_in_const_expr);
}

static bool interp__builtin_is_constant_evaluated(InterpState &S, CodePtr OpPC,
                                                  const InterpFrame *Frame,
                                                  const CallExpr *Call) {
  unsigned Depth = S.Current->getDepth();
  auto isStdCall = [](const FunctionDecl *F) -> bool {
    return F && F->isInStdNamespace() && F->getIdentifier() &&
           F->getIdentifier()->isStr("is_constant_evaluated");
  };
  const InterpFrame *Caller = Frame->Caller;
  // The current frame is the one for __builtin_is_constant_evaluated.
  // The one above that, potentially the one for std::is_constant_evaluated().
  if (S.inConstantContext() && !S.checkingPotentialConstantExpression() &&
      S.getEvalStatus().Diag &&
      (Depth == 1 || (Depth == 2 && isStdCall(Caller->getCallee())))) {
    if (Caller->Caller && isStdCall(Caller->getCallee())) {
      const Expr *E = Caller->Caller->getExpr(Caller->getRetPC());
      S.report(E->getExprLoc(),
               diag::warn_is_constant_evaluated_always_true_constexpr)
          << "std::is_constant_evaluated" << E->getSourceRange();
    } else {
      const Expr *E = Frame->Caller->getExpr(Frame->getRetPC());
      S.report(E->getExprLoc(),
               diag::warn_is_constant_evaluated_always_true_constexpr)
          << "__builtin_is_constant_evaluated" << E->getSourceRange();
    }
  }

  S.Stk.push<Boolean>(Boolean::from(S.inConstantContext()));
  return true;
}

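// Illustrative use this handler enables (not part of this file):
//   constexpr int pick() { return __builtin_is_constant_evaluated() ? 1 : 2; }
//   static_assert(pick() == 1, ""); // constant context, so the builtin is true
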
static bool interp__builtin_strcmp(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const Function *Func, const CallExpr *Call) {
  unsigned ID = Func->getBuiltinID();
  const Pointer &A = getParam<Pointer>(Frame, 0);
  const Pointer &B = getParam<Pointer>(Frame, 1);

  if (ID == Builtin::BIstrcmp || ID == Builtin::BIstrncmp)
    diagnoseNonConstexprBuiltin(S, OpPC, ID);

  uint64_t Limit = ~static_cast<uint64_t>(0);
  if (ID == Builtin::BIstrncmp || ID == Builtin::BI__builtin_strncmp)
    Limit = peekToAPSInt(S.Stk, *S.getContext().classify(Call->getArg(2)))
                .getZExtValue();

  if (Limit == 0) {
    pushInteger(S, 0, Call->getType());
    return true;
  }

  if (!CheckLive(S, OpPC, A, AK_Read) || !CheckLive(S, OpPC, B, AK_Read))
    return false;

  if (A.isDummy() || B.isDummy())
    return false;

  assert(A.getFieldDesc()->isPrimitiveArray());
  assert(B.getFieldDesc()->isPrimitiveArray());

  unsigned IndexA = A.getIndex();
  unsigned IndexB = B.getIndex();
  int32_t Result = 0;
  uint64_t Steps = 0;
  for (;; ++IndexA, ++IndexB, ++Steps) {

    if (Steps >= Limit)
      break;
    const Pointer &PA = A.atIndex(IndexA);
    const Pointer &PB = B.atIndex(IndexB);
    if (!CheckRange(S, OpPC, PA, AK_Read) ||
        !CheckRange(S, OpPC, PB, AK_Read)) {
      return false;
    }
    uint8_t CA = PA.deref<uint8_t>();
    uint8_t CB = PB.deref<uint8_t>();

    if (CA > CB) {
      Result = 1;
      break;
    } else if (CA < CB) {
      Result = -1;
      break;
    }
    if (CA == 0 || CB == 0)
      break;
  }

  pushInteger(S, Result, Call->getType());
  return true;
}

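// Illustrative folds performed by the handler above (not part of this file):
//   static_assert(__builtin_strcmp("abc", "abd") == -1, "");
//   static_assert(__builtin_strncmp("abcde", "abcxx", 3) == 0, "");
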
static bool interp__builtin_strlen(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const Function *Func, const CallExpr *Call) {
  unsigned ID = Func->getBuiltinID();
  const Pointer &StrPtr = getParam<Pointer>(Frame, 0);

  if (ID == Builtin::BIstrlen || ID == Builtin::BIwcslen)
    diagnoseNonConstexprBuiltin(S, OpPC, ID);

  if (!CheckArray(S, OpPC, StrPtr))
    return false;

  if (!CheckLive(S, OpPC, StrPtr, AK_Read))
    return false;

  if (!CheckDummy(S, OpPC, StrPtr, AK_Read))
    return false;

  assert(StrPtr.getFieldDesc()->isPrimitiveArray());
  unsigned ElemSize = StrPtr.getFieldDesc()->getElemSize();

  if (ID == Builtin::BI__builtin_wcslen || ID == Builtin::BIwcslen) {
    [[maybe_unused]] const ASTContext &AC = S.getASTContext();
    assert(ElemSize == AC.getTypeSizeInChars(AC.getWCharType()).getQuantity());
  }

  size_t Len = 0;
  for (size_t I = StrPtr.getIndex();; ++I, ++Len) {
    const Pointer &ElemPtr = StrPtr.atIndex(I);

    if (!CheckRange(S, OpPC, ElemPtr, AK_Read))
      return false;

    uint32_t Val;
    switch (ElemSize) {
    case 1:
      Val = ElemPtr.deref<uint8_t>();
      break;
    case 2:
      Val = ElemPtr.deref<uint16_t>();
      break;
    case 4:
      Val = ElemPtr.deref<uint32_t>();
      break;
    default:
      llvm_unreachable("Unsupported char size");
    }
    if (Val == 0)
      break;
  }

  pushInteger(S, Len, Call->getType());

  return true;
}

static bool interp__builtin_nan(InterpState &S, CodePtr OpPC,
                                const InterpFrame *Frame, const Function *F,
                                bool Signaling) {
  const Pointer &Arg = getParam<Pointer>(Frame, 0);

  if (!CheckLoad(S, OpPC, Arg))
    return false;

  assert(Arg.getFieldDesc()->isPrimitiveArray());

  // Convert the given string to an integer using StringRef's API.
  llvm::APInt Fill;
  std::string Str;
  assert(Arg.getNumElems() >= 1);
  for (unsigned I = 0;; ++I) {
    const Pointer &Elem = Arg.atIndex(I);

    if (!CheckLoad(S, OpPC, Elem))
      return false;

    if (Elem.deref<int8_t>() == 0)
      break;

    Str += Elem.deref<char>();
  }

  // Treat empty strings as if they were zero.
  if (Str.empty())
    Fill = llvm::APInt(32, 0);
  else if (StringRef(Str).getAsInteger(0, Fill))
    return false;

  const llvm::fltSemantics &TargetSemantics =
      S.getASTContext().getFloatTypeSemantics(F->getDecl()->getReturnType());

  Floating Result;
  if (S.getASTContext().getTargetInfo().isNan2008()) {
    if (Signaling)
      Result = Floating(
          llvm::APFloat::getSNaN(TargetSemantics, /*Negative=*/false, &Fill));
    else
      Result = Floating(
          llvm::APFloat::getQNaN(TargetSemantics, /*Negative=*/false, &Fill));
  } else {
    // Prior to IEEE 754-2008, architectures were allowed to choose whether
    // the first bit of their significand was set for qNaN or sNaN. MIPS chose
    // a different encoding to what became a standard in 2008, and for pre-
    // 2008 revisions, MIPS interpreted sNaN-2008 as qNaN and qNaN-2008 as
    // sNaN. This is now known as "legacy NaN" encoding.
    if (Signaling)
      Result = Floating(
          llvm::APFloat::getQNaN(TargetSemantics, /*Negative=*/false, &Fill));
    else
      Result = Floating(
          llvm::APFloat::getSNaN(TargetSemantics, /*Negative=*/false, &Fill));
  }

  S.Stk.push<Floating>(Result);
  return true;
}

static bool interp__builtin_inf(InterpState &S, CodePtr OpPC,
                                const InterpFrame *Frame, const Function *F) {
  const llvm::fltSemantics &TargetSemantics =
      S.getASTContext().getFloatTypeSemantics(F->getDecl()->getReturnType());

  S.Stk.push<Floating>(Floating::getInf(TargetSemantics));
  return true;
}

static bool interp__builtin_copysign(InterpState &S, CodePtr OpPC,
                                     const InterpFrame *Frame,
                                     const Function *F) {
  const Floating &Arg1 = getParam<Floating>(Frame, 0);
  const Floating &Arg2 = getParam<Floating>(Frame, 1);

  APFloat Copy = Arg1.getAPFloat();
  Copy.copySign(Arg2.getAPFloat());
  S.Stk.push<Floating>(Floating(Copy));

  return true;
}

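// Illustrative folds (not part of this file):
//   static_assert(__builtin_strlen("clang") == 5, "");
//   static_assert(__builtin_copysign(1.0, -2.0) == -1.0, "");
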
static bool interp__builtin_fmin(InterpState &S, CodePtr OpPC,
                                 const InterpFrame *Frame, const Function *F,
                                 bool IsNumBuiltin) {
  const Floating &LHS = getParam<Floating>(Frame, 0);
  const Floating &RHS = getParam<Floating>(Frame, 1);

  Floating Result;

  if (IsNumBuiltin) {
    Result = llvm::minimumnum(LHS.getAPFloat(), RHS.getAPFloat());
  } else {
    // When comparing zeroes, return -0.0 if one of the zeroes is negative.
    if (LHS.isZero() && RHS.isZero() && RHS.isNegative())
      Result = RHS;
    else if (LHS.isNan() || RHS < LHS)
      Result = RHS;
    else
      Result = LHS;
  }

  S.Stk.push<Floating>(Result);
  return true;
}

static bool interp__builtin_fmax(InterpState &S, CodePtr OpPC,
                                 const InterpFrame *Frame, const Function *Func,
                                 bool IsNumBuiltin) {
  const Floating &LHS = getParam<Floating>(Frame, 0);
  const Floating &RHS = getParam<Floating>(Frame, 1);

  Floating Result;

  if (IsNumBuiltin) {
    Result = llvm::maximumnum(LHS.getAPFloat(), RHS.getAPFloat());
  } else {
    // When comparing zeroes, return +0.0 if one of the zeroes is positive.
    if (LHS.isZero() && RHS.isZero() && LHS.isNegative())
      Result = RHS;
    else if (LHS.isNan() || RHS > LHS)
      Result = RHS;
    else
      Result = LHS;
  }

  S.Stk.push<Floating>(Result);
  return true;
}

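// Illustrative folds, including the signed-zero special case above (not part
// of this file):
//   static_assert(__builtin_fmin(1.0, 2.0) == 1.0, "");
//   static_assert(__builtin_signbit(__builtin_fmin(0.0, -0.0)), "");
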
/// Defined as __builtin_isnan(...), to accommodate the fact that it can
/// take a float, double, long double, etc.
/// But for us, that's all a Floating anyway.
static bool interp__builtin_isnan(InterpState &S, CodePtr OpPC,
                                  const InterpFrame *Frame, const Function *F,
                                  const CallExpr *Call) {
  const Floating &Arg = S.Stk.peek<Floating>();

  pushInteger(S, Arg.isNan(), Call->getType());
  return true;
}

static bool interp__builtin_issignaling(InterpState &S, CodePtr OpPC,
                                        const InterpFrame *Frame,
                                        const Function *F,
                                        const CallExpr *Call) {
  const Floating &Arg = S.Stk.peek<Floating>();

  pushInteger(S, Arg.isSignaling(), Call->getType());
  return true;
}

static bool interp__builtin_isinf(InterpState &S, CodePtr OpPC,
                                  const InterpFrame *Frame, const Function *F,
                                  bool CheckSign, const CallExpr *Call) {
  const Floating &Arg = S.Stk.peek<Floating>();
  bool IsInf = Arg.isInf();

  if (CheckSign)
    pushInteger(S, IsInf ? (Arg.isNegative() ? -1 : 1) : 0, Call->getType());
  else
    pushInteger(S, Arg.isInf(), Call->getType());
  return true;
}

static bool interp__builtin_isfinite(InterpState &S, CodePtr OpPC,
                                     const InterpFrame *Frame,
                                     const Function *F, const CallExpr *Call) {
  const Floating &Arg = S.Stk.peek<Floating>();

  pushInteger(S, Arg.isFinite(), Call->getType());
  return true;
}

static bool interp__builtin_isnormal(InterpState &S, CodePtr OpPC,
                                     const InterpFrame *Frame,
                                     const Function *F, const CallExpr *Call) {
  const Floating &Arg = S.Stk.peek<Floating>();

  pushInteger(S, Arg.isNormal(), Call->getType());
  return true;
}

static bool interp__builtin_issubnormal(InterpState &S, CodePtr OpPC,
                                        const InterpFrame *Frame,
                                        const Function *F,
                                        const CallExpr *Call) {
  const Floating &Arg = S.Stk.peek<Floating>();

  pushInteger(S, Arg.isDenormal(), Call->getType());
  return true;
}

static bool interp__builtin_iszero(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame, const Function *F,
                                   const CallExpr *Call) {
  const Floating &Arg = S.Stk.peek<Floating>();

  pushInteger(S, Arg.isZero(), Call->getType());
  return true;
}

static bool interp__builtin_signbit(InterpState &S, CodePtr OpPC,
                                    const InterpFrame *Frame, const Function *F,
                                    const CallExpr *Call) {
  const Floating &Arg = S.Stk.peek<Floating>();

  pushInteger(S, Arg.isNegative(), Call->getType());
  return true;
}

static bool interp_floating_comparison(InterpState &S, CodePtr OpPC,
                                       const InterpFrame *Frame,
                                       const Function *F,
                                       const CallExpr *Call) {
  const Floating &RHS = S.Stk.peek<Floating>();
  const Floating &LHS = S.Stk.peek<Floating>(align(2u * primSize(PT_Float)));
  unsigned ID = F->getBuiltinID();

  pushInteger(
      S,
      [&] {
        switch (ID) {
        case Builtin::BI__builtin_isgreater:
          return LHS > RHS;
        case Builtin::BI__builtin_isgreaterequal:
          return LHS >= RHS;
        case Builtin::BI__builtin_isless:
          return LHS < RHS;
        case Builtin::BI__builtin_islessequal:
          return LHS <= RHS;
        case Builtin::BI__builtin_islessgreater: {
          ComparisonCategoryResult cmp = LHS.compare(RHS);
          return cmp == ComparisonCategoryResult::Less ||
                 cmp == ComparisonCategoryResult::Greater;
        }
        case Builtin::BI__builtin_isunordered:
          return LHS.compare(RHS) == ComparisonCategoryResult::Unordered;
        default:
          llvm_unreachable("Unexpected builtin ID: Should be a floating point "
                           "comparison function");
        }
      }(),
      Call->getType());
  return true;
}

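// Illustrative folds (not part of this file):
//   static_assert(__builtin_isnan(__builtin_nan("")), "");
//   static_assert(__builtin_isunordered(__builtin_nan(""), 1.0), "");
//   static_assert(__builtin_isless(1.0, 2.0), "");
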
/// First parameter to __builtin_isfpclass is the floating value, the
/// second one is an integral value.
static bool interp__builtin_isfpclass(InterpState &S, CodePtr OpPC,
                                      const InterpFrame *Frame,
                                      const Function *Func,
                                      const CallExpr *Call) {
  PrimType FPClassArgT = *S.getContext().classify(Call->getArg(1)->getType());
  APSInt FPClassArg = peekToAPSInt(S.Stk, FPClassArgT);
  const Floating &F =
      S.Stk.peek<Floating>(align(primSize(FPClassArgT) + primSize(PT_Float)));

  int32_t Result =
      static_cast<int32_t>((F.classify() & FPClassArg).getZExtValue());
  pushInteger(S, Result, Call->getType());

  return true;
}

/// Five int values followed by one floating value.
static bool interp__builtin_fpclassify(InterpState &S, CodePtr OpPC,
                                       const InterpFrame *Frame,
                                       const Function *Func,
                                       const CallExpr *Call) {
  const Floating &Val = S.Stk.peek<Floating>();

  unsigned Index;
  switch (Val.getCategory()) {
  case APFloat::fcNaN:
    Index = 0;
    break;
  case APFloat::fcInfinity:
    Index = 1;
    break;
  case APFloat::fcNormal:
    Index = Val.isDenormal() ? 3 : 2;
    break;
  case APFloat::fcZero:
    Index = 4;
    break;
  }

  // The last argument is first on the stack.
  assert(Index <= 4);
  unsigned IntSize = primSize(getIntPrimType(S));
  unsigned Offset =
      align(primSize(PT_Float)) + ((1 + (4 - Index)) * align(IntSize));

  APSInt I = peekToAPSInt(S.Stk, getIntPrimType(S), Offset);
  pushInteger(S, I, Call->getType());
  return true;
}

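// Illustrative fold: the first five arguments are the values returned for
// NaN, infinity, normal, subnormal and zero, respectively, so with the
// arguments 0..4 a normal value selects the third one (not part of this file):
//   static_assert(__builtin_fpclassify(0, 1, 2, 3, 4, 1.0) == 2, "");
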
// The C standard says "fabs raises no floating-point exceptions,
// even if x is a signaling NaN. The returned value is independent of
// the current rounding direction mode." Therefore constant folding can
// proceed without regard to the floating point settings.
// Reference, WG14 N2478 F.10.4.3
static bool interp__builtin_fabs(InterpState &S, CodePtr OpPC,
                                 const InterpFrame *Frame,
                                 const Function *Func) {
  const Floating &Val = getParam<Floating>(Frame, 0);

  S.Stk.push<Floating>(Floating::abs(Val));
  return true;
}

static bool interp__builtin_abs(InterpState &S, CodePtr OpPC,
                                const InterpFrame *Frame, const Function *Func,
                                const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Val = peekToAPSInt(S.Stk, ArgT);
  if (Val ==
      APSInt(APInt::getSignedMinValue(Val.getBitWidth()), /*IsUnsigned=*/false))
    return false;
  if (Val.isNegative())
    Val.negate();
  pushInteger(S, Val, Call->getType());
  return true;
}

static bool interp__builtin_popcount(InterpState &S, CodePtr OpPC,
                                     const InterpFrame *Frame,
                                     const Function *Func,
                                     const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Val = peekToAPSInt(S.Stk, ArgT);
  pushInteger(S, Val.popcount(), Call->getType());
  return true;
}

static bool interp__builtin_parity(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const Function *Func, const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Val = peekToAPSInt(S.Stk, ArgT);
  pushInteger(S, Val.popcount() % 2, Call->getType());
  return true;
}

static bool interp__builtin_clrsb(InterpState &S, CodePtr OpPC,
                                  const InterpFrame *Frame,
                                  const Function *Func, const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Val = peekToAPSInt(S.Stk, ArgT);
  pushInteger(S, Val.getBitWidth() - Val.getSignificantBits(), Call->getType());
  return true;
}

static bool interp__builtin_bitreverse(InterpState &S, CodePtr OpPC,
                                       const InterpFrame *Frame,
                                       const Function *Func,
                                       const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Val = peekToAPSInt(S.Stk, ArgT);
  pushInteger(S, Val.reverseBits(), Call->getType());
  return true;
}

static bool interp__builtin_classify_type(InterpState &S, CodePtr OpPC,
                                          const InterpFrame *Frame,
                                          const Function *Func,
                                          const CallExpr *Call) {
  // This is an unevaluated call, so there are no arguments on the stack.
  assert(Call->getNumArgs() == 1);
  const Expr *Arg = Call->getArg(0);

  GCCTypeClass ResultClass =
      EvaluateBuiltinClassifyType(Arg->getType(), S.getLangOpts());
  int32_t ReturnVal = static_cast<int32_t>(ResultClass);
  pushInteger(S, ReturnVal, Call->getType());
  return true;
}

// __builtin_expect(long, long)
// __builtin_expect_with_probability(long, long, double)
static bool interp__builtin_expect(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const Function *Func, const CallExpr *Call) {
  // The return value is simply the value of the first parameter.
  // We ignore the probability.
  unsigned NumArgs = Call->getNumArgs();
  assert(NumArgs == 2 || NumArgs == 3);

  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  unsigned Offset = align(primSize(getLongPrimType(S))) * 2;
  if (NumArgs == 3)
    Offset += align(primSize(PT_Float));

  APSInt Val = peekToAPSInt(S.Stk, ArgT, Offset);
  pushInteger(S, Val, Call->getType());
  return true;
}

/// rotateleft(value, amount)
static bool interp__builtin_rotate(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const Function *Func, const CallExpr *Call,
                                   bool Right) {
  PrimType AmountT = *S.getContext().classify(Call->getArg(1)->getType());
  PrimType ValueT = *S.getContext().classify(Call->getArg(0)->getType());

  APSInt Amount = peekToAPSInt(S.Stk, AmountT);
  APSInt Value = peekToAPSInt(
      S.Stk, ValueT, align(primSize(AmountT)) + align(primSize(ValueT)));

  APSInt Result;
  if (Right)
    Result = APSInt(Value.rotr(Amount.urem(Value.getBitWidth())),
                    /*IsUnsigned=*/true);
  else // Left.
    Result = APSInt(Value.rotl(Amount.urem(Value.getBitWidth())),
                    /*IsUnsigned=*/true);

  pushInteger(S, Result, Call->getType());
  return true;
}

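// Illustrative folds for the bit-manipulation builtins above, assuming a
// 32-bit int (not part of this file):
//   static_assert(__builtin_popcount(0xF0F0u) == 8, "");
//   static_assert(__builtin_parity(7u) == 1, "");
//   static_assert(__builtin_clrsb(0) == 31, "");
//   static_assert(__builtin_rotateleft8(0x01, 1) == 0x02, "");
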
static bool interp__builtin_ffs(InterpState &S, CodePtr OpPC,
                                const InterpFrame *Frame, const Function *Func,
                                const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Value = peekToAPSInt(S.Stk, ArgT);

  uint64_t N = Value.countr_zero();
  pushInteger(S, N == Value.getBitWidth() ? 0 : N + 1, Call->getType());
  return true;
}

static bool interp__builtin_addressof(InterpState &S, CodePtr OpPC,
                                      const InterpFrame *Frame,
                                      const Function *Func,
                                      const CallExpr *Call) {
  assert(Call->getArg(0)->isLValue());
  PrimType PtrT = S.getContext().classify(Call->getArg(0)).value_or(PT_Ptr);

  if (PtrT == PT_FnPtr) {
    const FunctionPointer &Arg = S.Stk.peek<FunctionPointer>();
    S.Stk.push<FunctionPointer>(Arg);
  } else if (PtrT == PT_Ptr) {
    const Pointer &Arg = S.Stk.peek<Pointer>();
    S.Stk.push<Pointer>(Arg);
  } else {
    assert(false && "Unsupported pointer type passed to __builtin_addressof()");
  }
  return true;
}

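// Illustrative folds (not part of this file):
//   static_assert(__builtin_ffs(0) == 0, "");
//   static_assert(__builtin_ffs(8) == 4, ""); // lowest set bit, 1-based
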
static bool interp__builtin_move(InterpState &S, CodePtr OpPC,
                                 const InterpFrame *Frame, const Function *Func,
                                 const CallExpr *Call) {

  PrimType ArgT = S.getContext().classify(Call->getArg(0)).value_or(PT_Ptr);

  TYPE_SWITCH(ArgT, const T &Arg = S.Stk.peek<T>(); S.Stk.push<T>(Arg););

  return Func->getDecl()->isConstexpr();
}

static bool interp__builtin_eh_return_data_regno(InterpState &S, CodePtr OpPC,
                                                 const InterpFrame *Frame,
                                                 const Function *Func,
                                                 const CallExpr *Call) {
  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt Arg = peekToAPSInt(S.Stk, ArgT);

  int Result = S.getASTContext().getTargetInfo().getEHDataRegisterNumber(
      Arg.getZExtValue());
  pushInteger(S, Result, Call->getType());
  return true;
}

/// Just takes the first Argument to the call and puts it on the stack.
static bool noopPointer(InterpState &S, CodePtr OpPC, const InterpFrame *Frame,
                        const Function *Func, const CallExpr *Call) {
  const Pointer &Arg = S.Stk.peek<Pointer>();
  S.Stk.push<Pointer>(Arg);
  return true;
}

// Two integral values followed by a pointer (lhs, rhs, resultOut)
static bool interp__builtin_overflowop(InterpState &S, CodePtr OpPC,
                                       const InterpFrame *Frame,
                                       const Function *Func,
                                       const CallExpr *Call) {
  Pointer &ResultPtr = S.Stk.peek<Pointer>();
  if (ResultPtr.isDummy())
    return false;

  unsigned BuiltinOp = Func->getBuiltinID();
  PrimType RHST = *S.getContext().classify(Call->getArg(1)->getType());
  PrimType LHST = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt RHS = peekToAPSInt(S.Stk, RHST,
                            align(primSize(PT_Ptr)) + align(primSize(RHST)));
  APSInt LHS = peekToAPSInt(S.Stk, LHST,
                            align(primSize(PT_Ptr)) + align(primSize(RHST)) +
                                align(primSize(LHST)));
  QualType ResultType = Call->getArg(2)->getType()->getPointeeType();
  PrimType ResultT = *S.getContext().classify(ResultType);
  bool Overflow;

  APSInt Result;
  if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
      BuiltinOp == Builtin::BI__builtin_sub_overflow ||
      BuiltinOp == Builtin::BI__builtin_mul_overflow) {
    bool IsSigned = LHS.isSigned() || RHS.isSigned() ||
                    ResultType->isSignedIntegerOrEnumerationType();
    bool AllSigned = LHS.isSigned() && RHS.isSigned() &&
                     ResultType->isSignedIntegerOrEnumerationType();
    uint64_t LHSSize = LHS.getBitWidth();
    uint64_t RHSSize = RHS.getBitWidth();
    uint64_t ResultSize = S.getASTContext().getTypeSize(ResultType);
    uint64_t MaxBits = std::max(std::max(LHSSize, RHSSize), ResultSize);

    // Add an additional bit if the signedness isn't uniformly agreed to. We
    // could do this ONLY if there is a signed and an unsigned that both have
    // MaxBits, but the code to check that is pretty nasty. The issue will be
    // caught in the shrink-to-result later anyway.
    if (IsSigned && !AllSigned)
      ++MaxBits;

    LHS = APSInt(LHS.extOrTrunc(MaxBits), !IsSigned);
    RHS = APSInt(RHS.extOrTrunc(MaxBits), !IsSigned);
    Result = APSInt(MaxBits, !IsSigned);
  }

  // Find largest int.
  switch (BuiltinOp) {
  default:
    llvm_unreachable("Invalid value for BuiltinOp");
  case Builtin::BI__builtin_add_overflow:
  case Builtin::BI__builtin_sadd_overflow:
  case Builtin::BI__builtin_saddl_overflow:
  case Builtin::BI__builtin_saddll_overflow:
  case Builtin::BI__builtin_uadd_overflow:
  case Builtin::BI__builtin_uaddl_overflow:
  case Builtin::BI__builtin_uaddll_overflow:
    Result = LHS.isSigned() ? LHS.sadd_ov(RHS, Overflow)
                            : LHS.uadd_ov(RHS, Overflow);
    break;
  case Builtin::BI__builtin_sub_overflow:
  case Builtin::BI__builtin_ssub_overflow:
  case Builtin::BI__builtin_ssubl_overflow:
  case Builtin::BI__builtin_ssubll_overflow:
  case Builtin::BI__builtin_usub_overflow:
  case Builtin::BI__builtin_usubl_overflow:
  case Builtin::BI__builtin_usubll_overflow:
    Result = LHS.isSigned() ? LHS.ssub_ov(RHS, Overflow)
                            : LHS.usub_ov(RHS, Overflow);
    break;
  case Builtin::BI__builtin_mul_overflow:
  case Builtin::BI__builtin_smul_overflow:
  case Builtin::BI__builtin_smull_overflow:
  case Builtin::BI__builtin_smulll_overflow:
  case Builtin::BI__builtin_umul_overflow:
  case Builtin::BI__builtin_umull_overflow:
  case Builtin::BI__builtin_umulll_overflow:
    Result = LHS.isSigned() ? LHS.smul_ov(RHS, Overflow)
                            : LHS.umul_ov(RHS, Overflow);
    break;
  }

  // In the case where multiple sizes are allowed, truncate and see if
  // the values are the same.
  if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
      BuiltinOp == Builtin::BI__builtin_sub_overflow ||
      BuiltinOp == Builtin::BI__builtin_mul_overflow) {
    // APSInt doesn't have a TruncOrSelf, so we use extOrTrunc instead,
    // since it will give us the behavior of a TruncOrSelf in the case where
    // its parameter <= its size. We previously set Result to be at least the
    // type-size of the result, so getTypeSize(ResultType) <=
    // Result.getBitWidth() here.
    APSInt Temp = Result.extOrTrunc(S.getASTContext().getTypeSize(ResultType));
    Temp.setIsSigned(ResultType->isSignedIntegerOrEnumerationType());

    if (!APSInt::isSameValue(Temp, Result))
      Overflow = true;
    Result = Temp;
  }

  // Write Result to ResultPtr and put Overflow on the stack.
  assignInteger(ResultPtr, ResultT, Result);
  ResultPtr.initialize();
  assert(Func->getDecl()->getReturnType()->isBooleanType());
  S.Stk.push<Boolean>(Overflow);
  return true;
}

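// Illustrative fold of the checked-arithmetic builtins (not part of this
// file):
//   constexpr bool addOverflows(int A, int B) {
//     int R = 0;
//     return __builtin_add_overflow(A, B, &R);
//   }
//   static_assert(addOverflows(__INT_MAX__, 1), "");
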
/// Three integral values followed by a pointer (lhs, rhs, carry, carryOut).
static bool interp__builtin_carryop(InterpState &S, CodePtr OpPC,
                                    const InterpFrame *Frame,
                                    const Function *Func,
                                    const CallExpr *Call) {
  unsigned BuiltinOp = Func->getBuiltinID();
  PrimType LHST = *S.getContext().classify(Call->getArg(0)->getType());
  PrimType RHST = *S.getContext().classify(Call->getArg(1)->getType());
  PrimType CarryT = *S.getContext().classify(Call->getArg(2)->getType());
  APSInt RHS = peekToAPSInt(S.Stk, RHST,
                            align(primSize(PT_Ptr)) + align(primSize(CarryT)) +
                                align(primSize(RHST)));
  APSInt LHS =
      peekToAPSInt(S.Stk, LHST,
                   align(primSize(PT_Ptr)) + align(primSize(RHST)) +
                       align(primSize(CarryT)) + align(primSize(LHST)));
  APSInt CarryIn = peekToAPSInt(
      S.Stk, LHST, align(primSize(PT_Ptr)) + align(primSize(CarryT)));
  APSInt CarryOut;

  APSInt Result;
  // Copy the number of bits and sign.
  Result = LHS;
  CarryOut = LHS;

  bool FirstOverflowed = false;
  bool SecondOverflowed = false;
  switch (BuiltinOp) {
  default:
    llvm_unreachable("Invalid value for BuiltinOp");
  case Builtin::BI__builtin_addcb:
  case Builtin::BI__builtin_addcs:
  case Builtin::BI__builtin_addc:
  case Builtin::BI__builtin_addcl:
  case Builtin::BI__builtin_addcll:
    Result =
        LHS.uadd_ov(RHS, FirstOverflowed).uadd_ov(CarryIn, SecondOverflowed);
    break;
  case Builtin::BI__builtin_subcb:
  case Builtin::BI__builtin_subcs:
  case Builtin::BI__builtin_subc:
  case Builtin::BI__builtin_subcl:
  case Builtin::BI__builtin_subcll:
    Result =
        LHS.usub_ov(RHS, FirstOverflowed).usub_ov(CarryIn, SecondOverflowed);
    break;
  }
  // It is possible for both overflows to happen but CGBuiltin uses an OR so
  // this is consistent.
  CarryOut = (uint64_t)(FirstOverflowed | SecondOverflowed);

  Pointer &CarryOutPtr = S.Stk.peek<Pointer>();
  QualType CarryOutType = Call->getArg(3)->getType()->getPointeeType();
  PrimType CarryOutT = *S.getContext().classify(CarryOutType);
  assignInteger(CarryOutPtr, CarryOutT, CarryOut);
  CarryOutPtr.initialize();

  assert(Call->getType() == Call->getArg(0)->getType());
  pushInteger(S, Result, Call->getType());
  return true;
}

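// Illustrative fold (not part of this file):
//   constexpr unsigned addWithCarry(unsigned A, unsigned B, unsigned CIn) {
//     unsigned COut = 0;
//     return __builtin_addc(A, B, CIn, &COut); // COut receives 0 or 1
//   }
//   static_assert(addWithCarry(~0u, 0u, 1u) == 0u, ""); // wraps, carry out 1
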
static bool interp__builtin_clz(InterpState &S, CodePtr OpPC,
                                const InterpFrame *Frame, const Function *Func,
                                const CallExpr *Call) {
  unsigned CallSize = callArgSize(S, Call);
  unsigned BuiltinOp = Func->getBuiltinID();
  PrimType ValT = *S.getContext().classify(Call->getArg(0));
  const APSInt &Val = peekToAPSInt(S.Stk, ValT, CallSize);

  // When the argument is 0, the result of GCC builtins is undefined, whereas
  // for Microsoft intrinsics, the result is the bit-width of the argument.
  bool ZeroIsUndefined = BuiltinOp != Builtin::BI__lzcnt16 &&
                         BuiltinOp != Builtin::BI__lzcnt &&
                         BuiltinOp != Builtin::BI__lzcnt64;

  if (Val == 0) {
    if (Func->getBuiltinID() == Builtin::BI__builtin_clzg &&
        Call->getNumArgs() == 2) {
      // We have a fallback parameter.
      PrimType FallbackT = *S.getContext().classify(Call->getArg(1));
      const APSInt &Fallback = peekToAPSInt(S.Stk, FallbackT);
      pushInteger(S, Fallback, Call->getType());
      return true;
    }

    if (ZeroIsUndefined)
      return false;
  }

  pushInteger(S, Val.countl_zero(), Call->getType());
  return true;
}

static bool interp__builtin_ctz(InterpState &S, CodePtr OpPC,
                                const InterpFrame *Frame, const Function *Func,
                                const CallExpr *Call) {
  unsigned CallSize = callArgSize(S, Call);
  PrimType ValT = *S.getContext().classify(Call->getArg(0));
  const APSInt &Val = peekToAPSInt(S.Stk, ValT, CallSize);

  if (Val == 0) {
    if (Func->getBuiltinID() == Builtin::BI__builtin_ctzg &&
        Call->getNumArgs() == 2) {
      // We have a fallback parameter.
      PrimType FallbackT = *S.getContext().classify(Call->getArg(1));
      const APSInt &Fallback = peekToAPSInt(S.Stk, FallbackT);
      pushInteger(S, Fallback, Call->getType());
      return true;
    }
    return false;
  }

  pushInteger(S, Val.countr_zero(), Call->getType());
  return true;
}

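// Illustrative folds, assuming 32-bit unsigned (not part of this file):
//   static_assert(__builtin_clz(1u) == 31, "");
//   static_assert(__builtin_ctz(8u) == 3, "");
//   static_assert(__builtin_clzg(0u, 7) == 7, ""); // fallback argument used
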
static bool interp__builtin_bswap(InterpState &S, CodePtr OpPC,
                                  const InterpFrame *Frame,
                                  const Function *Func, const CallExpr *Call) {
  PrimType ReturnT = *S.getContext().classify(Call->getType());
  PrimType ValT = *S.getContext().classify(Call->getArg(0));
  const APSInt &Val = peekToAPSInt(S.Stk, ValT);
  assert(Val.getActiveBits() <= 64);

  INT_TYPE_SWITCH(ReturnT,
                  { S.Stk.push<T>(T::from(Val.byteSwap().getZExtValue())); });
  return true;
}

/// bool __atomic_always_lock_free(size_t, void const volatile*)
/// bool __atomic_is_lock_free(size_t, void const volatile*)
/// bool __c11_atomic_is_lock_free(size_t)
static bool interp__builtin_atomic_lock_free(InterpState &S, CodePtr OpPC,
                                             const InterpFrame *Frame,
                                             const Function *Func,
                                             const CallExpr *Call) {
  unsigned BuiltinOp = Func->getBuiltinID();

  PrimType ValT = *S.getContext().classify(Call->getArg(0));
  unsigned SizeValOffset = 0;
  if (BuiltinOp != Builtin::BI__c11_atomic_is_lock_free)
    SizeValOffset = align(primSize(ValT)) + align(primSize(PT_Ptr));
  const APSInt &SizeVal = peekToAPSInt(S.Stk, ValT, SizeValOffset);

  auto returnBool = [&S](bool Value) -> bool {
    S.Stk.push<Boolean>(Value);
    return true;
  };

  // For __atomic_is_lock_free(sizeof(_Atomic(T))), if the size is a power
  // of two less than or equal to the maximum inline atomic width, we know it
  // is lock-free. If the size isn't a power of two, or greater than the
  // maximum alignment where we promote atomics, we know it is not lock-free
  // (at least not in the sense of atomic_is_lock_free). Otherwise,
  // the answer can only be determined at runtime; for example, 16-byte
  // atomics have lock-free implementations on some, but not all,
  // x86-64 processors.

  // Check power-of-two.
  CharUnits Size = CharUnits::fromQuantity(SizeVal.getZExtValue());
  if (Size.isPowerOfTwo()) {
    // Check against inlining width.
    unsigned InlineWidthBits =
        S.getASTContext().getTargetInfo().getMaxAtomicInlineWidth();
    if (Size <= S.getASTContext().toCharUnitsFromBits(InlineWidthBits)) {

      // OK, we will inline appropriately-aligned operations of this size,
      // and _Atomic(T) is appropriately-aligned.
      if (BuiltinOp == Builtin::BI__c11_atomic_is_lock_free ||
          Size == CharUnits::One())
        return returnBool(true);

      // Same for null pointers.
      assert(BuiltinOp != Builtin::BI__c11_atomic_is_lock_free);
      const Pointer &Ptr = S.Stk.peek<Pointer>();
      if (Ptr.isZero())
        return returnBool(true);

      if (Ptr.isIntegralPointer()) {
        uint64_t IntVal = Ptr.getIntegerRepresentation();
        if (APSInt(APInt(64, IntVal, false), true).isAligned(Size.getAsAlign()))
          return returnBool(true);
      }

      const Expr *PtrArg = Call->getArg(1);
      // Otherwise, check the type's alignment against Size.
      if (const auto *ICE = dyn_cast<ImplicitCastExpr>(PtrArg)) {
        // Drop the potential implicit-cast to 'const volatile void*', getting
        // the underlying type.
        if (ICE->getCastKind() == CK_BitCast)
          PtrArg = ICE->getSubExpr();
      }

      if (auto PtrTy = PtrArg->getType()->getAs<PointerType>()) {
        QualType PointeeType = PtrTy->getPointeeType();
        if (!PointeeType->isIncompleteType() &&
            S.getASTContext().getTypeAlignInChars(PointeeType) >= Size) {
          // OK, we will inline operations on this object.
          return returnBool(true);
        }
      }
    }
  }

  if (BuiltinOp == Builtin::BI__atomic_always_lock_free)
    return returnBool(false);

  return false;
}

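// Illustrative folds (the second one holds on common targets where int-sized
// atomics are inlined; not part of this file):
//   static_assert(__builtin_bswap32(0x11223344u) == 0x44332211u, "");
//   static_assert(__atomic_always_lock_free(sizeof(int), nullptr), "");
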
/// __builtin_complex(Float A, float B);
static bool interp__builtin_complex(InterpState &S, CodePtr OpPC,
                                    const InterpFrame *Frame,
                                    const Function *Func,
                                    const CallExpr *Call) {
  const Floating &Arg2 = S.Stk.peek<Floating>();
  const Floating &Arg1 = S.Stk.peek<Floating>(align(primSize(PT_Float)) * 2);
  Pointer &Result = S.Stk.peek<Pointer>(align(primSize(PT_Float)) * 2 +
                                        align(primSize(PT_Ptr)));

  Result.atIndex(0).deref<Floating>() = Arg1;
  Result.atIndex(0).initialize();
  Result.atIndex(1).deref<Floating>() = Arg2;
  Result.atIndex(1).initialize();
  Result.initialize();

  return true;
}

/// __builtin_is_aligned()
/// __builtin_align_up()
/// __builtin_align_down()
/// The first parameter is either an integer or a pointer.
/// The second parameter is the requested alignment as an integer.
static bool interp__builtin_is_aligned_up_down(InterpState &S, CodePtr OpPC,
                                               const InterpFrame *Frame,
                                               const Function *Func,
                                               const CallExpr *Call) {
  unsigned BuiltinOp = Func->getBuiltinID();
  unsigned CallSize = callArgSize(S, Call);

  PrimType AlignmentT = *S.Ctx.classify(Call->getArg(1));
  const APSInt &Alignment = peekToAPSInt(S.Stk, AlignmentT);

  if (Alignment < 0 || !Alignment.isPowerOf2()) {
    S.FFDiag(Call, diag::note_constexpr_invalid_alignment) << Alignment;
    return false;
  }
  unsigned SrcWidth = S.getASTContext().getIntWidth(Call->getArg(0)->getType());
  APSInt MaxValue(APInt::getOneBitSet(SrcWidth, SrcWidth - 1));
  if (APSInt::compareValues(Alignment, MaxValue) > 0) {
    S.FFDiag(Call, diag::note_constexpr_alignment_too_big)
        << MaxValue << Call->getArg(0)->getType() << Alignment;
    return false;
  }

  // The first parameter is either an integer or a pointer (but not a function
  // pointer).
  PrimType FirstArgT = *S.Ctx.classify(Call->getArg(0));

  if (isIntegralType(FirstArgT)) {
    const APSInt &Src = peekToAPSInt(S.Stk, FirstArgT, CallSize);
    APSInt Align = Alignment.extOrTrunc(Src.getBitWidth());
    if (BuiltinOp == Builtin::BI__builtin_align_up) {
      APSInt AlignedVal =
          APSInt((Src + (Align - 1)) & ~(Align - 1), Src.isUnsigned());
      pushInteger(S, AlignedVal, Call->getType());
    } else if (BuiltinOp == Builtin::BI__builtin_align_down) {
      APSInt AlignedVal = APSInt(Src & ~(Align - 1), Src.isUnsigned());
      pushInteger(S, AlignedVal, Call->getType());
    } else {
      assert(*S.Ctx.classify(Call->getType()) == PT_Bool);
      S.Stk.push<Boolean>((Src & (Align - 1)) == 0);
    }
    return true;
  }

  assert(FirstArgT == PT_Ptr);
  const Pointer &Ptr = S.Stk.peek<Pointer>(CallSize);

  unsigned PtrOffset = Ptr.getByteOffset();
  PtrOffset = Ptr.getIndex();
  CharUnits BaseAlignment =
      S.getASTContext().getDeclAlign(Ptr.getDeclDesc()->asValueDecl());
  CharUnits PtrAlign =
      BaseAlignment.alignmentAtOffset(CharUnits::fromQuantity(PtrOffset));

  if (BuiltinOp == Builtin::BI__builtin_is_aligned) {
    if (PtrAlign.getQuantity() >= Alignment) {
      S.Stk.push<Boolean>(true);
      return true;
    }
    // If the alignment is not known to be sufficient, some cases could still
    // be aligned at run time. However, if the requested alignment is less or
    // equal to the base alignment and the offset is not aligned, we know that
    // the run-time value can never be aligned.
    if (BaseAlignment.getQuantity() >= Alignment &&
        PtrAlign.getQuantity() < Alignment) {
      S.Stk.push<Boolean>(false);
      return true;
    }

    S.FFDiag(Call->getArg(0), diag::note_constexpr_alignment_compute)
        << Alignment;
    return false;
  }

  assert(BuiltinOp == Builtin::BI__builtin_align_down ||
         BuiltinOp == Builtin::BI__builtin_align_up);

  // For align_up/align_down, we can return the same value if the alignment
  // is known to be greater or equal to the requested value.
  if (PtrAlign.getQuantity() >= Alignment) {
    S.Stk.push<Pointer>(Ptr);
    return true;
  }

  // The alignment could be greater than the minimum at run-time, so we cannot
  // infer much about the resulting pointer value. One case is possible:
  // For `_Alignas(32) char buf[N]; __builtin_align_down(&buf[idx], 32)` we
  // can infer the correct index if the requested alignment is smaller than
  // the base alignment so we can perform the computation on the offset.
  if (BaseAlignment.getQuantity() >= Alignment) {
    assert(Alignment.getBitWidth() <= 64 &&
           "Cannot handle > 64-bit address-space");
    uint64_t Alignment64 = Alignment.getZExtValue();
    CharUnits NewOffset =
        CharUnits::fromQuantity(BuiltinOp == Builtin::BI__builtin_align_down
                                    ? llvm::alignDown(PtrOffset, Alignment64)
                                    : llvm::alignTo(PtrOffset, Alignment64));

    S.Stk.push<Pointer>(Ptr.atIndex(NewOffset.getQuantity()));
    return true;
  }

  // Otherwise, we cannot constant-evaluate the result.
  S.FFDiag(Call->getArg(0), diag::note_constexpr_alignment_adjust) << Alignment;
  return false;
}

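// Illustrative folds (not part of this file):
//   static_assert(__builtin_align_up(13, 8) == 16, "");
//   static_assert(__builtin_align_down(13, 8) == 8, "");
//   static_assert(__builtin_is_aligned(16, 8), "");
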
/// __builtin_assume_aligned(Ptr, Alignment[, ExtraOffset])
static bool interp__builtin_assume_aligned(InterpState &S, CodePtr OpPC,
                                           const InterpFrame *Frame,
                                           const Function *Func,
                                           const CallExpr *Call) {
  assert(Call->getNumArgs() == 2 || Call->getNumArgs() == 3);

  // Might be called with function pointers in C.
  std::optional<PrimType> PtrT = S.Ctx.classify(Call->getArg(0));
  if (PtrT != PT_Ptr)
    return false;

  unsigned ArgSize = callArgSize(S, Call);
  const Pointer &Ptr = S.Stk.peek<Pointer>(ArgSize);
  std::optional<APSInt> ExtraOffset;
  APSInt Alignment;
  if (Call->getNumArgs() == 2) {
    Alignment = peekToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(1)));
  } else {
    PrimType AlignmentT = *S.Ctx.classify(Call->getArg(1));
    PrimType ExtraOffsetT = *S.Ctx.classify(Call->getArg(2));
    Alignment = peekToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(1)),
                             align(primSize(AlignmentT)) +
                                 align(primSize(ExtraOffsetT)));
    ExtraOffset = peekToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(2)));
  }

  CharUnits Align = CharUnits::fromQuantity(Alignment.getZExtValue());

  // If there is a base object, then it must have the correct alignment.
  if (Ptr.isBlockPointer()) {
    CharUnits BaseAlignment;
    if (const auto *VD = Ptr.getDeclDesc()->asValueDecl())
      BaseAlignment = S.getASTContext().getDeclAlign(VD);
    else if (const auto *E = Ptr.getDeclDesc()->asExpr())
      BaseAlignment = GetAlignOfExpr(S.getASTContext(), E, UETT_AlignOf);

    if (BaseAlignment < Align) {
      S.CCEDiag(Call->getArg(0),
                diag::note_constexpr_baa_insufficient_alignment)
          << 0 << BaseAlignment.getQuantity() << Align.getQuantity();
      return false;
    }
  }

  APValue AV = Ptr.toAPValue(S.getASTContext());
  CharUnits AVOffset = AV.getLValueOffset();
  if (ExtraOffset)
    AVOffset -= CharUnits::fromQuantity(ExtraOffset->getZExtValue());
  if (AVOffset.alignTo(Align) != AVOffset) {
    if (Ptr.isBlockPointer())
      S.CCEDiag(Call->getArg(0),
                diag::note_constexpr_baa_insufficient_alignment)
          << 1 << AVOffset.getQuantity() << Align.getQuantity();
    else
      S.CCEDiag(Call->getArg(0),
                diag::note_constexpr_baa_value_insufficient_alignment)
          << AVOffset.getQuantity() << Align.getQuantity();
    return false;
  }

  S.Stk.push<Pointer>(Ptr);
  return true;
}

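// Illustrative fold (not part of this file): when the alignment can be proven,
// the pointer is passed through unchanged, e.g. for a buffer declared with
// alignas(16), __builtin_assume_aligned(Buf, 16) simply yields Buf again.
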
static bool interp__builtin_ia32_bextr(InterpState &S, CodePtr OpPC,
                                       const InterpFrame *Frame,
                                       const Function *Func,
                                       const CallExpr *Call) {
  if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() ||
      !Call->getArg(1)->getType()->isIntegerType())
    return false;

  PrimType ValT = *S.Ctx.classify(Call->getArg(0));
  PrimType IndexT = *S.Ctx.classify(Call->getArg(1));
  APSInt Val = peekToAPSInt(S.Stk, ValT,
                            align(primSize(ValT)) + align(primSize(IndexT)));
  APSInt Index = peekToAPSInt(S.Stk, IndexT);

  unsigned BitWidth = Val.getBitWidth();
  uint64_t Shift = Index.extractBitsAsZExtValue(8, 0);
  uint64_t Length = Index.extractBitsAsZExtValue(8, 8);
  Length = Length > BitWidth ? BitWidth : Length;

  // Handle out of bounds cases.
  if (Length == 0 || Shift >= BitWidth) {
    pushInteger(S, 0, Call->getType());
    return true;
  }

  uint64_t Result = Val.getZExtValue() >> Shift;
  Result &= llvm::maskTrailingOnes<uint64_t>(Length);
  pushInteger(S, Result, Call->getType());
  return true;
}

static bool interp__builtin_ia32_bzhi(InterpState &S, CodePtr OpPC,
                                      const InterpFrame *Frame,
                                      const Function *Func,
                                      const CallExpr *Call) {
  QualType CallType = Call->getType();
  if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() ||
      !Call->getArg(1)->getType()->isIntegerType() ||
      !CallType->isIntegerType())
    return false;

  PrimType ValT = *S.Ctx.classify(Call->getArg(0));
  PrimType IndexT = *S.Ctx.classify(Call->getArg(1));

  APSInt Val = peekToAPSInt(S.Stk, ValT,
                            align(primSize(ValT)) + align(primSize(IndexT)));
  APSInt Idx = peekToAPSInt(S.Stk, IndexT);

  unsigned BitWidth = Val.getBitWidth();
  uint64_t Index = Idx.extractBitsAsZExtValue(8, 0);

  if (Index < BitWidth)
    Val.clearHighBits(BitWidth - Index);

  pushInteger(S, Val, CallType);
  return true;
}

static bool interp__builtin_ia32_lzcnt(InterpState &S, CodePtr OpPC,
                                       const InterpFrame *Frame,
                                       const Function *Func,
                                       const CallExpr *Call) {
  QualType CallType = Call->getType();
  if (!CallType->isIntegerType() ||
      !Call->getArg(0)->getType()->isIntegerType())
    return false;

  APSInt Val = peekToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(0)));
  pushInteger(S, Val.countLeadingZeros(), CallType);
  return true;
}

static bool interp__builtin_ia32_tzcnt(InterpState &S, CodePtr OpPC,
                                       const InterpFrame *Frame,
                                       const Function *Func,
                                       const CallExpr *Call) {
  QualType CallType = Call->getType();
  if (!CallType->isIntegerType() ||
      !Call->getArg(0)->getType()->isIntegerType())
    return false;

  APSInt Val = peekToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(0)));
  pushInteger(S, Val.countTrailingZeros(), CallType);
  return true;
}

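// Illustrative folds of the x86 intrinsics above (only available on targets
// that provide them; not part of this file):
//   // Control word 0x0804: start = 4 (low byte), length = 8 (second byte).
//   static_assert(__builtin_ia32_bextr_u32(0x12345678u, 0x0804u) == 0x67, "");
//   static_assert(__builtin_ia32_bzhi_si(0xFFFFFFFFu, 8) == 0xFFu, "");
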
static bool interp__builtin_ia32_pdep(InterpState &S, CodePtr OpPC,
                                      const InterpFrame *Frame,
                                      const Function *Func,
                                      const CallExpr *Call) {
  if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() ||
      !Call->getArg(1)->getType()->isIntegerType())
    return false;

  PrimType ValT = *S.Ctx.classify(Call->getArg(0));
  PrimType MaskT = *S.Ctx.classify(Call->getArg(1));

  APSInt Val =
      peekToAPSInt(S.Stk, ValT, align(primSize(ValT)) + align(primSize(MaskT)));
  APSInt Mask = peekToAPSInt(S.Stk, MaskT);

  unsigned BitWidth = Val.getBitWidth();
  APInt Result = APInt::getZero(BitWidth);
  for (unsigned I = 0, P = 0; I != BitWidth; ++I) {
    if (Mask[I])
      Result.setBitVal(I, Val[P++]);
  }
  pushInteger(S, Result, Call->getType());
  return true;
}

static bool interp__builtin_ia32_pext(InterpState &S, CodePtr OpPC,
                                      const InterpFrame *Frame,
                                      const Function *Func,
                                      const CallExpr *Call) {
  if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() ||
      !Call->getArg(1)->getType()->isIntegerType())
    return false;

  PrimType ValT = *S.Ctx.classify(Call->getArg(0));
  PrimType MaskT = *S.Ctx.classify(Call->getArg(1));

  APSInt Val =
      peekToAPSInt(S.Stk, ValT, align(primSize(ValT)) + align(primSize(MaskT)));
  APSInt Mask = peekToAPSInt(S.Stk, MaskT);

  unsigned BitWidth = Val.getBitWidth();
  APInt Result = APInt::getZero(BitWidth);
  for (unsigned I = 0, P = 0; I != BitWidth; ++I) {
    if (Mask[I])
      Result.setBitVal(P++, Val[I]);
  }
  pushInteger(S, Result, Call->getType());
  return true;
}

static bool interp__builtin_ia32_addcarry_subborrow(InterpState &S,
                                                    CodePtr OpPC,
                                                    const InterpFrame *Frame,
                                                    const Function *Func,
                                                    const CallExpr *Call) {
  if (Call->getNumArgs() != 4 || !Call->getArg(0)->getType()->isIntegerType() ||
      !Call->getArg(1)->getType()->isIntegerType() ||
      !Call->getArg(2)->getType()->isIntegerType())
    return false;

  unsigned BuiltinOp = Func->getBuiltinID();
  APSInt CarryIn = getAPSIntParam(Frame, 0);
  APSInt LHS = getAPSIntParam(Frame, 1);
  APSInt RHS = getAPSIntParam(Frame, 2);

  bool IsAdd = BuiltinOp == clang::X86::BI__builtin_ia32_addcarryx_u32 ||
               BuiltinOp == clang::X86::BI__builtin_ia32_addcarryx_u64;

  unsigned BitWidth = LHS.getBitWidth();
  unsigned CarryInBit = CarryIn.ugt(0) ? 1 : 0;
  APInt ExResult =
      IsAdd ? (LHS.zext(BitWidth + 1) + (RHS.zext(BitWidth + 1) + CarryInBit))
            : (LHS.zext(BitWidth + 1) - (RHS.zext(BitWidth + 1) + CarryInBit));

  APInt Result = ExResult.extractBits(BitWidth, 0);
  APSInt CarryOut =
      APSInt(ExResult.extractBits(1, BitWidth), /*IsUnsigned=*/true);

  Pointer &CarryOutPtr = S.Stk.peek<Pointer>();
  QualType CarryOutType = Call->getArg(3)->getType()->getPointeeType();
  PrimType CarryOutT = *S.getContext().classify(CarryOutType);
  assignInteger(CarryOutPtr, CarryOutT, APSInt(Result, true));

  pushInteger(S, CarryOut, Call->getType());

  return true;
}

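// Illustrative folds (BMI2 targets; not part of this file):
//   static_assert(__builtin_ia32_pext_si(0x12345678u, 0x0000FF00u) == 0x56, "");
//   static_assert(__builtin_ia32_pdep_si(0x56u, 0x0000FF00u) == 0x5600, "");
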
static bool interp__builtin_os_log_format_buffer_size(InterpState &S,
                                                      CodePtr OpPC,
                                                      const InterpFrame *Frame,
                                                      const Function *Func,
                                                      const CallExpr *Call) {
  analyze_os_log::OSLogBufferLayout Layout;
  analyze_os_log::computeOSLogBufferLayout(S.getASTContext(), Call, Layout);
  pushInteger(S, Layout.size().getQuantity(), Call->getType());
  return true;
}

static bool interp__builtin_ptrauth_string_discriminator(
    InterpState &S, CodePtr OpPC, const InterpFrame *Frame,
    const Function *Func, const CallExpr *Call) {
  const auto &Ptr = S.Stk.peek<Pointer>();
  assert(Ptr.getFieldDesc()->isPrimitiveArray());

  StringRef R(&Ptr.deref<char>(), Ptr.getFieldDesc()->getNumElems() - 1);
  uint64_t Result = getPointerAuthStableSipHash(R);
  pushInteger(S, Result, Call->getType());
  return true;
}

// FIXME: This implementation is not complete.
// The Compiler instance we create cannot access the current stack frame, local
// variables, function parameters, etc. We also need protection from
// side-effects, fatal errors, etc.
static bool interp__builtin_constant_p(InterpState &S, CodePtr OpPC,
                                       const InterpFrame *Frame,
                                       const Function *Func,
                                       const CallExpr *Call) {
  const Expr *Arg = Call->getArg(0);
  QualType ArgType = Arg->getType();

  auto returnInt = [&S, Call](bool Value) -> bool {
    pushInteger(S, Value, Call->getType());
    return true;
  };

  // __builtin_constant_p always has one operand. The rules which gcc follows
  // are not precisely documented, but are as follows:
  //
  //  - If the operand is of integral, floating, complex or enumeration type,
  //    and can be folded to a known value of that type, it returns 1.
  //  - If the operand can be folded to a pointer to the first character
  //    of a string literal (or such a pointer cast to an integral type)
  //    or to a null pointer or an integer cast to a pointer, it returns 1.
  //
  // Otherwise, it returns 0.
  //
  // FIXME: GCC also intends to return 1 for literals of aggregate types, but
  // its support for this did not work prior to GCC 9 and is not yet well
  // understood.
  if (ArgType->isIntegralOrEnumerationType() || ArgType->isFloatingType() ||
      ArgType->isAnyComplexType() || ArgType->isPointerType() ||
      ArgType->isNullPtrType()) {
    InterpStack Stk;
    Compiler<EvalEmitter> C(S.Ctx, S.P, S, Stk);
    auto Res = C.interpretExpr(Arg, /*ConvertResultToRValue=*/Arg->isGLValue());
    if (Res.isInvalid()) {
      C.cleanup();
      Stk.clear();
    }

    if (!Res.isInvalid() && !Res.empty()) {
      const APValue &LV = Res.toAPValue();
      if (LV.isLValue()) {
        APValue::LValueBase Base = LV.getLValueBase();
        if (Base.isNull()) {
          // A null base is acceptable.
          return returnInt(true);
        } else if (const auto *E = Base.dyn_cast<const Expr *>()) {
          if (!isa<StringLiteral>(E))
            return returnInt(false);
          return returnInt(LV.getLValueOffset().isZero());
        } else if (Base.is<TypeInfoLValue>()) {
          // Surprisingly, GCC considers __builtin_constant_p(&typeid(int)) to
          // evaluate to true.
          return returnInt(true);
        } else {
          // Any other base is not constant enough for GCC.
          return returnInt(false);
        }
      }
    }

    // Otherwise, any constant value is good enough.
    return returnInt(true);
  }

  return returnInt(false);
}

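// Illustrative folds (not part of this file):
//   static_assert(__builtin_constant_p(42), "");
//   static_assert(__builtin_constant_p("abc"), "");
//   // The address of an ordinary global is not constant enough: for
//   // `extern int G;`, __builtin_constant_p(&G) evaluates to 0.
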
static bool interp__builtin_operator_new(InterpState &S, CodePtr OpPC,
                                         const InterpFrame *Frame,
                                         const Function *Func,
                                         const CallExpr *Call) {
  // A call to __operator_new is only valid within std::allocator<T>::allocate.
  // Walk up the call stack to find the appropriate caller and get the
  // element type from it.
  QualType ElemType;

  for (const InterpFrame *F = Frame; F; F = F->Caller) {
    const Function *Func = F->getFunction();
    if (!Func)
      continue;
    const auto *MD = dyn_cast_if_present<CXXMethodDecl>(Func->getDecl());
    if (!MD)
      continue;
    const IdentifierInfo *FnII = MD->getIdentifier();
    if (!FnII || !FnII->isStr("allocate"))
      continue;

    const auto *CTSD =
        dyn_cast<ClassTemplateSpecializationDecl>(MD->getParent());
    if (!CTSD)
      continue;

    const IdentifierInfo *ClassII = CTSD->getIdentifier();
    const TemplateArgumentList &TAL = CTSD->getTemplateArgs();
    if (CTSD->isInStdNamespace() && ClassII && ClassII->isStr("allocator") &&
        TAL.size() >= 1 && TAL[0].getKind() == TemplateArgument::Type) {
      ElemType = TAL[0].getAsType();
      break;
    }
  }

  if (ElemType.isNull()) {
    S.FFDiag(Call, S.getLangOpts().CPlusPlus20
                       ? diag::note_constexpr_new_untyped
                       : diag::note_constexpr_new);
    return false;
  }

  if (ElemType->isIncompleteType() || ElemType->isFunctionType()) {
    S.FFDiag(Call, diag::note_constexpr_new_not_complete_object_type)
        << (ElemType->isIncompleteType() ? 0 : 1) << ElemType;
    return false;
  }

  APSInt Bytes = peekToAPSInt(S.Stk, *S.getContext().classify(Call->getArg(0)));
  CharUnits ElemSize = S.getASTContext().getTypeSizeInChars(ElemType);
  assert(!ElemSize.isZero());
  // Divide the number of bytes by sizeof(ElemType), so we get the number of
  // elements we should allocate.
  APInt NumElems, Remainder;
  APInt ElemSizeAP(Bytes.getBitWidth(), ElemSize.getQuantity());
  APInt::udivrem(Bytes, ElemSizeAP, NumElems, Remainder);
  if (Remainder != 0) {
    // This likely indicates a bug in the implementation of 'std::allocator'.
    S.FFDiag(Call, diag::note_constexpr_operator_new_bad_size)
        << Bytes << APSInt(ElemSizeAP, true) << ElemType;
    return false;
  }

  // NB: This is the same check we use in CheckArraySize().
  if (NumElems.getActiveBits() >
          ConstantArrayType::getMaxSizeBits(S.getASTContext()) ||
      NumElems.ugt(Descriptor::MaxArrayElemBytes / ElemSize.getQuantity())) {
    // FIXME: NoThrow check?
    const SourceInfo &Loc = S.Current->getSource(OpPC);
    S.FFDiag(Loc, diag::note_constexpr_new_too_large)
        << NumElems.getZExtValue();
    return false;
  }

  std::optional<PrimType> ElemT = S.getContext().classify(ElemType);
  DynamicAllocator &Allocator = S.getAllocator();
  if (ElemT) {
    if (NumElems.ule(1)) {
      const Descriptor *Desc =
          S.P.createDescriptor(Call, *ElemT, Descriptor::InlineDescMD,
                               /*IsConst=*/false, /*IsTemporary=*/false,
                               /*IsMutable=*/false);
      Block *B = Allocator.allocate(Desc, S.getContext().getEvalID(),
                                    DynamicAllocator::Form::Operator);
      assert(B);

      S.Stk.push<Pointer>(B);
      return true;
    }
    assert(NumElems.ugt(1));

    Block *B =
        Allocator.allocate(Call, *ElemT, NumElems.getZExtValue(),
                           S.Ctx.getEvalID(), DynamicAllocator::Form::Operator);
    assert(B);
    S.Stk.push<Pointer>(B);
    return true;
  }

  assert(!ElemT);
  // Structs etc.
  const Descriptor *Desc = S.P.createDescriptor(
      Call, ElemType.getTypePtr(), Descriptor::InlineDescMD,
      /*IsConst=*/false, /*IsTemporary=*/false, /*IsMutable=*/false,
      /*Init=*/nullptr);

  if (NumElems.ule(1)) {
    Block *B = Allocator.allocate(Desc, S.getContext().getEvalID(),
                                  DynamicAllocator::Form::Operator);
    assert(B);
    S.Stk.push<Pointer>(B);
    return true;
  }

  Block *B =
      Allocator.allocate(Desc, NumElems.getZExtValue(), S.Ctx.getEvalID(),
                         DynamicAllocator::Form::Operator);
  assert(B);
  S.Stk.push<Pointer>(B);
  return true;
}

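// Sketch of the double-delete case diagnosed below (hypothetical snippet):
//   std::allocator<int> A;
//   int *P = A.allocate(1);
//   A.deallocate(P, 1);
//   A.deallocate(P, 1); // nothing left to deallocate -> double-delete note
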
static bool interp__builtin_operator_delete(InterpState &S, CodePtr OpPC,
                                            const InterpFrame *Frame,
                                            const Function *Func,
                                            const CallExpr *Call) {
  const Expr *Source = nullptr;
  const Block *BlockToDelete = nullptr;

  {
    const Pointer &Ptr = S.Stk.peek<Pointer>();

    if (Ptr.isZero()) {
      S.CCEDiag(Call, diag::note_constexpr_deallocate_null);
      return true;
    }

    Source = Ptr.getDeclDesc()->asExpr();
    BlockToDelete = Ptr.block();
  }
  assert(BlockToDelete);

  DynamicAllocator &Allocator = S.getAllocator();
  const Descriptor *BlockDesc = BlockToDelete->getDescriptor();
  std::optional<DynamicAllocator::Form> AllocForm =
      Allocator.getAllocationForm(Source);

  if (!Allocator.deallocate(Source, BlockToDelete, S)) {
    // Nothing has been deallocated; this must be a double-delete.
    const SourceInfo &Loc = S.Current->getSource(OpPC);
    S.FFDiag(Loc, diag::note_constexpr_double_delete);
    return false;
  }
  assert(AllocForm);

  return CheckNewDeleteForms(
      S, OpPC, *AllocForm, DynamicAllocator::Form::Operator, BlockDesc, Source);
}

static bool interp__builtin_arithmetic_fence(InterpState &S, CodePtr OpPC,
                                             const InterpFrame *Frame,
                                             const Function *Func,
                                             const CallExpr *Call) {
  const Floating &Arg0 = S.Stk.peek<Floating>();
  S.Stk.push<Floating>(Arg0);
  return true;
}

static bool interp__builtin_vector_reduce(InterpState &S, CodePtr OpPC,
                                          const InterpFrame *Frame,
                                          const Function *Func,
                                          const CallExpr *Call) {
  const Pointer &Arg = S.Stk.peek<Pointer>();
  assert(Arg.getFieldDesc()->isPrimitiveArray());

  unsigned ID = Func->getBuiltinID();
  QualType ElemType = Arg.getFieldDesc()->getElemQualType();
  assert(Call->getType() == ElemType);
  PrimType ElemT = *S.getContext().classify(ElemType);
  unsigned NumElems = Arg.getNumElems();

  INT_TYPE_SWITCH_NO_BOOL(ElemT, {
    T Result = Arg.atIndex(0).deref<T>();
    unsigned BitWidth = Result.bitWidth();
    for (unsigned I = 1; I != NumElems; ++I) {
      T Elem = Arg.atIndex(I).deref<T>();
      T PrevResult = Result;

      if (ID == Builtin::BI__builtin_reduce_add) {
        if (T::add(Result, Elem, BitWidth, &Result)) {
          unsigned OverflowBits = BitWidth + 1;
          (void)handleOverflow(S, OpPC,
                               (PrevResult.toAPSInt(OverflowBits) +
                                Elem.toAPSInt(OverflowBits)));
          return false;
        }
      } else if (ID == Builtin::BI__builtin_reduce_mul) {
        if (T::mul(Result, Elem, BitWidth, &Result)) {
          unsigned OverflowBits = BitWidth * 2;
          (void)handleOverflow(S, OpPC,
                               (PrevResult.toAPSInt(OverflowBits) *
                                Elem.toAPSInt(OverflowBits)));
          return false;
        }

      } else if (ID == Builtin::BI__builtin_reduce_and) {
        (void)T::bitAnd(Result, Elem, BitWidth, &Result);
      } else if (ID == Builtin::BI__builtin_reduce_or) {
        (void)T::bitOr(Result, Elem, BitWidth, &Result);
      } else if (ID == Builtin::BI__builtin_reduce_xor) {
        (void)T::bitXor(Result, Elem, BitWidth, &Result);
      } else {
        llvm_unreachable("Unhandled vector reduce builtin");
      }
    }
    pushInteger(S, Result.toAPSInt(), Call->getType());
  });

  return true;
}

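// Illustrative folding (assuming Clang's vector extension syntax):
//   using v4i = int __attribute__((vector_size(16)));
//   constexpr v4i V = {1, 2, 3, 4};
//   static_assert(__builtin_reduce_add(V) == 10);
//   static_assert(__builtin_reduce_xor(V) == 4); // 1 ^ 2 ^ 3 ^ 4
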
/// Can be called with an integer or vector as the first and only parameter.
static bool interp__builtin_elementwise_popcount(InterpState &S, CodePtr OpPC,
                                                 const InterpFrame *Frame,
                                                 const Function *Func,
                                                 const CallExpr *Call) {
  assert(Call->getNumArgs() == 1);
  if (Call->getArg(0)->getType()->isIntegerType()) {
    PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
    APSInt Val = peekToAPSInt(S.Stk, ArgT);
    pushInteger(S, Val.popcount(), Call->getType());
    return true;
  }
  // Otherwise, the argument must be a vector.
  assert(Call->getArg(0)->getType()->isVectorType());
  const Pointer &Arg = S.Stk.peek<Pointer>();
  assert(Arg.getFieldDesc()->isPrimitiveArray());
  const Pointer &Dst = S.Stk.peek<Pointer>(primSize(PT_Ptr) * 2);
  assert(Dst.getFieldDesc()->isPrimitiveArray());
  assert(Arg.getFieldDesc()->getNumElems() ==
         Dst.getFieldDesc()->getNumElems());

  QualType ElemType = Arg.getFieldDesc()->getElemQualType();
  PrimType ElemT = *S.getContext().classify(ElemType);
  unsigned NumElems = Arg.getNumElems();

  // FIXME: Reading from uninitialized vector elements?
  for (unsigned I = 0; I != NumElems; ++I) {
    INT_TYPE_SWITCH_NO_BOOL(ElemT, {
      Dst.atIndex(I).deref<T>() =
          T::from(Arg.atIndex(I).deref<T>().toAPSInt().popcount());
      Dst.atIndex(I).initialize();
    });
  }

  return true;
}

static bool interp__builtin_memcpy(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const Function *Func, const CallExpr *Call) {
  assert(Call->getNumArgs() == 3);
  unsigned ID = Func->getBuiltinID();
  Pointer DestPtr = getParam<Pointer>(Frame, 0);
  const Pointer &SrcPtr = getParam<Pointer>(Frame, 1);
  const APSInt &Size =
      peekToAPSInt(S.Stk, *S.getContext().classify(Call->getArg(2)));
  assert(!Size.isSigned() && "memcpy and friends take an unsigned size");

  if (ID == Builtin::BImemcpy || ID == Builtin::BImemmove)
    diagnoseNonConstexprBuiltin(S, OpPC, ID);

  bool Move = (ID == Builtin::BI__builtin_memmove || ID == Builtin::BImemmove);

  // If the size is zero, we treat this as always being a valid no-op.
  if (Size.isZero()) {
    S.Stk.push<Pointer>(DestPtr);
    return true;
  }

  if (SrcPtr.isZero() || DestPtr.isZero()) {
    Pointer DiagPtr = (SrcPtr.isZero() ? SrcPtr : DestPtr);
    S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_null)
        << /*IsMove=*/Move << /*IsWchar=*/false << !SrcPtr.isZero()
        << DiagPtr.toDiagnosticString(S.getASTContext());
    return false;
  }

  QualType ElemType;
  if (SrcPtr.getFieldDesc()->isArray())
    ElemType = SrcPtr.getFieldDesc()->getElemQualType();
  else
    ElemType = SrcPtr.getType();

  unsigned ElemSize =
      S.getASTContext().getTypeSizeInChars(ElemType).getQuantity();
  if (Size.urem(ElemSize) != 0) {
    S.FFDiag(S.Current->getSource(OpPC),
             diag::note_constexpr_memcpy_unsupported)
        << Move << /*IsWchar=*/false << 0 << ElemType << Size << ElemSize;
    return false;
  }

  // Check for overlapping memory regions.
  if (!Move && Pointer::pointToSameBlock(SrcPtr, DestPtr)) {
    unsigned SrcIndex = SrcPtr.getIndex() * SrcPtr.elemSize();
    unsigned DstIndex = DestPtr.getIndex() * DestPtr.elemSize();
    unsigned N = Size.getZExtValue();

    if ((SrcIndex <= DstIndex && (SrcIndex + N) > DstIndex) ||
        (DstIndex <= SrcIndex && (DstIndex + N) > SrcIndex)) {
      S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_overlap)
          << /*IsWChar=*/false;
      return false;
    }
  }

  // As a last resort, reject dummy pointers.
  if (DestPtr.isDummy() || SrcPtr.isDummy())
    return false;

  if (!DoBitCastPtr(S, OpPC, SrcPtr, DestPtr, Size.getZExtValue()))
    return false;

  S.Stk.push<Pointer>(DestPtr);
  return true;
}

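// Rough illustration of the element-size rule above (assuming 4-byte int):
//   constexpr int f() {
//     int A[2] = {1, 2}, B[2] = {};
//     __builtin_memcpy(B, A, sizeof(A)); // OK: 8 % sizeof(int) == 0
//     return B[1];                       // 2
//   }
// A 7-byte copy between the same arrays would be rejected as unsupported.
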
/// Determine if T is a character type for which we guarantee that
/// sizeof(T) == 1.
static bool isOneByteCharacterType(QualType T) {
  return T->isCharType() || T->isChar8Type();
}

static bool interp__builtin_memcmp(InterpState &S, CodePtr OpPC,
                                   const InterpFrame *Frame,
                                   const Function *Func, const CallExpr *Call) {
  assert(Call->getNumArgs() == 3);
  unsigned ID = Func->getBuiltinID();
  const Pointer &PtrA = getParam<Pointer>(Frame, 0);
  const Pointer &PtrB = getParam<Pointer>(Frame, 1);
  const APSInt &Size =
      peekToAPSInt(S.Stk, *S.getContext().classify(Call->getArg(2)));

  if (ID == Builtin::BImemcmp)
    diagnoseNonConstexprBuiltin(S, OpPC, ID);

  if (Size.isZero()) {
    pushInteger(S, 0, Call->getType());
    return true;
  }

  // FIXME: This is an arbitrary limitation the current constant interpreter
  // had. We could remove this.
  if (!isOneByteCharacterType(PtrA.getType()) ||
      !isOneByteCharacterType(PtrB.getType())) {
    S.FFDiag(S.Current->getSource(OpPC),
             diag::note_constexpr_memcmp_unsupported)
        << ("'" + S.getASTContext().BuiltinInfo.getName(ID) + "'").str()
        << PtrA.getType() << PtrB.getType();
    return false;
  }

  if (PtrA.isDummy() || PtrB.isDummy())
    return false;

  // Now, read both pointers to a buffer and compare those.
  BitcastBuffer BufferA(
      Bits(S.getASTContext().getTypeSize(PtrA.getFieldDesc()->getType())));
  readPointerToBuffer(S.getContext(), PtrA, BufferA, false);

  BitcastBuffer BufferB(
      Bits(S.getASTContext().getTypeSize(PtrB.getFieldDesc()->getType())));
  readPointerToBuffer(S.getContext(), PtrB, BufferB, false);

  size_t MinBufferSize = std::min(BufferA.byteSize().getQuantity(),
                                  BufferB.byteSize().getQuantity());
  size_t CmpSize =
      std::min(MinBufferSize, static_cast<size_t>(Size.getZExtValue()));
  int Result = std::memcmp(BufferA.Data.get(), BufferB.Data.get(), CmpSize);
  if (Result == 0)
    pushInteger(S, 0, Call->getType());
  else if (Result < 0)
    pushInteger(S, -1, Call->getType());
  else
    pushInteger(S, 1, Call->getType());

  return true;
}

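// e.g. (illustrative): __builtin_memcmp("ab", "ac", 2) folds to -1 and
// __builtin_memcmp(u8"ab", u8"ab", 2) folds to 0, while comparing two 'int'
// arrays currently hits the one-byte character type limitation above.
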
bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const Function *F,
                      const CallExpr *Call, uint32_t BuiltinID) {
  const InterpFrame *Frame = S.Current;

  std::optional<PrimType> ReturnT = S.getContext().classify(Call);

  switch (BuiltinID) {
  case Builtin::BI__builtin_is_constant_evaluated:
    if (!interp__builtin_is_constant_evaluated(S, OpPC, Frame, Call))
      return false;
    break;
  case Builtin::BI__builtin_assume:
  case Builtin::BI__assume:
    break;
  case Builtin::BI__builtin_strcmp:
  case Builtin::BIstrcmp:
  case Builtin::BI__builtin_strncmp:
  case Builtin::BIstrncmp:
    if (!interp__builtin_strcmp(S, OpPC, Frame, F, Call))
      return false;
    break;
  case Builtin::BI__builtin_strlen:
  case Builtin::BIstrlen:
  case Builtin::BI__builtin_wcslen:
  case Builtin::BIwcslen:
    if (!interp__builtin_strlen(S, OpPC, Frame, F, Call))
      return false;
    break;
  case Builtin::BI__builtin_nan:
  case Builtin::BI__builtin_nanf:
  case Builtin::BI__builtin_nanl:
  case Builtin::BI__builtin_nanf16:
  case Builtin::BI__builtin_nanf128:
    if (!interp__builtin_nan(S, OpPC, Frame, F, /*Signaling=*/false))
      return false;
    break;
  case Builtin::BI__builtin_nans:
  case Builtin::BI__builtin_nansf:
  case Builtin::BI__builtin_nansl:
  case Builtin::BI__builtin_nansf16:
  case Builtin::BI__builtin_nansf128:
    if (!interp__builtin_nan(S, OpPC, Frame, F, /*Signaling=*/true))
      return false;
    break;

  case Builtin::BI__builtin_huge_val:
  case Builtin::BI__builtin_huge_valf:
  case Builtin::BI__builtin_huge_vall:
  case Builtin::BI__builtin_huge_valf16:
  case Builtin::BI__builtin_huge_valf128:
  case Builtin::BI__builtin_inf:
  case Builtin::BI__builtin_inff:
  case Builtin::BI__builtin_infl:
  case Builtin::BI__builtin_inff16:
  case Builtin::BI__builtin_inff128:
    if (!interp__builtin_inf(S, OpPC, Frame, F))
      return false;
    break;
  case Builtin::BI__builtin_copysign:
  case Builtin::BI__builtin_copysignf:
  case Builtin::BI__builtin_copysignl:
  case Builtin::BI__builtin_copysignf128:
    if (!interp__builtin_copysign(S, OpPC, Frame, F))
      return false;
    break;

  case Builtin::BI__builtin_fmin:
  case Builtin::BI__builtin_fminf:
  case Builtin::BI__builtin_fminl:
  case Builtin::BI__builtin_fminf16:
  case Builtin::BI__builtin_fminf128:
    if (!interp__builtin_fmin(S, OpPC, Frame, F, /*IsNumBuiltin=*/false))
      return false;
    break;

  case Builtin::BI__builtin_fminimum_num:
  case Builtin::BI__builtin_fminimum_numf:
  case Builtin::BI__builtin_fminimum_numl:
  case Builtin::BI__builtin_fminimum_numf16:
  case Builtin::BI__builtin_fminimum_numf128:
    if (!interp__builtin_fmin(S, OpPC, Frame, F, /*IsNumBuiltin=*/true))
      return false;
    break;

  case Builtin::BI__builtin_fmax:
  case Builtin::BI__builtin_fmaxf:
  case Builtin::BI__builtin_fmaxl:
  case Builtin::BI__builtin_fmaxf16:
  case Builtin::BI__builtin_fmaxf128:
    if (!interp__builtin_fmax(S, OpPC, Frame, F, /*IsNumBuiltin=*/false))
      return false;
    break;

  case Builtin::BI__builtin_fmaximum_num:
  case Builtin::BI__builtin_fmaximum_numf:
  case Builtin::BI__builtin_fmaximum_numl:
  case Builtin::BI__builtin_fmaximum_numf16:
  case Builtin::BI__builtin_fmaximum_numf128:
    if (!interp__builtin_fmax(S, OpPC, Frame, F, /*IsNumBuiltin=*/true))
      return false;
    break;

  case Builtin::BI__builtin_isnan:
    if (!interp__builtin_isnan(S, OpPC, Frame, F, Call))
      return false;
    break;
  case Builtin::BI__builtin_issignaling:
    if (!interp__builtin_issignaling(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_isinf:
    if (!interp__builtin_isinf(S, OpPC, Frame, F, /*Sign=*/false, Call))
      return false;
    break;

  case Builtin::BI__builtin_isinf_sign:
    if (!interp__builtin_isinf(S, OpPC, Frame, F, /*Sign=*/true, Call))
      return false;
    break;

  case Builtin::BI__builtin_isfinite:
    if (!interp__builtin_isfinite(S, OpPC, Frame, F, Call))
      return false;
    break;
  case Builtin::BI__builtin_isnormal:
    if (!interp__builtin_isnormal(S, OpPC, Frame, F, Call))
      return false;
    break;
  case Builtin::BI__builtin_issubnormal:
    if (!interp__builtin_issubnormal(S, OpPC, Frame, F, Call))
      return false;
    break;
  case Builtin::BI__builtin_iszero:
    if (!interp__builtin_iszero(S, OpPC, Frame, F, Call))
      return false;
    break;
  case Builtin::BI__builtin_signbit:
  case Builtin::BI__builtin_signbitf:
  case Builtin::BI__builtin_signbitl:
    if (!interp__builtin_signbit(S, OpPC, Frame, F, Call))
      return false;
    break;

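  // Illustrative: __builtin_isless(1.0, 2.0) folds to 1,
  // __builtin_isgreater(1.0, __builtin_nan("")) to 0, and
  // __builtin_isunordered(__builtin_nan(""), 1.0) to 1.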
  case Builtin::BI__builtin_isgreater:
  case Builtin::BI__builtin_isgreaterequal:
  case Builtin::BI__builtin_isless:
  case Builtin::BI__builtin_islessequal:
  case Builtin::BI__builtin_islessgreater:
  case Builtin::BI__builtin_isunordered:
    if (!interp_floating_comparison(S, OpPC, Frame, F, Call))
      return false;
    break;
  case Builtin::BI__builtin_isfpclass:
    if (!interp__builtin_isfpclass(S, OpPC, Frame, F, Call))
      return false;
    break;
  case Builtin::BI__builtin_fpclassify:
    if (!interp__builtin_fpclassify(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_fabs:
  case Builtin::BI__builtin_fabsf:
  case Builtin::BI__builtin_fabsl:
  case Builtin::BI__builtin_fabsf128:
    if (!interp__builtin_fabs(S, OpPC, Frame, F))
      return false;
    break;

  case Builtin::BI__builtin_abs:
  case Builtin::BI__builtin_labs:
  case Builtin::BI__builtin_llabs:
    if (!interp__builtin_abs(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_popcount:
  case Builtin::BI__builtin_popcountl:
  case Builtin::BI__builtin_popcountll:
  case Builtin::BI__builtin_popcountg:
  case Builtin::BI__popcnt16: // Microsoft variants of popcount
  case Builtin::BI__popcnt:
  case Builtin::BI__popcnt64:
    if (!interp__builtin_popcount(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_parity:
  case Builtin::BI__builtin_parityl:
  case Builtin::BI__builtin_parityll:
    if (!interp__builtin_parity(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_clrsb:
  case Builtin::BI__builtin_clrsbl:
  case Builtin::BI__builtin_clrsbll:
    if (!interp__builtin_clrsb(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_bitreverse8:
  case Builtin::BI__builtin_bitreverse16:
  case Builtin::BI__builtin_bitreverse32:
  case Builtin::BI__builtin_bitreverse64:
    if (!interp__builtin_bitreverse(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_classify_type:
    if (!interp__builtin_classify_type(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_expect:
  case Builtin::BI__builtin_expect_with_probability:
    if (!interp__builtin_expect(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_rotateleft8:
  case Builtin::BI__builtin_rotateleft16:
  case Builtin::BI__builtin_rotateleft32:
  case Builtin::BI__builtin_rotateleft64:
  case Builtin::BI_rotl8: // Microsoft variants of rotate left
  case Builtin::BI_rotl16:
  case Builtin::BI_rotl:
  case Builtin::BI_lrotl:
  case Builtin::BI_rotl64:
    if (!interp__builtin_rotate(S, OpPC, Frame, F, Call, /*Right=*/false))
      return false;
    break;

  case Builtin::BI__builtin_rotateright8:
  case Builtin::BI__builtin_rotateright16:
  case Builtin::BI__builtin_rotateright32:
  case Builtin::BI__builtin_rotateright64:
  case Builtin::BI_rotr8: // Microsoft variants of rotate right
  case Builtin::BI_rotr16:
  case Builtin::BI_rotr:
  case Builtin::BI_lrotr:
  case Builtin::BI_rotr64:
    if (!interp__builtin_rotate(S, OpPC, Frame, F, Call, /*Right=*/true))
      return false;
    break;

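  // e.g. __builtin_rotateleft8(0x81, 1) == 0x03 and
  //      __builtin_rotateright8(0x01, 1) == 0x80.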
  case Builtin::BI__builtin_ffs:
  case Builtin::BI__builtin_ffsl:
  case Builtin::BI__builtin_ffsll:
    if (!interp__builtin_ffs(S, OpPC, Frame, F, Call))
      return false;
    break;
  case Builtin::BIaddressof:
  case Builtin::BI__addressof:
  case Builtin::BI__builtin_addressof:
    if (!interp__builtin_addressof(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BIas_const:
  case Builtin::BIforward:
  case Builtin::BIforward_like:
  case Builtin::BImove:
  case Builtin::BImove_if_noexcept:
    if (!interp__builtin_move(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_eh_return_data_regno:
    if (!interp__builtin_eh_return_data_regno(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_launder:
    if (!noopPointer(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_add_overflow:
  case Builtin::BI__builtin_sub_overflow:
  case Builtin::BI__builtin_mul_overflow:
  case Builtin::BI__builtin_sadd_overflow:
  case Builtin::BI__builtin_uadd_overflow:
  case Builtin::BI__builtin_uaddl_overflow:
  case Builtin::BI__builtin_uaddll_overflow:
  case Builtin::BI__builtin_usub_overflow:
  case Builtin::BI__builtin_usubl_overflow:
  case Builtin::BI__builtin_usubll_overflow:
  case Builtin::BI__builtin_umul_overflow:
  case Builtin::BI__builtin_umull_overflow:
  case Builtin::BI__builtin_umulll_overflow:
  case Builtin::BI__builtin_saddl_overflow:
  case Builtin::BI__builtin_saddll_overflow:
  case Builtin::BI__builtin_ssub_overflow:
  case Builtin::BI__builtin_ssubl_overflow:
  case Builtin::BI__builtin_ssubll_overflow:
  case Builtin::BI__builtin_smul_overflow:
  case Builtin::BI__builtin_smull_overflow:
  case Builtin::BI__builtin_smulll_overflow:
    if (!interp__builtin_overflowop(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_addcb:
  case Builtin::BI__builtin_addcs:
  case Builtin::BI__builtin_addc:
  case Builtin::BI__builtin_addcl:
  case Builtin::BI__builtin_addcll:
  case Builtin::BI__builtin_subcb:
  case Builtin::BI__builtin_subcs:
  case Builtin::BI__builtin_subc:
  case Builtin::BI__builtin_subcl:
  case Builtin::BI__builtin_subcll:
    if (!interp__builtin_carryop(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_clz:
  case Builtin::BI__builtin_clzl:
  case Builtin::BI__builtin_clzll:
  case Builtin::BI__builtin_clzs:
  case Builtin::BI__builtin_clzg:
  case Builtin::BI__lzcnt16: // Microsoft variants of count leading-zeroes
  case Builtin::BI__lzcnt:
  case Builtin::BI__lzcnt64:
    if (!interp__builtin_clz(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_ctz:
  case Builtin::BI__builtin_ctzl:
  case Builtin::BI__builtin_ctzll:
  case Builtin::BI__builtin_ctzs:
  case Builtin::BI__builtin_ctzg:
    if (!interp__builtin_ctz(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_bswap16:
  case Builtin::BI__builtin_bswap32:
  case Builtin::BI__builtin_bswap64:
    if (!interp__builtin_bswap(S, OpPC, Frame, F, Call))
      return false;
    break;

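  // e.g. __builtin_bswap32(0x12345678) == 0x78563412.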
  case Builtin::BI__atomic_always_lock_free:
  case Builtin::BI__atomic_is_lock_free:
  case Builtin::BI__c11_atomic_is_lock_free:
    if (!interp__builtin_atomic_lock_free(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_complex:
    if (!interp__builtin_complex(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_is_aligned:
  case Builtin::BI__builtin_align_up:
  case Builtin::BI__builtin_align_down:
    if (!interp__builtin_is_aligned_up_down(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_assume_aligned:
    if (!interp__builtin_assume_aligned(S, OpPC, Frame, F, Call))
      return false;
    break;

  case clang::X86::BI__builtin_ia32_bextr_u32:
  case clang::X86::BI__builtin_ia32_bextr_u64:
  case clang::X86::BI__builtin_ia32_bextri_u32:
  case clang::X86::BI__builtin_ia32_bextri_u64:
    if (!interp__builtin_ia32_bextr(S, OpPC, Frame, F, Call))
      return false;
    break;

  case clang::X86::BI__builtin_ia32_bzhi_si:
  case clang::X86::BI__builtin_ia32_bzhi_di:
    if (!interp__builtin_ia32_bzhi(S, OpPC, Frame, F, Call))
      return false;
    break;

  case clang::X86::BI__builtin_ia32_lzcnt_u16:
  case clang::X86::BI__builtin_ia32_lzcnt_u32:
  case clang::X86::BI__builtin_ia32_lzcnt_u64:
    if (!interp__builtin_ia32_lzcnt(S, OpPC, Frame, F, Call))
      return false;
    break;

  case clang::X86::BI__builtin_ia32_tzcnt_u16:
  case clang::X86::BI__builtin_ia32_tzcnt_u32:
  case clang::X86::BI__builtin_ia32_tzcnt_u64:
    if (!interp__builtin_ia32_tzcnt(S, OpPC, Frame, F, Call))
      return false;
    break;

  case clang::X86::BI__builtin_ia32_pdep_si:
  case clang::X86::BI__builtin_ia32_pdep_di:
    if (!interp__builtin_ia32_pdep(S, OpPC, Frame, F, Call))
      return false;
    break;

  case clang::X86::BI__builtin_ia32_pext_si:
  case clang::X86::BI__builtin_ia32_pext_di:
    if (!interp__builtin_ia32_pext(S, OpPC, Frame, F, Call))
      return false;
    break;

  case clang::X86::BI__builtin_ia32_addcarryx_u32:
  case clang::X86::BI__builtin_ia32_addcarryx_u64:
  case clang::X86::BI__builtin_ia32_subborrow_u32:
  case clang::X86::BI__builtin_ia32_subborrow_u64:
    if (!interp__builtin_ia32_addcarry_subborrow(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_os_log_format_buffer_size:
    if (!interp__builtin_os_log_format_buffer_size(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_ptrauth_string_discriminator:
    if (!interp__builtin_ptrauth_string_discriminator(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_constant_p:
    if (!interp__builtin_constant_p(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__noop:
    pushInteger(S, 0, Call->getType());
    break;

  case Builtin::BI__builtin_operator_new:
    if (!interp__builtin_operator_new(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_operator_delete:
    if (!interp__builtin_operator_delete(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__arithmetic_fence:
    if (!interp__builtin_arithmetic_fence(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_reduce_add:
  case Builtin::BI__builtin_reduce_mul:
  case Builtin::BI__builtin_reduce_and:
  case Builtin::BI__builtin_reduce_or:
  case Builtin::BI__builtin_reduce_xor:
    if (!interp__builtin_vector_reduce(S, OpPC, Frame, F, Call))
      return false;
    break;

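  // e.g. __builtin_elementwise_popcount(0xF0F0U) == 8; with a vector argument
  // the count is computed lane-wise (see the handler above).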
  case Builtin::BI__builtin_elementwise_popcount:
    if (!interp__builtin_elementwise_popcount(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_memcpy:
  case Builtin::BImemcpy:
  case Builtin::BI__builtin_memmove:
  case Builtin::BImemmove:
    if (!interp__builtin_memcpy(S, OpPC, Frame, F, Call))
      return false;
    break;

  case Builtin::BI__builtin_memcmp:
  case Builtin::BImemcmp:
    if (!interp__builtin_memcmp(S, OpPC, Frame, F, Call))
      return false;
    break;

  default:
    S.FFDiag(S.Current->getLocation(OpPC),
             diag::note_invalid_subexpr_in_const_expr)
        << S.Current->getRange(OpPC);

    return false;
  }

  return retPrimValue(S, OpPC, ReturnT);
}

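// For reference (illustrative): given
//   struct S { int a; int b[4]; };
// __builtin_offsetof(S, b[2]) folds to offsetof(S, b) + 2 * sizeof(int).
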
bool InterpretOffsetOf(InterpState &S, CodePtr OpPC, const OffsetOfExpr *E,
                       llvm::ArrayRef<int64_t> ArrayIndices,
                       int64_t &IntResult) {
  CharUnits Result;
  unsigned N = E->getNumComponents();
  assert(N > 0);

  unsigned ArrayIndex = 0;
  QualType CurrentType = E->getTypeSourceInfo()->getType();
  for (unsigned I = 0; I != N; ++I) {
    const OffsetOfNode &Node = E->getComponent(I);
    switch (Node.getKind()) {
    case OffsetOfNode::Field: {
      const FieldDecl *MemberDecl = Node.getField();
      const RecordType *RT = CurrentType->getAs<RecordType>();
      if (!RT)
        return false;
      const RecordDecl *RD = RT->getDecl();
      if (RD->isInvalidDecl())
        return false;
      const ASTRecordLayout &RL = S.getASTContext().getASTRecordLayout(RD);
      unsigned FieldIndex = MemberDecl->getFieldIndex();
      assert(FieldIndex < RL.getFieldCount() && "offsetof field in wrong type");
      Result +=
          S.getASTContext().toCharUnitsFromBits(RL.getFieldOffset(FieldIndex));
      CurrentType = MemberDecl->getType().getNonReferenceType();
      break;
    }
    case OffsetOfNode::Array: {
      // When generating bytecode, we put all the index expressions as Sint64 on
      // the stack.
      int64_t Index = ArrayIndices[ArrayIndex];
      const ArrayType *AT = S.getASTContext().getAsArrayType(CurrentType);
      if (!AT)
        return false;
      CurrentType = AT->getElementType();
      CharUnits ElementSize = S.getASTContext().getTypeSizeInChars(CurrentType);
      Result += Index * ElementSize;
      ++ArrayIndex;
      break;
    }
    case OffsetOfNode::Base: {
      const CXXBaseSpecifier *BaseSpec = Node.getBase();
      if (BaseSpec->isVirtual())
        return false;

      // Find the layout of the class whose base we are looking into.
      const RecordType *RT = CurrentType->getAs<RecordType>();
      if (!RT)
        return false;
      const RecordDecl *RD = RT->getDecl();
      if (RD->isInvalidDecl())
        return false;
      const ASTRecordLayout &RL = S.getASTContext().getASTRecordLayout(RD);

      // Find the base class itself.
      CurrentType = BaseSpec->getType();
      const RecordType *BaseRT = CurrentType->getAs<RecordType>();
      if (!BaseRT)
        return false;

      // Add the offset to the base.
      Result += RL.getBaseClassOffset(cast<CXXRecordDecl>(BaseRT->getDecl()));
      break;
    }
    case OffsetOfNode::Identifier:
      llvm_unreachable("Dependent OffsetOfExpr?");
    }
  }

  IntResult = Result.getQuantity();

  return true;
}

bool SetThreeWayComparisonField(InterpState &S, CodePtr OpPC,
                                const Pointer &Ptr, const APSInt &IntValue) {

  const Record *R = Ptr.getRecord();
  assert(R);
  assert(R->getNumFields() == 1);

  unsigned FieldOffset = R->getField(0u)->Offset;
  const Pointer &FieldPtr = Ptr.atField(FieldOffset);
  PrimType FieldT = *S.getContext().classify(FieldPtr.getType());

  INT_TYPE_SWITCH(FieldT,
                  FieldPtr.deref<T>() = T::from(IntValue.getSExtValue()));
  FieldPtr.initialize();
  return true;
}

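// The helpers below implement the composite copy behind DoMemcpy: records are
// copied field-by-field (for unions, only the active field, which is also
// activated in the destination) and primitive arrays element-by-element.
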
static bool copyComposite(InterpState &S, CodePtr OpPC, const Pointer &Src,
                          Pointer &Dest, bool Activate);
static bool copyRecord(InterpState &S, CodePtr OpPC, const Pointer &Src,
                       Pointer &Dest, bool Activate = false) {
  [[maybe_unused]] const Descriptor *SrcDesc = Src.getFieldDesc();
  const Descriptor *DestDesc = Dest.getFieldDesc();

  auto copyField = [&](const Record::Field &F, bool Activate) -> bool {
    Pointer DestField = Dest.atField(F.Offset);
    if (std::optional<PrimType> FT = S.Ctx.classify(F.Decl->getType())) {
      TYPE_SWITCH(*FT, {
        DestField.deref<T>() = Src.atField(F.Offset).deref<T>();
        if (Src.atField(F.Offset).isInitialized())
          DestField.initialize();
        if (Activate)
          DestField.activate();
      });
      return true;
    }
    // Composite field.
    return copyComposite(S, OpPC, Src.atField(F.Offset), DestField, Activate);
  };

  assert(SrcDesc->isRecord());
  assert(SrcDesc->ElemRecord == DestDesc->ElemRecord);
  const Record *R = DestDesc->ElemRecord;
  for (const Record::Field &F : R->fields()) {
    if (R->isUnion()) {
      // For unions, only copy the active field.
      const Pointer &SrcField = Src.atField(F.Offset);
      if (SrcField.isActive()) {
        if (!copyField(F, /*Activate=*/true))
          return false;
      }
    } else {
      if (!copyField(F, Activate))
        return false;
    }
  }

  for (const Record::Base &B : R->bases()) {
    Pointer DestBase = Dest.atField(B.Offset);
    if (!copyRecord(S, OpPC, Src.atField(B.Offset), DestBase, Activate))
      return false;
  }

  Dest.initialize();
  return true;
}

static bool copyComposite(InterpState &S, CodePtr OpPC, const Pointer &Src,
                          Pointer &Dest, bool Activate = false) {
  assert(Src.isLive() && Dest.isLive());

  [[maybe_unused]] const Descriptor *SrcDesc = Src.getFieldDesc();
  const Descriptor *DestDesc = Dest.getFieldDesc();

  assert(!DestDesc->isPrimitive() && !SrcDesc->isPrimitive());

  if (DestDesc->isPrimitiveArray()) {
    assert(SrcDesc->isPrimitiveArray());
    assert(SrcDesc->getNumElems() == DestDesc->getNumElems());
    PrimType ET = DestDesc->getPrimType();
    for (unsigned I = 0, N = DestDesc->getNumElems(); I != N; ++I) {
      Pointer DestElem = Dest.atIndex(I);
      TYPE_SWITCH(ET, {
        DestElem.deref<T>() = Src.atIndex(I).deref<T>();
        DestElem.initialize();
      });
    }
    return true;
  }

  if (DestDesc->isRecord())
    return copyRecord(S, OpPC, Src, Dest, Activate);
  return Invalid(S, OpPC);
}

bool DoMemcpy(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest) {
  return copyComposite(S, OpPC, Src, Dest);
}

} // namespace interp
} // namespace clang