//===- SemaChecking.cpp - Extra Semantic Checking -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements extra semantic analysis beyond what is enforced
// by the C type system.
//
//===----------------------------------------------------------------------===//

#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/AttrIterator.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/FormatString.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/NonTrivialTypeVisitor.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/UnresolvedSet.h"
#include "clang/Basic/AddressSpaces.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/OpenCLOptions.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/SyncScope.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetCXXABI.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering.
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/Sema.h"
#include "clang/Sema/SemaInternal.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/Locale.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <bitset>
#include <cassert>
#include <cctype>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <limits>
#include <string>
#include <tuple>
#include <utility>

using namespace clang;
using namespace sema;

SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL,
                                                    unsigned ByteNo) const {
  return SL->getLocationOfByte(ByteNo, getSourceManager(), LangOpts,
                               Context.getTargetInfo());
}

/// Checks that a call expression's argument count is the desired number.
/// This is useful when doing custom type-checking. Returns true on error.
static bool checkArgCount(Sema &S, CallExpr *call, unsigned desiredArgCount) {
  unsigned argCount = call->getNumArgs();
  if (argCount == desiredArgCount) return false;

  if (argCount < desiredArgCount)
    return S.Diag(call->getEndLoc(), diag::err_typecheck_call_too_few_args)
           << 0 /*function call*/ << desiredArgCount << argCount
           << call->getSourceRange();

  // Highlight all the excess arguments.
  SourceRange range(call->getArg(desiredArgCount)->getBeginLoc(),
                    call->getArg(argCount - 1)->getEndLoc());

  return S.Diag(range.getBegin(), diag::err_typecheck_call_too_many_args)
         << 0 /*function call*/ << desiredArgCount << argCount
         << call->getArg(1)->getSourceRange();
}

/// Check that the first argument to __builtin_annotation is an integer
/// and the second argument is a non-wide string literal.
static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  // First argument should be an integer.
  Expr *ValArg = TheCall->getArg(0);
  QualType Ty = ValArg->getType();
  if (!Ty->isIntegerType()) {
    S.Diag(ValArg->getBeginLoc(), diag::err_builtin_annotation_first_arg)
        << ValArg->getSourceRange();
    return true;
  }

  // Second argument should be a constant string.
  Expr *StrArg = TheCall->getArg(1)->IgnoreParenCasts();
  StringLiteral *Literal = dyn_cast<StringLiteral>(StrArg);
  if (!Literal || !Literal->isAscii()) {
    S.Diag(StrArg->getBeginLoc(), diag::err_builtin_annotation_second_arg)
        << StrArg->getSourceRange();
    return true;
  }

  TheCall->setType(Ty);
  return false;
}

static bool SemaBuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) {
  // We need at least one argument.
  if (TheCall->getNumArgs() < 1) {
    S.Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 1 << TheCall->getNumArgs()
        << TheCall->getCallee()->getSourceRange();
    return true;
  }

  // All arguments should be wide string literals.
  for (Expr *Arg : TheCall->arguments()) {
    auto *Literal = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts());
    if (!Literal || !Literal->isWide()) {
      S.Diag(Arg->getBeginLoc(), diag::err_msvc_annotation_wide_str)
          << Arg->getSourceRange();
      return true;
    }
  }

  return false;
}

/// Check that the argument to __builtin_addressof is a glvalue, and set the
/// result type to the corresponding pointer type.
static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  ExprResult Arg(TheCall->getArg(0));
  QualType ResultType = S.CheckAddressOfOperand(Arg, TheCall->getBeginLoc());
  if (ResultType.isNull())
    return true;

  TheCall->setArg(0, Arg.get());
  TheCall->setType(ResultType);
  return false;
}

/// Check the number of arguments and set the result type to
/// the argument type.
static bool SemaBuiltinPreserveAI(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  TheCall->setType(TheCall->getArg(0)->getType());
  return false;
}

/// Check that the value argument for __builtin_is_aligned(value, alignment)
/// and __builtin_align_{up,down}(value, alignment) is an integer or a pointer
/// type (but not a function pointer) and that the alignment is a power of two.
static bool SemaBuiltinAlignment(Sema &S, CallExpr *TheCall, unsigned ID) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  clang::Expr *Source = TheCall->getArg(0);
  bool IsBooleanAlignBuiltin = ID == Builtin::BI__builtin_is_aligned;

  auto IsValidIntegerType = [](QualType Ty) {
    return Ty->isIntegerType() && !Ty->isEnumeralType() && !Ty->isBooleanType();
  };
  QualType SrcTy = Source->getType();
  // We should also be able to use it with arrays (but not functions!).
  if (SrcTy->canDecayToPointerType() && SrcTy->isArrayType()) {
    SrcTy = S.Context.getDecayedType(SrcTy);
  }
  if ((!SrcTy->isPointerType() && !IsValidIntegerType(SrcTy)) ||
      SrcTy->isFunctionPointerType()) {
    // FIXME: this is not quite the right error message since we don't allow
    // floating point types, or member pointers.
    S.Diag(Source->getExprLoc(), diag::err_typecheck_expect_scalar_operand)
        << SrcTy;
    return true;
  }

  clang::Expr *AlignOp = TheCall->getArg(1);
  if (!IsValidIntegerType(AlignOp->getType())) {
    S.Diag(AlignOp->getExprLoc(), diag::err_typecheck_expect_int)
        << AlignOp->getType();
    return true;
  }
  Expr::EvalResult AlignResult;
  unsigned MaxAlignmentBits = S.Context.getIntWidth(SrcTy) - 1;
  // We can't check validity of alignment if it is value dependent.
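  // Otherwise, fold it now: it must be a power of two in the range
  // [1, 2^(source bit width - 1)]; an alignment of exactly 1 is accepted
  // but flagged as useless below.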
  if (!AlignOp->isValueDependent() &&
      AlignOp->EvaluateAsInt(AlignResult, S.Context,
                             Expr::SE_AllowSideEffects)) {
    llvm::APSInt AlignValue = AlignResult.Val.getInt();
    llvm::APSInt MaxValue(
        llvm::APInt::getOneBitSet(MaxAlignmentBits + 1, MaxAlignmentBits));
    if (AlignValue < 1) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_small) << 1;
      return true;
    }
    if (llvm::APSInt::compareValues(AlignValue, MaxValue) > 0) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_big)
          << toString(MaxValue, 10);
      return true;
    }
    if (!AlignValue.isPowerOf2()) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_not_power_of_two);
      return true;
    }
    if (AlignValue == 1) {
      S.Diag(AlignOp->getExprLoc(), diag::warn_alignment_builtin_useless)
          << IsBooleanAlignBuiltin;
    }
  }

  ExprResult SrcArg = S.PerformCopyInitialization(
      InitializedEntity::InitializeParameter(S.Context, SrcTy, false),
      SourceLocation(), Source);
  if (SrcArg.isInvalid())
    return true;
  TheCall->setArg(0, SrcArg.get());
  ExprResult AlignArg =
      S.PerformCopyInitialization(InitializedEntity::InitializeParameter(
                                      S.Context, AlignOp->getType(), false),
                                  SourceLocation(), AlignOp);
  if (AlignArg.isInvalid())
    return true;
  TheCall->setArg(1, AlignArg.get());
  // For align_up/align_down, the return type is the same as the (potentially
  // decayed) argument type including qualifiers. For is_aligned(), the result
  // is always bool.
  TheCall->setType(IsBooleanAlignBuiltin ? S.Context.BoolTy : SrcTy);
  return false;
}

static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall,
                                unsigned BuiltinID) {
  if (checkArgCount(S, TheCall, 3))
    return true;

  // First two arguments should be integers.
  for (unsigned I = 0; I < 2; ++I) {
    ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(I));
    if (Arg.isInvalid()) return true;
    TheCall->setArg(I, Arg.get());

    QualType Ty = Arg.get()->getType();
    if (!Ty->isIntegerType()) {
      S.Diag(Arg.get()->getBeginLoc(), diag::err_overflow_builtin_must_be_int)
          << Ty << Arg.get()->getSourceRange();
      return true;
    }
  }

  // Third argument should be a pointer to a non-const integer.
  // IRGen correctly handles volatile, restrict, and address spaces, and
  // the other qualifiers aren't possible.
  {
    ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(2));
    if (Arg.isInvalid()) return true;
    TheCall->setArg(2, Arg.get());

    QualType Ty = Arg.get()->getType();
    const auto *PtrTy = Ty->getAs<PointerType>();
    if (!PtrTy ||
        !PtrTy->getPointeeType()->isIntegerType() ||
        PtrTy->getPointeeType().isConstQualified()) {
      S.Diag(Arg.get()->getBeginLoc(),
             diag::err_overflow_builtin_must_be_ptr_int)
          << Ty << Arg.get()->getSourceRange();
      return true;
    }
  }

  // Disallow signed ExtIntType args larger than 128 bits to mul function until
  // we improve backend support.
  if (BuiltinID == Builtin::BI__builtin_mul_overflow) {
    for (unsigned I = 0; I < 3; ++I) {
      const auto Arg = TheCall->getArg(I);
      // Third argument will be a pointer.
      auto Ty = I < 2 ? Arg->getType() : Arg->getType()->getPointeeType();
      if (Ty->isExtIntType() && Ty->isSignedIntegerType() &&
          S.getASTContext().getIntWidth(Ty) > 128)
        return S.Diag(Arg->getBeginLoc(),
                      diag::err_overflow_builtin_ext_int_max_size)
               << 128;
    }
  }

  return false;
}

static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
  if (checkArgCount(S, BuiltinCall, 2))
    return true;

  SourceLocation BuiltinLoc = BuiltinCall->getBeginLoc();
  Expr *Builtin = BuiltinCall->getCallee()->IgnoreImpCasts();
  Expr *Call = BuiltinCall->getArg(0);
  Expr *Chain = BuiltinCall->getArg(1);

  if (Call->getStmtClass() != Stmt::CallExprClass) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_not_call)
        << Call->getSourceRange();
    return true;
  }

  auto CE = cast<CallExpr>(Call);
  if (CE->getCallee()->getType()->isBlockPointerType()) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_block_call)
        << Call->getSourceRange();
    return true;
  }

  const Decl *TargetDecl = CE->getCalleeDecl();
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
    if (FD->getBuiltinID()) {
      S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_builtin_call)
          << Call->getSourceRange();
      return true;
    }

  if (isa<CXXPseudoDestructorExpr>(CE->getCallee()->IgnoreParens())) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_pdtor_call)
        << Call->getSourceRange();
    return true;
  }

  ExprResult ChainResult = S.UsualUnaryConversions(Chain);
  if (ChainResult.isInvalid())
    return true;
  if (!ChainResult.get()->getType()->isPointerType()) {
    S.Diag(BuiltinLoc, diag::err_second_argument_to_cwsc_not_pointer)
        << Chain->getSourceRange();
    return true;
  }

  QualType ReturnTy = CE->getCallReturnType(S.Context);
  QualType ArgTys[2] = { ReturnTy, ChainResult.get()->getType() };
  QualType BuiltinTy = S.Context.getFunctionType(
      ReturnTy, ArgTys, FunctionProtoType::ExtProtoInfo());
  QualType BuiltinPtrTy = S.Context.getPointerType(BuiltinTy);

  Builtin =
      S.ImpCastExprToType(Builtin, BuiltinPtrTy, CK_BuiltinFnToFnPtr).get();

  BuiltinCall->setType(CE->getType());
  BuiltinCall->setValueKind(CE->getValueKind());
  BuiltinCall->setObjectKind(CE->getObjectKind());
  BuiltinCall->setCallee(Builtin);
  BuiltinCall->setArg(1, ChainResult.get());

  return false;
}

namespace {

class ScanfDiagnosticFormatHandler
    : public analyze_format_string::FormatStringHandler {
  // Accepts the argument index (relative to the first destination index) of
  // the argument whose size we want.
  using ComputeSizeFunction =
      llvm::function_ref<Optional<llvm::APSInt>(unsigned)>;

  // Accepts the argument index (relative to the first destination index), the
  // destination size, and the source size.
  using DiagnoseFunction =
      llvm::function_ref<void(unsigned, unsigned, unsigned)>;

  ComputeSizeFunction ComputeSizeArgument;
  DiagnoseFunction Diagnose;

public:
  ScanfDiagnosticFormatHandler(ComputeSizeFunction ComputeSizeArgument,
                               DiagnoseFunction Diagnose)
      : ComputeSizeArgument(ComputeSizeArgument), Diagnose(Diagnose) {}

  bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS,
                            const char *StartSpecifier,
                            unsigned specifierLen) override {
    if (!FS.consumesDataArgument())
      return true;

    unsigned NulByte = 0;
    switch ((FS.getConversionSpecifier().getKind())) {
    default:
      return true;
    case analyze_format_string::ConversionSpecifier::sArg:
    case analyze_format_string::ConversionSpecifier::ScanListArg:
      NulByte = 1;
      break;
    case analyze_format_string::ConversionSpecifier::cArg:
      break;
    }

    auto OptionalFW = FS.getFieldWidth();
    if (OptionalFW.getHowSpecified() !=
        analyze_format_string::OptionalAmount::HowSpecified::Constant)
      return true;

    unsigned SourceSize = OptionalFW.getConstantAmount() + NulByte;

    auto DestSizeAPS = ComputeSizeArgument(FS.getArgIndex());
    if (!DestSizeAPS)
      return true;

    unsigned DestSize = DestSizeAPS->getZExtValue();

    if (DestSize < SourceSize)
      Diagnose(FS.getArgIndex(), DestSize, SourceSize);

    return true;
  }
};

class EstimateSizeFormatHandler
    : public analyze_format_string::FormatStringHandler {
  size_t Size;

public:
  EstimateSizeFormatHandler(StringRef Format)
      : Size(std::min(Format.find(0), Format.size()) +
             1 /* null byte always written by sprintf */) {}

  bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
                             const char *, unsigned SpecifierLen) override {

    const size_t FieldWidth = computeFieldWidth(FS);
    const size_t Precision = computePrecision(FS);

    // The actual format.
    switch (FS.getConversionSpecifier().getKind()) {
    // Just a char.
    case analyze_format_string::ConversionSpecifier::cArg:
    case analyze_format_string::ConversionSpecifier::CArg:
      Size += std::max(FieldWidth, (size_t)1);
      break;
    // Just an integer.
    case analyze_format_string::ConversionSpecifier::dArg:
    case analyze_format_string::ConversionSpecifier::DArg:
    case analyze_format_string::ConversionSpecifier::iArg:
    case analyze_format_string::ConversionSpecifier::oArg:
    case analyze_format_string::ConversionSpecifier::OArg:
    case analyze_format_string::ConversionSpecifier::uArg:
    case analyze_format_string::ConversionSpecifier::UArg:
    case analyze_format_string::ConversionSpecifier::xArg:
    case analyze_format_string::ConversionSpecifier::XArg:
      Size += std::max(FieldWidth, Precision);
      break;

    // %g style conversion switches between %f or %e style dynamically.
    // %f always takes less space, so default to it.
    case analyze_format_string::ConversionSpecifier::gArg:
    case analyze_format_string::ConversionSpecifier::GArg:

    // Floating point number in the form '[+]ddd.ddd'.
    case analyze_format_string::ConversionSpecifier::fArg:
    case analyze_format_string::ConversionSpecifier::FArg:
      Size += std::max(FieldWidth, 1 /* integer part */ +
                                       (Precision ? 1 + Precision
                                                  : 0) /* period + decimal */);
      break;

    // Floating point number in the form '[-]d.ddde[+-]dd'.
    case analyze_format_string::ConversionSpecifier::eArg:
    case analyze_format_string::ConversionSpecifier::EArg:
      Size +=
          std::max(FieldWidth,
                   1 /* integer part */ +
                       (Precision ? 1 + Precision : 0) /* period + decimal */ +
                       1 /* e or E letter */ + 2 /* exponent */);
      break;

    // Floating point number in the form '[-]0xh.hhhhp±dd'.
    case analyze_format_string::ConversionSpecifier::aArg:
    case analyze_format_string::ConversionSpecifier::AArg:
      Size +=
          std::max(FieldWidth,
                   2 /* 0x */ + 1 /* integer part */ +
                       (Precision ? 1 + Precision : 0) /* period + decimal */ +
                       1 /* p or P letter */ + 1 /* + or - */ + 1 /* value */);
      break;

    // Just a string.
    case analyze_format_string::ConversionSpecifier::sArg:
    case analyze_format_string::ConversionSpecifier::SArg:
      Size += FieldWidth;
      break;

    // Just a pointer in the form '0xddd'.
    case analyze_format_string::ConversionSpecifier::pArg:
      Size += std::max(FieldWidth, 2 /* leading 0x */ + Precision);
      break;

    // A plain percent.
    case analyze_format_string::ConversionSpecifier::PercentArg:
      Size += 1;
      break;

    default:
      break;
    }

    Size += FS.hasPlusPrefix() || FS.hasSpacePrefix();

    if (FS.hasAlternativeForm()) {
      switch (FS.getConversionSpecifier().getKind()) {
      default:
        break;
      // Force a leading '0'.
      case analyze_format_string::ConversionSpecifier::oArg:
        Size += 1;
        break;
      // Force a leading '0x'.
      case analyze_format_string::ConversionSpecifier::xArg:
      case analyze_format_string::ConversionSpecifier::XArg:
        Size += 2;
        break;
      // Force a period '.' before decimal, even if precision is 0.
      case analyze_format_string::ConversionSpecifier::aArg:
      case analyze_format_string::ConversionSpecifier::AArg:
      case analyze_format_string::ConversionSpecifier::eArg:
      case analyze_format_string::ConversionSpecifier::EArg:
      case analyze_format_string::ConversionSpecifier::fArg:
      case analyze_format_string::ConversionSpecifier::FArg:
      case analyze_format_string::ConversionSpecifier::gArg:
      case analyze_format_string::ConversionSpecifier::GArg:
        Size += (Precision ? 0 : 1);
        break;
      }
    }
    assert(SpecifierLen <= Size && "no underflow");
    Size -= SpecifierLen;
    return true;
  }

  size_t getSizeLowerBound() const { return Size; }

private:
  static size_t computeFieldWidth(const analyze_printf::PrintfSpecifier &FS) {
    const analyze_format_string::OptionalAmount &FW = FS.getFieldWidth();
    size_t FieldWidth = 0;
    if (FW.getHowSpecified() == analyze_format_string::OptionalAmount::Constant)
      FieldWidth = FW.getConstantAmount();
    return FieldWidth;
  }

  static size_t computePrecision(const analyze_printf::PrintfSpecifier &FS) {
    const analyze_format_string::OptionalAmount &FW = FS.getPrecision();
    size_t Precision = 0;

    // See man 3 printf for default precision value based on the specifier.
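    // For example, "%d" defaults to a minimum of one digit and "%f" to six
    // digits after the decimal point, which is what the cases below assume.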
    switch (FW.getHowSpecified()) {
    case analyze_format_string::OptionalAmount::NotSpecified:
      switch (FS.getConversionSpecifier().getKind()) {
      default:
        break;
      case analyze_format_string::ConversionSpecifier::dArg: // %d
      case analyze_format_string::ConversionSpecifier::DArg: // %D
      case analyze_format_string::ConversionSpecifier::iArg: // %i
        Precision = 1;
        break;
      case analyze_format_string::ConversionSpecifier::oArg: // %o
      case analyze_format_string::ConversionSpecifier::OArg: // %O
      case analyze_format_string::ConversionSpecifier::uArg: // %u
      case analyze_format_string::ConversionSpecifier::UArg: // %U
      case analyze_format_string::ConversionSpecifier::xArg: // %x
      case analyze_format_string::ConversionSpecifier::XArg: // %X
        Precision = 1;
        break;
      case analyze_format_string::ConversionSpecifier::fArg: // %f
      case analyze_format_string::ConversionSpecifier::FArg: // %F
      case analyze_format_string::ConversionSpecifier::eArg: // %e
      case analyze_format_string::ConversionSpecifier::EArg: // %E
      case analyze_format_string::ConversionSpecifier::gArg: // %g
      case analyze_format_string::ConversionSpecifier::GArg: // %G
        Precision = 6;
        break;
      case analyze_format_string::ConversionSpecifier::pArg: // %p
        Precision = 1;
        break;
      }
      break;
    case analyze_format_string::OptionalAmount::Constant:
      Precision = FW.getConstantAmount();
      break;
    default:
      break;
    }
    return Precision;
  }
};

} // namespace

void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
                                               CallExpr *TheCall) {
  if (TheCall->isValueDependent() || TheCall->isTypeDependent() ||
      isConstantEvaluated())
    return;

  unsigned BuiltinID = FD->getBuiltinID(/*ConsiderWrappers=*/true);
  if (!BuiltinID)
    return;

  const TargetInfo &TI = getASTContext().getTargetInfo();
  unsigned SizeTypeWidth = TI.getTypeWidth(TI.getSizeType());

  auto ComputeExplicitObjectSizeArgument =
      [&](unsigned Index) -> Optional<llvm::APSInt> {
    Expr::EvalResult Result;
    Expr *SizeArg = TheCall->getArg(Index);
    if (!SizeArg->EvaluateAsInt(Result, getASTContext()))
      return llvm::None;
    return Result.Val.getInt();
  };

  auto ComputeSizeArgument = [&](unsigned Index) -> Optional<llvm::APSInt> {
    // If the parameter has a pass_object_size attribute, then we should use
    // its (potentially) more strict checking mode. Otherwise, conservatively
    // assume type 0.
    int BOSType = 0;
    // This check can fail for variadic functions.
    if (Index < FD->getNumParams()) {
      if (const auto *POS =
              FD->getParamDecl(Index)->getAttr<PassObjectSizeAttr>())
        BOSType = POS->getType();
    }

    const Expr *ObjArg = TheCall->getArg(Index);
    uint64_t Result;
    if (!ObjArg->tryEvaluateObjectSize(Result, getASTContext(), BOSType))
      return llvm::None;

    // Get the object size in the target's size_t width.
    return llvm::APSInt::getUnsigned(Result).extOrTrunc(SizeTypeWidth);
  };

  auto ComputeStrLenArgument = [&](unsigned Index) -> Optional<llvm::APSInt> {
    Expr *ObjArg = TheCall->getArg(Index);
    uint64_t Result;
    if (!ObjArg->tryEvaluateStrLen(Result, getASTContext()))
      return llvm::None;
    // Add 1 for null byte.
    return llvm::APSInt::getUnsigned(Result + 1).extOrTrunc(SizeTypeWidth);
  };

  Optional<llvm::APSInt> SourceSize;
  Optional<llvm::APSInt> DestinationSize;
  unsigned DiagID = 0;
  bool IsChkVariant = false;

  auto GetFunctionName = [&]() {
    StringRef FunctionName = getASTContext().BuiltinInfo.getName(BuiltinID);
    // Skim off the details of whichever builtin was called to produce a better
    // diagnostic, as it's unlikely that the user wrote the __builtin
    // explicitly.
    if (IsChkVariant) {
      FunctionName = FunctionName.drop_front(std::strlen("__builtin___"));
      FunctionName = FunctionName.drop_back(std::strlen("_chk"));
    } else if (FunctionName.startswith("__builtin_")) {
      FunctionName = FunctionName.drop_front(std::strlen("__builtin_"));
    }
    return FunctionName;
  };

  switch (BuiltinID) {
  default:
    return;
  case Builtin::BI__builtin_strcpy:
  case Builtin::BIstrcpy: {
    DiagID = diag::warn_fortify_strlen_overflow;
    SourceSize = ComputeStrLenArgument(1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }

  case Builtin::BI__builtin___strcpy_chk: {
    DiagID = diag::warn_fortify_strlen_overflow;
    SourceSize = ComputeStrLenArgument(1);
    DestinationSize = ComputeExplicitObjectSizeArgument(2);
    IsChkVariant = true;
    break;
  }

  case Builtin::BIscanf:
  case Builtin::BIfscanf:
  case Builtin::BIsscanf: {
    unsigned FormatIndex = 1;
    unsigned DataIndex = 2;
    if (BuiltinID == Builtin::BIscanf) {
      FormatIndex = 0;
      DataIndex = 1;
    }

    const auto *FormatExpr =
        TheCall->getArg(FormatIndex)->IgnoreParenImpCasts();

    const auto *Format = dyn_cast<StringLiteral>(FormatExpr);
    if (!Format)
      return;

    if (!Format->isAscii() && !Format->isUTF8())
      return;

    auto Diagnose = [&](unsigned ArgIndex, unsigned DestSize,
                        unsigned SourceSize) {
      DiagID = diag::warn_fortify_scanf_overflow;
      unsigned Index = ArgIndex + DataIndex;
      StringRef FunctionName = GetFunctionName();
      DiagRuntimeBehavior(TheCall->getArg(Index)->getBeginLoc(), TheCall,
                          PDiag(DiagID) << FunctionName << (Index + 1)
                                        << DestSize << SourceSize);
    };

    StringRef FormatStrRef = Format->getString();
    auto ShiftedComputeSizeArgument = [&](unsigned Index) {
      return ComputeSizeArgument(Index + DataIndex);
    };
    ScanfDiagnosticFormatHandler H(ShiftedComputeSizeArgument, Diagnose);
    const char *FormatBytes = FormatStrRef.data();
    const ConstantArrayType *T =
        Context.getAsConstantArrayType(Format->getType());
    assert(T && "String literal not of constant array type!");
    size_t TypeSize = T->getSize().getZExtValue();

    // In case there's a null byte somewhere.
    size_t StrLen =
        std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0));

    analyze_format_string::ParseScanfString(H, FormatBytes,
                                            FormatBytes + StrLen, getLangOpts(),
                                            Context.getTargetInfo());

    // Unlike the other cases, the diagnostic has already been issued here (it
    // refers to the argument number), so there is no need to continue.
    return;
  }

  case Builtin::BIsprintf:
  case Builtin::BI__builtin___sprintf_chk: {
    size_t FormatIndex = BuiltinID == Builtin::BIsprintf ? 1 : 3;
    auto *FormatExpr = TheCall->getArg(FormatIndex)->IgnoreParenImpCasts();

    if (auto *Format = dyn_cast<StringLiteral>(FormatExpr)) {

      if (!Format->isAscii() && !Format->isUTF8())
        return;

      StringRef FormatStrRef = Format->getString();
      EstimateSizeFormatHandler H(FormatStrRef);
      const char *FormatBytes = FormatStrRef.data();
      const ConstantArrayType *T =
          Context.getAsConstantArrayType(Format->getType());
      assert(T && "String literal not of constant array type!");
      size_t TypeSize = T->getSize().getZExtValue();

      // In case there's a null byte somewhere.
      size_t StrLen =
          std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0));
      if (!analyze_format_string::ParsePrintfString(
              H, FormatBytes, FormatBytes + StrLen, getLangOpts(),
              Context.getTargetInfo(), false)) {
        DiagID = diag::warn_fortify_source_format_overflow;
        SourceSize = llvm::APSInt::getUnsigned(H.getSizeLowerBound())
                         .extOrTrunc(SizeTypeWidth);
        if (BuiltinID == Builtin::BI__builtin___sprintf_chk) {
          DestinationSize = ComputeExplicitObjectSizeArgument(2);
          IsChkVariant = true;
        } else {
          DestinationSize = ComputeSizeArgument(0);
        }
        break;
      }
    }
    return;
  }
  case Builtin::BI__builtin___memcpy_chk:
  case Builtin::BI__builtin___memmove_chk:
  case Builtin::BI__builtin___memset_chk:
  case Builtin::BI__builtin___strlcat_chk:
  case Builtin::BI__builtin___strlcpy_chk:
  case Builtin::BI__builtin___strncat_chk:
  case Builtin::BI__builtin___strncpy_chk:
  case Builtin::BI__builtin___stpncpy_chk:
  case Builtin::BI__builtin___memccpy_chk:
  case Builtin::BI__builtin___mempcpy_chk: {
    DiagID = diag::warn_builtin_chk_overflow;
    SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 2);
    DestinationSize =
        ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
    IsChkVariant = true;
    break;
  }

  case Builtin::BI__builtin___snprintf_chk:
  case Builtin::BI__builtin___vsnprintf_chk: {
    DiagID = diag::warn_builtin_chk_overflow;
    SourceSize = ComputeExplicitObjectSizeArgument(1);
    DestinationSize = ComputeExplicitObjectSizeArgument(3);
    IsChkVariant = true;
    break;
  }

  case Builtin::BIstrncat:
  case Builtin::BI__builtin_strncat:
  case Builtin::BIstrncpy:
  case Builtin::BI__builtin_strncpy:
  case Builtin::BIstpncpy:
  case Builtin::BI__builtin_stpncpy: {
    // Whether these functions overflow depends on the runtime strlen of the
    // string, not just the buffer size, so emitting the "always overflow"
    // diagnostic isn't quite right. We should still diagnose passing a buffer
    // size larger than the destination buffer though; this is a runtime abort
    // in _FORTIFY_SOURCE mode, and is quite suspicious otherwise.
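    // So compare the explicit size argument (the last argument) against the
    // computed size of the destination object.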
    DiagID = diag::warn_fortify_source_size_mismatch;
    SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }

  case Builtin::BImemcpy:
  case Builtin::BI__builtin_memcpy:
  case Builtin::BImemmove:
  case Builtin::BI__builtin_memmove:
  case Builtin::BImemset:
  case Builtin::BI__builtin_memset:
  case Builtin::BImempcpy:
  case Builtin::BI__builtin_mempcpy: {
    DiagID = diag::warn_fortify_source_overflow;
    SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }
  case Builtin::BIsnprintf:
  case Builtin::BI__builtin_snprintf:
  case Builtin::BIvsnprintf:
  case Builtin::BI__builtin_vsnprintf: {
    DiagID = diag::warn_fortify_source_size_mismatch;
    SourceSize = ComputeExplicitObjectSizeArgument(1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }
  }

  if (!SourceSize || !DestinationSize ||
      SourceSize.getValue().ule(DestinationSize.getValue()))
    return;

  StringRef FunctionName = GetFunctionName();

  SmallString<16> DestinationStr;
  SmallString<16> SourceStr;
  DestinationSize->toString(DestinationStr, /*Radix=*/10);
  SourceSize->toString(SourceStr, /*Radix=*/10);
  DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
                      PDiag(DiagID)
                          << FunctionName << DestinationStr << SourceStr);
}

static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall,
                                     Scope::ScopeFlags NeededScopeFlags,
                                     unsigned DiagID) {
  // Scopes aren't available during instantiation. Fortunately, builtin
  // functions cannot be template args so they cannot be formed through
  // template instantiation. Therefore checking once during the parse is
  // sufficient.
  if (SemaRef.inTemplateInstantiation())
    return false;

  Scope *S = SemaRef.getCurScope();
  while (S && !S->isSEHExceptScope())
    S = S->getParent();
  if (!S || !(S->getFlags() & NeededScopeFlags)) {
    auto *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
    SemaRef.Diag(TheCall->getExprLoc(), DiagID)
        << DRE->getDecl()->getIdentifier();
    return true;
  }

  return false;
}

static inline bool isBlockPointer(Expr *Arg) {
  return Arg->getType()->isBlockPointerType();
}

/// OpenCL C v2.0, s6.13.17.2 - Checks that the block parameters are all local
/// void*, which is a requirement of device side enqueue.
static bool checkOpenCLBlockArgs(Sema &S, Expr *BlockArg) {
  const BlockPointerType *BPT =
      cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
  ArrayRef<QualType> Params =
      BPT->getPointeeType()->castAs<FunctionProtoType>()->getParamTypes();
  unsigned ArgCounter = 0;
  bool IllegalParams = false;
  // Iterate through the block parameters until either one is found that is not
  // a local void*, or the block is valid.
  for (ArrayRef<QualType>::iterator I = Params.begin(), E = Params.end();
       I != E; ++I, ++ArgCounter) {
    if (!(*I)->isPointerType() || !(*I)->getPointeeType()->isVoidType() ||
        (*I)->getPointeeType().getQualifiers().getAddressSpace() !=
            LangAS::opencl_local) {
      // Get the location of the error. If a block literal has been passed
      // (BlockExpr) then we can point straight to the offending argument,
      // else we just point to the variable reference.
      SourceLocation ErrorLoc;
      if (isa<BlockExpr>(BlockArg)) {
        BlockDecl *BD = cast<BlockExpr>(BlockArg)->getBlockDecl();
        ErrorLoc = BD->getParamDecl(ArgCounter)->getBeginLoc();
      } else if (isa<DeclRefExpr>(BlockArg)) {
        ErrorLoc = cast<DeclRefExpr>(BlockArg)->getBeginLoc();
      }
      S.Diag(ErrorLoc,
             diag::err_opencl_enqueue_kernel_blocks_non_local_void_args);
      IllegalParams = true;
    }
  }

  return IllegalParams;
}

static bool checkOpenCLSubgroupExt(Sema &S, CallExpr *Call) {
  if (!S.getOpenCLOptions().isSupported("cl_khr_subgroups", S.getLangOpts())) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_requires_extension)
        << 1 << Call->getDirectCallee() << "cl_khr_subgroups";
    return true;
  }
  return false;
}

static bool SemaOpenCLBuiltinNDRangeAndBlock(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  if (checkOpenCLSubgroupExt(S, TheCall))
    return true;

  // First argument is an ndrange_t type.
  Expr *NDRangeArg = TheCall->getArg(0);
  if (NDRangeArg->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
    S.Diag(NDRangeArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'ndrange_t'";
    return true;
  }

  Expr *BlockArg = TheCall->getArg(1);
  if (!isBlockPointer(BlockArg)) {
    S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "block";
    return true;
  }
  return checkOpenCLBlockArgs(S, BlockArg);
}

/// OpenCL C v2.0, s6.13.17.6 - Check the argument to the
/// get_kernel_work_group_size
/// and get_kernel_preferred_work_group_size_multiple builtin functions.
static bool SemaOpenCLBuiltinKernelWorkGroupSize(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  Expr *BlockArg = TheCall->getArg(0);
  if (!isBlockPointer(BlockArg)) {
    S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "block";
    return true;
  }
  return checkOpenCLBlockArgs(S, BlockArg);
}

/// Diagnose integer type and any valid implicit conversion to it.
static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E,
                                      const QualType &IntType);

static bool checkOpenCLEnqueueLocalSizeArgs(Sema &S, CallExpr *TheCall,
                                            unsigned Start, unsigned End) {
  bool IllegalParams = false;
  for (unsigned I = Start; I <= End; ++I)
    IllegalParams |= checkOpenCLEnqueueIntType(S, TheCall->getArg(I),
                                               S.Context.getSizeType());
  return IllegalParams;
}

/// OpenCL v2.0, s6.13.17.1 - Check that sizes are provided for all
/// 'local void*' parameters of the passed block.
static bool checkOpenCLEnqueueVariadicArgs(Sema &S, CallExpr *TheCall,
                                           Expr *BlockArg,
                                           unsigned NumNonVarArgs) {
  const BlockPointerType *BPT =
      cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
  unsigned NumBlockParams =
      BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams();
  unsigned TotalNumArgs = TheCall->getNumArgs();

  // For each argument passed to the block, a corresponding uint needs to
  // be passed to describe the size of the local memory.
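  // For example, a block taking two 'local void *' parameters requires exactly
  // two trailing size arguments.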
  if (TotalNumArgs != NumBlockParams + NumNonVarArgs) {
    S.Diag(TheCall->getBeginLoc(),
           diag::err_opencl_enqueue_kernel_local_size_args);
    return true;
  }

  // Check that the sizes of the local memory are specified by integers.
  return checkOpenCLEnqueueLocalSizeArgs(S, TheCall, NumNonVarArgs,
                                         TotalNumArgs - 1);
}

/// OpenCL C v2.0, s6.13.17 - Enqueue kernel function contains four different
/// overload formats specified in Table 6.13.17.1.
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    void (^block)(void))
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    uint num_events_in_wait_list,
///                    clk_event_t *event_wait_list,
///                    clk_event_t *event_ret,
///                    void (^block)(void))
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    void (^block)(local void*, ...),
///                    uint size0, ...)
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    uint num_events_in_wait_list,
///                    clk_event_t *event_wait_list,
///                    clk_event_t *event_ret,
///                    void (^block)(local void*, ...),
///                    uint size0, ...)
static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
  unsigned NumArgs = TheCall->getNumArgs();

  if (NumArgs < 4) {
    S.Diag(TheCall->getBeginLoc(),
           diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 4 << NumArgs;
    return true;
  }

  Expr *Arg0 = TheCall->getArg(0);
  Expr *Arg1 = TheCall->getArg(1);
  Expr *Arg2 = TheCall->getArg(2);
  Expr *Arg3 = TheCall->getArg(3);

  // First argument always needs to be a queue_t type.
  if (!Arg0->getType()->isQueueT()) {
    S.Diag(TheCall->getArg(0)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << S.Context.OCLQueueTy;
    return true;
  }

  // Second argument always needs to be a kernel_enqueue_flags_t enum value.
  if (!Arg1->getType()->isIntegerType()) {
    S.Diag(TheCall->getArg(1)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'kernel_enqueue_flags_t' (i.e. uint)";
    return true;
  }

  // Third argument is always an ndrange_t type.
  if (Arg2->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
    S.Diag(TheCall->getArg(2)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'ndrange_t'";
    return true;
  }

  // With four arguments, there is only one form that the function could be
  // called in: no events and no variable arguments.
  if (NumArgs == 4) {
    // check that the last argument is the right block type.
    if (!isBlockPointer(Arg3)) {
      S.Diag(Arg3->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    // we have a block type, check the prototype
    const BlockPointerType *BPT =
        cast<BlockPointerType>(Arg3->getType().getCanonicalType());
    if (BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams() >
        0) {
      S.Diag(Arg3->getBeginLoc(),
             diag::err_opencl_enqueue_kernel_blocks_no_args);
      return true;
    }
    return false;
  }
  // we can have block + varargs.
  if (isBlockPointer(Arg3))
    return (checkOpenCLBlockArgs(S, Arg3) ||
            checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg3, 4));
  // last two cases with either exactly 7 args or 7 args and varargs.
  if (NumArgs >= 7) {
    // check common block argument.
    Expr *Arg6 = TheCall->getArg(6);
    if (!isBlockPointer(Arg6)) {
      S.Diag(Arg6->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    if (checkOpenCLBlockArgs(S, Arg6))
      return true;

    // Fourth argument has to be any integer type.
    if (!Arg3->getType()->isIntegerType()) {
      S.Diag(TheCall->getArg(3)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "integer";
      return true;
    }
    // check remaining common arguments.
    Expr *Arg4 = TheCall->getArg(4);
    Expr *Arg5 = TheCall->getArg(5);

    // Fifth argument is always passed as a pointer to clk_event_t.
    if (!Arg4->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !Arg4->getType()->getPointeeOrArrayElementType()->isClkEventT()) {
      S.Diag(TheCall->getArg(4)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    // Sixth argument is always passed as a pointer to clk_event_t.
    if (!Arg5->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !(Arg5->getType()->isPointerType() &&
          Arg5->getType()->getPointeeType()->isClkEventT())) {
      S.Diag(TheCall->getArg(5)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    if (NumArgs == 7)
      return false;

    return checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg6, 7);
  }

  // None of the specific cases has been detected; give a generic error.
  S.Diag(TheCall->getBeginLoc(),
         diag::err_opencl_enqueue_kernel_incorrect_args);
  return true;
}

/// Returns the OpenCL access qualifier attribute of the given declaration.
static OpenCLAccessAttr *getOpenCLArgAccess(const Decl *D) {
  return D->getAttr<OpenCLAccessAttr>();
}

/// Returns true if the first argument is not a pipe, or if its access
/// qualifier does not match the builtin being called.
static bool checkOpenCLPipeArg(Sema &S, CallExpr *Call) {
  const Expr *Arg0 = Call->getArg(0);
  // First argument type should always be pipe.
  if (!Arg0->getType()->isPipeType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
        << Call->getDirectCallee() << Arg0->getSourceRange();
    return true;
  }
  OpenCLAccessAttr *AccessQual =
      getOpenCLArgAccess(cast<DeclRefExpr>(Arg0)->getDecl());
  // Validates the access qualifier is compatible with the call.
  // OpenCL v2.0 s6.13.16 - The access qualifiers for pipe should only be
  // read_only and write_only, and assumed to be read_only if no qualifier is
  // specified.
  switch (Call->getDirectCallee()->getBuiltinID()) {
  case Builtin::BIread_pipe:
  case Builtin::BIreserve_read_pipe:
  case Builtin::BIcommit_read_pipe:
  case Builtin::BIwork_group_reserve_read_pipe:
  case Builtin::BIsub_group_reserve_read_pipe:
  case Builtin::BIwork_group_commit_read_pipe:
  case Builtin::BIsub_group_commit_read_pipe:
    if (!(!AccessQual || AccessQual->isReadOnly())) {
      S.Diag(Arg0->getBeginLoc(),
             diag::err_opencl_builtin_pipe_invalid_access_modifier)
          << "read_only" << Arg0->getSourceRange();
      return true;
    }
    break;
  case Builtin::BIwrite_pipe:
  case Builtin::BIreserve_write_pipe:
  case Builtin::BIcommit_write_pipe:
  case Builtin::BIwork_group_reserve_write_pipe:
  case Builtin::BIsub_group_reserve_write_pipe:
  case Builtin::BIwork_group_commit_write_pipe:
  case Builtin::BIsub_group_commit_write_pipe:
    if (!(AccessQual && AccessQual->isWriteOnly())) {
      S.Diag(Arg0->getBeginLoc(),
             diag::err_opencl_builtin_pipe_invalid_access_modifier)
          << "write_only" << Arg0->getSourceRange();
      return true;
    }
    break;
  default:
    break;
  }
  return false;
}

/// Returns true if pipe element type is different from the pointer.
static bool checkOpenCLPipePacketType(Sema &S, CallExpr *Call, unsigned Idx) {
  const Expr *Arg0 = Call->getArg(0);
  const Expr *ArgIdx = Call->getArg(Idx);
  const PipeType *PipeTy = cast<PipeType>(Arg0->getType());
  const QualType EltTy = PipeTy->getElementType();
  const PointerType *ArgTy = ArgIdx->getType()->getAs<PointerType>();
  // The Idx argument should be a pointer and the type of the pointer and
  // the type of pipe element should also be the same.
  if (!ArgTy ||
      !S.Context.hasSameType(
          EltTy, ArgTy->getPointeeType()->getCanonicalTypeInternal())) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.getPointerType(EltTy)
        << ArgIdx->getType() << ArgIdx->getSourceRange();
    return true;
  }
  return false;
}

// Performs semantic analysis for the read/write_pipe call.
// \param S Reference to the semantic analyzer.
// \param Call A pointer to the builtin call.
// \return True if a semantic error has been found, false otherwise.
static bool SemaBuiltinRWPipe(Sema &S, CallExpr *Call) {
  // OpenCL v2.0 s6.13.16.2 - The built-in read/write
  // functions have two forms.
  switch (Call->getNumArgs()) {
  case 2:
    if (checkOpenCLPipeArg(S, Call))
      return true;
    // The call with 2 arguments should be
    // read/write_pipe(pipe T, T*).
    // Check packet type T.
    if (checkOpenCLPipePacketType(S, Call, 1))
      return true;
    break;

  case 4: {
    if (checkOpenCLPipeArg(S, Call))
      return true;
    // The call with 4 arguments should be
    // read/write_pipe(pipe T, reserve_id_t, uint, T*).
    // Check reserve_id_t.
    if (!Call->getArg(1)->getType()->isReserveIDT()) {
      S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
          << Call->getDirectCallee() << S.Context.OCLReserveIDTy
          << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
      return true;
    }

    // Check the index.
    const Expr *Arg2 = Call->getArg(2);
    if (!Arg2->getType()->isIntegerType() &&
        !Arg2->getType()->isUnsignedIntegerType()) {
      S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
          << Call->getDirectCallee() << S.Context.UnsignedIntTy
          << Arg2->getType() << Arg2->getSourceRange();
      return true;
    }

    // Check packet type T.
    if (checkOpenCLPipePacketType(S, Call, 3))
      return true;
  } break;
  default:
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_arg_num)
        << Call->getDirectCallee() << Call->getSourceRange();
    return true;
  }

  return false;
}

// Performs a semantic analysis on the {work_group_/sub_group_
// /_}reserve_{read/write}_pipe
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinReserveRWPipe(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 2))
    return true;

  if (checkOpenCLPipeArg(S, Call))
    return true;

  // Check the reserve size.
  if (!Call->getArg(1)->getType()->isIntegerType() &&
      !Call->getArg(1)->getType()->isUnsignedIntegerType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.UnsignedIntTy
        << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
    return true;
  }

  // Since the return type of the reserve_read/write_pipe built-in functions is
  // reserve_id_t, which is not defined in the builtin def file, we use int as
  // the return type and need to override the return type of these functions.
  Call->setType(S.Context.OCLReserveIDTy);

  return false;
}

// Performs a semantic analysis on {work_group_/sub_group_
// /_}commit_{read/write}_pipe
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinCommitRWPipe(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 2))
    return true;

  if (checkOpenCLPipeArg(S, Call))
    return true;

  // Check reserve_id_t.
  if (!Call->getArg(1)->getType()->isReserveIDT()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.OCLReserveIDTy
        << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
    return true;
  }

  return false;
}

// Performs a semantic analysis on the call to built-in Pipe
// Query Functions.
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinPipePackets(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 1))
    return true;

  if (!Call->getArg(0)->getType()->isPipeType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
        << Call->getDirectCallee() << Call->getArg(0)->getSourceRange();
    return true;
  }

  return false;
}

// OpenCL v2.0 s6.13.9 - Address space qualifier functions.
// Performs semantic analysis for the to_global/local/private call.
// \param S Reference to the semantic analyzer.
// \param BuiltinID ID of the builtin function.
// \param Call A pointer to the builtin call.
// \return True if a semantic error has been found, false otherwise.
static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID,
                                    CallExpr *Call) {
  if (checkArgCount(S, Call, 1))
    return true;

  auto RT = Call->getArg(0)->getType();
  if (!RT->isPointerType() ||
      RT->getPointeeType().getAddressSpace() == LangAS::opencl_constant) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_invalid_arg)
        << Call->getArg(0) << Call->getDirectCallee() << Call->getSourceRange();
    return true;
  }

  if (RT->getPointeeType().getAddressSpace() != LangAS::opencl_generic) {
    S.Diag(Call->getArg(0)->getBeginLoc(),
           diag::warn_opencl_generic_address_space_arg)
        << Call->getDirectCallee()->getNameInfo().getAsString()
        << Call->getArg(0)->getSourceRange();
  }

  RT = RT->getPointeeType();
  auto Qual = RT.getQualifiers();
  switch (BuiltinID) {
  case Builtin::BIto_global:
    Qual.setAddressSpace(LangAS::opencl_global);
    break;
  case Builtin::BIto_local:
    Qual.setAddressSpace(LangAS::opencl_local);
    break;
  case Builtin::BIto_private:
    Qual.setAddressSpace(LangAS::opencl_private);
    break;
  default:
    llvm_unreachable("Invalid builtin function");
  }
  Call->setType(S.Context.getPointerType(
      S.Context.getQualifiedType(RT.getUnqualifiedType(), Qual)));

  return false;
}

static ExprResult SemaBuiltinLaunder(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return ExprError();

  // Compute __builtin_launder's parameter type from the argument.
  // The parameter type is:
  //  * The type of the argument if it's not an array or function type,
  //    Otherwise,
  //  * The decayed argument type.
  QualType ParamTy = [&]() {
    QualType ArgTy = TheCall->getArg(0)->getType();
    if (const ArrayType *Ty = ArgTy->getAsArrayTypeUnsafe())
      return S.Context.getPointerType(Ty->getElementType());
    if (ArgTy->isFunctionType()) {
      return S.Context.getPointerType(ArgTy);
    }
    return ArgTy;
  }();

  TheCall->setType(ParamTy);

  auto DiagSelect = [&]() -> llvm::Optional<unsigned> {
    if (!ParamTy->isPointerType())
      return 0;
    if (ParamTy->isFunctionPointerType())
      return 1;
    if (ParamTy->isVoidPointerType())
      return 2;
    return llvm::Optional<unsigned>{};
  }();
  if (DiagSelect.hasValue()) {
    S.Diag(TheCall->getBeginLoc(), diag::err_builtin_launder_invalid_arg)
        << DiagSelect.getValue() << TheCall->getSourceRange();
    return ExprError();
  }

  // We either have an incomplete class type, or we have a class template
  // whose instantiation has not been forced. Example:
  //
  //   template <class T> struct Foo { T value; };
  //   Foo<int> *p = nullptr;
  //   auto *d = __builtin_launder(p);
  if (S.RequireCompleteType(TheCall->getBeginLoc(), ParamTy->getPointeeType(),
                            diag::err_incomplete_type))
    return ExprError();

  assert(ParamTy->getPointeeType()->isObjectType() &&
         "Unhandled non-object pointer case");

  InitializedEntity Entity =
      InitializedEntity::InitializeParameter(S.Context, ParamTy, false);
  ExprResult Arg =
      S.PerformCopyInitialization(Entity, SourceLocation(), TheCall->getArg(0));
  if (Arg.isInvalid())
    return ExprError();
  TheCall->setArg(0, Arg.get());

  return TheCall;
}

// Emit an error and return true if the current architecture is not in the list
// of supported architectures.
static bool
CheckBuiltinTargetSupport(Sema &S, unsigned BuiltinID, CallExpr *TheCall,
                          ArrayRef<llvm::Triple::ArchType> SupportedArchs) {
  llvm::Triple::ArchType CurArch =
      S.getASTContext().getTargetInfo().getTriple().getArch();
  if (llvm::is_contained(SupportedArchs, CurArch))
    return false;
  S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
      << TheCall->getSourceRange();
  return true;
}

static void CheckNonNullArgument(Sema &S, const Expr *ArgExpr,
                                 SourceLocation CallSiteLoc);

bool Sema::CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                      CallExpr *TheCall) {
  switch (TI.getTriple().getArch()) {
  default:
    // Some builtins don't require additional checking, so just consider these
    // acceptable.
    return false;
  case llvm::Triple::arm:
  case llvm::Triple::armeb:
  case llvm::Triple::thumb:
  case llvm::Triple::thumbeb:
    return CheckARMBuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::aarch64:
  case llvm::Triple::aarch64_32:
  case llvm::Triple::aarch64_be:
    return CheckAArch64BuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::bpfeb:
  case llvm::Triple::bpfel:
    return CheckBPFBuiltinFunctionCall(BuiltinID, TheCall);
  case llvm::Triple::hexagon:
    return CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall);
  case llvm::Triple::mips:
  case llvm::Triple::mipsel:
  case llvm::Triple::mips64:
  case llvm::Triple::mips64el:
    return CheckMipsBuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::systemz:
    return CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall);
  case llvm::Triple::x86:
  case llvm::Triple::x86_64:
    return CheckX86BuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::ppc:
  case llvm::Triple::ppcle:
  case llvm::Triple::ppc64:
  case llvm::Triple::ppc64le:
    return CheckPPCBuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::amdgcn:
    return CheckAMDGCNBuiltinFunctionCall(BuiltinID, TheCall);
  case llvm::Triple::riscv32:
  case llvm::Triple::riscv64:
    return CheckRISCVBuiltinFunctionCall(TI, BuiltinID, TheCall);
  }
}

ExprResult
Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
                               CallExpr *TheCall) {
  ExprResult TheCallResult(TheCall);

  // Find out if any arguments are required to be integer constant expressions.
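  // (ICEArguments is a bitmask: bit N set means argument N must be an integer
  // constant expression, as checked in the loop below.)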
  unsigned ICEArguments = 0;
  ASTContext::GetBuiltinTypeError Error;
  Context.GetBuiltinType(BuiltinID, Error, &ICEArguments);
  if (Error != ASTContext::GE_None)
    ICEArguments = 0;  // Don't diagnose previously diagnosed errors.

  // If any arguments are required to be ICEs, check and diagnose.
  for (unsigned ArgNo = 0; ICEArguments != 0; ++ArgNo) {
    // Skip arguments not required to be ICEs.
    if ((ICEArguments & (1 << ArgNo)) == 0) continue;

    llvm::APSInt Result;
    if (SemaBuiltinConstantArg(TheCall, ArgNo, Result))
      return true;
    ICEArguments &= ~(1 << ArgNo);
  }

  switch (BuiltinID) {
  case Builtin::BI__builtin___CFStringMakeConstantString:
    assert(TheCall->getNumArgs() == 1 &&
           "Wrong # arguments to builtin CFStringMakeConstantString");
    if (CheckObjCString(TheCall->getArg(0)))
      return ExprError();
    break;
  case Builtin::BI__builtin_ms_va_start:
  case Builtin::BI__builtin_stdarg_start:
  case Builtin::BI__builtin_va_start:
    if (SemaBuiltinVAStart(BuiltinID, TheCall))
      return ExprError();
    break;
  case Builtin::BI__va_start: {
    switch (Context.getTargetInfo().getTriple().getArch()) {
    case llvm::Triple::aarch64:
    case llvm::Triple::arm:
    case llvm::Triple::thumb:
      if (SemaBuiltinVAStartARMMicrosoft(TheCall))
        return ExprError();
      break;
    default:
      if (SemaBuiltinVAStart(BuiltinID, TheCall))
        return ExprError();
      break;
    }
    break;
  }

  // The acquire, release, and no fence variants are ARM and AArch64 only.
  case Builtin::BI_interlockedbittestandset_acq:
  case Builtin::BI_interlockedbittestandset_rel:
  case Builtin::BI_interlockedbittestandset_nf:
  case Builtin::BI_interlockedbittestandreset_acq:
  case Builtin::BI_interlockedbittestandreset_rel:
  case Builtin::BI_interlockedbittestandreset_nf:
    if (CheckBuiltinTargetSupport(
            *this, BuiltinID, TheCall,
            {llvm::Triple::arm, llvm::Triple::thumb, llvm::Triple::aarch64}))
      return ExprError();
    break;

  // The 64-bit bittest variants are x64, ARM, and AArch64 only.
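  // For example (illustrative), compiling
  //
  //   unsigned char r = _bittestandset64(&Bits, 1);
  //
  // for a 32-bit x86 target is rejected here with
  // err_builtin_target_unsupported, while the same call is accepted for
  // x86_64, ARM, and AArch64 targets.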
1641 case Builtin::BI_bittest64: 1642 case Builtin::BI_bittestandcomplement64: 1643 case Builtin::BI_bittestandreset64: 1644 case Builtin::BI_bittestandset64: 1645 case Builtin::BI_interlockedbittestandreset64: 1646 case Builtin::BI_interlockedbittestandset64: 1647 if (CheckBuiltinTargetSupport(*this, BuiltinID, TheCall, 1648 {llvm::Triple::x86_64, llvm::Triple::arm, 1649 llvm::Triple::thumb, llvm::Triple::aarch64})) 1650 return ExprError(); 1651 break; 1652 1653 case Builtin::BI__builtin_isgreater: 1654 case Builtin::BI__builtin_isgreaterequal: 1655 case Builtin::BI__builtin_isless: 1656 case Builtin::BI__builtin_islessequal: 1657 case Builtin::BI__builtin_islessgreater: 1658 case Builtin::BI__builtin_isunordered: 1659 if (SemaBuiltinUnorderedCompare(TheCall)) 1660 return ExprError(); 1661 break; 1662 case Builtin::BI__builtin_fpclassify: 1663 if (SemaBuiltinFPClassification(TheCall, 6)) 1664 return ExprError(); 1665 break; 1666 case Builtin::BI__builtin_isfinite: 1667 case Builtin::BI__builtin_isinf: 1668 case Builtin::BI__builtin_isinf_sign: 1669 case Builtin::BI__builtin_isnan: 1670 case Builtin::BI__builtin_isnormal: 1671 case Builtin::BI__builtin_signbit: 1672 case Builtin::BI__builtin_signbitf: 1673 case Builtin::BI__builtin_signbitl: 1674 if (SemaBuiltinFPClassification(TheCall, 1)) 1675 return ExprError(); 1676 break; 1677 case Builtin::BI__builtin_shufflevector: 1678 return SemaBuiltinShuffleVector(TheCall); 1679 // TheCall will be freed by the smart pointer here, but that's fine, since 1680 // SemaBuiltinShuffleVector guts it, but then doesn't release it. 1681 case Builtin::BI__builtin_prefetch: 1682 if (SemaBuiltinPrefetch(TheCall)) 1683 return ExprError(); 1684 break; 1685 case Builtin::BI__builtin_alloca_with_align: 1686 if (SemaBuiltinAllocaWithAlign(TheCall)) 1687 return ExprError(); 1688 LLVM_FALLTHROUGH; 1689 case Builtin::BI__builtin_alloca: 1690 Diag(TheCall->getBeginLoc(), diag::warn_alloca) 1691 << TheCall->getDirectCallee(); 1692 break; 1693 case Builtin::BI__arithmetic_fence: 1694 if (SemaBuiltinArithmeticFence(TheCall)) 1695 return ExprError(); 1696 break; 1697 case Builtin::BI__assume: 1698 case Builtin::BI__builtin_assume: 1699 if (SemaBuiltinAssume(TheCall)) 1700 return ExprError(); 1701 break; 1702 case Builtin::BI__builtin_assume_aligned: 1703 if (SemaBuiltinAssumeAligned(TheCall)) 1704 return ExprError(); 1705 break; 1706 case Builtin::BI__builtin_dynamic_object_size: 1707 case Builtin::BI__builtin_object_size: 1708 if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 3)) 1709 return ExprError(); 1710 break; 1711 case Builtin::BI__builtin_longjmp: 1712 if (SemaBuiltinLongjmp(TheCall)) 1713 return ExprError(); 1714 break; 1715 case Builtin::BI__builtin_setjmp: 1716 if (SemaBuiltinSetjmp(TheCall)) 1717 return ExprError(); 1718 break; 1719 case Builtin::BI__builtin_classify_type: 1720 if (checkArgCount(*this, TheCall, 1)) return true; 1721 TheCall->setType(Context.IntTy); 1722 break; 1723 case Builtin::BI__builtin_complex: 1724 if (SemaBuiltinComplex(TheCall)) 1725 return ExprError(); 1726 break; 1727 case Builtin::BI__builtin_constant_p: { 1728 if (checkArgCount(*this, TheCall, 1)) return true; 1729 ExprResult Arg = DefaultFunctionArrayLvalueConversion(TheCall->getArg(0)); 1730 if (Arg.isInvalid()) return true; 1731 TheCall->setArg(0, Arg.get()); 1732 TheCall->setType(Context.IntTy); 1733 break; 1734 } 1735 case Builtin::BI__builtin_launder: 1736 return SemaBuiltinLaunder(*this, TheCall); 1737 case Builtin::BI__sync_fetch_and_add: 1738 case 
Builtin::BI__sync_fetch_and_add_1: 1739 case Builtin::BI__sync_fetch_and_add_2: 1740 case Builtin::BI__sync_fetch_and_add_4: 1741 case Builtin::BI__sync_fetch_and_add_8: 1742 case Builtin::BI__sync_fetch_and_add_16: 1743 case Builtin::BI__sync_fetch_and_sub: 1744 case Builtin::BI__sync_fetch_and_sub_1: 1745 case Builtin::BI__sync_fetch_and_sub_2: 1746 case Builtin::BI__sync_fetch_and_sub_4: 1747 case Builtin::BI__sync_fetch_and_sub_8: 1748 case Builtin::BI__sync_fetch_and_sub_16: 1749 case Builtin::BI__sync_fetch_and_or: 1750 case Builtin::BI__sync_fetch_and_or_1: 1751 case Builtin::BI__sync_fetch_and_or_2: 1752 case Builtin::BI__sync_fetch_and_or_4: 1753 case Builtin::BI__sync_fetch_and_or_8: 1754 case Builtin::BI__sync_fetch_and_or_16: 1755 case Builtin::BI__sync_fetch_and_and: 1756 case Builtin::BI__sync_fetch_and_and_1: 1757 case Builtin::BI__sync_fetch_and_and_2: 1758 case Builtin::BI__sync_fetch_and_and_4: 1759 case Builtin::BI__sync_fetch_and_and_8: 1760 case Builtin::BI__sync_fetch_and_and_16: 1761 case Builtin::BI__sync_fetch_and_xor: 1762 case Builtin::BI__sync_fetch_and_xor_1: 1763 case Builtin::BI__sync_fetch_and_xor_2: 1764 case Builtin::BI__sync_fetch_and_xor_4: 1765 case Builtin::BI__sync_fetch_and_xor_8: 1766 case Builtin::BI__sync_fetch_and_xor_16: 1767 case Builtin::BI__sync_fetch_and_nand: 1768 case Builtin::BI__sync_fetch_and_nand_1: 1769 case Builtin::BI__sync_fetch_and_nand_2: 1770 case Builtin::BI__sync_fetch_and_nand_4: 1771 case Builtin::BI__sync_fetch_and_nand_8: 1772 case Builtin::BI__sync_fetch_and_nand_16: 1773 case Builtin::BI__sync_add_and_fetch: 1774 case Builtin::BI__sync_add_and_fetch_1: 1775 case Builtin::BI__sync_add_and_fetch_2: 1776 case Builtin::BI__sync_add_and_fetch_4: 1777 case Builtin::BI__sync_add_and_fetch_8: 1778 case Builtin::BI__sync_add_and_fetch_16: 1779 case Builtin::BI__sync_sub_and_fetch: 1780 case Builtin::BI__sync_sub_and_fetch_1: 1781 case Builtin::BI__sync_sub_and_fetch_2: 1782 case Builtin::BI__sync_sub_and_fetch_4: 1783 case Builtin::BI__sync_sub_and_fetch_8: 1784 case Builtin::BI__sync_sub_and_fetch_16: 1785 case Builtin::BI__sync_and_and_fetch: 1786 case Builtin::BI__sync_and_and_fetch_1: 1787 case Builtin::BI__sync_and_and_fetch_2: 1788 case Builtin::BI__sync_and_and_fetch_4: 1789 case Builtin::BI__sync_and_and_fetch_8: 1790 case Builtin::BI__sync_and_and_fetch_16: 1791 case Builtin::BI__sync_or_and_fetch: 1792 case Builtin::BI__sync_or_and_fetch_1: 1793 case Builtin::BI__sync_or_and_fetch_2: 1794 case Builtin::BI__sync_or_and_fetch_4: 1795 case Builtin::BI__sync_or_and_fetch_8: 1796 case Builtin::BI__sync_or_and_fetch_16: 1797 case Builtin::BI__sync_xor_and_fetch: 1798 case Builtin::BI__sync_xor_and_fetch_1: 1799 case Builtin::BI__sync_xor_and_fetch_2: 1800 case Builtin::BI__sync_xor_and_fetch_4: 1801 case Builtin::BI__sync_xor_and_fetch_8: 1802 case Builtin::BI__sync_xor_and_fetch_16: 1803 case Builtin::BI__sync_nand_and_fetch: 1804 case Builtin::BI__sync_nand_and_fetch_1: 1805 case Builtin::BI__sync_nand_and_fetch_2: 1806 case Builtin::BI__sync_nand_and_fetch_4: 1807 case Builtin::BI__sync_nand_and_fetch_8: 1808 case Builtin::BI__sync_nand_and_fetch_16: 1809 case Builtin::BI__sync_val_compare_and_swap: 1810 case Builtin::BI__sync_val_compare_and_swap_1: 1811 case Builtin::BI__sync_val_compare_and_swap_2: 1812 case Builtin::BI__sync_val_compare_and_swap_4: 1813 case Builtin::BI__sync_val_compare_and_swap_8: 1814 case Builtin::BI__sync_val_compare_and_swap_16: 1815 case Builtin::BI__sync_bool_compare_and_swap: 1816 case 
Builtin::BI__sync_bool_compare_and_swap_1: 1817 case Builtin::BI__sync_bool_compare_and_swap_2: 1818 case Builtin::BI__sync_bool_compare_and_swap_4: 1819 case Builtin::BI__sync_bool_compare_and_swap_8: 1820 case Builtin::BI__sync_bool_compare_and_swap_16: 1821 case Builtin::BI__sync_lock_test_and_set: 1822 case Builtin::BI__sync_lock_test_and_set_1: 1823 case Builtin::BI__sync_lock_test_and_set_2: 1824 case Builtin::BI__sync_lock_test_and_set_4: 1825 case Builtin::BI__sync_lock_test_and_set_8: 1826 case Builtin::BI__sync_lock_test_and_set_16: 1827 case Builtin::BI__sync_lock_release: 1828 case Builtin::BI__sync_lock_release_1: 1829 case Builtin::BI__sync_lock_release_2: 1830 case Builtin::BI__sync_lock_release_4: 1831 case Builtin::BI__sync_lock_release_8: 1832 case Builtin::BI__sync_lock_release_16: 1833 case Builtin::BI__sync_swap: 1834 case Builtin::BI__sync_swap_1: 1835 case Builtin::BI__sync_swap_2: 1836 case Builtin::BI__sync_swap_4: 1837 case Builtin::BI__sync_swap_8: 1838 case Builtin::BI__sync_swap_16: 1839 return SemaBuiltinAtomicOverloaded(TheCallResult); 1840 case Builtin::BI__sync_synchronize: 1841 Diag(TheCall->getBeginLoc(), diag::warn_atomic_implicit_seq_cst) 1842 << TheCall->getCallee()->getSourceRange(); 1843 break; 1844 case Builtin::BI__builtin_nontemporal_load: 1845 case Builtin::BI__builtin_nontemporal_store: 1846 return SemaBuiltinNontemporalOverloaded(TheCallResult); 1847 case Builtin::BI__builtin_memcpy_inline: { 1848 clang::Expr *SizeOp = TheCall->getArg(2); 1849 // We warn about copying to or from `nullptr` pointers when `size` is 1850 // greater than 0. When `size` is value dependent we cannot evaluate its 1851 // value so we bail out. 1852 if (SizeOp->isValueDependent()) 1853 break; 1854 if (!SizeOp->EvaluateKnownConstInt(Context).isZero()) { 1855 CheckNonNullArgument(*this, TheCall->getArg(0), TheCall->getExprLoc()); 1856 CheckNonNullArgument(*this, TheCall->getArg(1), TheCall->getExprLoc()); 1857 } 1858 break; 1859 } 1860 #define BUILTIN(ID, TYPE, ATTRS) 1861 #define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \ 1862 case Builtin::BI##ID: \ 1863 return SemaAtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID); 1864 #include "clang/Basic/Builtins.def" 1865 case Builtin::BI__annotation: 1866 if (SemaBuiltinMSVCAnnotation(*this, TheCall)) 1867 return ExprError(); 1868 break; 1869 case Builtin::BI__builtin_annotation: 1870 if (SemaBuiltinAnnotation(*this, TheCall)) 1871 return ExprError(); 1872 break; 1873 case Builtin::BI__builtin_addressof: 1874 if (SemaBuiltinAddressof(*this, TheCall)) 1875 return ExprError(); 1876 break; 1877 case Builtin::BI__builtin_is_aligned: 1878 case Builtin::BI__builtin_align_up: 1879 case Builtin::BI__builtin_align_down: 1880 if (SemaBuiltinAlignment(*this, TheCall, BuiltinID)) 1881 return ExprError(); 1882 break; 1883 case Builtin::BI__builtin_add_overflow: 1884 case Builtin::BI__builtin_sub_overflow: 1885 case Builtin::BI__builtin_mul_overflow: 1886 if (SemaBuiltinOverflow(*this, TheCall, BuiltinID)) 1887 return ExprError(); 1888 break; 1889 case Builtin::BI__builtin_operator_new: 1890 case Builtin::BI__builtin_operator_delete: { 1891 bool IsDelete = BuiltinID == Builtin::BI__builtin_operator_delete; 1892 ExprResult Res = 1893 SemaBuiltinOperatorNewDeleteOverloaded(TheCallResult, IsDelete); 1894 if (Res.isInvalid()) 1895 CorrectDelayedTyposInExpr(TheCallResult.get()); 1896 return Res; 1897 } 1898 case Builtin::BI__builtin_dump_struct: { 1899 // We first want to ensure we are called with 2 arguments 1900 if (checkArgCount(*this, TheCall, 2)) 
1901 return ExprError(); 1902 // Ensure that the first argument is of type 'struct XX *' 1903 const Expr *PtrArg = TheCall->getArg(0)->IgnoreParenImpCasts(); 1904 const QualType PtrArgType = PtrArg->getType(); 1905 if (!PtrArgType->isPointerType() || 1906 !PtrArgType->getPointeeType()->isRecordType()) { 1907 Diag(PtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 1908 << PtrArgType << "structure pointer" << 1 << 0 << 3 << 1 << PtrArgType 1909 << "structure pointer"; 1910 return ExprError(); 1911 } 1912 1913 // Ensure that the second argument is of type 'FunctionType' 1914 const Expr *FnPtrArg = TheCall->getArg(1)->IgnoreImpCasts(); 1915 const QualType FnPtrArgType = FnPtrArg->getType(); 1916 if (!FnPtrArgType->isPointerType()) { 1917 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 1918 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2 1919 << FnPtrArgType << "'int (*)(const char *, ...)'"; 1920 return ExprError(); 1921 } 1922 1923 const auto *FuncType = 1924 FnPtrArgType->getPointeeType()->getAs<FunctionType>(); 1925 1926 if (!FuncType) { 1927 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 1928 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2 1929 << FnPtrArgType << "'int (*)(const char *, ...)'"; 1930 return ExprError(); 1931 } 1932 1933 if (const auto *FT = dyn_cast<FunctionProtoType>(FuncType)) { 1934 if (!FT->getNumParams()) { 1935 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 1936 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 1937 << 2 << FnPtrArgType << "'int (*)(const char *, ...)'"; 1938 return ExprError(); 1939 } 1940 QualType PT = FT->getParamType(0); 1941 if (!FT->isVariadic() || FT->getReturnType() != Context.IntTy || 1942 !PT->isPointerType() || !PT->getPointeeType()->isCharType() || 1943 !PT->getPointeeType().isConstQualified()) { 1944 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 1945 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 1946 << 2 << FnPtrArgType << "'int (*)(const char *, ...)'"; 1947 return ExprError(); 1948 } 1949 } 1950 1951 TheCall->setType(Context.IntTy); 1952 break; 1953 } 1954 case Builtin::BI__builtin_expect_with_probability: { 1955 // We first want to ensure we are called with 3 arguments 1956 if (checkArgCount(*this, TheCall, 3)) 1957 return ExprError(); 1958 // then check probability is constant float in range [0.0, 1.0] 1959 const Expr *ProbArg = TheCall->getArg(2); 1960 SmallVector<PartialDiagnosticAt, 8> Notes; 1961 Expr::EvalResult Eval; 1962 Eval.Diag = &Notes; 1963 if ((!ProbArg->EvaluateAsConstantExpr(Eval, Context)) || 1964 !Eval.Val.isFloat()) { 1965 Diag(ProbArg->getBeginLoc(), diag::err_probability_not_constant_float) 1966 << ProbArg->getSourceRange(); 1967 for (const PartialDiagnosticAt &PDiag : Notes) 1968 Diag(PDiag.first, PDiag.second); 1969 return ExprError(); 1970 } 1971 llvm::APFloat Probability = Eval.Val.getFloat(); 1972 bool LoseInfo = false; 1973 Probability.convert(llvm::APFloat::IEEEdouble(), 1974 llvm::RoundingMode::Dynamic, &LoseInfo); 1975 if (!(Probability >= llvm::APFloat(0.0) && 1976 Probability <= llvm::APFloat(1.0))) { 1977 Diag(ProbArg->getBeginLoc(), diag::err_probability_out_of_range) 1978 << ProbArg->getSourceRange(); 1979 return ExprError(); 1980 } 1981 break; 1982 } 1983 case Builtin::BI__builtin_preserve_access_index: 1984 if (SemaBuiltinPreserveAI(*this, TheCall)) 1985 return ExprError(); 1986 break; 1987 case 
Builtin::BI__builtin_call_with_static_chain: 1988 if (SemaBuiltinCallWithStaticChain(*this, TheCall)) 1989 return ExprError(); 1990 break; 1991 case Builtin::BI__exception_code: 1992 case Builtin::BI_exception_code: 1993 if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHExceptScope, 1994 diag::err_seh___except_block)) 1995 return ExprError(); 1996 break; 1997 case Builtin::BI__exception_info: 1998 case Builtin::BI_exception_info: 1999 if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHFilterScope, 2000 diag::err_seh___except_filter)) 2001 return ExprError(); 2002 break; 2003 case Builtin::BI__GetExceptionInfo: 2004 if (checkArgCount(*this, TheCall, 1)) 2005 return ExprError(); 2006 2007 if (CheckCXXThrowOperand( 2008 TheCall->getBeginLoc(), 2009 Context.getExceptionObjectType(FDecl->getParamDecl(0)->getType()), 2010 TheCall)) 2011 return ExprError(); 2012 2013 TheCall->setType(Context.VoidPtrTy); 2014 break; 2015 // OpenCL v2.0, s6.13.16 - Pipe functions 2016 case Builtin::BIread_pipe: 2017 case Builtin::BIwrite_pipe: 2018 // Since those two functions are declared with var args, we need a semantic 2019 // check for the argument. 2020 if (SemaBuiltinRWPipe(*this, TheCall)) 2021 return ExprError(); 2022 break; 2023 case Builtin::BIreserve_read_pipe: 2024 case Builtin::BIreserve_write_pipe: 2025 case Builtin::BIwork_group_reserve_read_pipe: 2026 case Builtin::BIwork_group_reserve_write_pipe: 2027 if (SemaBuiltinReserveRWPipe(*this, TheCall)) 2028 return ExprError(); 2029 break; 2030 case Builtin::BIsub_group_reserve_read_pipe: 2031 case Builtin::BIsub_group_reserve_write_pipe: 2032 if (checkOpenCLSubgroupExt(*this, TheCall) || 2033 SemaBuiltinReserveRWPipe(*this, TheCall)) 2034 return ExprError(); 2035 break; 2036 case Builtin::BIcommit_read_pipe: 2037 case Builtin::BIcommit_write_pipe: 2038 case Builtin::BIwork_group_commit_read_pipe: 2039 case Builtin::BIwork_group_commit_write_pipe: 2040 if (SemaBuiltinCommitRWPipe(*this, TheCall)) 2041 return ExprError(); 2042 break; 2043 case Builtin::BIsub_group_commit_read_pipe: 2044 case Builtin::BIsub_group_commit_write_pipe: 2045 if (checkOpenCLSubgroupExt(*this, TheCall) || 2046 SemaBuiltinCommitRWPipe(*this, TheCall)) 2047 return ExprError(); 2048 break; 2049 case Builtin::BIget_pipe_num_packets: 2050 case Builtin::BIget_pipe_max_packets: 2051 if (SemaBuiltinPipePackets(*this, TheCall)) 2052 return ExprError(); 2053 break; 2054 case Builtin::BIto_global: 2055 case Builtin::BIto_local: 2056 case Builtin::BIto_private: 2057 if (SemaOpenCLBuiltinToAddr(*this, BuiltinID, TheCall)) 2058 return ExprError(); 2059 break; 2060 // OpenCL v2.0, s6.13.17 - Enqueue kernel functions. 
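  // A typical well-formed OpenCL 2.0 call (for illustration only) is:
  //
  //   enqueue_kernel(get_default_queue(), CLK_ENQUEUE_FLAGS_WAIT_KERNEL,
  //                  ndrange_1D(global_size), ^{ /* kernel block */ });
  //
  // The detailed argument checking lives in SemaOpenCLBuiltinEnqueueKernel.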
2061 case Builtin::BIenqueue_kernel: 2062 if (SemaOpenCLBuiltinEnqueueKernel(*this, TheCall)) 2063 return ExprError(); 2064 break; 2065 case Builtin::BIget_kernel_work_group_size: 2066 case Builtin::BIget_kernel_preferred_work_group_size_multiple: 2067 if (SemaOpenCLBuiltinKernelWorkGroupSize(*this, TheCall)) 2068 return ExprError(); 2069 break; 2070 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange: 2071 case Builtin::BIget_kernel_sub_group_count_for_ndrange: 2072 if (SemaOpenCLBuiltinNDRangeAndBlock(*this, TheCall)) 2073 return ExprError(); 2074 break; 2075 case Builtin::BI__builtin_os_log_format: 2076 Cleanup.setExprNeedsCleanups(true); 2077 LLVM_FALLTHROUGH; 2078 case Builtin::BI__builtin_os_log_format_buffer_size: 2079 if (SemaBuiltinOSLogFormat(TheCall)) 2080 return ExprError(); 2081 break; 2082 case Builtin::BI__builtin_frame_address: 2083 case Builtin::BI__builtin_return_address: { 2084 if (SemaBuiltinConstantArgRange(TheCall, 0, 0, 0xFFFF)) 2085 return ExprError(); 2086 2087 // -Wframe-address warning if non-zero passed to builtin 2088 // return/frame address. 2089 Expr::EvalResult Result; 2090 if (!TheCall->getArg(0)->isValueDependent() && 2091 TheCall->getArg(0)->EvaluateAsInt(Result, getASTContext()) && 2092 Result.Val.getInt() != 0) 2093 Diag(TheCall->getBeginLoc(), diag::warn_frame_address) 2094 << ((BuiltinID == Builtin::BI__builtin_return_address) 2095 ? "__builtin_return_address" 2096 : "__builtin_frame_address") 2097 << TheCall->getSourceRange(); 2098 break; 2099 } 2100 2101 case Builtin::BI__builtin_elementwise_abs: 2102 if (SemaBuiltinElementwiseMathOneArg(TheCall)) 2103 return ExprError(); 2104 break; 2105 case Builtin::BI__builtin_elementwise_min: 2106 case Builtin::BI__builtin_elementwise_max: 2107 if (SemaBuiltinElementwiseMath(TheCall)) 2108 return ExprError(); 2109 break; 2110 case Builtin::BI__builtin_reduce_max: 2111 case Builtin::BI__builtin_reduce_min: 2112 if (SemaBuiltinReduceMath(TheCall)) 2113 return ExprError(); 2114 break; 2115 case Builtin::BI__builtin_matrix_transpose: 2116 return SemaBuiltinMatrixTranspose(TheCall, TheCallResult); 2117 2118 case Builtin::BI__builtin_matrix_column_major_load: 2119 return SemaBuiltinMatrixColumnMajorLoad(TheCall, TheCallResult); 2120 2121 case Builtin::BI__builtin_matrix_column_major_store: 2122 return SemaBuiltinMatrixColumnMajorStore(TheCall, TheCallResult); 2123 2124 case Builtin::BI__builtin_get_device_side_mangled_name: { 2125 auto Check = [](CallExpr *TheCall) { 2126 if (TheCall->getNumArgs() != 1) 2127 return false; 2128 auto *DRE = dyn_cast<DeclRefExpr>(TheCall->getArg(0)->IgnoreImpCasts()); 2129 if (!DRE) 2130 return false; 2131 auto *D = DRE->getDecl(); 2132 if (!isa<FunctionDecl>(D) && !isa<VarDecl>(D)) 2133 return false; 2134 return D->hasAttr<CUDAGlobalAttr>() || D->hasAttr<CUDADeviceAttr>() || 2135 D->hasAttr<CUDAConstantAttr>() || D->hasAttr<HIPManagedAttr>(); 2136 }; 2137 if (!Check(TheCall)) { 2138 Diag(TheCall->getBeginLoc(), 2139 diag::err_hip_invalid_args_builtin_mangled_name); 2140 return ExprError(); 2141 } 2142 } 2143 } 2144 2145 // Since the target specific builtins for each arch overlap, only check those 2146 // of the arch we are compiling for. 
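  // When an aux target is present (for instance, the host x86_64 target
  // during CUDA, HIP, or OpenMP device compilation), a builtin belonging to
  // that target is mapped back to its aux builtin ID and validated against
  // the aux TargetInfo. For illustration, an __builtin_ia32_pause() reaching
  // this point during device compilation would be checked under the x86
  // rules here rather than the device's.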
2147 if (Context.BuiltinInfo.isTSBuiltin(BuiltinID)) { 2148 if (Context.BuiltinInfo.isAuxBuiltinID(BuiltinID)) { 2149 assert(Context.getAuxTargetInfo() && 2150 "Aux Target Builtin, but not an aux target?"); 2151 2152 if (CheckTSBuiltinFunctionCall( 2153 *Context.getAuxTargetInfo(), 2154 Context.BuiltinInfo.getAuxBuiltinID(BuiltinID), TheCall)) 2155 return ExprError(); 2156 } else { 2157 if (CheckTSBuiltinFunctionCall(Context.getTargetInfo(), BuiltinID, 2158 TheCall)) 2159 return ExprError(); 2160 } 2161 } 2162 2163 return TheCallResult; 2164 } 2165 2166 // Get the valid immediate range for the specified NEON type code. 2167 static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) { 2168 NeonTypeFlags Type(t); 2169 int IsQuad = ForceQuad ? true : Type.isQuad(); 2170 switch (Type.getEltType()) { 2171 case NeonTypeFlags::Int8: 2172 case NeonTypeFlags::Poly8: 2173 return shift ? 7 : (8 << IsQuad) - 1; 2174 case NeonTypeFlags::Int16: 2175 case NeonTypeFlags::Poly16: 2176 return shift ? 15 : (4 << IsQuad) - 1; 2177 case NeonTypeFlags::Int32: 2178 return shift ? 31 : (2 << IsQuad) - 1; 2179 case NeonTypeFlags::Int64: 2180 case NeonTypeFlags::Poly64: 2181 return shift ? 63 : (1 << IsQuad) - 1; 2182 case NeonTypeFlags::Poly128: 2183 return shift ? 127 : (1 << IsQuad) - 1; 2184 case NeonTypeFlags::Float16: 2185 assert(!shift && "cannot shift float types!"); 2186 return (4 << IsQuad) - 1; 2187 case NeonTypeFlags::Float32: 2188 assert(!shift && "cannot shift float types!"); 2189 return (2 << IsQuad) - 1; 2190 case NeonTypeFlags::Float64: 2191 assert(!shift && "cannot shift float types!"); 2192 return (1 << IsQuad) - 1; 2193 case NeonTypeFlags::BFloat16: 2194 assert(!shift && "cannot shift float types!"); 2195 return (4 << IsQuad) - 1; 2196 } 2197 llvm_unreachable("Invalid NeonTypeFlag!"); 2198 } 2199 2200 /// getNeonEltType - Return the QualType corresponding to the elements of 2201 /// the vector type specified by the NeonTypeFlags. This is used to check 2202 /// the pointer arguments for Neon load/store intrinsics. 2203 static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context, 2204 bool IsPolyUnsigned, bool IsInt64Long) { 2205 switch (Flags.getEltType()) { 2206 case NeonTypeFlags::Int8: 2207 return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy; 2208 case NeonTypeFlags::Int16: 2209 return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy; 2210 case NeonTypeFlags::Int32: 2211 return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy; 2212 case NeonTypeFlags::Int64: 2213 if (IsInt64Long) 2214 return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy; 2215 else 2216 return Flags.isUnsigned() ? Context.UnsignedLongLongTy 2217 : Context.LongLongTy; 2218 case NeonTypeFlags::Poly8: 2219 return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy; 2220 case NeonTypeFlags::Poly16: 2221 return IsPolyUnsigned ? 
Context.UnsignedShortTy : Context.ShortTy; 2222 case NeonTypeFlags::Poly64: 2223 if (IsInt64Long) 2224 return Context.UnsignedLongTy; 2225 else 2226 return Context.UnsignedLongLongTy; 2227 case NeonTypeFlags::Poly128: 2228 break; 2229 case NeonTypeFlags::Float16: 2230 return Context.HalfTy; 2231 case NeonTypeFlags::Float32: 2232 return Context.FloatTy; 2233 case NeonTypeFlags::Float64: 2234 return Context.DoubleTy; 2235 case NeonTypeFlags::BFloat16: 2236 return Context.BFloat16Ty; 2237 } 2238 llvm_unreachable("Invalid NeonTypeFlag!"); 2239 } 2240 2241 bool Sema::CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 2242 // Range check SVE intrinsics that take immediate values. 2243 SmallVector<std::tuple<int,int,int>, 3> ImmChecks; 2244 2245 switch (BuiltinID) { 2246 default: 2247 return false; 2248 #define GET_SVE_IMMEDIATE_CHECK 2249 #include "clang/Basic/arm_sve_sema_rangechecks.inc" 2250 #undef GET_SVE_IMMEDIATE_CHECK 2251 } 2252 2253 // Perform all the immediate checks for this builtin call. 2254 bool HasError = false; 2255 for (auto &I : ImmChecks) { 2256 int ArgNum, CheckTy, ElementSizeInBits; 2257 std::tie(ArgNum, CheckTy, ElementSizeInBits) = I; 2258 2259 typedef bool(*OptionSetCheckFnTy)(int64_t Value); 2260 2261 // Function that checks whether the operand (ArgNum) is an immediate 2262 // that is one of the predefined values. 2263 auto CheckImmediateInSet = [&](OptionSetCheckFnTy CheckImm, 2264 int ErrDiag) -> bool { 2265 // We can't check the value of a dependent argument. 2266 Expr *Arg = TheCall->getArg(ArgNum); 2267 if (Arg->isTypeDependent() || Arg->isValueDependent()) 2268 return false; 2269 2270 // Check constant-ness first. 2271 llvm::APSInt Imm; 2272 if (SemaBuiltinConstantArg(TheCall, ArgNum, Imm)) 2273 return true; 2274 2275 if (!CheckImm(Imm.getSExtValue())) 2276 return Diag(TheCall->getBeginLoc(), ErrDiag) << Arg->getSourceRange(); 2277 return false; 2278 }; 2279 2280 switch ((SVETypeFlags::ImmCheckType)CheckTy) { 2281 case SVETypeFlags::ImmCheck0_31: 2282 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 31)) 2283 HasError = true; 2284 break; 2285 case SVETypeFlags::ImmCheck0_13: 2286 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 13)) 2287 HasError = true; 2288 break; 2289 case SVETypeFlags::ImmCheck1_16: 2290 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 16)) 2291 HasError = true; 2292 break; 2293 case SVETypeFlags::ImmCheck0_7: 2294 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 7)) 2295 HasError = true; 2296 break; 2297 case SVETypeFlags::ImmCheckExtract: 2298 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2299 (2048 / ElementSizeInBits) - 1)) 2300 HasError = true; 2301 break; 2302 case SVETypeFlags::ImmCheckShiftRight: 2303 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, ElementSizeInBits)) 2304 HasError = true; 2305 break; 2306 case SVETypeFlags::ImmCheckShiftRightNarrow: 2307 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 2308 ElementSizeInBits / 2)) 2309 HasError = true; 2310 break; 2311 case SVETypeFlags::ImmCheckShiftLeft: 2312 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2313 ElementSizeInBits - 1)) 2314 HasError = true; 2315 break; 2316 case SVETypeFlags::ImmCheckLaneIndex: 2317 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2318 (128 / (1 * ElementSizeInBits)) - 1)) 2319 HasError = true; 2320 break; 2321 case SVETypeFlags::ImmCheckLaneIndexCompRotate: 2322 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2323 (128 / (2 * ElementSizeInBits)) - 1)) 2324 HasError = true; 2325 break; 2326 case 
SVETypeFlags::ImmCheckLaneIndexDot: 2327 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2328 (128 / (4 * ElementSizeInBits)) - 1)) 2329 HasError = true; 2330 break; 2331 case SVETypeFlags::ImmCheckComplexRot90_270: 2332 if (CheckImmediateInSet([](int64_t V) { return V == 90 || V == 270; }, 2333 diag::err_rotation_argument_to_cadd)) 2334 HasError = true; 2335 break; 2336 case SVETypeFlags::ImmCheckComplexRotAll90: 2337 if (CheckImmediateInSet( 2338 [](int64_t V) { 2339 return V == 0 || V == 90 || V == 180 || V == 270; 2340 }, 2341 diag::err_rotation_argument_to_cmla)) 2342 HasError = true; 2343 break; 2344 case SVETypeFlags::ImmCheck0_1: 2345 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 1)) 2346 HasError = true; 2347 break; 2348 case SVETypeFlags::ImmCheck0_2: 2349 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2)) 2350 HasError = true; 2351 break; 2352 case SVETypeFlags::ImmCheck0_3: 2353 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 3)) 2354 HasError = true; 2355 break; 2356 } 2357 } 2358 2359 return HasError; 2360 } 2361 2362 bool Sema::CheckNeonBuiltinFunctionCall(const TargetInfo &TI, 2363 unsigned BuiltinID, CallExpr *TheCall) { 2364 llvm::APSInt Result; 2365 uint64_t mask = 0; 2366 unsigned TV = 0; 2367 int PtrArgNum = -1; 2368 bool HasConstPtr = false; 2369 switch (BuiltinID) { 2370 #define GET_NEON_OVERLOAD_CHECK 2371 #include "clang/Basic/arm_neon.inc" 2372 #include "clang/Basic/arm_fp16.inc" 2373 #undef GET_NEON_OVERLOAD_CHECK 2374 } 2375 2376 // For NEON intrinsics which are overloaded on vector element type, validate 2377 // the immediate which specifies which variant to emit. 2378 unsigned ImmArg = TheCall->getNumArgs()-1; 2379 if (mask) { 2380 if (SemaBuiltinConstantArg(TheCall, ImmArg, Result)) 2381 return true; 2382 2383 TV = Result.getLimitedValue(64); 2384 if ((TV > 63) || (mask & (1ULL << TV)) == 0) 2385 return Diag(TheCall->getBeginLoc(), diag::err_invalid_neon_type_code) 2386 << TheCall->getArg(ImmArg)->getSourceRange(); 2387 } 2388 2389 if (PtrArgNum >= 0) { 2390 // Check that pointer arguments have the specified type. 2391 Expr *Arg = TheCall->getArg(PtrArgNum); 2392 if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) 2393 Arg = ICE->getSubExpr(); 2394 ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg); 2395 QualType RHSTy = RHS.get()->getType(); 2396 2397 llvm::Triple::ArchType Arch = TI.getTriple().getArch(); 2398 bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 || 2399 Arch == llvm::Triple::aarch64_32 || 2400 Arch == llvm::Triple::aarch64_be; 2401 bool IsInt64Long = TI.getInt64Type() == TargetInfo::SignedLong; 2402 QualType EltTy = 2403 getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long); 2404 if (HasConstPtr) 2405 EltTy = EltTy.withConst(); 2406 QualType LHSTy = Context.getPointerType(EltTy); 2407 AssignConvertType ConvTy; 2408 ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS); 2409 if (RHS.isInvalid()) 2410 return true; 2411 if (DiagnoseAssignmentResult(ConvTy, Arg->getBeginLoc(), LHSTy, RHSTy, 2412 RHS.get(), AA_Assigning)) 2413 return true; 2414 } 2415 2416 // For NEON intrinsics which take an immediate value as part of the 2417 // instruction, range check them here. 
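  // The generated include below expands to cases that set the operand index
  // 'i' and the bounds 'l' and 'u' for each builtin. A rough sketch of one
  // such entry (not verbatim from the .inc file):
  //
  //   case NEON::BI__builtin_neon_vget_lane_i8: i = 1; l = 0; u = 7; break;
  //
  // i.e. the lane operand of an 8-lane vector must be a constant in [0, 7].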
2418 unsigned i = 0, l = 0, u = 0; 2419 switch (BuiltinID) { 2420 default: 2421 return false; 2422 #define GET_NEON_IMMEDIATE_CHECK 2423 #include "clang/Basic/arm_neon.inc" 2424 #include "clang/Basic/arm_fp16.inc" 2425 #undef GET_NEON_IMMEDIATE_CHECK 2426 } 2427 2428 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); 2429 } 2430 2431 bool Sema::CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 2432 switch (BuiltinID) { 2433 default: 2434 return false; 2435 #include "clang/Basic/arm_mve_builtin_sema.inc" 2436 } 2437 } 2438 2439 bool Sema::CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 2440 CallExpr *TheCall) { 2441 bool Err = false; 2442 switch (BuiltinID) { 2443 default: 2444 return false; 2445 #include "clang/Basic/arm_cde_builtin_sema.inc" 2446 } 2447 2448 if (Err) 2449 return true; 2450 2451 return CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), /*WantCDE*/ true); 2452 } 2453 2454 bool Sema::CheckARMCoprocessorImmediate(const TargetInfo &TI, 2455 const Expr *CoprocArg, bool WantCDE) { 2456 if (isConstantEvaluated()) 2457 return false; 2458 2459 // We can't check the value of a dependent argument. 2460 if (CoprocArg->isTypeDependent() || CoprocArg->isValueDependent()) 2461 return false; 2462 2463 llvm::APSInt CoprocNoAP = *CoprocArg->getIntegerConstantExpr(Context); 2464 int64_t CoprocNo = CoprocNoAP.getExtValue(); 2465 assert(CoprocNo >= 0 && "Coprocessor immediate must be non-negative"); 2466 2467 uint32_t CDECoprocMask = TI.getARMCDECoprocMask(); 2468 bool IsCDECoproc = CoprocNo <= 7 && (CDECoprocMask & (1 << CoprocNo)); 2469 2470 if (IsCDECoproc != WantCDE) 2471 return Diag(CoprocArg->getBeginLoc(), diag::err_arm_invalid_coproc) 2472 << (int)CoprocNo << (int)WantCDE << CoprocArg->getSourceRange(); 2473 2474 return false; 2475 } 2476 2477 bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, 2478 unsigned MaxWidth) { 2479 assert((BuiltinID == ARM::BI__builtin_arm_ldrex || 2480 BuiltinID == ARM::BI__builtin_arm_ldaex || 2481 BuiltinID == ARM::BI__builtin_arm_strex || 2482 BuiltinID == ARM::BI__builtin_arm_stlex || 2483 BuiltinID == AArch64::BI__builtin_arm_ldrex || 2484 BuiltinID == AArch64::BI__builtin_arm_ldaex || 2485 BuiltinID == AArch64::BI__builtin_arm_strex || 2486 BuiltinID == AArch64::BI__builtin_arm_stlex) && 2487 "unexpected ARM builtin"); 2488 bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex || 2489 BuiltinID == ARM::BI__builtin_arm_ldaex || 2490 BuiltinID == AArch64::BI__builtin_arm_ldrex || 2491 BuiltinID == AArch64::BI__builtin_arm_ldaex; 2492 2493 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 2494 2495 // Ensure that we have the proper number of arguments. 2496 if (checkArgCount(*this, TheCall, IsLdrex ? 1 : 2)) 2497 return true; 2498 2499 // Inspect the pointer argument of the atomic builtin. This should always be 2500 // a pointer type, whose element is an integral scalar or pointer type. 2501 // Because it is a pointer type, we don't have to worry about any implicit 2502 // casts here. 2503 Expr *PointerArg = TheCall->getArg(IsLdrex ? 
0 : 1);
  ExprResult PointerArgRes = DefaultFunctionArrayLvalueConversion(PointerArg);
  if (PointerArgRes.isInvalid())
    return true;
  PointerArg = PointerArgRes.get();

  const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
  if (!pointerType) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return true;
  }

  // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next
  // task is to insert the appropriate casts into the AST. First work out just
  // what the appropriate type is.
  QualType ValType = pointerType->getPointeeType();
  QualType AddrType = ValType.getUnqualifiedType().withVolatile();
  if (IsLdrex)
    AddrType.addConst();

  // Issue a warning if the cast is dodgy.
  CastKind CastNeeded = CK_NoOp;
  if (!AddrType.isAtLeastAsQualifiedAs(ValType)) {
    CastNeeded = CK_BitCast;
    Diag(DRE->getBeginLoc(), diag::ext_typecheck_convert_discards_qualifiers)
        << PointerArg->getType() << Context.getPointerType(AddrType)
        << AA_Passing << PointerArg->getSourceRange();
  }

  // Finally, do the cast and replace the argument with the corrected version.
  AddrType = Context.getPointerType(AddrType);
  PointerArgRes = ImpCastExprToType(PointerArg, AddrType, CastNeeded);
  if (PointerArgRes.isInvalid())
    return true;
  PointerArg = PointerArgRes.get();

  TheCall->setArg(IsLdrex ? 0 : 1, PointerArg);

  // In general, we allow ints, floats and pointers to be loaded and stored.
  if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
      !ValType->isBlockPointerType() && !ValType->isFloatingType()) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intfltptr)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return true;
  }

  // But ARM doesn't have instructions to deal with 128-bit versions.
  if (Context.getTypeSize(ValType) > MaxWidth) {
    assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate");
    Diag(DRE->getBeginLoc(), diag::err_atomic_exclusive_builtin_pointer_size)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return true;
  }

  switch (ValType.getObjCLifetime()) {
  case Qualifiers::OCL_None:
  case Qualifiers::OCL_ExplicitNone:
    // okay
    break;

  case Qualifiers::OCL_Weak:
  case Qualifiers::OCL_Strong:
  case Qualifiers::OCL_Autoreleasing:
    Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership)
        << ValType << PointerArg->getSourceRange();
    return true;
  }

  if (IsLdrex) {
    TheCall->setType(ValType);
    return false;
  }

  // Initialize the argument to be stored.
  ExprResult ValArg = TheCall->getArg(0);
  InitializedEntity Entity = InitializedEntity::InitializeParameter(
      Context, ValType, /*consume*/ false);
  ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
  if (ValArg.isInvalid())
    return true;
  TheCall->setArg(0, ValArg.get());

  // __builtin_arm_strex always returns an int. It's marked as such in the .def,
  // but the custom checker bypasses all default analysis.
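  // For reference, an illustrative (not from this file) use of the exclusive
  // builtins, with the value as argument 0 and the pointer as argument 1:
  //
  //   int Old;
  //   do {
  //     Old = __builtin_arm_ldrex(P);            // loads and returns *P
  //   } while (__builtin_arm_strex(Old + 1, P)); // returns 0 on success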
2588 TheCall->setType(Context.IntTy); 2589 return false; 2590 } 2591 2592 bool Sema::CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 2593 CallExpr *TheCall) { 2594 if (BuiltinID == ARM::BI__builtin_arm_ldrex || 2595 BuiltinID == ARM::BI__builtin_arm_ldaex || 2596 BuiltinID == ARM::BI__builtin_arm_strex || 2597 BuiltinID == ARM::BI__builtin_arm_stlex) { 2598 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64); 2599 } 2600 2601 if (BuiltinID == ARM::BI__builtin_arm_prefetch) { 2602 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 2603 SemaBuiltinConstantArgRange(TheCall, 2, 0, 1); 2604 } 2605 2606 if (BuiltinID == ARM::BI__builtin_arm_rsr64 || 2607 BuiltinID == ARM::BI__builtin_arm_wsr64) 2608 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 3, false); 2609 2610 if (BuiltinID == ARM::BI__builtin_arm_rsr || 2611 BuiltinID == ARM::BI__builtin_arm_rsrp || 2612 BuiltinID == ARM::BI__builtin_arm_wsr || 2613 BuiltinID == ARM::BI__builtin_arm_wsrp) 2614 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 2615 2616 if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall)) 2617 return true; 2618 if (CheckMVEBuiltinFunctionCall(BuiltinID, TheCall)) 2619 return true; 2620 if (CheckCDEBuiltinFunctionCall(TI, BuiltinID, TheCall)) 2621 return true; 2622 2623 // For intrinsics which take an immediate value as part of the instruction, 2624 // range check them here. 2625 // FIXME: VFP Intrinsics should error if VFP not present. 2626 switch (BuiltinID) { 2627 default: return false; 2628 case ARM::BI__builtin_arm_ssat: 2629 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 32); 2630 case ARM::BI__builtin_arm_usat: 2631 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31); 2632 case ARM::BI__builtin_arm_ssat16: 2633 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16); 2634 case ARM::BI__builtin_arm_usat16: 2635 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 2636 case ARM::BI__builtin_arm_vcvtr_f: 2637 case ARM::BI__builtin_arm_vcvtr_d: 2638 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 2639 case ARM::BI__builtin_arm_dmb: 2640 case ARM::BI__builtin_arm_dsb: 2641 case ARM::BI__builtin_arm_isb: 2642 case ARM::BI__builtin_arm_dbg: 2643 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15); 2644 case ARM::BI__builtin_arm_cdp: 2645 case ARM::BI__builtin_arm_cdp2: 2646 case ARM::BI__builtin_arm_mcr: 2647 case ARM::BI__builtin_arm_mcr2: 2648 case ARM::BI__builtin_arm_mrc: 2649 case ARM::BI__builtin_arm_mrc2: 2650 case ARM::BI__builtin_arm_mcrr: 2651 case ARM::BI__builtin_arm_mcrr2: 2652 case ARM::BI__builtin_arm_mrrc: 2653 case ARM::BI__builtin_arm_mrrc2: 2654 case ARM::BI__builtin_arm_ldc: 2655 case ARM::BI__builtin_arm_ldcl: 2656 case ARM::BI__builtin_arm_ldc2: 2657 case ARM::BI__builtin_arm_ldc2l: 2658 case ARM::BI__builtin_arm_stc: 2659 case ARM::BI__builtin_arm_stcl: 2660 case ARM::BI__builtin_arm_stc2: 2661 case ARM::BI__builtin_arm_stc2l: 2662 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15) || 2663 CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), 2664 /*WantCDE*/ false); 2665 } 2666 } 2667 2668 bool Sema::CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, 2669 unsigned BuiltinID, 2670 CallExpr *TheCall) { 2671 if (BuiltinID == AArch64::BI__builtin_arm_ldrex || 2672 BuiltinID == AArch64::BI__builtin_arm_ldaex || 2673 BuiltinID == AArch64::BI__builtin_arm_strex || 2674 BuiltinID == AArch64::BI__builtin_arm_stlex) { 2675 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128); 2676 } 2677 2678 if (BuiltinID == 
AArch64::BI__builtin_arm_prefetch) { 2679 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 2680 SemaBuiltinConstantArgRange(TheCall, 2, 0, 2) || 2681 SemaBuiltinConstantArgRange(TheCall, 3, 0, 1) || 2682 SemaBuiltinConstantArgRange(TheCall, 4, 0, 1); 2683 } 2684 2685 if (BuiltinID == AArch64::BI__builtin_arm_rsr64 || 2686 BuiltinID == AArch64::BI__builtin_arm_wsr64) 2687 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 2688 2689 // Memory Tagging Extensions (MTE) Intrinsics 2690 if (BuiltinID == AArch64::BI__builtin_arm_irg || 2691 BuiltinID == AArch64::BI__builtin_arm_addg || 2692 BuiltinID == AArch64::BI__builtin_arm_gmi || 2693 BuiltinID == AArch64::BI__builtin_arm_ldg || 2694 BuiltinID == AArch64::BI__builtin_arm_stg || 2695 BuiltinID == AArch64::BI__builtin_arm_subp) { 2696 return SemaBuiltinARMMemoryTaggingCall(BuiltinID, TheCall); 2697 } 2698 2699 if (BuiltinID == AArch64::BI__builtin_arm_rsr || 2700 BuiltinID == AArch64::BI__builtin_arm_rsrp || 2701 BuiltinID == AArch64::BI__builtin_arm_wsr || 2702 BuiltinID == AArch64::BI__builtin_arm_wsrp) 2703 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 2704 2705 // Only check the valid encoding range. Any constant in this range would be 2706 // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw 2707 // an exception for incorrect registers. This matches MSVC behavior. 2708 if (BuiltinID == AArch64::BI_ReadStatusReg || 2709 BuiltinID == AArch64::BI_WriteStatusReg) 2710 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0x7fff); 2711 2712 if (BuiltinID == AArch64::BI__getReg) 2713 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31); 2714 2715 if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall)) 2716 return true; 2717 2718 if (CheckSVEBuiltinFunctionCall(BuiltinID, TheCall)) 2719 return true; 2720 2721 // For intrinsics which take an immediate value as part of the instruction, 2722 // range check them here. 2723 unsigned i = 0, l = 0, u = 0; 2724 switch (BuiltinID) { 2725 default: return false; 2726 case AArch64::BI__builtin_arm_dmb: 2727 case AArch64::BI__builtin_arm_dsb: 2728 case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break; 2729 case AArch64::BI__builtin_arm_tcancel: l = 0; u = 65535; break; 2730 } 2731 2732 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); 2733 } 2734 2735 static bool isValidBPFPreserveFieldInfoArg(Expr *Arg) { 2736 if (Arg->getType()->getAsPlaceholderType()) 2737 return false; 2738 2739 // The first argument needs to be a record field access. 2740 // If it is an array element access, we delay decision 2741 // to BPF backend to check whether the access is a 2742 // field access or not. 2743 return (Arg->IgnoreParens()->getObjectKind() == OK_BitField || 2744 isa<MemberExpr>(Arg->IgnoreParens()) || 2745 isa<ArraySubscriptExpr>(Arg->IgnoreParens())); 2746 } 2747 2748 static bool isEltOfVectorTy(ASTContext &Context, CallExpr *Call, Sema &S, 2749 QualType VectorTy, QualType EltTy) { 2750 QualType VectorEltTy = VectorTy->castAs<VectorType>()->getElementType(); 2751 if (!Context.hasSameType(VectorEltTy, EltTy)) { 2752 S.Diag(Call->getBeginLoc(), diag::err_typecheck_call_different_arg_types) 2753 << Call->getSourceRange() << VectorEltTy << EltTy; 2754 return false; 2755 } 2756 return true; 2757 } 2758 2759 static bool isValidBPFPreserveTypeInfoArg(Expr *Arg) { 2760 QualType ArgType = Arg->getType(); 2761 if (ArgType->getAsPlaceholderType()) 2762 return false; 2763 2764 // for TYPE_EXISTENCE/TYPE_SIZEOF reloc type 2765 // format: 2766 // 1. 
__builtin_preserve_type_info(*(<type> *)0, flag);
  // 2. <type> var;
  //    __builtin_preserve_type_info(var, flag);
  if (!isa<DeclRefExpr>(Arg->IgnoreParens()) &&
      !isa<UnaryOperator>(Arg->IgnoreParens()))
    return false;

  // Typedef type.
  if (ArgType->getAs<TypedefType>())
    return true;

  // Record type or Enum type.
  const Type *Ty = ArgType->getUnqualifiedDesugaredType();
  if (const auto *RT = Ty->getAs<RecordType>()) {
    if (!RT->getDecl()->getDeclName().isEmpty())
      return true;
  } else if (const auto *ET = Ty->getAs<EnumType>()) {
    if (!ET->getDecl()->getDeclName().isEmpty())
      return true;
  }

  return false;
}

static bool isValidBPFPreserveEnumValueArg(Expr *Arg) {
  QualType ArgType = Arg->getType();
  if (ArgType->getAsPlaceholderType())
    return false;

  // for ENUM_VALUE_EXISTENCE/ENUM_VALUE reloc type
  // format:
  //   __builtin_preserve_enum_value(*(<enum_type> *)<enum_value>, flag);
  const auto *UO = dyn_cast<UnaryOperator>(Arg->IgnoreParens());
  if (!UO)
    return false;

  const auto *CE = dyn_cast<CStyleCastExpr>(UO->getSubExpr());
  if (!CE)
    return false;
  if (CE->getCastKind() != CK_IntegralToPointer &&
      CE->getCastKind() != CK_NullToPointer)
    return false;

  // The integer must be from an EnumConstantDecl.
  const auto *DR = dyn_cast<DeclRefExpr>(CE->getSubExpr());
  if (!DR)
    return false;

  const EnumConstantDecl *Enumerator =
      dyn_cast<EnumConstantDecl>(DR->getDecl());
  if (!Enumerator)
    return false;

  // The type must be EnumType.
  const Type *Ty = ArgType->getUnqualifiedDesugaredType();
  const auto *ET = Ty->getAs<EnumType>();
  if (!ET)
    return false;

  // The enum value must be supported.
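  // For illustration, given "enum AA { VAL1 = 2, VAL2 = 0xff };" a valid use
  // is __builtin_preserve_enum_value(*(enum AA *)VAL1, 1); an enumerator that
  // does not belong to AA is rejected below.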
2827 return llvm::is_contained(ET->getDecl()->enumerators(), Enumerator); 2828 } 2829 2830 bool Sema::CheckBPFBuiltinFunctionCall(unsigned BuiltinID, 2831 CallExpr *TheCall) { 2832 assert((BuiltinID == BPF::BI__builtin_preserve_field_info || 2833 BuiltinID == BPF::BI__builtin_btf_type_id || 2834 BuiltinID == BPF::BI__builtin_preserve_type_info || 2835 BuiltinID == BPF::BI__builtin_preserve_enum_value) && 2836 "unexpected BPF builtin"); 2837 2838 if (checkArgCount(*this, TheCall, 2)) 2839 return true; 2840 2841 // The second argument needs to be a constant int 2842 Expr *Arg = TheCall->getArg(1); 2843 Optional<llvm::APSInt> Value = Arg->getIntegerConstantExpr(Context); 2844 diag::kind kind; 2845 if (!Value) { 2846 if (BuiltinID == BPF::BI__builtin_preserve_field_info) 2847 kind = diag::err_preserve_field_info_not_const; 2848 else if (BuiltinID == BPF::BI__builtin_btf_type_id) 2849 kind = diag::err_btf_type_id_not_const; 2850 else if (BuiltinID == BPF::BI__builtin_preserve_type_info) 2851 kind = diag::err_preserve_type_info_not_const; 2852 else 2853 kind = diag::err_preserve_enum_value_not_const; 2854 Diag(Arg->getBeginLoc(), kind) << 2 << Arg->getSourceRange(); 2855 return true; 2856 } 2857 2858 // The first argument 2859 Arg = TheCall->getArg(0); 2860 bool InvalidArg = false; 2861 bool ReturnUnsignedInt = true; 2862 if (BuiltinID == BPF::BI__builtin_preserve_field_info) { 2863 if (!isValidBPFPreserveFieldInfoArg(Arg)) { 2864 InvalidArg = true; 2865 kind = diag::err_preserve_field_info_not_field; 2866 } 2867 } else if (BuiltinID == BPF::BI__builtin_preserve_type_info) { 2868 if (!isValidBPFPreserveTypeInfoArg(Arg)) { 2869 InvalidArg = true; 2870 kind = diag::err_preserve_type_info_invalid; 2871 } 2872 } else if (BuiltinID == BPF::BI__builtin_preserve_enum_value) { 2873 if (!isValidBPFPreserveEnumValueArg(Arg)) { 2874 InvalidArg = true; 2875 kind = diag::err_preserve_enum_value_invalid; 2876 } 2877 ReturnUnsignedInt = false; 2878 } else if (BuiltinID == BPF::BI__builtin_btf_type_id) { 2879 ReturnUnsignedInt = false; 2880 } 2881 2882 if (InvalidArg) { 2883 Diag(Arg->getBeginLoc(), kind) << 1 << Arg->getSourceRange(); 2884 return true; 2885 } 2886 2887 if (ReturnUnsignedInt) 2888 TheCall->setType(Context.UnsignedIntTy); 2889 else 2890 TheCall->setType(Context.UnsignedLongTy); 2891 return false; 2892 } 2893 2894 bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) { 2895 struct ArgInfo { 2896 uint8_t OpNum; 2897 bool IsSigned; 2898 uint8_t BitWidth; 2899 uint8_t Align; 2900 }; 2901 struct BuiltinInfo { 2902 unsigned BuiltinID; 2903 ArgInfo Infos[2]; 2904 }; 2905 2906 static BuiltinInfo Infos[] = { 2907 { Hexagon::BI__builtin_circ_ldd, {{ 3, true, 4, 3 }} }, 2908 { Hexagon::BI__builtin_circ_ldw, {{ 3, true, 4, 2 }} }, 2909 { Hexagon::BI__builtin_circ_ldh, {{ 3, true, 4, 1 }} }, 2910 { Hexagon::BI__builtin_circ_lduh, {{ 3, true, 4, 1 }} }, 2911 { Hexagon::BI__builtin_circ_ldb, {{ 3, true, 4, 0 }} }, 2912 { Hexagon::BI__builtin_circ_ldub, {{ 3, true, 4, 0 }} }, 2913 { Hexagon::BI__builtin_circ_std, {{ 3, true, 4, 3 }} }, 2914 { Hexagon::BI__builtin_circ_stw, {{ 3, true, 4, 2 }} }, 2915 { Hexagon::BI__builtin_circ_sth, {{ 3, true, 4, 1 }} }, 2916 { Hexagon::BI__builtin_circ_sthhi, {{ 3, true, 4, 1 }} }, 2917 { Hexagon::BI__builtin_circ_stb, {{ 3, true, 4, 0 }} }, 2918 2919 { Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci, {{ 1, true, 4, 0 }} }, 2920 { Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci, {{ 1, true, 4, 0 }} }, 2921 { Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci, {{ 1, 
true, 4, 1 }} }, 2922 { Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci, {{ 1, true, 4, 1 }} }, 2923 { Hexagon::BI__builtin_HEXAGON_L2_loadri_pci, {{ 1, true, 4, 2 }} }, 2924 { Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci, {{ 1, true, 4, 3 }} }, 2925 { Hexagon::BI__builtin_HEXAGON_S2_storerb_pci, {{ 1, true, 4, 0 }} }, 2926 { Hexagon::BI__builtin_HEXAGON_S2_storerh_pci, {{ 1, true, 4, 1 }} }, 2927 { Hexagon::BI__builtin_HEXAGON_S2_storerf_pci, {{ 1, true, 4, 1 }} }, 2928 { Hexagon::BI__builtin_HEXAGON_S2_storeri_pci, {{ 1, true, 4, 2 }} }, 2929 { Hexagon::BI__builtin_HEXAGON_S2_storerd_pci, {{ 1, true, 4, 3 }} }, 2930 2931 { Hexagon::BI__builtin_HEXAGON_A2_combineii, {{ 1, true, 8, 0 }} }, 2932 { Hexagon::BI__builtin_HEXAGON_A2_tfrih, {{ 1, false, 16, 0 }} }, 2933 { Hexagon::BI__builtin_HEXAGON_A2_tfril, {{ 1, false, 16, 0 }} }, 2934 { Hexagon::BI__builtin_HEXAGON_A2_tfrpi, {{ 0, true, 8, 0 }} }, 2935 { Hexagon::BI__builtin_HEXAGON_A4_bitspliti, {{ 1, false, 5, 0 }} }, 2936 { Hexagon::BI__builtin_HEXAGON_A4_cmpbeqi, {{ 1, false, 8, 0 }} }, 2937 { Hexagon::BI__builtin_HEXAGON_A4_cmpbgti, {{ 1, true, 8, 0 }} }, 2938 { Hexagon::BI__builtin_HEXAGON_A4_cround_ri, {{ 1, false, 5, 0 }} }, 2939 { Hexagon::BI__builtin_HEXAGON_A4_round_ri, {{ 1, false, 5, 0 }} }, 2940 { Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat, {{ 1, false, 5, 0 }} }, 2941 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbeqi, {{ 1, false, 8, 0 }} }, 2942 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgti, {{ 1, true, 8, 0 }} }, 2943 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgtui, {{ 1, false, 7, 0 }} }, 2944 { Hexagon::BI__builtin_HEXAGON_A4_vcmpheqi, {{ 1, true, 8, 0 }} }, 2945 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgti, {{ 1, true, 8, 0 }} }, 2946 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgtui, {{ 1, false, 7, 0 }} }, 2947 { Hexagon::BI__builtin_HEXAGON_A4_vcmpweqi, {{ 1, true, 8, 0 }} }, 2948 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgti, {{ 1, true, 8, 0 }} }, 2949 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgtui, {{ 1, false, 7, 0 }} }, 2950 { Hexagon::BI__builtin_HEXAGON_C2_bitsclri, {{ 1, false, 6, 0 }} }, 2951 { Hexagon::BI__builtin_HEXAGON_C2_muxii, {{ 2, true, 8, 0 }} }, 2952 { Hexagon::BI__builtin_HEXAGON_C4_nbitsclri, {{ 1, false, 6, 0 }} }, 2953 { Hexagon::BI__builtin_HEXAGON_F2_dfclass, {{ 1, false, 5, 0 }} }, 2954 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_n, {{ 0, false, 10, 0 }} }, 2955 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_p, {{ 0, false, 10, 0 }} }, 2956 { Hexagon::BI__builtin_HEXAGON_F2_sfclass, {{ 1, false, 5, 0 }} }, 2957 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_n, {{ 0, false, 10, 0 }} }, 2958 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_p, {{ 0, false, 10, 0 }} }, 2959 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addi, {{ 2, false, 6, 0 }} }, 2960 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addr_u2, {{ 1, false, 6, 2 }} }, 2961 { Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri, {{ 2, false, 3, 0 }} }, 2962 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc, {{ 2, false, 6, 0 }} }, 2963 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and, {{ 2, false, 6, 0 }} }, 2964 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p, {{ 1, false, 6, 0 }} }, 2965 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac, {{ 2, false, 6, 0 }} }, 2966 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or, {{ 2, false, 6, 0 }} }, 2967 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc, {{ 2, false, 6, 0 }} }, 2968 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc, {{ 2, false, 5, 0 }} }, 2969 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and, {{ 2, false, 5, 0 }} }, 2970 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r, {{ 1, false, 5, 0 }} 
}, 2971 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac, {{ 2, false, 5, 0 }} }, 2972 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or, {{ 2, false, 5, 0 }} }, 2973 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat, {{ 1, false, 5, 0 }} }, 2974 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc, {{ 2, false, 5, 0 }} }, 2975 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh, {{ 1, false, 4, 0 }} }, 2976 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw, {{ 1, false, 5, 0 }} }, 2977 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc, {{ 2, false, 6, 0 }} }, 2978 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and, {{ 2, false, 6, 0 }} }, 2979 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p, {{ 1, false, 6, 0 }} }, 2980 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac, {{ 2, false, 6, 0 }} }, 2981 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or, {{ 2, false, 6, 0 }} }, 2982 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax, 2983 {{ 1, false, 6, 0 }} }, 2984 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd, {{ 1, false, 6, 0 }} }, 2985 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc, {{ 2, false, 5, 0 }} }, 2986 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and, {{ 2, false, 5, 0 }} }, 2987 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r, {{ 1, false, 5, 0 }} }, 2988 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac, {{ 2, false, 5, 0 }} }, 2989 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or, {{ 2, false, 5, 0 }} }, 2990 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax, 2991 {{ 1, false, 5, 0 }} }, 2992 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd, {{ 1, false, 5, 0 }} }, 2993 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun, {{ 1, false, 5, 0 }} }, 2994 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh, {{ 1, false, 4, 0 }} }, 2995 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw, {{ 1, false, 5, 0 }} }, 2996 { Hexagon::BI__builtin_HEXAGON_S2_clrbit_i, {{ 1, false, 5, 0 }} }, 2997 { Hexagon::BI__builtin_HEXAGON_S2_extractu, {{ 1, false, 5, 0 }, 2998 { 2, false, 5, 0 }} }, 2999 { Hexagon::BI__builtin_HEXAGON_S2_extractup, {{ 1, false, 6, 0 }, 3000 { 2, false, 6, 0 }} }, 3001 { Hexagon::BI__builtin_HEXAGON_S2_insert, {{ 2, false, 5, 0 }, 3002 { 3, false, 5, 0 }} }, 3003 { Hexagon::BI__builtin_HEXAGON_S2_insertp, {{ 2, false, 6, 0 }, 3004 { 3, false, 6, 0 }} }, 3005 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc, {{ 2, false, 6, 0 }} }, 3006 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and, {{ 2, false, 6, 0 }} }, 3007 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p, {{ 1, false, 6, 0 }} }, 3008 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac, {{ 2, false, 6, 0 }} }, 3009 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or, {{ 2, false, 6, 0 }} }, 3010 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc, {{ 2, false, 6, 0 }} }, 3011 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc, {{ 2, false, 5, 0 }} }, 3012 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and, {{ 2, false, 5, 0 }} }, 3013 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r, {{ 1, false, 5, 0 }} }, 3014 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac, {{ 2, false, 5, 0 }} }, 3015 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or, {{ 2, false, 5, 0 }} }, 3016 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc, {{ 2, false, 5, 0 }} }, 3017 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh, {{ 1, false, 4, 0 }} }, 3018 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw, {{ 1, false, 5, 0 }} }, 3019 { Hexagon::BI__builtin_HEXAGON_S2_setbit_i, {{ 1, false, 5, 0 }} }, 3020 { Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax, 3021 {{ 2, false, 4, 0 }, 3022 { 3, false, 5, 0 }} }, 3023 { Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax, 3024 {{ 2, false, 4, 0 }, 
3025 { 3, false, 5, 0 }} }, 3026 { Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax, 3027 {{ 2, false, 4, 0 }, 3028 { 3, false, 5, 0 }} }, 3029 { Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax, 3030 {{ 2, false, 4, 0 }, 3031 { 3, false, 5, 0 }} }, 3032 { Hexagon::BI__builtin_HEXAGON_S2_togglebit_i, {{ 1, false, 5, 0 }} }, 3033 { Hexagon::BI__builtin_HEXAGON_S2_tstbit_i, {{ 1, false, 5, 0 }} }, 3034 { Hexagon::BI__builtin_HEXAGON_S2_valignib, {{ 2, false, 3, 0 }} }, 3035 { Hexagon::BI__builtin_HEXAGON_S2_vspliceib, {{ 2, false, 3, 0 }} }, 3036 { Hexagon::BI__builtin_HEXAGON_S4_addi_asl_ri, {{ 2, false, 5, 0 }} }, 3037 { Hexagon::BI__builtin_HEXAGON_S4_addi_lsr_ri, {{ 2, false, 5, 0 }} }, 3038 { Hexagon::BI__builtin_HEXAGON_S4_andi_asl_ri, {{ 2, false, 5, 0 }} }, 3039 { Hexagon::BI__builtin_HEXAGON_S4_andi_lsr_ri, {{ 2, false, 5, 0 }} }, 3040 { Hexagon::BI__builtin_HEXAGON_S4_clbaddi, {{ 1, true , 6, 0 }} }, 3041 { Hexagon::BI__builtin_HEXAGON_S4_clbpaddi, {{ 1, true, 6, 0 }} }, 3042 { Hexagon::BI__builtin_HEXAGON_S4_extract, {{ 1, false, 5, 0 }, 3043 { 2, false, 5, 0 }} }, 3044 { Hexagon::BI__builtin_HEXAGON_S4_extractp, {{ 1, false, 6, 0 }, 3045 { 2, false, 6, 0 }} }, 3046 { Hexagon::BI__builtin_HEXAGON_S4_lsli, {{ 0, true, 6, 0 }} }, 3047 { Hexagon::BI__builtin_HEXAGON_S4_ntstbit_i, {{ 1, false, 5, 0 }} }, 3048 { Hexagon::BI__builtin_HEXAGON_S4_ori_asl_ri, {{ 2, false, 5, 0 }} }, 3049 { Hexagon::BI__builtin_HEXAGON_S4_ori_lsr_ri, {{ 2, false, 5, 0 }} }, 3050 { Hexagon::BI__builtin_HEXAGON_S4_subi_asl_ri, {{ 2, false, 5, 0 }} }, 3051 { Hexagon::BI__builtin_HEXAGON_S4_subi_lsr_ri, {{ 2, false, 5, 0 }} }, 3052 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate_acc, {{ 3, false, 2, 0 }} }, 3053 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate, {{ 2, false, 2, 0 }} }, 3054 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax, 3055 {{ 1, false, 4, 0 }} }, 3056 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_sat, {{ 1, false, 4, 0 }} }, 3057 { Hexagon::BI__builtin_HEXAGON_S5_vasrhrnd_goodsyntax, 3058 {{ 1, false, 4, 0 }} }, 3059 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, {{ 1, false, 6, 0 }} }, 3060 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, {{ 2, false, 6, 0 }} }, 3061 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, {{ 2, false, 6, 0 }} }, 3062 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, {{ 2, false, 6, 0 }} }, 3063 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, {{ 2, false, 6, 0 }} }, 3064 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, {{ 2, false, 6, 0 }} }, 3065 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, {{ 1, false, 5, 0 }} }, 3066 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, {{ 2, false, 5, 0 }} }, 3067 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, {{ 2, false, 5, 0 }} }, 3068 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, {{ 2, false, 5, 0 }} }, 3069 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, {{ 2, false, 5, 0 }} }, 3070 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, {{ 2, false, 5, 0 }} }, 3071 { Hexagon::BI__builtin_HEXAGON_V6_valignbi, {{ 2, false, 3, 0 }} }, 3072 { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, {{ 2, false, 3, 0 }} }, 3073 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, {{ 2, false, 3, 0 }} }, 3074 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, {{ 2, false, 3, 0 }} }, 3075 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, {{ 2, false, 1, 0 }} }, 3076 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, {{ 2, false, 1, 0 }} }, 3077 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, {{ 3, false, 1, 0 }} }, 3078 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B, 3079 
{{ 3, false, 1, 0 }} }, 3080 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, {{ 2, false, 1, 0 }} }, 3081 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, {{ 2, false, 1, 0 }} }, 3082 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, {{ 3, false, 1, 0 }} }, 3083 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B, 3084 {{ 3, false, 1, 0 }} }, 3085 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, {{ 2, false, 1, 0 }} }, 3086 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, {{ 2, false, 1, 0 }} }, 3087 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, {{ 3, false, 1, 0 }} }, 3088 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B, 3089 {{ 3, false, 1, 0 }} }, 3090 }; 3091 3092 // Use a dynamically initialized static to sort the table exactly once on 3093 // first run. 3094 static const bool SortOnce = 3095 (llvm::sort(Infos, 3096 [](const BuiltinInfo &LHS, const BuiltinInfo &RHS) { 3097 return LHS.BuiltinID < RHS.BuiltinID; 3098 }), 3099 true); 3100 (void)SortOnce; 3101 3102 const BuiltinInfo *F = llvm::partition_point( 3103 Infos, [=](const BuiltinInfo &BI) { return BI.BuiltinID < BuiltinID; }); 3104 if (F == std::end(Infos) || F->BuiltinID != BuiltinID) 3105 return false; 3106 3107 bool Error = false; 3108 3109 for (const ArgInfo &A : F->Infos) { 3110 // Ignore empty ArgInfo elements. 3111 if (A.BitWidth == 0) 3112 continue; 3113 3114 int32_t Min = A.IsSigned ? -(1 << (A.BitWidth - 1)) : 0; 3115 int32_t Max = (1 << (A.IsSigned ? A.BitWidth - 1 : A.BitWidth)) - 1; 3116 if (!A.Align) { 3117 Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max); 3118 } else { 3119 unsigned M = 1 << A.Align; 3120 Min *= M; 3121 Max *= M; 3122 Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max); 3123 Error |= SemaBuiltinConstantArgMultiple(TheCall, A.OpNum, M); 3124 } 3125 } 3126 return Error; 3127 } 3128 3129 bool Sema::CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, 3130 CallExpr *TheCall) { 3131 return CheckHexagonBuiltinArgument(BuiltinID, TheCall); 3132 } 3133 3134 bool Sema::CheckMipsBuiltinFunctionCall(const TargetInfo &TI, 3135 unsigned BuiltinID, CallExpr *TheCall) { 3136 return CheckMipsBuiltinCpu(TI, BuiltinID, TheCall) || 3137 CheckMipsBuiltinArgument(BuiltinID, TheCall); 3138 } 3139 3140 bool Sema::CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID, 3141 CallExpr *TheCall) { 3142 3143 if (Mips::BI__builtin_mips_addu_qb <= BuiltinID && 3144 BuiltinID <= Mips::BI__builtin_mips_lwx) { 3145 if (!TI.hasFeature("dsp")) 3146 return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_dsp); 3147 } 3148 3149 if (Mips::BI__builtin_mips_absq_s_qb <= BuiltinID && 3150 BuiltinID <= Mips::BI__builtin_mips_subuh_r_qb) { 3151 if (!TI.hasFeature("dspr2")) 3152 return Diag(TheCall->getBeginLoc(), 3153 diag::err_mips_builtin_requires_dspr2); 3154 } 3155 3156 if (Mips::BI__builtin_msa_add_a_b <= BuiltinID && 3157 BuiltinID <= Mips::BI__builtin_msa_xori_b) { 3158 if (!TI.hasFeature("msa")) 3159 return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_msa); 3160 } 3161 3162 return false; 3163 } 3164 3165 // CheckMipsBuiltinArgument - Checks the constant value passed to the 3166 // intrinsic is correct. The switch statement is ordered by DSP, MSA. The 3167 // ordering for DSP is unspecified. MSA is ordered by the data format used 3168 // by the underlying instruction i.e., df/m, df/n and then by size. 3169 // 3170 // FIXME: The size tests here should instead be tablegen'd along with the 3171 // definitions from include/clang/Basic/BuiltinsMips.def. 
3172 // FIXME: GCC is strict on signedness for some of these intrinsics, we should 3173 // be too. 3174 bool Sema::CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) { 3175 unsigned i = 0, l = 0, u = 0, m = 0; 3176 switch (BuiltinID) { 3177 default: return false; 3178 case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break; 3179 case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break; 3180 case Mips::BI__builtin_mips_append: i = 2; l = 0; u = 31; break; 3181 case Mips::BI__builtin_mips_balign: i = 2; l = 0; u = 3; break; 3182 case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break; 3183 case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break; 3184 case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break; 3185 // MSA intrinsics. Instructions (which the intrinsics maps to) which use the 3186 // df/m field. 3187 // These intrinsics take an unsigned 3 bit immediate. 3188 case Mips::BI__builtin_msa_bclri_b: 3189 case Mips::BI__builtin_msa_bnegi_b: 3190 case Mips::BI__builtin_msa_bseti_b: 3191 case Mips::BI__builtin_msa_sat_s_b: 3192 case Mips::BI__builtin_msa_sat_u_b: 3193 case Mips::BI__builtin_msa_slli_b: 3194 case Mips::BI__builtin_msa_srai_b: 3195 case Mips::BI__builtin_msa_srari_b: 3196 case Mips::BI__builtin_msa_srli_b: 3197 case Mips::BI__builtin_msa_srlri_b: i = 1; l = 0; u = 7; break; 3198 case Mips::BI__builtin_msa_binsli_b: 3199 case Mips::BI__builtin_msa_binsri_b: i = 2; l = 0; u = 7; break; 3200 // These intrinsics take an unsigned 4 bit immediate. 3201 case Mips::BI__builtin_msa_bclri_h: 3202 case Mips::BI__builtin_msa_bnegi_h: 3203 case Mips::BI__builtin_msa_bseti_h: 3204 case Mips::BI__builtin_msa_sat_s_h: 3205 case Mips::BI__builtin_msa_sat_u_h: 3206 case Mips::BI__builtin_msa_slli_h: 3207 case Mips::BI__builtin_msa_srai_h: 3208 case Mips::BI__builtin_msa_srari_h: 3209 case Mips::BI__builtin_msa_srli_h: 3210 case Mips::BI__builtin_msa_srlri_h: i = 1; l = 0; u = 15; break; 3211 case Mips::BI__builtin_msa_binsli_h: 3212 case Mips::BI__builtin_msa_binsri_h: i = 2; l = 0; u = 15; break; 3213 // These intrinsics take an unsigned 5 bit immediate. 3214 // The first block of intrinsics actually have an unsigned 5 bit field, 3215 // not a df/n field. 
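  // e.g., __builtin_msa_bseti_w(v, 31) is in range for the u5 checks below,
  // while an immediate of 32 would be diagnosed by
  // SemaBuiltinConstantArgRange (illustrative values).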
3216 case Mips::BI__builtin_msa_cfcmsa: 3217 case Mips::BI__builtin_msa_ctcmsa: i = 0; l = 0; u = 31; break; 3218 case Mips::BI__builtin_msa_clei_u_b: 3219 case Mips::BI__builtin_msa_clei_u_h: 3220 case Mips::BI__builtin_msa_clei_u_w: 3221 case Mips::BI__builtin_msa_clei_u_d: 3222 case Mips::BI__builtin_msa_clti_u_b: 3223 case Mips::BI__builtin_msa_clti_u_h: 3224 case Mips::BI__builtin_msa_clti_u_w: 3225 case Mips::BI__builtin_msa_clti_u_d: 3226 case Mips::BI__builtin_msa_maxi_u_b: 3227 case Mips::BI__builtin_msa_maxi_u_h: 3228 case Mips::BI__builtin_msa_maxi_u_w: 3229 case Mips::BI__builtin_msa_maxi_u_d: 3230 case Mips::BI__builtin_msa_mini_u_b: 3231 case Mips::BI__builtin_msa_mini_u_h: 3232 case Mips::BI__builtin_msa_mini_u_w: 3233 case Mips::BI__builtin_msa_mini_u_d: 3234 case Mips::BI__builtin_msa_addvi_b: 3235 case Mips::BI__builtin_msa_addvi_h: 3236 case Mips::BI__builtin_msa_addvi_w: 3237 case Mips::BI__builtin_msa_addvi_d: 3238 case Mips::BI__builtin_msa_bclri_w: 3239 case Mips::BI__builtin_msa_bnegi_w: 3240 case Mips::BI__builtin_msa_bseti_w: 3241 case Mips::BI__builtin_msa_sat_s_w: 3242 case Mips::BI__builtin_msa_sat_u_w: 3243 case Mips::BI__builtin_msa_slli_w: 3244 case Mips::BI__builtin_msa_srai_w: 3245 case Mips::BI__builtin_msa_srari_w: 3246 case Mips::BI__builtin_msa_srli_w: 3247 case Mips::BI__builtin_msa_srlri_w: 3248 case Mips::BI__builtin_msa_subvi_b: 3249 case Mips::BI__builtin_msa_subvi_h: 3250 case Mips::BI__builtin_msa_subvi_w: 3251 case Mips::BI__builtin_msa_subvi_d: i = 1; l = 0; u = 31; break; 3252 case Mips::BI__builtin_msa_binsli_w: 3253 case Mips::BI__builtin_msa_binsri_w: i = 2; l = 0; u = 31; break; 3254 // These intrinsics take an unsigned 6 bit immediate. 3255 case Mips::BI__builtin_msa_bclri_d: 3256 case Mips::BI__builtin_msa_bnegi_d: 3257 case Mips::BI__builtin_msa_bseti_d: 3258 case Mips::BI__builtin_msa_sat_s_d: 3259 case Mips::BI__builtin_msa_sat_u_d: 3260 case Mips::BI__builtin_msa_slli_d: 3261 case Mips::BI__builtin_msa_srai_d: 3262 case Mips::BI__builtin_msa_srari_d: 3263 case Mips::BI__builtin_msa_srli_d: 3264 case Mips::BI__builtin_msa_srlri_d: i = 1; l = 0; u = 63; break; 3265 case Mips::BI__builtin_msa_binsli_d: 3266 case Mips::BI__builtin_msa_binsri_d: i = 2; l = 0; u = 63; break; 3267 // These intrinsics take a signed 5 bit immediate. 3268 case Mips::BI__builtin_msa_ceqi_b: 3269 case Mips::BI__builtin_msa_ceqi_h: 3270 case Mips::BI__builtin_msa_ceqi_w: 3271 case Mips::BI__builtin_msa_ceqi_d: 3272 case Mips::BI__builtin_msa_clti_s_b: 3273 case Mips::BI__builtin_msa_clti_s_h: 3274 case Mips::BI__builtin_msa_clti_s_w: 3275 case Mips::BI__builtin_msa_clti_s_d: 3276 case Mips::BI__builtin_msa_clei_s_b: 3277 case Mips::BI__builtin_msa_clei_s_h: 3278 case Mips::BI__builtin_msa_clei_s_w: 3279 case Mips::BI__builtin_msa_clei_s_d: 3280 case Mips::BI__builtin_msa_maxi_s_b: 3281 case Mips::BI__builtin_msa_maxi_s_h: 3282 case Mips::BI__builtin_msa_maxi_s_w: 3283 case Mips::BI__builtin_msa_maxi_s_d: 3284 case Mips::BI__builtin_msa_mini_s_b: 3285 case Mips::BI__builtin_msa_mini_s_h: 3286 case Mips::BI__builtin_msa_mini_s_w: 3287 case Mips::BI__builtin_msa_mini_s_d: i = 1; l = -16; u = 15; break; 3288 // These intrinsics take an unsigned 8 bit immediate. 
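  // e.g., __builtin_msa_andi_b(v, 0xFF) uses the largest immediate accepted
  // by the u8 checks below; 256 would be diagnosed (illustrative values).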
3289 case Mips::BI__builtin_msa_andi_b: 3290 case Mips::BI__builtin_msa_nori_b: 3291 case Mips::BI__builtin_msa_ori_b: 3292 case Mips::BI__builtin_msa_shf_b: 3293 case Mips::BI__builtin_msa_shf_h: 3294 case Mips::BI__builtin_msa_shf_w: 3295 case Mips::BI__builtin_msa_xori_b: i = 1; l = 0; u = 255; break; 3296 case Mips::BI__builtin_msa_bseli_b: 3297 case Mips::BI__builtin_msa_bmnzi_b: 3298 case Mips::BI__builtin_msa_bmzi_b: i = 2; l = 0; u = 255; break; 3299 // df/n format 3300 // These intrinsics take an unsigned 4 bit immediate. 3301 case Mips::BI__builtin_msa_copy_s_b: 3302 case Mips::BI__builtin_msa_copy_u_b: 3303 case Mips::BI__builtin_msa_insve_b: 3304 case Mips::BI__builtin_msa_splati_b: i = 1; l = 0; u = 15; break; 3305 case Mips::BI__builtin_msa_sldi_b: i = 2; l = 0; u = 15; break; 3306 // These intrinsics take an unsigned 3 bit immediate. 3307 case Mips::BI__builtin_msa_copy_s_h: 3308 case Mips::BI__builtin_msa_copy_u_h: 3309 case Mips::BI__builtin_msa_insve_h: 3310 case Mips::BI__builtin_msa_splati_h: i = 1; l = 0; u = 7; break; 3311 case Mips::BI__builtin_msa_sldi_h: i = 2; l = 0; u = 7; break; 3312 // These intrinsics take an unsigned 2 bit immediate. 3313 case Mips::BI__builtin_msa_copy_s_w: 3314 case Mips::BI__builtin_msa_copy_u_w: 3315 case Mips::BI__builtin_msa_insve_w: 3316 case Mips::BI__builtin_msa_splati_w: i = 1; l = 0; u = 3; break; 3317 case Mips::BI__builtin_msa_sldi_w: i = 2; l = 0; u = 3; break; 3318 // These intrinsics take an unsigned 1 bit immediate. 3319 case Mips::BI__builtin_msa_copy_s_d: 3320 case Mips::BI__builtin_msa_copy_u_d: 3321 case Mips::BI__builtin_msa_insve_d: 3322 case Mips::BI__builtin_msa_splati_d: i = 1; l = 0; u = 1; break; 3323 case Mips::BI__builtin_msa_sldi_d: i = 2; l = 0; u = 1; break; 3324 // Memory offsets and immediate loads. 3325 // These intrinsics take a signed 10 bit immediate. 3326 case Mips::BI__builtin_msa_ldi_b: i = 0; l = -128; u = 255; break; 3327 case Mips::BI__builtin_msa_ldi_h: 3328 case Mips::BI__builtin_msa_ldi_w: 3329 case Mips::BI__builtin_msa_ldi_d: i = 0; l = -512; u = 511; break; 3330 case Mips::BI__builtin_msa_ld_b: i = 1; l = -512; u = 511; m = 1; break; 3331 case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 2; break; 3332 case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 4; break; 3333 case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 8; break; 3334 case Mips::BI__builtin_msa_ldr_d: i = 1; l = -4096; u = 4088; m = 8; break; 3335 case Mips::BI__builtin_msa_ldr_w: i = 1; l = -2048; u = 2044; m = 4; break; 3336 case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 1; break; 3337 case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 2; break; 3338 case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 4; break; 3339 case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 8; break; 3340 case Mips::BI__builtin_msa_str_d: i = 2; l = -4096; u = 4088; m = 8; break; 3341 case Mips::BI__builtin_msa_str_w: i = 2; l = -2048; u = 2044; m = 4; break; 3342 } 3343 3344 if (!m) 3345 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 3346 3347 return SemaBuiltinConstantArgRange(TheCall, i, l, u) || 3348 SemaBuiltinConstantArgMultiple(TheCall, i, m); 3349 } 3350 3351 /// DecodePPCMMATypeFromStr - This decodes one PPC MMA type descriptor from Str, 3352 /// advancing the pointer over the consumed characters. The decoded type is 3353 /// returned. 
If the decoded type represents a constant integer with a 3354 /// constraint on its value then Mask is set to that value. The type descriptors 3355 /// used in Str are specific to PPC MMA builtins and are documented in the file 3356 /// defining the PPC builtins. 3357 static QualType DecodePPCMMATypeFromStr(ASTContext &Context, const char *&Str, 3358 unsigned &Mask) { 3359 bool RequireICE = false; 3360 ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None; 3361 switch (*Str++) { 3362 case 'V': 3363 return Context.getVectorType(Context.UnsignedCharTy, 16, 3364 VectorType::VectorKind::AltiVecVector); 3365 case 'i': { 3366 char *End; 3367 unsigned size = strtoul(Str, &End, 10); 3368 assert(End != Str && "Missing constant parameter constraint"); 3369 Str = End; 3370 Mask = size; 3371 return Context.IntTy; 3372 } 3373 case 'W': { 3374 char *End; 3375 unsigned size = strtoul(Str, &End, 10); 3376 assert(End != Str && "Missing PowerPC MMA type size"); 3377 Str = End; 3378 QualType Type; 3379 switch (size) { 3380 #define PPC_VECTOR_TYPE(typeName, Id, size) \ 3381 case size: Type = Context.Id##Ty; break; 3382 #include "clang/Basic/PPCTypes.def" 3383 default: llvm_unreachable("Invalid PowerPC MMA vector type"); 3384 } 3385 bool CheckVectorArgs = false; 3386 while (!CheckVectorArgs) { 3387 switch (*Str++) { 3388 case '*': 3389 Type = Context.getPointerType(Type); 3390 break; 3391 case 'C': 3392 Type = Type.withConst(); 3393 break; 3394 default: 3395 CheckVectorArgs = true; 3396 --Str; 3397 break; 3398 } 3399 } 3400 return Type; 3401 } 3402 default: 3403 return Context.DecodeTypeStr(--Str, Context, Error, RequireICE, true); 3404 } 3405 } 3406 3407 static bool isPPC_64Builtin(unsigned BuiltinID) { 3408 // These builtins only work on PPC 64bit targets. 3409 switch (BuiltinID) { 3410 case PPC::BI__builtin_divde: 3411 case PPC::BI__builtin_divdeu: 3412 case PPC::BI__builtin_bpermd: 3413 case PPC::BI__builtin_ppc_ldarx: 3414 case PPC::BI__builtin_ppc_stdcx: 3415 case PPC::BI__builtin_ppc_tdw: 3416 case PPC::BI__builtin_ppc_trapd: 3417 case PPC::BI__builtin_ppc_cmpeqb: 3418 case PPC::BI__builtin_ppc_setb: 3419 case PPC::BI__builtin_ppc_mulhd: 3420 case PPC::BI__builtin_ppc_mulhdu: 3421 case PPC::BI__builtin_ppc_maddhd: 3422 case PPC::BI__builtin_ppc_maddhdu: 3423 case PPC::BI__builtin_ppc_maddld: 3424 case PPC::BI__builtin_ppc_load8r: 3425 case PPC::BI__builtin_ppc_store8r: 3426 case PPC::BI__builtin_ppc_insert_exp: 3427 case PPC::BI__builtin_ppc_extract_sig: 3428 case PPC::BI__builtin_ppc_addex: 3429 case PPC::BI__builtin_darn: 3430 case PPC::BI__builtin_darn_raw: 3431 case PPC::BI__builtin_ppc_compare_and_swaplp: 3432 case PPC::BI__builtin_ppc_fetch_and_addlp: 3433 case PPC::BI__builtin_ppc_fetch_and_andlp: 3434 case PPC::BI__builtin_ppc_fetch_and_orlp: 3435 case PPC::BI__builtin_ppc_fetch_and_swaplp: 3436 return true; 3437 } 3438 return false; 3439 } 3440 3441 static bool SemaFeatureCheck(Sema &S, CallExpr *TheCall, 3442 StringRef FeatureToCheck, unsigned DiagID, 3443 StringRef DiagArg = "") { 3444 if (S.Context.getTargetInfo().hasFeature(FeatureToCheck)) 3445 return false; 3446 3447 if (DiagArg.empty()) 3448 S.Diag(TheCall->getBeginLoc(), DiagID) << TheCall->getSourceRange(); 3449 else 3450 S.Diag(TheCall->getBeginLoc(), DiagID) 3451 << DiagArg << TheCall->getSourceRange(); 3452 3453 return true; 3454 } 3455 3456 /// Returns true if the argument consists of one contiguous run of 1s with any 3457 /// number of 0s on either side. 
The 1s are allowed to wrap from LSB to MSB, so 3458 /// 0x000FFF0, 0x0000FFFF, 0xFF0000FF, 0x0 are all runs. 0x0F0F0000 is not, 3459 /// since all 1s are not contiguous. 3460 bool Sema::SemaValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum) { 3461 llvm::APSInt Result; 3462 // We can't check the value of a dependent argument. 3463 Expr *Arg = TheCall->getArg(ArgNum); 3464 if (Arg->isTypeDependent() || Arg->isValueDependent()) 3465 return false; 3466 3467 // Check constant-ness first. 3468 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 3469 return true; 3470 3471 // Check contiguous run of 1s, 0xFF0000FF is also a run of 1s. 3472 if (Result.isShiftedMask() || (~Result).isShiftedMask()) 3473 return false; 3474 3475 return Diag(TheCall->getBeginLoc(), 3476 diag::err_argument_not_contiguous_bit_field) 3477 << ArgNum << Arg->getSourceRange(); 3478 } 3479 3480 bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 3481 CallExpr *TheCall) { 3482 unsigned i = 0, l = 0, u = 0; 3483 bool IsTarget64Bit = TI.getTypeWidth(TI.getIntPtrType()) == 64; 3484 llvm::APSInt Result; 3485 3486 if (isPPC_64Builtin(BuiltinID) && !IsTarget64Bit) 3487 return Diag(TheCall->getBeginLoc(), diag::err_64_bit_builtin_32_bit_tgt) 3488 << TheCall->getSourceRange(); 3489 3490 switch (BuiltinID) { 3491 default: return false; 3492 case PPC::BI__builtin_altivec_crypto_vshasigmaw: 3493 case PPC::BI__builtin_altivec_crypto_vshasigmad: 3494 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 3495 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); 3496 case PPC::BI__builtin_altivec_dss: 3497 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3); 3498 case PPC::BI__builtin_tbegin: 3499 case PPC::BI__builtin_tend: i = 0; l = 0; u = 1; break; 3500 case PPC::BI__builtin_tsr: i = 0; l = 0; u = 7; break; 3501 case PPC::BI__builtin_tabortwc: 3502 case PPC::BI__builtin_tabortdc: i = 0; l = 0; u = 31; break; 3503 case PPC::BI__builtin_tabortwci: 3504 case PPC::BI__builtin_tabortdci: 3505 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) || 3506 SemaBuiltinConstantArgRange(TheCall, 2, 0, 31); 3507 // According to GCC 'Basic PowerPC Built-in Functions Available on ISA 2.05', 3508 // __builtin_(un)pack_longdouble are available only if long double uses IBM 3509 // extended double representation. 
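  // e.g., __builtin_unpack_longdouble(LD, 0) selects one half of the IBM
  // double-double pair; the index operand must be the constant 0 or 1
  // (illustrative usage).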
3510 case PPC::BI__builtin_unpack_longdouble: 3511 if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 1)) 3512 return true; 3513 LLVM_FALLTHROUGH; 3514 case PPC::BI__builtin_pack_longdouble: 3515 if (&TI.getLongDoubleFormat() != &llvm::APFloat::PPCDoubleDouble()) 3516 return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_requires_abi) 3517 << "ibmlongdouble"; 3518 return false; 3519 case PPC::BI__builtin_altivec_dst: 3520 case PPC::BI__builtin_altivec_dstt: 3521 case PPC::BI__builtin_altivec_dstst: 3522 case PPC::BI__builtin_altivec_dststt: 3523 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3); 3524 case PPC::BI__builtin_vsx_xxpermdi: 3525 case PPC::BI__builtin_vsx_xxsldwi: 3526 return SemaBuiltinVSX(TheCall); 3527 case PPC::BI__builtin_divwe: 3528 case PPC::BI__builtin_divweu: 3529 case PPC::BI__builtin_divde: 3530 case PPC::BI__builtin_divdeu: 3531 return SemaFeatureCheck(*this, TheCall, "extdiv", 3532 diag::err_ppc_builtin_only_on_arch, "7"); 3533 case PPC::BI__builtin_bpermd: 3534 return SemaFeatureCheck(*this, TheCall, "bpermd", 3535 diag::err_ppc_builtin_only_on_arch, "7"); 3536 case PPC::BI__builtin_unpack_vector_int128: 3537 return SemaFeatureCheck(*this, TheCall, "vsx", 3538 diag::err_ppc_builtin_only_on_arch, "7") || 3539 SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 3540 case PPC::BI__builtin_pack_vector_int128: 3541 return SemaFeatureCheck(*this, TheCall, "vsx", 3542 diag::err_ppc_builtin_only_on_arch, "7"); 3543 case PPC::BI__builtin_altivec_vgnb: 3544 return SemaBuiltinConstantArgRange(TheCall, 1, 2, 7); 3545 case PPC::BI__builtin_altivec_vec_replace_elt: 3546 case PPC::BI__builtin_altivec_vec_replace_unaligned: { 3547 QualType VecTy = TheCall->getArg(0)->getType(); 3548 QualType EltTy = TheCall->getArg(1)->getType(); 3549 unsigned Width = Context.getIntWidth(EltTy); 3550 return SemaBuiltinConstantArgRange(TheCall, 2, 0, Width == 32 ? 12 : 8) || 3551 !isEltOfVectorTy(Context, TheCall, *this, VecTy, EltTy); 3552 } 3553 case PPC::BI__builtin_vsx_xxeval: 3554 return SemaBuiltinConstantArgRange(TheCall, 3, 0, 255); 3555 case PPC::BI__builtin_altivec_vsldbi: 3556 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7); 3557 case PPC::BI__builtin_altivec_vsrdbi: 3558 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7); 3559 case PPC::BI__builtin_vsx_xxpermx: 3560 return SemaBuiltinConstantArgRange(TheCall, 3, 0, 7); 3561 case PPC::BI__builtin_ppc_tw: 3562 case PPC::BI__builtin_ppc_tdw: 3563 return SemaBuiltinConstantArgRange(TheCall, 2, 1, 31); 3564 case PPC::BI__builtin_ppc_cmpeqb: 3565 case PPC::BI__builtin_ppc_setb: 3566 case PPC::BI__builtin_ppc_maddhd: 3567 case PPC::BI__builtin_ppc_maddhdu: 3568 case PPC::BI__builtin_ppc_maddld: 3569 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 3570 diag::err_ppc_builtin_only_on_arch, "9"); 3571 case PPC::BI__builtin_ppc_cmprb: 3572 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 3573 diag::err_ppc_builtin_only_on_arch, "9") || 3574 SemaBuiltinConstantArgRange(TheCall, 0, 0, 1); 3575 // For __rlwnm, __rlwimi and __rldimi, the last parameter mask must 3576 // be a constant that represents a contiguous bit field. 
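  // e.g., masks such as 0x00FFFF00 or the wrapped 0xFF0000FF pass
  // SemaValueIsRunOfOnes below, while 0x00FF00FF is rejected as
  // non-contiguous (illustrative values).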
3577 case PPC::BI__builtin_ppc_rlwnm: 3578 return SemaValueIsRunOfOnes(TheCall, 2); 3579 case PPC::BI__builtin_ppc_rlwimi: 3580 case PPC::BI__builtin_ppc_rldimi: 3581 return SemaBuiltinConstantArg(TheCall, 2, Result) || 3582 SemaValueIsRunOfOnes(TheCall, 3); 3583 case PPC::BI__builtin_ppc_extract_exp: 3584 case PPC::BI__builtin_ppc_extract_sig: 3585 case PPC::BI__builtin_ppc_insert_exp: 3586 return SemaFeatureCheck(*this, TheCall, "power9-vector", 3587 diag::err_ppc_builtin_only_on_arch, "9"); 3588 case PPC::BI__builtin_ppc_addex: { 3589 if (SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 3590 diag::err_ppc_builtin_only_on_arch, "9") || 3591 SemaBuiltinConstantArgRange(TheCall, 2, 0, 3)) 3592 return true; 3593 // Output warning for reserved values 1 to 3. 3594 int ArgValue = 3595 TheCall->getArg(2)->getIntegerConstantExpr(Context)->getSExtValue(); 3596 if (ArgValue != 0) 3597 Diag(TheCall->getBeginLoc(), diag::warn_argument_undefined_behaviour) 3598 << ArgValue; 3599 return false; 3600 } 3601 case PPC::BI__builtin_ppc_mtfsb0: 3602 case PPC::BI__builtin_ppc_mtfsb1: 3603 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31); 3604 case PPC::BI__builtin_ppc_mtfsf: 3605 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 255); 3606 case PPC::BI__builtin_ppc_mtfsfi: 3607 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 7) || 3608 SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 3609 case PPC::BI__builtin_ppc_alignx: 3610 return SemaBuiltinConstantArgPower2(TheCall, 0); 3611 case PPC::BI__builtin_ppc_rdlam: 3612 return SemaValueIsRunOfOnes(TheCall, 2); 3613 case PPC::BI__builtin_ppc_icbt: 3614 case PPC::BI__builtin_ppc_sthcx: 3615 case PPC::BI__builtin_ppc_stbcx: 3616 case PPC::BI__builtin_ppc_lharx: 3617 case PPC::BI__builtin_ppc_lbarx: 3618 return SemaFeatureCheck(*this, TheCall, "isa-v207-instructions", 3619 diag::err_ppc_builtin_only_on_arch, "8"); 3620 case PPC::BI__builtin_vsx_ldrmb: 3621 case PPC::BI__builtin_vsx_strmb: 3622 return SemaFeatureCheck(*this, TheCall, "isa-v207-instructions", 3623 diag::err_ppc_builtin_only_on_arch, "8") || 3624 SemaBuiltinConstantArgRange(TheCall, 1, 1, 16); 3625 case PPC::BI__builtin_altivec_vcntmbb: 3626 case PPC::BI__builtin_altivec_vcntmbh: 3627 case PPC::BI__builtin_altivec_vcntmbw: 3628 case PPC::BI__builtin_altivec_vcntmbd: 3629 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 3630 case PPC::BI__builtin_darn: 3631 case PPC::BI__builtin_darn_raw: 3632 case PPC::BI__builtin_darn_32: 3633 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 3634 diag::err_ppc_builtin_only_on_arch, "9"); 3635 case PPC::BI__builtin_vsx_xxgenpcvbm: 3636 case PPC::BI__builtin_vsx_xxgenpcvhm: 3637 case PPC::BI__builtin_vsx_xxgenpcvwm: 3638 case PPC::BI__builtin_vsx_xxgenpcvdm: 3639 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3); 3640 case PPC::BI__builtin_ppc_compare_exp_uo: 3641 case PPC::BI__builtin_ppc_compare_exp_lt: 3642 case PPC::BI__builtin_ppc_compare_exp_gt: 3643 case PPC::BI__builtin_ppc_compare_exp_eq: 3644 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 3645 diag::err_ppc_builtin_only_on_arch, "9") || 3646 SemaFeatureCheck(*this, TheCall, "vsx", 3647 diag::err_ppc_builtin_requires_vsx); 3648 case PPC::BI__builtin_ppc_test_data_class: { 3649 // Check if the first argument of the __builtin_ppc_test_data_class call is 3650 // valid. The argument must be either a 'float' or a 'double'. 
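    // e.g., __builtin_ppc_test_data_class(1.0, 0x40) passes these checks; a
    // 'long double' first argument or a second argument above 127 is
    // diagnosed (illustrative values).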
    QualType ArgType = TheCall->getArg(0)->getType();
    if (ArgType != QualType(Context.FloatTy) &&
        ArgType != QualType(Context.DoubleTy))
      return Diag(TheCall->getBeginLoc(),
                  diag::err_ppc_invalid_test_data_class_type);
    return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions",
                            diag::err_ppc_builtin_only_on_arch, "9") ||
           SemaFeatureCheck(*this, TheCall, "vsx",
                            diag::err_ppc_builtin_requires_vsx) ||
           SemaBuiltinConstantArgRange(TheCall, 1, 0, 127);
  }
  case PPC::BI__builtin_ppc_load8r:
  case PPC::BI__builtin_ppc_store8r:
    return SemaFeatureCheck(*this, TheCall, "isa-v206-instructions",
                            diag::err_ppc_builtin_only_on_arch, "7");
#define CUSTOM_BUILTIN(Name, Intr, Types, Acc) \
  case PPC::BI__builtin_##Name: \
    return SemaBuiltinPPCMMACall(TheCall, BuiltinID, Types);
#include "clang/Basic/BuiltinsPPC.def"
  }
  return SemaBuiltinConstantArgRange(TheCall, i, l, u);
}

// Check if the given type is a non-pointer PPC MMA type. This function is used
// in Sema to prevent invalid uses of restricted PPC MMA types.
bool Sema::CheckPPCMMAType(QualType Type, SourceLocation TypeLoc) {
  if (Type->isPointerType() || Type->isArrayType())
    return false;

  QualType CoreType = Type.getCanonicalType().getUnqualifiedType();
#define PPC_VECTOR_TYPE(Name, Id, Size) || CoreType == Context.Id##Ty
  if (false
#include "clang/Basic/PPCTypes.def"
    ) {
    Diag(TypeLoc, diag::err_ppc_invalid_use_mma_type);
    return true;
  }
  return false;
}

bool Sema::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
                                          CallExpr *TheCall) {
  // Position of the memory order and scope arguments in the builtin.
  unsigned OrderIndex, ScopeIndex;
  switch (BuiltinID) {
  case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
  case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
  case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
  case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
    OrderIndex = 2;
    ScopeIndex = 3;
    break;
  case AMDGPU::BI__builtin_amdgcn_fence:
    OrderIndex = 0;
    ScopeIndex = 1;
    break;
  default:
    return false;
  }

  ExprResult Arg = TheCall->getArg(OrderIndex);
  auto ArgExpr = Arg.get();
  Expr::EvalResult ArgResult;

  if (!ArgExpr->EvaluateAsInt(ArgResult, Context))
    return Diag(ArgExpr->getExprLoc(), diag::err_typecheck_expect_int)
           << ArgExpr->getType();
  auto Ord = ArgResult.Val.getInt().getZExtValue();

  // Check the validity of the memory ordering as per C11 / C++11's memory
  // model. Only the fence needs this check; atomic dec/inc allow all memory
  // orders.
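  // e.g., __builtin_amdgcn_fence(__ATOMIC_SEQ_CST, "workgroup") is accepted,
  // while __ATOMIC_RELAXED on the fence builtin is diagnosed below
  // (illustrative usage).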
3722 if (!llvm::isValidAtomicOrderingCABI(Ord)) 3723 return Diag(ArgExpr->getBeginLoc(), 3724 diag::warn_atomic_op_has_invalid_memory_order) 3725 << ArgExpr->getSourceRange(); 3726 switch (static_cast<llvm::AtomicOrderingCABI>(Ord)) { 3727 case llvm::AtomicOrderingCABI::relaxed: 3728 case llvm::AtomicOrderingCABI::consume: 3729 if (BuiltinID == AMDGPU::BI__builtin_amdgcn_fence) 3730 return Diag(ArgExpr->getBeginLoc(), 3731 diag::warn_atomic_op_has_invalid_memory_order) 3732 << ArgExpr->getSourceRange(); 3733 break; 3734 case llvm::AtomicOrderingCABI::acquire: 3735 case llvm::AtomicOrderingCABI::release: 3736 case llvm::AtomicOrderingCABI::acq_rel: 3737 case llvm::AtomicOrderingCABI::seq_cst: 3738 break; 3739 } 3740 3741 Arg = TheCall->getArg(ScopeIndex); 3742 ArgExpr = Arg.get(); 3743 Expr::EvalResult ArgResult1; 3744 // Check that sync scope is a constant literal 3745 if (!ArgExpr->EvaluateAsConstantExpr(ArgResult1, Context)) 3746 return Diag(ArgExpr->getExprLoc(), diag::err_expr_not_string_literal) 3747 << ArgExpr->getType(); 3748 3749 return false; 3750 } 3751 3752 bool Sema::CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum) { 3753 llvm::APSInt Result; 3754 3755 // We can't check the value of a dependent argument. 3756 Expr *Arg = TheCall->getArg(ArgNum); 3757 if (Arg->isTypeDependent() || Arg->isValueDependent()) 3758 return false; 3759 3760 // Check constant-ness first. 3761 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 3762 return true; 3763 3764 int64_t Val = Result.getSExtValue(); 3765 if ((Val >= 0 && Val <= 3) || (Val >= 5 && Val <= 7)) 3766 return false; 3767 3768 return Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_invalid_lmul) 3769 << Arg->getSourceRange(); 3770 } 3771 3772 bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo &TI, 3773 unsigned BuiltinID, 3774 CallExpr *TheCall) { 3775 // CodeGenFunction can also detect this, but this gives a better error 3776 // message. 3777 bool FeatureMissing = false; 3778 SmallVector<StringRef> ReqFeatures; 3779 StringRef Features = Context.BuiltinInfo.getRequiredFeatures(BuiltinID); 3780 Features.split(ReqFeatures, ','); 3781 3782 // Check if each required feature is included 3783 for (StringRef F : ReqFeatures) { 3784 if (TI.hasFeature(F)) 3785 continue; 3786 3787 // If the feature is 64bit, alter the string so it will print better in 3788 // the diagnostic. 3789 if (F == "64bit") 3790 F = "RV64"; 3791 3792 // Convert features like "zbr" and "experimental-zbr" to "Zbr". 
3793 F.consume_front("experimental-"); 3794 std::string FeatureStr = F.str(); 3795 FeatureStr[0] = std::toupper(FeatureStr[0]); 3796 3797 // Error message 3798 FeatureMissing = true; 3799 Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_requires_extension) 3800 << TheCall->getSourceRange() << StringRef(FeatureStr); 3801 } 3802 3803 if (FeatureMissing) 3804 return true; 3805 3806 switch (BuiltinID) { 3807 case RISCVVector::BI__builtin_rvv_vsetvli: 3808 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3) || 3809 CheckRISCVLMUL(TheCall, 2); 3810 case RISCVVector::BI__builtin_rvv_vsetvlimax: 3811 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) || 3812 CheckRISCVLMUL(TheCall, 1); 3813 } 3814 3815 return false; 3816 } 3817 3818 bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, 3819 CallExpr *TheCall) { 3820 if (BuiltinID == SystemZ::BI__builtin_tabort) { 3821 Expr *Arg = TheCall->getArg(0); 3822 if (Optional<llvm::APSInt> AbortCode = Arg->getIntegerConstantExpr(Context)) 3823 if (AbortCode->getSExtValue() >= 0 && AbortCode->getSExtValue() < 256) 3824 return Diag(Arg->getBeginLoc(), diag::err_systemz_invalid_tabort_code) 3825 << Arg->getSourceRange(); 3826 } 3827 3828 // For intrinsics which take an immediate value as part of the instruction, 3829 // range check them here. 3830 unsigned i = 0, l = 0, u = 0; 3831 switch (BuiltinID) { 3832 default: return false; 3833 case SystemZ::BI__builtin_s390_lcbb: i = 1; l = 0; u = 15; break; 3834 case SystemZ::BI__builtin_s390_verimb: 3835 case SystemZ::BI__builtin_s390_verimh: 3836 case SystemZ::BI__builtin_s390_verimf: 3837 case SystemZ::BI__builtin_s390_verimg: i = 3; l = 0; u = 255; break; 3838 case SystemZ::BI__builtin_s390_vfaeb: 3839 case SystemZ::BI__builtin_s390_vfaeh: 3840 case SystemZ::BI__builtin_s390_vfaef: 3841 case SystemZ::BI__builtin_s390_vfaebs: 3842 case SystemZ::BI__builtin_s390_vfaehs: 3843 case SystemZ::BI__builtin_s390_vfaefs: 3844 case SystemZ::BI__builtin_s390_vfaezb: 3845 case SystemZ::BI__builtin_s390_vfaezh: 3846 case SystemZ::BI__builtin_s390_vfaezf: 3847 case SystemZ::BI__builtin_s390_vfaezbs: 3848 case SystemZ::BI__builtin_s390_vfaezhs: 3849 case SystemZ::BI__builtin_s390_vfaezfs: i = 2; l = 0; u = 15; break; 3850 case SystemZ::BI__builtin_s390_vfisb: 3851 case SystemZ::BI__builtin_s390_vfidb: 3852 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15) || 3853 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); 3854 case SystemZ::BI__builtin_s390_vftcisb: 3855 case SystemZ::BI__builtin_s390_vftcidb: i = 1; l = 0; u = 4095; break; 3856 case SystemZ::BI__builtin_s390_vlbb: i = 1; l = 0; u = 15; break; 3857 case SystemZ::BI__builtin_s390_vpdi: i = 2; l = 0; u = 15; break; 3858 case SystemZ::BI__builtin_s390_vsldb: i = 2; l = 0; u = 15; break; 3859 case SystemZ::BI__builtin_s390_vstrcb: 3860 case SystemZ::BI__builtin_s390_vstrch: 3861 case SystemZ::BI__builtin_s390_vstrcf: 3862 case SystemZ::BI__builtin_s390_vstrczb: 3863 case SystemZ::BI__builtin_s390_vstrczh: 3864 case SystemZ::BI__builtin_s390_vstrczf: 3865 case SystemZ::BI__builtin_s390_vstrcbs: 3866 case SystemZ::BI__builtin_s390_vstrchs: 3867 case SystemZ::BI__builtin_s390_vstrcfs: 3868 case SystemZ::BI__builtin_s390_vstrczbs: 3869 case SystemZ::BI__builtin_s390_vstrczhs: 3870 case SystemZ::BI__builtin_s390_vstrczfs: i = 3; l = 0; u = 15; break; 3871 case SystemZ::BI__builtin_s390_vmslg: i = 3; l = 0; u = 15; break; 3872 case SystemZ::BI__builtin_s390_vfminsb: 3873 case SystemZ::BI__builtin_s390_vfmaxsb: 3874 case SystemZ::BI__builtin_s390_vfmindb: 3875 case 
SystemZ::BI__builtin_s390_vfmaxdb: i = 2; l = 0; u = 15; break; 3876 case SystemZ::BI__builtin_s390_vsld: i = 2; l = 0; u = 7; break; 3877 case SystemZ::BI__builtin_s390_vsrd: i = 2; l = 0; u = 7; break; 3878 case SystemZ::BI__builtin_s390_vclfnhs: 3879 case SystemZ::BI__builtin_s390_vclfnls: 3880 case SystemZ::BI__builtin_s390_vcfn: 3881 case SystemZ::BI__builtin_s390_vcnf: i = 1; l = 0; u = 15; break; 3882 case SystemZ::BI__builtin_s390_vcrnfs: i = 2; l = 0; u = 15; break; 3883 } 3884 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 3885 } 3886 3887 /// SemaBuiltinCpuSupports - Handle __builtin_cpu_supports(char *). 3888 /// This checks that the target supports __builtin_cpu_supports and 3889 /// that the string argument is constant and valid. 3890 static bool SemaBuiltinCpuSupports(Sema &S, const TargetInfo &TI, 3891 CallExpr *TheCall) { 3892 Expr *Arg = TheCall->getArg(0); 3893 3894 // Check if the argument is a string literal. 3895 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 3896 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 3897 << Arg->getSourceRange(); 3898 3899 // Check the contents of the string. 3900 StringRef Feature = 3901 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 3902 if (!TI.validateCpuSupports(Feature)) 3903 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_supports) 3904 << Arg->getSourceRange(); 3905 return false; 3906 } 3907 3908 /// SemaBuiltinCpuIs - Handle __builtin_cpu_is(char *). 3909 /// This checks that the target supports __builtin_cpu_is and 3910 /// that the string argument is constant and valid. 3911 static bool SemaBuiltinCpuIs(Sema &S, const TargetInfo &TI, CallExpr *TheCall) { 3912 Expr *Arg = TheCall->getArg(0); 3913 3914 // Check if the argument is a string literal. 3915 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 3916 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 3917 << Arg->getSourceRange(); 3918 3919 // Check the contents of the string. 3920 StringRef Feature = 3921 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 3922 if (!TI.validateCpuIs(Feature)) 3923 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_is) 3924 << Arg->getSourceRange(); 3925 return false; 3926 } 3927 3928 // Check if the rounding mode is legal. 3929 bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) { 3930 // Indicates if this instruction has rounding control or just SAE. 
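  // In the immediate encoding assumed here, bits 1:0 select the rounding mode
  // (_MM_FROUND_CUR_DIRECTION == 4) and bit 3 (_MM_FROUND_NO_EXC == 8)
  // suppresses exceptions (SAE); e.g. _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC
  // encodes to 11 (illustrative summary).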
3931 bool HasRC = false; 3932 3933 unsigned ArgNum = 0; 3934 switch (BuiltinID) { 3935 default: 3936 return false; 3937 case X86::BI__builtin_ia32_vcvttsd2si32: 3938 case X86::BI__builtin_ia32_vcvttsd2si64: 3939 case X86::BI__builtin_ia32_vcvttsd2usi32: 3940 case X86::BI__builtin_ia32_vcvttsd2usi64: 3941 case X86::BI__builtin_ia32_vcvttss2si32: 3942 case X86::BI__builtin_ia32_vcvttss2si64: 3943 case X86::BI__builtin_ia32_vcvttss2usi32: 3944 case X86::BI__builtin_ia32_vcvttss2usi64: 3945 case X86::BI__builtin_ia32_vcvttsh2si32: 3946 case X86::BI__builtin_ia32_vcvttsh2si64: 3947 case X86::BI__builtin_ia32_vcvttsh2usi32: 3948 case X86::BI__builtin_ia32_vcvttsh2usi64: 3949 ArgNum = 1; 3950 break; 3951 case X86::BI__builtin_ia32_maxpd512: 3952 case X86::BI__builtin_ia32_maxps512: 3953 case X86::BI__builtin_ia32_minpd512: 3954 case X86::BI__builtin_ia32_minps512: 3955 case X86::BI__builtin_ia32_maxph512: 3956 case X86::BI__builtin_ia32_minph512: 3957 ArgNum = 2; 3958 break; 3959 case X86::BI__builtin_ia32_vcvtph2pd512_mask: 3960 case X86::BI__builtin_ia32_vcvtph2psx512_mask: 3961 case X86::BI__builtin_ia32_cvtps2pd512_mask: 3962 case X86::BI__builtin_ia32_cvttpd2dq512_mask: 3963 case X86::BI__builtin_ia32_cvttpd2qq512_mask: 3964 case X86::BI__builtin_ia32_cvttpd2udq512_mask: 3965 case X86::BI__builtin_ia32_cvttpd2uqq512_mask: 3966 case X86::BI__builtin_ia32_cvttps2dq512_mask: 3967 case X86::BI__builtin_ia32_cvttps2qq512_mask: 3968 case X86::BI__builtin_ia32_cvttps2udq512_mask: 3969 case X86::BI__builtin_ia32_cvttps2uqq512_mask: 3970 case X86::BI__builtin_ia32_vcvttph2w512_mask: 3971 case X86::BI__builtin_ia32_vcvttph2uw512_mask: 3972 case X86::BI__builtin_ia32_vcvttph2dq512_mask: 3973 case X86::BI__builtin_ia32_vcvttph2udq512_mask: 3974 case X86::BI__builtin_ia32_vcvttph2qq512_mask: 3975 case X86::BI__builtin_ia32_vcvttph2uqq512_mask: 3976 case X86::BI__builtin_ia32_exp2pd_mask: 3977 case X86::BI__builtin_ia32_exp2ps_mask: 3978 case X86::BI__builtin_ia32_getexppd512_mask: 3979 case X86::BI__builtin_ia32_getexpps512_mask: 3980 case X86::BI__builtin_ia32_getexpph512_mask: 3981 case X86::BI__builtin_ia32_rcp28pd_mask: 3982 case X86::BI__builtin_ia32_rcp28ps_mask: 3983 case X86::BI__builtin_ia32_rsqrt28pd_mask: 3984 case X86::BI__builtin_ia32_rsqrt28ps_mask: 3985 case X86::BI__builtin_ia32_vcomisd: 3986 case X86::BI__builtin_ia32_vcomiss: 3987 case X86::BI__builtin_ia32_vcomish: 3988 case X86::BI__builtin_ia32_vcvtph2ps512_mask: 3989 ArgNum = 3; 3990 break; 3991 case X86::BI__builtin_ia32_cmppd512_mask: 3992 case X86::BI__builtin_ia32_cmpps512_mask: 3993 case X86::BI__builtin_ia32_cmpsd_mask: 3994 case X86::BI__builtin_ia32_cmpss_mask: 3995 case X86::BI__builtin_ia32_cmpsh_mask: 3996 case X86::BI__builtin_ia32_vcvtsh2sd_round_mask: 3997 case X86::BI__builtin_ia32_vcvtsh2ss_round_mask: 3998 case X86::BI__builtin_ia32_cvtss2sd_round_mask: 3999 case X86::BI__builtin_ia32_getexpsd128_round_mask: 4000 case X86::BI__builtin_ia32_getexpss128_round_mask: 4001 case X86::BI__builtin_ia32_getexpsh128_round_mask: 4002 case X86::BI__builtin_ia32_getmantpd512_mask: 4003 case X86::BI__builtin_ia32_getmantps512_mask: 4004 case X86::BI__builtin_ia32_getmantph512_mask: 4005 case X86::BI__builtin_ia32_maxsd_round_mask: 4006 case X86::BI__builtin_ia32_maxss_round_mask: 4007 case X86::BI__builtin_ia32_maxsh_round_mask: 4008 case X86::BI__builtin_ia32_minsd_round_mask: 4009 case X86::BI__builtin_ia32_minss_round_mask: 4010 case X86::BI__builtin_ia32_minsh_round_mask: 4011 case X86::BI__builtin_ia32_rcp28sd_round_mask: 
4012 case X86::BI__builtin_ia32_rcp28ss_round_mask: 4013 case X86::BI__builtin_ia32_reducepd512_mask: 4014 case X86::BI__builtin_ia32_reduceps512_mask: 4015 case X86::BI__builtin_ia32_reduceph512_mask: 4016 case X86::BI__builtin_ia32_rndscalepd_mask: 4017 case X86::BI__builtin_ia32_rndscaleps_mask: 4018 case X86::BI__builtin_ia32_rndscaleph_mask: 4019 case X86::BI__builtin_ia32_rsqrt28sd_round_mask: 4020 case X86::BI__builtin_ia32_rsqrt28ss_round_mask: 4021 ArgNum = 4; 4022 break; 4023 case X86::BI__builtin_ia32_fixupimmpd512_mask: 4024 case X86::BI__builtin_ia32_fixupimmpd512_maskz: 4025 case X86::BI__builtin_ia32_fixupimmps512_mask: 4026 case X86::BI__builtin_ia32_fixupimmps512_maskz: 4027 case X86::BI__builtin_ia32_fixupimmsd_mask: 4028 case X86::BI__builtin_ia32_fixupimmsd_maskz: 4029 case X86::BI__builtin_ia32_fixupimmss_mask: 4030 case X86::BI__builtin_ia32_fixupimmss_maskz: 4031 case X86::BI__builtin_ia32_getmantsd_round_mask: 4032 case X86::BI__builtin_ia32_getmantss_round_mask: 4033 case X86::BI__builtin_ia32_getmantsh_round_mask: 4034 case X86::BI__builtin_ia32_rangepd512_mask: 4035 case X86::BI__builtin_ia32_rangeps512_mask: 4036 case X86::BI__builtin_ia32_rangesd128_round_mask: 4037 case X86::BI__builtin_ia32_rangess128_round_mask: 4038 case X86::BI__builtin_ia32_reducesd_mask: 4039 case X86::BI__builtin_ia32_reducess_mask: 4040 case X86::BI__builtin_ia32_reducesh_mask: 4041 case X86::BI__builtin_ia32_rndscalesd_round_mask: 4042 case X86::BI__builtin_ia32_rndscaless_round_mask: 4043 case X86::BI__builtin_ia32_rndscalesh_round_mask: 4044 ArgNum = 5; 4045 break; 4046 case X86::BI__builtin_ia32_vcvtsd2si64: 4047 case X86::BI__builtin_ia32_vcvtsd2si32: 4048 case X86::BI__builtin_ia32_vcvtsd2usi32: 4049 case X86::BI__builtin_ia32_vcvtsd2usi64: 4050 case X86::BI__builtin_ia32_vcvtss2si32: 4051 case X86::BI__builtin_ia32_vcvtss2si64: 4052 case X86::BI__builtin_ia32_vcvtss2usi32: 4053 case X86::BI__builtin_ia32_vcvtss2usi64: 4054 case X86::BI__builtin_ia32_vcvtsh2si32: 4055 case X86::BI__builtin_ia32_vcvtsh2si64: 4056 case X86::BI__builtin_ia32_vcvtsh2usi32: 4057 case X86::BI__builtin_ia32_vcvtsh2usi64: 4058 case X86::BI__builtin_ia32_sqrtpd512: 4059 case X86::BI__builtin_ia32_sqrtps512: 4060 case X86::BI__builtin_ia32_sqrtph512: 4061 ArgNum = 1; 4062 HasRC = true; 4063 break; 4064 case X86::BI__builtin_ia32_addph512: 4065 case X86::BI__builtin_ia32_divph512: 4066 case X86::BI__builtin_ia32_mulph512: 4067 case X86::BI__builtin_ia32_subph512: 4068 case X86::BI__builtin_ia32_addpd512: 4069 case X86::BI__builtin_ia32_addps512: 4070 case X86::BI__builtin_ia32_divpd512: 4071 case X86::BI__builtin_ia32_divps512: 4072 case X86::BI__builtin_ia32_mulpd512: 4073 case X86::BI__builtin_ia32_mulps512: 4074 case X86::BI__builtin_ia32_subpd512: 4075 case X86::BI__builtin_ia32_subps512: 4076 case X86::BI__builtin_ia32_cvtsi2sd64: 4077 case X86::BI__builtin_ia32_cvtsi2ss32: 4078 case X86::BI__builtin_ia32_cvtsi2ss64: 4079 case X86::BI__builtin_ia32_cvtusi2sd64: 4080 case X86::BI__builtin_ia32_cvtusi2ss32: 4081 case X86::BI__builtin_ia32_cvtusi2ss64: 4082 case X86::BI__builtin_ia32_vcvtusi2sh: 4083 case X86::BI__builtin_ia32_vcvtusi642sh: 4084 case X86::BI__builtin_ia32_vcvtsi2sh: 4085 case X86::BI__builtin_ia32_vcvtsi642sh: 4086 ArgNum = 2; 4087 HasRC = true; 4088 break; 4089 case X86::BI__builtin_ia32_cvtdq2ps512_mask: 4090 case X86::BI__builtin_ia32_cvtudq2ps512_mask: 4091 case X86::BI__builtin_ia32_vcvtpd2ph512_mask: 4092 case X86::BI__builtin_ia32_vcvtps2phx512_mask: 4093 case 
X86::BI__builtin_ia32_cvtpd2ps512_mask: 4094 case X86::BI__builtin_ia32_cvtpd2dq512_mask: 4095 case X86::BI__builtin_ia32_cvtpd2qq512_mask: 4096 case X86::BI__builtin_ia32_cvtpd2udq512_mask: 4097 case X86::BI__builtin_ia32_cvtpd2uqq512_mask: 4098 case X86::BI__builtin_ia32_cvtps2dq512_mask: 4099 case X86::BI__builtin_ia32_cvtps2qq512_mask: 4100 case X86::BI__builtin_ia32_cvtps2udq512_mask: 4101 case X86::BI__builtin_ia32_cvtps2uqq512_mask: 4102 case X86::BI__builtin_ia32_cvtqq2pd512_mask: 4103 case X86::BI__builtin_ia32_cvtqq2ps512_mask: 4104 case X86::BI__builtin_ia32_cvtuqq2pd512_mask: 4105 case X86::BI__builtin_ia32_cvtuqq2ps512_mask: 4106 case X86::BI__builtin_ia32_vcvtdq2ph512_mask: 4107 case X86::BI__builtin_ia32_vcvtudq2ph512_mask: 4108 case X86::BI__builtin_ia32_vcvtw2ph512_mask: 4109 case X86::BI__builtin_ia32_vcvtuw2ph512_mask: 4110 case X86::BI__builtin_ia32_vcvtph2w512_mask: 4111 case X86::BI__builtin_ia32_vcvtph2uw512_mask: 4112 case X86::BI__builtin_ia32_vcvtph2dq512_mask: 4113 case X86::BI__builtin_ia32_vcvtph2udq512_mask: 4114 case X86::BI__builtin_ia32_vcvtph2qq512_mask: 4115 case X86::BI__builtin_ia32_vcvtph2uqq512_mask: 4116 case X86::BI__builtin_ia32_vcvtqq2ph512_mask: 4117 case X86::BI__builtin_ia32_vcvtuqq2ph512_mask: 4118 ArgNum = 3; 4119 HasRC = true; 4120 break; 4121 case X86::BI__builtin_ia32_addsh_round_mask: 4122 case X86::BI__builtin_ia32_addss_round_mask: 4123 case X86::BI__builtin_ia32_addsd_round_mask: 4124 case X86::BI__builtin_ia32_divsh_round_mask: 4125 case X86::BI__builtin_ia32_divss_round_mask: 4126 case X86::BI__builtin_ia32_divsd_round_mask: 4127 case X86::BI__builtin_ia32_mulsh_round_mask: 4128 case X86::BI__builtin_ia32_mulss_round_mask: 4129 case X86::BI__builtin_ia32_mulsd_round_mask: 4130 case X86::BI__builtin_ia32_subsh_round_mask: 4131 case X86::BI__builtin_ia32_subss_round_mask: 4132 case X86::BI__builtin_ia32_subsd_round_mask: 4133 case X86::BI__builtin_ia32_scalefph512_mask: 4134 case X86::BI__builtin_ia32_scalefpd512_mask: 4135 case X86::BI__builtin_ia32_scalefps512_mask: 4136 case X86::BI__builtin_ia32_scalefsd_round_mask: 4137 case X86::BI__builtin_ia32_scalefss_round_mask: 4138 case X86::BI__builtin_ia32_scalefsh_round_mask: 4139 case X86::BI__builtin_ia32_cvtsd2ss_round_mask: 4140 case X86::BI__builtin_ia32_vcvtss2sh_round_mask: 4141 case X86::BI__builtin_ia32_vcvtsd2sh_round_mask: 4142 case X86::BI__builtin_ia32_sqrtsd_round_mask: 4143 case X86::BI__builtin_ia32_sqrtss_round_mask: 4144 case X86::BI__builtin_ia32_sqrtsh_round_mask: 4145 case X86::BI__builtin_ia32_vfmaddsd3_mask: 4146 case X86::BI__builtin_ia32_vfmaddsd3_maskz: 4147 case X86::BI__builtin_ia32_vfmaddsd3_mask3: 4148 case X86::BI__builtin_ia32_vfmaddss3_mask: 4149 case X86::BI__builtin_ia32_vfmaddss3_maskz: 4150 case X86::BI__builtin_ia32_vfmaddss3_mask3: 4151 case X86::BI__builtin_ia32_vfmaddsh3_mask: 4152 case X86::BI__builtin_ia32_vfmaddsh3_maskz: 4153 case X86::BI__builtin_ia32_vfmaddsh3_mask3: 4154 case X86::BI__builtin_ia32_vfmaddpd512_mask: 4155 case X86::BI__builtin_ia32_vfmaddpd512_maskz: 4156 case X86::BI__builtin_ia32_vfmaddpd512_mask3: 4157 case X86::BI__builtin_ia32_vfmsubpd512_mask3: 4158 case X86::BI__builtin_ia32_vfmaddps512_mask: 4159 case X86::BI__builtin_ia32_vfmaddps512_maskz: 4160 case X86::BI__builtin_ia32_vfmaddps512_mask3: 4161 case X86::BI__builtin_ia32_vfmsubps512_mask3: 4162 case X86::BI__builtin_ia32_vfmaddph512_mask: 4163 case X86::BI__builtin_ia32_vfmaddph512_maskz: 4164 case X86::BI__builtin_ia32_vfmaddph512_mask3: 4165 case 
X86::BI__builtin_ia32_vfmsubph512_mask3:
  case X86::BI__builtin_ia32_vfmaddsubpd512_mask:
  case X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
  case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
  case X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
  case X86::BI__builtin_ia32_vfmaddsubps512_mask:
  case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
  case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
  case X86::BI__builtin_ia32_vfmsubaddps512_mask3:
  case X86::BI__builtin_ia32_vfmaddsubph512_mask:
  case X86::BI__builtin_ia32_vfmaddsubph512_maskz:
  case X86::BI__builtin_ia32_vfmaddsubph512_mask3:
  case X86::BI__builtin_ia32_vfmsubaddph512_mask3:
  case X86::BI__builtin_ia32_vfmaddcsh_mask:
  case X86::BI__builtin_ia32_vfmaddcsh_round_mask:
  case X86::BI__builtin_ia32_vfmaddcsh_round_mask3:
  case X86::BI__builtin_ia32_vfmaddcph512_mask:
  case X86::BI__builtin_ia32_vfmaddcph512_maskz:
  case X86::BI__builtin_ia32_vfmaddcph512_mask3:
  case X86::BI__builtin_ia32_vfcmaddcsh_mask:
  case X86::BI__builtin_ia32_vfcmaddcsh_round_mask:
  case X86::BI__builtin_ia32_vfcmaddcsh_round_mask3:
  case X86::BI__builtin_ia32_vfcmaddcph512_mask:
  case X86::BI__builtin_ia32_vfcmaddcph512_maskz:
  case X86::BI__builtin_ia32_vfcmaddcph512_mask3:
  case X86::BI__builtin_ia32_vfmulcsh_mask:
  case X86::BI__builtin_ia32_vfmulcph512_mask:
  case X86::BI__builtin_ia32_vfcmulcsh_mask:
  case X86::BI__builtin_ia32_vfcmulcph512_mask:
    ArgNum = 4;
    HasRC = true;
    break;
  }

  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  // Make sure the rounding mode is either ROUND_CUR_DIRECTION or has the
  // ROUND_NO_EXC bit set. If the intrinsic has rounding control (bits 1:0),
  // make sure it's only combined with ROUND_NO_EXC. If the intrinsic does not
  // have rounding control, allow ROUND_NO_EXC and ROUND_CUR_DIRECTION
  // together.
  if (Result == 4/*ROUND_CUR_DIRECTION*/ ||
      Result == 8/*ROUND_NO_EXC*/ ||
      (!HasRC && Result == 12/*ROUND_CUR_DIRECTION|ROUND_NO_EXC*/) ||
      (HasRC && Result.getZExtValue() >= 8 && Result.getZExtValue() <= 11))
    return false;

  return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_rounding)
         << Arg->getSourceRange();
}

// Check if the gather/scatter scale is legal.
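// e.g., the scale operand of _mm512_i32gather_ps(index, base, 4) is legal,
// while a scale of 3 is rejected below (illustrative usage).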
4225 bool Sema::CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, 4226 CallExpr *TheCall) { 4227 unsigned ArgNum = 0; 4228 switch (BuiltinID) { 4229 default: 4230 return false; 4231 case X86::BI__builtin_ia32_gatherpfdpd: 4232 case X86::BI__builtin_ia32_gatherpfdps: 4233 case X86::BI__builtin_ia32_gatherpfqpd: 4234 case X86::BI__builtin_ia32_gatherpfqps: 4235 case X86::BI__builtin_ia32_scatterpfdpd: 4236 case X86::BI__builtin_ia32_scatterpfdps: 4237 case X86::BI__builtin_ia32_scatterpfqpd: 4238 case X86::BI__builtin_ia32_scatterpfqps: 4239 ArgNum = 3; 4240 break; 4241 case X86::BI__builtin_ia32_gatherd_pd: 4242 case X86::BI__builtin_ia32_gatherd_pd256: 4243 case X86::BI__builtin_ia32_gatherq_pd: 4244 case X86::BI__builtin_ia32_gatherq_pd256: 4245 case X86::BI__builtin_ia32_gatherd_ps: 4246 case X86::BI__builtin_ia32_gatherd_ps256: 4247 case X86::BI__builtin_ia32_gatherq_ps: 4248 case X86::BI__builtin_ia32_gatherq_ps256: 4249 case X86::BI__builtin_ia32_gatherd_q: 4250 case X86::BI__builtin_ia32_gatherd_q256: 4251 case X86::BI__builtin_ia32_gatherq_q: 4252 case X86::BI__builtin_ia32_gatherq_q256: 4253 case X86::BI__builtin_ia32_gatherd_d: 4254 case X86::BI__builtin_ia32_gatherd_d256: 4255 case X86::BI__builtin_ia32_gatherq_d: 4256 case X86::BI__builtin_ia32_gatherq_d256: 4257 case X86::BI__builtin_ia32_gather3div2df: 4258 case X86::BI__builtin_ia32_gather3div2di: 4259 case X86::BI__builtin_ia32_gather3div4df: 4260 case X86::BI__builtin_ia32_gather3div4di: 4261 case X86::BI__builtin_ia32_gather3div4sf: 4262 case X86::BI__builtin_ia32_gather3div4si: 4263 case X86::BI__builtin_ia32_gather3div8sf: 4264 case X86::BI__builtin_ia32_gather3div8si: 4265 case X86::BI__builtin_ia32_gather3siv2df: 4266 case X86::BI__builtin_ia32_gather3siv2di: 4267 case X86::BI__builtin_ia32_gather3siv4df: 4268 case X86::BI__builtin_ia32_gather3siv4di: 4269 case X86::BI__builtin_ia32_gather3siv4sf: 4270 case X86::BI__builtin_ia32_gather3siv4si: 4271 case X86::BI__builtin_ia32_gather3siv8sf: 4272 case X86::BI__builtin_ia32_gather3siv8si: 4273 case X86::BI__builtin_ia32_gathersiv8df: 4274 case X86::BI__builtin_ia32_gathersiv16sf: 4275 case X86::BI__builtin_ia32_gatherdiv8df: 4276 case X86::BI__builtin_ia32_gatherdiv16sf: 4277 case X86::BI__builtin_ia32_gathersiv8di: 4278 case X86::BI__builtin_ia32_gathersiv16si: 4279 case X86::BI__builtin_ia32_gatherdiv8di: 4280 case X86::BI__builtin_ia32_gatherdiv16si: 4281 case X86::BI__builtin_ia32_scatterdiv2df: 4282 case X86::BI__builtin_ia32_scatterdiv2di: 4283 case X86::BI__builtin_ia32_scatterdiv4df: 4284 case X86::BI__builtin_ia32_scatterdiv4di: 4285 case X86::BI__builtin_ia32_scatterdiv4sf: 4286 case X86::BI__builtin_ia32_scatterdiv4si: 4287 case X86::BI__builtin_ia32_scatterdiv8sf: 4288 case X86::BI__builtin_ia32_scatterdiv8si: 4289 case X86::BI__builtin_ia32_scattersiv2df: 4290 case X86::BI__builtin_ia32_scattersiv2di: 4291 case X86::BI__builtin_ia32_scattersiv4df: 4292 case X86::BI__builtin_ia32_scattersiv4di: 4293 case X86::BI__builtin_ia32_scattersiv4sf: 4294 case X86::BI__builtin_ia32_scattersiv4si: 4295 case X86::BI__builtin_ia32_scattersiv8sf: 4296 case X86::BI__builtin_ia32_scattersiv8si: 4297 case X86::BI__builtin_ia32_scattersiv8df: 4298 case X86::BI__builtin_ia32_scattersiv16sf: 4299 case X86::BI__builtin_ia32_scatterdiv8df: 4300 case X86::BI__builtin_ia32_scatterdiv16sf: 4301 case X86::BI__builtin_ia32_scattersiv8di: 4302 case X86::BI__builtin_ia32_scattersiv16si: 4303 case X86::BI__builtin_ia32_scatterdiv8di: 4304 case X86::BI__builtin_ia32_scatterdiv16si: 4305 
    ArgNum = 4;
    break;
  }

  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  if (Result == 1 || Result == 2 || Result == 4 || Result == 8)
    return false;

  return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_scale)
         << Arg->getSourceRange();
}

enum { TileRegLow = 0, TileRegHigh = 7 };

bool Sema::CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall,
                                             ArrayRef<int> ArgNums) {
  for (int ArgNum : ArgNums) {
    if (SemaBuiltinConstantArgRange(TheCall, ArgNum, TileRegLow, TileRegHigh))
      return true;
  }
  return false;
}

bool Sema::CheckX86BuiltinTileDuplicate(CallExpr *TheCall,
                                        ArrayRef<int> ArgNums) {
  // There are at most TileRegHigh + 1 tile registers, so use one bit per
  // register in the bitset to track which ones have already been used.
  std::bitset<TileRegHigh + 1> ArgValues;
  for (int ArgNum : ArgNums) {
    Expr *Arg = TheCall->getArg(ArgNum);
    if (Arg->isTypeDependent() || Arg->isValueDependent())
      continue;

    llvm::APSInt Result;
    if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
      return true;
    int ArgExtValue = Result.getExtValue();
    assert((ArgExtValue >= TileRegLow && ArgExtValue <= TileRegHigh) &&
           "Incorrect tile register num.");
    if (ArgValues.test(ArgExtValue))
      return Diag(TheCall->getBeginLoc(),
                  diag::err_x86_builtin_tile_arg_duplicate)
             << TheCall->getArg(ArgNum)->getSourceRange();
    ArgValues.set(ArgExtValue);
  }
  return false;
}

bool Sema::CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall,
                                                ArrayRef<int> ArgNums) {
  return CheckX86BuiltinTileArgumentsRange(TheCall, ArgNums) ||
         CheckX86BuiltinTileDuplicate(TheCall, ArgNums);
}

bool Sema::CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall) {
  switch (BuiltinID) {
  default:
    return false;
  case X86::BI__builtin_ia32_tileloadd64:
  case X86::BI__builtin_ia32_tileloaddt164:
  case X86::BI__builtin_ia32_tilestored64:
  case X86::BI__builtin_ia32_tilezero:
    return CheckX86BuiltinTileArgumentsRange(TheCall, 0);
  case X86::BI__builtin_ia32_tdpbssd:
  case X86::BI__builtin_ia32_tdpbsud:
  case X86::BI__builtin_ia32_tdpbusd:
  case X86::BI__builtin_ia32_tdpbuud:
  case X86::BI__builtin_ia32_tdpbf16ps:
    return CheckX86BuiltinTileRangeAndDuplicate(TheCall, {0, 1, 2});
  }
}

static bool isX86_32Builtin(unsigned BuiltinID) {
  // These builtins only work on x86-32 targets.
  switch (BuiltinID) {
  case X86::BI__builtin_ia32_readeflags_u32:
  case X86::BI__builtin_ia32_writeeflags_u32:
    return true;
  }

  return false;
}

bool Sema::CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                       CallExpr *TheCall) {
  if (BuiltinID == X86::BI__builtin_cpu_supports)
    return SemaBuiltinCpuSupports(*this, TI, TheCall);

  if (BuiltinID == X86::BI__builtin_cpu_is)
    return SemaBuiltinCpuIs(*this, TI, TheCall);

  // Check for 32-bit only builtins on a 64-bit target.
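  // e.g., __builtin_ia32_readeflags_u32 is diagnosed when the target is not
  // 32-bit x86 (illustrative).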
4406 const llvm::Triple &TT = TI.getTriple();
4407 if (TT.getArch() != llvm::Triple::x86 && isX86_32Builtin(BuiltinID))
4408 return Diag(TheCall->getCallee()->getBeginLoc(),
4409 diag::err_32_bit_builtin_64_bit_tgt); 4410
4411 // If the intrinsic has rounding or SAE, make sure it's valid.
4412 if (CheckX86BuiltinRoundingOrSAE(BuiltinID, TheCall))
4413 return true; 4414
4415 // If the intrinsic has a gather/scatter scale immediate, make sure it's valid.
4416 if (CheckX86BuiltinGatherScatterScale(BuiltinID, TheCall))
4417 return true; 4418
4419 // If the intrinsic has tile arguments, make sure they are valid.
4420 if (CheckX86BuiltinTileArguments(BuiltinID, TheCall))
4421 return true; 4422
4423 // For intrinsics which take an immediate value as part of the instruction,
4424 // range check them here.
4425 int i = 0, l = 0, u = 0;
4426 switch (BuiltinID) {
4427 default:
4428 return false;
4429 case X86::BI__builtin_ia32_vec_ext_v2si:
4430 case X86::BI__builtin_ia32_vec_ext_v2di:
4431 case X86::BI__builtin_ia32_vextractf128_pd256:
4432 case X86::BI__builtin_ia32_vextractf128_ps256:
4433 case X86::BI__builtin_ia32_vextractf128_si256:
4434 case X86::BI__builtin_ia32_extract128i256:
4435 case X86::BI__builtin_ia32_extractf64x4_mask:
4436 case X86::BI__builtin_ia32_extracti64x4_mask:
4437 case X86::BI__builtin_ia32_extractf32x8_mask:
4438 case X86::BI__builtin_ia32_extracti32x8_mask:
4439 case X86::BI__builtin_ia32_extractf64x2_256_mask:
4440 case X86::BI__builtin_ia32_extracti64x2_256_mask:
4441 case X86::BI__builtin_ia32_extractf32x4_256_mask:
4442 case X86::BI__builtin_ia32_extracti32x4_256_mask:
4443 i = 1; l = 0; u = 1;
4444 break;
4445 case X86::BI__builtin_ia32_vec_set_v2di:
4446 case X86::BI__builtin_ia32_vinsertf128_pd256:
4447 case X86::BI__builtin_ia32_vinsertf128_ps256:
4448 case X86::BI__builtin_ia32_vinsertf128_si256:
4449 case X86::BI__builtin_ia32_insert128i256:
4450 case X86::BI__builtin_ia32_insertf32x8:
4451 case X86::BI__builtin_ia32_inserti32x8:
4452 case X86::BI__builtin_ia32_insertf64x4:
4453 case X86::BI__builtin_ia32_inserti64x4:
4454 case X86::BI__builtin_ia32_insertf64x2_256:
4455 case X86::BI__builtin_ia32_inserti64x2_256:
4456 case X86::BI__builtin_ia32_insertf32x4_256:
4457 case X86::BI__builtin_ia32_inserti32x4_256:
4458 i = 2; l = 0; u = 1;
4459 break;
4460 case X86::BI__builtin_ia32_vpermilpd:
4461 case X86::BI__builtin_ia32_vec_ext_v4hi:
4462 case X86::BI__builtin_ia32_vec_ext_v4si:
4463 case X86::BI__builtin_ia32_vec_ext_v4sf:
4464 case X86::BI__builtin_ia32_vec_ext_v4di:
4465 case X86::BI__builtin_ia32_extractf32x4_mask:
4466 case X86::BI__builtin_ia32_extracti32x4_mask:
4467 case X86::BI__builtin_ia32_extractf64x2_512_mask:
4468 case X86::BI__builtin_ia32_extracti64x2_512_mask:
4469 i = 1; l = 0; u = 3;
4470 break;
4471 case X86::BI_mm_prefetch:
4472 case X86::BI__builtin_ia32_vec_ext_v8hi:
4473 case X86::BI__builtin_ia32_vec_ext_v8si:
4474 i = 1; l = 0; u = 7;
4475 break;
4476 case X86::BI__builtin_ia32_sha1rnds4:
4477 case X86::BI__builtin_ia32_blendpd:
4478 case X86::BI__builtin_ia32_shufpd:
4479 case X86::BI__builtin_ia32_vec_set_v4hi:
4480 case X86::BI__builtin_ia32_vec_set_v4si:
4481 case X86::BI__builtin_ia32_vec_set_v4di:
4482 case X86::BI__builtin_ia32_shuf_f32x4_256:
4483 case X86::BI__builtin_ia32_shuf_f64x2_256:
4484 case X86::BI__builtin_ia32_shuf_i32x4_256:
4485 case X86::BI__builtin_ia32_shuf_i64x2_256:
4486 case X86::BI__builtin_ia32_insertf64x2_512:
4487 case X86::BI__builtin_ia32_inserti64x2_512:
4488 case X86::BI__builtin_ia32_insertf32x4: 4489
case X86::BI__builtin_ia32_inserti32x4: 4490 i = 2; l = 0; u = 3; 4491 break; 4492 case X86::BI__builtin_ia32_vpermil2pd: 4493 case X86::BI__builtin_ia32_vpermil2pd256: 4494 case X86::BI__builtin_ia32_vpermil2ps: 4495 case X86::BI__builtin_ia32_vpermil2ps256: 4496 i = 3; l = 0; u = 3; 4497 break; 4498 case X86::BI__builtin_ia32_cmpb128_mask: 4499 case X86::BI__builtin_ia32_cmpw128_mask: 4500 case X86::BI__builtin_ia32_cmpd128_mask: 4501 case X86::BI__builtin_ia32_cmpq128_mask: 4502 case X86::BI__builtin_ia32_cmpb256_mask: 4503 case X86::BI__builtin_ia32_cmpw256_mask: 4504 case X86::BI__builtin_ia32_cmpd256_mask: 4505 case X86::BI__builtin_ia32_cmpq256_mask: 4506 case X86::BI__builtin_ia32_cmpb512_mask: 4507 case X86::BI__builtin_ia32_cmpw512_mask: 4508 case X86::BI__builtin_ia32_cmpd512_mask: 4509 case X86::BI__builtin_ia32_cmpq512_mask: 4510 case X86::BI__builtin_ia32_ucmpb128_mask: 4511 case X86::BI__builtin_ia32_ucmpw128_mask: 4512 case X86::BI__builtin_ia32_ucmpd128_mask: 4513 case X86::BI__builtin_ia32_ucmpq128_mask: 4514 case X86::BI__builtin_ia32_ucmpb256_mask: 4515 case X86::BI__builtin_ia32_ucmpw256_mask: 4516 case X86::BI__builtin_ia32_ucmpd256_mask: 4517 case X86::BI__builtin_ia32_ucmpq256_mask: 4518 case X86::BI__builtin_ia32_ucmpb512_mask: 4519 case X86::BI__builtin_ia32_ucmpw512_mask: 4520 case X86::BI__builtin_ia32_ucmpd512_mask: 4521 case X86::BI__builtin_ia32_ucmpq512_mask: 4522 case X86::BI__builtin_ia32_vpcomub: 4523 case X86::BI__builtin_ia32_vpcomuw: 4524 case X86::BI__builtin_ia32_vpcomud: 4525 case X86::BI__builtin_ia32_vpcomuq: 4526 case X86::BI__builtin_ia32_vpcomb: 4527 case X86::BI__builtin_ia32_vpcomw: 4528 case X86::BI__builtin_ia32_vpcomd: 4529 case X86::BI__builtin_ia32_vpcomq: 4530 case X86::BI__builtin_ia32_vec_set_v8hi: 4531 case X86::BI__builtin_ia32_vec_set_v8si: 4532 i = 2; l = 0; u = 7; 4533 break; 4534 case X86::BI__builtin_ia32_vpermilpd256: 4535 case X86::BI__builtin_ia32_roundps: 4536 case X86::BI__builtin_ia32_roundpd: 4537 case X86::BI__builtin_ia32_roundps256: 4538 case X86::BI__builtin_ia32_roundpd256: 4539 case X86::BI__builtin_ia32_getmantpd128_mask: 4540 case X86::BI__builtin_ia32_getmantpd256_mask: 4541 case X86::BI__builtin_ia32_getmantps128_mask: 4542 case X86::BI__builtin_ia32_getmantps256_mask: 4543 case X86::BI__builtin_ia32_getmantpd512_mask: 4544 case X86::BI__builtin_ia32_getmantps512_mask: 4545 case X86::BI__builtin_ia32_getmantph128_mask: 4546 case X86::BI__builtin_ia32_getmantph256_mask: 4547 case X86::BI__builtin_ia32_getmantph512_mask: 4548 case X86::BI__builtin_ia32_vec_ext_v16qi: 4549 case X86::BI__builtin_ia32_vec_ext_v16hi: 4550 i = 1; l = 0; u = 15; 4551 break; 4552 case X86::BI__builtin_ia32_pblendd128: 4553 case X86::BI__builtin_ia32_blendps: 4554 case X86::BI__builtin_ia32_blendpd256: 4555 case X86::BI__builtin_ia32_shufpd256: 4556 case X86::BI__builtin_ia32_roundss: 4557 case X86::BI__builtin_ia32_roundsd: 4558 case X86::BI__builtin_ia32_rangepd128_mask: 4559 case X86::BI__builtin_ia32_rangepd256_mask: 4560 case X86::BI__builtin_ia32_rangepd512_mask: 4561 case X86::BI__builtin_ia32_rangeps128_mask: 4562 case X86::BI__builtin_ia32_rangeps256_mask: 4563 case X86::BI__builtin_ia32_rangeps512_mask: 4564 case X86::BI__builtin_ia32_getmantsd_round_mask: 4565 case X86::BI__builtin_ia32_getmantss_round_mask: 4566 case X86::BI__builtin_ia32_getmantsh_round_mask: 4567 case X86::BI__builtin_ia32_vec_set_v16qi: 4568 case X86::BI__builtin_ia32_vec_set_v16hi: 4569 i = 2; l = 0; u = 15; 4570 break; 4571 case 
X86::BI__builtin_ia32_vec_ext_v32qi: 4572 i = 1; l = 0; u = 31; 4573 break; 4574 case X86::BI__builtin_ia32_cmpps: 4575 case X86::BI__builtin_ia32_cmpss: 4576 case X86::BI__builtin_ia32_cmppd: 4577 case X86::BI__builtin_ia32_cmpsd: 4578 case X86::BI__builtin_ia32_cmpps256: 4579 case X86::BI__builtin_ia32_cmppd256: 4580 case X86::BI__builtin_ia32_cmpps128_mask: 4581 case X86::BI__builtin_ia32_cmppd128_mask: 4582 case X86::BI__builtin_ia32_cmpps256_mask: 4583 case X86::BI__builtin_ia32_cmppd256_mask: 4584 case X86::BI__builtin_ia32_cmpps512_mask: 4585 case X86::BI__builtin_ia32_cmppd512_mask: 4586 case X86::BI__builtin_ia32_cmpsd_mask: 4587 case X86::BI__builtin_ia32_cmpss_mask: 4588 case X86::BI__builtin_ia32_vec_set_v32qi: 4589 i = 2; l = 0; u = 31; 4590 break; 4591 case X86::BI__builtin_ia32_permdf256: 4592 case X86::BI__builtin_ia32_permdi256: 4593 case X86::BI__builtin_ia32_permdf512: 4594 case X86::BI__builtin_ia32_permdi512: 4595 case X86::BI__builtin_ia32_vpermilps: 4596 case X86::BI__builtin_ia32_vpermilps256: 4597 case X86::BI__builtin_ia32_vpermilpd512: 4598 case X86::BI__builtin_ia32_vpermilps512: 4599 case X86::BI__builtin_ia32_pshufd: 4600 case X86::BI__builtin_ia32_pshufd256: 4601 case X86::BI__builtin_ia32_pshufd512: 4602 case X86::BI__builtin_ia32_pshufhw: 4603 case X86::BI__builtin_ia32_pshufhw256: 4604 case X86::BI__builtin_ia32_pshufhw512: 4605 case X86::BI__builtin_ia32_pshuflw: 4606 case X86::BI__builtin_ia32_pshuflw256: 4607 case X86::BI__builtin_ia32_pshuflw512: 4608 case X86::BI__builtin_ia32_vcvtps2ph: 4609 case X86::BI__builtin_ia32_vcvtps2ph_mask: 4610 case X86::BI__builtin_ia32_vcvtps2ph256: 4611 case X86::BI__builtin_ia32_vcvtps2ph256_mask: 4612 case X86::BI__builtin_ia32_vcvtps2ph512_mask: 4613 case X86::BI__builtin_ia32_rndscaleps_128_mask: 4614 case X86::BI__builtin_ia32_rndscalepd_128_mask: 4615 case X86::BI__builtin_ia32_rndscaleps_256_mask: 4616 case X86::BI__builtin_ia32_rndscalepd_256_mask: 4617 case X86::BI__builtin_ia32_rndscaleps_mask: 4618 case X86::BI__builtin_ia32_rndscalepd_mask: 4619 case X86::BI__builtin_ia32_rndscaleph_mask: 4620 case X86::BI__builtin_ia32_reducepd128_mask: 4621 case X86::BI__builtin_ia32_reducepd256_mask: 4622 case X86::BI__builtin_ia32_reducepd512_mask: 4623 case X86::BI__builtin_ia32_reduceps128_mask: 4624 case X86::BI__builtin_ia32_reduceps256_mask: 4625 case X86::BI__builtin_ia32_reduceps512_mask: 4626 case X86::BI__builtin_ia32_reduceph128_mask: 4627 case X86::BI__builtin_ia32_reduceph256_mask: 4628 case X86::BI__builtin_ia32_reduceph512_mask: 4629 case X86::BI__builtin_ia32_prold512: 4630 case X86::BI__builtin_ia32_prolq512: 4631 case X86::BI__builtin_ia32_prold128: 4632 case X86::BI__builtin_ia32_prold256: 4633 case X86::BI__builtin_ia32_prolq128: 4634 case X86::BI__builtin_ia32_prolq256: 4635 case X86::BI__builtin_ia32_prord512: 4636 case X86::BI__builtin_ia32_prorq512: 4637 case X86::BI__builtin_ia32_prord128: 4638 case X86::BI__builtin_ia32_prord256: 4639 case X86::BI__builtin_ia32_prorq128: 4640 case X86::BI__builtin_ia32_prorq256: 4641 case X86::BI__builtin_ia32_fpclasspd128_mask: 4642 case X86::BI__builtin_ia32_fpclasspd256_mask: 4643 case X86::BI__builtin_ia32_fpclassps128_mask: 4644 case X86::BI__builtin_ia32_fpclassps256_mask: 4645 case X86::BI__builtin_ia32_fpclassps512_mask: 4646 case X86::BI__builtin_ia32_fpclasspd512_mask: 4647 case X86::BI__builtin_ia32_fpclassph128_mask: 4648 case X86::BI__builtin_ia32_fpclassph256_mask: 4649 case X86::BI__builtin_ia32_fpclassph512_mask: 4650 case 
X86::BI__builtin_ia32_fpclasssd_mask: 4651 case X86::BI__builtin_ia32_fpclassss_mask: 4652 case X86::BI__builtin_ia32_fpclasssh_mask: 4653 case X86::BI__builtin_ia32_pslldqi128_byteshift: 4654 case X86::BI__builtin_ia32_pslldqi256_byteshift: 4655 case X86::BI__builtin_ia32_pslldqi512_byteshift: 4656 case X86::BI__builtin_ia32_psrldqi128_byteshift: 4657 case X86::BI__builtin_ia32_psrldqi256_byteshift: 4658 case X86::BI__builtin_ia32_psrldqi512_byteshift: 4659 case X86::BI__builtin_ia32_kshiftliqi: 4660 case X86::BI__builtin_ia32_kshiftlihi: 4661 case X86::BI__builtin_ia32_kshiftlisi: 4662 case X86::BI__builtin_ia32_kshiftlidi: 4663 case X86::BI__builtin_ia32_kshiftriqi: 4664 case X86::BI__builtin_ia32_kshiftrihi: 4665 case X86::BI__builtin_ia32_kshiftrisi: 4666 case X86::BI__builtin_ia32_kshiftridi: 4667 i = 1; l = 0; u = 255; 4668 break; 4669 case X86::BI__builtin_ia32_vperm2f128_pd256: 4670 case X86::BI__builtin_ia32_vperm2f128_ps256: 4671 case X86::BI__builtin_ia32_vperm2f128_si256: 4672 case X86::BI__builtin_ia32_permti256: 4673 case X86::BI__builtin_ia32_pblendw128: 4674 case X86::BI__builtin_ia32_pblendw256: 4675 case X86::BI__builtin_ia32_blendps256: 4676 case X86::BI__builtin_ia32_pblendd256: 4677 case X86::BI__builtin_ia32_palignr128: 4678 case X86::BI__builtin_ia32_palignr256: 4679 case X86::BI__builtin_ia32_palignr512: 4680 case X86::BI__builtin_ia32_alignq512: 4681 case X86::BI__builtin_ia32_alignd512: 4682 case X86::BI__builtin_ia32_alignd128: 4683 case X86::BI__builtin_ia32_alignd256: 4684 case X86::BI__builtin_ia32_alignq128: 4685 case X86::BI__builtin_ia32_alignq256: 4686 case X86::BI__builtin_ia32_vcomisd: 4687 case X86::BI__builtin_ia32_vcomiss: 4688 case X86::BI__builtin_ia32_shuf_f32x4: 4689 case X86::BI__builtin_ia32_shuf_f64x2: 4690 case X86::BI__builtin_ia32_shuf_i32x4: 4691 case X86::BI__builtin_ia32_shuf_i64x2: 4692 case X86::BI__builtin_ia32_shufpd512: 4693 case X86::BI__builtin_ia32_shufps: 4694 case X86::BI__builtin_ia32_shufps256: 4695 case X86::BI__builtin_ia32_shufps512: 4696 case X86::BI__builtin_ia32_dbpsadbw128: 4697 case X86::BI__builtin_ia32_dbpsadbw256: 4698 case X86::BI__builtin_ia32_dbpsadbw512: 4699 case X86::BI__builtin_ia32_vpshldd128: 4700 case X86::BI__builtin_ia32_vpshldd256: 4701 case X86::BI__builtin_ia32_vpshldd512: 4702 case X86::BI__builtin_ia32_vpshldq128: 4703 case X86::BI__builtin_ia32_vpshldq256: 4704 case X86::BI__builtin_ia32_vpshldq512: 4705 case X86::BI__builtin_ia32_vpshldw128: 4706 case X86::BI__builtin_ia32_vpshldw256: 4707 case X86::BI__builtin_ia32_vpshldw512: 4708 case X86::BI__builtin_ia32_vpshrdd128: 4709 case X86::BI__builtin_ia32_vpshrdd256: 4710 case X86::BI__builtin_ia32_vpshrdd512: 4711 case X86::BI__builtin_ia32_vpshrdq128: 4712 case X86::BI__builtin_ia32_vpshrdq256: 4713 case X86::BI__builtin_ia32_vpshrdq512: 4714 case X86::BI__builtin_ia32_vpshrdw128: 4715 case X86::BI__builtin_ia32_vpshrdw256: 4716 case X86::BI__builtin_ia32_vpshrdw512: 4717 i = 2; l = 0; u = 255; 4718 break; 4719 case X86::BI__builtin_ia32_fixupimmpd512_mask: 4720 case X86::BI__builtin_ia32_fixupimmpd512_maskz: 4721 case X86::BI__builtin_ia32_fixupimmps512_mask: 4722 case X86::BI__builtin_ia32_fixupimmps512_maskz: 4723 case X86::BI__builtin_ia32_fixupimmsd_mask: 4724 case X86::BI__builtin_ia32_fixupimmsd_maskz: 4725 case X86::BI__builtin_ia32_fixupimmss_mask: 4726 case X86::BI__builtin_ia32_fixupimmss_maskz: 4727 case X86::BI__builtin_ia32_fixupimmpd128_mask: 4728 case X86::BI__builtin_ia32_fixupimmpd128_maskz: 4729 case 
X86::BI__builtin_ia32_fixupimmpd256_mask:
4730 case X86::BI__builtin_ia32_fixupimmpd256_maskz:
4731 case X86::BI__builtin_ia32_fixupimmps128_mask:
4732 case X86::BI__builtin_ia32_fixupimmps128_maskz:
4733 case X86::BI__builtin_ia32_fixupimmps256_mask:
4734 case X86::BI__builtin_ia32_fixupimmps256_maskz:
4735 case X86::BI__builtin_ia32_pternlogd512_mask:
4736 case X86::BI__builtin_ia32_pternlogd512_maskz:
4737 case X86::BI__builtin_ia32_pternlogq512_mask:
4738 case X86::BI__builtin_ia32_pternlogq512_maskz:
4739 case X86::BI__builtin_ia32_pternlogd128_mask:
4740 case X86::BI__builtin_ia32_pternlogd128_maskz:
4741 case X86::BI__builtin_ia32_pternlogd256_mask:
4742 case X86::BI__builtin_ia32_pternlogd256_maskz:
4743 case X86::BI__builtin_ia32_pternlogq128_mask:
4744 case X86::BI__builtin_ia32_pternlogq128_maskz:
4745 case X86::BI__builtin_ia32_pternlogq256_mask:
4746 case X86::BI__builtin_ia32_pternlogq256_maskz:
4747 i = 3; l = 0; u = 255;
4748 break;
4749 case X86::BI__builtin_ia32_gatherpfdpd:
4750 case X86::BI__builtin_ia32_gatherpfdps:
4751 case X86::BI__builtin_ia32_gatherpfqpd:
4752 case X86::BI__builtin_ia32_gatherpfqps:
4753 case X86::BI__builtin_ia32_scatterpfdpd:
4754 case X86::BI__builtin_ia32_scatterpfdps:
4755 case X86::BI__builtin_ia32_scatterpfqpd:
4756 case X86::BI__builtin_ia32_scatterpfqps:
4757 i = 4; l = 2; u = 3;
4758 break;
4759 case X86::BI__builtin_ia32_reducesd_mask:
4760 case X86::BI__builtin_ia32_reducess_mask:
4761 case X86::BI__builtin_ia32_rndscalesd_round_mask:
4762 case X86::BI__builtin_ia32_rndscaless_round_mask:
4763 case X86::BI__builtin_ia32_rndscalesh_round_mask:
4764 case X86::BI__builtin_ia32_reducesh_mask:
4765 i = 4; l = 0; u = 255;
4766 break; 4767 } 4768
4769 // Note that we don't force a hard error on the range check here, allowing
4770 // template-generated or macro-generated dead code to potentially have out-of-
4771 // range values. These need to code generate, but don't necessarily need to
4772 // make any sense. We use a warning that defaults to an error.
4773 return SemaBuiltinConstantArgRange(TheCall, i, l, u, /*RangeIsError*/ false);
4774 } 4775
4776 /// Given a FunctionDecl's FormatAttr, attempts to populate the FormatStringInfo
4777 /// parameter with the FormatAttr's correct format_idx and firstDataArg.
4778 /// Returns true when the format fits the function and the FormatStringInfo has
4779 /// been populated.
4780 bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
4781 FormatStringInfo *FSI) {
4782 FSI->HasVAListArg = Format->getFirstArg() == 0;
4783 FSI->FormatIdx = Format->getFormatIdx() - 1;
4784 FSI->FirstDataArg = FSI->HasVAListArg ? 0 : Format->getFirstArg() - 1; 4785
4786 // The way the format attribute works in GCC, the implicit this argument
4787 // of member functions is counted. However, it doesn't appear in our own
4788 // lists, so decrement format_idx in that case.
4789 if (IsCXXMember) {
4790 if (FSI->FormatIdx == 0)
4791 return false;
4792 --FSI->FormatIdx;
4793 if (FSI->FirstDataArg != 0)
4794 --FSI->FirstDataArg; 4795 }
4796 return true; 4797 } 4798
4799 /// Checks if the given expression evaluates to null.
4800 ///
4801 /// Returns true if the value evaluates to null.
4802 static bool CheckNonNullExpr(Sema &S, const Expr *Expr) {
4803 // If the expression has non-null type, it doesn't evaluate to null.
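// For example (illustrative): an argument whose declared type already carries a
// _Nonnull annotation, e.g. a variable declared as 'int *_Nonnull q', is assumed
// to be non-null here and is never flagged with warn_null_arg.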
4804 if (auto nullability 4805 = Expr->IgnoreImplicit()->getType()->getNullability(S.Context)) { 4806 if (*nullability == NullabilityKind::NonNull) 4807 return false; 4808 } 4809 4810 // As a special case, transparent unions initialized with zero are 4811 // considered null for the purposes of the nonnull attribute. 4812 if (const RecordType *UT = Expr->getType()->getAsUnionType()) { 4813 if (UT->getDecl()->hasAttr<TransparentUnionAttr>()) 4814 if (const CompoundLiteralExpr *CLE = 4815 dyn_cast<CompoundLiteralExpr>(Expr)) 4816 if (const InitListExpr *ILE = 4817 dyn_cast<InitListExpr>(CLE->getInitializer())) 4818 Expr = ILE->getInit(0); 4819 } 4820 4821 bool Result; 4822 return (!Expr->isValueDependent() && 4823 Expr->EvaluateAsBooleanCondition(Result, S.Context) && 4824 !Result); 4825 } 4826 4827 static void CheckNonNullArgument(Sema &S, 4828 const Expr *ArgExpr, 4829 SourceLocation CallSiteLoc) { 4830 if (CheckNonNullExpr(S, ArgExpr)) 4831 S.DiagRuntimeBehavior(CallSiteLoc, ArgExpr, 4832 S.PDiag(diag::warn_null_arg) 4833 << ArgExpr->getSourceRange()); 4834 } 4835 4836 bool Sema::GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx) { 4837 FormatStringInfo FSI; 4838 if ((GetFormatStringType(Format) == FST_NSString) && 4839 getFormatStringInfo(Format, false, &FSI)) { 4840 Idx = FSI.FormatIdx; 4841 return true; 4842 } 4843 return false; 4844 } 4845 4846 /// Diagnose use of %s directive in an NSString which is being passed 4847 /// as formatting string to formatting method. 4848 static void 4849 DiagnoseCStringFormatDirectiveInCFAPI(Sema &S, 4850 const NamedDecl *FDecl, 4851 Expr **Args, 4852 unsigned NumArgs) { 4853 unsigned Idx = 0; 4854 bool Format = false; 4855 ObjCStringFormatFamily SFFamily = FDecl->getObjCFStringFormattingFamily(); 4856 if (SFFamily == ObjCStringFormatFamily::SFF_CFString) { 4857 Idx = 2; 4858 Format = true; 4859 } 4860 else 4861 for (const auto *I : FDecl->specific_attrs<FormatAttr>()) { 4862 if (S.GetFormatNSStringIdx(I, Idx)) { 4863 Format = true; 4864 break; 4865 } 4866 } 4867 if (!Format || NumArgs <= Idx) 4868 return; 4869 const Expr *FormatExpr = Args[Idx]; 4870 if (const CStyleCastExpr *CSCE = dyn_cast<CStyleCastExpr>(FormatExpr)) 4871 FormatExpr = CSCE->getSubExpr(); 4872 const StringLiteral *FormatString; 4873 if (const ObjCStringLiteral *OSL = 4874 dyn_cast<ObjCStringLiteral>(FormatExpr->IgnoreParenImpCasts())) 4875 FormatString = OSL->getString(); 4876 else 4877 FormatString = dyn_cast<StringLiteral>(FormatExpr->IgnoreParenImpCasts()); 4878 if (!FormatString) 4879 return; 4880 if (S.FormatStringHasSArg(FormatString)) { 4881 S.Diag(FormatExpr->getExprLoc(), diag::warn_objc_cdirective_format_string) 4882 << "%s" << 1 << 1; 4883 S.Diag(FDecl->getLocation(), diag::note_entity_declared_at) 4884 << FDecl->getDeclName(); 4885 } 4886 } 4887 4888 /// Determine whether the given type has a non-null nullability annotation. 4889 static bool isNonNullType(ASTContext &ctx, QualType type) { 4890 if (auto nullability = type->getNullability(ctx)) 4891 return *nullability == NullabilityKind::NonNull; 4892 4893 return false; 4894 } 4895 4896 static void CheckNonNullArguments(Sema &S, 4897 const NamedDecl *FDecl, 4898 const FunctionProtoType *Proto, 4899 ArrayRef<const Expr *> Args, 4900 SourceLocation CallSiteLoc) { 4901 assert((FDecl || Proto) && "Need a function declaration or prototype"); 4902 4903 // Already checked by by constant evaluator. 4904 if (S.isConstantEvaluated()) 4905 return; 4906 // Check the attributes attached to the method/function itself. 
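// Illustrative example (declaration names are hypothetical): given
//   void f(int *a, int *b) __attribute__((nonnull(1)));
// only the first argument is recorded in NonNullArgs below, whereas a bare
// __attribute__((nonnull)) with no index list makes every pointer argument
// subject to the null check.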
4907 llvm::SmallBitVector NonNullArgs; 4908 if (FDecl) { 4909 // Handle the nonnull attribute on the function/method declaration itself. 4910 for (const auto *NonNull : FDecl->specific_attrs<NonNullAttr>()) { 4911 if (!NonNull->args_size()) { 4912 // Easy case: all pointer arguments are nonnull. 4913 for (const auto *Arg : Args) 4914 if (S.isValidPointerAttrType(Arg->getType())) 4915 CheckNonNullArgument(S, Arg, CallSiteLoc); 4916 return; 4917 } 4918 4919 for (const ParamIdx &Idx : NonNull->args()) { 4920 unsigned IdxAST = Idx.getASTIndex(); 4921 if (IdxAST >= Args.size()) 4922 continue; 4923 if (NonNullArgs.empty()) 4924 NonNullArgs.resize(Args.size()); 4925 NonNullArgs.set(IdxAST); 4926 } 4927 } 4928 } 4929 4930 if (FDecl && (isa<FunctionDecl>(FDecl) || isa<ObjCMethodDecl>(FDecl))) { 4931 // Handle the nonnull attribute on the parameters of the 4932 // function/method. 4933 ArrayRef<ParmVarDecl*> parms; 4934 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(FDecl)) 4935 parms = FD->parameters(); 4936 else 4937 parms = cast<ObjCMethodDecl>(FDecl)->parameters(); 4938 4939 unsigned ParamIndex = 0; 4940 for (ArrayRef<ParmVarDecl*>::iterator I = parms.begin(), E = parms.end(); 4941 I != E; ++I, ++ParamIndex) { 4942 const ParmVarDecl *PVD = *I; 4943 if (PVD->hasAttr<NonNullAttr>() || 4944 isNonNullType(S.Context, PVD->getType())) { 4945 if (NonNullArgs.empty()) 4946 NonNullArgs.resize(Args.size()); 4947 4948 NonNullArgs.set(ParamIndex); 4949 } 4950 } 4951 } else { 4952 // If we have a non-function, non-method declaration but no 4953 // function prototype, try to dig out the function prototype. 4954 if (!Proto) { 4955 if (const ValueDecl *VD = dyn_cast<ValueDecl>(FDecl)) { 4956 QualType type = VD->getType().getNonReferenceType(); 4957 if (auto pointerType = type->getAs<PointerType>()) 4958 type = pointerType->getPointeeType(); 4959 else if (auto blockType = type->getAs<BlockPointerType>()) 4960 type = blockType->getPointeeType(); 4961 // FIXME: data member pointers? 4962 4963 // Dig out the function prototype, if there is one. 4964 Proto = type->getAs<FunctionProtoType>(); 4965 } 4966 } 4967 4968 // Fill in non-null argument information from the nullability 4969 // information on the parameter types (if we have them). 4970 if (Proto) { 4971 unsigned Index = 0; 4972 for (auto paramType : Proto->getParamTypes()) { 4973 if (isNonNullType(S.Context, paramType)) { 4974 if (NonNullArgs.empty()) 4975 NonNullArgs.resize(Args.size()); 4976 4977 NonNullArgs.set(Index); 4978 } 4979 4980 ++Index; 4981 } 4982 } 4983 } 4984 4985 // Check for non-null arguments. 4986 for (unsigned ArgIndex = 0, ArgIndexEnd = NonNullArgs.size(); 4987 ArgIndex != ArgIndexEnd; ++ArgIndex) { 4988 if (NonNullArgs[ArgIndex]) 4989 CheckNonNullArgument(S, Args[ArgIndex], CallSiteLoc); 4990 } 4991 } 4992 4993 /// Warn if a pointer or reference argument passed to a function points to an 4994 /// object that is less aligned than the parameter. This can happen when 4995 /// creating a typedef with a lower alignment than the original type and then 4996 /// calling functions defined in terms of the original type. 4997 void Sema::CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl, 4998 StringRef ParamName, QualType ArgTy, 4999 QualType ParamTy) { 5000 5001 // If a function accepts a pointer or reference type 5002 if (!ParamTy->isPointerType() && !ParamTy->isReferenceType()) 5003 return; 5004 5005 // If the parameter is a pointer type, get the pointee type for the 5006 // argument too. 
If the parameter is a reference type, don't try to get 5007 // the pointee type for the argument. 5008 if (ParamTy->isPointerType()) 5009 ArgTy = ArgTy->getPointeeType(); 5010 5011 // Remove reference or pointer 5012 ParamTy = ParamTy->getPointeeType(); 5013 5014 // Find expected alignment, and the actual alignment of the passed object. 5015 // getTypeAlignInChars requires complete types 5016 if (ArgTy.isNull() || ParamTy->isIncompleteType() || 5017 ArgTy->isIncompleteType() || ParamTy->isUndeducedType() || 5018 ArgTy->isUndeducedType()) 5019 return; 5020 5021 CharUnits ParamAlign = Context.getTypeAlignInChars(ParamTy); 5022 CharUnits ArgAlign = Context.getTypeAlignInChars(ArgTy); 5023 5024 // If the argument is less aligned than the parameter, there is a 5025 // potential alignment issue. 5026 if (ArgAlign < ParamAlign) 5027 Diag(Loc, diag::warn_param_mismatched_alignment) 5028 << (int)ArgAlign.getQuantity() << (int)ParamAlign.getQuantity() 5029 << ParamName << (FDecl != nullptr) << FDecl; 5030 } 5031 5032 /// Handles the checks for format strings, non-POD arguments to vararg 5033 /// functions, NULL arguments passed to non-NULL parameters, and diagnose_if 5034 /// attributes. 5035 void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, 5036 const Expr *ThisArg, ArrayRef<const Expr *> Args, 5037 bool IsMemberFunction, SourceLocation Loc, 5038 SourceRange Range, VariadicCallType CallType) { 5039 // FIXME: We should check as much as we can in the template definition. 5040 if (CurContext->isDependentContext()) 5041 return; 5042 5043 // Printf and scanf checking. 5044 llvm::SmallBitVector CheckedVarArgs; 5045 if (FDecl) { 5046 for (const auto *I : FDecl->specific_attrs<FormatAttr>()) { 5047 // Only create vector if there are format attributes. 5048 CheckedVarArgs.resize(Args.size()); 5049 5050 CheckFormatArguments(I, Args, IsMemberFunction, CallType, Loc, Range, 5051 CheckedVarArgs); 5052 } 5053 } 5054 5055 // Refuse POD arguments that weren't caught by the format string 5056 // checks above. 5057 auto *FD = dyn_cast_or_null<FunctionDecl>(FDecl); 5058 if (CallType != VariadicDoesNotApply && 5059 (!FD || FD->getBuiltinID() != Builtin::BI__noop)) { 5060 unsigned NumParams = Proto ? Proto->getNumParams() 5061 : FDecl && isa<FunctionDecl>(FDecl) 5062 ? cast<FunctionDecl>(FDecl)->getNumParams() 5063 : FDecl && isa<ObjCMethodDecl>(FDecl) 5064 ? cast<ObjCMethodDecl>(FDecl)->param_size() 5065 : 0; 5066 5067 for (unsigned ArgIdx = NumParams; ArgIdx < Args.size(); ++ArgIdx) { 5068 // Args[ArgIdx] can be null in malformed code. 5069 if (const Expr *Arg = Args[ArgIdx]) { 5070 if (CheckedVarArgs.empty() || !CheckedVarArgs[ArgIdx]) 5071 checkVariadicArgument(Arg, CallType); 5072 } 5073 } 5074 } 5075 5076 if (FDecl || Proto) { 5077 CheckNonNullArguments(*this, FDecl, Proto, Args, Loc); 5078 5079 // Type safety checking. 5080 if (FDecl) { 5081 for (const auto *I : FDecl->specific_attrs<ArgumentWithTypeTagAttr>()) 5082 CheckArgumentWithTypeTag(I, Args, Loc); 5083 } 5084 } 5085 5086 // Check that passed arguments match the alignment of original arguments. 5087 // Try to get the missing prototype from the declaration. 5088 if (!Proto && FDecl) { 5089 const auto *FT = FDecl->getFunctionType(); 5090 if (isa_and_nonnull<FunctionProtoType>(FT)) 5091 Proto = cast<FunctionProtoType>(FDecl->getFunctionType()); 5092 } 5093 if (Proto) { 5094 // For variadic functions, we may have more args than parameters. 5095 // For some K&R functions, we may have less args than parameters. 
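// Hypothetical example: if 'void g(struct S *p)' is declared in terms of S but
// the caller passes the address of an object whose type is a lower-aligned
// typedef of S, the loop below reports warn_param_mismatched_alignment for that
// argument; only the first min(Proto->getNumParams(), Args.size()) arguments
// are inspected.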
5096 const auto N = std::min<unsigned>(Proto->getNumParams(), Args.size()); 5097 for (unsigned ArgIdx = 0; ArgIdx < N; ++ArgIdx) { 5098 // Args[ArgIdx] can be null in malformed code. 5099 if (const Expr *Arg = Args[ArgIdx]) { 5100 if (Arg->containsErrors()) 5101 continue; 5102 5103 QualType ParamTy = Proto->getParamType(ArgIdx); 5104 QualType ArgTy = Arg->getType(); 5105 CheckArgAlignment(Arg->getExprLoc(), FDecl, std::to_string(ArgIdx + 1), 5106 ArgTy, ParamTy); 5107 } 5108 } 5109 } 5110 5111 if (FDecl && FDecl->hasAttr<AllocAlignAttr>()) { 5112 auto *AA = FDecl->getAttr<AllocAlignAttr>(); 5113 const Expr *Arg = Args[AA->getParamIndex().getASTIndex()]; 5114 if (!Arg->isValueDependent()) { 5115 Expr::EvalResult Align; 5116 if (Arg->EvaluateAsInt(Align, Context)) { 5117 const llvm::APSInt &I = Align.Val.getInt(); 5118 if (!I.isPowerOf2()) 5119 Diag(Arg->getExprLoc(), diag::warn_alignment_not_power_of_two) 5120 << Arg->getSourceRange(); 5121 5122 if (I > Sema::MaximumAlignment) 5123 Diag(Arg->getExprLoc(), diag::warn_assume_aligned_too_great) 5124 << Arg->getSourceRange() << Sema::MaximumAlignment; 5125 } 5126 } 5127 } 5128 5129 if (FD) 5130 diagnoseArgDependentDiagnoseIfAttrs(FD, ThisArg, Args, Loc); 5131 } 5132 5133 /// CheckConstructorCall - Check a constructor call for correctness and safety 5134 /// properties not enforced by the C type system. 5135 void Sema::CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType, 5136 ArrayRef<const Expr *> Args, 5137 const FunctionProtoType *Proto, 5138 SourceLocation Loc) { 5139 VariadicCallType CallType = 5140 Proto->isVariadic() ? VariadicConstructor : VariadicDoesNotApply; 5141 5142 auto *Ctor = cast<CXXConstructorDecl>(FDecl); 5143 CheckArgAlignment(Loc, FDecl, "'this'", Context.getPointerType(ThisType), 5144 Context.getPointerType(Ctor->getThisObjectType())); 5145 5146 checkCall(FDecl, Proto, /*ThisArg=*/nullptr, Args, /*IsMemberFunction=*/true, 5147 Loc, SourceRange(), CallType); 5148 } 5149 5150 /// CheckFunctionCall - Check a direct function call for various correctness 5151 /// and safety properties not strictly enforced by the C type system. 5152 bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, 5153 const FunctionProtoType *Proto) { 5154 bool IsMemberOperatorCall = isa<CXXOperatorCallExpr>(TheCall) && 5155 isa<CXXMethodDecl>(FDecl); 5156 bool IsMemberFunction = isa<CXXMemberCallExpr>(TheCall) || 5157 IsMemberOperatorCall; 5158 VariadicCallType CallType = getVariadicCallType(FDecl, Proto, 5159 TheCall->getCallee()); 5160 Expr** Args = TheCall->getArgs(); 5161 unsigned NumArgs = TheCall->getNumArgs(); 5162 5163 Expr *ImplicitThis = nullptr; 5164 if (IsMemberOperatorCall) { 5165 // If this is a call to a member operator, hide the first argument 5166 // from checkCall. 5167 // FIXME: Our choice of AST representation here is less than ideal. 5168 ImplicitThis = Args[0]; 5169 ++Args; 5170 --NumArgs; 5171 } else if (IsMemberFunction) 5172 ImplicitThis = 5173 cast<CXXMemberCallExpr>(TheCall)->getImplicitObjectArgument(); 5174 5175 if (ImplicitThis) { 5176 // ImplicitThis may or may not be a pointer, depending on whether . or -> is 5177 // used. 
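// Illustrative: for 'obj.method()' the implicit object argument has the
// object's type itself and is wrapped in a PointerType below, while for
// 'ptr->method()' it is already a pointer and is used as-is.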
5178 QualType ThisType = ImplicitThis->getType(); 5179 if (!ThisType->isPointerType()) { 5180 assert(!ThisType->isReferenceType()); 5181 ThisType = Context.getPointerType(ThisType); 5182 } 5183 5184 QualType ThisTypeFromDecl = 5185 Context.getPointerType(cast<CXXMethodDecl>(FDecl)->getThisObjectType()); 5186 5187 CheckArgAlignment(TheCall->getRParenLoc(), FDecl, "'this'", ThisType, 5188 ThisTypeFromDecl); 5189 } 5190 5191 checkCall(FDecl, Proto, ImplicitThis, llvm::makeArrayRef(Args, NumArgs), 5192 IsMemberFunction, TheCall->getRParenLoc(), 5193 TheCall->getCallee()->getSourceRange(), CallType); 5194 5195 IdentifierInfo *FnInfo = FDecl->getIdentifier(); 5196 // None of the checks below are needed for functions that don't have 5197 // simple names (e.g., C++ conversion functions). 5198 if (!FnInfo) 5199 return false; 5200 5201 CheckTCBEnforcement(TheCall, FDecl); 5202 5203 CheckAbsoluteValueFunction(TheCall, FDecl); 5204 CheckMaxUnsignedZero(TheCall, FDecl); 5205 5206 if (getLangOpts().ObjC) 5207 DiagnoseCStringFormatDirectiveInCFAPI(*this, FDecl, Args, NumArgs); 5208 5209 unsigned CMId = FDecl->getMemoryFunctionKind(); 5210 5211 // Handle memory setting and copying functions. 5212 switch (CMId) { 5213 case 0: 5214 return false; 5215 case Builtin::BIstrlcpy: // fallthrough 5216 case Builtin::BIstrlcat: 5217 CheckStrlcpycatArguments(TheCall, FnInfo); 5218 break; 5219 case Builtin::BIstrncat: 5220 CheckStrncatArguments(TheCall, FnInfo); 5221 break; 5222 case Builtin::BIfree: 5223 CheckFreeArguments(TheCall); 5224 break; 5225 default: 5226 CheckMemaccessArguments(TheCall, CMId, FnInfo); 5227 } 5228 5229 return false; 5230 } 5231 5232 bool Sema::CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation lbrac, 5233 ArrayRef<const Expr *> Args) { 5234 VariadicCallType CallType = 5235 Method->isVariadic() ? VariadicMethod : VariadicDoesNotApply; 5236 5237 checkCall(Method, nullptr, /*ThisArg=*/nullptr, Args, 5238 /*IsMemberFunction=*/false, lbrac, Method->getSourceRange(), 5239 CallType); 5240 5241 return false; 5242 } 5243 5244 bool Sema::CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, 5245 const FunctionProtoType *Proto) { 5246 QualType Ty; 5247 if (const auto *V = dyn_cast<VarDecl>(NDecl)) 5248 Ty = V->getType().getNonReferenceType(); 5249 else if (const auto *F = dyn_cast<FieldDecl>(NDecl)) 5250 Ty = F->getType().getNonReferenceType(); 5251 else 5252 return false; 5253 5254 if (!Ty->isBlockPointerType() && !Ty->isFunctionPointerType() && 5255 !Ty->isFunctionProtoType()) 5256 return false; 5257 5258 VariadicCallType CallType; 5259 if (!Proto || !Proto->isVariadic()) { 5260 CallType = VariadicDoesNotApply; 5261 } else if (Ty->isBlockPointerType()) { 5262 CallType = VariadicBlock; 5263 } else { // Ty->isFunctionPointerType() 5264 CallType = VariadicFunction; 5265 } 5266 5267 checkCall(NDecl, Proto, /*ThisArg=*/nullptr, 5268 llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()), 5269 /*IsMemberFunction=*/false, TheCall->getRParenLoc(), 5270 TheCall->getCallee()->getSourceRange(), CallType); 5271 5272 return false; 5273 } 5274 5275 /// Checks function calls when a FunctionDecl or a NamedDecl is not available, 5276 /// such as function pointers returned from functions. 
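/// For example (illustrative), a call written as 'getHandler()(42)', where
/// getHandler returns a function pointer, has no named callee declaration, so
/// only the prototype-driven checks in checkCall apply to it.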
5277 bool Sema::CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto) { 5278 VariadicCallType CallType = getVariadicCallType(/*FDecl=*/nullptr, Proto, 5279 TheCall->getCallee()); 5280 checkCall(/*FDecl=*/nullptr, Proto, /*ThisArg=*/nullptr, 5281 llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()), 5282 /*IsMemberFunction=*/false, TheCall->getRParenLoc(), 5283 TheCall->getCallee()->getSourceRange(), CallType); 5284 5285 return false; 5286 } 5287 5288 static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) { 5289 if (!llvm::isValidAtomicOrderingCABI(Ordering)) 5290 return false; 5291 5292 auto OrderingCABI = (llvm::AtomicOrderingCABI)Ordering; 5293 switch (Op) { 5294 case AtomicExpr::AO__c11_atomic_init: 5295 case AtomicExpr::AO__opencl_atomic_init: 5296 llvm_unreachable("There is no ordering argument for an init"); 5297 5298 case AtomicExpr::AO__c11_atomic_load: 5299 case AtomicExpr::AO__opencl_atomic_load: 5300 case AtomicExpr::AO__hip_atomic_load: 5301 case AtomicExpr::AO__atomic_load_n: 5302 case AtomicExpr::AO__atomic_load: 5303 return OrderingCABI != llvm::AtomicOrderingCABI::release && 5304 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel; 5305 5306 case AtomicExpr::AO__c11_atomic_store: 5307 case AtomicExpr::AO__opencl_atomic_store: 5308 case AtomicExpr::AO__hip_atomic_store: 5309 case AtomicExpr::AO__atomic_store: 5310 case AtomicExpr::AO__atomic_store_n: 5311 return OrderingCABI != llvm::AtomicOrderingCABI::consume && 5312 OrderingCABI != llvm::AtomicOrderingCABI::acquire && 5313 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel; 5314 5315 default: 5316 return true; 5317 } 5318 } 5319 5320 ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult, 5321 AtomicExpr::AtomicOp Op) { 5322 CallExpr *TheCall = cast<CallExpr>(TheCallResult.get()); 5323 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 5324 MultiExprArg Args{TheCall->getArgs(), TheCall->getNumArgs()}; 5325 return BuildAtomicExpr({TheCall->getBeginLoc(), TheCall->getEndLoc()}, 5326 DRE->getSourceRange(), TheCall->getRParenLoc(), Args, 5327 Op); 5328 } 5329 5330 ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange, 5331 SourceLocation RParenLoc, MultiExprArg Args, 5332 AtomicExpr::AtomicOp Op, 5333 AtomicArgumentOrder ArgOrder) { 5334 // All the non-OpenCL operations take one of the following forms. 5335 // The OpenCL operations take the __c11 forms with one extra argument for 5336 // synchronization scope. 
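// For instance (illustrative only):
//   __c11_atomic_load(p, order)              // two arguments
//   __opencl_atomic_load(p, order, scope)    // same form plus a scope argument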
5337 enum { 5338 // C __c11_atomic_init(A *, C) 5339 Init, 5340 5341 // C __c11_atomic_load(A *, int) 5342 Load, 5343 5344 // void __atomic_load(A *, CP, int) 5345 LoadCopy, 5346 5347 // void __atomic_store(A *, CP, int) 5348 Copy, 5349 5350 // C __c11_atomic_add(A *, M, int) 5351 Arithmetic, 5352 5353 // C __atomic_exchange_n(A *, CP, int) 5354 Xchg, 5355 5356 // void __atomic_exchange(A *, C *, CP, int) 5357 GNUXchg, 5358 5359 // bool __c11_atomic_compare_exchange_strong(A *, C *, CP, int, int) 5360 C11CmpXchg, 5361 5362 // bool __atomic_compare_exchange(A *, C *, CP, bool, int, int) 5363 GNUCmpXchg 5364 } Form = Init; 5365 5366 const unsigned NumForm = GNUCmpXchg + 1; 5367 const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 3, 4, 5, 6 }; 5368 const unsigned NumVals[] = { 1, 0, 1, 1, 1, 1, 2, 2, 3 }; 5369 // where: 5370 // C is an appropriate type, 5371 // A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins, 5372 // CP is C for __c11 builtins and GNU _n builtins and is C * otherwise, 5373 // M is C if C is an integer, and ptrdiff_t if C is a pointer, and 5374 // the int parameters are for orderings. 5375 5376 static_assert(sizeof(NumArgs)/sizeof(NumArgs[0]) == NumForm 5377 && sizeof(NumVals)/sizeof(NumVals[0]) == NumForm, 5378 "need to update code for modified forms"); 5379 static_assert(AtomicExpr::AO__c11_atomic_init == 0 && 5380 AtomicExpr::AO__c11_atomic_fetch_min + 1 == 5381 AtomicExpr::AO__atomic_load, 5382 "need to update code for modified C11 atomics"); 5383 bool IsOpenCL = Op >= AtomicExpr::AO__opencl_atomic_init && 5384 Op <= AtomicExpr::AO__opencl_atomic_fetch_max; 5385 bool IsHIP = Op >= AtomicExpr::AO__hip_atomic_load && 5386 Op <= AtomicExpr::AO__hip_atomic_fetch_max; 5387 bool IsC11 = (Op >= AtomicExpr::AO__c11_atomic_init && 5388 Op <= AtomicExpr::AO__c11_atomic_fetch_min) || 5389 IsOpenCL; 5390 bool IsN = Op == AtomicExpr::AO__atomic_load_n || 5391 Op == AtomicExpr::AO__atomic_store_n || 5392 Op == AtomicExpr::AO__atomic_exchange_n || 5393 Op == AtomicExpr::AO__atomic_compare_exchange_n; 5394 bool IsAddSub = false; 5395 5396 switch (Op) { 5397 case AtomicExpr::AO__c11_atomic_init: 5398 case AtomicExpr::AO__opencl_atomic_init: 5399 Form = Init; 5400 break; 5401 5402 case AtomicExpr::AO__c11_atomic_load: 5403 case AtomicExpr::AO__opencl_atomic_load: 5404 case AtomicExpr::AO__hip_atomic_load: 5405 case AtomicExpr::AO__atomic_load_n: 5406 Form = Load; 5407 break; 5408 5409 case AtomicExpr::AO__atomic_load: 5410 Form = LoadCopy; 5411 break; 5412 5413 case AtomicExpr::AO__c11_atomic_store: 5414 case AtomicExpr::AO__opencl_atomic_store: 5415 case AtomicExpr::AO__hip_atomic_store: 5416 case AtomicExpr::AO__atomic_store: 5417 case AtomicExpr::AO__atomic_store_n: 5418 Form = Copy; 5419 break; 5420 case AtomicExpr::AO__hip_atomic_fetch_add: 5421 case AtomicExpr::AO__hip_atomic_fetch_min: 5422 case AtomicExpr::AO__hip_atomic_fetch_max: 5423 case AtomicExpr::AO__c11_atomic_fetch_add: 5424 case AtomicExpr::AO__c11_atomic_fetch_sub: 5425 case AtomicExpr::AO__opencl_atomic_fetch_add: 5426 case AtomicExpr::AO__opencl_atomic_fetch_sub: 5427 case AtomicExpr::AO__atomic_fetch_add: 5428 case AtomicExpr::AO__atomic_fetch_sub: 5429 case AtomicExpr::AO__atomic_add_fetch: 5430 case AtomicExpr::AO__atomic_sub_fetch: 5431 IsAddSub = true; 5432 Form = Arithmetic; 5433 break; 5434 case AtomicExpr::AO__c11_atomic_fetch_and: 5435 case AtomicExpr::AO__c11_atomic_fetch_or: 5436 case AtomicExpr::AO__c11_atomic_fetch_xor: 5437 case AtomicExpr::AO__hip_atomic_fetch_and: 5438 case 
AtomicExpr::AO__hip_atomic_fetch_or: 5439 case AtomicExpr::AO__hip_atomic_fetch_xor: 5440 case AtomicExpr::AO__c11_atomic_fetch_nand: 5441 case AtomicExpr::AO__opencl_atomic_fetch_and: 5442 case AtomicExpr::AO__opencl_atomic_fetch_or: 5443 case AtomicExpr::AO__opencl_atomic_fetch_xor: 5444 case AtomicExpr::AO__atomic_fetch_and: 5445 case AtomicExpr::AO__atomic_fetch_or: 5446 case AtomicExpr::AO__atomic_fetch_xor: 5447 case AtomicExpr::AO__atomic_fetch_nand: 5448 case AtomicExpr::AO__atomic_and_fetch: 5449 case AtomicExpr::AO__atomic_or_fetch: 5450 case AtomicExpr::AO__atomic_xor_fetch: 5451 case AtomicExpr::AO__atomic_nand_fetch: 5452 Form = Arithmetic; 5453 break; 5454 case AtomicExpr::AO__c11_atomic_fetch_min: 5455 case AtomicExpr::AO__c11_atomic_fetch_max: 5456 case AtomicExpr::AO__opencl_atomic_fetch_min: 5457 case AtomicExpr::AO__opencl_atomic_fetch_max: 5458 case AtomicExpr::AO__atomic_min_fetch: 5459 case AtomicExpr::AO__atomic_max_fetch: 5460 case AtomicExpr::AO__atomic_fetch_min: 5461 case AtomicExpr::AO__atomic_fetch_max: 5462 Form = Arithmetic; 5463 break; 5464 5465 case AtomicExpr::AO__c11_atomic_exchange: 5466 case AtomicExpr::AO__hip_atomic_exchange: 5467 case AtomicExpr::AO__opencl_atomic_exchange: 5468 case AtomicExpr::AO__atomic_exchange_n: 5469 Form = Xchg; 5470 break; 5471 5472 case AtomicExpr::AO__atomic_exchange: 5473 Form = GNUXchg; 5474 break; 5475 5476 case AtomicExpr::AO__c11_atomic_compare_exchange_strong: 5477 case AtomicExpr::AO__c11_atomic_compare_exchange_weak: 5478 case AtomicExpr::AO__hip_atomic_compare_exchange_strong: 5479 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong: 5480 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak: 5481 case AtomicExpr::AO__hip_atomic_compare_exchange_weak: 5482 Form = C11CmpXchg; 5483 break; 5484 5485 case AtomicExpr::AO__atomic_compare_exchange: 5486 case AtomicExpr::AO__atomic_compare_exchange_n: 5487 Form = GNUCmpXchg; 5488 break; 5489 } 5490 5491 unsigned AdjustedNumArgs = NumArgs[Form]; 5492 if ((IsOpenCL || IsHIP) && Op != AtomicExpr::AO__opencl_atomic_init) 5493 ++AdjustedNumArgs; 5494 // Check we have the right number of arguments. 5495 if (Args.size() < AdjustedNumArgs) { 5496 Diag(CallRange.getEnd(), diag::err_typecheck_call_too_few_args) 5497 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size()) 5498 << ExprRange; 5499 return ExprError(); 5500 } else if (Args.size() > AdjustedNumArgs) { 5501 Diag(Args[AdjustedNumArgs]->getBeginLoc(), 5502 diag::err_typecheck_call_too_many_args) 5503 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size()) 5504 << ExprRange; 5505 return ExprError(); 5506 } 5507 5508 // Inspect the first argument of the atomic operation. 5509 Expr *Ptr = Args[0]; 5510 ExprResult ConvertedPtr = DefaultFunctionArrayLvalueConversion(Ptr); 5511 if (ConvertedPtr.isInvalid()) 5512 return ExprError(); 5513 5514 Ptr = ConvertedPtr.get(); 5515 const PointerType *pointerType = Ptr->getType()->getAs<PointerType>(); 5516 if (!pointerType) { 5517 Diag(ExprRange.getBegin(), diag::err_atomic_builtin_must_be_pointer) 5518 << Ptr->getType() << Ptr->getSourceRange(); 5519 return ExprError(); 5520 } 5521 5522 // For a __c11 builtin, this should be a pointer to an _Atomic type. 
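// e.g. (illustrative) a pointer declared as '_Atomic(int) *p' passes the check
// below, whereas handing a plain 'int *' to a __c11_atomic_* builtin is
// rejected with err_atomic_op_needs_atomic.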
5523 QualType AtomTy = pointerType->getPointeeType(); // 'A' 5524 QualType ValType = AtomTy; // 'C' 5525 if (IsC11) { 5526 if (!AtomTy->isAtomicType()) { 5527 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic) 5528 << Ptr->getType() << Ptr->getSourceRange(); 5529 return ExprError(); 5530 } 5531 if ((Form != Load && Form != LoadCopy && AtomTy.isConstQualified()) || 5532 AtomTy.getAddressSpace() == LangAS::opencl_constant) { 5533 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_atomic) 5534 << (AtomTy.isConstQualified() ? 0 : 1) << Ptr->getType() 5535 << Ptr->getSourceRange(); 5536 return ExprError(); 5537 } 5538 ValType = AtomTy->castAs<AtomicType>()->getValueType(); 5539 } else if (Form != Load && Form != LoadCopy) { 5540 if (ValType.isConstQualified()) { 5541 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_pointer) 5542 << Ptr->getType() << Ptr->getSourceRange(); 5543 return ExprError(); 5544 } 5545 } 5546 5547 // For an arithmetic operation, the implied arithmetic must be well-formed. 5548 if (Form == Arithmetic) { 5549 // GCC does not enforce these rules for GNU atomics, but we do to help catch 5550 // trivial type errors. 5551 auto IsAllowedValueType = [&](QualType ValType) { 5552 if (ValType->isIntegerType()) 5553 return true; 5554 if (ValType->isPointerType()) 5555 return true; 5556 if (!ValType->isFloatingType()) 5557 return false; 5558 // LLVM Parser does not allow atomicrmw with x86_fp80 type. 5559 if (ValType->isSpecificBuiltinType(BuiltinType::LongDouble) && 5560 &Context.getTargetInfo().getLongDoubleFormat() == 5561 &llvm::APFloat::x87DoubleExtended()) 5562 return false; 5563 return true; 5564 }; 5565 if (IsAddSub && !IsAllowedValueType(ValType)) { 5566 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_ptr_or_fp) 5567 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 5568 return ExprError(); 5569 } 5570 if (!IsAddSub && !ValType->isIntegerType()) { 5571 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int) 5572 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 5573 return ExprError(); 5574 } 5575 if (IsC11 && ValType->isPointerType() && 5576 RequireCompleteType(Ptr->getBeginLoc(), ValType->getPointeeType(), 5577 diag::err_incomplete_type)) { 5578 return ExprError(); 5579 } 5580 } else if (IsN && !ValType->isIntegerType() && !ValType->isPointerType()) { 5581 // For __atomic_*_n operations, the value type must be a scalar integral or 5582 // pointer type which is 1, 2, 4, 8 or 16 bytes in length. 5583 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_or_ptr) 5584 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 5585 return ExprError(); 5586 } 5587 5588 if (!IsC11 && !AtomTy.isTriviallyCopyableType(Context) && 5589 !AtomTy->isScalarType()) { 5590 // For GNU atomics, require a trivially-copyable type. This is not part of 5591 // the GNU atomics specification but we enforce it for consistency with 5592 // other atomics which generally all require a trivially-copyable type. This 5593 // is because atomics just copy bits. 5594 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_trivial_copy) 5595 << Ptr->getType() << Ptr->getSourceRange(); 5596 return ExprError(); 5597 } 5598 5599 switch (ValType.getObjCLifetime()) { 5600 case Qualifiers::OCL_None: 5601 case Qualifiers::OCL_ExplicitNone: 5602 // okay 5603 break; 5604 5605 case Qualifiers::OCL_Weak: 5606 case Qualifiers::OCL_Strong: 5607 case Qualifiers::OCL_Autoreleasing: 5608 // FIXME: Can this happen? 
By this point, ValType should be known 5609 // to be trivially copyable. 5610 Diag(ExprRange.getBegin(), diag::err_arc_atomic_ownership) 5611 << ValType << Ptr->getSourceRange(); 5612 return ExprError(); 5613 } 5614 5615 // All atomic operations have an overload which takes a pointer to a volatile 5616 // 'A'. We shouldn't let the volatile-ness of the pointee-type inject itself 5617 // into the result or the other operands. Similarly atomic_load takes a 5618 // pointer to a const 'A'. 5619 ValType.removeLocalVolatile(); 5620 ValType.removeLocalConst(); 5621 QualType ResultType = ValType; 5622 if (Form == Copy || Form == LoadCopy || Form == GNUXchg || 5623 Form == Init) 5624 ResultType = Context.VoidTy; 5625 else if (Form == C11CmpXchg || Form == GNUCmpXchg) 5626 ResultType = Context.BoolTy; 5627 5628 // The type of a parameter passed 'by value'. In the GNU atomics, such 5629 // arguments are actually passed as pointers. 5630 QualType ByValType = ValType; // 'CP' 5631 bool IsPassedByAddress = false; 5632 if (!IsC11 && !IsHIP && !IsN) { 5633 ByValType = Ptr->getType(); 5634 IsPassedByAddress = true; 5635 } 5636 5637 SmallVector<Expr *, 5> APIOrderedArgs; 5638 if (ArgOrder == Sema::AtomicArgumentOrder::AST) { 5639 APIOrderedArgs.push_back(Args[0]); 5640 switch (Form) { 5641 case Init: 5642 case Load: 5643 APIOrderedArgs.push_back(Args[1]); // Val1/Order 5644 break; 5645 case LoadCopy: 5646 case Copy: 5647 case Arithmetic: 5648 case Xchg: 5649 APIOrderedArgs.push_back(Args[2]); // Val1 5650 APIOrderedArgs.push_back(Args[1]); // Order 5651 break; 5652 case GNUXchg: 5653 APIOrderedArgs.push_back(Args[2]); // Val1 5654 APIOrderedArgs.push_back(Args[3]); // Val2 5655 APIOrderedArgs.push_back(Args[1]); // Order 5656 break; 5657 case C11CmpXchg: 5658 APIOrderedArgs.push_back(Args[2]); // Val1 5659 APIOrderedArgs.push_back(Args[4]); // Val2 5660 APIOrderedArgs.push_back(Args[1]); // Order 5661 APIOrderedArgs.push_back(Args[3]); // OrderFail 5662 break; 5663 case GNUCmpXchg: 5664 APIOrderedArgs.push_back(Args[2]); // Val1 5665 APIOrderedArgs.push_back(Args[4]); // Val2 5666 APIOrderedArgs.push_back(Args[5]); // Weak 5667 APIOrderedArgs.push_back(Args[1]); // Order 5668 APIOrderedArgs.push_back(Args[3]); // OrderFail 5669 break; 5670 } 5671 } else 5672 APIOrderedArgs.append(Args.begin(), Args.end()); 5673 5674 // The first argument's non-CV pointer type is used to deduce the type of 5675 // subsequent arguments, except for: 5676 // - weak flag (always converted to bool) 5677 // - memory order (always converted to int) 5678 // - scope (always converted to int) 5679 for (unsigned i = 0; i != APIOrderedArgs.size(); ++i) { 5680 QualType Ty; 5681 if (i < NumVals[Form] + 1) { 5682 switch (i) { 5683 case 0: 5684 // The first argument is always a pointer. It has a fixed type. 5685 // It is always dereferenced, a nullptr is undefined. 5686 CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin()); 5687 // Nothing else to do: we already know all we want about this pointer. 5688 continue; 5689 case 1: 5690 // The second argument is the non-atomic operand. For arithmetic, this 5691 // is always passed by value, and for a compare_exchange it is always 5692 // passed by address. For the rest, GNU uses by-address and C11 uses 5693 // by-value. 
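// Illustrative contrast: '__c11_atomic_fetch_add(p, 1, order)' passes the
// operand by value, while '__atomic_store(p, &val, order)' passes it through
// a pointer.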
5694 assert(Form != Load); 5695 if (Form == Arithmetic && ValType->isPointerType()) 5696 Ty = Context.getPointerDiffType(); 5697 else if (Form == Init || Form == Arithmetic) 5698 Ty = ValType; 5699 else if (Form == Copy || Form == Xchg) { 5700 if (IsPassedByAddress) { 5701 // The value pointer is always dereferenced, a nullptr is undefined. 5702 CheckNonNullArgument(*this, APIOrderedArgs[i], 5703 ExprRange.getBegin()); 5704 } 5705 Ty = ByValType; 5706 } else { 5707 Expr *ValArg = APIOrderedArgs[i]; 5708 // The value pointer is always dereferenced, a nullptr is undefined. 5709 CheckNonNullArgument(*this, ValArg, ExprRange.getBegin()); 5710 LangAS AS = LangAS::Default; 5711 // Keep address space of non-atomic pointer type. 5712 if (const PointerType *PtrTy = 5713 ValArg->getType()->getAs<PointerType>()) { 5714 AS = PtrTy->getPointeeType().getAddressSpace(); 5715 } 5716 Ty = Context.getPointerType( 5717 Context.getAddrSpaceQualType(ValType.getUnqualifiedType(), AS)); 5718 } 5719 break; 5720 case 2: 5721 // The third argument to compare_exchange / GNU exchange is the desired 5722 // value, either by-value (for the C11 and *_n variant) or as a pointer. 5723 if (IsPassedByAddress) 5724 CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin()); 5725 Ty = ByValType; 5726 break; 5727 case 3: 5728 // The fourth argument to GNU compare_exchange is a 'weak' flag. 5729 Ty = Context.BoolTy; 5730 break; 5731 } 5732 } else { 5733 // The order(s) and scope are always converted to int. 5734 Ty = Context.IntTy; 5735 } 5736 5737 InitializedEntity Entity = 5738 InitializedEntity::InitializeParameter(Context, Ty, false); 5739 ExprResult Arg = APIOrderedArgs[i]; 5740 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 5741 if (Arg.isInvalid()) 5742 return true; 5743 APIOrderedArgs[i] = Arg.get(); 5744 } 5745 5746 // Permute the arguments into a 'consistent' order. 5747 SmallVector<Expr*, 5> SubExprs; 5748 SubExprs.push_back(Ptr); 5749 switch (Form) { 5750 case Init: 5751 // Note, AtomicExpr::getVal1() has a special case for this atomic. 5752 SubExprs.push_back(APIOrderedArgs[1]); // Val1 5753 break; 5754 case Load: 5755 SubExprs.push_back(APIOrderedArgs[1]); // Order 5756 break; 5757 case LoadCopy: 5758 case Copy: 5759 case Arithmetic: 5760 case Xchg: 5761 SubExprs.push_back(APIOrderedArgs[2]); // Order 5762 SubExprs.push_back(APIOrderedArgs[1]); // Val1 5763 break; 5764 case GNUXchg: 5765 // Note, AtomicExpr::getVal2() has a special case for this atomic. 
5766 SubExprs.push_back(APIOrderedArgs[3]); // Order 5767 SubExprs.push_back(APIOrderedArgs[1]); // Val1 5768 SubExprs.push_back(APIOrderedArgs[2]); // Val2 5769 break; 5770 case C11CmpXchg: 5771 SubExprs.push_back(APIOrderedArgs[3]); // Order 5772 SubExprs.push_back(APIOrderedArgs[1]); // Val1 5773 SubExprs.push_back(APIOrderedArgs[4]); // OrderFail 5774 SubExprs.push_back(APIOrderedArgs[2]); // Val2 5775 break; 5776 case GNUCmpXchg: 5777 SubExprs.push_back(APIOrderedArgs[4]); // Order 5778 SubExprs.push_back(APIOrderedArgs[1]); // Val1 5779 SubExprs.push_back(APIOrderedArgs[5]); // OrderFail 5780 SubExprs.push_back(APIOrderedArgs[2]); // Val2 5781 SubExprs.push_back(APIOrderedArgs[3]); // Weak 5782 break; 5783 } 5784 5785 if (SubExprs.size() >= 2 && Form != Init) { 5786 if (Optional<llvm::APSInt> Result = 5787 SubExprs[1]->getIntegerConstantExpr(Context)) 5788 if (!isValidOrderingForOp(Result->getSExtValue(), Op)) 5789 Diag(SubExprs[1]->getBeginLoc(), 5790 diag::warn_atomic_op_has_invalid_memory_order) 5791 << SubExprs[1]->getSourceRange(); 5792 } 5793 5794 if (auto ScopeModel = AtomicExpr::getScopeModel(Op)) { 5795 auto *Scope = Args[Args.size() - 1]; 5796 if (Optional<llvm::APSInt> Result = 5797 Scope->getIntegerConstantExpr(Context)) { 5798 if (!ScopeModel->isValid(Result->getZExtValue())) 5799 Diag(Scope->getBeginLoc(), diag::err_atomic_op_has_invalid_synch_scope) 5800 << Scope->getSourceRange(); 5801 } 5802 SubExprs.push_back(Scope); 5803 } 5804 5805 AtomicExpr *AE = new (Context) 5806 AtomicExpr(ExprRange.getBegin(), SubExprs, ResultType, Op, RParenLoc); 5807 5808 if ((Op == AtomicExpr::AO__c11_atomic_load || 5809 Op == AtomicExpr::AO__c11_atomic_store || 5810 Op == AtomicExpr::AO__opencl_atomic_load || 5811 Op == AtomicExpr::AO__hip_atomic_load || 5812 Op == AtomicExpr::AO__opencl_atomic_store || 5813 Op == AtomicExpr::AO__hip_atomic_store) && 5814 Context.AtomicUsesUnsupportedLibcall(AE)) 5815 Diag(AE->getBeginLoc(), diag::err_atomic_load_store_uses_lib) 5816 << ((Op == AtomicExpr::AO__c11_atomic_load || 5817 Op == AtomicExpr::AO__opencl_atomic_load || 5818 Op == AtomicExpr::AO__hip_atomic_load) 5819 ? 0 5820 : 1); 5821 5822 if (ValType->isExtIntType()) { 5823 Diag(Ptr->getExprLoc(), diag::err_atomic_builtin_ext_int_prohibit); 5824 return ExprError(); 5825 } 5826 5827 return AE; 5828 } 5829 5830 /// checkBuiltinArgument - Given a call to a builtin function, perform 5831 /// normal type-checking on the given argument, updating the call in 5832 /// place. This is useful when a builtin function requires custom 5833 /// type-checking for some of its arguments but not necessarily all of 5834 /// them. 5835 /// 5836 /// Returns true on error. 5837 static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) { 5838 FunctionDecl *Fn = E->getDirectCallee(); 5839 assert(Fn && "builtin call without direct callee!"); 5840 5841 ParmVarDecl *Param = Fn->getParamDecl(ArgIndex); 5842 InitializedEntity Entity = 5843 InitializedEntity::InitializeParameter(S.Context, Param); 5844 5845 ExprResult Arg = E->getArg(0); 5846 Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg); 5847 if (Arg.isInvalid()) 5848 return true; 5849 5850 E->setArg(ArgIndex, Arg.get()); 5851 return false; 5852 } 5853 5854 /// We have a call to a function like __sync_fetch_and_add, which is an 5855 /// overloaded function based on the pointer type of its first argument. 
5856 /// The main BuildCallExpr routines have already promoted the types of 5857 /// arguments because all of these calls are prototyped as void(...). 5858 /// 5859 /// This function goes through and does final semantic checking for these 5860 /// builtins, as well as generating any warnings. 5861 ExprResult 5862 Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) { 5863 CallExpr *TheCall = static_cast<CallExpr *>(TheCallResult.get()); 5864 Expr *Callee = TheCall->getCallee(); 5865 DeclRefExpr *DRE = cast<DeclRefExpr>(Callee->IgnoreParenCasts()); 5866 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 5867 5868 // Ensure that we have at least one argument to do type inference from. 5869 if (TheCall->getNumArgs() < 1) { 5870 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 5871 << 0 << 1 << TheCall->getNumArgs() << Callee->getSourceRange(); 5872 return ExprError(); 5873 } 5874 5875 // Inspect the first argument of the atomic builtin. This should always be 5876 // a pointer type, whose element is an integral scalar or pointer type. 5877 // Because it is a pointer type, we don't have to worry about any implicit 5878 // casts here. 5879 // FIXME: We don't allow floating point scalars as input. 5880 Expr *FirstArg = TheCall->getArg(0); 5881 ExprResult FirstArgResult = DefaultFunctionArrayLvalueConversion(FirstArg); 5882 if (FirstArgResult.isInvalid()) 5883 return ExprError(); 5884 FirstArg = FirstArgResult.get(); 5885 TheCall->setArg(0, FirstArg); 5886 5887 const PointerType *pointerType = FirstArg->getType()->getAs<PointerType>(); 5888 if (!pointerType) { 5889 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer) 5890 << FirstArg->getType() << FirstArg->getSourceRange(); 5891 return ExprError(); 5892 } 5893 5894 QualType ValType = pointerType->getPointeeType(); 5895 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 5896 !ValType->isBlockPointerType()) { 5897 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intptr) 5898 << FirstArg->getType() << FirstArg->getSourceRange(); 5899 return ExprError(); 5900 } 5901 5902 if (ValType.isConstQualified()) { 5903 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_cannot_be_const) 5904 << FirstArg->getType() << FirstArg->getSourceRange(); 5905 return ExprError(); 5906 } 5907 5908 switch (ValType.getObjCLifetime()) { 5909 case Qualifiers::OCL_None: 5910 case Qualifiers::OCL_ExplicitNone: 5911 // okay 5912 break; 5913 5914 case Qualifiers::OCL_Weak: 5915 case Qualifiers::OCL_Strong: 5916 case Qualifiers::OCL_Autoreleasing: 5917 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership) 5918 << ValType << FirstArg->getSourceRange(); 5919 return ExprError(); 5920 } 5921 5922 // Strip any qualifiers off ValType. 5923 ValType = ValType.getUnqualifiedType(); 5924 5925 // The majority of builtins return a value, but a few have special return 5926 // types, so allow them to override appropriately below. 5927 QualType ResultType = ValType; 5928 5929 // We need to figure out which concrete builtin this maps onto. For example, 5930 // __sync_fetch_and_add with a 2 byte object turns into 5931 // __sync_fetch_and_add_2. 
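// Illustrative sketch: for 'short s; ... __sync_fetch_and_add(&s, 1);' the
// pointee is 2 bytes wide, so SizeIndex below is 1 and the call is checked as
// if it were __sync_fetch_and_add_2.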
5932 #define BUILTIN_ROW(x) \ 5933 { Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \ 5934 Builtin::BI##x##_8, Builtin::BI##x##_16 } 5935 5936 static const unsigned BuiltinIndices[][5] = { 5937 BUILTIN_ROW(__sync_fetch_and_add), 5938 BUILTIN_ROW(__sync_fetch_and_sub), 5939 BUILTIN_ROW(__sync_fetch_and_or), 5940 BUILTIN_ROW(__sync_fetch_and_and), 5941 BUILTIN_ROW(__sync_fetch_and_xor), 5942 BUILTIN_ROW(__sync_fetch_and_nand), 5943 5944 BUILTIN_ROW(__sync_add_and_fetch), 5945 BUILTIN_ROW(__sync_sub_and_fetch), 5946 BUILTIN_ROW(__sync_and_and_fetch), 5947 BUILTIN_ROW(__sync_or_and_fetch), 5948 BUILTIN_ROW(__sync_xor_and_fetch), 5949 BUILTIN_ROW(__sync_nand_and_fetch), 5950 5951 BUILTIN_ROW(__sync_val_compare_and_swap), 5952 BUILTIN_ROW(__sync_bool_compare_and_swap), 5953 BUILTIN_ROW(__sync_lock_test_and_set), 5954 BUILTIN_ROW(__sync_lock_release), 5955 BUILTIN_ROW(__sync_swap) 5956 }; 5957 #undef BUILTIN_ROW 5958 5959 // Determine the index of the size. 5960 unsigned SizeIndex; 5961 switch (Context.getTypeSizeInChars(ValType).getQuantity()) { 5962 case 1: SizeIndex = 0; break; 5963 case 2: SizeIndex = 1; break; 5964 case 4: SizeIndex = 2; break; 5965 case 8: SizeIndex = 3; break; 5966 case 16: SizeIndex = 4; break; 5967 default: 5968 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_pointer_size) 5969 << FirstArg->getType() << FirstArg->getSourceRange(); 5970 return ExprError(); 5971 } 5972 5973 // Each of these builtins has one pointer argument, followed by some number of 5974 // values (0, 1 or 2) followed by a potentially empty varags list of stuff 5975 // that we ignore. Find out which row of BuiltinIndices to read from as well 5976 // as the number of fixed args. 5977 unsigned BuiltinID = FDecl->getBuiltinID(); 5978 unsigned BuiltinIndex, NumFixed = 1; 5979 bool WarnAboutSemanticsChange = false; 5980 switch (BuiltinID) { 5981 default: llvm_unreachable("Unknown overloaded atomic builtin!"); 5982 case Builtin::BI__sync_fetch_and_add: 5983 case Builtin::BI__sync_fetch_and_add_1: 5984 case Builtin::BI__sync_fetch_and_add_2: 5985 case Builtin::BI__sync_fetch_and_add_4: 5986 case Builtin::BI__sync_fetch_and_add_8: 5987 case Builtin::BI__sync_fetch_and_add_16: 5988 BuiltinIndex = 0; 5989 break; 5990 5991 case Builtin::BI__sync_fetch_and_sub: 5992 case Builtin::BI__sync_fetch_and_sub_1: 5993 case Builtin::BI__sync_fetch_and_sub_2: 5994 case Builtin::BI__sync_fetch_and_sub_4: 5995 case Builtin::BI__sync_fetch_and_sub_8: 5996 case Builtin::BI__sync_fetch_and_sub_16: 5997 BuiltinIndex = 1; 5998 break; 5999 6000 case Builtin::BI__sync_fetch_and_or: 6001 case Builtin::BI__sync_fetch_and_or_1: 6002 case Builtin::BI__sync_fetch_and_or_2: 6003 case Builtin::BI__sync_fetch_and_or_4: 6004 case Builtin::BI__sync_fetch_and_or_8: 6005 case Builtin::BI__sync_fetch_and_or_16: 6006 BuiltinIndex = 2; 6007 break; 6008 6009 case Builtin::BI__sync_fetch_and_and: 6010 case Builtin::BI__sync_fetch_and_and_1: 6011 case Builtin::BI__sync_fetch_and_and_2: 6012 case Builtin::BI__sync_fetch_and_and_4: 6013 case Builtin::BI__sync_fetch_and_and_8: 6014 case Builtin::BI__sync_fetch_and_and_16: 6015 BuiltinIndex = 3; 6016 break; 6017 6018 case Builtin::BI__sync_fetch_and_xor: 6019 case Builtin::BI__sync_fetch_and_xor_1: 6020 case Builtin::BI__sync_fetch_and_xor_2: 6021 case Builtin::BI__sync_fetch_and_xor_4: 6022 case Builtin::BI__sync_fetch_and_xor_8: 6023 case Builtin::BI__sync_fetch_and_xor_16: 6024 BuiltinIndex = 4; 6025 break; 6026 6027 case Builtin::BI__sync_fetch_and_nand: 6028 case 
Builtin::BI__sync_fetch_and_nand_1: 6029 case Builtin::BI__sync_fetch_and_nand_2: 6030 case Builtin::BI__sync_fetch_and_nand_4: 6031 case Builtin::BI__sync_fetch_and_nand_8: 6032 case Builtin::BI__sync_fetch_and_nand_16: 6033 BuiltinIndex = 5; 6034 WarnAboutSemanticsChange = true; 6035 break; 6036 6037 case Builtin::BI__sync_add_and_fetch: 6038 case Builtin::BI__sync_add_and_fetch_1: 6039 case Builtin::BI__sync_add_and_fetch_2: 6040 case Builtin::BI__sync_add_and_fetch_4: 6041 case Builtin::BI__sync_add_and_fetch_8: 6042 case Builtin::BI__sync_add_and_fetch_16: 6043 BuiltinIndex = 6; 6044 break; 6045 6046 case Builtin::BI__sync_sub_and_fetch: 6047 case Builtin::BI__sync_sub_and_fetch_1: 6048 case Builtin::BI__sync_sub_and_fetch_2: 6049 case Builtin::BI__sync_sub_and_fetch_4: 6050 case Builtin::BI__sync_sub_and_fetch_8: 6051 case Builtin::BI__sync_sub_and_fetch_16: 6052 BuiltinIndex = 7; 6053 break; 6054 6055 case Builtin::BI__sync_and_and_fetch: 6056 case Builtin::BI__sync_and_and_fetch_1: 6057 case Builtin::BI__sync_and_and_fetch_2: 6058 case Builtin::BI__sync_and_and_fetch_4: 6059 case Builtin::BI__sync_and_and_fetch_8: 6060 case Builtin::BI__sync_and_and_fetch_16: 6061 BuiltinIndex = 8; 6062 break; 6063 6064 case Builtin::BI__sync_or_and_fetch: 6065 case Builtin::BI__sync_or_and_fetch_1: 6066 case Builtin::BI__sync_or_and_fetch_2: 6067 case Builtin::BI__sync_or_and_fetch_4: 6068 case Builtin::BI__sync_or_and_fetch_8: 6069 case Builtin::BI__sync_or_and_fetch_16: 6070 BuiltinIndex = 9; 6071 break; 6072 6073 case Builtin::BI__sync_xor_and_fetch: 6074 case Builtin::BI__sync_xor_and_fetch_1: 6075 case Builtin::BI__sync_xor_and_fetch_2: 6076 case Builtin::BI__sync_xor_and_fetch_4: 6077 case Builtin::BI__sync_xor_and_fetch_8: 6078 case Builtin::BI__sync_xor_and_fetch_16: 6079 BuiltinIndex = 10; 6080 break; 6081 6082 case Builtin::BI__sync_nand_and_fetch: 6083 case Builtin::BI__sync_nand_and_fetch_1: 6084 case Builtin::BI__sync_nand_and_fetch_2: 6085 case Builtin::BI__sync_nand_and_fetch_4: 6086 case Builtin::BI__sync_nand_and_fetch_8: 6087 case Builtin::BI__sync_nand_and_fetch_16: 6088 BuiltinIndex = 11; 6089 WarnAboutSemanticsChange = true; 6090 break; 6091 6092 case Builtin::BI__sync_val_compare_and_swap: 6093 case Builtin::BI__sync_val_compare_and_swap_1: 6094 case Builtin::BI__sync_val_compare_and_swap_2: 6095 case Builtin::BI__sync_val_compare_and_swap_4: 6096 case Builtin::BI__sync_val_compare_and_swap_8: 6097 case Builtin::BI__sync_val_compare_and_swap_16: 6098 BuiltinIndex = 12; 6099 NumFixed = 2; 6100 break; 6101 6102 case Builtin::BI__sync_bool_compare_and_swap: 6103 case Builtin::BI__sync_bool_compare_and_swap_1: 6104 case Builtin::BI__sync_bool_compare_and_swap_2: 6105 case Builtin::BI__sync_bool_compare_and_swap_4: 6106 case Builtin::BI__sync_bool_compare_and_swap_8: 6107 case Builtin::BI__sync_bool_compare_and_swap_16: 6108 BuiltinIndex = 13; 6109 NumFixed = 2; 6110 ResultType = Context.BoolTy; 6111 break; 6112 6113 case Builtin::BI__sync_lock_test_and_set: 6114 case Builtin::BI__sync_lock_test_and_set_1: 6115 case Builtin::BI__sync_lock_test_and_set_2: 6116 case Builtin::BI__sync_lock_test_and_set_4: 6117 case Builtin::BI__sync_lock_test_and_set_8: 6118 case Builtin::BI__sync_lock_test_and_set_16: 6119 BuiltinIndex = 14; 6120 break; 6121 6122 case Builtin::BI__sync_lock_release: 6123 case Builtin::BI__sync_lock_release_1: 6124 case Builtin::BI__sync_lock_release_2: 6125 case Builtin::BI__sync_lock_release_4: 6126 case Builtin::BI__sync_lock_release_8: 6127 case 
Builtin::BI__sync_lock_release_16: 6128 BuiltinIndex = 15; 6129 NumFixed = 0; 6130 ResultType = Context.VoidTy; 6131 break; 6132 6133 case Builtin::BI__sync_swap: 6134 case Builtin::BI__sync_swap_1: 6135 case Builtin::BI__sync_swap_2: 6136 case Builtin::BI__sync_swap_4: 6137 case Builtin::BI__sync_swap_8: 6138 case Builtin::BI__sync_swap_16: 6139 BuiltinIndex = 16; 6140 break; 6141 } 6142 6143 // Now that we know how many fixed arguments we expect, first check that we 6144 // have at least that many. 6145 if (TheCall->getNumArgs() < 1+NumFixed) { 6146 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 6147 << 0 << 1 + NumFixed << TheCall->getNumArgs() 6148 << Callee->getSourceRange(); 6149 return ExprError(); 6150 } 6151 6152 Diag(TheCall->getEndLoc(), diag::warn_atomic_implicit_seq_cst) 6153 << Callee->getSourceRange(); 6154 6155 if (WarnAboutSemanticsChange) { 6156 Diag(TheCall->getEndLoc(), diag::warn_sync_fetch_and_nand_semantics_change) 6157 << Callee->getSourceRange(); 6158 } 6159 6160 // Get the decl for the concrete builtin; from this, we can tell what the 6161 // concrete integer type we should convert to is. 6162 unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex]; 6163 const char *NewBuiltinName = Context.BuiltinInfo.getName(NewBuiltinID); 6164 FunctionDecl *NewBuiltinDecl; 6165 if (NewBuiltinID == BuiltinID) 6166 NewBuiltinDecl = FDecl; 6167 else { 6168 // Perform builtin lookup to avoid redeclaring it. 6169 DeclarationName DN(&Context.Idents.get(NewBuiltinName)); 6170 LookupResult Res(*this, DN, DRE->getBeginLoc(), LookupOrdinaryName); 6171 LookupName(Res, TUScope, /*AllowBuiltinCreation=*/true); 6172 assert(Res.getFoundDecl()); 6173 NewBuiltinDecl = dyn_cast<FunctionDecl>(Res.getFoundDecl()); 6174 if (!NewBuiltinDecl) 6175 return ExprError(); 6176 } 6177 6178 // The first argument --- the pointer --- has a fixed type; we 6179 // deduce the types of the rest of the arguments accordingly. Walk 6180 // the remaining arguments, converting them to the deduced value type. 6181 for (unsigned i = 0; i != NumFixed; ++i) { 6182 ExprResult Arg = TheCall->getArg(i+1); 6183 6184 // GCC does an implicit conversion to the pointer or integer ValType. This 6185 // can fail in some cases (1i -> int**); check for this error case now. 6186 // Initialize the argument. 6187 InitializedEntity Entity = InitializedEntity::InitializeParameter(Context, 6188 ValType, /*consume*/ false); 6189 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 6190 if (Arg.isInvalid()) 6191 return ExprError(); 6192 6193 // Okay, we have something that *can* be converted to the right type. Check 6194 // to see if there is a potentially weird extension going on here. This can 6195 // happen when you do an atomic operation on something like a char* and 6196 // pass in 42. The 42 gets converted to char. This is even more strange 6197 // for things like 45.123 -> char, etc. 6198 // FIXME: Do this check. 6199 TheCall->setArg(i+1, Arg.get()); 6200 } 6201 6202 // Create a new DeclRefExpr to refer to the new decl. 6203 DeclRefExpr *NewDRE = DeclRefExpr::Create( 6204 Context, DRE->getQualifierLoc(), SourceLocation(), NewBuiltinDecl, 6205 /*enclosing*/ false, DRE->getLocation(), Context.BuiltinFnTy, 6206 DRE->getValueKind(), nullptr, nullptr, DRE->isNonOdrUse()); 6207 6208 // Set the callee in the CallExpr. 6209 // FIXME: This loses syntactic information.
6210 QualType CalleePtrTy = Context.getPointerType(NewBuiltinDecl->getType()); 6211 ExprResult PromotedCall = ImpCastExprToType(NewDRE, CalleePtrTy, 6212 CK_BuiltinFnToFnPtr); 6213 TheCall->setCallee(PromotedCall.get()); 6214 6215 // Change the result type of the call to match the original value type. This 6216 // is arbitrary, but the codegen for these builtins is designed to handle it 6217 // gracefully. 6218 TheCall->setType(ResultType); 6219 6220 // Prohibit use of _ExtInt with atomic builtins. 6221 // The arguments would have already been converted to the first argument's 6222 // type, so we only need to check the first argument. 6223 const auto *ExtIntValType = ValType->getAs<ExtIntType>(); 6224 if (ExtIntValType && !llvm::isPowerOf2_64(ExtIntValType->getNumBits())) { 6225 Diag(FirstArg->getExprLoc(), diag::err_atomic_builtin_ext_int_size); 6226 return ExprError(); 6227 } 6228 6229 return TheCallResult; 6230 } 6231 6232 /// SemaBuiltinNontemporalOverloaded - We have a call to 6233 /// __builtin_nontemporal_store or __builtin_nontemporal_load, which is an 6234 /// overloaded function based on the pointer type of its last argument. 6235 /// 6236 /// This function goes through and does final semantic checking for these 6237 /// builtins. 6238 ExprResult Sema::SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult) { 6239 CallExpr *TheCall = (CallExpr *)TheCallResult.get(); 6240 DeclRefExpr *DRE = 6241 cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 6242 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 6243 unsigned BuiltinID = FDecl->getBuiltinID(); 6244 assert((BuiltinID == Builtin::BI__builtin_nontemporal_store || 6245 BuiltinID == Builtin::BI__builtin_nontemporal_load) && 6246 "Unexpected nontemporal load/store builtin!"); 6247 bool isStore = BuiltinID == Builtin::BI__builtin_nontemporal_store; 6248 unsigned numArgs = isStore ? 2 : 1; 6249 6250 // Ensure that we have the proper number of arguments. 6251 if (checkArgCount(*this, TheCall, numArgs)) 6252 return ExprError(); 6253 6254 // Inspect the last argument of the nontemporal builtin. This should always 6255 // be a pointer type, from which we imply the type of the memory access. 6256 // Because it is a pointer type, we don't have to worry about any implicit 6257 // casts here. 6258 Expr *PointerArg = TheCall->getArg(numArgs - 1); 6259 ExprResult PointerArgResult = 6260 DefaultFunctionArrayLvalueConversion(PointerArg); 6261 6262 if (PointerArgResult.isInvalid()) 6263 return ExprError(); 6264 PointerArg = PointerArgResult.get(); 6265 TheCall->setArg(numArgs - 1, PointerArg); 6266 6267 const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>(); 6268 if (!pointerType) { 6269 Diag(DRE->getBeginLoc(), diag::err_nontemporal_builtin_must_be_pointer) 6270 << PointerArg->getType() << PointerArg->getSourceRange(); 6271 return ExprError(); 6272 } 6273 6274 QualType ValType = pointerType->getPointeeType(); 6275 6276 // Strip any qualifiers off ValType.
6277 ValType = ValType.getUnqualifiedType(); 6278 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 6279 !ValType->isBlockPointerType() && !ValType->isFloatingType() && 6280 !ValType->isVectorType()) { 6281 Diag(DRE->getBeginLoc(), 6282 diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector) 6283 << PointerArg->getType() << PointerArg->getSourceRange(); 6284 return ExprError(); 6285 } 6286 6287 if (!isStore) { 6288 TheCall->setType(ValType); 6289 return TheCallResult; 6290 } 6291 6292 ExprResult ValArg = TheCall->getArg(0); 6293 InitializedEntity Entity = InitializedEntity::InitializeParameter( 6294 Context, ValType, /*consume*/ false); 6295 ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg); 6296 if (ValArg.isInvalid()) 6297 return ExprError(); 6298 6299 TheCall->setArg(0, ValArg.get()); 6300 TheCall->setType(Context.VoidTy); 6301 return TheCallResult; 6302 } 6303 6304 /// CheckObjCString - Checks that the argument to the builtin 6305 /// CFString constructor is correct. 6306 /// Note: It might also make sense to do the UTF-16 conversion here (would 6307 /// simplify the backend). 6308 bool Sema::CheckObjCString(Expr *Arg) { 6309 Arg = Arg->IgnoreParenCasts(); 6310 StringLiteral *Literal = dyn_cast<StringLiteral>(Arg); 6311 6312 if (!Literal || !Literal->isAscii()) { 6313 Diag(Arg->getBeginLoc(), diag::err_cfstring_literal_not_string_constant) 6314 << Arg->getSourceRange(); 6315 return true; 6316 } 6317 6318 if (Literal->containsNonAsciiOrNull()) { 6319 StringRef String = Literal->getString(); 6320 unsigned NumBytes = String.size(); 6321 SmallVector<llvm::UTF16, 128> ToBuf(NumBytes); 6322 const llvm::UTF8 *FromPtr = (const llvm::UTF8 *)String.data(); 6323 llvm::UTF16 *ToPtr = &ToBuf[0]; 6324 6325 llvm::ConversionResult Result = 6326 llvm::ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes, &ToPtr, 6327 ToPtr + NumBytes, llvm::strictConversion); 6328 // Check for conversion failure. 6329 if (Result != llvm::conversionOK) 6330 Diag(Arg->getBeginLoc(), diag::warn_cfstring_truncated) 6331 << Arg->getSourceRange(); 6332 } 6333 return false; 6334 } 6335 6336 /// CheckOSLogFormatStringArg - Checks that the format string argument to the os_log() 6337 /// and os_trace() functions is correct, and converts it to const char *. 6338 ExprResult Sema::CheckOSLogFormatStringArg(Expr *Arg) { 6339 Arg = Arg->IgnoreParenCasts(); 6340 auto *Literal = dyn_cast<StringLiteral>(Arg); 6341 if (!Literal) { 6342 if (auto *ObjcLiteral = dyn_cast<ObjCStringLiteral>(Arg)) { 6343 Literal = ObjcLiteral->getString(); 6344 } 6345 } 6346 6347 if (!Literal || (!Literal->isAscii() && !Literal->isUTF8())) { 6348 return ExprError( 6349 Diag(Arg->getBeginLoc(), diag::err_os_log_format_not_string_constant) 6350 << Arg->getSourceRange()); 6351 } 6352 6353 ExprResult Result(Literal); 6354 QualType ResultTy = Context.getPointerType(Context.CharTy.withConst()); 6355 InitializedEntity Entity = 6356 InitializedEntity::InitializeParameter(Context, ResultTy, false); 6357 Result = PerformCopyInitialization(Entity, SourceLocation(), Result); 6358 return Result; 6359 } 6360 6361 /// Check that the user is calling the appropriate va_start builtin for the 6362 /// target and calling convention.
6363 static bool checkVAStartABI(Sema &S, unsigned BuiltinID, Expr *Fn) { 6364 const llvm::Triple &TT = S.Context.getTargetInfo().getTriple(); 6365 bool IsX64 = TT.getArch() == llvm::Triple::x86_64; 6366 bool IsAArch64 = (TT.getArch() == llvm::Triple::aarch64 || 6367 TT.getArch() == llvm::Triple::aarch64_32); 6368 bool IsWindows = TT.isOSWindows(); 6369 bool IsMSVAStart = BuiltinID == Builtin::BI__builtin_ms_va_start; 6370 if (IsX64 || IsAArch64) { 6371 CallingConv CC = CC_C; 6372 if (const FunctionDecl *FD = S.getCurFunctionDecl()) 6373 CC = FD->getType()->castAs<FunctionType>()->getCallConv(); 6374 if (IsMSVAStart) { 6375 // Don't allow this in System V ABI functions. 6376 if (CC == CC_X86_64SysV || (!IsWindows && CC != CC_Win64)) 6377 return S.Diag(Fn->getBeginLoc(), 6378 diag::err_ms_va_start_used_in_sysv_function); 6379 } else { 6380 // On x86-64/AArch64 Unix, don't allow this in Win64 ABI functions. 6381 // On x64 Windows, don't allow this in System V ABI functions. 6382 // (Yes, that means there's no corresponding way to support variadic 6383 // System V ABI functions on Windows.) 6384 if ((IsWindows && CC == CC_X86_64SysV) || 6385 (!IsWindows && CC == CC_Win64)) 6386 return S.Diag(Fn->getBeginLoc(), 6387 diag::err_va_start_used_in_wrong_abi_function) 6388 << !IsWindows; 6389 } 6390 return false; 6391 } 6392 6393 if (IsMSVAStart) 6394 return S.Diag(Fn->getBeginLoc(), diag::err_builtin_x64_aarch64_only); 6395 return false; 6396 } 6397 6398 static bool checkVAStartIsInVariadicFunction(Sema &S, Expr *Fn, 6399 ParmVarDecl **LastParam = nullptr) { 6400 // Determine whether the current function, block, or obj-c method is variadic 6401 // and get its parameter list. 6402 bool IsVariadic = false; 6403 ArrayRef<ParmVarDecl *> Params; 6404 DeclContext *Caller = S.CurContext; 6405 if (auto *Block = dyn_cast<BlockDecl>(Caller)) { 6406 IsVariadic = Block->isVariadic(); 6407 Params = Block->parameters(); 6408 } else if (auto *FD = dyn_cast<FunctionDecl>(Caller)) { 6409 IsVariadic = FD->isVariadic(); 6410 Params = FD->parameters(); 6411 } else if (auto *MD = dyn_cast<ObjCMethodDecl>(Caller)) { 6412 IsVariadic = MD->isVariadic(); 6413 // FIXME: This isn't correct for methods (results in bogus warning). 6414 Params = MD->parameters(); 6415 } else if (isa<CapturedDecl>(Caller)) { 6416 // We don't support va_start in a CapturedDecl. 6417 S.Diag(Fn->getBeginLoc(), diag::err_va_start_captured_stmt); 6418 return true; 6419 } else { 6420 // This must be some other declcontext that parses exprs. 6421 S.Diag(Fn->getBeginLoc(), diag::err_va_start_outside_function); 6422 return true; 6423 } 6424 6425 if (!IsVariadic) { 6426 S.Diag(Fn->getBeginLoc(), diag::err_va_start_fixed_function); 6427 return true; 6428 } 6429 6430 if (LastParam) 6431 *LastParam = Params.empty() ? nullptr : Params.back(); 6432 6433 return false; 6434 } 6435 6436 /// Check the arguments to '__builtin_va_start' or '__builtin_ms_va_start' 6437 /// for validity. Emit an error and return true on failure; return false 6438 /// on success. 6439 bool Sema::SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) { 6440 Expr *Fn = TheCall->getCallee(); 6441 6442 if (checkVAStartABI(*this, BuiltinID, Fn)) 6443 return true; 6444 6445 if (checkArgCount(*this, TheCall, 2)) 6446 return true; 6447 6448 // Type-check the first argument normally. 6449 if (checkBuiltinArgument(*this, TheCall, 0)) 6450 return true; 6451 6452 // Check that the current function is variadic, and get its last parameter. 
6453 ParmVarDecl *LastParam; 6454 if (checkVAStartIsInVariadicFunction(*this, Fn, &LastParam)) 6455 return true; 6456 6457 // Verify that the second argument to the builtin is the last argument of the 6458 // current function or method. 6459 bool SecondArgIsLastNamedArgument = false; 6460 const Expr *Arg = TheCall->getArg(1)->IgnoreParenCasts(); 6461 6462 // These are valid if SecondArgIsLastNamedArgument is false after the next 6463 // block. 6464 QualType Type; 6465 SourceLocation ParamLoc; 6466 bool IsCRegister = false; 6467 6468 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Arg)) { 6469 if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(DR->getDecl())) { 6470 SecondArgIsLastNamedArgument = PV == LastParam; 6471 6472 Type = PV->getType(); 6473 ParamLoc = PV->getLocation(); 6474 IsCRegister = 6475 PV->getStorageClass() == SC_Register && !getLangOpts().CPlusPlus; 6476 } 6477 } 6478 6479 if (!SecondArgIsLastNamedArgument) 6480 Diag(TheCall->getArg(1)->getBeginLoc(), 6481 diag::warn_second_arg_of_va_start_not_last_named_param); 6482 else if (IsCRegister || Type->isReferenceType() || 6483 Type->isSpecificBuiltinType(BuiltinType::Float) || [=] { 6484 // Promotable integers are UB, but enumerations need a bit of 6485 // extra checking to see what their promotable type actually is. 6486 if (!Type->isPromotableIntegerType()) 6487 return false; 6488 if (!Type->isEnumeralType()) 6489 return true; 6490 const EnumDecl *ED = Type->castAs<EnumType>()->getDecl(); 6491 return !(ED && 6492 Context.typesAreCompatible(ED->getPromotionType(), Type)); 6493 }()) { 6494 unsigned Reason = 0; 6495 if (Type->isReferenceType()) Reason = 1; 6496 else if (IsCRegister) Reason = 2; 6497 Diag(Arg->getBeginLoc(), diag::warn_va_start_type_is_undefined) << Reason; 6498 Diag(ParamLoc, diag::note_parameter_type) << Type; 6499 } 6500 6501 TheCall->setType(Context.VoidTy); 6502 return false; 6503 } 6504 6505 bool Sema::SemaBuiltinVAStartARMMicrosoft(CallExpr *Call) { 6506 auto IsSuitablyTypedFormatArgument = [this](const Expr *Arg) -> bool { 6507 const LangOptions &LO = getLangOpts(); 6508 6509 if (LO.CPlusPlus) 6510 return Arg->getType() 6511 .getCanonicalType() 6512 .getTypePtr() 6513 ->getPointeeType() 6514 .withoutLocalFastQualifiers() == Context.CharTy; 6515 6516 // In C, allow aliasing through `char *`, this is required for AArch64 at 6517 // least. 6518 return true; 6519 }; 6520 6521 // void __va_start(va_list *ap, const char *named_addr, size_t slot_size, 6522 // const char *named_addr); 6523 6524 Expr *Func = Call->getCallee(); 6525 6526 if (Call->getNumArgs() < 3) 6527 return Diag(Call->getEndLoc(), 6528 diag::err_typecheck_call_too_few_args_at_least) 6529 << 0 /*function call*/ << 3 << Call->getNumArgs(); 6530 6531 // Type-check the first argument normally. 6532 if (checkBuiltinArgument(*this, Call, 0)) 6533 return true; 6534 6535 // Check that the current function is variadic. 
6536 if (checkVAStartIsInVariadicFunction(*this, Func)) 6537 return true; 6538 6539 // __va_start on Windows does not validate the parameter qualifiers. 6540 6541 const Expr *Arg1 = Call->getArg(1)->IgnoreParens(); 6542 const Type *Arg1Ty = Arg1->getType().getCanonicalType().getTypePtr(); 6543 6544 const Expr *Arg2 = Call->getArg(2)->IgnoreParens(); 6545 const Type *Arg2Ty = Arg2->getType().getCanonicalType().getTypePtr(); 6546 6547 const QualType &ConstCharPtrTy = 6548 Context.getPointerType(Context.CharTy.withConst()); 6549 if (!Arg1Ty->isPointerType() || !IsSuitablyTypedFormatArgument(Arg1)) 6550 Diag(Arg1->getBeginLoc(), diag::err_typecheck_convert_incompatible) 6551 << Arg1->getType() << ConstCharPtrTy << 1 /* different class */ 6552 << 0 /* qualifier difference */ 6553 << 3 /* parameter mismatch */ 6554 << 2 << Arg1->getType() << ConstCharPtrTy; 6555 6556 const QualType SizeTy = Context.getSizeType(); 6557 if (Arg2Ty->getCanonicalTypeInternal().withoutLocalFastQualifiers() != SizeTy) 6558 Diag(Arg2->getBeginLoc(), diag::err_typecheck_convert_incompatible) 6559 << Arg2->getType() << SizeTy << 1 /* different class */ 6560 << 0 /* qualifier difference */ 6561 << 3 /* parameter mismatch */ 6562 << 3 << Arg2->getType() << SizeTy; 6563 6564 return false; 6565 } 6566 6567 /// SemaBuiltinUnorderedCompare - Handle functions like __builtin_isgreater and 6568 /// friends. This is declared to take (...), so we have to check everything. 6569 bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall) { 6570 if (checkArgCount(*this, TheCall, 2)) 6571 return true; 6572 6573 ExprResult OrigArg0 = TheCall->getArg(0); 6574 ExprResult OrigArg1 = TheCall->getArg(1); 6575 6576 // Do standard promotions between the two arguments, returning their common 6577 // type. 6578 QualType Res = UsualArithmeticConversions( 6579 OrigArg0, OrigArg1, TheCall->getExprLoc(), ACK_Comparison); 6580 if (OrigArg0.isInvalid() || OrigArg1.isInvalid()) 6581 return true; 6582 6583 // Make sure any conversions are pushed back into the call; this is 6584 // type safe since unordered compare builtins are declared as "_Bool 6585 // foo(...)". 6586 TheCall->setArg(0, OrigArg0.get()); 6587 TheCall->setArg(1, OrigArg1.get()); 6588 6589 if (OrigArg0.get()->isTypeDependent() || OrigArg1.get()->isTypeDependent()) 6590 return false; 6591 6592 // If the common type isn't a real floating type, then the arguments were 6593 // invalid for this operation. 6594 if (Res.isNull() || !Res->isRealFloatingType()) 6595 return Diag(OrigArg0.get()->getBeginLoc(), 6596 diag::err_typecheck_call_invalid_ordered_compare) 6597 << OrigArg0.get()->getType() << OrigArg1.get()->getType() 6598 << SourceRange(OrigArg0.get()->getBeginLoc(), 6599 OrigArg1.get()->getEndLoc()); 6600 6601 return false; 6602 } 6603 6604 /// SemaBuiltinFPClassification - Handle functions like 6605 /// __builtin_isnan and friends. This is declared to take (...), so we have 6606 /// to check everything. We expect the last argument to be a floating point 6607 /// value. 6608 bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) { 6609 if (checkArgCount(*this, TheCall, NumArgs)) 6610 return true; 6611 6612 // __builtin_fpclassify is the only case where NumArgs != 1, so we can count 6613 // on all preceding parameters just being int. Try all of those.
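  // Illustrative call (not from the original source):
  //   __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL,
  //                        FP_ZERO, x)
  // The five leading classification arguments are ints; only the trailing
  // floating-point operand needs the check further below.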
6614 for (unsigned i = 0; i < NumArgs - 1; ++i) { 6615 Expr *Arg = TheCall->getArg(i); 6616 6617 if (Arg->isTypeDependent()) 6618 return false; 6619 6620 ExprResult Res = PerformImplicitConversion(Arg, Context.IntTy, AA_Passing); 6621 6622 if (Res.isInvalid()) 6623 return true; 6624 TheCall->setArg(i, Res.get()); 6625 } 6626 6627 Expr *OrigArg = TheCall->getArg(NumArgs-1); 6628 6629 if (OrigArg->isTypeDependent()) 6630 return false; 6631 6632 // Usual Unary Conversions will convert half to float, which we want for 6633 // machines that use fp16 conversion intrinsics. Else, we want to leave the 6634 // type as it is, but do normal L->Rvalue conversions. 6635 if (Context.getTargetInfo().useFP16ConversionIntrinsics()) 6636 OrigArg = UsualUnaryConversions(OrigArg).get(); 6637 else 6638 OrigArg = DefaultFunctionArrayLvalueConversion(OrigArg).get(); 6639 TheCall->setArg(NumArgs - 1, OrigArg); 6640 6641 // This operation requires a non-_Complex floating-point number. 6642 if (!OrigArg->getType()->isRealFloatingType()) 6643 return Diag(OrigArg->getBeginLoc(), 6644 diag::err_typecheck_call_invalid_unary_fp) 6645 << OrigArg->getType() << OrigArg->getSourceRange(); 6646 6647 return false; 6648 } 6649 6650 /// Perform semantic analysis for a call to __builtin_complex. 6651 bool Sema::SemaBuiltinComplex(CallExpr *TheCall) { 6652 if (checkArgCount(*this, TheCall, 2)) 6653 return true; 6654 6655 bool Dependent = false; 6656 for (unsigned I = 0; I != 2; ++I) { 6657 Expr *Arg = TheCall->getArg(I); 6658 QualType T = Arg->getType(); 6659 if (T->isDependentType()) { 6660 Dependent = true; 6661 continue; 6662 } 6663 6664 // Despite supporting _Complex int, GCC requires a real floating point type 6665 // for the operands of __builtin_complex. 6666 if (!T->isRealFloatingType()) { 6667 return Diag(Arg->getBeginLoc(), diag::err_typecheck_call_requires_real_fp) 6668 << Arg->getType() << Arg->getSourceRange(); 6669 } 6670 6671 ExprResult Converted = DefaultLvalueConversion(Arg); 6672 if (Converted.isInvalid()) 6673 return true; 6674 TheCall->setArg(I, Converted.get()); 6675 } 6676 6677 if (Dependent) { 6678 TheCall->setType(Context.DependentTy); 6679 return false; 6680 } 6681 6682 Expr *Real = TheCall->getArg(0); 6683 Expr *Imag = TheCall->getArg(1); 6684 if (!Context.hasSameType(Real->getType(), Imag->getType())) { 6685 return Diag(Real->getBeginLoc(), 6686 diag::err_typecheck_call_different_arg_types) 6687 << Real->getType() << Imag->getType() 6688 << Real->getSourceRange() << Imag->getSourceRange(); 6689 } 6690 6691 // We don't allow _Complex _Float16 nor _Complex __fp16 as type specifiers; 6692 // don't allow this builtin to form those types either. 6693 // FIXME: Should we allow these types? 6694 if (Real->getType()->isFloat16Type()) 6695 return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec) 6696 << "_Float16"; 6697 if (Real->getType()->isHalfType()) 6698 return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec) 6699 << "half"; 6700 6701 TheCall->setType(Context.getComplexType(Real->getType())); 6702 return false; 6703 } 6704 6705 // Customized Sema Checking for VSX builtins that have the following signature: 6706 // vector [...] builtinName(vector [...], vector [...], const int); 6707 // These take the same type of vectors (any legal vector type) for the first 6708 // two arguments and a compile-time constant for the third argument.
6709 // Example builtins are : 6710 // vector double vec_xxpermdi(vector double, vector double, int); 6711 // vector short vec_xxsldwi(vector short, vector short, int); 6712 bool Sema::SemaBuiltinVSX(CallExpr *TheCall) { 6713 unsigned ExpectedNumArgs = 3; 6714 if (checkArgCount(*this, TheCall, ExpectedNumArgs)) 6715 return true; 6716 6717 // Check the third argument is a compile time constant 6718 if (!TheCall->getArg(2)->isIntegerConstantExpr(Context)) 6719 return Diag(TheCall->getBeginLoc(), 6720 diag::err_vsx_builtin_nonconstant_argument) 6721 << 3 /* argument index */ << TheCall->getDirectCallee() 6722 << SourceRange(TheCall->getArg(2)->getBeginLoc(), 6723 TheCall->getArg(2)->getEndLoc()); 6724 6725 QualType Arg1Ty = TheCall->getArg(0)->getType(); 6726 QualType Arg2Ty = TheCall->getArg(1)->getType(); 6727 6728 // Check the type of argument 1 and argument 2 are vectors. 6729 SourceLocation BuiltinLoc = TheCall->getBeginLoc(); 6730 if ((!Arg1Ty->isVectorType() && !Arg1Ty->isDependentType()) || 6731 (!Arg2Ty->isVectorType() && !Arg2Ty->isDependentType())) { 6732 return Diag(BuiltinLoc, diag::err_vec_builtin_non_vector) 6733 << TheCall->getDirectCallee() 6734 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 6735 TheCall->getArg(1)->getEndLoc()); 6736 } 6737 6738 // Check the first two arguments are the same type. 6739 if (!Context.hasSameUnqualifiedType(Arg1Ty, Arg2Ty)) { 6740 return Diag(BuiltinLoc, diag::err_vec_builtin_incompatible_vector) 6741 << TheCall->getDirectCallee() 6742 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 6743 TheCall->getArg(1)->getEndLoc()); 6744 } 6745 6746 // When default clang type checking is turned off and the customized type 6747 // checking is used, the returning type of the function must be explicitly 6748 // set. Otherwise it is _Bool by default. 6749 TheCall->setType(Arg1Ty); 6750 6751 return false; 6752 } 6753 6754 /// SemaBuiltinShuffleVector - Handle __builtin_shufflevector. 6755 // This is declared to take (...), so we have to check everything. 6756 ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) { 6757 if (TheCall->getNumArgs() < 2) 6758 return ExprError(Diag(TheCall->getEndLoc(), 6759 diag::err_typecheck_call_too_few_args_at_least) 6760 << 0 /*function call*/ << 2 << TheCall->getNumArgs() 6761 << TheCall->getSourceRange()); 6762 6763 // Determine which of the following types of shufflevector we're checking: 6764 // 1) unary, vector mask: (lhs, mask) 6765 // 2) binary, scalar mask: (lhs, rhs, index, ..., index) 6766 QualType resType = TheCall->getArg(0)->getType(); 6767 unsigned numElements = 0; 6768 6769 if (!TheCall->getArg(0)->isTypeDependent() && 6770 !TheCall->getArg(1)->isTypeDependent()) { 6771 QualType LHSType = TheCall->getArg(0)->getType(); 6772 QualType RHSType = TheCall->getArg(1)->getType(); 6773 6774 if (!LHSType->isVectorType() || !RHSType->isVectorType()) 6775 return ExprError( 6776 Diag(TheCall->getBeginLoc(), diag::err_vec_builtin_non_vector) 6777 << TheCall->getDirectCallee() 6778 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 6779 TheCall->getArg(1)->getEndLoc())); 6780 6781 numElements = LHSType->castAs<VectorType>()->getNumElements(); 6782 unsigned numResElements = TheCall->getNumArgs() - 2; 6783 6784 // Check to see if we have a call with 2 vector arguments, the unary shuffle 6785 // with mask. If so, verify that RHS is an integer vector type with the 6786 // same number of elts as lhs. 
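    // Illustrative forms (not from the original source):
    //   __builtin_shufflevector(v, mask)            - unary, vector mask
    //   __builtin_shufflevector(a, b, 0, 4, 1, 5)   - binary, constant indices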
6787 if (TheCall->getNumArgs() == 2) { 6788 if (!RHSType->hasIntegerRepresentation() || 6789 RHSType->castAs<VectorType>()->getNumElements() != numElements) 6790 return ExprError(Diag(TheCall->getBeginLoc(), 6791 diag::err_vec_builtin_incompatible_vector) 6792 << TheCall->getDirectCallee() 6793 << SourceRange(TheCall->getArg(1)->getBeginLoc(), 6794 TheCall->getArg(1)->getEndLoc())); 6795 } else if (!Context.hasSameUnqualifiedType(LHSType, RHSType)) { 6796 return ExprError(Diag(TheCall->getBeginLoc(), 6797 diag::err_vec_builtin_incompatible_vector) 6798 << TheCall->getDirectCallee() 6799 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 6800 TheCall->getArg(1)->getEndLoc())); 6801 } else if (numElements != numResElements) { 6802 QualType eltType = LHSType->castAs<VectorType>()->getElementType(); 6803 resType = Context.getVectorType(eltType, numResElements, 6804 VectorType::GenericVector); 6805 } 6806 } 6807 6808 for (unsigned i = 2; i < TheCall->getNumArgs(); i++) { 6809 if (TheCall->getArg(i)->isTypeDependent() || 6810 TheCall->getArg(i)->isValueDependent()) 6811 continue; 6812 6813 Optional<llvm::APSInt> Result; 6814 if (!(Result = TheCall->getArg(i)->getIntegerConstantExpr(Context))) 6815 return ExprError(Diag(TheCall->getBeginLoc(), 6816 diag::err_shufflevector_nonconstant_argument) 6817 << TheCall->getArg(i)->getSourceRange()); 6818 6819 // Allow -1 which will be translated to undef in the IR. 6820 if (Result->isSigned() && Result->isAllOnes()) 6821 continue; 6822 6823 if (Result->getActiveBits() > 64 || 6824 Result->getZExtValue() >= numElements * 2) 6825 return ExprError(Diag(TheCall->getBeginLoc(), 6826 diag::err_shufflevector_argument_too_large) 6827 << TheCall->getArg(i)->getSourceRange()); 6828 } 6829 6830 SmallVector<Expr*, 32> exprs; 6831 6832 for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; i++) { 6833 exprs.push_back(TheCall->getArg(i)); 6834 TheCall->setArg(i, nullptr); 6835 } 6836 6837 return new (Context) ShuffleVectorExpr(Context, exprs, resType, 6838 TheCall->getCallee()->getBeginLoc(), 6839 TheCall->getRParenLoc()); 6840 } 6841 6842 /// SemaConvertVectorExpr - Handle __builtin_convertvector 6843 ExprResult Sema::SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, 6844 SourceLocation BuiltinLoc, 6845 SourceLocation RParenLoc) { 6846 ExprValueKind VK = VK_PRValue; 6847 ExprObjectKind OK = OK_Ordinary; 6848 QualType DstTy = TInfo->getType(); 6849 QualType SrcTy = E->getType(); 6850 6851 if (!SrcTy->isVectorType() && !SrcTy->isDependentType()) 6852 return ExprError(Diag(BuiltinLoc, 6853 diag::err_convertvector_non_vector) 6854 << E->getSourceRange()); 6855 if (!DstTy->isVectorType() && !DstTy->isDependentType()) 6856 return ExprError(Diag(BuiltinLoc, 6857 diag::err_convertvector_non_vector_type)); 6858 6859 if (!SrcTy->isDependentType() && !DstTy->isDependentType()) { 6860 unsigned SrcElts = SrcTy->castAs<VectorType>()->getNumElements(); 6861 unsigned DstElts = DstTy->castAs<VectorType>()->getNumElements(); 6862 if (SrcElts != DstElts) 6863 return ExprError(Diag(BuiltinLoc, 6864 diag::err_convertvector_incompatible_vector) 6865 << E->getSourceRange()); 6866 } 6867 6868 return new (Context) 6869 ConvertVectorExpr(E, TInfo, DstTy, VK, OK, BuiltinLoc, RParenLoc); 6870 } 6871 6872 /// SemaBuiltinPrefetch - Handle __builtin_prefetch. 6873 // This is declared to take (const void*, ...) and can take two 6874 // optional constant int args. 
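// Illustrative usage (not from the original source):
//   __builtin_prefetch(p);        // address only
//   __builtin_prefetch(p, 1, 3);  // rw hint in [0,1], locality in [0,3]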
6875 bool Sema::SemaBuiltinPrefetch(CallExpr *TheCall) { 6876 unsigned NumArgs = TheCall->getNumArgs(); 6877 6878 if (NumArgs > 3) 6879 return Diag(TheCall->getEndLoc(), 6880 diag::err_typecheck_call_too_many_args_at_most) 6881 << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange(); 6882 6883 // Argument 0 is checked for us and the remaining arguments must be 6884 // constant integers. 6885 for (unsigned i = 1; i != NumArgs; ++i) 6886 if (SemaBuiltinConstantArgRange(TheCall, i, 0, i == 1 ? 1 : 3)) 6887 return true; 6888 6889 return false; 6890 } 6891 6892 /// SemaBuiltinArithmeticFence - Handle __arithmetic_fence. 6893 bool Sema::SemaBuiltinArithmeticFence(CallExpr *TheCall) { 6894 if (!Context.getTargetInfo().checkArithmeticFenceSupported()) 6895 return Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported) 6896 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 6897 if (checkArgCount(*this, TheCall, 1)) 6898 return true; 6899 Expr *Arg = TheCall->getArg(0); 6900 if (Arg->isInstantiationDependent()) 6901 return false; 6902 6903 QualType ArgTy = Arg->getType(); 6904 if (!ArgTy->hasFloatingRepresentation()) 6905 return Diag(TheCall->getEndLoc(), diag::err_typecheck_expect_flt_or_vector) 6906 << ArgTy; 6907 if (Arg->isLValue()) { 6908 ExprResult FirstArg = DefaultLvalueConversion(Arg); 6909 TheCall->setArg(0, FirstArg.get()); 6910 } 6911 TheCall->setType(TheCall->getArg(0)->getType()); 6912 return false; 6913 } 6914 6915 /// SemaBuiltinAssume - Handle __assume (MS Extension). 6916 // __assume does not evaluate its arguments, and should warn if its argument 6917 // has side effects. 6918 bool Sema::SemaBuiltinAssume(CallExpr *TheCall) { 6919 Expr *Arg = TheCall->getArg(0); 6920 if (Arg->isInstantiationDependent()) return false; 6921 6922 if (Arg->HasSideEffects(Context)) 6923 Diag(Arg->getBeginLoc(), diag::warn_assume_side_effects) 6924 << Arg->getSourceRange() 6925 << cast<FunctionDecl>(TheCall->getCalleeDecl())->getIdentifier(); 6926 6927 return false; 6928 } 6929 6930 /// Handle __builtin_alloca_with_align. This is declared 6931 /// as (size_t, size_t) where the second size_t must be a power of 2 greater 6932 /// than 8. 6933 bool Sema::SemaBuiltinAllocaWithAlign(CallExpr *TheCall) { 6934 // The alignment must be a constant integer. 6935 Expr *Arg = TheCall->getArg(1); 6936 6937 // We can't check the value of a dependent argument. 6938 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) { 6939 if (const auto *UE = 6940 dyn_cast<UnaryExprOrTypeTraitExpr>(Arg->IgnoreParenImpCasts())) 6941 if (UE->getKind() == UETT_AlignOf || 6942 UE->getKind() == UETT_PreferredAlignOf) 6943 Diag(TheCall->getBeginLoc(), diag::warn_alloca_align_alignof) 6944 << Arg->getSourceRange(); 6945 6946 llvm::APSInt Result = Arg->EvaluateKnownConstInt(Context); 6947 6948 if (!Result.isPowerOf2()) 6949 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two) 6950 << Arg->getSourceRange(); 6951 6952 if (Result < Context.getCharWidth()) 6953 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_small) 6954 << (unsigned)Context.getCharWidth() << Arg->getSourceRange(); 6955 6956 if (Result > std::numeric_limits<int32_t>::max()) 6957 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_big) 6958 << std::numeric_limits<int32_t>::max() << Arg->getSourceRange(); 6959 } 6960 6961 return false; 6962 } 6963 6964 /// Handle __builtin_assume_aligned. This is declared 6965 /// as (const void*, size_t, ...) and can take one optional constant int arg. 
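/// Illustrative usage (not from the original source):
///   void *p = __builtin_assume_aligned(q, 64);     // alignment only
///   void *r = __builtin_assume_aligned(q, 64, 8);  // alignment plus offset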
6966 bool Sema::SemaBuiltinAssumeAligned(CallExpr *TheCall) { 6967 unsigned NumArgs = TheCall->getNumArgs(); 6968 6969 if (NumArgs > 3) 6970 return Diag(TheCall->getEndLoc(), 6971 diag::err_typecheck_call_too_many_args_at_most) 6972 << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange(); 6973 6974 // The alignment must be a constant integer. 6975 Expr *Arg = TheCall->getArg(1); 6976 6977 // We can't check the value of a dependent argument. 6978 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) { 6979 llvm::APSInt Result; 6980 if (SemaBuiltinConstantArg(TheCall, 1, Result)) 6981 return true; 6982 6983 if (!Result.isPowerOf2()) 6984 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two) 6985 << Arg->getSourceRange(); 6986 6987 if (Result > Sema::MaximumAlignment) 6988 Diag(TheCall->getBeginLoc(), diag::warn_assume_aligned_too_great) 6989 << Arg->getSourceRange() << Sema::MaximumAlignment; 6990 } 6991 6992 if (NumArgs > 2) { 6993 ExprResult Arg(TheCall->getArg(2)); 6994 InitializedEntity Entity = InitializedEntity::InitializeParameter(Context, 6995 Context.getSizeType(), false); 6996 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 6997 if (Arg.isInvalid()) return true; 6998 TheCall->setArg(2, Arg.get()); 6999 } 7000 7001 return false; 7002 } 7003 7004 bool Sema::SemaBuiltinOSLogFormat(CallExpr *TheCall) { 7005 unsigned BuiltinID = 7006 cast<FunctionDecl>(TheCall->getCalleeDecl())->getBuiltinID(); 7007 bool IsSizeCall = BuiltinID == Builtin::BI__builtin_os_log_format_buffer_size; 7008 7009 unsigned NumArgs = TheCall->getNumArgs(); 7010 unsigned NumRequiredArgs = IsSizeCall ? 1 : 2; 7011 if (NumArgs < NumRequiredArgs) { 7012 return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args) 7013 << 0 /* function call */ << NumRequiredArgs << NumArgs 7014 << TheCall->getSourceRange(); 7015 } 7016 if (NumArgs >= NumRequiredArgs + 0x100) { 7017 return Diag(TheCall->getEndLoc(), 7018 diag::err_typecheck_call_too_many_args_at_most) 7019 << 0 /* function call */ << (NumRequiredArgs + 0xff) << NumArgs 7020 << TheCall->getSourceRange(); 7021 } 7022 unsigned i = 0; 7023 7024 // For formatting call, check buffer arg. 7025 if (!IsSizeCall) { 7026 ExprResult Arg(TheCall->getArg(i)); 7027 InitializedEntity Entity = InitializedEntity::InitializeParameter( 7028 Context, Context.VoidPtrTy, false); 7029 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 7030 if (Arg.isInvalid()) 7031 return true; 7032 TheCall->setArg(i, Arg.get()); 7033 i++; 7034 } 7035 7036 // Check string literal arg. 7037 unsigned FormatIdx = i; 7038 { 7039 ExprResult Arg = CheckOSLogFormatStringArg(TheCall->getArg(i)); 7040 if (Arg.isInvalid()) 7041 return true; 7042 TheCall->setArg(i, Arg.get()); 7043 i++; 7044 } 7045 7046 // Make sure variadic args are scalar. 7047 unsigned FirstDataArg = i; 7048 while (i < NumArgs) { 7049 ExprResult Arg = DefaultVariadicArgumentPromotion( 7050 TheCall->getArg(i), VariadicFunction, nullptr); 7051 if (Arg.isInvalid()) 7052 return true; 7053 CharUnits ArgSize = Context.getTypeSizeInChars(Arg.get()->getType()); 7054 if (ArgSize.getQuantity() >= 0x100) { 7055 return Diag(Arg.get()->getEndLoc(), diag::err_os_log_argument_too_big) 7056 << i << (int)ArgSize.getQuantity() << 0xff 7057 << TheCall->getSourceRange(); 7058 } 7059 TheCall->setArg(i, Arg.get()); 7060 i++; 7061 } 7062 7063 // Check formatting specifiers. NOTE: We're only doing this for the non-size 7064 // call to avoid duplicate diagnostics. 
7065 if (!IsSizeCall) { 7066 llvm::SmallBitVector CheckedVarArgs(NumArgs, false); 7067 ArrayRef<const Expr *> Args(TheCall->getArgs(), TheCall->getNumArgs()); 7068 bool Success = CheckFormatArguments( 7069 Args, /*HasVAListArg*/ false, FormatIdx, FirstDataArg, FST_OSLog, 7070 VariadicFunction, TheCall->getBeginLoc(), SourceRange(), 7071 CheckedVarArgs); 7072 if (!Success) 7073 return true; 7074 } 7075 7076 if (IsSizeCall) { 7077 TheCall->setType(Context.getSizeType()); 7078 } else { 7079 TheCall->setType(Context.VoidPtrTy); 7080 } 7081 return false; 7082 } 7083 7084 /// SemaBuiltinConstantArg - Handle a check if argument ArgNum of CallExpr 7085 /// TheCall is a constant expression. 7086 bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, 7087 llvm::APSInt &Result) { 7088 Expr *Arg = TheCall->getArg(ArgNum); 7089 DeclRefExpr *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 7090 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 7091 7092 if (Arg->isTypeDependent() || Arg->isValueDependent()) return false; 7093 7094 Optional<llvm::APSInt> R; 7095 if (!(R = Arg->getIntegerConstantExpr(Context))) 7096 return Diag(TheCall->getBeginLoc(), diag::err_constant_integer_arg_type) 7097 << FDecl->getDeclName() << Arg->getSourceRange(); 7098 Result = *R; 7099 return false; 7100 } 7101 7102 /// SemaBuiltinConstantArgRange - Handle a check if argument ArgNum of CallExpr 7103 /// TheCall is a constant expression in the range [Low, High]. 7104 bool Sema::SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, 7105 int Low, int High, bool RangeIsError) { 7106 if (isConstantEvaluated()) 7107 return false; 7108 llvm::APSInt Result; 7109 7110 // We can't check the value of a dependent argument. 7111 Expr *Arg = TheCall->getArg(ArgNum); 7112 if (Arg->isTypeDependent() || Arg->isValueDependent()) 7113 return false; 7114 7115 // Check constant-ness first. 7116 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 7117 return true; 7118 7119 if (Result.getSExtValue() < Low || Result.getSExtValue() > High) { 7120 if (RangeIsError) 7121 return Diag(TheCall->getBeginLoc(), diag::err_argument_invalid_range) 7122 << toString(Result, 10) << Low << High << Arg->getSourceRange(); 7123 else 7124 // Defer the warning until we know if the code will be emitted so that 7125 // dead code can ignore this. 7126 DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall, 7127 PDiag(diag::warn_argument_invalid_range) 7128 << toString(Result, 10) << Low << High 7129 << Arg->getSourceRange()); 7130 } 7131 7132 return false; 7133 } 7134 7135 /// SemaBuiltinConstantArgMultiple - Handle a check if argument ArgNum of CallExpr 7136 /// TheCall is a constant expression that is a multiple of Num. 7137 bool Sema::SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, 7138 unsigned Num) { 7139 llvm::APSInt Result; 7140 7141 // We can't check the value of a dependent argument. 7142 Expr *Arg = TheCall->getArg(ArgNum); 7143 if (Arg->isTypeDependent() || Arg->isValueDependent()) 7144 return false; 7145 7146 // Check constant-ness first. 7147 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 7148 return true; 7149 7150 if (Result.getSExtValue() % Num != 0) 7151 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_multiple) 7152 << Num << Arg->getSourceRange(); 7153 7154 return false; 7155 } 7156 7157 /// SemaBuiltinConstantArgPower2 - Check if argument ArgNum of TheCall is a 7158 /// constant expression representing a power of 2.
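/// Illustrative values (not from the original source): 1, 2, 64 and 0x8000
/// satisfy this check; 0, 3 and 0x8001 are rejected.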
7159 bool Sema::SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum) { 7160 llvm::APSInt Result; 7161 7162 // We can't check the value of a dependent argument. 7163 Expr *Arg = TheCall->getArg(ArgNum); 7164 if (Arg->isTypeDependent() || Arg->isValueDependent()) 7165 return false; 7166 7167 // Check constant-ness first. 7168 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 7169 return true; 7170 7171 // Bit-twiddling to test for a power of 2: for x > 0, x & (x-1) is zero if 7172 // and only if x is a power of 2. 7173 if (Result.isStrictlyPositive() && (Result & (Result - 1)) == 0) 7174 return false; 7175 7176 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_power_of_2) 7177 << Arg->getSourceRange(); 7178 } 7179 7180 static bool IsShiftedByte(llvm::APSInt Value) { 7181 if (Value.isNegative()) 7182 return false; 7183 7184 // Check if it's a shifted byte, by shifting it down 7185 while (true) { 7186 // If the value fits in the bottom byte, the check passes. 7187 if (Value < 0x100) 7188 return true; 7189 7190 // Otherwise, if the value has _any_ bits in the bottom byte, the check 7191 // fails. 7192 if ((Value & 0xFF) != 0) 7193 return false; 7194 7195 // If the bottom 8 bits are all 0, but something above that is nonzero, 7196 // then shifting the value right by 8 bits won't affect whether it's a 7197 // shifted byte or not. So do that, and go round again. 7198 Value >>= 8; 7199 } 7200 } 7201 7202 /// SemaBuiltinConstantArgShiftedByte - Check if argument ArgNum of TheCall is 7203 /// a constant expression representing an arbitrary byte value shifted left by 7204 /// a multiple of 8 bits. 7205 bool Sema::SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum, 7206 unsigned ArgBits) { 7207 llvm::APSInt Result; 7208 7209 // We can't check the value of a dependent argument. 7210 Expr *Arg = TheCall->getArg(ArgNum); 7211 if (Arg->isTypeDependent() || Arg->isValueDependent()) 7212 return false; 7213 7214 // Check constant-ness first. 7215 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 7216 return true; 7217 7218 // Truncate to the given size. 7219 Result = Result.getLoBits(ArgBits); 7220 Result.setIsUnsigned(true); 7221 7222 if (IsShiftedByte(Result)) 7223 return false; 7224 7225 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_shifted_byte) 7226 << Arg->getSourceRange(); 7227 } 7228 7229 /// SemaBuiltinConstantArgShiftedByteOr0xFF - Check if argument ArgNum of 7230 /// TheCall is a constant expression representing either a shifted byte value, 7231 /// or a value of the form 0x??FF (i.e. a member of the arithmetic progression 7232 /// 0x00FF, 0x01FF, ..., 0xFFFF). This strange range check is needed for some 7233 /// Arm MVE intrinsics. 7234 bool Sema::SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, 7235 int ArgNum, 7236 unsigned ArgBits) { 7237 llvm::APSInt Result; 7238 7239 // We can't check the value of a dependent argument. 7240 Expr *Arg = TheCall->getArg(ArgNum); 7241 if (Arg->isTypeDependent() || Arg->isValueDependent()) 7242 return false; 7243 7244 // Check constant-ness first. 7245 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 7246 return true; 7247 7248 // Truncate to the given size. 7249 Result = Result.getLoBits(ArgBits); 7250 Result.setIsUnsigned(true); 7251 7252 // Check to see if it's in either of the required forms. 
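  // Illustrative values (not from the original source): 0x3A00 and 0x00C4 are
  // shifted bytes; 0x12FF matches the 0x??FF form; 0x1234 matches neither.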
7253 if (IsShiftedByte(Result) || 7254 (Result > 0 && Result < 0x10000 && (Result & 0xFF) == 0xFF)) 7255 return false; 7256 7257 return Diag(TheCall->getBeginLoc(), 7258 diag::err_argument_not_shifted_byte_or_xxff) 7259 << Arg->getSourceRange(); 7260 } 7261 7262 /// SemaBuiltinARMMemoryTaggingCall - Handle calls of memory tagging extensions 7263 bool Sema::SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall) { 7264 if (BuiltinID == AArch64::BI__builtin_arm_irg) { 7265 if (checkArgCount(*this, TheCall, 2)) 7266 return true; 7267 Expr *Arg0 = TheCall->getArg(0); 7268 Expr *Arg1 = TheCall->getArg(1); 7269 7270 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 7271 if (FirstArg.isInvalid()) 7272 return true; 7273 QualType FirstArgType = FirstArg.get()->getType(); 7274 if (!FirstArgType->isAnyPointerType()) 7275 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 7276 << "first" << FirstArgType << Arg0->getSourceRange(); 7277 TheCall->setArg(0, FirstArg.get()); 7278 7279 ExprResult SecArg = DefaultLvalueConversion(Arg1); 7280 if (SecArg.isInvalid()) 7281 return true; 7282 QualType SecArgType = SecArg.get()->getType(); 7283 if (!SecArgType->isIntegerType()) 7284 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer) 7285 << "second" << SecArgType << Arg1->getSourceRange(); 7286 7287 // Derive the return type from the pointer argument. 7288 TheCall->setType(FirstArgType); 7289 return false; 7290 } 7291 7292 if (BuiltinID == AArch64::BI__builtin_arm_addg) { 7293 if (checkArgCount(*this, TheCall, 2)) 7294 return true; 7295 7296 Expr *Arg0 = TheCall->getArg(0); 7297 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 7298 if (FirstArg.isInvalid()) 7299 return true; 7300 QualType FirstArgType = FirstArg.get()->getType(); 7301 if (!FirstArgType->isAnyPointerType()) 7302 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 7303 << "first" << FirstArgType << Arg0->getSourceRange(); 7304 TheCall->setArg(0, FirstArg.get()); 7305 7306 // Derive the return type from the pointer argument. 
7307 TheCall->setType(FirstArgType); 7308 7309 // Second arg must be an constant in range [0,15] 7310 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 7311 } 7312 7313 if (BuiltinID == AArch64::BI__builtin_arm_gmi) { 7314 if (checkArgCount(*this, TheCall, 2)) 7315 return true; 7316 Expr *Arg0 = TheCall->getArg(0); 7317 Expr *Arg1 = TheCall->getArg(1); 7318 7319 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 7320 if (FirstArg.isInvalid()) 7321 return true; 7322 QualType FirstArgType = FirstArg.get()->getType(); 7323 if (!FirstArgType->isAnyPointerType()) 7324 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 7325 << "first" << FirstArgType << Arg0->getSourceRange(); 7326 7327 QualType SecArgType = Arg1->getType(); 7328 if (!SecArgType->isIntegerType()) 7329 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer) 7330 << "second" << SecArgType << Arg1->getSourceRange(); 7331 TheCall->setType(Context.IntTy); 7332 return false; 7333 } 7334 7335 if (BuiltinID == AArch64::BI__builtin_arm_ldg || 7336 BuiltinID == AArch64::BI__builtin_arm_stg) { 7337 if (checkArgCount(*this, TheCall, 1)) 7338 return true; 7339 Expr *Arg0 = TheCall->getArg(0); 7340 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 7341 if (FirstArg.isInvalid()) 7342 return true; 7343 7344 QualType FirstArgType = FirstArg.get()->getType(); 7345 if (!FirstArgType->isAnyPointerType()) 7346 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 7347 << "first" << FirstArgType << Arg0->getSourceRange(); 7348 TheCall->setArg(0, FirstArg.get()); 7349 7350 // Derive the return type from the pointer argument. 7351 if (BuiltinID == AArch64::BI__builtin_arm_ldg) 7352 TheCall->setType(FirstArgType); 7353 return false; 7354 } 7355 7356 if (BuiltinID == AArch64::BI__builtin_arm_subp) { 7357 Expr *ArgA = TheCall->getArg(0); 7358 Expr *ArgB = TheCall->getArg(1); 7359 7360 ExprResult ArgExprA = DefaultFunctionArrayLvalueConversion(ArgA); 7361 ExprResult ArgExprB = DefaultFunctionArrayLvalueConversion(ArgB); 7362 7363 if (ArgExprA.isInvalid() || ArgExprB.isInvalid()) 7364 return true; 7365 7366 QualType ArgTypeA = ArgExprA.get()->getType(); 7367 QualType ArgTypeB = ArgExprB.get()->getType(); 7368 7369 auto isNull = [&] (Expr *E) -> bool { 7370 return E->isNullPointerConstant( 7371 Context, Expr::NPC_ValueDependentIsNotNull); }; 7372 7373 // argument should be either a pointer or null 7374 if (!ArgTypeA->isAnyPointerType() && !isNull(ArgA)) 7375 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer) 7376 << "first" << ArgTypeA << ArgA->getSourceRange(); 7377 7378 if (!ArgTypeB->isAnyPointerType() && !isNull(ArgB)) 7379 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer) 7380 << "second" << ArgTypeB << ArgB->getSourceRange(); 7381 7382 // Ensure Pointee types are compatible 7383 if (ArgTypeA->isAnyPointerType() && !isNull(ArgA) && 7384 ArgTypeB->isAnyPointerType() && !isNull(ArgB)) { 7385 QualType pointeeA = ArgTypeA->getPointeeType(); 7386 QualType pointeeB = ArgTypeB->getPointeeType(); 7387 if (!Context.typesAreCompatible( 7388 Context.getCanonicalType(pointeeA).getUnqualifiedType(), 7389 Context.getCanonicalType(pointeeB).getUnqualifiedType())) { 7390 return Diag(TheCall->getBeginLoc(), diag::err_typecheck_sub_ptr_compatible) 7391 << ArgTypeA << ArgTypeB << ArgA->getSourceRange() 7392 << ArgB->getSourceRange(); 7393 } 7394 } 7395 7396 // at least one argument should be pointer type 7397 if 
(!ArgTypeA->isAnyPointerType() && !ArgTypeB->isAnyPointerType()) 7398 return Diag(TheCall->getBeginLoc(), diag::err_memtag_any2arg_pointer) 7399 << ArgTypeA << ArgTypeB << ArgA->getSourceRange(); 7400 7401 if (isNull(ArgA)) // adopt type of the other pointer 7402 ArgExprA = ImpCastExprToType(ArgExprA.get(), ArgTypeB, CK_NullToPointer); 7403 7404 if (isNull(ArgB)) 7405 ArgExprB = ImpCastExprToType(ArgExprB.get(), ArgTypeA, CK_NullToPointer); 7406 7407 TheCall->setArg(0, ArgExprA.get()); 7408 TheCall->setArg(1, ArgExprB.get()); 7409 TheCall->setType(Context.LongLongTy); 7410 return false; 7411 } 7412 assert(false && "Unhandled ARM MTE intrinsic"); 7413 return true; 7414 } 7415 7416 /// SemaBuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr 7417 /// TheCall is an ARM/AArch64 special register string literal. 7418 bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, 7419 int ArgNum, unsigned ExpectedFieldNum, 7420 bool AllowName) { 7421 bool IsARMBuiltin = BuiltinID == ARM::BI__builtin_arm_rsr64 || 7422 BuiltinID == ARM::BI__builtin_arm_wsr64 || 7423 BuiltinID == ARM::BI__builtin_arm_rsr || 7424 BuiltinID == ARM::BI__builtin_arm_rsrp || 7425 BuiltinID == ARM::BI__builtin_arm_wsr || 7426 BuiltinID == ARM::BI__builtin_arm_wsrp; 7427 bool IsAArch64Builtin = BuiltinID == AArch64::BI__builtin_arm_rsr64 || 7428 BuiltinID == AArch64::BI__builtin_arm_wsr64 || 7429 BuiltinID == AArch64::BI__builtin_arm_rsr || 7430 BuiltinID == AArch64::BI__builtin_arm_rsrp || 7431 BuiltinID == AArch64::BI__builtin_arm_wsr || 7432 BuiltinID == AArch64::BI__builtin_arm_wsrp; 7433 assert((IsARMBuiltin || IsAArch64Builtin) && "Unexpected ARM builtin."); 7434 7435 // We can't check the value of a dependent argument. 7436 Expr *Arg = TheCall->getArg(ArgNum); 7437 if (Arg->isTypeDependent() || Arg->isValueDependent()) 7438 return false; 7439 7440 // Check if the argument is a string literal. 7441 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 7442 return Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 7443 << Arg->getSourceRange(); 7444 7445 // Check the type of special register given. 7446 StringRef Reg = cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 7447 SmallVector<StringRef, 6> Fields; 7448 Reg.split(Fields, ":"); 7449 7450 if (Fields.size() != ExpectedFieldNum && !(AllowName && Fields.size() == 1)) 7451 return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg) 7452 << Arg->getSourceRange(); 7453 7454 // If the string is the name of a register then we cannot check that it is 7455 // valid here but if the string is of one the forms described in ACLE then we 7456 // can check that the supplied fields are integers and within the valid 7457 // ranges. 7458 if (Fields.size() > 1) { 7459 bool FiveFields = Fields.size() == 5; 7460 7461 bool ValidString = true; 7462 if (IsARMBuiltin) { 7463 ValidString &= Fields[0].startswith_insensitive("cp") || 7464 Fields[0].startswith_insensitive("p"); 7465 if (ValidString) 7466 Fields[0] = Fields[0].drop_front( 7467 Fields[0].startswith_insensitive("cp") ? 2 : 1); 7468 7469 ValidString &= Fields[2].startswith_insensitive("c"); 7470 if (ValidString) 7471 Fields[2] = Fields[2].drop_front(1); 7472 7473 if (FiveFields) { 7474 ValidString &= Fields[3].startswith_insensitive("c"); 7475 if (ValidString) 7476 Fields[3] = Fields[3].drop_front(1); 7477 } 7478 } 7479 7480 SmallVector<int, 5> Ranges; 7481 if (FiveFields) 7482 Ranges.append({IsAArch64Builtin ? 
1 : 15, 7, 15, 15, 7}); 7483 else 7484 Ranges.append({15, 7, 15}); 7485 7486 for (unsigned i=0; i<Fields.size(); ++i) { 7487 int IntField; 7488 ValidString &= !Fields[i].getAsInteger(10, IntField); 7489 ValidString &= (IntField >= 0 && IntField <= Ranges[i]); 7490 } 7491 7492 if (!ValidString) 7493 return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg) 7494 << Arg->getSourceRange(); 7495 } else if (IsAArch64Builtin && Fields.size() == 1) { 7496 // If the register name is one of those that appear in the condition below 7497 // and the special register builtin being used is one of the write builtins, 7498 // then we require that the argument provided for writing to the register 7499 // is an integer constant expression. This is because it will be lowered to 7500 // an MSR (immediate) instruction, so we need to know the immediate at 7501 // compile time. 7502 if (TheCall->getNumArgs() != 2) 7503 return false; 7504 7505 std::string RegLower = Reg.lower(); 7506 if (RegLower != "spsel" && RegLower != "daifset" && RegLower != "daifclr" && 7507 RegLower != "pan" && RegLower != "uao") 7508 return false; 7509 7510 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 7511 } 7512 7513 return false; 7514 } 7515 7516 /// SemaBuiltinPPCMMACall - Check the call to a PPC MMA builtin for validity. 7517 /// Emit an error and return true on failure; return false on success. 7518 /// TypeStr is a string containing the type descriptor of the value returned by 7519 /// the builtin and the descriptors of the expected type of the arguments. 7520 bool Sema::SemaBuiltinPPCMMACall(CallExpr *TheCall, unsigned BuiltinID, 7521 const char *TypeStr) { 7522 7523 assert((TypeStr[0] != '\0') && 7524 "Invalid types in PPC MMA builtin declaration"); 7525 7526 switch (BuiltinID) { 7527 default: 7528 // This function is called in CheckPPCBuiltinFunctionCall where the 7529 // BuiltinID is guaranteed to be an MMA or pair vector memop builtin, here 7530 // we are isolating the pair vector memop builtins that can be used with mma 7531 // off so the default case is every builtin that requires mma and paired 7532 // vector memops. 7533 if (SemaFeatureCheck(*this, TheCall, "paired-vector-memops", 7534 diag::err_ppc_builtin_only_on_arch, "10") || 7535 SemaFeatureCheck(*this, TheCall, "mma", 7536 diag::err_ppc_builtin_only_on_arch, "10")) 7537 return true; 7538 break; 7539 case PPC::BI__builtin_vsx_lxvp: 7540 case PPC::BI__builtin_vsx_stxvp: 7541 case PPC::BI__builtin_vsx_assemble_pair: 7542 case PPC::BI__builtin_vsx_disassemble_pair: 7543 if (SemaFeatureCheck(*this, TheCall, "paired-vector-memops", 7544 diag::err_ppc_builtin_only_on_arch, "10")) 7545 return true; 7546 break; 7547 } 7548 7549 unsigned Mask = 0; 7550 unsigned ArgNum = 0; 7551 7552 // The first type in TypeStr is the type of the value returned by the 7553 // builtin. So we first read that type and change the type of TheCall. 7554 QualType type = DecodePPCMMATypeFromStr(Context, TypeStr, Mask); 7555 TheCall->setType(type); 7556 7557 while (*TypeStr != '\0') { 7558 Mask = 0; 7559 QualType ExpectedType = DecodePPCMMATypeFromStr(Context, TypeStr, Mask); 7560 if (ArgNum >= TheCall->getNumArgs()) { 7561 ArgNum++; 7562 break; 7563 } 7564 7565 Expr *Arg = TheCall->getArg(ArgNum); 7566 QualType PassedType = Arg->getType(); 7567 QualType StrippedRVType = PassedType.getCanonicalType(); 7568 7569 // Strip Restrict/Volatile qualifiers. 
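// For example (illustrative), an argument declared as '__vector_quad *restrict'
// or '__vector_quad *volatile' is compared against the expected type as a
// plain '__vector_quad *' once these qualifiers are stripped below.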
7570 if (StrippedRVType.isRestrictQualified() || 7571 StrippedRVType.isVolatileQualified()) 7572 StrippedRVType = StrippedRVType.getCanonicalType().getUnqualifiedType(); 7573 7574 // The only case where the argument type and expected type are allowed to 7575 // mismatch is if the argument type is a non-void pointer (or array) and 7576 // expected type is a void pointer. 7577 if (StrippedRVType != ExpectedType) 7578 if (!(ExpectedType->isVoidPointerType() && 7579 (StrippedRVType->isPointerType() || StrippedRVType->isArrayType()))) 7580 return Diag(Arg->getBeginLoc(), 7581 diag::err_typecheck_convert_incompatible) 7582 << PassedType << ExpectedType << 1 << 0 << 0; 7583 7584 // If the value of the Mask is not 0, we have a constraint in the size of 7585 // the integer argument so here we ensure the argument is a constant that 7586 // is in the valid range. 7587 if (Mask != 0 && 7588 SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, Mask, true)) 7589 return true; 7590 7591 ArgNum++; 7592 } 7593 7594 // In case we exited early from the previous loop, there are other types to 7595 // read from TypeStr. So we need to read them all to ensure we have the right 7596 // number of arguments in TheCall and if it is not the case, to display a 7597 // better error message. 7598 while (*TypeStr != '\0') { 7599 (void) DecodePPCMMATypeFromStr(Context, TypeStr, Mask); 7600 ArgNum++; 7601 } 7602 if (checkArgCount(*this, TheCall, ArgNum)) 7603 return true; 7604 7605 return false; 7606 } 7607 7608 /// SemaBuiltinLongjmp - Handle __builtin_longjmp(void *env[5], int val). 7609 /// This checks that the target supports __builtin_longjmp and 7610 /// that val is a constant 1. 7611 bool Sema::SemaBuiltinLongjmp(CallExpr *TheCall) { 7612 if (!Context.getTargetInfo().hasSjLjLowering()) 7613 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_unsupported) 7614 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 7615 7616 Expr *Arg = TheCall->getArg(1); 7617 llvm::APSInt Result; 7618 7619 // TODO: This is less than ideal. Overload this to take a value. 7620 if (SemaBuiltinConstantArg(TheCall, 1, Result)) 7621 return true; 7622 7623 if (Result != 1) 7624 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_invalid_val) 7625 << SourceRange(Arg->getBeginLoc(), Arg->getEndLoc()); 7626 7627 return false; 7628 } 7629 7630 /// SemaBuiltinSetjmp - Handle __builtin_setjmp(void *env[5]). 7631 /// This checks that the target supports __builtin_setjmp. 7632 bool Sema::SemaBuiltinSetjmp(CallExpr *TheCall) { 7633 if (!Context.getTargetInfo().hasSjLjLowering()) 7634 return Diag(TheCall->getBeginLoc(), diag::err_builtin_setjmp_unsupported) 7635 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 7636 return false; 7637 } 7638 7639 namespace { 7640 7641 class UncoveredArgHandler { 7642 enum { Unknown = -1, AllCovered = -2 }; 7643 7644 signed FirstUncoveredArg = Unknown; 7645 SmallVector<const Expr *, 4> DiagnosticExprs; 7646 7647 public: 7648 UncoveredArgHandler() = default; 7649 7650 bool hasUncoveredArg() const { 7651 return (FirstUncoveredArg >= 0); 7652 } 7653 7654 unsigned getUncoveredArg() const { 7655 assert(hasUncoveredArg() && "no uncovered argument"); 7656 return FirstUncoveredArg; 7657 } 7658 7659 void setAllCovered() { 7660 // A string has been found with all arguments covered, so clear out 7661 // the diagnostics. 
7662 DiagnosticExprs.clear(); 7663 FirstUncoveredArg = AllCovered; 7664 } 7665 7666 void Update(signed NewFirstUncoveredArg, const Expr *StrExpr) { 7667 assert(NewFirstUncoveredArg >= 0 && "Outside range"); 7668 7669 // Don't update if a previous string covers all arguments. 7670 if (FirstUncoveredArg == AllCovered) 7671 return; 7672 7673 // UncoveredArgHandler tracks the highest uncovered argument index 7674 // and with it all the strings that match this index. 7675 if (NewFirstUncoveredArg == FirstUncoveredArg) 7676 DiagnosticExprs.push_back(StrExpr); 7677 else if (NewFirstUncoveredArg > FirstUncoveredArg) { 7678 DiagnosticExprs.clear(); 7679 DiagnosticExprs.push_back(StrExpr); 7680 FirstUncoveredArg = NewFirstUncoveredArg; 7681 } 7682 } 7683 7684 void Diagnose(Sema &S, bool IsFunctionCall, const Expr *ArgExpr); 7685 }; 7686 7687 enum StringLiteralCheckType { 7688 SLCT_NotALiteral, 7689 SLCT_UncheckedLiteral, 7690 SLCT_CheckedLiteral 7691 }; 7692 7693 } // namespace 7694 7695 static void sumOffsets(llvm::APSInt &Offset, llvm::APSInt Addend, 7696 BinaryOperatorKind BinOpKind, 7697 bool AddendIsRight) { 7698 unsigned BitWidth = Offset.getBitWidth(); 7699 unsigned AddendBitWidth = Addend.getBitWidth(); 7700 // There might be negative interim results. 7701 if (Addend.isUnsigned()) { 7702 Addend = Addend.zext(++AddendBitWidth); 7703 Addend.setIsSigned(true); 7704 } 7705 // Adjust the bit width of the APSInts. 7706 if (AddendBitWidth > BitWidth) { 7707 Offset = Offset.sext(AddendBitWidth); 7708 BitWidth = AddendBitWidth; 7709 } else if (BitWidth > AddendBitWidth) { 7710 Addend = Addend.sext(BitWidth); 7711 } 7712 7713 bool Ov = false; 7714 llvm::APSInt ResOffset = Offset; 7715 if (BinOpKind == BO_Add) 7716 ResOffset = Offset.sadd_ov(Addend, Ov); 7717 else { 7718 assert(AddendIsRight && BinOpKind == BO_Sub && 7719 "operator must be add or sub with addend on the right"); 7720 ResOffset = Offset.ssub_ov(Addend, Ov); 7721 } 7722 7723 // We add an offset to a pointer here so we should support an offset as big as 7724 // possible. 7725 if (Ov) { 7726 assert(BitWidth <= std::numeric_limits<unsigned>::max() / 2 && 7727 "index (intermediate) result too big"); 7728 Offset = Offset.sext(2 * BitWidth); 7729 sumOffsets(Offset, Addend, BinOpKind, AddendIsRight); 7730 return; 7731 } 7732 7733 Offset = ResOffset; 7734 } 7735 7736 namespace { 7737 7738 // This is a wrapper class around StringLiteral to support offsetted string 7739 // literals as format strings. It takes the offset into account when returning 7740 // the string and its length or the source locations to display notes correctly. 
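// For example (illustrative), given printf("init: %s" + 6, name), the wrapper
// exposes the literal as the substring "%s" and maps byte positions within
// that substring back to locations inside the original string literal.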
7741 class FormatStringLiteral { 7742 const StringLiteral *FExpr; 7743 int64_t Offset; 7744 7745 public: 7746 FormatStringLiteral(const StringLiteral *fexpr, int64_t Offset = 0) 7747 : FExpr(fexpr), Offset(Offset) {} 7748 7749 StringRef getString() const { 7750 return FExpr->getString().drop_front(Offset); 7751 } 7752 7753 unsigned getByteLength() const { 7754 return FExpr->getByteLength() - getCharByteWidth() * Offset; 7755 } 7756 7757 unsigned getLength() const { return FExpr->getLength() - Offset; } 7758 unsigned getCharByteWidth() const { return FExpr->getCharByteWidth(); } 7759 7760 StringLiteral::StringKind getKind() const { return FExpr->getKind(); } 7761 7762 QualType getType() const { return FExpr->getType(); } 7763 7764 bool isAscii() const { return FExpr->isAscii(); } 7765 bool isWide() const { return FExpr->isWide(); } 7766 bool isUTF8() const { return FExpr->isUTF8(); } 7767 bool isUTF16() const { return FExpr->isUTF16(); } 7768 bool isUTF32() const { return FExpr->isUTF32(); } 7769 bool isPascal() const { return FExpr->isPascal(); } 7770 7771 SourceLocation getLocationOfByte( 7772 unsigned ByteNo, const SourceManager &SM, const LangOptions &Features, 7773 const TargetInfo &Target, unsigned *StartToken = nullptr, 7774 unsigned *StartTokenByteOffset = nullptr) const { 7775 return FExpr->getLocationOfByte(ByteNo + Offset, SM, Features, Target, 7776 StartToken, StartTokenByteOffset); 7777 } 7778 7779 SourceLocation getBeginLoc() const LLVM_READONLY { 7780 return FExpr->getBeginLoc().getLocWithOffset(Offset); 7781 } 7782 7783 SourceLocation getEndLoc() const LLVM_READONLY { return FExpr->getEndLoc(); } 7784 }; 7785 7786 } // namespace 7787 7788 static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr, 7789 const Expr *OrigFormatExpr, 7790 ArrayRef<const Expr *> Args, 7791 bool HasVAListArg, unsigned format_idx, 7792 unsigned firstDataArg, 7793 Sema::FormatStringType Type, 7794 bool inFunctionCall, 7795 Sema::VariadicCallType CallType, 7796 llvm::SmallBitVector &CheckedVarArgs, 7797 UncoveredArgHandler &UncoveredArg, 7798 bool IgnoreStringsWithoutSpecifiers); 7799 7800 // Determine if an expression is a string literal or constant string. 7801 // If this function returns false on the arguments to a function expecting a 7802 // format string, we will usually need to emit a warning. 7803 // True string literals are then checked by CheckFormatString. 7804 static StringLiteralCheckType 7805 checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args, 7806 bool HasVAListArg, unsigned format_idx, 7807 unsigned firstDataArg, Sema::FormatStringType Type, 7808 Sema::VariadicCallType CallType, bool InFunctionCall, 7809 llvm::SmallBitVector &CheckedVarArgs, 7810 UncoveredArgHandler &UncoveredArg, 7811 llvm::APSInt Offset, 7812 bool IgnoreStringsWithoutSpecifiers = false) { 7813 if (S.isConstantEvaluated()) 7814 return SLCT_NotALiteral; 7815 tryAgain: 7816 assert(Offset.isSigned() && "invalid offset"); 7817 7818 if (E->isTypeDependent() || E->isValueDependent()) 7819 return SLCT_NotALiteral; 7820 7821 E = E->IgnoreParenCasts(); 7822 7823 if (E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull)) 7824 // Technically -Wformat-nonliteral does not warn about this case. 7825 // The behavior of printf and friends in this case is implementation 7826 // dependent. Ideally if the format string cannot be null then 7827 // it should have a 'nonnull' attribute in the function prototype. 
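// For example (illustrative), printf(NULL) falls into this case and is treated
// as an unchecked literal rather than reported as a non-literal format string.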
7828 return SLCT_UncheckedLiteral; 7829 7830 switch (E->getStmtClass()) { 7831 case Stmt::BinaryConditionalOperatorClass: 7832 case Stmt::ConditionalOperatorClass: { 7833 // The expression is a literal if both sub-expressions were, and it was 7834 // completely checked only if both sub-expressions were checked. 7835 const AbstractConditionalOperator *C = 7836 cast<AbstractConditionalOperator>(E); 7837 7838 // Determine whether it is necessary to check both sub-expressions, for 7839 // example, because the condition expression is a constant that can be 7840 // evaluated at compile time. 7841 bool CheckLeft = true, CheckRight = true; 7842 7843 bool Cond; 7844 if (C->getCond()->EvaluateAsBooleanCondition(Cond, S.getASTContext(), 7845 S.isConstantEvaluated())) { 7846 if (Cond) 7847 CheckRight = false; 7848 else 7849 CheckLeft = false; 7850 } 7851 7852 // We need to maintain the offsets for the right and the left hand side 7853 // separately to check if every possible indexed expression is a valid 7854 // string literal. They might have different offsets for different string 7855 // literals in the end. 7856 StringLiteralCheckType Left; 7857 if (!CheckLeft) 7858 Left = SLCT_UncheckedLiteral; 7859 else { 7860 Left = checkFormatStringExpr(S, C->getTrueExpr(), Args, 7861 HasVAListArg, format_idx, firstDataArg, 7862 Type, CallType, InFunctionCall, 7863 CheckedVarArgs, UncoveredArg, Offset, 7864 IgnoreStringsWithoutSpecifiers); 7865 if (Left == SLCT_NotALiteral || !CheckRight) { 7866 return Left; 7867 } 7868 } 7869 7870 StringLiteralCheckType Right = checkFormatStringExpr( 7871 S, C->getFalseExpr(), Args, HasVAListArg, format_idx, firstDataArg, 7872 Type, CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset, 7873 IgnoreStringsWithoutSpecifiers); 7874 7875 return (CheckLeft && Left < Right) ? Left : Right; 7876 } 7877 7878 case Stmt::ImplicitCastExprClass: 7879 E = cast<ImplicitCastExpr>(E)->getSubExpr(); 7880 goto tryAgain; 7881 7882 case Stmt::OpaqueValueExprClass: 7883 if (const Expr *src = cast<OpaqueValueExpr>(E)->getSourceExpr()) { 7884 E = src; 7885 goto tryAgain; 7886 } 7887 return SLCT_NotALiteral; 7888 7889 case Stmt::PredefinedExprClass: 7890 // While __func__, etc., are technically not string literals, they 7891 // cannot contain format specifiers and thus are not a security 7892 // liability. 7893 return SLCT_UncheckedLiteral; 7894 7895 case Stmt::DeclRefExprClass: { 7896 const DeclRefExpr *DR = cast<DeclRefExpr>(E); 7897 7898 // As an exception, do not flag errors for variables binding to 7899 // const string literals. 7900 if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) { 7901 bool isConstant = false; 7902 QualType T = DR->getType(); 7903 7904 if (const ArrayType *AT = S.Context.getAsArrayType(T)) { 7905 isConstant = AT->getElementType().isConstant(S.Context); 7906 } else if (const PointerType *PT = T->getAs<PointerType>()) { 7907 isConstant = T.isConstant(S.Context) && 7908 PT->getPointeeType().isConstant(S.Context); 7909 } else if (T->isObjCObjectPointerType()) { 7910 // In ObjC, there is usually no "const ObjectPointer" type, 7911 // so don't check if the pointee type is constant. 
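// For example (illustrative), a variable declared as 'NSString *const kFmt'
// counts as constant here even though the pointee ObjC type is not checked.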
7912 isConstant = T.isConstant(S.Context); 7913 } 7914 7915 if (isConstant) { 7916 if (const Expr *Init = VD->getAnyInitializer()) { 7917 // Look through initializers like const char c[] = { "foo" } 7918 if (const InitListExpr *InitList = dyn_cast<InitListExpr>(Init)) { 7919 if (InitList->isStringLiteralInit()) 7920 Init = InitList->getInit(0)->IgnoreParenImpCasts(); 7921 } 7922 return checkFormatStringExpr(S, Init, Args, 7923 HasVAListArg, format_idx, 7924 firstDataArg, Type, CallType, 7925 /*InFunctionCall*/ false, CheckedVarArgs, 7926 UncoveredArg, Offset); 7927 } 7928 } 7929 7930 // For vprintf* functions (i.e., HasVAListArg==true), we add a 7931 // special check to see if the format string is a function parameter 7932 // of the function calling the printf function. If the function 7933 // has an attribute indicating it is a printf-like function, then we 7934 // should suppress warnings concerning non-literals being used in a call 7935 // to a vprintf function. For example: 7936 // 7937 // void 7938 // logmessage(char const *fmt __attribute__ (format (printf, 1, 2)), ...){ 7939 // va_list ap; 7940 // va_start(ap, fmt); 7941 // vprintf(fmt, ap); // Do NOT emit a warning about "fmt". 7942 // ... 7943 // } 7944 if (HasVAListArg) { 7945 if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(VD)) { 7946 if (const Decl *D = dyn_cast<Decl>(PV->getDeclContext())) { 7947 int PVIndex = PV->getFunctionScopeIndex() + 1; 7948 for (const auto *PVFormat : D->specific_attrs<FormatAttr>()) { 7949 // adjust for implicit parameter 7950 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) 7951 if (MD->isInstance()) 7952 ++PVIndex; 7953 // We also check if the formats are compatible. 7954 // We can't pass a 'scanf' string to a 'printf' function. 7955 if (PVIndex == PVFormat->getFormatIdx() && 7956 Type == S.GetFormatStringType(PVFormat)) 7957 return SLCT_UncheckedLiteral; 7958 } 7959 } 7960 } 7961 } 7962 } 7963 7964 return SLCT_NotALiteral; 7965 } 7966 7967 case Stmt::CallExprClass: 7968 case Stmt::CXXMemberCallExprClass: { 7969 const CallExpr *CE = cast<CallExpr>(E); 7970 if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(CE->getCalleeDecl())) { 7971 bool IsFirst = true; 7972 StringLiteralCheckType CommonResult; 7973 for (const auto *FA : ND->specific_attrs<FormatArgAttr>()) { 7974 const Expr *Arg = CE->getArg(FA->getFormatIdx().getASTIndex()); 7975 StringLiteralCheckType Result = checkFormatStringExpr( 7976 S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type, 7977 CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset, 7978 IgnoreStringsWithoutSpecifiers); 7979 if (IsFirst) { 7980 CommonResult = Result; 7981 IsFirst = false; 7982 } 7983 } 7984 if (!IsFirst) 7985 return CommonResult; 7986 7987 if (const auto *FD = dyn_cast<FunctionDecl>(ND)) { 7988 unsigned BuiltinID = FD->getBuiltinID(); 7989 if (BuiltinID == Builtin::BI__builtin___CFStringMakeConstantString || 7990 BuiltinID == Builtin::BI__builtin___NSStringMakeConstantString) { 7991 const Expr *Arg = CE->getArg(0); 7992 return checkFormatStringExpr(S, Arg, Args, 7993 HasVAListArg, format_idx, 7994 firstDataArg, Type, CallType, 7995 InFunctionCall, CheckedVarArgs, 7996 UncoveredArg, Offset, 7997 IgnoreStringsWithoutSpecifiers); 7998 } 7999 } 8000 } 8001 8002 return SLCT_NotALiteral; 8003 } 8004 case Stmt::ObjCMessageExprClass: { 8005 const auto *ME = cast<ObjCMessageExpr>(E); 8006 if (const auto *MD = ME->getMethodDecl()) { 8007 if (const auto *FA = MD->getAttr<FormatArgAttr>()) { 8008 // As a special case heuristic, if we're using 
the method -[NSBundle 8009 // localizedStringForKey:value:table:], ignore any key strings that lack 8010 // format specifiers. The idea is that if the key doesn't have any 8011 // format specifiers then it's probably just a key to map to the 8012 // localized strings. If it does have format specifiers though, then it's 8013 // likely that the text of the key is the format string in the 8014 // programmer's language, and should be checked. 8015 const ObjCInterfaceDecl *IFace; 8016 if (MD->isInstanceMethod() && (IFace = MD->getClassInterface()) && 8017 IFace->getIdentifier()->isStr("NSBundle") && 8018 MD->getSelector().isKeywordSelector( 8019 {"localizedStringForKey", "value", "table"})) { 8020 IgnoreStringsWithoutSpecifiers = true; 8021 } 8022 8023 const Expr *Arg = ME->getArg(FA->getFormatIdx().getASTIndex()); 8024 return checkFormatStringExpr( 8025 S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type, 8026 CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset, 8027 IgnoreStringsWithoutSpecifiers); 8028 } 8029 } 8030 8031 return SLCT_NotALiteral; 8032 } 8033 case Stmt::ObjCStringLiteralClass: 8034 case Stmt::StringLiteralClass: { 8035 const StringLiteral *StrE = nullptr; 8036 8037 if (const ObjCStringLiteral *ObjCFExpr = dyn_cast<ObjCStringLiteral>(E)) 8038 StrE = ObjCFExpr->getString(); 8039 else 8040 StrE = cast<StringLiteral>(E); 8041 8042 if (StrE) { 8043 if (Offset.isNegative() || Offset > StrE->getLength()) { 8044 // TODO: It would be better to have an explicit warning for out of 8045 // bounds literals. 8046 return SLCT_NotALiteral; 8047 } 8048 FormatStringLiteral FStr(StrE, Offset.sextOrTrunc(64).getSExtValue()); 8049 CheckFormatString(S, &FStr, E, Args, HasVAListArg, format_idx, 8050 firstDataArg, Type, InFunctionCall, CallType, 8051 CheckedVarArgs, UncoveredArg, 8052 IgnoreStringsWithoutSpecifiers); 8053 return SLCT_CheckedLiteral; 8054 } 8055 8056 return SLCT_NotALiteral; 8057 } 8058 case Stmt::BinaryOperatorClass: { 8059 const BinaryOperator *BinOp = cast<BinaryOperator>(E); 8060 8061 // A string literal + an int offset is still a string literal.
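// For example (illustrative), printf("%d: %s" + 4, s) is checked as if the
// format string were just "%s", with the offset folded into the literal.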
8062 if (BinOp->isAdditiveOp()) { 8063 Expr::EvalResult LResult, RResult; 8064 8065 bool LIsInt = BinOp->getLHS()->EvaluateAsInt( 8066 LResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated()); 8067 bool RIsInt = BinOp->getRHS()->EvaluateAsInt( 8068 RResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated()); 8069 8070 if (LIsInt != RIsInt) { 8071 BinaryOperatorKind BinOpKind = BinOp->getOpcode(); 8072 8073 if (LIsInt) { 8074 if (BinOpKind == BO_Add) { 8075 sumOffsets(Offset, LResult.Val.getInt(), BinOpKind, RIsInt); 8076 E = BinOp->getRHS(); 8077 goto tryAgain; 8078 } 8079 } else { 8080 sumOffsets(Offset, RResult.Val.getInt(), BinOpKind, RIsInt); 8081 E = BinOp->getLHS(); 8082 goto tryAgain; 8083 } 8084 } 8085 } 8086 8087 return SLCT_NotALiteral; 8088 } 8089 case Stmt::UnaryOperatorClass: { 8090 const UnaryOperator *UnaOp = cast<UnaryOperator>(E); 8091 auto ASE = dyn_cast<ArraySubscriptExpr>(UnaOp->getSubExpr()); 8092 if (UnaOp->getOpcode() == UO_AddrOf && ASE) { 8093 Expr::EvalResult IndexResult; 8094 if (ASE->getRHS()->EvaluateAsInt(IndexResult, S.Context, 8095 Expr::SE_NoSideEffects, 8096 S.isConstantEvaluated())) { 8097 sumOffsets(Offset, IndexResult.Val.getInt(), BO_Add, 8098 /*RHS is int*/ true); 8099 E = ASE->getBase(); 8100 goto tryAgain; 8101 } 8102 } 8103 8104 return SLCT_NotALiteral; 8105 } 8106 8107 default: 8108 return SLCT_NotALiteral; 8109 } 8110 } 8111 8112 Sema::FormatStringType Sema::GetFormatStringType(const FormatAttr *Format) { 8113 return llvm::StringSwitch<FormatStringType>(Format->getType()->getName()) 8114 .Case("scanf", FST_Scanf) 8115 .Cases("printf", "printf0", FST_Printf) 8116 .Cases("NSString", "CFString", FST_NSString) 8117 .Case("strftime", FST_Strftime) 8118 .Case("strfmon", FST_Strfmon) 8119 .Cases("kprintf", "cmn_err", "vcmn_err", "zcmn_err", FST_Kprintf) 8120 .Case("freebsd_kprintf", FST_FreeBSDKPrintf) 8121 .Case("os_trace", FST_OSLog) 8122 .Case("os_log", FST_OSLog) 8123 .Default(FST_Unknown); 8124 } 8125 8126 /// CheckFormatArguments - Check calls to printf and scanf (and similar 8127 /// functions) for correct use of format strings. 8128 /// Returns true if a format string has been fully checked. 8129 bool Sema::CheckFormatArguments(const FormatAttr *Format, 8130 ArrayRef<const Expr *> Args, 8131 bool IsCXXMember, 8132 VariadicCallType CallType, 8133 SourceLocation Loc, SourceRange Range, 8134 llvm::SmallBitVector &CheckedVarArgs) { 8135 FormatStringInfo FSI; 8136 if (getFormatStringInfo(Format, IsCXXMember, &FSI)) 8137 return CheckFormatArguments(Args, FSI.HasVAListArg, FSI.FormatIdx, 8138 FSI.FirstDataArg, GetFormatStringType(Format), 8139 CallType, Loc, Range, CheckedVarArgs); 8140 return false; 8141 } 8142 8143 bool Sema::CheckFormatArguments(ArrayRef<const Expr *> Args, 8144 bool HasVAListArg, unsigned format_idx, 8145 unsigned firstDataArg, FormatStringType Type, 8146 VariadicCallType CallType, 8147 SourceLocation Loc, SourceRange Range, 8148 llvm::SmallBitVector &CheckedVarArgs) { 8149 // CHECK: printf/scanf-like function is called with no format string. 8150 if (format_idx >= Args.size()) { 8151 Diag(Loc, diag::warn_missing_format_string) << Range; 8152 return false; 8153 } 8154 8155 const Expr *OrigFormatExpr = Args[format_idx]->IgnoreParenCasts(); 8156 8157 // CHECK: format string is not a string literal. 8158 // 8159 // Dynamically generated format strings are difficult to 8160 // automatically vet at compile time. 
Requiring that format strings 8161 // are string literals: (1) permits the checking of format strings by 8162 // the compiler and thereby (2) can practically remove the source of 8163 // many format string exploits. 8164 8165 // Format string can be either ObjC string (e.g. @"%d") or 8166 // C string (e.g. "%d") 8167 // ObjC string uses the same format specifiers as C string, so we can use 8168 // the same format string checking logic for both ObjC and C strings. 8169 UncoveredArgHandler UncoveredArg; 8170 StringLiteralCheckType CT = 8171 checkFormatStringExpr(*this, OrigFormatExpr, Args, HasVAListArg, 8172 format_idx, firstDataArg, Type, CallType, 8173 /*IsFunctionCall*/ true, CheckedVarArgs, 8174 UncoveredArg, 8175 /*no string offset*/ llvm::APSInt(64, false) = 0); 8176 8177 // Generate a diagnostic where an uncovered argument is detected. 8178 if (UncoveredArg.hasUncoveredArg()) { 8179 unsigned ArgIdx = UncoveredArg.getUncoveredArg() + firstDataArg; 8180 assert(ArgIdx < Args.size() && "ArgIdx outside bounds"); 8181 UncoveredArg.Diagnose(*this, /*IsFunctionCall*/true, Args[ArgIdx]); 8182 } 8183 8184 if (CT != SLCT_NotALiteral) 8185 // Literal format string found, check done! 8186 return CT == SLCT_CheckedLiteral; 8187 8188 // Strftime is particular as it always uses a single 'time' argument, 8189 // so it is safe to pass a non-literal string. 8190 if (Type == FST_Strftime) 8191 return false; 8192 8193 // Do not emit diag when the string param is a macro expansion and the 8194 // format is either NSString or CFString. This is a hack to prevent 8195 // diag when using the NSLocalizedString and CFCopyLocalizedString macros 8196 // which are usually used in place of NS and CF string literals. 8197 SourceLocation FormatLoc = Args[format_idx]->getBeginLoc(); 8198 if (Type == FST_NSString && SourceMgr.isInSystemMacro(FormatLoc)) 8199 return false; 8200 8201 // If there are no arguments specified, warn with -Wformat-security, otherwise 8202 // warn only with -Wformat-nonliteral. 8203 if (Args.size() == firstDataArg) { 8204 Diag(FormatLoc, diag::warn_format_nonliteral_noargs) 8205 << OrigFormatExpr->getSourceRange(); 8206 switch (Type) { 8207 default: 8208 break; 8209 case FST_Kprintf: 8210 case FST_FreeBSDKPrintf: 8211 case FST_Printf: 8212 Diag(FormatLoc, diag::note_format_security_fixit) 8213 << FixItHint::CreateInsertion(FormatLoc, "\"%s\", "); 8214 break; 8215 case FST_NSString: 8216 Diag(FormatLoc, diag::note_format_security_fixit) 8217 << FixItHint::CreateInsertion(FormatLoc, "@\"%@\", "); 8218 break; 8219 } 8220 } else { 8221 Diag(FormatLoc, diag::warn_format_nonliteral) 8222 << OrigFormatExpr->getSourceRange(); 8223 } 8224 return false; 8225 } 8226 8227 namespace { 8228 8229 class CheckFormatHandler : public analyze_format_string::FormatStringHandler { 8230 protected: 8231 Sema &S; 8232 const FormatStringLiteral *FExpr; 8233 const Expr *OrigFormatExpr; 8234 const Sema::FormatStringType FSType; 8235 const unsigned FirstDataArg; 8236 const unsigned NumDataArgs; 8237 const char *Beg; // Start of format string. 
8238 const bool HasVAListArg; 8239 ArrayRef<const Expr *> Args; 8240 unsigned FormatIdx; 8241 llvm::SmallBitVector CoveredArgs; 8242 bool usesPositionalArgs = false; 8243 bool atFirstArg = true; 8244 bool inFunctionCall; 8245 Sema::VariadicCallType CallType; 8246 llvm::SmallBitVector &CheckedVarArgs; 8247 UncoveredArgHandler &UncoveredArg; 8248 8249 public: 8250 CheckFormatHandler(Sema &s, const FormatStringLiteral *fexpr, 8251 const Expr *origFormatExpr, 8252 const Sema::FormatStringType type, unsigned firstDataArg, 8253 unsigned numDataArgs, const char *beg, bool hasVAListArg, 8254 ArrayRef<const Expr *> Args, unsigned formatIdx, 8255 bool inFunctionCall, Sema::VariadicCallType callType, 8256 llvm::SmallBitVector &CheckedVarArgs, 8257 UncoveredArgHandler &UncoveredArg) 8258 : S(s), FExpr(fexpr), OrigFormatExpr(origFormatExpr), FSType(type), 8259 FirstDataArg(firstDataArg), NumDataArgs(numDataArgs), Beg(beg), 8260 HasVAListArg(hasVAListArg), Args(Args), FormatIdx(formatIdx), 8261 inFunctionCall(inFunctionCall), CallType(callType), 8262 CheckedVarArgs(CheckedVarArgs), UncoveredArg(UncoveredArg) { 8263 CoveredArgs.resize(numDataArgs); 8264 CoveredArgs.reset(); 8265 } 8266 8267 void DoneProcessing(); 8268 8269 void HandleIncompleteSpecifier(const char *startSpecifier, 8270 unsigned specifierLen) override; 8271 8272 void HandleInvalidLengthModifier( 8273 const analyze_format_string::FormatSpecifier &FS, 8274 const analyze_format_string::ConversionSpecifier &CS, 8275 const char *startSpecifier, unsigned specifierLen, 8276 unsigned DiagID); 8277 8278 void HandleNonStandardLengthModifier( 8279 const analyze_format_string::FormatSpecifier &FS, 8280 const char *startSpecifier, unsigned specifierLen); 8281 8282 void HandleNonStandardConversionSpecifier( 8283 const analyze_format_string::ConversionSpecifier &CS, 8284 const char *startSpecifier, unsigned specifierLen); 8285 8286 void HandlePosition(const char *startPos, unsigned posLen) override; 8287 8288 void HandleInvalidPosition(const char *startSpecifier, 8289 unsigned specifierLen, 8290 analyze_format_string::PositionContext p) override; 8291 8292 void HandleZeroPosition(const char *startPos, unsigned posLen) override; 8293 8294 void HandleNullChar(const char *nullCharacter) override; 8295 8296 template <typename Range> 8297 static void 8298 EmitFormatDiagnostic(Sema &S, bool inFunctionCall, const Expr *ArgumentExpr, 8299 const PartialDiagnostic &PDiag, SourceLocation StringLoc, 8300 bool IsStringLocation, Range StringRange, 8301 ArrayRef<FixItHint> Fixit = None); 8302 8303 protected: 8304 bool HandleInvalidConversionSpecifier(unsigned argIndex, SourceLocation Loc, 8305 const char *startSpec, 8306 unsigned specifierLen, 8307 const char *csStart, unsigned csLen); 8308 8309 void HandlePositionalNonpositionalArgs(SourceLocation Loc, 8310 const char *startSpec, 8311 unsigned specifierLen); 8312 8313 SourceRange getFormatStringRange(); 8314 CharSourceRange getSpecifierRange(const char *startSpecifier, 8315 unsigned specifierLen); 8316 SourceLocation getLocationOfByte(const char *x); 8317 8318 const Expr *getDataArg(unsigned i) const; 8319 8320 bool CheckNumArgs(const analyze_format_string::FormatSpecifier &FS, 8321 const analyze_format_string::ConversionSpecifier &CS, 8322 const char *startSpecifier, unsigned specifierLen, 8323 unsigned argIndex); 8324 8325 template <typename Range> 8326 void EmitFormatDiagnostic(PartialDiagnostic PDiag, SourceLocation StringLoc, 8327 bool IsStringLocation, Range StringRange, 8328 ArrayRef<FixItHint> Fixit = None); 
8329 }; 8330 8331 } // namespace 8332 8333 SourceRange CheckFormatHandler::getFormatStringRange() { 8334 return OrigFormatExpr->getSourceRange(); 8335 } 8336 8337 CharSourceRange CheckFormatHandler:: 8338 getSpecifierRange(const char *startSpecifier, unsigned specifierLen) { 8339 SourceLocation Start = getLocationOfByte(startSpecifier); 8340 SourceLocation End = getLocationOfByte(startSpecifier + specifierLen - 1); 8341 8342 // Advance the end SourceLocation by one due to half-open ranges. 8343 End = End.getLocWithOffset(1); 8344 8345 return CharSourceRange::getCharRange(Start, End); 8346 } 8347 8348 SourceLocation CheckFormatHandler::getLocationOfByte(const char *x) { 8349 return FExpr->getLocationOfByte(x - Beg, S.getSourceManager(), 8350 S.getLangOpts(), S.Context.getTargetInfo()); 8351 } 8352 8353 void CheckFormatHandler::HandleIncompleteSpecifier(const char *startSpecifier, 8354 unsigned specifierLen){ 8355 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_incomplete_specifier), 8356 getLocationOfByte(startSpecifier), 8357 /*IsStringLocation*/true, 8358 getSpecifierRange(startSpecifier, specifierLen)); 8359 } 8360 8361 void CheckFormatHandler::HandleInvalidLengthModifier( 8362 const analyze_format_string::FormatSpecifier &FS, 8363 const analyze_format_string::ConversionSpecifier &CS, 8364 const char *startSpecifier, unsigned specifierLen, unsigned DiagID) { 8365 using namespace analyze_format_string; 8366 8367 const LengthModifier &LM = FS.getLengthModifier(); 8368 CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength()); 8369 8370 // See if we know how to fix this length modifier. 8371 Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier(); 8372 if (FixedLM) { 8373 EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), 8374 getLocationOfByte(LM.getStart()), 8375 /*IsStringLocation*/true, 8376 getSpecifierRange(startSpecifier, specifierLen)); 8377 8378 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) 8379 << FixedLM->toString() 8380 << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); 8381 8382 } else { 8383 FixItHint Hint; 8384 if (DiagID == diag::warn_format_nonsensical_length) 8385 Hint = FixItHint::CreateRemoval(LMRange); 8386 8387 EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), 8388 getLocationOfByte(LM.getStart()), 8389 /*IsStringLocation*/true, 8390 getSpecifierRange(startSpecifier, specifierLen), 8391 Hint); 8392 } 8393 } 8394 8395 void CheckFormatHandler::HandleNonStandardLengthModifier( 8396 const analyze_format_string::FormatSpecifier &FS, 8397 const char *startSpecifier, unsigned specifierLen) { 8398 using namespace analyze_format_string; 8399 8400 const LengthModifier &LM = FS.getLengthModifier(); 8401 CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength()); 8402 8403 // See if we know how to fix this length modifier. 
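// For example (illustrative), the BSD 'q' length modifier in "%qd" is accepted
// but reported as non-standard by this handler.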
8404 Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier(); 8405 if (FixedLM) { 8406 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 8407 << LM.toString() << 0, 8408 getLocationOfByte(LM.getStart()), 8409 /*IsStringLocation*/true, 8410 getSpecifierRange(startSpecifier, specifierLen)); 8411 8412 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) 8413 << FixedLM->toString() 8414 << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); 8415 8416 } else { 8417 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 8418 << LM.toString() << 0, 8419 getLocationOfByte(LM.getStart()), 8420 /*IsStringLocation*/true, 8421 getSpecifierRange(startSpecifier, specifierLen)); 8422 } 8423 } 8424 8425 void CheckFormatHandler::HandleNonStandardConversionSpecifier( 8426 const analyze_format_string::ConversionSpecifier &CS, 8427 const char *startSpecifier, unsigned specifierLen) { 8428 using namespace analyze_format_string; 8429 8430 // See if we know how to fix this conversion specifier. 8431 Optional<ConversionSpecifier> FixedCS = CS.getStandardSpecifier(); 8432 if (FixedCS) { 8433 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 8434 << CS.toString() << /*conversion specifier*/1, 8435 getLocationOfByte(CS.getStart()), 8436 /*IsStringLocation*/true, 8437 getSpecifierRange(startSpecifier, specifierLen)); 8438 8439 CharSourceRange CSRange = getSpecifierRange(CS.getStart(), CS.getLength()); 8440 S.Diag(getLocationOfByte(CS.getStart()), diag::note_format_fix_specifier) 8441 << FixedCS->toString() 8442 << FixItHint::CreateReplacement(CSRange, FixedCS->toString()); 8443 } else { 8444 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 8445 << CS.toString() << /*conversion specifier*/1, 8446 getLocationOfByte(CS.getStart()), 8447 /*IsStringLocation*/true, 8448 getSpecifierRange(startSpecifier, specifierLen)); 8449 } 8450 } 8451 8452 void CheckFormatHandler::HandlePosition(const char *startPos, 8453 unsigned posLen) { 8454 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard_positional_arg), 8455 getLocationOfByte(startPos), 8456 /*IsStringLocation*/true, 8457 getSpecifierRange(startPos, posLen)); 8458 } 8459 8460 void 8461 CheckFormatHandler::HandleInvalidPosition(const char *startPos, unsigned posLen, 8462 analyze_format_string::PositionContext p) { 8463 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_positional_specifier) 8464 << (unsigned) p, 8465 getLocationOfByte(startPos), /*IsStringLocation*/true, 8466 getSpecifierRange(startPos, posLen)); 8467 } 8468 8469 void CheckFormatHandler::HandleZeroPosition(const char *startPos, 8470 unsigned posLen) { 8471 EmitFormatDiagnostic(S.PDiag(diag::warn_format_zero_positional_specifier), 8472 getLocationOfByte(startPos), 8473 /*IsStringLocation*/true, 8474 getSpecifierRange(startPos, posLen)); 8475 } 8476 8477 void CheckFormatHandler::HandleNullChar(const char *nullCharacter) { 8478 if (!isa<ObjCStringLiteral>(OrigFormatExpr)) { 8479 // The presence of a null character is likely an error. 8480 EmitFormatDiagnostic( 8481 S.PDiag(diag::warn_printf_format_string_contains_null_char), 8482 getLocationOfByte(nullCharacter), /*IsStringLocation*/true, 8483 getFormatStringRange()); 8484 } 8485 } 8486 8487 // Note that this may return NULL if there was an error parsing or building 8488 // one of the argument expressions. 
8489 const Expr *CheckFormatHandler::getDataArg(unsigned i) const { 8490 return Args[FirstDataArg + i]; 8491 } 8492 8493 void CheckFormatHandler::DoneProcessing() { 8494 // Does the number of data arguments exceed the number of 8495 // format conversions in the format string? 8496 if (!HasVAListArg) { 8497 // Find any arguments that weren't covered. 8498 CoveredArgs.flip(); 8499 signed notCoveredArg = CoveredArgs.find_first(); 8500 if (notCoveredArg >= 0) { 8501 assert((unsigned)notCoveredArg < NumDataArgs); 8502 UncoveredArg.Update(notCoveredArg, OrigFormatExpr); 8503 } else { 8504 UncoveredArg.setAllCovered(); 8505 } 8506 } 8507 } 8508 8509 void UncoveredArgHandler::Diagnose(Sema &S, bool IsFunctionCall, 8510 const Expr *ArgExpr) { 8511 assert(hasUncoveredArg() && DiagnosticExprs.size() > 0 && 8512 "Invalid state"); 8513 8514 if (!ArgExpr) 8515 return; 8516 8517 SourceLocation Loc = ArgExpr->getBeginLoc(); 8518 8519 if (S.getSourceManager().isInSystemMacro(Loc)) 8520 return; 8521 8522 PartialDiagnostic PDiag = S.PDiag(diag::warn_printf_data_arg_not_used); 8523 for (auto E : DiagnosticExprs) 8524 PDiag << E->getSourceRange(); 8525 8526 CheckFormatHandler::EmitFormatDiagnostic( 8527 S, IsFunctionCall, DiagnosticExprs[0], 8528 PDiag, Loc, /*IsStringLocation*/false, 8529 DiagnosticExprs[0]->getSourceRange()); 8530 } 8531 8532 bool 8533 CheckFormatHandler::HandleInvalidConversionSpecifier(unsigned argIndex, 8534 SourceLocation Loc, 8535 const char *startSpec, 8536 unsigned specifierLen, 8537 const char *csStart, 8538 unsigned csLen) { 8539 bool keepGoing = true; 8540 if (argIndex < NumDataArgs) { 8541 // Consider the argument covered, even though the specifier doesn't 8542 // make sense. 8543 CoveredArgs.set(argIndex); 8544 } 8545 else { 8546 // If argIndex exceeds the number of data arguments we 8547 // don't issue a warning because that is just a cascade of warnings (and 8548 // they may have intended '%%' anyway). We don't want to continue processing 8549 // the format string after this point, however, as we will likely just get 8550 // gibberish when trying to match arguments. 8551 keepGoing = false; 8552 } 8553 8554 StringRef Specifier(csStart, csLen); 8555 8556 // If the specifier is non-printable, it could be the first byte of a UTF-8 8557 // sequence. In that case, print the UTF-8 code point. If not, print the byte 8558 // hex value.
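// For example (illustrative), an invalid specifier byte 0x01 is rendered as
// "\x01", while a byte sequence decoding to U+20AC is rendered as "\u20ac".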
8559 std::string CodePointStr; 8560 if (!llvm::sys::locale::isPrint(*csStart)) { 8561 llvm::UTF32 CodePoint; 8562 const llvm::UTF8 **B = reinterpret_cast<const llvm::UTF8 **>(&csStart); 8563 const llvm::UTF8 *E = 8564 reinterpret_cast<const llvm::UTF8 *>(csStart + csLen); 8565 llvm::ConversionResult Result = 8566 llvm::convertUTF8Sequence(B, E, &CodePoint, llvm::strictConversion); 8567 8568 if (Result != llvm::conversionOK) { 8569 unsigned char FirstChar = *csStart; 8570 CodePoint = (llvm::UTF32)FirstChar; 8571 } 8572 8573 llvm::raw_string_ostream OS(CodePointStr); 8574 if (CodePoint < 256) 8575 OS << "\\x" << llvm::format("%02x", CodePoint); 8576 else if (CodePoint <= 0xFFFF) 8577 OS << "\\u" << llvm::format("%04x", CodePoint); 8578 else 8579 OS << "\\U" << llvm::format("%08x", CodePoint); 8580 OS.flush(); 8581 Specifier = CodePointStr; 8582 } 8583 8584 EmitFormatDiagnostic( 8585 S.PDiag(diag::warn_format_invalid_conversion) << Specifier, Loc, 8586 /*IsStringLocation*/ true, getSpecifierRange(startSpec, specifierLen)); 8587 8588 return keepGoing; 8589 } 8590 8591 void 8592 CheckFormatHandler::HandlePositionalNonpositionalArgs(SourceLocation Loc, 8593 const char *startSpec, 8594 unsigned specifierLen) { 8595 EmitFormatDiagnostic( 8596 S.PDiag(diag::warn_format_mix_positional_nonpositional_args), 8597 Loc, /*isStringLoc*/true, getSpecifierRange(startSpec, specifierLen)); 8598 } 8599 8600 bool 8601 CheckFormatHandler::CheckNumArgs( 8602 const analyze_format_string::FormatSpecifier &FS, 8603 const analyze_format_string::ConversionSpecifier &CS, 8604 const char *startSpecifier, unsigned specifierLen, unsigned argIndex) { 8605 8606 if (argIndex >= NumDataArgs) { 8607 PartialDiagnostic PDiag = FS.usesPositionalArg() 8608 ? (S.PDiag(diag::warn_printf_positional_arg_exceeds_data_args) 8609 << (argIndex+1) << NumDataArgs) 8610 : S.PDiag(diag::warn_printf_insufficient_data_args); 8611 EmitFormatDiagnostic( 8612 PDiag, getLocationOfByte(CS.getStart()), /*IsStringLocation*/true, 8613 getSpecifierRange(startSpecifier, specifierLen)); 8614 8615 // Since more arguments than conversion tokens are given, by extension 8616 // all arguments are covered, so mark this as so. 8617 UncoveredArg.setAllCovered(); 8618 return false; 8619 } 8620 return true; 8621 } 8622 8623 template<typename Range> 8624 void CheckFormatHandler::EmitFormatDiagnostic(PartialDiagnostic PDiag, 8625 SourceLocation Loc, 8626 bool IsStringLocation, 8627 Range StringRange, 8628 ArrayRef<FixItHint> FixIt) { 8629 EmitFormatDiagnostic(S, inFunctionCall, Args[FormatIdx], PDiag, 8630 Loc, IsStringLocation, StringRange, FixIt); 8631 } 8632 8633 /// If the format string is not within the function call, emit a note 8634 /// so that the function call and string are in diagnostic messages. 8635 /// 8636 /// \param InFunctionCall if true, the format string is within the function 8637 /// call and only one diagnostic message will be produced. Otherwise, an 8638 /// extra note will be emitted pointing to location of the format string. 8639 /// 8640 /// \param ArgumentExpr the expression that is passed as the format string 8641 /// argument in the function call. Used for getting locations when two 8642 /// diagnostics are emitted. 8643 /// 8644 /// \param PDiag the callee should already have provided any strings for the 8645 /// diagnostic message. This function only adds locations and fixits 8646 /// to diagnostics. 8647 /// 8648 /// \param Loc primary location for diagnostic. 
If two diagnostics are 8649 /// required, one will be at Loc and a new SourceLocation will be created for 8650 /// the other one. 8651 /// 8652 /// \param IsStringLocation if true, Loc points to the format string should be 8653 /// used for the note. Otherwise, Loc points to the argument list and will 8654 /// be used with PDiag. 8655 /// 8656 /// \param StringRange some or all of the string to highlight. This is 8657 /// templated so it can accept either a CharSourceRange or a SourceRange. 8658 /// 8659 /// \param FixIt optional fix it hint for the format string. 8660 template <typename Range> 8661 void CheckFormatHandler::EmitFormatDiagnostic( 8662 Sema &S, bool InFunctionCall, const Expr *ArgumentExpr, 8663 const PartialDiagnostic &PDiag, SourceLocation Loc, bool IsStringLocation, 8664 Range StringRange, ArrayRef<FixItHint> FixIt) { 8665 if (InFunctionCall) { 8666 const Sema::SemaDiagnosticBuilder &D = S.Diag(Loc, PDiag); 8667 D << StringRange; 8668 D << FixIt; 8669 } else { 8670 S.Diag(IsStringLocation ? ArgumentExpr->getExprLoc() : Loc, PDiag) 8671 << ArgumentExpr->getSourceRange(); 8672 8673 const Sema::SemaDiagnosticBuilder &Note = 8674 S.Diag(IsStringLocation ? Loc : StringRange.getBegin(), 8675 diag::note_format_string_defined); 8676 8677 Note << StringRange; 8678 Note << FixIt; 8679 } 8680 } 8681 8682 //===--- CHECK: Printf format string checking ------------------------------===// 8683 8684 namespace { 8685 8686 class CheckPrintfHandler : public CheckFormatHandler { 8687 public: 8688 CheckPrintfHandler(Sema &s, const FormatStringLiteral *fexpr, 8689 const Expr *origFormatExpr, 8690 const Sema::FormatStringType type, unsigned firstDataArg, 8691 unsigned numDataArgs, bool isObjC, const char *beg, 8692 bool hasVAListArg, ArrayRef<const Expr *> Args, 8693 unsigned formatIdx, bool inFunctionCall, 8694 Sema::VariadicCallType CallType, 8695 llvm::SmallBitVector &CheckedVarArgs, 8696 UncoveredArgHandler &UncoveredArg) 8697 : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg, 8698 numDataArgs, beg, hasVAListArg, Args, formatIdx, 8699 inFunctionCall, CallType, CheckedVarArgs, 8700 UncoveredArg) {} 8701 8702 bool isObjCContext() const { return FSType == Sema::FST_NSString; } 8703 8704 /// Returns true if '%@' specifiers are allowed in the format string. 
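/// For example (illustrative), '%@' is accepted in NSLog(@"count: %@", n) and
/// in os_log format strings, but rejected in a plain printf format string.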
8705 bool allowsObjCArg() const { 8706 return FSType == Sema::FST_NSString || FSType == Sema::FST_OSLog || 8707 FSType == Sema::FST_OSTrace; 8708 } 8709 8710 bool HandleInvalidPrintfConversionSpecifier( 8711 const analyze_printf::PrintfSpecifier &FS, 8712 const char *startSpecifier, 8713 unsigned specifierLen) override; 8714 8715 void handleInvalidMaskType(StringRef MaskType) override; 8716 8717 bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS, 8718 const char *startSpecifier, 8719 unsigned specifierLen) override; 8720 bool checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, 8721 const char *StartSpecifier, 8722 unsigned SpecifierLen, 8723 const Expr *E); 8724 8725 bool HandleAmount(const analyze_format_string::OptionalAmount &Amt, unsigned k, 8726 const char *startSpecifier, unsigned specifierLen); 8727 void HandleInvalidAmount(const analyze_printf::PrintfSpecifier &FS, 8728 const analyze_printf::OptionalAmount &Amt, 8729 unsigned type, 8730 const char *startSpecifier, unsigned specifierLen); 8731 void HandleFlag(const analyze_printf::PrintfSpecifier &FS, 8732 const analyze_printf::OptionalFlag &flag, 8733 const char *startSpecifier, unsigned specifierLen); 8734 void HandleIgnoredFlag(const analyze_printf::PrintfSpecifier &FS, 8735 const analyze_printf::OptionalFlag &ignoredFlag, 8736 const analyze_printf::OptionalFlag &flag, 8737 const char *startSpecifier, unsigned specifierLen); 8738 bool checkForCStrMembers(const analyze_printf::ArgType &AT, 8739 const Expr *E); 8740 8741 void HandleEmptyObjCModifierFlag(const char *startFlag, 8742 unsigned flagLen) override; 8743 8744 void HandleInvalidObjCModifierFlag(const char *startFlag, 8745 unsigned flagLen) override; 8746 8747 void HandleObjCFlagsWithNonObjCConversion(const char *flagsStart, 8748 const char *flagsEnd, 8749 const char *conversionPosition) 8750 override; 8751 }; 8752 8753 } // namespace 8754 8755 bool CheckPrintfHandler::HandleInvalidPrintfConversionSpecifier( 8756 const analyze_printf::PrintfSpecifier &FS, 8757 const char *startSpecifier, 8758 unsigned specifierLen) { 8759 const analyze_printf::PrintfConversionSpecifier &CS = 8760 FS.getConversionSpecifier(); 8761 8762 return HandleInvalidConversionSpecifier(FS.getArgIndex(), 8763 getLocationOfByte(CS.getStart()), 8764 startSpecifier, specifierLen, 8765 CS.getStart(), CS.getLength()); 8766 } 8767 8768 void CheckPrintfHandler::handleInvalidMaskType(StringRef MaskType) { 8769 S.Diag(getLocationOfByte(MaskType.data()), diag::err_invalid_mask_type_size); 8770 } 8771 8772 bool CheckPrintfHandler::HandleAmount( 8773 const analyze_format_string::OptionalAmount &Amt, 8774 unsigned k, const char *startSpecifier, 8775 unsigned specifierLen) { 8776 if (Amt.hasDataArgument()) { 8777 if (!HasVAListArg) { 8778 unsigned argIndex = Amt.getArgIndex(); 8779 if (argIndex >= NumDataArgs) { 8780 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_missing_arg) 8781 << k, 8782 getLocationOfByte(Amt.getStart()), 8783 /*IsStringLocation*/true, 8784 getSpecifierRange(startSpecifier, specifierLen)); 8785 // Don't do any more checking. We will just emit 8786 // spurious errors. 8787 return false; 8788 } 8789 8790 // Type check the data argument. It should be an 'int'. 8791 // Although not in conformance with C99, we also allow the argument to be 8792 // an 'unsigned int' as that is a reasonably safe case. GCC also 8793 // doesn't emit a warning for that case. 
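// For example (illustrative), printf("%*d", width, value) requires 'width' to
// be an int (or unsigned int); passing a double there is diagnosed below.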
8794 CoveredArgs.set(argIndex); 8795 const Expr *Arg = getDataArg(argIndex); 8796 if (!Arg) 8797 return false; 8798 8799 QualType T = Arg->getType(); 8800 8801 const analyze_printf::ArgType &AT = Amt.getArgType(S.Context); 8802 assert(AT.isValid()); 8803 8804 if (!AT.matchesType(S.Context, T)) { 8805 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_wrong_type) 8806 << k << AT.getRepresentativeTypeName(S.Context) 8807 << T << Arg->getSourceRange(), 8808 getLocationOfByte(Amt.getStart()), 8809 /*IsStringLocation*/true, 8810 getSpecifierRange(startSpecifier, specifierLen)); 8811 // Don't do any more checking. We will just emit 8812 // spurious errors. 8813 return false; 8814 } 8815 } 8816 } 8817 return true; 8818 } 8819 8820 void CheckPrintfHandler::HandleInvalidAmount( 8821 const analyze_printf::PrintfSpecifier &FS, 8822 const analyze_printf::OptionalAmount &Amt, 8823 unsigned type, 8824 const char *startSpecifier, 8825 unsigned specifierLen) { 8826 const analyze_printf::PrintfConversionSpecifier &CS = 8827 FS.getConversionSpecifier(); 8828 8829 FixItHint fixit = 8830 Amt.getHowSpecified() == analyze_printf::OptionalAmount::Constant 8831 ? FixItHint::CreateRemoval(getSpecifierRange(Amt.getStart(), 8832 Amt.getConstantLength())) 8833 : FixItHint(); 8834 8835 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_optional_amount) 8836 << type << CS.toString(), 8837 getLocationOfByte(Amt.getStart()), 8838 /*IsStringLocation*/true, 8839 getSpecifierRange(startSpecifier, specifierLen), 8840 fixit); 8841 } 8842 8843 void CheckPrintfHandler::HandleFlag(const analyze_printf::PrintfSpecifier &FS, 8844 const analyze_printf::OptionalFlag &flag, 8845 const char *startSpecifier, 8846 unsigned specifierLen) { 8847 // Warn about pointless flag with a fixit removal. 8848 const analyze_printf::PrintfConversionSpecifier &CS = 8849 FS.getConversionSpecifier(); 8850 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_flag) 8851 << flag.toString() << CS.toString(), 8852 getLocationOfByte(flag.getPosition()), 8853 /*IsStringLocation*/true, 8854 getSpecifierRange(startSpecifier, specifierLen), 8855 FixItHint::CreateRemoval( 8856 getSpecifierRange(flag.getPosition(), 1))); 8857 } 8858 8859 void CheckPrintfHandler::HandleIgnoredFlag( 8860 const analyze_printf::PrintfSpecifier &FS, 8861 const analyze_printf::OptionalFlag &ignoredFlag, 8862 const analyze_printf::OptionalFlag &flag, 8863 const char *startSpecifier, 8864 unsigned specifierLen) { 8865 // Warn about ignored flag with a fixit removal. 8866 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_ignored_flag) 8867 << ignoredFlag.toString() << flag.toString(), 8868 getLocationOfByte(ignoredFlag.getPosition()), 8869 /*IsStringLocation*/true, 8870 getSpecifierRange(startSpecifier, specifierLen), 8871 FixItHint::CreateRemoval( 8872 getSpecifierRange(ignoredFlag.getPosition(), 1))); 8873 } 8874 8875 void CheckPrintfHandler::HandleEmptyObjCModifierFlag(const char *startFlag, 8876 unsigned flagLen) { 8877 // Warn about an empty flag. 8878 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_empty_objc_flag), 8879 getLocationOfByte(startFlag), 8880 /*IsStringLocation*/true, 8881 getSpecifierRange(startFlag, flagLen)); 8882 } 8883 8884 void CheckPrintfHandler::HandleInvalidObjCModifierFlag(const char *startFlag, 8885 unsigned flagLen) { 8886 // Warn about an invalid flag. 
8887 auto Range = getSpecifierRange(startFlag, flagLen); 8888 StringRef flag(startFlag, flagLen); 8889 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_invalid_objc_flag) << flag, 8890 getLocationOfByte(startFlag), 8891 /*IsStringLocation*/true, 8892 Range, FixItHint::CreateRemoval(Range)); 8893 } 8894 8895 void CheckPrintfHandler::HandleObjCFlagsWithNonObjCConversion( 8896 const char *flagsStart, const char *flagsEnd, const char *conversionPosition) { 8897 // Warn about using '[...]' without a '@' conversion. 8898 auto Range = getSpecifierRange(flagsStart, flagsEnd - flagsStart + 1); 8899 auto diag = diag::warn_printf_ObjCflags_without_ObjCConversion; 8900 EmitFormatDiagnostic(S.PDiag(diag) << StringRef(conversionPosition, 1), 8901 getLocationOfByte(conversionPosition), 8902 /*IsStringLocation*/true, 8903 Range, FixItHint::CreateRemoval(Range)); 8904 } 8905 8906 // Determines if the specified is a C++ class or struct containing 8907 // a member with the specified name and kind (e.g. a CXXMethodDecl named 8908 // "c_str()"). 8909 template<typename MemberKind> 8910 static llvm::SmallPtrSet<MemberKind*, 1> 8911 CXXRecordMembersNamed(StringRef Name, Sema &S, QualType Ty) { 8912 const RecordType *RT = Ty->getAs<RecordType>(); 8913 llvm::SmallPtrSet<MemberKind*, 1> Results; 8914 8915 if (!RT) 8916 return Results; 8917 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()); 8918 if (!RD || !RD->getDefinition()) 8919 return Results; 8920 8921 LookupResult R(S, &S.Context.Idents.get(Name), SourceLocation(), 8922 Sema::LookupMemberName); 8923 R.suppressDiagnostics(); 8924 8925 // We just need to include all members of the right kind turned up by the 8926 // filter, at this point. 8927 if (S.LookupQualifiedName(R, RT->getDecl())) 8928 for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) { 8929 NamedDecl *decl = (*I)->getUnderlyingDecl(); 8930 if (MemberKind *FK = dyn_cast<MemberKind>(decl)) 8931 Results.insert(FK); 8932 } 8933 return Results; 8934 } 8935 8936 /// Check if we could call '.c_str()' on an object. 8937 /// 8938 /// FIXME: This returns the wrong results in some cases (if cv-qualifiers don't 8939 /// allow the call, or if it would be ambiguous). 8940 bool Sema::hasCStrMethod(const Expr *E) { 8941 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>; 8942 8943 MethodSet Results = 8944 CXXRecordMembersNamed<CXXMethodDecl>("c_str", *this, E->getType()); 8945 for (MethodSet::iterator MI = Results.begin(), ME = Results.end(); 8946 MI != ME; ++MI) 8947 if ((*MI)->getMinRequiredArguments() == 0) 8948 return true; 8949 return false; 8950 } 8951 8952 // Check if a (w)string was passed when a (w)char* was needed, and offer a 8953 // better diagnostic if so. AT is assumed to be valid. 8954 // Returns true when a c_str() conversion method is found. 8955 bool CheckPrintfHandler::checkForCStrMembers( 8956 const analyze_printf::ArgType &AT, const Expr *E) { 8957 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>; 8958 8959 MethodSet Results = 8960 CXXRecordMembersNamed<CXXMethodDecl>("c_str", S, E->getType()); 8961 8962 for (MethodSet::iterator MI = Results.begin(), ME = Results.end(); 8963 MI != ME; ++MI) { 8964 const CXXMethodDecl *Method = *MI; 8965 if (Method->getMinRequiredArguments() == 0 && 8966 AT.matchesType(S.Context, Method->getReturnType())) { 8967 // FIXME: Suggest parens if the expression needs them. 
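// For example (illustrative), passing a std::string where "%s" expects a
// 'char *' produces a note suggesting the insertion of '.c_str()'.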
8968 SourceLocation EndLoc = S.getLocForEndOfToken(E->getEndLoc()); 8969 S.Diag(E->getBeginLoc(), diag::note_printf_c_str) 8970 << "c_str()" << FixItHint::CreateInsertion(EndLoc, ".c_str()"); 8971 return true; 8972 } 8973 } 8974 8975 return false; 8976 } 8977 8978 bool 8979 CheckPrintfHandler::HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier 8980 &FS, 8981 const char *startSpecifier, 8982 unsigned specifierLen) { 8983 using namespace analyze_format_string; 8984 using namespace analyze_printf; 8985 8986 const PrintfConversionSpecifier &CS = FS.getConversionSpecifier(); 8987 8988 if (FS.consumesDataArgument()) { 8989 if (atFirstArg) { 8990 atFirstArg = false; 8991 usesPositionalArgs = FS.usesPositionalArg(); 8992 } 8993 else if (usesPositionalArgs != FS.usesPositionalArg()) { 8994 HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()), 8995 startSpecifier, specifierLen); 8996 return false; 8997 } 8998 } 8999 9000 // First check if the field width, precision, and conversion specifier 9001 // have matching data arguments. 9002 if (!HandleAmount(FS.getFieldWidth(), /* field width */ 0, 9003 startSpecifier, specifierLen)) { 9004 return false; 9005 } 9006 9007 if (!HandleAmount(FS.getPrecision(), /* precision */ 1, 9008 startSpecifier, specifierLen)) { 9009 return false; 9010 } 9011 9012 if (!CS.consumesDataArgument()) { 9013 // FIXME: Technically specifying a precision or field width here 9014 // makes no sense. Worth issuing a warning at some point. 9015 return true; 9016 } 9017 9018 // Consume the argument. 9019 unsigned argIndex = FS.getArgIndex(); 9020 if (argIndex < NumDataArgs) { 9021 // The check to see if the argIndex is valid will come later. 9022 // We set the bit here because we may exit early from this 9023 // function if we encounter some other error. 9024 CoveredArgs.set(argIndex); 9025 } 9026 9027 // FreeBSD kernel extensions. 9028 if (CS.getKind() == ConversionSpecifier::FreeBSDbArg || 9029 CS.getKind() == ConversionSpecifier::FreeBSDDArg) { 9030 // We need at least two arguments. 9031 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex + 1)) 9032 return false; 9033 9034 // Claim the second argument. 9035 CoveredArgs.set(argIndex + 1); 9036 9037 // Type check the first argument (int for %b, pointer for %D) 9038 const Expr *Ex = getDataArg(argIndex); 9039 const analyze_printf::ArgType &AT = 9040 (CS.getKind() == ConversionSpecifier::FreeBSDbArg) ? 9041 ArgType(S.Context.IntTy) : ArgType::CPointerTy; 9042 if (AT.isValid() && !AT.matchesType(S.Context, Ex->getType())) 9043 EmitFormatDiagnostic( 9044 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 9045 << AT.getRepresentativeTypeName(S.Context) << Ex->getType() 9046 << false << Ex->getSourceRange(), 9047 Ex->getBeginLoc(), /*IsStringLocation*/ false, 9048 getSpecifierRange(startSpecifier, specifierLen)); 9049 9050 // Type check the second argument (char * for both %b and %D) 9051 Ex = getDataArg(argIndex + 1); 9052 const analyze_printf::ArgType &AT2 = ArgType::CStrTy; 9053 if (AT2.isValid() && !AT2.matchesType(S.Context, Ex->getType())) 9054 EmitFormatDiagnostic( 9055 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 9056 << AT2.getRepresentativeTypeName(S.Context) << Ex->getType() 9057 << false << Ex->getSourceRange(), 9058 Ex->getBeginLoc(), /*IsStringLocation*/ false, 9059 getSpecifierRange(startSpecifier, specifierLen)); 9060 9061 return true; 9062 } 9063 9064 // Check for using an Objective-C specific conversion specifier 9065 // in a non-ObjC literal. 
9066 if (!allowsObjCArg() && CS.isObjCArg()) { 9067 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 9068 specifierLen); 9069 } 9070 9071 // %P can only be used with os_log. 9072 if (FSType != Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::PArg) { 9073 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 9074 specifierLen); 9075 } 9076 9077 // %n is not allowed with os_log. 9078 if (FSType == Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::nArg) { 9079 EmitFormatDiagnostic(S.PDiag(diag::warn_os_log_format_narg), 9080 getLocationOfByte(CS.getStart()), 9081 /*IsStringLocation*/ false, 9082 getSpecifierRange(startSpecifier, specifierLen)); 9083 9084 return true; 9085 } 9086 9087 // Only scalars are allowed for os_trace. 9088 if (FSType == Sema::FST_OSTrace && 9089 (CS.getKind() == ConversionSpecifier::PArg || 9090 CS.getKind() == ConversionSpecifier::sArg || 9091 CS.getKind() == ConversionSpecifier::ObjCObjArg)) { 9092 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 9093 specifierLen); 9094 } 9095 9096 // Check for use of public/private annotation outside of os_log(). 9097 if (FSType != Sema::FST_OSLog) { 9098 if (FS.isPublic().isSet()) { 9099 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation) 9100 << "public", 9101 getLocationOfByte(FS.isPublic().getPosition()), 9102 /*IsStringLocation*/ false, 9103 getSpecifierRange(startSpecifier, specifierLen)); 9104 } 9105 if (FS.isPrivate().isSet()) { 9106 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation) 9107 << "private", 9108 getLocationOfByte(FS.isPrivate().getPosition()), 9109 /*IsStringLocation*/ false, 9110 getSpecifierRange(startSpecifier, specifierLen)); 9111 } 9112 } 9113 9114 // Check for invalid use of field width 9115 if (!FS.hasValidFieldWidth()) { 9116 HandleInvalidAmount(FS, FS.getFieldWidth(), /* field width */ 0, 9117 startSpecifier, specifierLen); 9118 } 9119 9120 // Check for invalid use of precision 9121 if (!FS.hasValidPrecision()) { 9122 HandleInvalidAmount(FS, FS.getPrecision(), /* precision */ 1, 9123 startSpecifier, specifierLen); 9124 } 9125 9126 // Precision is mandatory for %P specifier. 9127 if (CS.getKind() == ConversionSpecifier::PArg && 9128 FS.getPrecision().getHowSpecified() == OptionalAmount::NotSpecified) { 9129 EmitFormatDiagnostic(S.PDiag(diag::warn_format_P_no_precision), 9130 getLocationOfByte(startSpecifier), 9131 /*IsStringLocation*/ false, 9132 getSpecifierRange(startSpecifier, specifierLen)); 9133 } 9134 9135 // Check each flag does not conflict with any other component. 
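  // e.g. (illustrative) "%0s" applies the '0' (leading-zeros) flag and "%+s"
  // the '+' flag to a string conversion; neither flag is meaningful there, so
  // the checks below diagnose them.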
9136 if (!FS.hasValidThousandsGroupingPrefix()) 9137 HandleFlag(FS, FS.hasThousandsGrouping(), startSpecifier, specifierLen); 9138 if (!FS.hasValidLeadingZeros()) 9139 HandleFlag(FS, FS.hasLeadingZeros(), startSpecifier, specifierLen); 9140 if (!FS.hasValidPlusPrefix()) 9141 HandleFlag(FS, FS.hasPlusPrefix(), startSpecifier, specifierLen); 9142 if (!FS.hasValidSpacePrefix()) 9143 HandleFlag(FS, FS.hasSpacePrefix(), startSpecifier, specifierLen); 9144 if (!FS.hasValidAlternativeForm()) 9145 HandleFlag(FS, FS.hasAlternativeForm(), startSpecifier, specifierLen); 9146 if (!FS.hasValidLeftJustified()) 9147 HandleFlag(FS, FS.isLeftJustified(), startSpecifier, specifierLen); 9148 9149 // Check that flags are not ignored by another flag 9150 if (FS.hasSpacePrefix() && FS.hasPlusPrefix()) // ' ' ignored by '+' 9151 HandleIgnoredFlag(FS, FS.hasSpacePrefix(), FS.hasPlusPrefix(), 9152 startSpecifier, specifierLen); 9153 if (FS.hasLeadingZeros() && FS.isLeftJustified()) // '0' ignored by '-' 9154 HandleIgnoredFlag(FS, FS.hasLeadingZeros(), FS.isLeftJustified(), 9155 startSpecifier, specifierLen); 9156 9157 // Check the length modifier is valid with the given conversion specifier. 9158 if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(), 9159 S.getLangOpts())) 9160 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 9161 diag::warn_format_nonsensical_length); 9162 else if (!FS.hasStandardLengthModifier()) 9163 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen); 9164 else if (!FS.hasStandardLengthConversionCombination()) 9165 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 9166 diag::warn_format_non_standard_conversion_spec); 9167 9168 if (!FS.hasStandardConversionSpecifier(S.getLangOpts())) 9169 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen); 9170 9171 // The remaining checks depend on the data arguments. 9172 if (HasVAListArg) 9173 return true; 9174 9175 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex)) 9176 return false; 9177 9178 const Expr *Arg = getDataArg(argIndex); 9179 if (!Arg) 9180 return true; 9181 9182 return checkFormatExpr(FS, startSpecifier, specifierLen, Arg); 9183 } 9184 9185 static bool requiresParensToAddCast(const Expr *E) { 9186 // FIXME: We should have a general way to reason about operator 9187 // precedence and whether parens are actually needed here. 9188 // Take care of a few common cases where they aren't. 
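  // e.g. a suggested cast can be prepended directly to a plain variable or
  // call ("(NSInteger)x", "(NSInteger)f(x)"), but an expression like "x + y"
  // must become "(NSInteger)(x + y)" (illustrative names and cast type).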
9189 const Expr *Inside = E->IgnoreImpCasts(); 9190 if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(Inside)) 9191 Inside = POE->getSyntacticForm()->IgnoreImpCasts(); 9192 9193 switch (Inside->getStmtClass()) { 9194 case Stmt::ArraySubscriptExprClass: 9195 case Stmt::CallExprClass: 9196 case Stmt::CharacterLiteralClass: 9197 case Stmt::CXXBoolLiteralExprClass: 9198 case Stmt::DeclRefExprClass: 9199 case Stmt::FloatingLiteralClass: 9200 case Stmt::IntegerLiteralClass: 9201 case Stmt::MemberExprClass: 9202 case Stmt::ObjCArrayLiteralClass: 9203 case Stmt::ObjCBoolLiteralExprClass: 9204 case Stmt::ObjCBoxedExprClass: 9205 case Stmt::ObjCDictionaryLiteralClass: 9206 case Stmt::ObjCEncodeExprClass: 9207 case Stmt::ObjCIvarRefExprClass: 9208 case Stmt::ObjCMessageExprClass: 9209 case Stmt::ObjCPropertyRefExprClass: 9210 case Stmt::ObjCStringLiteralClass: 9211 case Stmt::ObjCSubscriptRefExprClass: 9212 case Stmt::ParenExprClass: 9213 case Stmt::StringLiteralClass: 9214 case Stmt::UnaryOperatorClass: 9215 return false; 9216 default: 9217 return true; 9218 } 9219 } 9220 9221 static std::pair<QualType, StringRef> 9222 shouldNotPrintDirectly(const ASTContext &Context, 9223 QualType IntendedTy, 9224 const Expr *E) { 9225 // Use a 'while' to peel off layers of typedefs. 9226 QualType TyTy = IntendedTy; 9227 while (const TypedefType *UserTy = TyTy->getAs<TypedefType>()) { 9228 StringRef Name = UserTy->getDecl()->getName(); 9229 QualType CastTy = llvm::StringSwitch<QualType>(Name) 9230 .Case("CFIndex", Context.getNSIntegerType()) 9231 .Case("NSInteger", Context.getNSIntegerType()) 9232 .Case("NSUInteger", Context.getNSUIntegerType()) 9233 .Case("SInt32", Context.IntTy) 9234 .Case("UInt32", Context.UnsignedIntTy) 9235 .Default(QualType()); 9236 9237 if (!CastTy.isNull()) 9238 return std::make_pair(CastTy, Name); 9239 9240 TyTy = UserTy->desugar(); 9241 } 9242 9243 // Strip parens if necessary. 9244 if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) 9245 return shouldNotPrintDirectly(Context, 9246 PE->getSubExpr()->getType(), 9247 PE->getSubExpr()); 9248 9249 // If this is a conditional expression, then its result type is constructed 9250 // via usual arithmetic conversions and thus there might be no necessary 9251 // typedef sugar there. Recurse to operands to check for NSInteger & 9252 // Co. usage condition. 9253 if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) { 9254 QualType TrueTy, FalseTy; 9255 StringRef TrueName, FalseName; 9256 9257 std::tie(TrueTy, TrueName) = 9258 shouldNotPrintDirectly(Context, 9259 CO->getTrueExpr()->getType(), 9260 CO->getTrueExpr()); 9261 std::tie(FalseTy, FalseName) = 9262 shouldNotPrintDirectly(Context, 9263 CO->getFalseExpr()->getType(), 9264 CO->getFalseExpr()); 9265 9266 if (TrueTy == FalseTy) 9267 return std::make_pair(TrueTy, TrueName); 9268 else if (TrueTy.isNull()) 9269 return std::make_pair(FalseTy, FalseName); 9270 else if (FalseTy.isNull()) 9271 return std::make_pair(TrueTy, TrueName); 9272 } 9273 9274 return std::make_pair(QualType(), StringRef()); 9275 } 9276 9277 /// Return true if \p ICE is an implicit argument promotion of an arithmetic 9278 /// type. Bit-field 'promotions' from a higher ranked type to a lower ranked 9279 /// type do not count. 9280 static bool 9281 isArithmeticArgumentPromotion(Sema &S, const ImplicitCastExpr *ICE) { 9282 QualType From = ICE->getSubExpr()->getType(); 9283 QualType To = ICE->getType(); 9284 // It's an integer promotion if the destination type is the promoted 9285 // source type. 
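  // e.g. a 'short' argument passed to a variadic function is promoted to
  // 'int', and a 'float' argument is promoted to 'double'.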
9286 if (ICE->getCastKind() == CK_IntegralCast && 9287 From->isPromotableIntegerType() && 9288 S.Context.getPromotedIntegerType(From) == To) 9289 return true; 9290 // Look through vector types, since we do default argument promotion for 9291 // those in OpenCL. 9292 if (const auto *VecTy = From->getAs<ExtVectorType>()) 9293 From = VecTy->getElementType(); 9294 if (const auto *VecTy = To->getAs<ExtVectorType>()) 9295 To = VecTy->getElementType(); 9296 // It's a floating promotion if the source type is a lower rank. 9297 return ICE->getCastKind() == CK_FloatingCast && 9298 S.Context.getFloatingTypeOrder(From, To) < 0; 9299 } 9300 9301 bool 9302 CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, 9303 const char *StartSpecifier, 9304 unsigned SpecifierLen, 9305 const Expr *E) { 9306 using namespace analyze_format_string; 9307 using namespace analyze_printf; 9308 9309 // Now type check the data expression that matches the 9310 // format specifier. 9311 const analyze_printf::ArgType &AT = FS.getArgType(S.Context, isObjCContext()); 9312 if (!AT.isValid()) 9313 return true; 9314 9315 QualType ExprTy = E->getType(); 9316 while (const TypeOfExprType *TET = dyn_cast<TypeOfExprType>(ExprTy)) { 9317 ExprTy = TET->getUnderlyingExpr()->getType(); 9318 } 9319 9320 // Diagnose attempts to print a boolean value as a character. Unlike other 9321 // -Wformat diagnostics, this is fine from a type perspective, but it still 9322 // doesn't make sense. 9323 if (FS.getConversionSpecifier().getKind() == ConversionSpecifier::cArg && 9324 E->isKnownToHaveBooleanValue()) { 9325 const CharSourceRange &CSR = 9326 getSpecifierRange(StartSpecifier, SpecifierLen); 9327 SmallString<4> FSString; 9328 llvm::raw_svector_ostream os(FSString); 9329 FS.toString(os); 9330 EmitFormatDiagnostic(S.PDiag(diag::warn_format_bool_as_character) 9331 << FSString, 9332 E->getExprLoc(), false, CSR); 9333 return true; 9334 } 9335 9336 analyze_printf::ArgType::MatchKind Match = AT.matchesType(S.Context, ExprTy); 9337 if (Match == analyze_printf::ArgType::Match) 9338 return true; 9339 9340 // Look through argument promotions for our error message's reported type. 9341 // This includes the integral and floating promotions, but excludes array 9342 // and function pointer decay (seeing that an argument intended to be a 9343 // string has type 'char [6]' is probably more confusing than 'char *') and 9344 // certain bitfield promotions (bitfields can be 'demoted' to a lesser type). 9345 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) { 9346 if (isArithmeticArgumentPromotion(S, ICE)) { 9347 E = ICE->getSubExpr(); 9348 ExprTy = E->getType(); 9349 9350 // Check if we didn't match because of an implicit cast from a 'char' 9351 // or 'short' to an 'int'. This is done because printf is a varargs 9352 // function. 9353 if (ICE->getType() == S.Context.IntTy || 9354 ICE->getType() == S.Context.UnsignedIntTy) { 9355 // All further checking is done on the subexpression 9356 const analyze_printf::ArgType::MatchKind ImplicitMatch = 9357 AT.matchesType(S.Context, ExprTy); 9358 if (ImplicitMatch == analyze_printf::ArgType::Match) 9359 return true; 9360 if (ImplicitMatch == ArgType::NoMatchPedantic || 9361 ImplicitMatch == ArgType::NoMatchTypeConfusion) 9362 Match = ImplicitMatch; 9363 } 9364 } 9365 } else if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E)) { 9366 // Special case for 'a', which has type 'int' in C. 
9367 // Note, however, that we do /not/ want to treat multibyte constants like 9368 // 'MooV' as characters! This form is deprecated but still exists. In 9369 // addition, don't treat expressions as of type 'char' if one byte length 9370 // modifier is provided. 9371 if (ExprTy == S.Context.IntTy && 9372 FS.getLengthModifier().getKind() != LengthModifier::AsChar) 9373 if (llvm::isUIntN(S.Context.getCharWidth(), CL->getValue())) 9374 ExprTy = S.Context.CharTy; 9375 } 9376 9377 // Look through enums to their underlying type. 9378 bool IsEnum = false; 9379 if (auto EnumTy = ExprTy->getAs<EnumType>()) { 9380 ExprTy = EnumTy->getDecl()->getIntegerType(); 9381 IsEnum = true; 9382 } 9383 9384 // %C in an Objective-C context prints a unichar, not a wchar_t. 9385 // If the argument is an integer of some kind, believe the %C and suggest 9386 // a cast instead of changing the conversion specifier. 9387 QualType IntendedTy = ExprTy; 9388 if (isObjCContext() && 9389 FS.getConversionSpecifier().getKind() == ConversionSpecifier::CArg) { 9390 if (ExprTy->isIntegralOrUnscopedEnumerationType() && 9391 !ExprTy->isCharType()) { 9392 // 'unichar' is defined as a typedef of unsigned short, but we should 9393 // prefer using the typedef if it is visible. 9394 IntendedTy = S.Context.UnsignedShortTy; 9395 9396 // While we are here, check if the value is an IntegerLiteral that happens 9397 // to be within the valid range. 9398 if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E)) { 9399 const llvm::APInt &V = IL->getValue(); 9400 if (V.getActiveBits() <= S.Context.getTypeSize(IntendedTy)) 9401 return true; 9402 } 9403 9404 LookupResult Result(S, &S.Context.Idents.get("unichar"), E->getBeginLoc(), 9405 Sema::LookupOrdinaryName); 9406 if (S.LookupName(Result, S.getCurScope())) { 9407 NamedDecl *ND = Result.getFoundDecl(); 9408 if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(ND)) 9409 if (TD->getUnderlyingType() == IntendedTy) 9410 IntendedTy = S.Context.getTypedefType(TD); 9411 } 9412 } 9413 } 9414 9415 // Special-case some of Darwin's platform-independence types by suggesting 9416 // casts to primitive types that are known to be large enough. 9417 bool ShouldNotPrintDirectly = false; StringRef CastTyName; 9418 if (S.Context.getTargetInfo().getTriple().isOSDarwin()) { 9419 QualType CastTy; 9420 std::tie(CastTy, CastTyName) = shouldNotPrintDirectly(S.Context, IntendedTy, E); 9421 if (!CastTy.isNull()) { 9422 // %zi/%zu and %td/%tu are OK to use for NSInteger/NSUInteger of type int 9423 // (long in ASTContext). Only complain to pedants. 9424 if ((CastTyName == "NSInteger" || CastTyName == "NSUInteger") && 9425 (AT.isSizeT() || AT.isPtrdiffT()) && 9426 AT.matchesType(S.Context, CastTy)) 9427 Match = ArgType::NoMatchPedantic; 9428 IntendedTy = CastTy; 9429 ShouldNotPrintDirectly = true; 9430 } 9431 } 9432 9433 // We may be able to offer a FixItHint if it is a supported type. 
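  // e.g. (illustrative) for printf("%d", someLongValue) the specifier can be
  // rewritten to "%ld"; the fix-it below is only offered when fixType()
  // succeeds for the argument's type.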
9434 PrintfSpecifier fixedFS = FS; 9435 bool Success = 9436 fixedFS.fixType(IntendedTy, S.getLangOpts(), S.Context, isObjCContext()); 9437 9438 if (Success) { 9439 // Get the fix string from the fixed format specifier 9440 SmallString<16> buf; 9441 llvm::raw_svector_ostream os(buf); 9442 fixedFS.toString(os); 9443 9444 CharSourceRange SpecRange = getSpecifierRange(StartSpecifier, SpecifierLen); 9445 9446 if (IntendedTy == ExprTy && !ShouldNotPrintDirectly) { 9447 unsigned Diag; 9448 switch (Match) { 9449 case ArgType::Match: llvm_unreachable("expected non-matching"); 9450 case ArgType::NoMatchPedantic: 9451 Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic; 9452 break; 9453 case ArgType::NoMatchTypeConfusion: 9454 Diag = diag::warn_format_conversion_argument_type_mismatch_confusion; 9455 break; 9456 case ArgType::NoMatch: 9457 Diag = diag::warn_format_conversion_argument_type_mismatch; 9458 break; 9459 } 9460 9461 // In this case, the specifier is wrong and should be changed to match 9462 // the argument. 9463 EmitFormatDiagnostic(S.PDiag(Diag) 9464 << AT.getRepresentativeTypeName(S.Context) 9465 << IntendedTy << IsEnum << E->getSourceRange(), 9466 E->getBeginLoc(), 9467 /*IsStringLocation*/ false, SpecRange, 9468 FixItHint::CreateReplacement(SpecRange, os.str())); 9469 } else { 9470 // The canonical type for formatting this value is different from the 9471 // actual type of the expression. (This occurs, for example, with Darwin's 9472 // NSInteger on 32-bit platforms, where it is typedef'd as 'int', but 9473 // should be printed as 'long' for 64-bit compatibility.) 9474 // Rather than emitting a normal format/argument mismatch, we want to 9475 // add a cast to the recommended type (and correct the format string 9476 // if necessary). 9477 SmallString<16> CastBuf; 9478 llvm::raw_svector_ostream CastFix(CastBuf); 9479 CastFix << "("; 9480 IntendedTy.print(CastFix, S.Context.getPrintingPolicy()); 9481 CastFix << ")"; 9482 9483 SmallVector<FixItHint,4> Hints; 9484 if (!AT.matchesType(S.Context, IntendedTy) || ShouldNotPrintDirectly) 9485 Hints.push_back(FixItHint::CreateReplacement(SpecRange, os.str())); 9486 9487 if (const CStyleCastExpr *CCast = dyn_cast<CStyleCastExpr>(E)) { 9488 // If there's already a cast present, just replace it. 9489 SourceRange CastRange(CCast->getLParenLoc(), CCast->getRParenLoc()); 9490 Hints.push_back(FixItHint::CreateReplacement(CastRange, CastFix.str())); 9491 9492 } else if (!requiresParensToAddCast(E)) { 9493 // If the expression has high enough precedence, 9494 // just write the C-style cast. 9495 Hints.push_back( 9496 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str())); 9497 } else { 9498 // Otherwise, add parens around the expression as well as the cast. 9499 CastFix << "("; 9500 Hints.push_back( 9501 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str())); 9502 9503 SourceLocation After = S.getLocForEndOfToken(E->getEndLoc()); 9504 Hints.push_back(FixItHint::CreateInsertion(After, ")")); 9505 } 9506 9507 if (ShouldNotPrintDirectly) { 9508 // The expression has a type that should not be printed directly. 9509 // We extract the name from the typedef because we don't want to show 9510 // the underlying type in the diagnostic. 9511 StringRef Name; 9512 if (const TypedefType *TypedefTy = dyn_cast<TypedefType>(ExprTy)) 9513 Name = TypedefTy->getDecl()->getName(); 9514 else 9515 Name = CastTyName; 9516 unsigned Diag = Match == ArgType::NoMatchPedantic 9517 ? 
diag::warn_format_argument_needs_cast_pedantic 9518 : diag::warn_format_argument_needs_cast; 9519 EmitFormatDiagnostic(S.PDiag(Diag) << Name << IntendedTy << IsEnum 9520 << E->getSourceRange(), 9521 E->getBeginLoc(), /*IsStringLocation=*/false, 9522 SpecRange, Hints); 9523 } else { 9524 // In this case, the expression could be printed using a different 9525 // specifier, but we've decided that the specifier is probably correct 9526 // and we should cast instead. Just use the normal warning message. 9527 EmitFormatDiagnostic( 9528 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 9529 << AT.getRepresentativeTypeName(S.Context) << ExprTy << IsEnum 9530 << E->getSourceRange(), 9531 E->getBeginLoc(), /*IsStringLocation*/ false, SpecRange, Hints); 9532 } 9533 } 9534 } else { 9535 const CharSourceRange &CSR = getSpecifierRange(StartSpecifier, 9536 SpecifierLen); 9537 // Since the warning for passing non-POD types to variadic functions 9538 // was deferred until now, we emit a warning for non-POD 9539 // arguments here. 9540 switch (S.isValidVarArgType(ExprTy)) { 9541 case Sema::VAK_Valid: 9542 case Sema::VAK_ValidInCXX11: { 9543 unsigned Diag; 9544 switch (Match) { 9545 case ArgType::Match: llvm_unreachable("expected non-matching"); 9546 case ArgType::NoMatchPedantic: 9547 Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic; 9548 break; 9549 case ArgType::NoMatchTypeConfusion: 9550 Diag = diag::warn_format_conversion_argument_type_mismatch_confusion; 9551 break; 9552 case ArgType::NoMatch: 9553 Diag = diag::warn_format_conversion_argument_type_mismatch; 9554 break; 9555 } 9556 9557 EmitFormatDiagnostic( 9558 S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) << ExprTy 9559 << IsEnum << CSR << E->getSourceRange(), 9560 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 9561 break; 9562 } 9563 case Sema::VAK_Undefined: 9564 case Sema::VAK_MSVCUndefined: 9565 EmitFormatDiagnostic(S.PDiag(diag::warn_non_pod_vararg_with_format_string) 9566 << S.getLangOpts().CPlusPlus11 << ExprTy 9567 << CallType 9568 << AT.getRepresentativeTypeName(S.Context) << CSR 9569 << E->getSourceRange(), 9570 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 9571 checkForCStrMembers(AT, E); 9572 break; 9573 9574 case Sema::VAK_Invalid: 9575 if (ExprTy->isObjCObjectType()) 9576 EmitFormatDiagnostic( 9577 S.PDiag(diag::err_cannot_pass_objc_interface_to_vararg_format) 9578 << S.getLangOpts().CPlusPlus11 << ExprTy << CallType 9579 << AT.getRepresentativeTypeName(S.Context) << CSR 9580 << E->getSourceRange(), 9581 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 9582 else 9583 // FIXME: If this is an initializer list, suggest removing the braces 9584 // or inserting a cast to the target type. 
        S.Diag(E->getBeginLoc(), diag::err_cannot_pass_to_vararg_format)
            << isa<InitListExpr>(E) << ExprTy << CallType
            << AT.getRepresentativeTypeName(S.Context) << E->getSourceRange();
      break;
    }

    assert(FirstDataArg + FS.getArgIndex() < CheckedVarArgs.size() &&
           "format string specifier index out of range");
    CheckedVarArgs[FirstDataArg + FS.getArgIndex()] = true;
  }

  return true;
}

//===--- CHECK: Scanf format string checking ------------------------------===//

namespace {

class CheckScanfHandler : public CheckFormatHandler {
public:
  CheckScanfHandler(Sema &s, const FormatStringLiteral *fexpr,
                    const Expr *origFormatExpr, Sema::FormatStringType type,
                    unsigned firstDataArg, unsigned numDataArgs,
                    const char *beg, bool hasVAListArg,
                    ArrayRef<const Expr *> Args, unsigned formatIdx,
                    bool inFunctionCall, Sema::VariadicCallType CallType,
                    llvm::SmallBitVector &CheckedVarArgs,
                    UncoveredArgHandler &UncoveredArg)
      : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg,
                           numDataArgs, beg, hasVAListArg, Args, formatIdx,
                           inFunctionCall, CallType, CheckedVarArgs,
                           UncoveredArg) {}

  bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS,
                            const char *startSpecifier,
                            unsigned specifierLen) override;

  bool HandleInvalidScanfConversionSpecifier(
      const analyze_scanf::ScanfSpecifier &FS,
      const char *startSpecifier,
      unsigned specifierLen) override;

  void HandleIncompleteScanList(const char *start, const char *end) override;
};

} // namespace

void CheckScanfHandler::HandleIncompleteScanList(const char *start,
                                                 const char *end) {
  EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_scanlist_incomplete),
                       getLocationOfByte(end), /*IsStringLocation*/true,
                       getSpecifierRange(start, end - start));
}

bool CheckScanfHandler::HandleInvalidScanfConversionSpecifier(
    const analyze_scanf::ScanfSpecifier &FS,
    const char *startSpecifier,
    unsigned specifierLen) {
  const analyze_scanf::ScanfConversionSpecifier &CS =
      FS.getConversionSpecifier();

  return HandleInvalidConversionSpecifier(FS.getArgIndex(),
                                          getLocationOfByte(CS.getStart()),
                                          startSpecifier, specifierLen,
                                          CS.getStart(), CS.getLength());
}

bool CheckScanfHandler::HandleScanfSpecifier(
    const analyze_scanf::ScanfSpecifier &FS,
    const char *startSpecifier,
    unsigned specifierLen) {
  using namespace analyze_scanf;
  using namespace analyze_format_string;

  const ScanfConversionSpecifier &CS = FS.getConversionSpecifier();

  // Handle case where '%' and '*' don't consume an argument. These shouldn't
  // be used to decide if we are using positional arguments consistently.
  if (FS.consumesDataArgument()) {
    if (atFirstArg) {
      atFirstArg = false;
      usesPositionalArgs = FS.usesPositionalArg();
    }
    else if (usesPositionalArgs != FS.usesPositionalArg()) {
      HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()),
                                        startSpecifier, specifierLen);
      return false;
    }
  }

  // Check if the field width is non-zero.
9676 const OptionalAmount &Amt = FS.getFieldWidth(); 9677 if (Amt.getHowSpecified() == OptionalAmount::Constant) { 9678 if (Amt.getConstantAmount() == 0) { 9679 const CharSourceRange &R = getSpecifierRange(Amt.getStart(), 9680 Amt.getConstantLength()); 9681 EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_nonzero_width), 9682 getLocationOfByte(Amt.getStart()), 9683 /*IsStringLocation*/true, R, 9684 FixItHint::CreateRemoval(R)); 9685 } 9686 } 9687 9688 if (!FS.consumesDataArgument()) { 9689 // FIXME: Technically specifying a precision or field width here 9690 // makes no sense. Worth issuing a warning at some point. 9691 return true; 9692 } 9693 9694 // Consume the argument. 9695 unsigned argIndex = FS.getArgIndex(); 9696 if (argIndex < NumDataArgs) { 9697 // The check to see if the argIndex is valid will come later. 9698 // We set the bit here because we may exit early from this 9699 // function if we encounter some other error. 9700 CoveredArgs.set(argIndex); 9701 } 9702 9703 // Check the length modifier is valid with the given conversion specifier. 9704 if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(), 9705 S.getLangOpts())) 9706 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 9707 diag::warn_format_nonsensical_length); 9708 else if (!FS.hasStandardLengthModifier()) 9709 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen); 9710 else if (!FS.hasStandardLengthConversionCombination()) 9711 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 9712 diag::warn_format_non_standard_conversion_spec); 9713 9714 if (!FS.hasStandardConversionSpecifier(S.getLangOpts())) 9715 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen); 9716 9717 // The remaining checks depend on the data arguments. 9718 if (HasVAListArg) 9719 return true; 9720 9721 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex)) 9722 return false; 9723 9724 // Check that the argument type matches the format specifier. 9725 const Expr *Ex = getDataArg(argIndex); 9726 if (!Ex) 9727 return true; 9728 9729 const analyze_format_string::ArgType &AT = FS.getArgType(S.Context); 9730 9731 if (!AT.isValid()) { 9732 return true; 9733 } 9734 9735 analyze_format_string::ArgType::MatchKind Match = 9736 AT.matchesType(S.Context, Ex->getType()); 9737 bool Pedantic = Match == analyze_format_string::ArgType::NoMatchPedantic; 9738 if (Match == analyze_format_string::ArgType::Match) 9739 return true; 9740 9741 ScanfSpecifier fixedFS = FS; 9742 bool Success = fixedFS.fixType(Ex->getType(), Ex->IgnoreImpCasts()->getType(), 9743 S.getLangOpts(), S.Context); 9744 9745 unsigned Diag = 9746 Pedantic ? diag::warn_format_conversion_argument_type_mismatch_pedantic 9747 : diag::warn_format_conversion_argument_type_mismatch; 9748 9749 if (Success) { 9750 // Get the fix string from the fixed format specifier. 
9751 SmallString<128> buf; 9752 llvm::raw_svector_ostream os(buf); 9753 fixedFS.toString(os); 9754 9755 EmitFormatDiagnostic( 9756 S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) 9757 << Ex->getType() << false << Ex->getSourceRange(), 9758 Ex->getBeginLoc(), 9759 /*IsStringLocation*/ false, 9760 getSpecifierRange(startSpecifier, specifierLen), 9761 FixItHint::CreateReplacement( 9762 getSpecifierRange(startSpecifier, specifierLen), os.str())); 9763 } else { 9764 EmitFormatDiagnostic(S.PDiag(Diag) 9765 << AT.getRepresentativeTypeName(S.Context) 9766 << Ex->getType() << false << Ex->getSourceRange(), 9767 Ex->getBeginLoc(), 9768 /*IsStringLocation*/ false, 9769 getSpecifierRange(startSpecifier, specifierLen)); 9770 } 9771 9772 return true; 9773 } 9774 9775 static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr, 9776 const Expr *OrigFormatExpr, 9777 ArrayRef<const Expr *> Args, 9778 bool HasVAListArg, unsigned format_idx, 9779 unsigned firstDataArg, 9780 Sema::FormatStringType Type, 9781 bool inFunctionCall, 9782 Sema::VariadicCallType CallType, 9783 llvm::SmallBitVector &CheckedVarArgs, 9784 UncoveredArgHandler &UncoveredArg, 9785 bool IgnoreStringsWithoutSpecifiers) { 9786 // CHECK: is the format string a wide literal? 9787 if (!FExpr->isAscii() && !FExpr->isUTF8()) { 9788 CheckFormatHandler::EmitFormatDiagnostic( 9789 S, inFunctionCall, Args[format_idx], 9790 S.PDiag(diag::warn_format_string_is_wide_literal), FExpr->getBeginLoc(), 9791 /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange()); 9792 return; 9793 } 9794 9795 // Str - The format string. NOTE: this is NOT null-terminated! 9796 StringRef StrRef = FExpr->getString(); 9797 const char *Str = StrRef.data(); 9798 // Account for cases where the string literal is truncated in a declaration. 9799 const ConstantArrayType *T = 9800 S.Context.getAsConstantArrayType(FExpr->getType()); 9801 assert(T && "String literal not of constant array type!"); 9802 size_t TypeSize = T->getSize().getZExtValue(); 9803 size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size()); 9804 const unsigned numDataArgs = Args.size() - firstDataArg; 9805 9806 if (IgnoreStringsWithoutSpecifiers && 9807 !analyze_format_string::parseFormatStringHasFormattingSpecifiers( 9808 Str, Str + StrLen, S.getLangOpts(), S.Context.getTargetInfo())) 9809 return; 9810 9811 // Emit a warning if the string literal is truncated and does not contain an 9812 // embedded null character. 9813 if (TypeSize <= StrRef.size() && !StrRef.substr(0, TypeSize).contains('\0')) { 9814 CheckFormatHandler::EmitFormatDiagnostic( 9815 S, inFunctionCall, Args[format_idx], 9816 S.PDiag(diag::warn_printf_format_string_not_null_terminated), 9817 FExpr->getBeginLoc(), 9818 /*IsStringLocation=*/true, OrigFormatExpr->getSourceRange()); 9819 return; 9820 } 9821 9822 // CHECK: empty format string? 
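  // e.g. (illustrative) printf("", value) passes a data argument that an
  // empty format string can never consume.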
  if (StrLen == 0 && numDataArgs > 0) {
    CheckFormatHandler::EmitFormatDiagnostic(
        S, inFunctionCall, Args[format_idx],
        S.PDiag(diag::warn_empty_format_string), FExpr->getBeginLoc(),
        /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange());
    return;
  }

  if (Type == Sema::FST_Printf || Type == Sema::FST_NSString ||
      Type == Sema::FST_FreeBSDKPrintf || Type == Sema::FST_OSLog ||
      Type == Sema::FST_OSTrace) {
    CheckPrintfHandler H(
        S, FExpr, OrigFormatExpr, Type, firstDataArg, numDataArgs,
        (Type == Sema::FST_NSString || Type == Sema::FST_OSTrace), Str,
        HasVAListArg, Args, format_idx, inFunctionCall, CallType,
        CheckedVarArgs, UncoveredArg);

    if (!analyze_format_string::ParsePrintfString(H, Str, Str + StrLen,
                                                  S.getLangOpts(),
                                                  S.Context.getTargetInfo(),
                                            Type == Sema::FST_FreeBSDKPrintf))
      H.DoneProcessing();
  } else if (Type == Sema::FST_Scanf) {
    CheckScanfHandler H(S, FExpr, OrigFormatExpr, Type, firstDataArg,
                        numDataArgs, Str, HasVAListArg, Args, format_idx,
                        inFunctionCall, CallType, CheckedVarArgs, UncoveredArg);

    if (!analyze_format_string::ParseScanfString(H, Str, Str + StrLen,
                                                 S.getLangOpts(),
                                                 S.Context.getTargetInfo()))
      H.DoneProcessing();
  } // TODO: handle other formats
}

bool Sema::FormatStringHasSArg(const StringLiteral *FExpr) {
  // Str - The format string. NOTE: this is NOT null-terminated!
  StringRef StrRef = FExpr->getString();
  const char *Str = StrRef.data();
  // Account for cases where the string literal is truncated in a declaration.
  const ConstantArrayType *T = Context.getAsConstantArrayType(FExpr->getType());
  assert(T && "String literal not of constant array type!");
  size_t TypeSize = T->getSize().getZExtValue();
  size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size());
  return analyze_format_string::ParseFormatStringHasSArg(Str, Str + StrLen,
                                                         getLangOpts(),
                                                         Context.getTargetInfo());
}

//===--- CHECK: Warn on use of wrong absolute value function. -------------===//

// Returns the related absolute value function that is larger, or 0 if one
// does not exist.
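// e.g. the next larger function after 'abs' is 'labs', and after 'labs' it is
// 'llabs'; 'llabs' itself has no larger counterpart, so 0 is returned.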
9875 static unsigned getLargerAbsoluteValueFunction(unsigned AbsFunction) { 9876 switch (AbsFunction) { 9877 default: 9878 return 0; 9879 9880 case Builtin::BI__builtin_abs: 9881 return Builtin::BI__builtin_labs; 9882 case Builtin::BI__builtin_labs: 9883 return Builtin::BI__builtin_llabs; 9884 case Builtin::BI__builtin_llabs: 9885 return 0; 9886 9887 case Builtin::BI__builtin_fabsf: 9888 return Builtin::BI__builtin_fabs; 9889 case Builtin::BI__builtin_fabs: 9890 return Builtin::BI__builtin_fabsl; 9891 case Builtin::BI__builtin_fabsl: 9892 return 0; 9893 9894 case Builtin::BI__builtin_cabsf: 9895 return Builtin::BI__builtin_cabs; 9896 case Builtin::BI__builtin_cabs: 9897 return Builtin::BI__builtin_cabsl; 9898 case Builtin::BI__builtin_cabsl: 9899 return 0; 9900 9901 case Builtin::BIabs: 9902 return Builtin::BIlabs; 9903 case Builtin::BIlabs: 9904 return Builtin::BIllabs; 9905 case Builtin::BIllabs: 9906 return 0; 9907 9908 case Builtin::BIfabsf: 9909 return Builtin::BIfabs; 9910 case Builtin::BIfabs: 9911 return Builtin::BIfabsl; 9912 case Builtin::BIfabsl: 9913 return 0; 9914 9915 case Builtin::BIcabsf: 9916 return Builtin::BIcabs; 9917 case Builtin::BIcabs: 9918 return Builtin::BIcabsl; 9919 case Builtin::BIcabsl: 9920 return 0; 9921 } 9922 } 9923 9924 // Returns the argument type of the absolute value function. 9925 static QualType getAbsoluteValueArgumentType(ASTContext &Context, 9926 unsigned AbsType) { 9927 if (AbsType == 0) 9928 return QualType(); 9929 9930 ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None; 9931 QualType BuiltinType = Context.GetBuiltinType(AbsType, Error); 9932 if (Error != ASTContext::GE_None) 9933 return QualType(); 9934 9935 const FunctionProtoType *FT = BuiltinType->getAs<FunctionProtoType>(); 9936 if (!FT) 9937 return QualType(); 9938 9939 if (FT->getNumParams() != 1) 9940 return QualType(); 9941 9942 return FT->getParamType(0); 9943 } 9944 9945 // Returns the best absolute value function, or zero, based on type and 9946 // current absolute value function. 9947 static unsigned getBestAbsFunction(ASTContext &Context, QualType ArgType, 9948 unsigned AbsFunctionKind) { 9949 unsigned BestKind = 0; 9950 uint64_t ArgSize = Context.getTypeSize(ArgType); 9951 for (unsigned Kind = AbsFunctionKind; Kind != 0; 9952 Kind = getLargerAbsoluteValueFunction(Kind)) { 9953 QualType ParamType = getAbsoluteValueArgumentType(Context, Kind); 9954 if (Context.getTypeSize(ParamType) >= ArgSize) { 9955 if (BestKind == 0) 9956 BestKind = Kind; 9957 else if (Context.hasSameType(ParamType, ArgType)) { 9958 BestKind = Kind; 9959 break; 9960 } 9961 } 9962 } 9963 return BestKind; 9964 } 9965 9966 enum AbsoluteValueKind { 9967 AVK_Integer, 9968 AVK_Floating, 9969 AVK_Complex 9970 }; 9971 9972 static AbsoluteValueKind getAbsoluteValueKind(QualType T) { 9973 if (T->isIntegralOrEnumerationType()) 9974 return AVK_Integer; 9975 if (T->isRealFloatingType()) 9976 return AVK_Floating; 9977 if (T->isAnyComplexType()) 9978 return AVK_Complex; 9979 9980 llvm_unreachable("Type not integer, floating, or complex"); 9981 } 9982 9983 // Changes the absolute value function to a different type. Preserves whether 9984 // the function is a builtin. 
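// e.g. 'fabsf' requested for an integer argument maps to 'abs', and
// '__builtin_abs' requested for a floating-point argument maps to
// '__builtin_fabsf'; the appropriately sized variant is then chosen by
// getBestAbsFunction.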
9985 static unsigned changeAbsFunction(unsigned AbsKind, 9986 AbsoluteValueKind ValueKind) { 9987 switch (ValueKind) { 9988 case AVK_Integer: 9989 switch (AbsKind) { 9990 default: 9991 return 0; 9992 case Builtin::BI__builtin_fabsf: 9993 case Builtin::BI__builtin_fabs: 9994 case Builtin::BI__builtin_fabsl: 9995 case Builtin::BI__builtin_cabsf: 9996 case Builtin::BI__builtin_cabs: 9997 case Builtin::BI__builtin_cabsl: 9998 return Builtin::BI__builtin_abs; 9999 case Builtin::BIfabsf: 10000 case Builtin::BIfabs: 10001 case Builtin::BIfabsl: 10002 case Builtin::BIcabsf: 10003 case Builtin::BIcabs: 10004 case Builtin::BIcabsl: 10005 return Builtin::BIabs; 10006 } 10007 case AVK_Floating: 10008 switch (AbsKind) { 10009 default: 10010 return 0; 10011 case Builtin::BI__builtin_abs: 10012 case Builtin::BI__builtin_labs: 10013 case Builtin::BI__builtin_llabs: 10014 case Builtin::BI__builtin_cabsf: 10015 case Builtin::BI__builtin_cabs: 10016 case Builtin::BI__builtin_cabsl: 10017 return Builtin::BI__builtin_fabsf; 10018 case Builtin::BIabs: 10019 case Builtin::BIlabs: 10020 case Builtin::BIllabs: 10021 case Builtin::BIcabsf: 10022 case Builtin::BIcabs: 10023 case Builtin::BIcabsl: 10024 return Builtin::BIfabsf; 10025 } 10026 case AVK_Complex: 10027 switch (AbsKind) { 10028 default: 10029 return 0; 10030 case Builtin::BI__builtin_abs: 10031 case Builtin::BI__builtin_labs: 10032 case Builtin::BI__builtin_llabs: 10033 case Builtin::BI__builtin_fabsf: 10034 case Builtin::BI__builtin_fabs: 10035 case Builtin::BI__builtin_fabsl: 10036 return Builtin::BI__builtin_cabsf; 10037 case Builtin::BIabs: 10038 case Builtin::BIlabs: 10039 case Builtin::BIllabs: 10040 case Builtin::BIfabsf: 10041 case Builtin::BIfabs: 10042 case Builtin::BIfabsl: 10043 return Builtin::BIcabsf; 10044 } 10045 } 10046 llvm_unreachable("Unable to convert function"); 10047 } 10048 10049 static unsigned getAbsoluteValueFunctionKind(const FunctionDecl *FDecl) { 10050 const IdentifierInfo *FnInfo = FDecl->getIdentifier(); 10051 if (!FnInfo) 10052 return 0; 10053 10054 switch (FDecl->getBuiltinID()) { 10055 default: 10056 return 0; 10057 case Builtin::BI__builtin_abs: 10058 case Builtin::BI__builtin_fabs: 10059 case Builtin::BI__builtin_fabsf: 10060 case Builtin::BI__builtin_fabsl: 10061 case Builtin::BI__builtin_labs: 10062 case Builtin::BI__builtin_llabs: 10063 case Builtin::BI__builtin_cabs: 10064 case Builtin::BI__builtin_cabsf: 10065 case Builtin::BI__builtin_cabsl: 10066 case Builtin::BIabs: 10067 case Builtin::BIlabs: 10068 case Builtin::BIllabs: 10069 case Builtin::BIfabs: 10070 case Builtin::BIfabsf: 10071 case Builtin::BIfabsl: 10072 case Builtin::BIcabs: 10073 case Builtin::BIcabsf: 10074 case Builtin::BIcabsl: 10075 return FDecl->getBuiltinID(); 10076 } 10077 llvm_unreachable("Unknown Builtin type"); 10078 } 10079 10080 // If the replacement is valid, emit a note with replacement function. 10081 // Additionally, suggest including the proper header if not already included. 
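// e.g. (illustrative) for 'abs(someDouble)' this suggests 'std::abs' in C++
// (or 'fabs' in C), and may additionally suggest including the header that
// declares the replacement if it is not already visible.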
10082 static void emitReplacement(Sema &S, SourceLocation Loc, SourceRange Range, 10083 unsigned AbsKind, QualType ArgType) { 10084 bool EmitHeaderHint = true; 10085 const char *HeaderName = nullptr; 10086 const char *FunctionName = nullptr; 10087 if (S.getLangOpts().CPlusPlus && !ArgType->isAnyComplexType()) { 10088 FunctionName = "std::abs"; 10089 if (ArgType->isIntegralOrEnumerationType()) { 10090 HeaderName = "cstdlib"; 10091 } else if (ArgType->isRealFloatingType()) { 10092 HeaderName = "cmath"; 10093 } else { 10094 llvm_unreachable("Invalid Type"); 10095 } 10096 10097 // Lookup all std::abs 10098 if (NamespaceDecl *Std = S.getStdNamespace()) { 10099 LookupResult R(S, &S.Context.Idents.get("abs"), Loc, Sema::LookupAnyName); 10100 R.suppressDiagnostics(); 10101 S.LookupQualifiedName(R, Std); 10102 10103 for (const auto *I : R) { 10104 const FunctionDecl *FDecl = nullptr; 10105 if (const UsingShadowDecl *UsingD = dyn_cast<UsingShadowDecl>(I)) { 10106 FDecl = dyn_cast<FunctionDecl>(UsingD->getTargetDecl()); 10107 } else { 10108 FDecl = dyn_cast<FunctionDecl>(I); 10109 } 10110 if (!FDecl) 10111 continue; 10112 10113 // Found std::abs(), check that they are the right ones. 10114 if (FDecl->getNumParams() != 1) 10115 continue; 10116 10117 // Check that the parameter type can handle the argument. 10118 QualType ParamType = FDecl->getParamDecl(0)->getType(); 10119 if (getAbsoluteValueKind(ArgType) == getAbsoluteValueKind(ParamType) && 10120 S.Context.getTypeSize(ArgType) <= 10121 S.Context.getTypeSize(ParamType)) { 10122 // Found a function, don't need the header hint. 10123 EmitHeaderHint = false; 10124 break; 10125 } 10126 } 10127 } 10128 } else { 10129 FunctionName = S.Context.BuiltinInfo.getName(AbsKind); 10130 HeaderName = S.Context.BuiltinInfo.getHeaderName(AbsKind); 10131 10132 if (HeaderName) { 10133 DeclarationName DN(&S.Context.Idents.get(FunctionName)); 10134 LookupResult R(S, DN, Loc, Sema::LookupAnyName); 10135 R.suppressDiagnostics(); 10136 S.LookupName(R, S.getCurScope()); 10137 10138 if (R.isSingleResult()) { 10139 FunctionDecl *FD = dyn_cast<FunctionDecl>(R.getFoundDecl()); 10140 if (FD && FD->getBuiltinID() == AbsKind) { 10141 EmitHeaderHint = false; 10142 } else { 10143 return; 10144 } 10145 } else if (!R.empty()) { 10146 return; 10147 } 10148 } 10149 } 10150 10151 S.Diag(Loc, diag::note_replace_abs_function) 10152 << FunctionName << FixItHint::CreateReplacement(Range, FunctionName); 10153 10154 if (!HeaderName) 10155 return; 10156 10157 if (!EmitHeaderHint) 10158 return; 10159 10160 S.Diag(Loc, diag::note_include_header_or_declare) << HeaderName 10161 << FunctionName; 10162 } 10163 10164 template <std::size_t StrLen> 10165 static bool IsStdFunction(const FunctionDecl *FDecl, 10166 const char (&Str)[StrLen]) { 10167 if (!FDecl) 10168 return false; 10169 if (!FDecl->getIdentifier() || !FDecl->getIdentifier()->isStr(Str)) 10170 return false; 10171 if (!FDecl->isInStdNamespace()) 10172 return false; 10173 10174 return true; 10175 } 10176 10177 // Warn when using the wrong abs() function. 
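// e.g. (illustrative) 'abs(someLongLong)' may truncate and should use 'llabs',
// 'abs(someDouble)' should use 'fabs' (or 'std::abs'), and 'abs(someUnsigned)'
// is pointless because the argument can never be negative.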
void Sema::CheckAbsoluteValueFunction(const CallExpr *Call,
                                      const FunctionDecl *FDecl) {
  if (Call->getNumArgs() != 1)
    return;

  unsigned AbsKind = getAbsoluteValueFunctionKind(FDecl);
  bool IsStdAbs = IsStdFunction(FDecl, "abs");
  if (AbsKind == 0 && !IsStdAbs)
    return;

  QualType ArgType = Call->getArg(0)->IgnoreParenImpCasts()->getType();
  QualType ParamType = Call->getArg(0)->getType();

  // Unsigned types cannot be negative. Suggest removing the absolute value
  // function call.
  if (ArgType->isUnsignedIntegerType()) {
    const char *FunctionName =
        IsStdAbs ? "std::abs" : Context.BuiltinInfo.getName(AbsKind);
    Diag(Call->getExprLoc(), diag::warn_unsigned_abs) << ArgType << ParamType;
    Diag(Call->getExprLoc(), diag::note_remove_abs)
        << FunctionName
        << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange());
    return;
  }

  // Taking the absolute value of a pointer is very suspicious; the caller
  // probably wanted to index into an array, dereference a pointer, call a
  // function, etc.
  if (ArgType->isPointerType() || ArgType->canDecayToPointerType()) {
    unsigned DiagType = 0;
    if (ArgType->isFunctionType())
      DiagType = 1;
    else if (ArgType->isArrayType())
      DiagType = 2;

    Diag(Call->getExprLoc(), diag::warn_pointer_abs) << DiagType << ArgType;
    return;
  }

  // std::abs has overloads which prevent most of the absolute value problems
  // from occurring.
  if (IsStdAbs)
    return;

  AbsoluteValueKind ArgValueKind = getAbsoluteValueKind(ArgType);
  AbsoluteValueKind ParamValueKind = getAbsoluteValueKind(ParamType);

  // The argument and parameter are the same kind. Check if they are the right
  // size.
  if (ArgValueKind == ParamValueKind) {
    if (Context.getTypeSize(ArgType) <= Context.getTypeSize(ParamType))
      return;

    unsigned NewAbsKind = getBestAbsFunction(Context, ArgType, AbsKind);
    Diag(Call->getExprLoc(), diag::warn_abs_too_small)
        << FDecl << ArgType << ParamType;

    if (NewAbsKind == 0)
      return;

    emitReplacement(*this, Call->getExprLoc(),
                    Call->getCallee()->getSourceRange(), NewAbsKind, ArgType);
    return;
  }

  // ArgValueKind != ParamValueKind
  // The wrong type of absolute value function was used. Attempt to find the
  // proper one.
  unsigned NewAbsKind = changeAbsFunction(AbsKind, ArgValueKind);
  NewAbsKind = getBestAbsFunction(Context, ArgType, NewAbsKind);
  if (NewAbsKind == 0)
    return;

  Diag(Call->getExprLoc(), diag::warn_wrong_absolute_value_type)
      << FDecl << ParamValueKind << ArgValueKind;

  emitReplacement(*this, Call->getExprLoc(),
                  Call->getCallee()->getSourceRange(), NewAbsKind, ArgType);
}

//===--- CHECK: Warn on use of std::max and unsigned zero. ----------------===//
void Sema::CheckMaxUnsignedZero(const CallExpr *Call,
                                const FunctionDecl *FDecl) {
  if (!Call || !FDecl) return;

  // Ignore template specializations and macros.
10263 if (inTemplateInstantiation()) return; 10264 if (Call->getExprLoc().isMacroID()) return; 10265 10266 // Only care about the one template argument, two function parameter std::max 10267 if (Call->getNumArgs() != 2) return; 10268 if (!IsStdFunction(FDecl, "max")) return; 10269 const auto * ArgList = FDecl->getTemplateSpecializationArgs(); 10270 if (!ArgList) return; 10271 if (ArgList->size() != 1) return; 10272 10273 // Check that template type argument is unsigned integer. 10274 const auto& TA = ArgList->get(0); 10275 if (TA.getKind() != TemplateArgument::Type) return; 10276 QualType ArgType = TA.getAsType(); 10277 if (!ArgType->isUnsignedIntegerType()) return; 10278 10279 // See if either argument is a literal zero. 10280 auto IsLiteralZeroArg = [](const Expr* E) -> bool { 10281 const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E); 10282 if (!MTE) return false; 10283 const auto *Num = dyn_cast<IntegerLiteral>(MTE->getSubExpr()); 10284 if (!Num) return false; 10285 if (Num->getValue() != 0) return false; 10286 return true; 10287 }; 10288 10289 const Expr *FirstArg = Call->getArg(0); 10290 const Expr *SecondArg = Call->getArg(1); 10291 const bool IsFirstArgZero = IsLiteralZeroArg(FirstArg); 10292 const bool IsSecondArgZero = IsLiteralZeroArg(SecondArg); 10293 10294 // Only warn when exactly one argument is zero. 10295 if (IsFirstArgZero == IsSecondArgZero) return; 10296 10297 SourceRange FirstRange = FirstArg->getSourceRange(); 10298 SourceRange SecondRange = SecondArg->getSourceRange(); 10299 10300 SourceRange ZeroRange = IsFirstArgZero ? FirstRange : SecondRange; 10301 10302 Diag(Call->getExprLoc(), diag::warn_max_unsigned_zero) 10303 << IsFirstArgZero << Call->getCallee()->getSourceRange() << ZeroRange; 10304 10305 // Deduce what parts to remove so that "std::max(0u, foo)" becomes "(foo)". 10306 SourceRange RemovalRange; 10307 if (IsFirstArgZero) { 10308 RemovalRange = SourceRange(FirstRange.getBegin(), 10309 SecondRange.getBegin().getLocWithOffset(-1)); 10310 } else { 10311 RemovalRange = SourceRange(getLocForEndOfToken(FirstRange.getEnd()), 10312 SecondRange.getEnd()); 10313 } 10314 10315 Diag(Call->getExprLoc(), diag::note_remove_max_call) 10316 << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange()) 10317 << FixItHint::CreateRemoval(RemovalRange); 10318 } 10319 10320 //===--- CHECK: Standard memory functions ---------------------------------===// 10321 10322 /// Takes the expression passed to the size_t parameter of functions 10323 /// such as memcmp, strncat, etc and warns if it's a comparison. 10324 /// 10325 /// This is to catch typos like `if (memcmp(&a, &b, sizeof(a) > 0))`. 
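///
/// For example (illustrative):
/// \code
///   if (memcmp(&a, &b, sizeof(a) > 0))   // size argument is '(sizeof(a) > 0)'
///   if (memcmp(&a, &b, sizeof(a)) > 0)   // what was probably intended
/// \endcode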
10326 static bool CheckMemorySizeofForComparison(Sema &S, const Expr *E, 10327 IdentifierInfo *FnName, 10328 SourceLocation FnLoc, 10329 SourceLocation RParenLoc) { 10330 const BinaryOperator *Size = dyn_cast<BinaryOperator>(E); 10331 if (!Size) 10332 return false; 10333 10334 // if E is binop and op is <=>, >, <, >=, <=, ==, &&, ||: 10335 if (!Size->isComparisonOp() && !Size->isLogicalOp()) 10336 return false; 10337 10338 SourceRange SizeRange = Size->getSourceRange(); 10339 S.Diag(Size->getOperatorLoc(), diag::warn_memsize_comparison) 10340 << SizeRange << FnName; 10341 S.Diag(FnLoc, diag::note_memsize_comparison_paren) 10342 << FnName 10343 << FixItHint::CreateInsertion( 10344 S.getLocForEndOfToken(Size->getLHS()->getEndLoc()), ")") 10345 << FixItHint::CreateRemoval(RParenLoc); 10346 S.Diag(SizeRange.getBegin(), diag::note_memsize_comparison_cast_silence) 10347 << FixItHint::CreateInsertion(SizeRange.getBegin(), "(size_t)(") 10348 << FixItHint::CreateInsertion(S.getLocForEndOfToken(SizeRange.getEnd()), 10349 ")"); 10350 10351 return true; 10352 } 10353 10354 /// Determine whether the given type is or contains a dynamic class type 10355 /// (e.g., whether it has a vtable). 10356 static const CXXRecordDecl *getContainedDynamicClass(QualType T, 10357 bool &IsContained) { 10358 // Look through array types while ignoring qualifiers. 10359 const Type *Ty = T->getBaseElementTypeUnsafe(); 10360 IsContained = false; 10361 10362 const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl(); 10363 RD = RD ? RD->getDefinition() : nullptr; 10364 if (!RD || RD->isInvalidDecl()) 10365 return nullptr; 10366 10367 if (RD->isDynamicClass()) 10368 return RD; 10369 10370 // Check all the fields. If any bases were dynamic, the class is dynamic. 10371 // It's impossible for a class to transitively contain itself by value, so 10372 // infinite recursion is impossible. 10373 for (auto *FD : RD->fields()) { 10374 bool SubContained; 10375 if (const CXXRecordDecl *ContainedRD = 10376 getContainedDynamicClass(FD->getType(), SubContained)) { 10377 IsContained = true; 10378 return ContainedRD; 10379 } 10380 } 10381 10382 return nullptr; 10383 } 10384 10385 static const UnaryExprOrTypeTraitExpr *getAsSizeOfExpr(const Expr *E) { 10386 if (const auto *Unary = dyn_cast<UnaryExprOrTypeTraitExpr>(E)) 10387 if (Unary->getKind() == UETT_SizeOf) 10388 return Unary; 10389 return nullptr; 10390 } 10391 10392 /// If E is a sizeof expression, returns its argument expression, 10393 /// otherwise returns NULL. 10394 static const Expr *getSizeOfExprArg(const Expr *E) { 10395 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E)) 10396 if (!SizeOf->isArgumentType()) 10397 return SizeOf->getArgumentExpr()->IgnoreParenImpCasts(); 10398 return nullptr; 10399 } 10400 10401 /// If E is a sizeof expression, returns its argument type. 
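/// e.g. returns 'int' for 'sizeof(int)' and the type of 'buf' for
/// 'sizeof buf'; returns a null QualType if E is not a sizeof expression.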
10402 static QualType getSizeOfArgType(const Expr *E) { 10403 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E)) 10404 return SizeOf->getTypeOfArgument(); 10405 return QualType(); 10406 } 10407 10408 namespace { 10409 10410 struct SearchNonTrivialToInitializeField 10411 : DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField> { 10412 using Super = 10413 DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField>; 10414 10415 SearchNonTrivialToInitializeField(const Expr *E, Sema &S) : E(E), S(S) {} 10416 10417 void visitWithKind(QualType::PrimitiveDefaultInitializeKind PDIK, QualType FT, 10418 SourceLocation SL) { 10419 if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) { 10420 asDerived().visitArray(PDIK, AT, SL); 10421 return; 10422 } 10423 10424 Super::visitWithKind(PDIK, FT, SL); 10425 } 10426 10427 void visitARCStrong(QualType FT, SourceLocation SL) { 10428 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1); 10429 } 10430 void visitARCWeak(QualType FT, SourceLocation SL) { 10431 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1); 10432 } 10433 void visitStruct(QualType FT, SourceLocation SL) { 10434 for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields()) 10435 visit(FD->getType(), FD->getLocation()); 10436 } 10437 void visitArray(QualType::PrimitiveDefaultInitializeKind PDIK, 10438 const ArrayType *AT, SourceLocation SL) { 10439 visit(getContext().getBaseElementType(AT), SL); 10440 } 10441 void visitTrivial(QualType FT, SourceLocation SL) {} 10442 10443 static void diag(QualType RT, const Expr *E, Sema &S) { 10444 SearchNonTrivialToInitializeField(E, S).visitStruct(RT, SourceLocation()); 10445 } 10446 10447 ASTContext &getContext() { return S.getASTContext(); } 10448 10449 const Expr *E; 10450 Sema &S; 10451 }; 10452 10453 struct SearchNonTrivialToCopyField 10454 : CopiedTypeVisitor<SearchNonTrivialToCopyField, false> { 10455 using Super = CopiedTypeVisitor<SearchNonTrivialToCopyField, false>; 10456 10457 SearchNonTrivialToCopyField(const Expr *E, Sema &S) : E(E), S(S) {} 10458 10459 void visitWithKind(QualType::PrimitiveCopyKind PCK, QualType FT, 10460 SourceLocation SL) { 10461 if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) { 10462 asDerived().visitArray(PCK, AT, SL); 10463 return; 10464 } 10465 10466 Super::visitWithKind(PCK, FT, SL); 10467 } 10468 10469 void visitARCStrong(QualType FT, SourceLocation SL) { 10470 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0); 10471 } 10472 void visitARCWeak(QualType FT, SourceLocation SL) { 10473 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0); 10474 } 10475 void visitStruct(QualType FT, SourceLocation SL) { 10476 for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields()) 10477 visit(FD->getType(), FD->getLocation()); 10478 } 10479 void visitArray(QualType::PrimitiveCopyKind PCK, const ArrayType *AT, 10480 SourceLocation SL) { 10481 visit(getContext().getBaseElementType(AT), SL); 10482 } 10483 void preVisit(QualType::PrimitiveCopyKind PCK, QualType FT, 10484 SourceLocation SL) {} 10485 void visitTrivial(QualType FT, SourceLocation SL) {} 10486 void visitVolatileTrivial(QualType FT, SourceLocation SL) {} 10487 10488 static void diag(QualType RT, const Expr *E, Sema &S) { 10489 SearchNonTrivialToCopyField(E, S).visitStruct(RT, SourceLocation()); 10490 } 10491 10492 ASTContext &getContext() { return S.getASTContext(); } 10493 10494 const Expr *E; 10495 Sema &S; 10496 
}; 10497 10498 } 10499 10500 /// Detect if \c SizeofExpr is likely to calculate the sizeof an object. 10501 static bool doesExprLikelyComputeSize(const Expr *SizeofExpr) { 10502 SizeofExpr = SizeofExpr->IgnoreParenImpCasts(); 10503 10504 if (const auto *BO = dyn_cast<BinaryOperator>(SizeofExpr)) { 10505 if (BO->getOpcode() != BO_Mul && BO->getOpcode() != BO_Add) 10506 return false; 10507 10508 return doesExprLikelyComputeSize(BO->getLHS()) || 10509 doesExprLikelyComputeSize(BO->getRHS()); 10510 } 10511 10512 return getAsSizeOfExpr(SizeofExpr) != nullptr; 10513 } 10514 10515 /// Check if the ArgLoc originated from a macro passed to the call at CallLoc. 10516 /// 10517 /// \code 10518 /// #define MACRO 0 10519 /// foo(MACRO); 10520 /// foo(0); 10521 /// \endcode 10522 /// 10523 /// This should return true for the first call to foo, but not for the second 10524 /// (regardless of whether foo is a macro or function). 10525 static bool isArgumentExpandedFromMacro(SourceManager &SM, 10526 SourceLocation CallLoc, 10527 SourceLocation ArgLoc) { 10528 if (!CallLoc.isMacroID()) 10529 return SM.getFileID(CallLoc) != SM.getFileID(ArgLoc); 10530 10531 return SM.getFileID(SM.getImmediateMacroCallerLoc(CallLoc)) != 10532 SM.getFileID(SM.getImmediateMacroCallerLoc(ArgLoc)); 10533 } 10534 10535 /// Diagnose cases like 'memset(buf, sizeof(buf), 0)', which should have the 10536 /// last two arguments transposed. 10537 static void CheckMemaccessSize(Sema &S, unsigned BId, const CallExpr *Call) { 10538 if (BId != Builtin::BImemset && BId != Builtin::BIbzero) 10539 return; 10540 10541 const Expr *SizeArg = 10542 Call->getArg(BId == Builtin::BImemset ? 2 : 1)->IgnoreImpCasts(); 10543 10544 auto isLiteralZero = [](const Expr *E) { 10545 return isa<IntegerLiteral>(E) && cast<IntegerLiteral>(E)->getValue() == 0; 10546 }; 10547 10548 // If we're memsetting or bzeroing 0 bytes, then this is likely an error. 10549 SourceLocation CallLoc = Call->getRParenLoc(); 10550 SourceManager &SM = S.getSourceManager(); 10551 if (isLiteralZero(SizeArg) && 10552 !isArgumentExpandedFromMacro(SM, CallLoc, SizeArg->getExprLoc())) { 10553 10554 SourceLocation DiagLoc = SizeArg->getExprLoc(); 10555 10556 // Some platforms #define bzero to __builtin_memset. See if this is the 10557 // case, and if so, emit a better diagnostic. 10558 if (BId == Builtin::BIbzero || 10559 (CallLoc.isMacroID() && Lexer::getImmediateMacroName( 10560 CallLoc, SM, S.getLangOpts()) == "bzero")) { 10561 S.Diag(DiagLoc, diag::warn_suspicious_bzero_size); 10562 S.Diag(DiagLoc, diag::note_suspicious_bzero_size_silence); 10563 } else if (!isLiteralZero(Call->getArg(1)->IgnoreImpCasts())) { 10564 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 0; 10565 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 0; 10566 } 10567 return; 10568 } 10569 10570 // If the second argument to a memset is a sizeof expression and the third 10571 // isn't, this is also likely an error. This should catch 10572 // 'memset(buf, sizeof(buf), 0xff)'. 10573 if (BId == Builtin::BImemset && 10574 doesExprLikelyComputeSize(Call->getArg(1)) && 10575 !doesExprLikelyComputeSize(Call->getArg(2))) { 10576 SourceLocation DiagLoc = Call->getArg(1)->getExprLoc(); 10577 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 1; 10578 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 1; 10579 return; 10580 } 10581 } 10582 10583 /// Check for dangerous or invalid arguments to memset(). 
10584 /// 10585 /// This issues warnings on known problematic, dangerous or unspecified 10586 /// arguments to the standard 'memset', 'memcpy', 'memmove', and 'memcmp' 10587 /// function calls. 10588 /// 10589 /// \param Call The call expression to diagnose. 10590 void Sema::CheckMemaccessArguments(const CallExpr *Call, 10591 unsigned BId, 10592 IdentifierInfo *FnName) { 10593 assert(BId != 0); 10594 10595 // It is possible to have a non-standard definition of memset. Validate 10596 // we have enough arguments, and if not, abort further checking. 10597 unsigned ExpectedNumArgs = 10598 (BId == Builtin::BIstrndup || BId == Builtin::BIbzero ? 2 : 3); 10599 if (Call->getNumArgs() < ExpectedNumArgs) 10600 return; 10601 10602 unsigned LastArg = (BId == Builtin::BImemset || BId == Builtin::BIbzero || 10603 BId == Builtin::BIstrndup ? 1 : 2); 10604 unsigned LenArg = 10605 (BId == Builtin::BIbzero || BId == Builtin::BIstrndup ? 1 : 2); 10606 const Expr *LenExpr = Call->getArg(LenArg)->IgnoreParenImpCasts(); 10607 10608 if (CheckMemorySizeofForComparison(*this, LenExpr, FnName, 10609 Call->getBeginLoc(), Call->getRParenLoc())) 10610 return; 10611 10612 // Catch cases like 'memset(buf, sizeof(buf), 0)'. 10613 CheckMemaccessSize(*this, BId, Call); 10614 10615 // We have special checking when the length is a sizeof expression. 10616 QualType SizeOfArgTy = getSizeOfArgType(LenExpr); 10617 const Expr *SizeOfArg = getSizeOfExprArg(LenExpr); 10618 llvm::FoldingSetNodeID SizeOfArgID; 10619 10620 // Although widely used, 'bzero' is not a standard function. Be more strict 10621 // with the argument types before allowing diagnostics and only allow the 10622 // form bzero(ptr, sizeof(...)). 10623 QualType FirstArgTy = Call->getArg(0)->IgnoreParenImpCasts()->getType(); 10624 if (BId == Builtin::BIbzero && !FirstArgTy->getAs<PointerType>()) 10625 return; 10626 10627 for (unsigned ArgIdx = 0; ArgIdx != LastArg; ++ArgIdx) { 10628 const Expr *Dest = Call->getArg(ArgIdx)->IgnoreParenImpCasts(); 10629 SourceRange ArgRange = Call->getArg(ArgIdx)->getSourceRange(); 10630 10631 QualType DestTy = Dest->getType(); 10632 QualType PointeeTy; 10633 if (const PointerType *DestPtrTy = DestTy->getAs<PointerType>()) { 10634 PointeeTy = DestPtrTy->getPointeeType(); 10635 10636 // Never warn about void type pointers. This can be used to suppress 10637 // false positives. 10638 if (PointeeTy->isVoidType()) 10639 continue; 10640 10641 // Catch "memset(p, 0, sizeof(p))" -- needs to be sizeof(*p). Do this by 10642 // actually comparing the expressions for equality. Because computing the 10643 // expression IDs can be expensive, we only do this if the diagnostic is 10644 // enabled. 10645 if (SizeOfArg && 10646 !Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess, 10647 SizeOfArg->getExprLoc())) { 10648 // We only compute IDs for expressions if the warning is enabled, and 10649 // cache the sizeof arg's ID. 10650 if (SizeOfArgID == llvm::FoldingSetNodeID()) 10651 SizeOfArg->Profile(SizeOfArgID, Context, true); 10652 llvm::FoldingSetNodeID DestID; 10653 Dest->Profile(DestID, Context, true); 10654 if (DestID == SizeOfArgID) { 10655 // TODO: For strncpy() and friends, this could suggest sizeof(dst) 10656 // over sizeof(src) as well. 10657 unsigned ActionIdx = 0; // Default is to suggest dereferencing. 
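          // Rough summary of the suggestions chosen below (illustrative):
          //   ActionIdx 0: memset(p, 0, sizeof(p))   -> dereference: sizeof(*p)
          //   ActionIdx 1: memset(&x, 0, sizeof(&x)) -> drop the address-of
          //   ActionIdx 2: pointee is char-sized, so sizeof(*p) would be 1;
          //                suggest an explicit length instead.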
10658 StringRef ReadableName = FnName->getName(); 10659 10660 if (const UnaryOperator *UnaryOp = dyn_cast<UnaryOperator>(Dest)) 10661 if (UnaryOp->getOpcode() == UO_AddrOf) 10662 ActionIdx = 1; // If its an address-of operator, just remove it. 10663 if (!PointeeTy->isIncompleteType() && 10664 (Context.getTypeSize(PointeeTy) == Context.getCharWidth())) 10665 ActionIdx = 2; // If the pointee's size is sizeof(char), 10666 // suggest an explicit length. 10667 10668 // If the function is defined as a builtin macro, do not show macro 10669 // expansion. 10670 SourceLocation SL = SizeOfArg->getExprLoc(); 10671 SourceRange DSR = Dest->getSourceRange(); 10672 SourceRange SSR = SizeOfArg->getSourceRange(); 10673 SourceManager &SM = getSourceManager(); 10674 10675 if (SM.isMacroArgExpansion(SL)) { 10676 ReadableName = Lexer::getImmediateMacroName(SL, SM, LangOpts); 10677 SL = SM.getSpellingLoc(SL); 10678 DSR = SourceRange(SM.getSpellingLoc(DSR.getBegin()), 10679 SM.getSpellingLoc(DSR.getEnd())); 10680 SSR = SourceRange(SM.getSpellingLoc(SSR.getBegin()), 10681 SM.getSpellingLoc(SSR.getEnd())); 10682 } 10683 10684 DiagRuntimeBehavior(SL, SizeOfArg, 10685 PDiag(diag::warn_sizeof_pointer_expr_memaccess) 10686 << ReadableName 10687 << PointeeTy 10688 << DestTy 10689 << DSR 10690 << SSR); 10691 DiagRuntimeBehavior(SL, SizeOfArg, 10692 PDiag(diag::warn_sizeof_pointer_expr_memaccess_note) 10693 << ActionIdx 10694 << SSR); 10695 10696 break; 10697 } 10698 } 10699 10700 // Also check for cases where the sizeof argument is the exact same 10701 // type as the memory argument, and where it points to a user-defined 10702 // record type. 10703 if (SizeOfArgTy != QualType()) { 10704 if (PointeeTy->isRecordType() && 10705 Context.typesAreCompatible(SizeOfArgTy, DestTy)) { 10706 DiagRuntimeBehavior(LenExpr->getExprLoc(), Dest, 10707 PDiag(diag::warn_sizeof_pointer_type_memaccess) 10708 << FnName << SizeOfArgTy << ArgIdx 10709 << PointeeTy << Dest->getSourceRange() 10710 << LenExpr->getSourceRange()); 10711 break; 10712 } 10713 } 10714 } else if (DestTy->isArrayType()) { 10715 PointeeTy = DestTy; 10716 } 10717 10718 if (PointeeTy == QualType()) 10719 continue; 10720 10721 // Always complain about dynamic classes. 10722 bool IsContained; 10723 if (const CXXRecordDecl *ContainedRD = 10724 getContainedDynamicClass(PointeeTy, IsContained)) { 10725 10726 unsigned OperationType = 0; 10727 const bool IsCmp = BId == Builtin::BImemcmp || BId == Builtin::BIbcmp; 10728 // "overwritten" if we're warning about the destination for any call 10729 // but memcmp; otherwise a verb appropriate to the call. 10730 if (ArgIdx != 0 || IsCmp) { 10731 if (BId == Builtin::BImemcpy) 10732 OperationType = 1; 10733 else if(BId == Builtin::BImemmove) 10734 OperationType = 2; 10735 else if (IsCmp) 10736 OperationType = 3; 10737 } 10738 10739 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 10740 PDiag(diag::warn_dyn_class_memaccess) 10741 << (IsCmp ? 
ArgIdx + 2 : ArgIdx) << FnName 10742 << IsContained << ContainedRD << OperationType 10743 << Call->getCallee()->getSourceRange()); 10744 } else if (PointeeTy.hasNonTrivialObjCLifetime() && 10745 BId != Builtin::BImemset) 10746 DiagRuntimeBehavior( 10747 Dest->getExprLoc(), Dest, 10748 PDiag(diag::warn_arc_object_memaccess) 10749 << ArgIdx << FnName << PointeeTy 10750 << Call->getCallee()->getSourceRange()); 10751 else if (const auto *RT = PointeeTy->getAs<RecordType>()) { 10752 if ((BId == Builtin::BImemset || BId == Builtin::BIbzero) && 10753 RT->getDecl()->isNonTrivialToPrimitiveDefaultInitialize()) { 10754 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 10755 PDiag(diag::warn_cstruct_memaccess) 10756 << ArgIdx << FnName << PointeeTy << 0); 10757 SearchNonTrivialToInitializeField::diag(PointeeTy, Dest, *this); 10758 } else if ((BId == Builtin::BImemcpy || BId == Builtin::BImemmove) && 10759 RT->getDecl()->isNonTrivialToPrimitiveCopy()) { 10760 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 10761 PDiag(diag::warn_cstruct_memaccess) 10762 << ArgIdx << FnName << PointeeTy << 1); 10763 SearchNonTrivialToCopyField::diag(PointeeTy, Dest, *this); 10764 } else { 10765 continue; 10766 } 10767 } else 10768 continue; 10769 10770 DiagRuntimeBehavior( 10771 Dest->getExprLoc(), Dest, 10772 PDiag(diag::note_bad_memaccess_silence) 10773 << FixItHint::CreateInsertion(ArgRange.getBegin(), "(void*)")); 10774 break; 10775 } 10776 } 10777 10778 // A little helper routine: ignore addition and subtraction of integer literals. 10779 // This intentionally does not ignore all integer constant expressions because 10780 // we don't want to remove sizeof(). 10781 static const Expr *ignoreLiteralAdditions(const Expr *Ex, ASTContext &Ctx) { 10782 Ex = Ex->IgnoreParenCasts(); 10783 10784 while (true) { 10785 const BinaryOperator * BO = dyn_cast<BinaryOperator>(Ex); 10786 if (!BO || !BO->isAdditiveOp()) 10787 break; 10788 10789 const Expr *RHS = BO->getRHS()->IgnoreParenCasts(); 10790 const Expr *LHS = BO->getLHS()->IgnoreParenCasts(); 10791 10792 if (isa<IntegerLiteral>(RHS)) 10793 Ex = LHS; 10794 else if (isa<IntegerLiteral>(LHS)) 10795 Ex = RHS; 10796 else 10797 break; 10798 } 10799 10800 return Ex; 10801 } 10802 10803 static bool isConstantSizeArrayWithMoreThanOneElement(QualType Ty, 10804 ASTContext &Context) { 10805 // Only handle constant-sized or VLAs, but not flexible members. 10806 if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(Ty)) { 10807 // Only issue the FIXIT for arrays of size > 1. 10808 if (CAT->getSize().getSExtValue() <= 1) 10809 return false; 10810 } else if (!Ty->isVariableArrayType()) { 10811 return false; 10812 } 10813 return true; 10814 } 10815 10816 // Warn if the user has made the 'size' argument to strlcpy or strlcat 10817 // be the size of the source, instead of the destination. 
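//
// For example (illustrative), both of the following are flagged because the
// bound is derived from the source rather than the destination buffer:
//
//   char dst[32];
//   const char *src = ...;
//   strlcpy(dst, src, sizeof(src)); // should be sizeof(dst)
//   strlcpy(dst, src, strlen(src)); // likewise suspicious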
10818 void Sema::CheckStrlcpycatArguments(const CallExpr *Call, 10819 IdentifierInfo *FnName) { 10820 10821 // Don't crash if the user has the wrong number of arguments 10822 unsigned NumArgs = Call->getNumArgs(); 10823 if ((NumArgs != 3) && (NumArgs != 4)) 10824 return; 10825 10826 const Expr *SrcArg = ignoreLiteralAdditions(Call->getArg(1), Context); 10827 const Expr *SizeArg = ignoreLiteralAdditions(Call->getArg(2), Context); 10828 const Expr *CompareWithSrc = nullptr; 10829 10830 if (CheckMemorySizeofForComparison(*this, SizeArg, FnName, 10831 Call->getBeginLoc(), Call->getRParenLoc())) 10832 return; 10833 10834 // Look for 'strlcpy(dst, x, sizeof(x))' 10835 if (const Expr *Ex = getSizeOfExprArg(SizeArg)) 10836 CompareWithSrc = Ex; 10837 else { 10838 // Look for 'strlcpy(dst, x, strlen(x))' 10839 if (const CallExpr *SizeCall = dyn_cast<CallExpr>(SizeArg)) { 10840 if (SizeCall->getBuiltinCallee() == Builtin::BIstrlen && 10841 SizeCall->getNumArgs() == 1) 10842 CompareWithSrc = ignoreLiteralAdditions(SizeCall->getArg(0), Context); 10843 } 10844 } 10845 10846 if (!CompareWithSrc) 10847 return; 10848 10849 // Determine if the argument to sizeof/strlen is equal to the source 10850 // argument. In principle there's all kinds of things you could do 10851 // here, for instance creating an == expression and evaluating it with 10852 // EvaluateAsBooleanCondition, but this uses a more direct technique: 10853 const DeclRefExpr *SrcArgDRE = dyn_cast<DeclRefExpr>(SrcArg); 10854 if (!SrcArgDRE) 10855 return; 10856 10857 const DeclRefExpr *CompareWithSrcDRE = dyn_cast<DeclRefExpr>(CompareWithSrc); 10858 if (!CompareWithSrcDRE || 10859 SrcArgDRE->getDecl() != CompareWithSrcDRE->getDecl()) 10860 return; 10861 10862 const Expr *OriginalSizeArg = Call->getArg(2); 10863 Diag(CompareWithSrcDRE->getBeginLoc(), diag::warn_strlcpycat_wrong_size) 10864 << OriginalSizeArg->getSourceRange() << FnName; 10865 10866 // Output a FIXIT hint if the destination is an array (rather than a 10867 // pointer to an array). This could be enhanced to handle some 10868 // pointers if we know the actual size, like if DstArg is 'array+2' 10869 // we could say 'sizeof(array)-2'. 10870 const Expr *DstArg = Call->getArg(0)->IgnoreParenImpCasts(); 10871 if (!isConstantSizeArrayWithMoreThanOneElement(DstArg->getType(), Context)) 10872 return; 10873 10874 SmallString<128> sizeString; 10875 llvm::raw_svector_ostream OS(sizeString); 10876 OS << "sizeof("; 10877 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 10878 OS << ")"; 10879 10880 Diag(OriginalSizeArg->getBeginLoc(), diag::note_strlcpycat_wrong_size) 10881 << FixItHint::CreateReplacement(OriginalSizeArg->getSourceRange(), 10882 OS.str()); 10883 } 10884 10885 /// Check if two expressions refer to the same declaration. 10886 static bool referToTheSameDecl(const Expr *E1, const Expr *E2) { 10887 if (const DeclRefExpr *D1 = dyn_cast_or_null<DeclRefExpr>(E1)) 10888 if (const DeclRefExpr *D2 = dyn_cast_or_null<DeclRefExpr>(E2)) 10889 return D1->getDecl() == D2->getDecl(); 10890 return false; 10891 } 10892 10893 static const Expr *getStrlenExprArg(const Expr *E) { 10894 if (const CallExpr *CE = dyn_cast<CallExpr>(E)) { 10895 const FunctionDecl *FD = CE->getDirectCallee(); 10896 if (!FD || FD->getMemoryFunctionKind() != Builtin::BIstrlen) 10897 return nullptr; 10898 return CE->getArg(0)->IgnoreParenCasts(); 10899 } 10900 return nullptr; 10901 } 10902 10903 // Warn on anti-patterns as the 'size' argument to strncat. 
// The correct size argument should look like the following:
//   strncat(dst, src, sizeof(dst) - strlen(dst) - 1);
void Sema::CheckStrncatArguments(const CallExpr *CE,
                                 IdentifierInfo *FnName) {
  // Don't crash if the user has the wrong number of arguments.
  if (CE->getNumArgs() < 3)
    return;
  const Expr *DstArg = CE->getArg(0)->IgnoreParenCasts();
  const Expr *SrcArg = CE->getArg(1)->IgnoreParenCasts();
  const Expr *LenArg = CE->getArg(2)->IgnoreParenCasts();

  if (CheckMemorySizeofForComparison(*this, LenArg, FnName, CE->getBeginLoc(),
                                     CE->getRParenLoc()))
    return;

  // Identify common expressions, which are wrongly used as the size argument
  // to strncat and may lead to buffer overflows.
  unsigned PatternType = 0;
  if (const Expr *SizeOfArg = getSizeOfExprArg(LenArg)) {
    // - sizeof(dst)
    if (referToTheSameDecl(SizeOfArg, DstArg))
      PatternType = 1;
    // - sizeof(src)
    else if (referToTheSameDecl(SizeOfArg, SrcArg))
      PatternType = 2;
  } else if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(LenArg)) {
    if (BE->getOpcode() == BO_Sub) {
      const Expr *L = BE->getLHS()->IgnoreParenCasts();
      const Expr *R = BE->getRHS()->IgnoreParenCasts();
      // - sizeof(dst) - strlen(dst)
      if (referToTheSameDecl(DstArg, getSizeOfExprArg(L)) &&
          referToTheSameDecl(DstArg, getStrlenExprArg(R)))
        PatternType = 1;
      // - sizeof(src) - (anything)
      else if (referToTheSameDecl(SrcArg, getSizeOfExprArg(L)))
        PatternType = 2;
    }
  }

  if (PatternType == 0)
    return;

  // Generate the diagnostic.
  SourceLocation SL = LenArg->getBeginLoc();
  SourceRange SR = LenArg->getSourceRange();
  SourceManager &SM = getSourceManager();

  // If the function is defined as a builtin macro, do not show macro
  // expansion.
  if (SM.isMacroArgExpansion(SL)) {
    SL = SM.getSpellingLoc(SL);
    SR = SourceRange(SM.getSpellingLoc(SR.getBegin()),
                     SM.getSpellingLoc(SR.getEnd()));
  }

  // Check if the destination is an array (rather than a pointer to an array).
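  // When it is, the fix-it emitted below can suggest a concrete bound, e.g.
  // (illustrative):
  //   strncat(dst, src, sizeof(dst));                   // warn: too large
  //   strncat(dst, src, sizeof(dst) - strlen(dst) - 1); // suggested replacement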
10959 QualType DstTy = DstArg->getType(); 10960 bool isKnownSizeArray = isConstantSizeArrayWithMoreThanOneElement(DstTy, 10961 Context); 10962 if (!isKnownSizeArray) { 10963 if (PatternType == 1) 10964 Diag(SL, diag::warn_strncat_wrong_size) << SR; 10965 else 10966 Diag(SL, diag::warn_strncat_src_size) << SR; 10967 return; 10968 } 10969 10970 if (PatternType == 1) 10971 Diag(SL, diag::warn_strncat_large_size) << SR; 10972 else 10973 Diag(SL, diag::warn_strncat_src_size) << SR; 10974 10975 SmallString<128> sizeString; 10976 llvm::raw_svector_ostream OS(sizeString); 10977 OS << "sizeof("; 10978 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 10979 OS << ") - "; 10980 OS << "strlen("; 10981 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 10982 OS << ") - 1"; 10983 10984 Diag(SL, diag::note_strncat_wrong_size) 10985 << FixItHint::CreateReplacement(SR, OS.str()); 10986 } 10987 10988 namespace { 10989 void CheckFreeArgumentsOnLvalue(Sema &S, const std::string &CalleeName, 10990 const UnaryOperator *UnaryExpr, const Decl *D) { 10991 if (isa<FieldDecl, FunctionDecl, VarDecl>(D)) { 10992 S.Diag(UnaryExpr->getBeginLoc(), diag::warn_free_nonheap_object) 10993 << CalleeName << 0 /*object: */ << cast<NamedDecl>(D); 10994 return; 10995 } 10996 } 10997 10998 void CheckFreeArgumentsAddressof(Sema &S, const std::string &CalleeName, 10999 const UnaryOperator *UnaryExpr) { 11000 if (const auto *Lvalue = dyn_cast<DeclRefExpr>(UnaryExpr->getSubExpr())) { 11001 const Decl *D = Lvalue->getDecl(); 11002 if (isa<DeclaratorDecl>(D)) 11003 if (!dyn_cast<DeclaratorDecl>(D)->getType()->isReferenceType()) 11004 return CheckFreeArgumentsOnLvalue(S, CalleeName, UnaryExpr, D); 11005 } 11006 11007 if (const auto *Lvalue = dyn_cast<MemberExpr>(UnaryExpr->getSubExpr())) 11008 return CheckFreeArgumentsOnLvalue(S, CalleeName, UnaryExpr, 11009 Lvalue->getMemberDecl()); 11010 } 11011 11012 void CheckFreeArgumentsPlus(Sema &S, const std::string &CalleeName, 11013 const UnaryOperator *UnaryExpr) { 11014 const auto *Lambda = dyn_cast<LambdaExpr>( 11015 UnaryExpr->getSubExpr()->IgnoreImplicitAsWritten()->IgnoreParens()); 11016 if (!Lambda) 11017 return; 11018 11019 S.Diag(Lambda->getBeginLoc(), diag::warn_free_nonheap_object) 11020 << CalleeName << 2 /*object: lambda expression*/; 11021 } 11022 11023 void CheckFreeArgumentsStackArray(Sema &S, const std::string &CalleeName, 11024 const DeclRefExpr *Lvalue) { 11025 const auto *Var = dyn_cast<VarDecl>(Lvalue->getDecl()); 11026 if (Var == nullptr) 11027 return; 11028 11029 S.Diag(Lvalue->getBeginLoc(), diag::warn_free_nonheap_object) 11030 << CalleeName << 0 /*object: */ << Var; 11031 } 11032 11033 void CheckFreeArgumentsCast(Sema &S, const std::string &CalleeName, 11034 const CastExpr *Cast) { 11035 SmallString<128> SizeString; 11036 llvm::raw_svector_ostream OS(SizeString); 11037 11038 clang::CastKind Kind = Cast->getCastKind(); 11039 if (Kind == clang::CK_BitCast && 11040 !Cast->getSubExpr()->getType()->isFunctionPointerType()) 11041 return; 11042 if (Kind == clang::CK_IntegralToPointer && 11043 !isa<IntegerLiteral>( 11044 Cast->getSubExpr()->IgnoreParenImpCasts()->IgnoreParens())) 11045 return; 11046 11047 switch (Cast->getCastKind()) { 11048 case clang::CK_BitCast: 11049 case clang::CK_IntegralToPointer: 11050 case clang::CK_FunctionToPointerDecay: 11051 OS << '\''; 11052 Cast->printPretty(OS, nullptr, S.getPrintingPolicy()); 11053 OS << '\''; 11054 break; 11055 default: 11056 return; 11057 } 11058 11059 S.Diag(Cast->getBeginLoc(), diag::warn_free_nonheap_object) 
11060 << CalleeName << 0 /*object: */ << OS.str(); 11061 } 11062 } // namespace 11063 11064 /// Alerts the user that they are attempting to free a non-malloc'd object. 11065 void Sema::CheckFreeArguments(const CallExpr *E) { 11066 const std::string CalleeName = 11067 dyn_cast<FunctionDecl>(E->getCalleeDecl())->getQualifiedNameAsString(); 11068 11069 { // Prefer something that doesn't involve a cast to make things simpler. 11070 const Expr *Arg = E->getArg(0)->IgnoreParenCasts(); 11071 if (const auto *UnaryExpr = dyn_cast<UnaryOperator>(Arg)) 11072 switch (UnaryExpr->getOpcode()) { 11073 case UnaryOperator::Opcode::UO_AddrOf: 11074 return CheckFreeArgumentsAddressof(*this, CalleeName, UnaryExpr); 11075 case UnaryOperator::Opcode::UO_Plus: 11076 return CheckFreeArgumentsPlus(*this, CalleeName, UnaryExpr); 11077 default: 11078 break; 11079 } 11080 11081 if (const auto *Lvalue = dyn_cast<DeclRefExpr>(Arg)) 11082 if (Lvalue->getType()->isArrayType()) 11083 return CheckFreeArgumentsStackArray(*this, CalleeName, Lvalue); 11084 11085 if (const auto *Label = dyn_cast<AddrLabelExpr>(Arg)) { 11086 Diag(Label->getBeginLoc(), diag::warn_free_nonheap_object) 11087 << CalleeName << 0 /*object: */ << Label->getLabel()->getIdentifier(); 11088 return; 11089 } 11090 11091 if (isa<BlockExpr>(Arg)) { 11092 Diag(Arg->getBeginLoc(), diag::warn_free_nonheap_object) 11093 << CalleeName << 1 /*object: block*/; 11094 return; 11095 } 11096 } 11097 // Maybe the cast was important, check after the other cases. 11098 if (const auto *Cast = dyn_cast<CastExpr>(E->getArg(0))) 11099 return CheckFreeArgumentsCast(*this, CalleeName, Cast); 11100 } 11101 11102 void 11103 Sema::CheckReturnValExpr(Expr *RetValExp, QualType lhsType, 11104 SourceLocation ReturnLoc, 11105 bool isObjCMethod, 11106 const AttrVec *Attrs, 11107 const FunctionDecl *FD) { 11108 // Check if the return value is null but should not be. 11109 if (((Attrs && hasSpecificAttr<ReturnsNonNullAttr>(*Attrs)) || 11110 (!isObjCMethod && isNonNullType(Context, lhsType))) && 11111 CheckNonNullExpr(*this, RetValExp)) 11112 Diag(ReturnLoc, diag::warn_null_ret) 11113 << (isObjCMethod ? 1 : 0) << RetValExp->getSourceRange(); 11114 11115 // C++11 [basic.stc.dynamic.allocation]p4: 11116 // If an allocation function declared with a non-throwing 11117 // exception-specification fails to allocate storage, it shall return 11118 // a null pointer. Any other allocation function that fails to allocate 11119 // storage shall indicate failure only by throwing an exception [...] 11120 if (FD) { 11121 OverloadedOperatorKind Op = FD->getOverloadedOperator(); 11122 if (Op == OO_New || Op == OO_Array_New) { 11123 const FunctionProtoType *Proto 11124 = FD->getType()->castAs<FunctionProtoType>(); 11125 if (!Proto->isNothrow(/*ResultIfDependent*/true) && 11126 CheckNonNullExpr(*this, RetValExp)) 11127 Diag(ReturnLoc, diag::warn_operator_new_returns_null) 11128 << FD << getLangOpts().CPlusPlus11; 11129 } 11130 } 11131 11132 // PPC MMA non-pointer types are not allowed as return type. Checking the type 11133 // here prevent the user from using a PPC MMA type as trailing return type. 11134 if (Context.getTargetInfo().getTriple().isPPC64()) 11135 CheckPPCMMAType(RetValExp->getType(), ReturnLoc); 11136 } 11137 11138 //===--- CHECK: Floating-Point comparisons (-Wfloat-equal) ---------------===// 11139 11140 /// Check for comparisons of floating point operands using != and ==. 
/// Issue a warning if these are not self-comparisons, as they are not likely
/// to do what the programmer intended.
void Sema::CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr *RHS) {
  Expr* LeftExprSansParen = LHS->IgnoreParenImpCasts();
  Expr* RightExprSansParen = RHS->IgnoreParenImpCasts();

  // Special case: check for x == x (which is OK).
  // Do not emit warnings for such cases.
  if (DeclRefExpr* DRL = dyn_cast<DeclRefExpr>(LeftExprSansParen))
    if (DeclRefExpr* DRR = dyn_cast<DeclRefExpr>(RightExprSansParen))
      if (DRL->getDecl() == DRR->getDecl())
        return;

  // Special case: check for comparisons against literals that can be exactly
  // represented by APFloat. In such cases, do not emit a warning. This
  // is a heuristic: often comparisons against such literals are used to
  // detect if a value in a variable has not changed. This clearly can
  // lead to false negatives.
  if (FloatingLiteral* FLL = dyn_cast<FloatingLiteral>(LeftExprSansParen)) {
    if (FLL->isExact())
      return;
  } else
    if (FloatingLiteral* FLR = dyn_cast<FloatingLiteral>(RightExprSansParen))
      if (FLR->isExact())
        return;

  // Check for comparisons with builtin types.
  if (CallExpr* CL = dyn_cast<CallExpr>(LeftExprSansParen))
    if (CL->getBuiltinCallee())
      return;

  if (CallExpr* CR = dyn_cast<CallExpr>(RightExprSansParen))
    if (CR->getBuiltinCallee())
      return;

  // Emit the diagnostic.
  Diag(Loc, diag::warn_floatingpoint_eq)
    << LHS->getSourceRange() << RHS->getSourceRange();
}

//===--- CHECK: Integer mixed-sign comparisons (-Wsign-compare) --------===//
//===--- CHECK: Lossy implicit conversions (-Wconversion) --------------===//

namespace {

/// Structure recording the 'active' range of an integer-valued
/// expression.
struct IntRange {
  /// The number of bits active in the int. Note that this includes exactly one
  /// sign bit if !NonNegative.
  unsigned Width;

  /// True if the int is known not to have negative values. If so, all leading
  /// bits before Width are known zero, otherwise they are known to be the
  /// same as the MSB within Width.
  bool NonNegative;

  IntRange(unsigned Width, bool NonNegative)
      : Width(Width), NonNegative(NonNegative) {}

  /// Number of bits excluding the sign bit.
  unsigned valueBits() const {
    return NonNegative ? Width : Width - 1;
  }

  /// Returns the range of the bool type.
  static IntRange forBoolType() {
    return IntRange(1, true);
  }

  /// Returns the range of an opaque value of the given integral type.
  static IntRange forValueOfType(ASTContext &C, QualType T) {
    return forValueOfCanonicalType(C,
                          T->getCanonicalTypeInternal().getTypePtr());
  }

  /// Returns the range of an opaque value of a canonical integral type.
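  ///
  /// For example (illustrative), given an unfixed C++ enumeration
  /// \code
  ///   enum E { A, B, C }; // all enumerators non-negative, 2 bits needed
  /// \endcode
  /// this returns IntRange(2, /*NonNegative=*/true) rather than the full
  /// range of the underlying integer type.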
11218 static IntRange forValueOfCanonicalType(ASTContext &C, const Type *T) { 11219 assert(T->isCanonicalUnqualified()); 11220 11221 if (const VectorType *VT = dyn_cast<VectorType>(T)) 11222 T = VT->getElementType().getTypePtr(); 11223 if (const ComplexType *CT = dyn_cast<ComplexType>(T)) 11224 T = CT->getElementType().getTypePtr(); 11225 if (const AtomicType *AT = dyn_cast<AtomicType>(T)) 11226 T = AT->getValueType().getTypePtr(); 11227 11228 if (!C.getLangOpts().CPlusPlus) { 11229 // For enum types in C code, use the underlying datatype. 11230 if (const EnumType *ET = dyn_cast<EnumType>(T)) 11231 T = ET->getDecl()->getIntegerType().getDesugaredType(C).getTypePtr(); 11232 } else if (const EnumType *ET = dyn_cast<EnumType>(T)) { 11233 // For enum types in C++, use the known bit width of the enumerators. 11234 EnumDecl *Enum = ET->getDecl(); 11235 // In C++11, enums can have a fixed underlying type. Use this type to 11236 // compute the range. 11237 if (Enum->isFixed()) { 11238 return IntRange(C.getIntWidth(QualType(T, 0)), 11239 !ET->isSignedIntegerOrEnumerationType()); 11240 } 11241 11242 unsigned NumPositive = Enum->getNumPositiveBits(); 11243 unsigned NumNegative = Enum->getNumNegativeBits(); 11244 11245 if (NumNegative == 0) 11246 return IntRange(NumPositive, true/*NonNegative*/); 11247 else 11248 return IntRange(std::max(NumPositive + 1, NumNegative), 11249 false/*NonNegative*/); 11250 } 11251 11252 if (const auto *EIT = dyn_cast<ExtIntType>(T)) 11253 return IntRange(EIT->getNumBits(), EIT->isUnsigned()); 11254 11255 const BuiltinType *BT = cast<BuiltinType>(T); 11256 assert(BT->isInteger()); 11257 11258 return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger()); 11259 } 11260 11261 /// Returns the "target" range of a canonical integral type, i.e. 11262 /// the range of values expressible in the type. 11263 /// 11264 /// This matches forValueOfCanonicalType except that enums have the 11265 /// full range of their type, not the range of their enumerators. 11266 static IntRange forTargetOfCanonicalType(ASTContext &C, const Type *T) { 11267 assert(T->isCanonicalUnqualified()); 11268 11269 if (const VectorType *VT = dyn_cast<VectorType>(T)) 11270 T = VT->getElementType().getTypePtr(); 11271 if (const ComplexType *CT = dyn_cast<ComplexType>(T)) 11272 T = CT->getElementType().getTypePtr(); 11273 if (const AtomicType *AT = dyn_cast<AtomicType>(T)) 11274 T = AT->getValueType().getTypePtr(); 11275 if (const EnumType *ET = dyn_cast<EnumType>(T)) 11276 T = C.getCanonicalType(ET->getDecl()->getIntegerType()).getTypePtr(); 11277 11278 if (const auto *EIT = dyn_cast<ExtIntType>(T)) 11279 return IntRange(EIT->getNumBits(), EIT->isUnsigned()); 11280 11281 const BuiltinType *BT = cast<BuiltinType>(T); 11282 assert(BT->isInteger()); 11283 11284 return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger()); 11285 } 11286 11287 /// Returns the supremum of two ranges: i.e. their conservative merge. 11288 static IntRange join(IntRange L, IntRange R) { 11289 bool Unsigned = L.NonNegative && R.NonNegative; 11290 return IntRange(std::max(L.valueBits(), R.valueBits()) + !Unsigned, 11291 L.NonNegative && R.NonNegative); 11292 } 11293 11294 /// Return the range of a bitwise-AND of the two ranges. 
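  ///
  /// Sketch of the reasoning below: if either operand is known non-negative,
  /// the result is non-negative and no wider than that operand, e.g.
  /// (x & 0xff) fits in 8 non-negative bits regardless of x's range.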
11295 static IntRange bit_and(IntRange L, IntRange R) { 11296 unsigned Bits = std::max(L.Width, R.Width); 11297 bool NonNegative = false; 11298 if (L.NonNegative) { 11299 Bits = std::min(Bits, L.Width); 11300 NonNegative = true; 11301 } 11302 if (R.NonNegative) { 11303 Bits = std::min(Bits, R.Width); 11304 NonNegative = true; 11305 } 11306 return IntRange(Bits, NonNegative); 11307 } 11308 11309 /// Return the range of a sum of the two ranges. 11310 static IntRange sum(IntRange L, IntRange R) { 11311 bool Unsigned = L.NonNegative && R.NonNegative; 11312 return IntRange(std::max(L.valueBits(), R.valueBits()) + 1 + !Unsigned, 11313 Unsigned); 11314 } 11315 11316 /// Return the range of a difference of the two ranges. 11317 static IntRange difference(IntRange L, IntRange R) { 11318 // We need a 1-bit-wider range if: 11319 // 1) LHS can be negative: least value can be reduced. 11320 // 2) RHS can be negative: greatest value can be increased. 11321 bool CanWiden = !L.NonNegative || !R.NonNegative; 11322 bool Unsigned = L.NonNegative && R.Width == 0; 11323 return IntRange(std::max(L.valueBits(), R.valueBits()) + CanWiden + 11324 !Unsigned, 11325 Unsigned); 11326 } 11327 11328 /// Return the range of a product of the two ranges. 11329 static IntRange product(IntRange L, IntRange R) { 11330 // If both LHS and RHS can be negative, we can form 11331 // -2^L * -2^R = 2^(L + R) 11332 // which requires L + R + 1 value bits to represent. 11333 bool CanWiden = !L.NonNegative && !R.NonNegative; 11334 bool Unsigned = L.NonNegative && R.NonNegative; 11335 return IntRange(L.valueBits() + R.valueBits() + CanWiden + !Unsigned, 11336 Unsigned); 11337 } 11338 11339 /// Return the range of a remainder operation between the two ranges. 11340 static IntRange rem(IntRange L, IntRange R) { 11341 // The result of a remainder can't be larger than the result of 11342 // either side. The sign of the result is the sign of the LHS. 11343 bool Unsigned = L.NonNegative; 11344 return IntRange(std::min(L.valueBits(), R.valueBits()) + !Unsigned, 11345 Unsigned); 11346 } 11347 }; 11348 11349 } // namespace 11350 11351 static IntRange GetValueRange(ASTContext &C, llvm::APSInt &value, 11352 unsigned MaxWidth) { 11353 if (value.isSigned() && value.isNegative()) 11354 return IntRange(value.getMinSignedBits(), false); 11355 11356 if (value.getBitWidth() > MaxWidth) 11357 value = value.trunc(MaxWidth); 11358 11359 // isNonNegative() just checks the sign bit without considering 11360 // signedness. 11361 return IntRange(value.getActiveBits(), true); 11362 } 11363 11364 static IntRange GetValueRange(ASTContext &C, APValue &result, QualType Ty, 11365 unsigned MaxWidth) { 11366 if (result.isInt()) 11367 return GetValueRange(C, result.getInt(), MaxWidth); 11368 11369 if (result.isVector()) { 11370 IntRange R = GetValueRange(C, result.getVectorElt(0), Ty, MaxWidth); 11371 for (unsigned i = 1, e = result.getVectorLength(); i != e; ++i) { 11372 IntRange El = GetValueRange(C, result.getVectorElt(i), Ty, MaxWidth); 11373 R = IntRange::join(R, El); 11374 } 11375 return R; 11376 } 11377 11378 if (result.isComplexInt()) { 11379 IntRange R = GetValueRange(C, result.getComplexIntReal(), MaxWidth); 11380 IntRange I = GetValueRange(C, result.getComplexIntImag(), MaxWidth); 11381 return IntRange::join(R, I); 11382 } 11383 11384 // This can happen with lossless casts to intptr_t of "based" lvalues. 11385 // Assume it might use arbitrary bits. 
11386 // FIXME: The only reason we need to pass the type in here is to get 11387 // the sign right on this one case. It would be nice if APValue 11388 // preserved this. 11389 assert(result.isLValue() || result.isAddrLabelDiff()); 11390 return IntRange(MaxWidth, Ty->isUnsignedIntegerOrEnumerationType()); 11391 } 11392 11393 static QualType GetExprType(const Expr *E) { 11394 QualType Ty = E->getType(); 11395 if (const AtomicType *AtomicRHS = Ty->getAs<AtomicType>()) 11396 Ty = AtomicRHS->getValueType(); 11397 return Ty; 11398 } 11399 11400 /// Pseudo-evaluate the given integer expression, estimating the 11401 /// range of values it might take. 11402 /// 11403 /// \param MaxWidth The width to which the value will be truncated. 11404 /// \param Approximate If \c true, return a likely range for the result: in 11405 /// particular, assume that arithmetic on narrower types doesn't leave 11406 /// those types. If \c false, return a range including all possible 11407 /// result values. 11408 static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth, 11409 bool InConstantContext, bool Approximate) { 11410 E = E->IgnoreParens(); 11411 11412 // Try a full evaluation first. 11413 Expr::EvalResult result; 11414 if (E->EvaluateAsRValue(result, C, InConstantContext)) 11415 return GetValueRange(C, result.Val, GetExprType(E), MaxWidth); 11416 11417 // I think we only want to look through implicit casts here; if the 11418 // user has an explicit widening cast, we should treat the value as 11419 // being of the new, wider type. 11420 if (const auto *CE = dyn_cast<ImplicitCastExpr>(E)) { 11421 if (CE->getCastKind() == CK_NoOp || CE->getCastKind() == CK_LValueToRValue) 11422 return GetExprRange(C, CE->getSubExpr(), MaxWidth, InConstantContext, 11423 Approximate); 11424 11425 IntRange OutputTypeRange = IntRange::forValueOfType(C, GetExprType(CE)); 11426 11427 bool isIntegerCast = CE->getCastKind() == CK_IntegralCast || 11428 CE->getCastKind() == CK_BooleanToSignedIntegral; 11429 11430 // Assume that non-integer casts can span the full range of the type. 11431 if (!isIntegerCast) 11432 return OutputTypeRange; 11433 11434 IntRange SubRange = GetExprRange(C, CE->getSubExpr(), 11435 std::min(MaxWidth, OutputTypeRange.Width), 11436 InConstantContext, Approximate); 11437 11438 // Bail out if the subexpr's range is as wide as the cast type. 11439 if (SubRange.Width >= OutputTypeRange.Width) 11440 return OutputTypeRange; 11441 11442 // Otherwise, we take the smaller width, and we're non-negative if 11443 // either the output type or the subexpr is. 11444 return IntRange(SubRange.Width, 11445 SubRange.NonNegative || OutputTypeRange.NonNegative); 11446 } 11447 11448 if (const auto *CO = dyn_cast<ConditionalOperator>(E)) { 11449 // If we can fold the condition, just take that operand. 11450 bool CondResult; 11451 if (CO->getCond()->EvaluateAsBooleanCondition(CondResult, C)) 11452 return GetExprRange(C, 11453 CondResult ? CO->getTrueExpr() : CO->getFalseExpr(), 11454 MaxWidth, InConstantContext, Approximate); 11455 11456 // Otherwise, conservatively merge. 11457 // GetExprRange requires an integer expression, but a throw expression 11458 // results in a void type. 11459 Expr *E = CO->getTrueExpr(); 11460 IntRange L = E->getType()->isVoidType() 11461 ? IntRange{0, true} 11462 : GetExprRange(C, E, MaxWidth, InConstantContext, Approximate); 11463 E = CO->getFalseExpr(); 11464 IntRange R = E->getType()->isVoidType() 11465 ? 
IntRange{0, true} 11466 : GetExprRange(C, E, MaxWidth, InConstantContext, Approximate); 11467 return IntRange::join(L, R); 11468 } 11469 11470 if (const auto *BO = dyn_cast<BinaryOperator>(E)) { 11471 IntRange (*Combine)(IntRange, IntRange) = IntRange::join; 11472 11473 switch (BO->getOpcode()) { 11474 case BO_Cmp: 11475 llvm_unreachable("builtin <=> should have class type"); 11476 11477 // Boolean-valued operations are single-bit and positive. 11478 case BO_LAnd: 11479 case BO_LOr: 11480 case BO_LT: 11481 case BO_GT: 11482 case BO_LE: 11483 case BO_GE: 11484 case BO_EQ: 11485 case BO_NE: 11486 return IntRange::forBoolType(); 11487 11488 // The type of the assignments is the type of the LHS, so the RHS 11489 // is not necessarily the same type. 11490 case BO_MulAssign: 11491 case BO_DivAssign: 11492 case BO_RemAssign: 11493 case BO_AddAssign: 11494 case BO_SubAssign: 11495 case BO_XorAssign: 11496 case BO_OrAssign: 11497 // TODO: bitfields? 11498 return IntRange::forValueOfType(C, GetExprType(E)); 11499 11500 // Simple assignments just pass through the RHS, which will have 11501 // been coerced to the LHS type. 11502 case BO_Assign: 11503 // TODO: bitfields? 11504 return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext, 11505 Approximate); 11506 11507 // Operations with opaque sources are black-listed. 11508 case BO_PtrMemD: 11509 case BO_PtrMemI: 11510 return IntRange::forValueOfType(C, GetExprType(E)); 11511 11512 // Bitwise-and uses the *infinum* of the two source ranges. 11513 case BO_And: 11514 case BO_AndAssign: 11515 Combine = IntRange::bit_and; 11516 break; 11517 11518 // Left shift gets black-listed based on a judgement call. 11519 case BO_Shl: 11520 // ...except that we want to treat '1 << (blah)' as logically 11521 // positive. It's an important idiom. 11522 if (IntegerLiteral *I 11523 = dyn_cast<IntegerLiteral>(BO->getLHS()->IgnoreParenCasts())) { 11524 if (I->getValue() == 1) { 11525 IntRange R = IntRange::forValueOfType(C, GetExprType(E)); 11526 return IntRange(R.Width, /*NonNegative*/ true); 11527 } 11528 } 11529 LLVM_FALLTHROUGH; 11530 11531 case BO_ShlAssign: 11532 return IntRange::forValueOfType(C, GetExprType(E)); 11533 11534 // Right shift by a constant can narrow its left argument. 11535 case BO_Shr: 11536 case BO_ShrAssign: { 11537 IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth, InConstantContext, 11538 Approximate); 11539 11540 // If the shift amount is a positive constant, drop the width by 11541 // that much. 11542 if (Optional<llvm::APSInt> shift = 11543 BO->getRHS()->getIntegerConstantExpr(C)) { 11544 if (shift->isNonNegative()) { 11545 unsigned zext = shift->getZExtValue(); 11546 if (zext >= L.Width) 11547 L.Width = (L.NonNegative ? 0 : 1); 11548 else 11549 L.Width -= zext; 11550 } 11551 } 11552 11553 return L; 11554 } 11555 11556 // Comma acts as its right operand. 11557 case BO_Comma: 11558 return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext, 11559 Approximate); 11560 11561 case BO_Add: 11562 if (!Approximate) 11563 Combine = IntRange::sum; 11564 break; 11565 11566 case BO_Sub: 11567 if (BO->getLHS()->getType()->isPointerType()) 11568 return IntRange::forValueOfType(C, GetExprType(E)); 11569 if (!Approximate) 11570 Combine = IntRange::difference; 11571 break; 11572 11573 case BO_Mul: 11574 if (!Approximate) 11575 Combine = IntRange::product; 11576 break; 11577 11578 // The width of a division result is mostly determined by the size 11579 // of the LHS. 11580 case BO_Div: { 11581 // Don't 'pre-truncate' the operands. 
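      // Illustrative instance of the heuristic below: for 'x / 16' with a
      // 32-bit 'x' and a constant divisor, floor(log2(16)) == 4, so the
      // result's width shrinks from 32 to 28 bits.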
11582 unsigned opWidth = C.getIntWidth(GetExprType(E)); 11583 IntRange L = GetExprRange(C, BO->getLHS(), opWidth, InConstantContext, 11584 Approximate); 11585 11586 // If the divisor is constant, use that. 11587 if (Optional<llvm::APSInt> divisor = 11588 BO->getRHS()->getIntegerConstantExpr(C)) { 11589 unsigned log2 = divisor->logBase2(); // floor(log_2(divisor)) 11590 if (log2 >= L.Width) 11591 L.Width = (L.NonNegative ? 0 : 1); 11592 else 11593 L.Width = std::min(L.Width - log2, MaxWidth); 11594 return L; 11595 } 11596 11597 // Otherwise, just use the LHS's width. 11598 // FIXME: This is wrong if the LHS could be its minimal value and the RHS 11599 // could be -1. 11600 IntRange R = GetExprRange(C, BO->getRHS(), opWidth, InConstantContext, 11601 Approximate); 11602 return IntRange(L.Width, L.NonNegative && R.NonNegative); 11603 } 11604 11605 case BO_Rem: 11606 Combine = IntRange::rem; 11607 break; 11608 11609 // The default behavior is okay for these. 11610 case BO_Xor: 11611 case BO_Or: 11612 break; 11613 } 11614 11615 // Combine the two ranges, but limit the result to the type in which we 11616 // performed the computation. 11617 QualType T = GetExprType(E); 11618 unsigned opWidth = C.getIntWidth(T); 11619 IntRange L = 11620 GetExprRange(C, BO->getLHS(), opWidth, InConstantContext, Approximate); 11621 IntRange R = 11622 GetExprRange(C, BO->getRHS(), opWidth, InConstantContext, Approximate); 11623 IntRange C = Combine(L, R); 11624 C.NonNegative |= T->isUnsignedIntegerOrEnumerationType(); 11625 C.Width = std::min(C.Width, MaxWidth); 11626 return C; 11627 } 11628 11629 if (const auto *UO = dyn_cast<UnaryOperator>(E)) { 11630 switch (UO->getOpcode()) { 11631 // Boolean-valued operations are white-listed. 11632 case UO_LNot: 11633 return IntRange::forBoolType(); 11634 11635 // Operations with opaque sources are black-listed. 11636 case UO_Deref: 11637 case UO_AddrOf: // should be impossible 11638 return IntRange::forValueOfType(C, GetExprType(E)); 11639 11640 default: 11641 return GetExprRange(C, UO->getSubExpr(), MaxWidth, InConstantContext, 11642 Approximate); 11643 } 11644 } 11645 11646 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E)) 11647 return GetExprRange(C, OVE->getSourceExpr(), MaxWidth, InConstantContext, 11648 Approximate); 11649 11650 if (const auto *BitField = E->getSourceBitField()) 11651 return IntRange(BitField->getBitWidthValue(C), 11652 BitField->getType()->isUnsignedIntegerOrEnumerationType()); 11653 11654 return IntRange::forValueOfType(C, GetExprType(E)); 11655 } 11656 11657 static IntRange GetExprRange(ASTContext &C, const Expr *E, 11658 bool InConstantContext, bool Approximate) { 11659 return GetExprRange(C, E, C.getIntWidth(GetExprType(E)), InConstantContext, 11660 Approximate); 11661 } 11662 11663 /// Checks whether the given value, which currently has the given 11664 /// source semantics, has the same value when coerced through the 11665 /// target semantics. 11666 static bool IsSameFloatAfterCast(const llvm::APFloat &value, 11667 const llvm::fltSemantics &Src, 11668 const llvm::fltSemantics &Tgt) { 11669 llvm::APFloat truncated = value; 11670 11671 bool ignored; 11672 truncated.convert(Src, llvm::APFloat::rmNearestTiesToEven, &ignored); 11673 truncated.convert(Tgt, llvm::APFloat::rmNearestTiesToEven, &ignored); 11674 11675 return truncated.bitwiseIsEqual(value); 11676 } 11677 11678 /// Checks whether the given value, which currently has the given 11679 /// source semantics, has the same value when coerced through the 11680 /// target semantics. 
11681 /// 11682 /// The value might be a vector of floats (or a complex number). 11683 static bool IsSameFloatAfterCast(const APValue &value, 11684 const llvm::fltSemantics &Src, 11685 const llvm::fltSemantics &Tgt) { 11686 if (value.isFloat()) 11687 return IsSameFloatAfterCast(value.getFloat(), Src, Tgt); 11688 11689 if (value.isVector()) { 11690 for (unsigned i = 0, e = value.getVectorLength(); i != e; ++i) 11691 if (!IsSameFloatAfterCast(value.getVectorElt(i), Src, Tgt)) 11692 return false; 11693 return true; 11694 } 11695 11696 assert(value.isComplexFloat()); 11697 return (IsSameFloatAfterCast(value.getComplexFloatReal(), Src, Tgt) && 11698 IsSameFloatAfterCast(value.getComplexFloatImag(), Src, Tgt)); 11699 } 11700 11701 static void AnalyzeImplicitConversions(Sema &S, Expr *E, SourceLocation CC, 11702 bool IsListInit = false); 11703 11704 static bool IsEnumConstOrFromMacro(Sema &S, Expr *E) { 11705 // Suppress cases where we are comparing against an enum constant. 11706 if (const DeclRefExpr *DR = 11707 dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) 11708 if (isa<EnumConstantDecl>(DR->getDecl())) 11709 return true; 11710 11711 // Suppress cases where the value is expanded from a macro, unless that macro 11712 // is how a language represents a boolean literal. This is the case in both C 11713 // and Objective-C. 11714 SourceLocation BeginLoc = E->getBeginLoc(); 11715 if (BeginLoc.isMacroID()) { 11716 StringRef MacroName = Lexer::getImmediateMacroName( 11717 BeginLoc, S.getSourceManager(), S.getLangOpts()); 11718 return MacroName != "YES" && MacroName != "NO" && 11719 MacroName != "true" && MacroName != "false"; 11720 } 11721 11722 return false; 11723 } 11724 11725 static bool isKnownToHaveUnsignedValue(Expr *E) { 11726 return E->getType()->isIntegerType() && 11727 (!E->getType()->isSignedIntegerType() || 11728 !E->IgnoreParenImpCasts()->getType()->isSignedIntegerType()); 11729 } 11730 11731 namespace { 11732 /// The promoted range of values of a type. In general this has the 11733 /// following structure: 11734 /// 11735 /// |-----------| . . . |-----------| 11736 /// ^ ^ ^ ^ 11737 /// Min HoleMin HoleMax Max 11738 /// 11739 /// ... where there is only a hole if a signed type is promoted to unsigned 11740 /// (in which case Min and Max are the smallest and largest representable 11741 /// values). 11742 struct PromotedRange { 11743 // Min, or HoleMax if there is a hole. 11744 llvm::APSInt PromotedMin; 11745 // Max, or HoleMin if there is a hole. 11746 llvm::APSInt PromotedMax; 11747 11748 PromotedRange(IntRange R, unsigned BitWidth, bool Unsigned) { 11749 if (R.Width == 0) 11750 PromotedMin = PromotedMax = llvm::APSInt(BitWidth, Unsigned); 11751 else if (R.Width >= BitWidth && !Unsigned) { 11752 // Promotion made the type *narrower*. This happens when promoting 11753 // a < 32-bit unsigned / <= 32-bit signed bit-field to 'signed int'. 11754 // Treat all values of 'signed int' as being in range for now. 11755 PromotedMin = llvm::APSInt::getMinValue(BitWidth, Unsigned); 11756 PromotedMax = llvm::APSInt::getMaxValue(BitWidth, Unsigned); 11757 } else { 11758 PromotedMin = llvm::APSInt::getMinValue(R.Width, R.NonNegative) 11759 .extOrTrunc(BitWidth); 11760 PromotedMin.setIsUnsigned(Unsigned); 11761 11762 PromotedMax = llvm::APSInt::getMaxValue(R.Width, R.NonNegative) 11763 .extOrTrunc(BitWidth); 11764 PromotedMax.setIsUnsigned(Unsigned); 11765 } 11766 } 11767 11768 // Determine whether this range is contiguous (has no hole). 
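  // For example (illustrative): a 'signed char' range promoted to 'unsigned
  // int' covers [0, 127] plus the wrapped-around values near UINT_MAX, with a
  // hole in between, so PromotedMin > PromotedMax.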
11769 bool isContiguous() const { return PromotedMin <= PromotedMax; } 11770 11771 // Where a constant value is within the range. 11772 enum ComparisonResult { 11773 LT = 0x1, 11774 LE = 0x2, 11775 GT = 0x4, 11776 GE = 0x8, 11777 EQ = 0x10, 11778 NE = 0x20, 11779 InRangeFlag = 0x40, 11780 11781 Less = LE | LT | NE, 11782 Min = LE | InRangeFlag, 11783 InRange = InRangeFlag, 11784 Max = GE | InRangeFlag, 11785 Greater = GE | GT | NE, 11786 11787 OnlyValue = LE | GE | EQ | InRangeFlag, 11788 InHole = NE 11789 }; 11790 11791 ComparisonResult compare(const llvm::APSInt &Value) const { 11792 assert(Value.getBitWidth() == PromotedMin.getBitWidth() && 11793 Value.isUnsigned() == PromotedMin.isUnsigned()); 11794 if (!isContiguous()) { 11795 assert(Value.isUnsigned() && "discontiguous range for signed compare"); 11796 if (Value.isMinValue()) return Min; 11797 if (Value.isMaxValue()) return Max; 11798 if (Value >= PromotedMin) return InRange; 11799 if (Value <= PromotedMax) return InRange; 11800 return InHole; 11801 } 11802 11803 switch (llvm::APSInt::compareValues(Value, PromotedMin)) { 11804 case -1: return Less; 11805 case 0: return PromotedMin == PromotedMax ? OnlyValue : Min; 11806 case 1: 11807 switch (llvm::APSInt::compareValues(Value, PromotedMax)) { 11808 case -1: return InRange; 11809 case 0: return Max; 11810 case 1: return Greater; 11811 } 11812 } 11813 11814 llvm_unreachable("impossible compare result"); 11815 } 11816 11817 static llvm::Optional<StringRef> 11818 constantValue(BinaryOperatorKind Op, ComparisonResult R, bool ConstantOnRHS) { 11819 if (Op == BO_Cmp) { 11820 ComparisonResult LTFlag = LT, GTFlag = GT; 11821 if (ConstantOnRHS) std::swap(LTFlag, GTFlag); 11822 11823 if (R & EQ) return StringRef("'std::strong_ordering::equal'"); 11824 if (R & LTFlag) return StringRef("'std::strong_ordering::less'"); 11825 if (R & GTFlag) return StringRef("'std::strong_ordering::greater'"); 11826 return llvm::None; 11827 } 11828 11829 ComparisonResult TrueFlag, FalseFlag; 11830 if (Op == BO_EQ) { 11831 TrueFlag = EQ; 11832 FalseFlag = NE; 11833 } else if (Op == BO_NE) { 11834 TrueFlag = NE; 11835 FalseFlag = EQ; 11836 } else { 11837 if ((Op == BO_LT || Op == BO_GE) ^ ConstantOnRHS) { 11838 TrueFlag = LT; 11839 FalseFlag = GE; 11840 } else { 11841 TrueFlag = GT; 11842 FalseFlag = LE; 11843 } 11844 if (Op == BO_GE || Op == BO_LE) 11845 std::swap(TrueFlag, FalseFlag); 11846 } 11847 if (R & TrueFlag) 11848 return StringRef("true"); 11849 if (R & FalseFlag) 11850 return StringRef("false"); 11851 return llvm::None; 11852 } 11853 }; 11854 } 11855 11856 static bool HasEnumType(Expr *E) { 11857 // Strip off implicit integral promotions. 11858 while (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) { 11859 if (ICE->getCastKind() != CK_IntegralCast && 11860 ICE->getCastKind() != CK_NoOp) 11861 break; 11862 E = ICE->getSubExpr(); 11863 } 11864 11865 return E->getType()->isEnumeralType(); 11866 } 11867 11868 static int classifyConstantValue(Expr *Constant) { 11869 // The values of this enumeration are used in the diagnostics 11870 // diag::warn_out_of_range_compare and diag::warn_tautological_bool_compare. 11871 enum ConstantValueKind { 11872 Miscellaneous = 0, 11873 LiteralTrue, 11874 LiteralFalse 11875 }; 11876 if (auto *BL = dyn_cast<CXXBoolLiteralExpr>(Constant)) 11877 return BL->getValue() ? 
                              ConstantValueKind::LiteralTrue
                            : ConstantValueKind::LiteralFalse;
  return ConstantValueKind::Miscellaneous;
}

static bool CheckTautologicalComparison(Sema &S, BinaryOperator *E,
                                        Expr *Constant, Expr *Other,
                                        const llvm::APSInt &Value,
                                        bool RhsConstant) {
  if (S.inTemplateInstantiation())
    return false;

  Expr *OriginalOther = Other;

  Constant = Constant->IgnoreParenImpCasts();
  Other = Other->IgnoreParenImpCasts();

  // Suppress warnings on tautological comparisons between values of the same
  // enumeration type. There are only two ways we could warn on this:
  //  - If the constant is outside the range of representable values of
  //    the enumeration. In such a case, we should warn about the cast
  //    to enumeration type, not about the comparison.
  //  - If the constant is the maximum / minimum in-range value. For an
  //    enumeration type, such comparisons can be meaningful and useful.
  if (Constant->getType()->isEnumeralType() &&
      S.Context.hasSameUnqualifiedType(Constant->getType(), Other->getType()))
    return false;

  IntRange OtherValueRange = GetExprRange(
      S.Context, Other, S.isConstantEvaluated(), /*Approximate*/ false);

  QualType OtherT = Other->getType();
  if (const auto *AT = OtherT->getAs<AtomicType>())
    OtherT = AT->getValueType();
  IntRange OtherTypeRange = IntRange::forValueOfType(S.Context, OtherT);

  // Special case for ObjC BOOL on targets where it's a typedef for a signed
  // char (namely, macOS). FIXME: IntRange::forValueOfType should do this.
  bool IsObjCSignedCharBool = S.getLangOpts().ObjC &&
                              S.NSAPIObj->isObjCBOOLType(OtherT) &&
                              OtherT->isSpecificBuiltinType(BuiltinType::SChar);

  // Whether we're treating Other as being a bool because of the form of
  // expression despite it having another type (typically 'int' in C).
  bool OtherIsBooleanDespiteType =
      !OtherT->isBooleanType() && Other->isKnownToHaveBooleanValue();
  if (OtherIsBooleanDespiteType || IsObjCSignedCharBool)
    OtherTypeRange = OtherValueRange = IntRange::forBoolType();

  // Check if all values in the range of possible values of this expression
  // lead to the same comparison outcome.
  PromotedRange OtherPromotedValueRange(OtherValueRange, Value.getBitWidth(),
                                        Value.isUnsigned());
  auto Cmp = OtherPromotedValueRange.compare(Value);
  auto Result = PromotedRange::constantValue(E->getOpcode(), Cmp, RhsConstant);
  if (!Result)
    return false;

  // Also consider the range determined by the type alone. This allows us to
  // classify the warning under the proper diagnostic group.
  bool TautologicalTypeCompare = false;
  {
    PromotedRange OtherPromotedTypeRange(OtherTypeRange, Value.getBitWidth(),
                                         Value.isUnsigned());
    auto TypeCmp = OtherPromotedTypeRange.compare(Value);
    if (auto TypeResult = PromotedRange::constantValue(E->getOpcode(), TypeCmp,
                                                       RhsConstant)) {
      TautologicalTypeCompare = true;
      Cmp = TypeCmp;
      Result = TypeResult;
    }
  }

  // Don't warn if the non-constant operand actually always evaluates to the
  // same value.
  if (!TautologicalTypeCompare && OtherValueRange.Width == 0)
    return false;

  // Suppress the diagnostic for an in-range comparison if the constant comes
  // from a macro or enumerator.
We don't want to diagnose 11957 // 11958 // some_long_value <= INT_MAX 11959 // 11960 // when sizeof(int) == sizeof(long). 11961 bool InRange = Cmp & PromotedRange::InRangeFlag; 11962 if (InRange && IsEnumConstOrFromMacro(S, Constant)) 11963 return false; 11964 11965 // A comparison of an unsigned bit-field against 0 is really a type problem, 11966 // even though at the type level the bit-field might promote to 'signed int'. 11967 if (Other->refersToBitField() && InRange && Value == 0 && 11968 Other->getType()->isUnsignedIntegerOrEnumerationType()) 11969 TautologicalTypeCompare = true; 11970 11971 // If this is a comparison to an enum constant, include that 11972 // constant in the diagnostic. 11973 const EnumConstantDecl *ED = nullptr; 11974 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Constant)) 11975 ED = dyn_cast<EnumConstantDecl>(DR->getDecl()); 11976 11977 // Should be enough for uint128 (39 decimal digits) 11978 SmallString<64> PrettySourceValue; 11979 llvm::raw_svector_ostream OS(PrettySourceValue); 11980 if (ED) { 11981 OS << '\'' << *ED << "' (" << Value << ")"; 11982 } else if (auto *BL = dyn_cast<ObjCBoolLiteralExpr>( 11983 Constant->IgnoreParenImpCasts())) { 11984 OS << (BL->getValue() ? "YES" : "NO"); 11985 } else { 11986 OS << Value; 11987 } 11988 11989 if (!TautologicalTypeCompare) { 11990 S.Diag(E->getOperatorLoc(), diag::warn_tautological_compare_value_range) 11991 << RhsConstant << OtherValueRange.Width << OtherValueRange.NonNegative 11992 << E->getOpcodeStr() << OS.str() << *Result 11993 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange(); 11994 return true; 11995 } 11996 11997 if (IsObjCSignedCharBool) { 11998 S.DiagRuntimeBehavior(E->getOperatorLoc(), E, 11999 S.PDiag(diag::warn_tautological_compare_objc_bool) 12000 << OS.str() << *Result); 12001 return true; 12002 } 12003 12004 // FIXME: We use a somewhat different formatting for the in-range cases and 12005 // cases involving boolean values for historical reasons. We should pick a 12006 // consistent way of presenting these diagnostics. 12007 if (!InRange || Other->isKnownToHaveBooleanValue()) { 12008 12009 S.DiagRuntimeBehavior( 12010 E->getOperatorLoc(), E, 12011 S.PDiag(!InRange ? diag::warn_out_of_range_compare 12012 : diag::warn_tautological_bool_compare) 12013 << OS.str() << classifyConstantValue(Constant) << OtherT 12014 << OtherIsBooleanDespiteType << *Result 12015 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange()); 12016 } else { 12017 bool IsCharTy = OtherT.withoutLocalFastQualifiers() == S.Context.CharTy; 12018 unsigned Diag = 12019 (isKnownToHaveUnsignedValue(OriginalOther) && Value == 0) 12020 ? (HasEnumType(OriginalOther) 12021 ? diag::warn_unsigned_enum_always_true_comparison 12022 : IsCharTy ? diag::warn_unsigned_char_always_true_comparison 12023 : diag::warn_unsigned_always_true_comparison) 12024 : diag::warn_tautological_constant_compare; 12025 12026 S.Diag(E->getOperatorLoc(), Diag) 12027 << RhsConstant << OtherT << E->getOpcodeStr() << OS.str() << *Result 12028 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange(); 12029 } 12030 12031 return true; 12032 } 12033 12034 /// Analyze the operands of the given comparison. Implements the 12035 /// fallback case from AnalyzeComparison. 12036 static void AnalyzeImpConvsInComparison(Sema &S, BinaryOperator *E) { 12037 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 12038 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 12039 } 12040 12041 /// Implements -Wsign-compare. 
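///
/// A classic instance (illustrative):
/// \code
///   unsigned u = ...;
///   int i = -1;
///   if (i < u) {} // 'i' converts to unsigned; the comparison is surprising
/// \endcode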
12042 /// 12043 /// \param E the binary operator to check for warnings 12044 static void AnalyzeComparison(Sema &S, BinaryOperator *E) { 12045 // The type the comparison is being performed in. 12046 QualType T = E->getLHS()->getType(); 12047 12048 // Only analyze comparison operators where both sides have been converted to 12049 // the same type. 12050 if (!S.Context.hasSameUnqualifiedType(T, E->getRHS()->getType())) 12051 return AnalyzeImpConvsInComparison(S, E); 12052 12053 // Don't analyze value-dependent comparisons directly. 12054 if (E->isValueDependent()) 12055 return AnalyzeImpConvsInComparison(S, E); 12056 12057 Expr *LHS = E->getLHS(); 12058 Expr *RHS = E->getRHS(); 12059 12060 if (T->isIntegralType(S.Context)) { 12061 Optional<llvm::APSInt> RHSValue = RHS->getIntegerConstantExpr(S.Context); 12062 Optional<llvm::APSInt> LHSValue = LHS->getIntegerConstantExpr(S.Context); 12063 12064 // We don't care about expressions whose result is a constant. 12065 if (RHSValue && LHSValue) 12066 return AnalyzeImpConvsInComparison(S, E); 12067 12068 // We only care about expressions where just one side is literal 12069 if ((bool)RHSValue ^ (bool)LHSValue) { 12070 // Is the constant on the RHS or LHS? 12071 const bool RhsConstant = (bool)RHSValue; 12072 Expr *Const = RhsConstant ? RHS : LHS; 12073 Expr *Other = RhsConstant ? LHS : RHS; 12074 const llvm::APSInt &Value = RhsConstant ? *RHSValue : *LHSValue; 12075 12076 // Check whether an integer constant comparison results in a value 12077 // of 'true' or 'false'. 12078 if (CheckTautologicalComparison(S, E, Const, Other, Value, RhsConstant)) 12079 return AnalyzeImpConvsInComparison(S, E); 12080 } 12081 } 12082 12083 if (!T->hasUnsignedIntegerRepresentation()) { 12084 // We don't do anything special if this isn't an unsigned integral 12085 // comparison: we're only interested in integral comparisons, and 12086 // signed comparisons only happen in cases we don't care to warn about. 12087 return AnalyzeImpConvsInComparison(S, E); 12088 } 12089 12090 LHS = LHS->IgnoreParenImpCasts(); 12091 RHS = RHS->IgnoreParenImpCasts(); 12092 12093 if (!S.getLangOpts().CPlusPlus) { 12094 // Avoid warning about comparison of integers with different signs when 12095 // RHS/LHS has a `typeof(E)` type whose sign is different from the sign of 12096 // the type of `E`. 12097 if (const auto *TET = dyn_cast<TypeOfExprType>(LHS->getType())) 12098 LHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts(); 12099 if (const auto *TET = dyn_cast<TypeOfExprType>(RHS->getType())) 12100 RHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts(); 12101 } 12102 12103 // Check to see if one of the (unmodified) operands is of different 12104 // signedness. 12105 Expr *signedOperand, *unsignedOperand; 12106 if (LHS->getType()->hasSignedIntegerRepresentation()) { 12107 assert(!RHS->getType()->hasSignedIntegerRepresentation() && 12108 "unsigned comparison between two signed integer expressions?"); 12109 signedOperand = LHS; 12110 unsignedOperand = RHS; 12111 } else if (RHS->getType()->hasSignedIntegerRepresentation()) { 12112 signedOperand = RHS; 12113 unsignedOperand = LHS; 12114 } else { 12115 return AnalyzeImpConvsInComparison(S, E); 12116 } 12117 12118 // Otherwise, calculate the effective range of the signed operand. 12119 IntRange signedRange = GetExprRange( 12120 S.Context, signedOperand, S.isConstantEvaluated(), /*Approximate*/ true); 12121 12122 // Go ahead and analyze implicit conversions in the operands. Note 12123 // that we skip the implicit conversions on both sides. 
12124 AnalyzeImplicitConversions(S, LHS, E->getOperatorLoc()); 12125 AnalyzeImplicitConversions(S, RHS, E->getOperatorLoc()); 12126 12127 // If the signed range is non-negative, -Wsign-compare won't fire. 12128 if (signedRange.NonNegative) 12129 return; 12130 12131 // For (in)equality comparisons, if the unsigned operand is a 12132 // constant which cannot collide with a overflowed signed operand, 12133 // then reinterpreting the signed operand as unsigned will not 12134 // change the result of the comparison. 12135 if (E->isEqualityOp()) { 12136 unsigned comparisonWidth = S.Context.getIntWidth(T); 12137 IntRange unsignedRange = 12138 GetExprRange(S.Context, unsignedOperand, S.isConstantEvaluated(), 12139 /*Approximate*/ true); 12140 12141 // We should never be unable to prove that the unsigned operand is 12142 // non-negative. 12143 assert(unsignedRange.NonNegative && "unsigned range includes negative?"); 12144 12145 if (unsignedRange.Width < comparisonWidth) 12146 return; 12147 } 12148 12149 S.DiagRuntimeBehavior(E->getOperatorLoc(), E, 12150 S.PDiag(diag::warn_mixed_sign_comparison) 12151 << LHS->getType() << RHS->getType() 12152 << LHS->getSourceRange() << RHS->getSourceRange()); 12153 } 12154 12155 /// Analyzes an attempt to assign the given value to a bitfield. 12156 /// 12157 /// Returns true if there was something fishy about the attempt. 12158 static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init, 12159 SourceLocation InitLoc) { 12160 assert(Bitfield->isBitField()); 12161 if (Bitfield->isInvalidDecl()) 12162 return false; 12163 12164 // White-list bool bitfields. 12165 QualType BitfieldType = Bitfield->getType(); 12166 if (BitfieldType->isBooleanType()) 12167 return false; 12168 12169 if (BitfieldType->isEnumeralType()) { 12170 EnumDecl *BitfieldEnumDecl = BitfieldType->castAs<EnumType>()->getDecl(); 12171 // If the underlying enum type was not explicitly specified as an unsigned 12172 // type and the enum contain only positive values, MSVC++ will cause an 12173 // inconsistency by storing this as a signed type. 12174 if (S.getLangOpts().CPlusPlus11 && 12175 !BitfieldEnumDecl->getIntegerTypeSourceInfo() && 12176 BitfieldEnumDecl->getNumPositiveBits() > 0 && 12177 BitfieldEnumDecl->getNumNegativeBits() == 0) { 12178 S.Diag(InitLoc, diag::warn_no_underlying_type_specified_for_enum_bitfield) 12179 << BitfieldEnumDecl; 12180 } 12181 } 12182 12183 if (Bitfield->getType()->isBooleanType()) 12184 return false; 12185 12186 // Ignore value- or type-dependent expressions. 12187 if (Bitfield->getBitWidth()->isValueDependent() || 12188 Bitfield->getBitWidth()->isTypeDependent() || 12189 Init->isValueDependent() || 12190 Init->isTypeDependent()) 12191 return false; 12192 12193 Expr *OriginalInit = Init->IgnoreParenImpCasts(); 12194 unsigned FieldWidth = Bitfield->getBitWidthValue(S.Context); 12195 12196 Expr::EvalResult Result; 12197 if (!OriginalInit->EvaluateAsInt(Result, S.Context, 12198 Expr::SE_AllowSideEffects)) { 12199 // The RHS is not constant. If the RHS has an enum type, make sure the 12200 // bitfield is wide enough to hold all the values of the enum without 12201 // truncation. 12202 if (const auto *EnumTy = OriginalInit->getType()->getAs<EnumType>()) { 12203 EnumDecl *ED = EnumTy->getDecl(); 12204 bool SignedBitfield = BitfieldType->isSignedIntegerType(); 12205 12206 // Enum types are implicitly signed on Windows, so check if there are any 12207 // negative enumerators to see if the enum was intended to be signed or 12208 // not. 
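// (getNumNegativeBits() is nonzero only when some enumerator has a negative
// value.)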
12209 bool SignedEnum = ED->getNumNegativeBits() > 0; 12210 12211 // Check for surprising sign changes when assigning enum values to a 12212 // bitfield of different signedness. If the bitfield is signed and we 12213 // have exactly the right number of bits to store this unsigned enum, 12214 // suggest changing the enum to an unsigned type. This typically happens 12215 // on Windows where unfixed enums always use an underlying type of 'int'. 12216 unsigned DiagID = 0; 12217 if (SignedEnum && !SignedBitfield) { 12218 DiagID = diag::warn_unsigned_bitfield_assigned_signed_enum; 12219 } else if (SignedBitfield && !SignedEnum && 12220 ED->getNumPositiveBits() == FieldWidth) { 12221 DiagID = diag::warn_signed_bitfield_enum_conversion; 12222 } 12223 12224 if (DiagID) { 12225 S.Diag(InitLoc, DiagID) << Bitfield << ED; 12226 TypeSourceInfo *TSI = Bitfield->getTypeSourceInfo(); 12227 SourceRange TypeRange = 12228 TSI ? TSI->getTypeLoc().getSourceRange() : SourceRange(); 12229 S.Diag(Bitfield->getTypeSpecStartLoc(), diag::note_change_bitfield_sign) 12230 << SignedEnum << TypeRange; 12231 } 12232 12233 // Compute the required bitwidth. If the enum has negative values, we need 12234 // one more bit than the normal number of positive bits to represent the 12235 // sign bit. 12236 unsigned BitsNeeded = SignedEnum ? std::max(ED->getNumPositiveBits() + 1, 12237 ED->getNumNegativeBits()) 12238 : ED->getNumPositiveBits(); 12239 12240 // Check the bitwidth. 12241 if (BitsNeeded > FieldWidth) { 12242 Expr *WidthExpr = Bitfield->getBitWidth(); 12243 S.Diag(InitLoc, diag::warn_bitfield_too_small_for_enum) 12244 << Bitfield << ED; 12245 S.Diag(WidthExpr->getExprLoc(), diag::note_widen_bitfield) 12246 << BitsNeeded << ED << WidthExpr->getSourceRange(); 12247 } 12248 } 12249 12250 return false; 12251 } 12252 12253 llvm::APSInt Value = Result.Val.getInt(); 12254 12255 unsigned OriginalWidth = Value.getBitWidth(); 12256 12257 if (!Value.isSigned() || Value.isNegative()) 12258 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(OriginalInit)) 12259 if (UO->getOpcode() == UO_Minus || UO->getOpcode() == UO_Not) 12260 OriginalWidth = Value.getMinSignedBits(); 12261 12262 if (OriginalWidth <= FieldWidth) 12263 return false; 12264 12265 // Compute the value which the bitfield will contain. 12266 llvm::APSInt TruncatedValue = Value.trunc(FieldWidth); 12267 TruncatedValue.setIsSigned(BitfieldType->isSignedIntegerType()); 12268 12269 // Check whether the stored value is equal to the original value. 12270 TruncatedValue = TruncatedValue.extend(OriginalWidth); 12271 if (llvm::APSInt::isSameValue(Value, TruncatedValue)) 12272 return false; 12273 12274 // Special-case bitfields of width 1: booleans are naturally 0/1, and 12275 // therefore don't strictly fit into a signed bitfield of width 1. 12276 if (FieldWidth == 1 && Value == 1) 12277 return false; 12278 12279 std::string PrettyValue = toString(Value, 10); 12280 std::string PrettyTrunc = toString(TruncatedValue, 10); 12281 12282 S.Diag(InitLoc, diag::warn_impcast_bitfield_precision_constant) 12283 << PrettyValue << PrettyTrunc << OriginalInit->getType() 12284 << Init->getSourceRange(); 12285 12286 return true; 12287 } 12288 12289 /// Analyze the given simple or compound assignment for warning-worthy 12290 /// operations. 12291 static void AnalyzeAssignment(Sema &S, BinaryOperator *E) { 12292 // Just recurse on the LHS. 
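// (The assignment does not convert its LHS, but the LHS may still contain
// subexpressions with conversions worth diagnosing.)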
12293 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 12294 12295 // We want to recurse on the RHS as normal unless we're assigning to 12296 // a bitfield. 12297 if (FieldDecl *Bitfield = E->getLHS()->getSourceBitField()) { 12298 if (AnalyzeBitFieldAssignment(S, Bitfield, E->getRHS(), 12299 E->getOperatorLoc())) { 12300 // Recurse, ignoring any implicit conversions on the RHS. 12301 return AnalyzeImplicitConversions(S, E->getRHS()->IgnoreParenImpCasts(), 12302 E->getOperatorLoc()); 12303 } 12304 } 12305 12306 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 12307 12308 // Diagnose implicitly sequentially-consistent atomic assignment. 12309 if (E->getLHS()->getType()->isAtomicType()) 12310 S.Diag(E->getRHS()->getBeginLoc(), diag::warn_atomic_implicit_seq_cst); 12311 } 12312 12313 /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. 12314 static void DiagnoseImpCast(Sema &S, Expr *E, QualType SourceType, QualType T, 12315 SourceLocation CContext, unsigned diag, 12316 bool pruneControlFlow = false) { 12317 if (pruneControlFlow) { 12318 S.DiagRuntimeBehavior(E->getExprLoc(), E, 12319 S.PDiag(diag) 12320 << SourceType << T << E->getSourceRange() 12321 << SourceRange(CContext)); 12322 return; 12323 } 12324 S.Diag(E->getExprLoc(), diag) 12325 << SourceType << T << E->getSourceRange() << SourceRange(CContext); 12326 } 12327 12328 /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. 12329 static void DiagnoseImpCast(Sema &S, Expr *E, QualType T, 12330 SourceLocation CContext, 12331 unsigned diag, bool pruneControlFlow = false) { 12332 DiagnoseImpCast(S, E, E->getType(), T, CContext, diag, pruneControlFlow); 12333 } 12334 12335 static bool isObjCSignedCharBool(Sema &S, QualType Ty) { 12336 return Ty->isSpecificBuiltinType(BuiltinType::SChar) && 12337 S.getLangOpts().ObjC && S.NSAPIObj->isObjCBOOLType(Ty); 12338 } 12339 12340 static void adornObjCBoolConversionDiagWithTernaryFixit( 12341 Sema &S, Expr *SourceExpr, const Sema::SemaDiagnosticBuilder &Builder) { 12342 Expr *Ignored = SourceExpr->IgnoreImplicit(); 12343 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(Ignored)) 12344 Ignored = OVE->getSourceExpr(); 12345 bool NeedsParens = isa<AbstractConditionalOperator>(Ignored) || 12346 isa<BinaryOperator>(Ignored) || 12347 isa<CXXOperatorCallExpr>(Ignored); 12348 SourceLocation EndLoc = S.getLocForEndOfToken(SourceExpr->getEndLoc()); 12349 if (NeedsParens) 12350 Builder << FixItHint::CreateInsertion(SourceExpr->getBeginLoc(), "(") 12351 << FixItHint::CreateInsertion(EndLoc, ")"); 12352 Builder << FixItHint::CreateInsertion(EndLoc, " ? YES : NO"); 12353 } 12354 12355 /// Diagnose an implicit cast from a floating point value to an integer value. 
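/// Covers, for instance, 'int i = 2.5;' (the value is truncated to 2) as well
/// as non-constant floating-point to integer conversions.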
12356 static void DiagnoseFloatingImpCast(Sema &S, Expr *E, QualType T, 12357 SourceLocation CContext) { 12358 const bool IsBool = T->isSpecificBuiltinType(BuiltinType::Bool); 12359 const bool PruneWarnings = S.inTemplateInstantiation(); 12360 12361 Expr *InnerE = E->IgnoreParenImpCasts(); 12362 // We also want to warn on, e.g., "int i = -1.234" 12363 if (UnaryOperator *UOp = dyn_cast<UnaryOperator>(InnerE)) 12364 if (UOp->getOpcode() == UO_Minus || UOp->getOpcode() == UO_Plus) 12365 InnerE = UOp->getSubExpr()->IgnoreParenImpCasts(); 12366 12367 const bool IsLiteral = 12368 isa<FloatingLiteral>(E) || isa<FloatingLiteral>(InnerE); 12369 12370 llvm::APFloat Value(0.0); 12371 bool IsConstant = 12372 E->EvaluateAsFloat(Value, S.Context, Expr::SE_AllowSideEffects); 12373 if (!IsConstant) { 12374 if (isObjCSignedCharBool(S, T)) { 12375 return adornObjCBoolConversionDiagWithTernaryFixit( 12376 S, E, 12377 S.Diag(CContext, diag::warn_impcast_float_to_objc_signed_char_bool) 12378 << E->getType()); 12379 } 12380 12381 return DiagnoseImpCast(S, E, T, CContext, 12382 diag::warn_impcast_float_integer, PruneWarnings); 12383 } 12384 12385 bool isExact = false; 12386 12387 llvm::APSInt IntegerValue(S.Context.getIntWidth(T), 12388 T->hasUnsignedIntegerRepresentation()); 12389 llvm::APFloat::opStatus Result = Value.convertToInteger( 12390 IntegerValue, llvm::APFloat::rmTowardZero, &isExact); 12391 12392 // FIXME: Force the precision of the source value down so we don't print 12393 // digits which are usually useless (we don't really care here if we 12394 // truncate a digit by accident in edge cases). Ideally, APFloat::toString 12395 // would automatically print the shortest representation, but it's a bit 12396 // tricky to implement. 12397 SmallString<16> PrettySourceValue; 12398 unsigned precision = llvm::APFloat::semanticsPrecision(Value.getSemantics()); 12399 precision = (precision * 59 + 195) / 196; 12400 Value.toString(PrettySourceValue, precision); 12401 12402 if (isObjCSignedCharBool(S, T) && IntegerValue != 0 && IntegerValue != 1) { 12403 return adornObjCBoolConversionDiagWithTernaryFixit( 12404 S, E, 12405 S.Diag(CContext, diag::warn_impcast_constant_value_to_objc_bool) 12406 << PrettySourceValue); 12407 } 12408 12409 if (Result == llvm::APFloat::opOK && isExact) { 12410 if (IsLiteral) return; 12411 return DiagnoseImpCast(S, E, T, CContext, diag::warn_impcast_float_integer, 12412 PruneWarnings); 12413 } 12414 12415 // Conversion of a floating-point value to a non-bool integer where the 12416 // integral part cannot be represented by the integer type is undefined. 12417 if (!IsBool && Result == llvm::APFloat::opInvalidOp) 12418 return DiagnoseImpCast( 12419 S, E, T, CContext, 12420 IsLiteral ? diag::warn_impcast_literal_float_to_integer_out_of_range 12421 : diag::warn_impcast_float_to_integer_out_of_range, 12422 PruneWarnings); 12423 12424 unsigned DiagID = 0; 12425 if (IsLiteral) { 12426 // Warn on floating point literal to integer. 12427 DiagID = diag::warn_impcast_literal_float_to_integer; 12428 } else if (IntegerValue == 0) { 12429 if (Value.isZero()) { // Skip -0.0 to 0 conversion. 12430 return DiagnoseImpCast(S, E, T, CContext, 12431 diag::warn_impcast_float_integer, PruneWarnings); 12432 } 12433 // Warn on non-zero to zero conversion. 
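// (e.g. a constant expression, other than a plain literal, that evaluates to
// 0.5: the integer result is 0 even though the source value is not zero.)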
12434 DiagID = diag::warn_impcast_float_to_integer_zero; 12435 } else { 12436 if (IntegerValue.isUnsigned()) { 12437 if (!IntegerValue.isMaxValue()) { 12438 return DiagnoseImpCast(S, E, T, CContext, 12439 diag::warn_impcast_float_integer, PruneWarnings); 12440 } 12441 } else { // IntegerValue.isSigned() 12442 if (!IntegerValue.isMaxSignedValue() && 12443 !IntegerValue.isMinSignedValue()) { 12444 return DiagnoseImpCast(S, E, T, CContext, 12445 diag::warn_impcast_float_integer, PruneWarnings); 12446 } 12447 } 12448 // Warn on evaluatable floating point expression to integer conversion. 12449 DiagID = diag::warn_impcast_float_to_integer; 12450 } 12451 12452 SmallString<16> PrettyTargetValue; 12453 if (IsBool) 12454 PrettyTargetValue = Value.isZero() ? "false" : "true"; 12455 else 12456 IntegerValue.toString(PrettyTargetValue); 12457 12458 if (PruneWarnings) { 12459 S.DiagRuntimeBehavior(E->getExprLoc(), E, 12460 S.PDiag(DiagID) 12461 << E->getType() << T.getUnqualifiedType() 12462 << PrettySourceValue << PrettyTargetValue 12463 << E->getSourceRange() << SourceRange(CContext)); 12464 } else { 12465 S.Diag(E->getExprLoc(), DiagID) 12466 << E->getType() << T.getUnqualifiedType() << PrettySourceValue 12467 << PrettyTargetValue << E->getSourceRange() << SourceRange(CContext); 12468 } 12469 } 12470 12471 /// Analyze the given compound assignment for the possible losing of 12472 /// floating-point precision. 12473 static void AnalyzeCompoundAssignment(Sema &S, BinaryOperator *E) { 12474 assert(isa<CompoundAssignOperator>(E) && 12475 "Must be compound assignment operation"); 12476 // Recurse on the LHS and RHS in here 12477 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 12478 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 12479 12480 if (E->getLHS()->getType()->isAtomicType()) 12481 S.Diag(E->getOperatorLoc(), diag::warn_atomic_implicit_seq_cst); 12482 12483 // Now check the outermost expression 12484 const auto *ResultBT = E->getLHS()->getType()->getAs<BuiltinType>(); 12485 const auto *RBT = cast<CompoundAssignOperator>(E) 12486 ->getComputationResultType() 12487 ->getAs<BuiltinType>(); 12488 12489 // The below checks assume source is floating point. 12490 if (!ResultBT || !RBT || !RBT->isFloatingPoint()) return; 12491 12492 // If source is floating point but target is an integer. 12493 if (ResultBT->isInteger()) 12494 return DiagnoseImpCast(S, E, E->getRHS()->getType(), E->getLHS()->getType(), 12495 E->getExprLoc(), diag::warn_impcast_float_integer); 12496 12497 if (!ResultBT->isFloatingPoint()) 12498 return; 12499 12500 // If both source and target are floating points, warn about losing precision. 12501 int Order = S.getASTContext().getFloatingTypeSemanticOrder( 12502 QualType(ResultBT, 0), QualType(RBT, 0)); 12503 if (Order < 0 && !S.SourceMgr.isInSystemMacro(E->getOperatorLoc())) 12504 // warn about dropping FP rank. 
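// (e.g. 'float f; f += 2.0;' computes in double and then truncates the result
// back to float.)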
12505 DiagnoseImpCast(S, E->getRHS(), E->getLHS()->getType(), E->getOperatorLoc(), 12506 diag::warn_impcast_float_result_precision); 12507 } 12508 12509 static std::string PrettyPrintInRange(const llvm::APSInt &Value, 12510 IntRange Range) { 12511 if (!Range.Width) return "0"; 12512 12513 llvm::APSInt ValueInRange = Value; 12514 ValueInRange.setIsSigned(!Range.NonNegative); 12515 ValueInRange = ValueInRange.trunc(Range.Width); 12516 return toString(ValueInRange, 10); 12517 } 12518 12519 static bool IsImplicitBoolFloatConversion(Sema &S, Expr *Ex, bool ToBool) { 12520 if (!isa<ImplicitCastExpr>(Ex)) 12521 return false; 12522 12523 Expr *InnerE = Ex->IgnoreParenImpCasts(); 12524 const Type *Target = S.Context.getCanonicalType(Ex->getType()).getTypePtr(); 12525 const Type *Source = 12526 S.Context.getCanonicalType(InnerE->getType()).getTypePtr(); 12527 if (Target->isDependentType()) 12528 return false; 12529 12530 const BuiltinType *FloatCandidateBT = 12531 dyn_cast<BuiltinType>(ToBool ? Source : Target); 12532 const Type *BoolCandidateType = ToBool ? Target : Source; 12533 12534 return (BoolCandidateType->isSpecificBuiltinType(BuiltinType::Bool) && 12535 FloatCandidateBT && (FloatCandidateBT->isFloatingPoint())); 12536 } 12537 12538 static void CheckImplicitArgumentConversions(Sema &S, CallExpr *TheCall, 12539 SourceLocation CC) { 12540 unsigned NumArgs = TheCall->getNumArgs(); 12541 for (unsigned i = 0; i < NumArgs; ++i) { 12542 Expr *CurrA = TheCall->getArg(i); 12543 if (!IsImplicitBoolFloatConversion(S, CurrA, true)) 12544 continue; 12545 12546 bool IsSwapped = ((i > 0) && 12547 IsImplicitBoolFloatConversion(S, TheCall->getArg(i - 1), false)); 12548 IsSwapped |= ((i < (NumArgs - 1)) && 12549 IsImplicitBoolFloatConversion(S, TheCall->getArg(i + 1), false)); 12550 if (IsSwapped) { 12551 // Warn on this floating-point to bool conversion. 12552 DiagnoseImpCast(S, CurrA->IgnoreParenImpCasts(), 12553 CurrA->getType(), CC, 12554 diag::warn_impcast_floating_point_to_bool); 12555 } 12556 } 12557 } 12558 12559 static void DiagnoseNullConversion(Sema &S, Expr *E, QualType T, 12560 SourceLocation CC) { 12561 if (S.Diags.isIgnored(diag::warn_impcast_null_pointer_to_integer, 12562 E->getExprLoc())) 12563 return; 12564 12565 // Don't warn on functions which have return type nullptr_t. 12566 if (isa<CallExpr>(E)) 12567 return; 12568 12569 // Check for NULL (GNUNull) or nullptr (CXX11_nullptr). 12570 const Expr::NullPointerConstantKind NullKind = 12571 E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull); 12572 if (NullKind != Expr::NPCK_GNUNull && NullKind != Expr::NPCK_CXX11_nullptr) 12573 return; 12574 12575 // Return if target type is a safe conversion. 12576 if (T->isAnyPointerType() || T->isBlockPointerType() || 12577 T->isMemberPointerType() || !T->isScalarType() || T->isNullPtrType()) 12578 return; 12579 12580 SourceLocation Loc = E->getSourceRange().getBegin(); 12581 12582 // Venture through the macro stacks to get to the source of macro arguments. 12583 // The new location is a better location than the complete location that was 12584 // passed in. 12585 Loc = S.SourceMgr.getTopMacroCallerLoc(Loc); 12586 CC = S.SourceMgr.getTopMacroCallerLoc(CC); 12587 12588 // __null is usually wrapped in a macro. Go up a macro if that is the case. 
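// (In C++, NULL commonly expands to __null; pointing the diagnostic at the
// spelling of 'NULL' is more useful than pointing into the macro body.)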
12589 if (NullKind == Expr::NPCK_GNUNull && Loc.isMacroID()) { 12590 StringRef MacroName = Lexer::getImmediateMacroNameForDiagnostics( 12591 Loc, S.SourceMgr, S.getLangOpts()); 12592 if (MacroName == "NULL") 12593 Loc = S.SourceMgr.getImmediateExpansionRange(Loc).getBegin(); 12594 } 12595 12596 // Only warn if the null and context location are in the same macro expansion. 12597 if (S.SourceMgr.getFileID(Loc) != S.SourceMgr.getFileID(CC)) 12598 return; 12599 12600 S.Diag(Loc, diag::warn_impcast_null_pointer_to_integer) 12601 << (NullKind == Expr::NPCK_CXX11_nullptr) << T << SourceRange(CC) 12602 << FixItHint::CreateReplacement(Loc, 12603 S.getFixItZeroLiteralForType(T, Loc)); 12604 } 12605 12606 static void checkObjCArrayLiteral(Sema &S, QualType TargetType, 12607 ObjCArrayLiteral *ArrayLiteral); 12608 12609 static void 12610 checkObjCDictionaryLiteral(Sema &S, QualType TargetType, 12611 ObjCDictionaryLiteral *DictionaryLiteral); 12612 12613 /// Check a single element within a collection literal against the 12614 /// target element type. 12615 static void checkObjCCollectionLiteralElement(Sema &S, 12616 QualType TargetElementType, 12617 Expr *Element, 12618 unsigned ElementKind) { 12619 // Skip a bitcast to 'id' or qualified 'id'. 12620 if (auto ICE = dyn_cast<ImplicitCastExpr>(Element)) { 12621 if (ICE->getCastKind() == CK_BitCast && 12622 ICE->getSubExpr()->getType()->getAs<ObjCObjectPointerType>()) 12623 Element = ICE->getSubExpr(); 12624 } 12625 12626 QualType ElementType = Element->getType(); 12627 ExprResult ElementResult(Element); 12628 if (ElementType->getAs<ObjCObjectPointerType>() && 12629 S.CheckSingleAssignmentConstraints(TargetElementType, 12630 ElementResult, 12631 false, false) 12632 != Sema::Compatible) { 12633 S.Diag(Element->getBeginLoc(), diag::warn_objc_collection_literal_element) 12634 << ElementType << ElementKind << TargetElementType 12635 << Element->getSourceRange(); 12636 } 12637 12638 if (auto ArrayLiteral = dyn_cast<ObjCArrayLiteral>(Element)) 12639 checkObjCArrayLiteral(S, TargetElementType, ArrayLiteral); 12640 else if (auto DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(Element)) 12641 checkObjCDictionaryLiteral(S, TargetElementType, DictionaryLiteral); 12642 } 12643 12644 /// Check an Objective-C array literal being converted to the given 12645 /// target type. 12646 static void checkObjCArrayLiteral(Sema &S, QualType TargetType, 12647 ObjCArrayLiteral *ArrayLiteral) { 12648 if (!S.NSArrayDecl) 12649 return; 12650 12651 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>(); 12652 if (!TargetObjCPtr) 12653 return; 12654 12655 if (TargetObjCPtr->isUnspecialized() || 12656 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() 12657 != S.NSArrayDecl->getCanonicalDecl()) 12658 return; 12659 12660 auto TypeArgs = TargetObjCPtr->getTypeArgs(); 12661 if (TypeArgs.size() != 1) 12662 return; 12663 12664 QualType TargetElementType = TypeArgs[0]; 12665 for (unsigned I = 0, N = ArrayLiteral->getNumElements(); I != N; ++I) { 12666 checkObjCCollectionLiteralElement(S, TargetElementType, 12667 ArrayLiteral->getElement(I), 12668 0); 12669 } 12670 } 12671 12672 /// Check an Objective-C dictionary literal being converted to the given 12673 /// target type. 
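/// For a literal assigned to, say, 'NSDictionary<NSString *, NSNumber *> *',
/// each key and value expression is checked against the corresponding type
/// argument.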
12674 static void 12675 checkObjCDictionaryLiteral(Sema &S, QualType TargetType, 12676 ObjCDictionaryLiteral *DictionaryLiteral) { 12677 if (!S.NSDictionaryDecl) 12678 return; 12679 12680 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>(); 12681 if (!TargetObjCPtr) 12682 return; 12683 12684 if (TargetObjCPtr->isUnspecialized() || 12685 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() 12686 != S.NSDictionaryDecl->getCanonicalDecl()) 12687 return; 12688 12689 auto TypeArgs = TargetObjCPtr->getTypeArgs(); 12690 if (TypeArgs.size() != 2) 12691 return; 12692 12693 QualType TargetKeyType = TypeArgs[0]; 12694 QualType TargetObjectType = TypeArgs[1]; 12695 for (unsigned I = 0, N = DictionaryLiteral->getNumElements(); I != N; ++I) { 12696 auto Element = DictionaryLiteral->getKeyValueElement(I); 12697 checkObjCCollectionLiteralElement(S, TargetKeyType, Element.Key, 1); 12698 checkObjCCollectionLiteralElement(S, TargetObjectType, Element.Value, 2); 12699 } 12700 } 12701 12702 // Helper function to filter out cases for constant width constant conversion. 12703 // Don't warn on char array initialization or for non-decimal values. 12704 static bool isSameWidthConstantConversion(Sema &S, Expr *E, QualType T, 12705 SourceLocation CC) { 12706 // If initializing from a constant, and the constant starts with '0', 12707 // then it is a binary, octal, or hexadecimal. Allow these constants 12708 // to fill all the bits, even if there is a sign change. 12709 if (auto *IntLit = dyn_cast<IntegerLiteral>(E->IgnoreParenImpCasts())) { 12710 const char FirstLiteralCharacter = 12711 S.getSourceManager().getCharacterData(IntLit->getBeginLoc())[0]; 12712 if (FirstLiteralCharacter == '0') 12713 return false; 12714 } 12715 12716 // If the CC location points to a '{', and the type is char, then assume 12717 // it is an array initialization. 12718 if (CC.isValid() && T->isCharType()) { 12719 const char FirstContextCharacter = 12720 S.getSourceManager().getCharacterData(CC)[0]; 12721 if (FirstContextCharacter == '{') 12722 return false; 12723 } 12724 12725 return true; 12726 } 12727 12728 static const IntegerLiteral *getIntegerLiteral(Expr *E) { 12729 const auto *IL = dyn_cast<IntegerLiteral>(E); 12730 if (!IL) { 12731 if (auto *UO = dyn_cast<UnaryOperator>(E)) { 12732 if (UO->getOpcode() == UO_Minus) 12733 return dyn_cast<IntegerLiteral>(UO->getSubExpr()); 12734 } 12735 } 12736 12737 return IL; 12738 } 12739 12740 static void DiagnoseIntInBoolContext(Sema &S, Expr *E) { 12741 E = E->IgnoreParenImpCasts(); 12742 SourceLocation ExprLoc = E->getExprLoc(); 12743 12744 if (const auto *BO = dyn_cast<BinaryOperator>(E)) { 12745 BinaryOperator::Opcode Opc = BO->getOpcode(); 12746 Expr::EvalResult Result; 12747 // Do not diagnose unsigned shifts.
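// (The 'in bool context' diagnostic below only fires for signed shift
// results; the always-true/always-false cases are handled first.)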
12748 if (Opc == BO_Shl) { 12749 const auto *LHS = getIntegerLiteral(BO->getLHS()); 12750 const auto *RHS = getIntegerLiteral(BO->getRHS()); 12751 if (LHS && LHS->getValue() == 0) 12752 S.Diag(ExprLoc, diag::warn_left_shift_always) << 0; 12753 else if (!E->isValueDependent() && LHS && RHS && 12754 RHS->getValue().isNonNegative() && 12755 E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects)) 12756 S.Diag(ExprLoc, diag::warn_left_shift_always) 12757 << (Result.Val.getInt() != 0); 12758 else if (E->getType()->isSignedIntegerType()) 12759 S.Diag(ExprLoc, diag::warn_left_shift_in_bool_context) << E; 12760 } 12761 } 12762 12763 if (const auto *CO = dyn_cast<ConditionalOperator>(E)) { 12764 const auto *LHS = getIntegerLiteral(CO->getTrueExpr()); 12765 const auto *RHS = getIntegerLiteral(CO->getFalseExpr()); 12766 if (!LHS || !RHS) 12767 return; 12768 if ((LHS->getValue() == 0 || LHS->getValue() == 1) && 12769 (RHS->getValue() == 0 || RHS->getValue() == 1)) 12770 // Do not diagnose common idioms. 12771 return; 12772 if (LHS->getValue() != 0 && RHS->getValue() != 0) 12773 S.Diag(ExprLoc, diag::warn_integer_constants_in_conditional_always_true); 12774 } 12775 } 12776 12777 static void CheckImplicitConversion(Sema &S, Expr *E, QualType T, 12778 SourceLocation CC, 12779 bool *ICContext = nullptr, 12780 bool IsListInit = false) { 12781 if (E->isTypeDependent() || E->isValueDependent()) return; 12782 12783 const Type *Source = S.Context.getCanonicalType(E->getType()).getTypePtr(); 12784 const Type *Target = S.Context.getCanonicalType(T).getTypePtr(); 12785 if (Source == Target) return; 12786 if (Target->isDependentType()) return; 12787 12788 // If the conversion context location is invalid, don't complain. We also 12789 // don't want to emit a warning if the issue occurs from the expansion of 12790 // a system macro. The problem is that 'getSpellingLoc()' is slow, so we 12791 // delay this check as long as possible. Once we detect we are in that 12792 // scenario, we just return. 12793 if (CC.isInvalid()) 12794 return; 12795 12796 if (Source->isAtomicType()) 12797 S.Diag(E->getExprLoc(), diag::warn_atomic_implicit_seq_cst); 12798 12799 // Diagnose implicit casts to bool. 12800 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) { 12801 if (isa<StringLiteral>(E)) 12802 // Warn on string literal to bool. Checks for string literals in logical 12803 // and expressions, for instance, assert(0 && "error here"), are 12804 // prevented by a check in AnalyzeImplicitConversions(). 12805 return DiagnoseImpCast(S, E, T, CC, 12806 diag::warn_impcast_string_literal_to_bool); 12807 if (isa<ObjCStringLiteral>(E) || isa<ObjCArrayLiteral>(E) || 12808 isa<ObjCDictionaryLiteral>(E) || isa<ObjCBoxedExpr>(E)) { 12809 // This covers the literal expressions that evaluate to Objective-C 12810 // objects. 12811 return DiagnoseImpCast(S, E, T, CC, 12812 diag::warn_impcast_objective_c_literal_to_bool); 12813 } 12814 if (Source->isPointerType() || Source->canDecayToPointerType()) { 12815 // Warn on pointer to bool conversion that is always true. 12816 S.DiagnoseAlwaysNonNullPointer(E, Expr::NPCK_NotNull, /*IsEqual*/ false, 12817 SourceRange(CC)); 12818 } 12819 } 12820 12821 // If we're converting a constant to an ObjC BOOL on a platform where BOOL 12822 // is a typedef for signed char (macOS), then that constant value has to be 1 12823 // or 0.
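// (e.g. 'BOOL b = 2;' stores the value 2, which does not compare equal to
// YES.)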
12824 if (isObjCSignedCharBool(S, T) && Source->isIntegralType(S.Context)) { 12825 Expr::EvalResult Result; 12826 if (E->EvaluateAsInt(Result, S.getASTContext(), 12827 Expr::SE_AllowSideEffects)) { 12828 if (Result.Val.getInt() != 1 && Result.Val.getInt() != 0) { 12829 adornObjCBoolConversionDiagWithTernaryFixit( 12830 S, E, 12831 S.Diag(CC, diag::warn_impcast_constant_value_to_objc_bool) 12832 << toString(Result.Val.getInt(), 10)); 12833 } 12834 return; 12835 } 12836 } 12837 12838 // Check implicit casts from Objective-C collection literals to specialized 12839 // collection types, e.g., NSArray<NSString *> *. 12840 if (auto *ArrayLiteral = dyn_cast<ObjCArrayLiteral>(E)) 12841 checkObjCArrayLiteral(S, QualType(Target, 0), ArrayLiteral); 12842 else if (auto *DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(E)) 12843 checkObjCDictionaryLiteral(S, QualType(Target, 0), DictionaryLiteral); 12844 12845 // Strip vector types. 12846 if (isa<VectorType>(Source)) { 12847 if (Target->isVLSTBuiltinType() && 12848 (S.Context.areCompatibleSveTypes(QualType(Target, 0), 12849 QualType(Source, 0)) || 12850 S.Context.areLaxCompatibleSveTypes(QualType(Target, 0), 12851 QualType(Source, 0)))) 12852 return; 12853 12854 if (!isa<VectorType>(Target)) { 12855 if (S.SourceMgr.isInSystemMacro(CC)) 12856 return; 12857 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_vector_scalar); 12858 } 12859 12860 // If the vector cast is cast between two vectors of the same size, it is 12861 // a bitcast, not a conversion. 12862 if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target)) 12863 return; 12864 12865 Source = cast<VectorType>(Source)->getElementType().getTypePtr(); 12866 Target = cast<VectorType>(Target)->getElementType().getTypePtr(); 12867 } 12868 if (auto VecTy = dyn_cast<VectorType>(Target)) 12869 Target = VecTy->getElementType().getTypePtr(); 12870 12871 // Strip complex types. 12872 if (isa<ComplexType>(Source)) { 12873 if (!isa<ComplexType>(Target)) { 12874 if (S.SourceMgr.isInSystemMacro(CC) || Target->isBooleanType()) 12875 return; 12876 12877 return DiagnoseImpCast(S, E, T, CC, 12878 S.getLangOpts().CPlusPlus 12879 ? diag::err_impcast_complex_scalar 12880 : diag::warn_impcast_complex_scalar); 12881 } 12882 12883 Source = cast<ComplexType>(Source)->getElementType().getTypePtr(); 12884 Target = cast<ComplexType>(Target)->getElementType().getTypePtr(); 12885 } 12886 12887 const BuiltinType *SourceBT = dyn_cast<BuiltinType>(Source); 12888 const BuiltinType *TargetBT = dyn_cast<BuiltinType>(Target); 12889 12890 // If the source is floating point... 12891 if (SourceBT && SourceBT->isFloatingPoint()) { 12892 // ...and the target is floating point... 12893 if (TargetBT && TargetBT->isFloatingPoint()) { 12894 // ...then warn if we're dropping FP rank. 12895 12896 int Order = S.getASTContext().getFloatingTypeSemanticOrder( 12897 QualType(SourceBT, 0), QualType(TargetBT, 0)); 12898 if (Order > 0) { 12899 // Don't warn about float constants that are precisely 12900 // representable in the target type. 12901 Expr::EvalResult result; 12902 if (E->EvaluateAsRValue(result, S.Context)) { 12903 // Value might be a float, a float vector, or a float complex. 12904 if (IsSameFloatAfterCast(result.Val, 12905 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0)), 12906 S.Context.getFloatTypeSemantics(QualType(SourceBT, 0)))) 12907 return; 12908 } 12909 12910 if (S.SourceMgr.isInSystemMacro(CC)) 12911 return; 12912 12913 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_float_precision); 12914 } 12915 // ... 
or possibly if we're increasing rank, too 12916 else if (Order < 0) { 12917 if (S.SourceMgr.isInSystemMacro(CC)) 12918 return; 12919 12920 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_double_promotion); 12921 } 12922 return; 12923 } 12924 12925 // If the target is integral, always warn. 12926 if (TargetBT && TargetBT->isInteger()) { 12927 if (S.SourceMgr.isInSystemMacro(CC)) 12928 return; 12929 12930 DiagnoseFloatingImpCast(S, E, T, CC); 12931 } 12932 12933 // Detect the case where a call result is converted from floating-point to 12934 // to bool, and the final argument to the call is converted from bool, to 12935 // discover this typo: 12936 // 12937 // bool b = fabs(x < 1.0); // should be "bool b = fabs(x) < 1.0;" 12938 // 12939 // FIXME: This is an incredibly special case; is there some more general 12940 // way to detect this class of misplaced-parentheses bug? 12941 if (Target->isBooleanType() && isa<CallExpr>(E)) { 12942 // Check last argument of function call to see if it is an 12943 // implicit cast from a type matching the type the result 12944 // is being cast to. 12945 CallExpr *CEx = cast<CallExpr>(E); 12946 if (unsigned NumArgs = CEx->getNumArgs()) { 12947 Expr *LastA = CEx->getArg(NumArgs - 1); 12948 Expr *InnerE = LastA->IgnoreParenImpCasts(); 12949 if (isa<ImplicitCastExpr>(LastA) && 12950 InnerE->getType()->isBooleanType()) { 12951 // Warn on this floating-point to bool conversion 12952 DiagnoseImpCast(S, E, T, CC, 12953 diag::warn_impcast_floating_point_to_bool); 12954 } 12955 } 12956 } 12957 return; 12958 } 12959 12960 // Valid casts involving fixed point types should be accounted for here. 12961 if (Source->isFixedPointType()) { 12962 if (Target->isUnsaturatedFixedPointType()) { 12963 Expr::EvalResult Result; 12964 if (E->EvaluateAsFixedPoint(Result, S.Context, Expr::SE_AllowSideEffects, 12965 S.isConstantEvaluated())) { 12966 llvm::APFixedPoint Value = Result.Val.getFixedPoint(); 12967 llvm::APFixedPoint MaxVal = S.Context.getFixedPointMax(T); 12968 llvm::APFixedPoint MinVal = S.Context.getFixedPointMin(T); 12969 if (Value > MaxVal || Value < MinVal) { 12970 S.DiagRuntimeBehavior(E->getExprLoc(), E, 12971 S.PDiag(diag::warn_impcast_fixed_point_range) 12972 << Value.toString() << T 12973 << E->getSourceRange() 12974 << clang::SourceRange(CC)); 12975 return; 12976 } 12977 } 12978 } else if (Target->isIntegerType()) { 12979 Expr::EvalResult Result; 12980 if (!S.isConstantEvaluated() && 12981 E->EvaluateAsFixedPoint(Result, S.Context, 12982 Expr::SE_AllowSideEffects)) { 12983 llvm::APFixedPoint FXResult = Result.Val.getFixedPoint(); 12984 12985 bool Overflowed; 12986 llvm::APSInt IntResult = FXResult.convertToInt( 12987 S.Context.getIntWidth(T), 12988 Target->isSignedIntegerOrEnumerationType(), &Overflowed); 12989 12990 if (Overflowed) { 12991 S.DiagRuntimeBehavior(E->getExprLoc(), E, 12992 S.PDiag(diag::warn_impcast_fixed_point_range) 12993 << FXResult.toString() << T 12994 << E->getSourceRange() 12995 << clang::SourceRange(CC)); 12996 return; 12997 } 12998 } 12999 } 13000 } else if (Target->isUnsaturatedFixedPointType()) { 13001 if (Source->isIntegerType()) { 13002 Expr::EvalResult Result; 13003 if (!S.isConstantEvaluated() && 13004 E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects)) { 13005 llvm::APSInt Value = Result.Val.getInt(); 13006 13007 bool Overflowed; 13008 llvm::APFixedPoint IntResult = llvm::APFixedPoint::getFromIntValue( 13009 Value, S.Context.getFixedPointSemantics(T), &Overflowed); 13010 13011 if (Overflowed) { 13012 
S.DiagRuntimeBehavior(E->getExprLoc(), E, 13013 S.PDiag(diag::warn_impcast_fixed_point_range) 13014 << toString(Value, /*Radix=*/10) << T 13015 << E->getSourceRange() 13016 << clang::SourceRange(CC)); 13017 return; 13018 } 13019 } 13020 } 13021 } 13022 13023 // If we are casting an integer type to a floating point type without 13024 // initialization-list syntax, we might lose accuracy if the floating 13025 // point type has a narrower significand than the integer type. 13026 if (SourceBT && TargetBT && SourceBT->isIntegerType() && 13027 TargetBT->isFloatingType() && !IsListInit) { 13028 // Determine the number of precision bits in the source integer type. 13029 IntRange SourceRange = GetExprRange(S.Context, E, S.isConstantEvaluated(), 13030 /*Approximate*/ true); 13031 unsigned int SourcePrecision = SourceRange.Width; 13032 13033 // Determine the number of precision bits in the 13034 // target floating point type. 13035 unsigned int TargetPrecision = llvm::APFloatBase::semanticsPrecision( 13036 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0))); 13037 13038 if (SourcePrecision > 0 && TargetPrecision > 0 && 13039 SourcePrecision > TargetPrecision) { 13040 13041 if (Optional<llvm::APSInt> SourceInt = 13042 E->getIntegerConstantExpr(S.Context)) { 13043 // If the source integer is a constant, convert it to the target 13044 // floating point type. Issue a warning if the value changes 13045 // during the whole conversion. 13046 llvm::APFloat TargetFloatValue( 13047 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0))); 13048 llvm::APFloat::opStatus ConversionStatus = 13049 TargetFloatValue.convertFromAPInt( 13050 *SourceInt, SourceBT->isSignedInteger(), 13051 llvm::APFloat::rmNearestTiesToEven); 13052 13053 if (ConversionStatus != llvm::APFloat::opOK) { 13054 SmallString<32> PrettySourceValue; 13055 SourceInt->toString(PrettySourceValue, 10); 13056 SmallString<32> PrettyTargetValue; 13057 TargetFloatValue.toString(PrettyTargetValue, TargetPrecision); 13058 13059 S.DiagRuntimeBehavior( 13060 E->getExprLoc(), E, 13061 S.PDiag(diag::warn_impcast_integer_float_precision_constant) 13062 << PrettySourceValue << PrettyTargetValue << E->getType() << T 13063 << E->getSourceRange() << clang::SourceRange(CC)); 13064 } 13065 } else { 13066 // Otherwise, the implicit conversion may lose precision. 13067 DiagnoseImpCast(S, E, T, CC, 13068 diag::warn_impcast_integer_float_precision); 13069 } 13070 } 13071 } 13072 13073 DiagnoseNullConversion(S, E, T, CC); 13074 13075 S.DiscardMisalignedMemberAddress(Target, E); 13076 13077 if (Target->isBooleanType()) 13078 DiagnoseIntInBoolContext(S, E); 13079 13080 if (!Source->isIntegerType() || !Target->isIntegerType()) 13081 return; 13082 13083 // TODO: remove this early return once the false positives for constant->bool 13084 // in templates, macros, etc, are reduced or removed. 
13085 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) 13086 return; 13087 13088 if (isObjCSignedCharBool(S, T) && !Source->isCharType() && 13089 !E->isKnownToHaveBooleanValue(/*Semantic=*/false)) { 13090 return adornObjCBoolConversionDiagWithTernaryFixit( 13091 S, E, 13092 S.Diag(CC, diag::warn_impcast_int_to_objc_signed_char_bool) 13093 << E->getType()); 13094 } 13095 13096 IntRange SourceTypeRange = 13097 IntRange::forTargetOfCanonicalType(S.Context, Source); 13098 IntRange LikelySourceRange = 13099 GetExprRange(S.Context, E, S.isConstantEvaluated(), /*Approximate*/ true); 13100 IntRange TargetRange = IntRange::forTargetOfCanonicalType(S.Context, Target); 13101 13102 if (LikelySourceRange.Width > TargetRange.Width) { 13103 // If the source is a constant, use a default-on diagnostic. 13104 // TODO: this should happen for bitfield stores, too. 13105 Expr::EvalResult Result; 13106 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects, 13107 S.isConstantEvaluated())) { 13108 llvm::APSInt Value(32); 13109 Value = Result.Val.getInt(); 13110 13111 if (S.SourceMgr.isInSystemMacro(CC)) 13112 return; 13113 13114 std::string PrettySourceValue = toString(Value, 10); 13115 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange); 13116 13117 S.DiagRuntimeBehavior( 13118 E->getExprLoc(), E, 13119 S.PDiag(diag::warn_impcast_integer_precision_constant) 13120 << PrettySourceValue << PrettyTargetValue << E->getType() << T 13121 << E->getSourceRange() << SourceRange(CC)); 13122 return; 13123 } 13124 13125 // People want to build with -Wshorten-64-to-32 and not -Wconversion. 13126 if (S.SourceMgr.isInSystemMacro(CC)) 13127 return; 13128 13129 if (TargetRange.Width == 32 && S.Context.getIntWidth(E->getType()) == 64) 13130 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_64_32, 13131 /* pruneControlFlow */ true); 13132 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_precision); 13133 } 13134 13135 if (TargetRange.Width > SourceTypeRange.Width) { 13136 if (auto *UO = dyn_cast<UnaryOperator>(E)) 13137 if (UO->getOpcode() == UO_Minus) 13138 if (Source->isUnsignedIntegerType()) { 13139 if (Target->isUnsignedIntegerType()) 13140 return DiagnoseImpCast(S, E, T, CC, 13141 diag::warn_impcast_high_order_zero_bits); 13142 if (Target->isSignedIntegerType()) 13143 return DiagnoseImpCast(S, E, T, CC, 13144 diag::warn_impcast_nonnegative_result); 13145 } 13146 } 13147 13148 if (TargetRange.Width == LikelySourceRange.Width && 13149 !TargetRange.NonNegative && LikelySourceRange.NonNegative && 13150 Source->isSignedIntegerType()) { 13151 // Warn when doing a signed to signed conversion, warn if the positive 13152 // source value is exactly the width of the target type, which will 13153 // cause a negative value to be stored. 13154 13155 Expr::EvalResult Result; 13156 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects) && 13157 !S.SourceMgr.isInSystemMacro(CC)) { 13158 llvm::APSInt Value = Result.Val.getInt(); 13159 if (isSameWidthConstantConversion(S, E, T, CC)) { 13160 std::string PrettySourceValue = toString(Value, 10); 13161 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange); 13162 13163 S.DiagRuntimeBehavior( 13164 E->getExprLoc(), E, 13165 S.PDiag(diag::warn_impcast_integer_precision_constant) 13166 << PrettySourceValue << PrettyTargetValue << E->getType() << T 13167 << E->getSourceRange() << SourceRange(CC)); 13168 return; 13169 } 13170 } 13171 13172 // Fall through for non-constants to give a sign conversion warning. 
13173 } 13174 13175 if ((TargetRange.NonNegative && !LikelySourceRange.NonNegative) || 13176 (!TargetRange.NonNegative && LikelySourceRange.NonNegative && 13177 LikelySourceRange.Width == TargetRange.Width)) { 13178 if (S.SourceMgr.isInSystemMacro(CC)) 13179 return; 13180 13181 unsigned DiagID = diag::warn_impcast_integer_sign; 13182 13183 // Traditionally, gcc has warned about this under -Wsign-compare. 13184 // We also want to warn about it in -Wconversion. 13185 // So if -Wconversion is off, use a completely identical diagnostic 13186 // in the sign-compare group. 13187 // The conditional-checking code will 13188 if (ICContext) { 13189 DiagID = diag::warn_impcast_integer_sign_conditional; 13190 *ICContext = true; 13191 } 13192 13193 return DiagnoseImpCast(S, E, T, CC, DiagID); 13194 } 13195 13196 // Diagnose conversions between different enumeration types. 13197 // In C, we pretend that the type of an EnumConstantDecl is its enumeration 13198 // type, to give us better diagnostics. 13199 QualType SourceType = E->getType(); 13200 if (!S.getLangOpts().CPlusPlus) { 13201 if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) 13202 if (EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(DRE->getDecl())) { 13203 EnumDecl *Enum = cast<EnumDecl>(ECD->getDeclContext()); 13204 SourceType = S.Context.getTypeDeclType(Enum); 13205 Source = S.Context.getCanonicalType(SourceType).getTypePtr(); 13206 } 13207 } 13208 13209 if (const EnumType *SourceEnum = Source->getAs<EnumType>()) 13210 if (const EnumType *TargetEnum = Target->getAs<EnumType>()) 13211 if (SourceEnum->getDecl()->hasNameForLinkage() && 13212 TargetEnum->getDecl()->hasNameForLinkage() && 13213 SourceEnum != TargetEnum) { 13214 if (S.SourceMgr.isInSystemMacro(CC)) 13215 return; 13216 13217 return DiagnoseImpCast(S, E, SourceType, T, CC, 13218 diag::warn_impcast_different_enum_types); 13219 } 13220 } 13221 13222 static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E, 13223 SourceLocation CC, QualType T); 13224 13225 static void CheckConditionalOperand(Sema &S, Expr *E, QualType T, 13226 SourceLocation CC, bool &ICContext) { 13227 E = E->IgnoreParenImpCasts(); 13228 13229 if (auto *CO = dyn_cast<AbstractConditionalOperator>(E)) 13230 return CheckConditionalOperator(S, CO, CC, T); 13231 13232 AnalyzeImplicitConversions(S, E, CC); 13233 if (E->getType() != T) 13234 return CheckImplicitConversion(S, E, T, CC, &ICContext); 13235 } 13236 13237 static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E, 13238 SourceLocation CC, QualType T) { 13239 AnalyzeImplicitConversions(S, E->getCond(), E->getQuestionLoc()); 13240 13241 Expr *TrueExpr = E->getTrueExpr(); 13242 if (auto *BCO = dyn_cast<BinaryConditionalOperator>(E)) 13243 TrueExpr = BCO->getCommon(); 13244 13245 bool Suspicious = false; 13246 CheckConditionalOperand(S, TrueExpr, T, CC, Suspicious); 13247 CheckConditionalOperand(S, E->getFalseExpr(), T, CC, Suspicious); 13248 13249 if (T->isBooleanType()) 13250 DiagnoseIntInBoolContext(S, E); 13251 13252 // If -Wconversion would have warned about either of the candidates 13253 // for a signedness conversion to the context type... 13254 if (!Suspicious) return; 13255 13256 // ...but it's currently ignored... 13257 if (!S.Diags.isIgnored(diag::warn_impcast_integer_sign_conditional, CC)) 13258 return; 13259 13260 // ...then check whether it would have warned about either of the 13261 // candidates for a signedness conversion to the condition type. 
13262 if (E->getType() == T) return; 13263 13264 Suspicious = false; 13265 CheckImplicitConversion(S, TrueExpr->IgnoreParenImpCasts(), 13266 E->getType(), CC, &Suspicious); 13267 if (!Suspicious) 13268 CheckImplicitConversion(S, E->getFalseExpr()->IgnoreParenImpCasts(), 13269 E->getType(), CC, &Suspicious); 13270 } 13271 13272 /// Check conversion of given expression to boolean. 13273 /// Input argument E is a logical expression. 13274 static void CheckBoolLikeConversion(Sema &S, Expr *E, SourceLocation CC) { 13275 if (S.getLangOpts().Bool) 13276 return; 13277 if (E->IgnoreParenImpCasts()->getType()->isAtomicType()) 13278 return; 13279 CheckImplicitConversion(S, E->IgnoreParenImpCasts(), S.Context.BoolTy, CC); 13280 } 13281 13282 namespace { 13283 struct AnalyzeImplicitConversionsWorkItem { 13284 Expr *E; 13285 SourceLocation CC; 13286 bool IsListInit; 13287 }; 13288 } 13289 13290 /// Data recursive variant of AnalyzeImplicitConversions. Subexpressions 13291 /// that should be visited are added to WorkList. 13292 static void AnalyzeImplicitConversions( 13293 Sema &S, AnalyzeImplicitConversionsWorkItem Item, 13294 llvm::SmallVectorImpl<AnalyzeImplicitConversionsWorkItem> &WorkList) { 13295 Expr *OrigE = Item.E; 13296 SourceLocation CC = Item.CC; 13297 13298 QualType T = OrigE->getType(); 13299 Expr *E = OrigE->IgnoreParenImpCasts(); 13300 13301 // Propagate whether we are in a C++ list initialization expression. 13302 // If so, we do not issue warnings for implicit int-float conversion 13303 // precision loss, because C++11 narrowing already handles it. 13304 bool IsListInit = Item.IsListInit || 13305 (isa<InitListExpr>(OrigE) && S.getLangOpts().CPlusPlus); 13306 13307 if (E->isTypeDependent() || E->isValueDependent()) 13308 return; 13309 13310 Expr *SourceExpr = E; 13311 // Examine, but don't traverse into the source expression of an 13312 // OpaqueValueExpr, since it may have multiple parents and we don't want to 13313 // emit duplicate diagnostics. Its fine to examine the form or attempt to 13314 // evaluate it in the context of checking the specific conversion to T though. 13315 if (auto *OVE = dyn_cast<OpaqueValueExpr>(E)) 13316 if (auto *Src = OVE->getSourceExpr()) 13317 SourceExpr = Src; 13318 13319 if (const auto *UO = dyn_cast<UnaryOperator>(SourceExpr)) 13320 if (UO->getOpcode() == UO_Not && 13321 UO->getSubExpr()->isKnownToHaveBooleanValue()) 13322 S.Diag(UO->getBeginLoc(), diag::warn_bitwise_negation_bool) 13323 << OrigE->getSourceRange() << T->isBooleanType() 13324 << FixItHint::CreateReplacement(UO->getBeginLoc(), "!"); 13325 13326 if (const auto *BO = dyn_cast<BinaryOperator>(SourceExpr)) 13327 if ((BO->getOpcode() == BO_And || BO->getOpcode() == BO_Or) && 13328 BO->getLHS()->isKnownToHaveBooleanValue() && 13329 BO->getRHS()->isKnownToHaveBooleanValue() && 13330 BO->getLHS()->HasSideEffects(S.Context) && 13331 BO->getRHS()->HasSideEffects(S.Context)) { 13332 S.Diag(BO->getBeginLoc(), diag::warn_bitwise_instead_of_logical) 13333 << (BO->getOpcode() == BO_And ? "&" : "|") << OrigE->getSourceRange() 13334 << FixItHint::CreateReplacement( 13335 BO->getOperatorLoc(), 13336 (BO->getOpcode() == BO_And ? "&&" : "||")); 13337 S.Diag(BO->getBeginLoc(), diag::note_cast_operand_to_int); 13338 } 13339 13340 // For conditional operators, we analyze the arguments as if they 13341 // were being fed directly into the output. 
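// (e.g. in 'unsigned u = cond ? -1 : n;' each arm is checked for conversion
// directly to 'unsigned'.)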
13342 if (auto *CO = dyn_cast<AbstractConditionalOperator>(SourceExpr)) { 13343 CheckConditionalOperator(S, CO, CC, T); 13344 return; 13345 } 13346 13347 // Check implicit argument conversions for function calls. 13348 if (CallExpr *Call = dyn_cast<CallExpr>(SourceExpr)) 13349 CheckImplicitArgumentConversions(S, Call, CC); 13350 13351 // Go ahead and check any implicit conversions we might have skipped. 13352 // The non-canonical typecheck is just an optimization; 13353 // CheckImplicitConversion will filter out dead implicit conversions. 13354 if (SourceExpr->getType() != T) 13355 CheckImplicitConversion(S, SourceExpr, T, CC, nullptr, IsListInit); 13356 13357 // Now continue drilling into this expression. 13358 13359 if (PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E)) { 13360 // The bound subexpressions in a PseudoObjectExpr are not reachable 13361 // as transitive children. 13362 // FIXME: Use a more uniform representation for this. 13363 for (auto *SE : POE->semantics()) 13364 if (auto *OVE = dyn_cast<OpaqueValueExpr>(SE)) 13365 WorkList.push_back({OVE->getSourceExpr(), CC, IsListInit}); 13366 } 13367 13368 // Skip past explicit casts. 13369 if (auto *CE = dyn_cast<ExplicitCastExpr>(E)) { 13370 E = CE->getSubExpr()->IgnoreParenImpCasts(); 13371 if (!CE->getType()->isVoidType() && E->getType()->isAtomicType()) 13372 S.Diag(E->getBeginLoc(), diag::warn_atomic_implicit_seq_cst); 13373 WorkList.push_back({E, CC, IsListInit}); 13374 return; 13375 } 13376 13377 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { 13378 // Do a somewhat different check with comparison operators. 13379 if (BO->isComparisonOp()) 13380 return AnalyzeComparison(S, BO); 13381 13382 // And with simple assignments. 13383 if (BO->getOpcode() == BO_Assign) 13384 return AnalyzeAssignment(S, BO); 13385 // And with compound assignments. 13386 if (BO->isAssignmentOp()) 13387 return AnalyzeCompoundAssignment(S, BO); 13388 } 13389 13390 // These break the otherwise-useful invariant below. Fortunately, 13391 // we don't really need to recurse into them, because any internal 13392 // expressions should have been analyzed already when they were 13393 // built into statements. 13394 if (isa<StmtExpr>(E)) return; 13395 13396 // Don't descend into unevaluated contexts. 13397 if (isa<UnaryExprOrTypeTraitExpr>(E)) return; 13398 13399 // Now just recurse over the expression's children. 13400 CC = E->getExprLoc(); 13401 BinaryOperator *BO = dyn_cast<BinaryOperator>(E); 13402 bool IsLogicalAndOperator = BO && BO->getOpcode() == BO_LAnd; 13403 for (Stmt *SubStmt : E->children()) { 13404 Expr *ChildExpr = dyn_cast_or_null<Expr>(SubStmt); 13405 if (!ChildExpr) 13406 continue; 13407 13408 if (IsLogicalAndOperator && 13409 isa<StringLiteral>(ChildExpr->IgnoreParenImpCasts())) 13410 // Ignore checking string literals that are in logical and operators. 13411 // This is a common pattern for asserts. 
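// (e.g. assert(p && "p must not be null") should not warn about the string
// literal being converted to bool.)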
13412 continue; 13413 WorkList.push_back({ChildExpr, CC, IsListInit}); 13414 } 13415 13416 if (BO && BO->isLogicalOp()) { 13417 Expr *SubExpr = BO->getLHS()->IgnoreParenImpCasts(); 13418 if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr)) 13419 ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc()); 13420 13421 SubExpr = BO->getRHS()->IgnoreParenImpCasts(); 13422 if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr)) 13423 ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc()); 13424 } 13425 13426 if (const UnaryOperator *U = dyn_cast<UnaryOperator>(E)) { 13427 if (U->getOpcode() == UO_LNot) { 13428 ::CheckBoolLikeConversion(S, U->getSubExpr(), CC); 13429 } else if (U->getOpcode() != UO_AddrOf) { 13430 if (U->getSubExpr()->getType()->isAtomicType()) 13431 S.Diag(U->getSubExpr()->getBeginLoc(), 13432 diag::warn_atomic_implicit_seq_cst); 13433 } 13434 } 13435 } 13436 13437 /// AnalyzeImplicitConversions - Find and report any interesting 13438 /// implicit conversions in the given expression. There are a couple 13439 /// of competing diagnostics here, -Wconversion and -Wsign-compare. 13440 static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC, 13441 bool IsListInit/*= false*/) { 13442 llvm::SmallVector<AnalyzeImplicitConversionsWorkItem, 16> WorkList; 13443 WorkList.push_back({OrigE, CC, IsListInit}); 13444 while (!WorkList.empty()) 13445 AnalyzeImplicitConversions(S, WorkList.pop_back_val(), WorkList); 13446 } 13447 13448 /// Diagnose integer type and any valid implicit conversion to it. 13449 static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E, const QualType &IntT) { 13450 // Taking into account implicit conversions, 13451 // allow any integer. 13452 if (!E->getType()->isIntegerType()) { 13453 S.Diag(E->getBeginLoc(), 13454 diag::err_opencl_enqueue_kernel_invalid_local_size_type); 13455 return true; 13456 } 13457 // Potentially emit standard warnings for implicit conversions if enabled 13458 // using -Wconversion. 13459 CheckImplicitConversion(S, E, IntT, E->getBeginLoc()); 13460 return false; 13461 } 13462 13463 // Helper function for Sema::DiagnoseAlwaysNonNullPointer. 13464 // Returns true when emitting a warning about taking the address of a reference. 13465 static bool CheckForReference(Sema &SemaRef, const Expr *E, 13466 const PartialDiagnostic &PD) { 13467 E = E->IgnoreParenImpCasts(); 13468 13469 const FunctionDecl *FD = nullptr; 13470 13471 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) { 13472 if (!DRE->getDecl()->getType()->isReferenceType()) 13473 return false; 13474 } else if (const MemberExpr *M = dyn_cast<MemberExpr>(E)) { 13475 if (!M->getMemberDecl()->getType()->isReferenceType()) 13476 return false; 13477 } else if (const CallExpr *Call = dyn_cast<CallExpr>(E)) { 13478 if (!Call->getCallReturnType(SemaRef.Context)->isReferenceType()) 13479 return false; 13480 FD = Call->getDirectCallee(); 13481 } else { 13482 return false; 13483 } 13484 13485 SemaRef.Diag(E->getExprLoc(), PD); 13486 13487 // If possible, point to location of function. 13488 if (FD) { 13489 SemaRef.Diag(FD->getLocation(), diag::note_reference_is_return_value) << FD; 13490 } 13491 13492 return true; 13493 } 13494 13495 // Returns true if the SourceLocation is expanded from any macro body. 13496 // Returns false if the SourceLocation is invalid, is from not in a macro 13497 // expansion, or is from expanded from a top-level macro argument. 
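// (DiagnoseAlwaysNonNullPointer uses this below to stay quiet when the
// pointer expression comes entirely from a macro body.)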
13498 static bool IsInAnyMacroBody(const SourceManager &SM, SourceLocation Loc) { 13499 if (Loc.isInvalid()) 13500 return false; 13501 13502 while (Loc.isMacroID()) { 13503 if (SM.isMacroBodyExpansion(Loc)) 13504 return true; 13505 Loc = SM.getImmediateMacroCallerLoc(Loc); 13506 } 13507 13508 return false; 13509 } 13510 13511 /// Diagnose pointers that are always non-null. 13512 /// \param E the expression containing the pointer 13513 /// \param NullKind NPCK_NotNull if E is a cast to bool, otherwise, E is 13514 /// compared to a null pointer 13515 /// \param IsEqual True when the comparison is equal to a null pointer 13516 /// \param Range Extra SourceRange to highlight in the diagnostic 13517 void Sema::DiagnoseAlwaysNonNullPointer(Expr *E, 13518 Expr::NullPointerConstantKind NullKind, 13519 bool IsEqual, SourceRange Range) { 13520 if (!E) 13521 return; 13522 13523 // Don't warn inside macros. 13524 if (E->getExprLoc().isMacroID()) { 13525 const SourceManager &SM = getSourceManager(); 13526 if (IsInAnyMacroBody(SM, E->getExprLoc()) || 13527 IsInAnyMacroBody(SM, Range.getBegin())) 13528 return; 13529 } 13530 E = E->IgnoreImpCasts(); 13531 13532 const bool IsCompare = NullKind != Expr::NPCK_NotNull; 13533 13534 if (isa<CXXThisExpr>(E)) { 13535 unsigned DiagID = IsCompare ? diag::warn_this_null_compare 13536 : diag::warn_this_bool_conversion; 13537 Diag(E->getExprLoc(), DiagID) << E->getSourceRange() << Range << IsEqual; 13538 return; 13539 } 13540 13541 bool IsAddressOf = false; 13542 13543 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) { 13544 if (UO->getOpcode() != UO_AddrOf) 13545 return; 13546 IsAddressOf = true; 13547 E = UO->getSubExpr(); 13548 } 13549 13550 if (IsAddressOf) { 13551 unsigned DiagID = IsCompare 13552 ? diag::warn_address_of_reference_null_compare 13553 : diag::warn_address_of_reference_bool_conversion; 13554 PartialDiagnostic PD = PDiag(DiagID) << E->getSourceRange() << Range 13555 << IsEqual; 13556 if (CheckForReference(*this, E, PD)) { 13557 return; 13558 } 13559 } 13560 13561 auto ComplainAboutNonnullParamOrCall = [&](const Attr *NonnullAttr) { 13562 bool IsParam = isa<NonNullAttr>(NonnullAttr); 13563 std::string Str; 13564 llvm::raw_string_ostream S(Str); 13565 E->printPretty(S, nullptr, getPrintingPolicy()); 13566 unsigned DiagID = IsCompare ? diag::warn_nonnull_expr_compare 13567 : diag::warn_cast_nonnull_to_bool; 13568 Diag(E->getExprLoc(), DiagID) << IsParam << S.str() 13569 << E->getSourceRange() << Range << IsEqual; 13570 Diag(NonnullAttr->getLocation(), diag::note_declared_nonnull) << IsParam; 13571 }; 13572 13573 // If we have a CallExpr that is tagged with returns_nonnull, we can complain. 13574 if (auto *Call = dyn_cast<CallExpr>(E->IgnoreParenImpCasts())) { 13575 if (auto *Callee = Call->getDirectCallee()) { 13576 if (const Attr *A = Callee->getAttr<ReturnsNonNullAttr>()) { 13577 ComplainAboutNonnullParamOrCall(A); 13578 return; 13579 } 13580 } 13581 } 13582 13583 // Expect to find a single Decl. Skip anything more complicated. 13584 ValueDecl *D = nullptr; 13585 if (DeclRefExpr *R = dyn_cast<DeclRefExpr>(E)) { 13586 D = R->getDecl(); 13587 } else if (MemberExpr *M = dyn_cast<MemberExpr>(E)) { 13588 D = M->getMemberDecl(); 13589 } 13590 13591 // Weak Decls can be null. 
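// (A declaration marked __attribute__((weak)) may resolve to a null address
// at link or load time, so comparing it against null is not tautological.)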
13592   if (!D || D->isWeak())
13593     return;
13594
13595   // Check for a parameter decl with the nonnull attribute.
13596   if (const auto* PV = dyn_cast<ParmVarDecl>(D)) {
13597     if (getCurFunction() &&
13598         !getCurFunction()->ModifiedNonNullParams.count(PV)) {
13599       if (const Attr *A = PV->getAttr<NonNullAttr>()) {
13600         ComplainAboutNonnullParamOrCall(A);
13601         return;
13602       }
13603
13604       if (const auto *FD = dyn_cast<FunctionDecl>(PV->getDeclContext())) {
13605         // Skip function templates that have not been specialized yet.
13606         if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate)
13607           return;
13608         auto ParamIter = llvm::find(FD->parameters(), PV);
13609         assert(ParamIter != FD->param_end());
13610         unsigned ParamNo = std::distance(FD->param_begin(), ParamIter);
13611
13612         for (const auto *NonNull : FD->specific_attrs<NonNullAttr>()) {
13613           if (!NonNull->args_size()) {
13614             ComplainAboutNonnullParamOrCall(NonNull);
13615             return;
13616           }
13617
13618           for (const ParamIdx &ArgNo : NonNull->args()) {
13619             if (ArgNo.getASTIndex() == ParamNo) {
13620               ComplainAboutNonnullParamOrCall(NonNull);
13621               return;
13622             }
13623           }
13624         }
13625       }
13626     }
13627   }
13628
13629   QualType T = D->getType();
13630   const bool IsArray = T->isArrayType();
13631   const bool IsFunction = T->isFunctionType();
13632
13633   // Address of function is used to silence the function warning.
13634   if (IsAddressOf && IsFunction) {
13635     return;
13636   }
13637
13638   // Found nothing.
13639   if (!IsAddressOf && !IsFunction && !IsArray)
13640     return;
13641
13642   // Pretty print the expression for the diagnostic.
13643   std::string Str;
13644   llvm::raw_string_ostream S(Str);
13645   E->printPretty(S, nullptr, getPrintingPolicy());
13646
13647   unsigned DiagID = IsCompare ? diag::warn_null_pointer_compare
13648                               : diag::warn_impcast_pointer_to_bool;
13649   enum {
13650     AddressOf,
13651     FunctionPointer,
13652     ArrayPointer
13653   } DiagType;
13654   if (IsAddressOf)
13655     DiagType = AddressOf;
13656   else if (IsFunction)
13657     DiagType = FunctionPointer;
13658   else if (IsArray)
13659     DiagType = ArrayPointer;
13660   else
13661     llvm_unreachable("Could not determine diagnostic.");
13662   Diag(E->getExprLoc(), DiagID) << DiagType << S.str() << E->getSourceRange()
13663                                 << Range << IsEqual;
13664
13665   if (!IsFunction)
13666     return;
13667
13668   // Suggest '&' to silence the function warning.
13669   Diag(E->getExprLoc(), diag::note_function_warning_silence)
13670       << FixItHint::CreateInsertion(E->getBeginLoc(), "&");
13671
13672   // Check to see if '()' fixit should be emitted.
13673   QualType ReturnType;
13674   UnresolvedSet<4> NonTemplateOverloads;
13675   tryExprAsCall(*E, ReturnType, NonTemplateOverloads);
13676   if (ReturnType.isNull())
13677     return;
13678
13679   if (IsCompare) {
13680     // There are two cases here. If there is a null constant, only suggest the
13681     // fixit for a pointer return type. If the null is 0, then suggest it if
13682     // the return type is a pointer or an integer type.
13683     if (!ReturnType->isPointerType()) {
13684       if (NullKind == Expr::NPCK_ZeroExpression ||
13685           NullKind == Expr::NPCK_ZeroLiteral) {
13686         if (!ReturnType->isIntegerType())
13687           return;
13688       } else {
13689         return;
13690       }
13691     }
13692   } else { // !IsCompare
13693     // For function to bool, only suggest if the function pointer has a bool
13694     // return type.
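// Illustrative example (hypothetical user code):
//
//   bool ready();
//   if (ready) { ... }   // warns: a function is always non-null here
//
// Besides the '&' suggestion above, the note below proposes "if (ready())",
// which only makes sense when the call itself yields a bool.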
13695     if (!ReturnType->isSpecificBuiltinType(BuiltinType::Bool))
13696       return;
13697   }
13698   Diag(E->getExprLoc(), diag::note_function_to_function_call)
13699       << FixItHint::CreateInsertion(getLocForEndOfToken(E->getEndLoc()), "()");
13700 }
13701
13702 /// Diagnoses "dangerous" implicit conversions within the given
13703 /// expression (which is a full expression). Implements -Wconversion
13704 /// and -Wsign-compare.
13705 ///
13706 /// \param CC the "context" location of the implicit conversion, i.e.
13707 ///   the location of the syntactic entity requiring the implicit
13708 ///   conversion
13709 void Sema::CheckImplicitConversions(Expr *E, SourceLocation CC) {
13710   // Don't diagnose in unevaluated contexts.
13711   if (isUnevaluatedContext())
13712     return;
13713
13714   // Don't diagnose for value- or type-dependent expressions.
13715   if (E->isTypeDependent() || E->isValueDependent())
13716     return;
13717
13718   // Check for array bounds violations in cases where the check isn't triggered
13719   // elsewhere for other Expr types (like BinaryOperators), e.g. when an
13720   // ArraySubscriptExpr is on the RHS of a variable initialization.
13721   CheckArrayAccess(E);
13722
13723   // This is not the right CC for (e.g.) a variable initialization.
13724   AnalyzeImplicitConversions(*this, E, CC);
13725 }
13726
13727 /// CheckBoolLikeConversion - Check conversion of given expression to boolean.
13728 /// Input argument E is a logical expression.
13729 void Sema::CheckBoolLikeConversion(Expr *E, SourceLocation CC) {
13730   ::CheckBoolLikeConversion(*this, E, CC);
13731 }
13732
13733 /// Diagnose when an expression is an integer constant expression and its
13734 /// evaluation results in integer overflow.
13735 void Sema::CheckForIntOverflow(Expr *E) {
13736   // Use a work list to deal with nested struct initializers.
13737   SmallVector<Expr *, 2> Exprs(1, E);
13738
13739   do {
13740     Expr *OriginalE = Exprs.pop_back_val();
13741     Expr *E = OriginalE->IgnoreParenCasts();
13742
13743     if (isa<BinaryOperator>(E)) {
13744       E->EvaluateForOverflow(Context);
13745       continue;
13746     }
13747
13748     if (auto InitList = dyn_cast<InitListExpr>(OriginalE))
13749       Exprs.append(InitList->inits().begin(), InitList->inits().end());
13750     else if (isa<ObjCBoxedExpr>(OriginalE))
13751       E->EvaluateForOverflow(Context);
13752     else if (auto Call = dyn_cast<CallExpr>(E))
13753       Exprs.append(Call->arg_begin(), Call->arg_end());
13754     else if (auto Message = dyn_cast<ObjCMessageExpr>(E))
13755       Exprs.append(Message->arg_begin(), Message->arg_end());
13756   } while (!Exprs.empty());
13757 }
13758
13759 namespace {
13760
13761 /// Visitor for expressions which looks for unsequenced operations on the
13762 /// same object.
13763 class SequenceChecker : public ConstEvaluatedExprVisitor<SequenceChecker> {
13764   using Base = ConstEvaluatedExprVisitor<SequenceChecker>;
13765
13766   /// A tree of sequenced regions within an expression. Two regions are
13767   /// unsequenced if one is an ancestor or a descendant of the other. When we
13768   /// finish processing an expression with sequencing, such as a comma
13769   /// expression, we fold its tree nodes into its parent, since they are
13770   /// unsequenced with respect to nodes we will visit later.
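// Illustrative example (hypothetical user code): for "(a = 1, b = 2) + (a = 3)"
// the comma operator allocates one child region for "a = 1" and another for
// "b = 2", so those two are treated as sequenced with each other. Once the
// comma has been visited, its regions are merged back into the parent, which
// is how the later, unsequenced "a = 3" is still diagnosed against "a = 1".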
13771 class SequenceTree { 13772 struct Value { 13773 explicit Value(unsigned Parent) : Parent(Parent), Merged(false) {} 13774 unsigned Parent : 31; 13775 unsigned Merged : 1; 13776 }; 13777 SmallVector<Value, 8> Values; 13778 13779 public: 13780 /// A region within an expression which may be sequenced with respect 13781 /// to some other region. 13782 class Seq { 13783 friend class SequenceTree; 13784 13785 unsigned Index; 13786 13787 explicit Seq(unsigned N) : Index(N) {} 13788 13789 public: 13790 Seq() : Index(0) {} 13791 }; 13792 13793 SequenceTree() { Values.push_back(Value(0)); } 13794 Seq root() const { return Seq(0); } 13795 13796 /// Create a new sequence of operations, which is an unsequenced 13797 /// subset of \p Parent. This sequence of operations is sequenced with 13798 /// respect to other children of \p Parent. 13799 Seq allocate(Seq Parent) { 13800 Values.push_back(Value(Parent.Index)); 13801 return Seq(Values.size() - 1); 13802 } 13803 13804 /// Merge a sequence of operations into its parent. 13805 void merge(Seq S) { 13806 Values[S.Index].Merged = true; 13807 } 13808 13809 /// Determine whether two operations are unsequenced. This operation 13810 /// is asymmetric: \p Cur should be the more recent sequence, and \p Old 13811 /// should have been merged into its parent as appropriate. 13812 bool isUnsequenced(Seq Cur, Seq Old) { 13813 unsigned C = representative(Cur.Index); 13814 unsigned Target = representative(Old.Index); 13815 while (C >= Target) { 13816 if (C == Target) 13817 return true; 13818 C = Values[C].Parent; 13819 } 13820 return false; 13821 } 13822 13823 private: 13824 /// Pick a representative for a sequence. 13825 unsigned representative(unsigned K) { 13826 if (Values[K].Merged) 13827 // Perform path compression as we go. 13828 return Values[K].Parent = representative(Values[K].Parent); 13829 return K; 13830 } 13831 }; 13832 13833 /// An object for which we can track unsequenced uses. 13834 using Object = const NamedDecl *; 13835 13836 /// Different flavors of object usage which we track. We only track the 13837 /// least-sequenced usage of each kind. 13838 enum UsageKind { 13839 /// A read of an object. Multiple unsequenced reads are OK. 13840 UK_Use, 13841 13842 /// A modification of an object which is sequenced before the value 13843 /// computation of the expression, such as ++n in C++. 13844 UK_ModAsValue, 13845 13846 /// A modification of an object which is not sequenced before the value 13847 /// computation of the expression, such as n++. 13848 UK_ModAsSideEffect, 13849 13850 UK_Count = UK_ModAsSideEffect + 1 13851 }; 13852 13853 /// Bundle together a sequencing region and the expression corresponding 13854 /// to a specific usage. One Usage is stored for each usage kind in UsageInfo. 13855 struct Usage { 13856 const Expr *UsageExpr; 13857 SequenceTree::Seq Seq; 13858 13859 Usage() : UsageExpr(nullptr), Seq() {} 13860 }; 13861 13862 struct UsageInfo { 13863 Usage Uses[UK_Count]; 13864 13865 /// Have we issued a diagnostic for this object already? 13866 bool Diagnosed; 13867 13868 UsageInfo() : Uses(), Diagnosed(false) {} 13869 }; 13870 using UsageInfoMap = llvm::SmallDenseMap<Object, UsageInfo, 16>; 13871 13872 Sema &SemaRef; 13873 13874 /// Sequenced regions within the expression. 13875 SequenceTree Tree; 13876 13877 /// Declaration modifications and references which we have seen. 13878 UsageInfoMap UsageMap; 13879 13880 /// The region we are currently within. 
13881 SequenceTree::Seq Region; 13882 13883 /// Filled in with declarations which were modified as a side-effect 13884 /// (that is, post-increment operations). 13885 SmallVectorImpl<std::pair<Object, Usage>> *ModAsSideEffect = nullptr; 13886 13887 /// Expressions to check later. We defer checking these to reduce 13888 /// stack usage. 13889 SmallVectorImpl<const Expr *> &WorkList; 13890 13891 /// RAII object wrapping the visitation of a sequenced subexpression of an 13892 /// expression. At the end of this process, the side-effects of the evaluation 13893 /// become sequenced with respect to the value computation of the result, so 13894 /// we downgrade any UK_ModAsSideEffect within the evaluation to 13895 /// UK_ModAsValue. 13896 struct SequencedSubexpression { 13897 SequencedSubexpression(SequenceChecker &Self) 13898 : Self(Self), OldModAsSideEffect(Self.ModAsSideEffect) { 13899 Self.ModAsSideEffect = &ModAsSideEffect; 13900 } 13901 13902 ~SequencedSubexpression() { 13903 for (const std::pair<Object, Usage> &M : llvm::reverse(ModAsSideEffect)) { 13904 // Add a new usage with usage kind UK_ModAsValue, and then restore 13905 // the previous usage with UK_ModAsSideEffect (thus clearing it if 13906 // the previous one was empty). 13907 UsageInfo &UI = Self.UsageMap[M.first]; 13908 auto &SideEffectUsage = UI.Uses[UK_ModAsSideEffect]; 13909 Self.addUsage(M.first, UI, SideEffectUsage.UsageExpr, UK_ModAsValue); 13910 SideEffectUsage = M.second; 13911 } 13912 Self.ModAsSideEffect = OldModAsSideEffect; 13913 } 13914 13915 SequenceChecker &Self; 13916 SmallVector<std::pair<Object, Usage>, 4> ModAsSideEffect; 13917 SmallVectorImpl<std::pair<Object, Usage>> *OldModAsSideEffect; 13918 }; 13919 13920 /// RAII object wrapping the visitation of a subexpression which we might 13921 /// choose to evaluate as a constant. If any subexpression is evaluated and 13922 /// found to be non-constant, this allows us to suppress the evaluation of 13923 /// the outer expression. 13924 class EvaluationTracker { 13925 public: 13926 EvaluationTracker(SequenceChecker &Self) 13927 : Self(Self), Prev(Self.EvalTracker) { 13928 Self.EvalTracker = this; 13929 } 13930 13931 ~EvaluationTracker() { 13932 Self.EvalTracker = Prev; 13933 if (Prev) 13934 Prev->EvalOK &= EvalOK; 13935 } 13936 13937 bool evaluate(const Expr *E, bool &Result) { 13938 if (!EvalOK || E->isValueDependent()) 13939 return false; 13940 EvalOK = E->EvaluateAsBooleanCondition( 13941 Result, Self.SemaRef.Context, Self.SemaRef.isConstantEvaluated()); 13942 return EvalOK; 13943 } 13944 13945 private: 13946 SequenceChecker &Self; 13947 EvaluationTracker *Prev; 13948 bool EvalOK = true; 13949 } *EvalTracker = nullptr; 13950 13951 /// Find the object which is produced by the specified expression, 13952 /// if any. 13953 Object getObject(const Expr *E, bool Mod) const { 13954 E = E->IgnoreParenCasts(); 13955 if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) { 13956 if (Mod && (UO->getOpcode() == UO_PreInc || UO->getOpcode() == UO_PreDec)) 13957 return getObject(UO->getSubExpr(), Mod); 13958 } else if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { 13959 if (BO->getOpcode() == BO_Comma) 13960 return getObject(BO->getRHS(), Mod); 13961 if (Mod && BO->isAssignmentOp()) 13962 return getObject(BO->getLHS(), Mod); 13963 } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) { 13964 // FIXME: Check for more interesting cases, like "x.n = ++x.n". 
13965     if (isa<CXXThisExpr>(ME->getBase()->IgnoreParenCasts()))
13966       return ME->getMemberDecl();
13967   } else if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
13968     // FIXME: If this is a reference, map through to its value.
13969     return DRE->getDecl();
13970   return nullptr;
13971 }
13972
13973   /// Note that an object \p O was modified or used by an expression
13974   /// \p UsageExpr with usage kind \p UK. \p UI is the \p UsageInfo for
13975   /// the object \p O as obtained via the \p UsageMap.
13976   void addUsage(Object O, UsageInfo &UI, const Expr *UsageExpr, UsageKind UK) {
13977     // Get the old usage for the given object and usage kind.
13978     Usage &U = UI.Uses[UK];
13979     if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq)) {
13980       // If we have a modification as side effect and are in a sequenced
13981       // subexpression, save the old Usage so that we can restore it later
13982       // in SequencedSubexpression::~SequencedSubexpression.
13983       if (UK == UK_ModAsSideEffect && ModAsSideEffect)
13984         ModAsSideEffect->push_back(std::make_pair(O, U));
13985       // Then record the new usage with the current sequencing region.
13986       U.UsageExpr = UsageExpr;
13987       U.Seq = Region;
13988     }
13989   }
13990
13991   /// Check whether a modification or use of an object \p O in an expression
13992   /// \p UsageExpr conflicts with a prior usage of kind \p OtherKind. \p UI is
13993   /// the \p UsageInfo for the object \p O as obtained via the \p UsageMap.
13994   /// \p IsModMod is true when we are checking for a mod-mod unsequenced
13995   /// usage and false when we are checking for a mod-use unsequenced usage.
13996   void checkUsage(Object O, UsageInfo &UI, const Expr *UsageExpr,
13997                   UsageKind OtherKind, bool IsModMod) {
13998     if (UI.Diagnosed)
13999       return;
14000
14001     const Usage &U = UI.Uses[OtherKind];
14002     if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq))
14003       return;
14004
14005     const Expr *Mod = U.UsageExpr;
14006     const Expr *ModOrUse = UsageExpr;
14007     if (OtherKind == UK_Use)
14008       std::swap(Mod, ModOrUse);
14009
14010     SemaRef.DiagRuntimeBehavior(
14011         Mod->getExprLoc(), {Mod, ModOrUse},
14012         SemaRef.PDiag(IsModMod ? diag::warn_unsequenced_mod_mod
14013                                : diag::warn_unsequenced_mod_use)
14014             << O << SourceRange(ModOrUse->getExprLoc()));
14015     UI.Diagnosed = true;
14016   }
14017
14018   // A note on note{Pre, Post}{Use, Mod}:
14019   //
14020   // (It helps to follow the algorithm with an expression such as
14021   //  "((++k)++, k) = k" or "k = (k++, k++)". Both contain unsequenced
14022   //  operations before C++17 and both are well-defined in C++17).
14023   //
14024   // When visiting a node which uses/modifies an object we first call notePreUse
14025   // or notePreMod before visiting its sub-expression(s). At this point the
14026   // children of the current node have not yet been visited and so the eventual
14027   // uses/modifications resulting from the children of the current node have not
14028   // been recorded yet.
14029   //
14030   // We then visit the children of the current node. After that notePostUse or
14031   // notePostMod is called. These will 1) detect an unsequenced modification
14032   // as side effect (as in "k++ + k") and 2) add a new usage with the
14033   // appropriate usage kind.
14034   //
14035   // We also have to be careful that some operations sequence modifications as
14036   // side effects as well (for example: || or ,). To account for this we wrap
14037   // the visitation of such a sub-expression (for example: the LHS of || or ,)
14038   // with SequencedSubexpression.
SequencedSubexpression is an RAII object 14039 // which record usages which are modifications as side effect, and then 14040 // downgrade them (or more accurately restore the previous usage which was a 14041 // modification as side effect) when exiting the scope of the sequenced 14042 // subexpression. 14043 14044 void notePreUse(Object O, const Expr *UseExpr) { 14045 UsageInfo &UI = UsageMap[O]; 14046 // Uses conflict with other modifications. 14047 checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/false); 14048 } 14049 14050 void notePostUse(Object O, const Expr *UseExpr) { 14051 UsageInfo &UI = UsageMap[O]; 14052 checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsSideEffect, 14053 /*IsModMod=*/false); 14054 addUsage(O, UI, UseExpr, /*UsageKind=*/UK_Use); 14055 } 14056 14057 void notePreMod(Object O, const Expr *ModExpr) { 14058 UsageInfo &UI = UsageMap[O]; 14059 // Modifications conflict with other modifications and with uses. 14060 checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/true); 14061 checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_Use, /*IsModMod=*/false); 14062 } 14063 14064 void notePostMod(Object O, const Expr *ModExpr, UsageKind UK) { 14065 UsageInfo &UI = UsageMap[O]; 14066 checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsSideEffect, 14067 /*IsModMod=*/true); 14068 addUsage(O, UI, ModExpr, /*UsageKind=*/UK); 14069 } 14070 14071 public: 14072 SequenceChecker(Sema &S, const Expr *E, 14073 SmallVectorImpl<const Expr *> &WorkList) 14074 : Base(S.Context), SemaRef(S), Region(Tree.root()), WorkList(WorkList) { 14075 Visit(E); 14076 // Silence a -Wunused-private-field since WorkList is now unused. 14077 // TODO: Evaluate if it can be used, and if not remove it. 14078 (void)this->WorkList; 14079 } 14080 14081 void VisitStmt(const Stmt *S) { 14082 // Skip all statements which aren't expressions for now. 14083 } 14084 14085 void VisitExpr(const Expr *E) { 14086 // By default, just recurse to evaluated subexpressions. 14087 Base::VisitStmt(E); 14088 } 14089 14090 void VisitCastExpr(const CastExpr *E) { 14091 Object O = Object(); 14092 if (E->getCastKind() == CK_LValueToRValue) 14093 O = getObject(E->getSubExpr(), false); 14094 14095 if (O) 14096 notePreUse(O, E); 14097 VisitExpr(E); 14098 if (O) 14099 notePostUse(O, E); 14100 } 14101 14102 void VisitSequencedExpressions(const Expr *SequencedBefore, 14103 const Expr *SequencedAfter) { 14104 SequenceTree::Seq BeforeRegion = Tree.allocate(Region); 14105 SequenceTree::Seq AfterRegion = Tree.allocate(Region); 14106 SequenceTree::Seq OldRegion = Region; 14107 14108 { 14109 SequencedSubexpression SeqBefore(*this); 14110 Region = BeforeRegion; 14111 Visit(SequencedBefore); 14112 } 14113 14114 Region = AfterRegion; 14115 Visit(SequencedAfter); 14116 14117 Region = OldRegion; 14118 14119 Tree.merge(BeforeRegion); 14120 Tree.merge(AfterRegion); 14121 } 14122 14123 void VisitArraySubscriptExpr(const ArraySubscriptExpr *ASE) { 14124 // C++17 [expr.sub]p1: 14125 // The expression E1[E2] is identical (by definition) to *((E1)+(E2)). The 14126 // expression E1 is sequenced before the expression E2. 
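// Illustrative example (hypothetical user code): given "int a[4][4];", the
// expression "a[i++][i]" is fine in C++17 (the "i++" inside E1 is sequenced
// before the read of "i" in E2) but is diagnosed as unsequenced in earlier
// language modes.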
14127 if (SemaRef.getLangOpts().CPlusPlus17) 14128 VisitSequencedExpressions(ASE->getLHS(), ASE->getRHS()); 14129 else { 14130 Visit(ASE->getLHS()); 14131 Visit(ASE->getRHS()); 14132 } 14133 } 14134 14135 void VisitBinPtrMemD(const BinaryOperator *BO) { VisitBinPtrMem(BO); } 14136 void VisitBinPtrMemI(const BinaryOperator *BO) { VisitBinPtrMem(BO); } 14137 void VisitBinPtrMem(const BinaryOperator *BO) { 14138 // C++17 [expr.mptr.oper]p4: 14139 // Abbreviating pm-expression.*cast-expression as E1.*E2, [...] 14140 // the expression E1 is sequenced before the expression E2. 14141 if (SemaRef.getLangOpts().CPlusPlus17) 14142 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 14143 else { 14144 Visit(BO->getLHS()); 14145 Visit(BO->getRHS()); 14146 } 14147 } 14148 14149 void VisitBinShl(const BinaryOperator *BO) { VisitBinShlShr(BO); } 14150 void VisitBinShr(const BinaryOperator *BO) { VisitBinShlShr(BO); } 14151 void VisitBinShlShr(const BinaryOperator *BO) { 14152 // C++17 [expr.shift]p4: 14153 // The expression E1 is sequenced before the expression E2. 14154 if (SemaRef.getLangOpts().CPlusPlus17) 14155 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 14156 else { 14157 Visit(BO->getLHS()); 14158 Visit(BO->getRHS()); 14159 } 14160 } 14161 14162 void VisitBinComma(const BinaryOperator *BO) { 14163 // C++11 [expr.comma]p1: 14164 // Every value computation and side effect associated with the left 14165 // expression is sequenced before every value computation and side 14166 // effect associated with the right expression. 14167 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 14168 } 14169 14170 void VisitBinAssign(const BinaryOperator *BO) { 14171 SequenceTree::Seq RHSRegion; 14172 SequenceTree::Seq LHSRegion; 14173 if (SemaRef.getLangOpts().CPlusPlus17) { 14174 RHSRegion = Tree.allocate(Region); 14175 LHSRegion = Tree.allocate(Region); 14176 } else { 14177 RHSRegion = Region; 14178 LHSRegion = Region; 14179 } 14180 SequenceTree::Seq OldRegion = Region; 14181 14182 // C++11 [expr.ass]p1: 14183 // [...] the assignment is sequenced after the value computation 14184 // of the right and left operands, [...] 14185 // 14186 // so check it before inspecting the operands and update the 14187 // map afterwards. 14188 Object O = getObject(BO->getLHS(), /*Mod=*/true); 14189 if (O) 14190 notePreMod(O, BO); 14191 14192 if (SemaRef.getLangOpts().CPlusPlus17) { 14193 // C++17 [expr.ass]p1: 14194 // [...] The right operand is sequenced before the left operand. [...] 14195 { 14196 SequencedSubexpression SeqBefore(*this); 14197 Region = RHSRegion; 14198 Visit(BO->getRHS()); 14199 } 14200 14201 Region = LHSRegion; 14202 Visit(BO->getLHS()); 14203 14204 if (O && isa<CompoundAssignOperator>(BO)) 14205 notePostUse(O, BO); 14206 14207 } else { 14208 // C++11 does not specify any sequencing between the LHS and RHS. 14209 Region = LHSRegion; 14210 Visit(BO->getLHS()); 14211 14212 if (O && isa<CompoundAssignOperator>(BO)) 14213 notePostUse(O, BO); 14214 14215 Region = RHSRegion; 14216 Visit(BO->getRHS()); 14217 } 14218 14219 // C++11 [expr.ass]p1: 14220 // the assignment is sequenced [...] before the value computation of the 14221 // assignment expression. 14222 // C11 6.5.16/3 has no such rule. 14223 Region = OldRegion; 14224 if (O) 14225 notePostMod(O, BO, 14226 SemaRef.getLangOpts().CPlusPlus ? 
UK_ModAsValue 14227 : UK_ModAsSideEffect); 14228 if (SemaRef.getLangOpts().CPlusPlus17) { 14229 Tree.merge(RHSRegion); 14230 Tree.merge(LHSRegion); 14231 } 14232 } 14233 14234 void VisitCompoundAssignOperator(const CompoundAssignOperator *CAO) { 14235 VisitBinAssign(CAO); 14236 } 14237 14238 void VisitUnaryPreInc(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); } 14239 void VisitUnaryPreDec(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); } 14240 void VisitUnaryPreIncDec(const UnaryOperator *UO) { 14241 Object O = getObject(UO->getSubExpr(), true); 14242 if (!O) 14243 return VisitExpr(UO); 14244 14245 notePreMod(O, UO); 14246 Visit(UO->getSubExpr()); 14247 // C++11 [expr.pre.incr]p1: 14248 // the expression ++x is equivalent to x+=1 14249 notePostMod(O, UO, 14250 SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue 14251 : UK_ModAsSideEffect); 14252 } 14253 14254 void VisitUnaryPostInc(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); } 14255 void VisitUnaryPostDec(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); } 14256 void VisitUnaryPostIncDec(const UnaryOperator *UO) { 14257 Object O = getObject(UO->getSubExpr(), true); 14258 if (!O) 14259 return VisitExpr(UO); 14260 14261 notePreMod(O, UO); 14262 Visit(UO->getSubExpr()); 14263 notePostMod(O, UO, UK_ModAsSideEffect); 14264 } 14265 14266 void VisitBinLOr(const BinaryOperator *BO) { 14267 // C++11 [expr.log.or]p2: 14268 // If the second expression is evaluated, every value computation and 14269 // side effect associated with the first expression is sequenced before 14270 // every value computation and side effect associated with the 14271 // second expression. 14272 SequenceTree::Seq LHSRegion = Tree.allocate(Region); 14273 SequenceTree::Seq RHSRegion = Tree.allocate(Region); 14274 SequenceTree::Seq OldRegion = Region; 14275 14276 EvaluationTracker Eval(*this); 14277 { 14278 SequencedSubexpression Sequenced(*this); 14279 Region = LHSRegion; 14280 Visit(BO->getLHS()); 14281 } 14282 14283 // C++11 [expr.log.or]p1: 14284 // [...] the second operand is not evaluated if the first operand 14285 // evaluates to true. 14286 bool EvalResult = false; 14287 bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult); 14288 bool ShouldVisitRHS = !EvalOK || (EvalOK && !EvalResult); 14289 if (ShouldVisitRHS) { 14290 Region = RHSRegion; 14291 Visit(BO->getRHS()); 14292 } 14293 14294 Region = OldRegion; 14295 Tree.merge(LHSRegion); 14296 Tree.merge(RHSRegion); 14297 } 14298 14299 void VisitBinLAnd(const BinaryOperator *BO) { 14300 // C++11 [expr.log.and]p2: 14301 // If the second expression is evaluated, every value computation and 14302 // side effect associated with the first expression is sequenced before 14303 // every value computation and side effect associated with the 14304 // second expression. 14305 SequenceTree::Seq LHSRegion = Tree.allocate(Region); 14306 SequenceTree::Seq RHSRegion = Tree.allocate(Region); 14307 SequenceTree::Seq OldRegion = Region; 14308 14309 EvaluationTracker Eval(*this); 14310 { 14311 SequencedSubexpression Sequenced(*this); 14312 Region = LHSRegion; 14313 Visit(BO->getLHS()); 14314 } 14315 14316 // C++11 [expr.log.and]p1: 14317 // [...] the second operand is not evaluated if the first operand is false. 
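// Illustrative example (hypothetical user code): in "0 && (i++ + i)" the right
// operand is never evaluated, so the unsequenced "i++ + i" inside it should
// not be diagnosed; that is why the left operand is constant-evaluated below.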
14318     bool EvalResult = false;
14319     bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult);
14320     bool ShouldVisitRHS = !EvalOK || (EvalOK && EvalResult);
14321     if (ShouldVisitRHS) {
14322       Region = RHSRegion;
14323       Visit(BO->getRHS());
14324     }
14325
14326     Region = OldRegion;
14327     Tree.merge(LHSRegion);
14328     Tree.merge(RHSRegion);
14329   }
14330
14331   void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO) {
14332     // C++11 [expr.cond]p1:
14333     //   [...] Every value computation and side effect associated with the first
14334     //   expression is sequenced before every value computation and side effect
14335     //   associated with the second or third expression.
14336     SequenceTree::Seq ConditionRegion = Tree.allocate(Region);
14337
14338     // No sequencing is specified between the true and false expressions.
14339     // However, since exactly one of the two is going to be evaluated, we can
14340     // consider them to be sequenced. This is needed to avoid warning on
14341     // something like "x ? y += 1 : y += 2;" in the case where we will visit
14342     // both the true and false expressions because we can't evaluate x.
14343     // This will still allow us to detect an expression like (pre C++17)
14344     // "(x ? y += 1 : y += 2) = y".
14345     //
14346     // We don't wrap the visitation of the true and false expressions with
14347     // SequencedSubexpression because we don't want to downgrade modifications
14348     // as side effects in the true and false expressions after the visitation
14349     // is done (for example, in the expression "(x ? y++ : y++) + y" we should
14350     // not warn between the two "y++", but we should warn between the "y++"
14351     // and the "y").
14352     SequenceTree::Seq TrueRegion = Tree.allocate(Region);
14353     SequenceTree::Seq FalseRegion = Tree.allocate(Region);
14354     SequenceTree::Seq OldRegion = Region;
14355
14356     EvaluationTracker Eval(*this);
14357     {
14358       SequencedSubexpression Sequenced(*this);
14359       Region = ConditionRegion;
14360       Visit(CO->getCond());
14361     }
14362
14363     // C++11 [expr.cond]p1:
14364     //   [...] The first expression is contextually converted to bool (Clause 4).
14365     //   It is evaluated and if it is true, the result of the conditional
14366     //   expression is the value of the second expression, otherwise that of the
14367     //   third expression. Only one of the second and third expressions is
14368     //   evaluated. [...]
14369     bool EvalResult = false;
14370     bool EvalOK = Eval.evaluate(CO->getCond(), EvalResult);
14371     bool ShouldVisitTrueExpr = !EvalOK || (EvalOK && EvalResult);
14372     bool ShouldVisitFalseExpr = !EvalOK || (EvalOK && !EvalResult);
14373     if (ShouldVisitTrueExpr) {
14374       Region = TrueRegion;
14375       Visit(CO->getTrueExpr());
14376     }
14377     if (ShouldVisitFalseExpr) {
14378       Region = FalseRegion;
14379       Visit(CO->getFalseExpr());
14380     }
14381
14382     Region = OldRegion;
14383     Tree.merge(ConditionRegion);
14384     Tree.merge(TrueRegion);
14385     Tree.merge(FalseRegion);
14386   }
14387
14388   void VisitCallExpr(const CallExpr *CE) {
14389     // FIXME: CXXNewExpr and CXXDeleteExpr implicitly call functions.
14390
14391     if (CE->isUnevaluatedBuiltinCall(Context))
14392       return;
14393
14394     // C++11 [intro.execution]p15:
14395     //   When calling a function [...], every value computation and side effect
14396     //   associated with any argument expression, or with the postfix expression
14397     //   designating the called function, is sequenced before execution of every
14398     //   expression or statement in the body of the function [and thus before
14399     //   the value computation of its result].
14400 SequencedSubexpression Sequenced(*this); 14401 SemaRef.runWithSufficientStackSpace(CE->getExprLoc(), [&] { 14402 // C++17 [expr.call]p5 14403 // The postfix-expression is sequenced before each expression in the 14404 // expression-list and any default argument. [...] 14405 SequenceTree::Seq CalleeRegion; 14406 SequenceTree::Seq OtherRegion; 14407 if (SemaRef.getLangOpts().CPlusPlus17) { 14408 CalleeRegion = Tree.allocate(Region); 14409 OtherRegion = Tree.allocate(Region); 14410 } else { 14411 CalleeRegion = Region; 14412 OtherRegion = Region; 14413 } 14414 SequenceTree::Seq OldRegion = Region; 14415 14416 // Visit the callee expression first. 14417 Region = CalleeRegion; 14418 if (SemaRef.getLangOpts().CPlusPlus17) { 14419 SequencedSubexpression Sequenced(*this); 14420 Visit(CE->getCallee()); 14421 } else { 14422 Visit(CE->getCallee()); 14423 } 14424 14425 // Then visit the argument expressions. 14426 Region = OtherRegion; 14427 for (const Expr *Argument : CE->arguments()) 14428 Visit(Argument); 14429 14430 Region = OldRegion; 14431 if (SemaRef.getLangOpts().CPlusPlus17) { 14432 Tree.merge(CalleeRegion); 14433 Tree.merge(OtherRegion); 14434 } 14435 }); 14436 } 14437 14438 void VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *CXXOCE) { 14439 // C++17 [over.match.oper]p2: 14440 // [...] the operator notation is first transformed to the equivalent 14441 // function-call notation as summarized in Table 12 (where @ denotes one 14442 // of the operators covered in the specified subclause). However, the 14443 // operands are sequenced in the order prescribed for the built-in 14444 // operator (Clause 8). 14445 // 14446 // From the above only overloaded binary operators and overloaded call 14447 // operators have sequencing rules in C++17 that we need to handle 14448 // separately. 14449 if (!SemaRef.getLangOpts().CPlusPlus17 || 14450 (CXXOCE->getNumArgs() != 2 && CXXOCE->getOperator() != OO_Call)) 14451 return VisitCallExpr(CXXOCE); 14452 14453 enum { 14454 NoSequencing, 14455 LHSBeforeRHS, 14456 RHSBeforeLHS, 14457 LHSBeforeRest 14458 } SequencingKind; 14459 switch (CXXOCE->getOperator()) { 14460 case OO_Equal: 14461 case OO_PlusEqual: 14462 case OO_MinusEqual: 14463 case OO_StarEqual: 14464 case OO_SlashEqual: 14465 case OO_PercentEqual: 14466 case OO_CaretEqual: 14467 case OO_AmpEqual: 14468 case OO_PipeEqual: 14469 case OO_LessLessEqual: 14470 case OO_GreaterGreaterEqual: 14471 SequencingKind = RHSBeforeLHS; 14472 break; 14473 14474 case OO_LessLess: 14475 case OO_GreaterGreater: 14476 case OO_AmpAmp: 14477 case OO_PipePipe: 14478 case OO_Comma: 14479 case OO_ArrowStar: 14480 case OO_Subscript: 14481 SequencingKind = LHSBeforeRHS; 14482 break; 14483 14484 case OO_Call: 14485 SequencingKind = LHSBeforeRest; 14486 break; 14487 14488 default: 14489 SequencingKind = NoSequencing; 14490 break; 14491 } 14492 14493 if (SequencingKind == NoSequencing) 14494 return VisitCallExpr(CXXOCE); 14495 14496 // This is a call, so all subexpressions are sequenced before the result. 
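// Illustrative example (hypothetical user code): for an array of function
// objects "Fn fns[4];", the call "fns[i++](i)" is well defined in C++17
// because the postfix expression "fns[i++]" is sequenced before the argument
// "i"; that is the LHSBeforeRest case handled below.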
14497 SequencedSubexpression Sequenced(*this); 14498 14499 SemaRef.runWithSufficientStackSpace(CXXOCE->getExprLoc(), [&] { 14500 assert(SemaRef.getLangOpts().CPlusPlus17 && 14501 "Should only get there with C++17 and above!"); 14502 assert((CXXOCE->getNumArgs() == 2 || CXXOCE->getOperator() == OO_Call) && 14503 "Should only get there with an overloaded binary operator" 14504 " or an overloaded call operator!"); 14505 14506 if (SequencingKind == LHSBeforeRest) { 14507 assert(CXXOCE->getOperator() == OO_Call && 14508 "We should only have an overloaded call operator here!"); 14509 14510 // This is very similar to VisitCallExpr, except that we only have the 14511 // C++17 case. The postfix-expression is the first argument of the 14512 // CXXOperatorCallExpr. The expressions in the expression-list, if any, 14513 // are in the following arguments. 14514 // 14515 // Note that we intentionally do not visit the callee expression since 14516 // it is just a decayed reference to a function. 14517 SequenceTree::Seq PostfixExprRegion = Tree.allocate(Region); 14518 SequenceTree::Seq ArgsRegion = Tree.allocate(Region); 14519 SequenceTree::Seq OldRegion = Region; 14520 14521 assert(CXXOCE->getNumArgs() >= 1 && 14522 "An overloaded call operator must have at least one argument" 14523 " for the postfix-expression!"); 14524 const Expr *PostfixExpr = CXXOCE->getArgs()[0]; 14525 llvm::ArrayRef<const Expr *> Args(CXXOCE->getArgs() + 1, 14526 CXXOCE->getNumArgs() - 1); 14527 14528 // Visit the postfix-expression first. 14529 { 14530 Region = PostfixExprRegion; 14531 SequencedSubexpression Sequenced(*this); 14532 Visit(PostfixExpr); 14533 } 14534 14535 // Then visit the argument expressions. 14536 Region = ArgsRegion; 14537 for (const Expr *Arg : Args) 14538 Visit(Arg); 14539 14540 Region = OldRegion; 14541 Tree.merge(PostfixExprRegion); 14542 Tree.merge(ArgsRegion); 14543 } else { 14544 assert(CXXOCE->getNumArgs() == 2 && 14545 "Should only have two arguments here!"); 14546 assert((SequencingKind == LHSBeforeRHS || 14547 SequencingKind == RHSBeforeLHS) && 14548 "Unexpected sequencing kind!"); 14549 14550 // We do not visit the callee expression since it is just a decayed 14551 // reference to a function. 14552 const Expr *E1 = CXXOCE->getArg(0); 14553 const Expr *E2 = CXXOCE->getArg(1); 14554 if (SequencingKind == RHSBeforeLHS) 14555 std::swap(E1, E2); 14556 14557 return VisitSequencedExpressions(E1, E2); 14558 } 14559 }); 14560 } 14561 14562 void VisitCXXConstructExpr(const CXXConstructExpr *CCE) { 14563 // This is a call, so all subexpressions are sequenced before the result. 14564 SequencedSubexpression Sequenced(*this); 14565 14566 if (!CCE->isListInitialization()) 14567 return VisitExpr(CCE); 14568 14569 // In C++11, list initializations are sequenced. 14570 SmallVector<SequenceTree::Seq, 32> Elts; 14571 SequenceTree::Seq Parent = Region; 14572 for (CXXConstructExpr::const_arg_iterator I = CCE->arg_begin(), 14573 E = CCE->arg_end(); 14574 I != E; ++I) { 14575 Region = Tree.allocate(Parent); 14576 Elts.push_back(Region); 14577 Visit(*I); 14578 } 14579 14580 // Forget that the initializers are sequenced. 14581 Region = Parent; 14582 for (unsigned I = 0; I < Elts.size(); ++I) 14583 Tree.merge(Elts[I]); 14584 } 14585 14586 void VisitInitListExpr(const InitListExpr *ILE) { 14587 if (!SemaRef.getLangOpts().CPlusPlus11) 14588 return VisitExpr(ILE); 14589 14590 // In C++11, list initializations are sequenced. 
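// Illustrative example (hypothetical user code): "int a[2] = {i++, i++};" is
// well defined in C++11 because each element of a braced initializer list is
// sequenced before the next, so no -Wunsequenced warning is wanted here.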
14591 SmallVector<SequenceTree::Seq, 32> Elts; 14592 SequenceTree::Seq Parent = Region; 14593 for (unsigned I = 0; I < ILE->getNumInits(); ++I) { 14594 const Expr *E = ILE->getInit(I); 14595 if (!E) 14596 continue; 14597 Region = Tree.allocate(Parent); 14598 Elts.push_back(Region); 14599 Visit(E); 14600 } 14601 14602 // Forget that the initializers are sequenced. 14603 Region = Parent; 14604 for (unsigned I = 0; I < Elts.size(); ++I) 14605 Tree.merge(Elts[I]); 14606 } 14607 }; 14608 14609 } // namespace 14610 14611 void Sema::CheckUnsequencedOperations(const Expr *E) { 14612 SmallVector<const Expr *, 8> WorkList; 14613 WorkList.push_back(E); 14614 while (!WorkList.empty()) { 14615 const Expr *Item = WorkList.pop_back_val(); 14616 SequenceChecker(*this, Item, WorkList); 14617 } 14618 } 14619 14620 void Sema::CheckCompletedExpr(Expr *E, SourceLocation CheckLoc, 14621 bool IsConstexpr) { 14622 llvm::SaveAndRestore<bool> ConstantContext( 14623 isConstantEvaluatedOverride, IsConstexpr || isa<ConstantExpr>(E)); 14624 CheckImplicitConversions(E, CheckLoc); 14625 if (!E->isInstantiationDependent()) 14626 CheckUnsequencedOperations(E); 14627 if (!IsConstexpr && !E->isValueDependent()) 14628 CheckForIntOverflow(E); 14629 DiagnoseMisalignedMembers(); 14630 } 14631 14632 void Sema::CheckBitFieldInitialization(SourceLocation InitLoc, 14633 FieldDecl *BitField, 14634 Expr *Init) { 14635 (void) AnalyzeBitFieldAssignment(*this, BitField, Init, InitLoc); 14636 } 14637 14638 static void diagnoseArrayStarInParamType(Sema &S, QualType PType, 14639 SourceLocation Loc) { 14640 if (!PType->isVariablyModifiedType()) 14641 return; 14642 if (const auto *PointerTy = dyn_cast<PointerType>(PType)) { 14643 diagnoseArrayStarInParamType(S, PointerTy->getPointeeType(), Loc); 14644 return; 14645 } 14646 if (const auto *ReferenceTy = dyn_cast<ReferenceType>(PType)) { 14647 diagnoseArrayStarInParamType(S, ReferenceTy->getPointeeType(), Loc); 14648 return; 14649 } 14650 if (const auto *ParenTy = dyn_cast<ParenType>(PType)) { 14651 diagnoseArrayStarInParamType(S, ParenTy->getInnerType(), Loc); 14652 return; 14653 } 14654 14655 const ArrayType *AT = S.Context.getAsArrayType(PType); 14656 if (!AT) 14657 return; 14658 14659 if (AT->getSizeModifier() != ArrayType::Star) { 14660 diagnoseArrayStarInParamType(S, AT->getElementType(), Loc); 14661 return; 14662 } 14663 14664 S.Diag(Loc, diag::err_array_star_in_function_definition); 14665 } 14666 14667 /// CheckParmsForFunctionDef - Check that the parameters of the given 14668 /// function are appropriate for the definition of a function. This 14669 /// takes care of any checks that cannot be performed on the 14670 /// declaration itself, e.g., that the types of each of the function 14671 /// parameters are complete. 14672 bool Sema::CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, 14673 bool CheckParameterNames) { 14674 bool HasInvalidParm = false; 14675 for (ParmVarDecl *Param : Parameters) { 14676 // C99 6.7.5.3p4: the parameters in a parameter type list in a 14677 // function declarator that is part of a function definition of 14678 // that function shall not have incomplete type. 14679 // 14680 // This is also C++ [dcl.fct]p6. 
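// Illustrative example (hypothetical user code):
//
//   struct S;                  // only forward-declared
//   void f(struct S s) {}      // error: parameter has incomplete type
//
// A mere declaration "void f(struct S s);" remains valid.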
14681 if (!Param->isInvalidDecl() && 14682 RequireCompleteType(Param->getLocation(), Param->getType(), 14683 diag::err_typecheck_decl_incomplete_type)) { 14684 Param->setInvalidDecl(); 14685 HasInvalidParm = true; 14686 } 14687 14688 // C99 6.9.1p5: If the declarator includes a parameter type list, the 14689 // declaration of each parameter shall include an identifier. 14690 if (CheckParameterNames && Param->getIdentifier() == nullptr && 14691 !Param->isImplicit() && !getLangOpts().CPlusPlus) { 14692 // Diagnose this as an extension in C17 and earlier. 14693 if (!getLangOpts().C2x) 14694 Diag(Param->getLocation(), diag::ext_parameter_name_omitted_c2x); 14695 } 14696 14697 // C99 6.7.5.3p12: 14698 // If the function declarator is not part of a definition of that 14699 // function, parameters may have incomplete type and may use the [*] 14700 // notation in their sequences of declarator specifiers to specify 14701 // variable length array types. 14702 QualType PType = Param->getOriginalType(); 14703 // FIXME: This diagnostic should point the '[*]' if source-location 14704 // information is added for it. 14705 diagnoseArrayStarInParamType(*this, PType, Param->getLocation()); 14706 14707 // If the parameter is a c++ class type and it has to be destructed in the 14708 // callee function, declare the destructor so that it can be called by the 14709 // callee function. Do not perform any direct access check on the dtor here. 14710 if (!Param->isInvalidDecl()) { 14711 if (CXXRecordDecl *ClassDecl = Param->getType()->getAsCXXRecordDecl()) { 14712 if (!ClassDecl->isInvalidDecl() && 14713 !ClassDecl->hasIrrelevantDestructor() && 14714 !ClassDecl->isDependentContext() && 14715 ClassDecl->isParamDestroyedInCallee()) { 14716 CXXDestructorDecl *Destructor = LookupDestructor(ClassDecl); 14717 MarkFunctionReferenced(Param->getLocation(), Destructor); 14718 DiagnoseUseOfDecl(Destructor, Param->getLocation()); 14719 } 14720 } 14721 } 14722 14723 // Parameters with the pass_object_size attribute only need to be marked 14724 // constant at function definitions. Because we lack information about 14725 // whether we're on a declaration or definition when we're instantiating the 14726 // attribute, we need to check for constness here. 14727 if (const auto *Attr = Param->getAttr<PassObjectSizeAttr>()) 14728 if (!Param->getType().isConstQualified()) 14729 Diag(Param->getLocation(), diag::err_attribute_pointers_only) 14730 << Attr->getSpelling() << 1; 14731 14732 // Check for parameter names shadowing fields from the class. 14733 if (LangOpts.CPlusPlus && !Param->isInvalidDecl()) { 14734 // The owning context for the parameter should be the function, but we 14735 // want to see if this function's declaration context is a record. 14736 DeclContext *DC = Param->getDeclContext(); 14737 if (DC && DC->isFunctionOrMethod()) { 14738 if (auto *RD = dyn_cast<CXXRecordDecl>(DC->getParent())) 14739 CheckShadowInheritedFields(Param->getLocation(), Param->getDeclName(), 14740 RD, /*DeclIsField*/ false); 14741 } 14742 } 14743 } 14744 14745 return HasInvalidParm; 14746 } 14747 14748 Optional<std::pair<CharUnits, CharUnits>> 14749 static getBaseAlignmentAndOffsetFromPtr(const Expr *E, ASTContext &Ctx); 14750 14751 /// Compute the alignment and offset of the base class object given the 14752 /// derived-to-base cast expression and the alignment and offset of the derived 14753 /// class object. 
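// Illustrative example (hypothetical types, assuming a typical layout):
//
//   struct A { int a; }; struct B { int b; }; struct D : A, B {};
//
// Converting a D object known to be 8-byte aligned at offset 0 to its B base
// keeps the alignment of 8 but adds B's base-class offset within D (typically
// sizeof(A), i.e. 4 bytes) to the running offset.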
14754 static std::pair<CharUnits, CharUnits> 14755 getDerivedToBaseAlignmentAndOffset(const CastExpr *CE, QualType DerivedType, 14756 CharUnits BaseAlignment, CharUnits Offset, 14757 ASTContext &Ctx) { 14758 for (auto PathI = CE->path_begin(), PathE = CE->path_end(); PathI != PathE; 14759 ++PathI) { 14760 const CXXBaseSpecifier *Base = *PathI; 14761 const CXXRecordDecl *BaseDecl = Base->getType()->getAsCXXRecordDecl(); 14762 if (Base->isVirtual()) { 14763 // The complete object may have a lower alignment than the non-virtual 14764 // alignment of the base, in which case the base may be misaligned. Choose 14765 // the smaller of the non-virtual alignment and BaseAlignment, which is a 14766 // conservative lower bound of the complete object alignment. 14767 CharUnits NonVirtualAlignment = 14768 Ctx.getASTRecordLayout(BaseDecl).getNonVirtualAlignment(); 14769 BaseAlignment = std::min(BaseAlignment, NonVirtualAlignment); 14770 Offset = CharUnits::Zero(); 14771 } else { 14772 const ASTRecordLayout &RL = 14773 Ctx.getASTRecordLayout(DerivedType->getAsCXXRecordDecl()); 14774 Offset += RL.getBaseClassOffset(BaseDecl); 14775 } 14776 DerivedType = Base->getType(); 14777 } 14778 14779 return std::make_pair(BaseAlignment, Offset); 14780 } 14781 14782 /// Compute the alignment and offset of a binary additive operator. 14783 static Optional<std::pair<CharUnits, CharUnits>> 14784 getAlignmentAndOffsetFromBinAddOrSub(const Expr *PtrE, const Expr *IntE, 14785 bool IsSub, ASTContext &Ctx) { 14786 QualType PointeeType = PtrE->getType()->getPointeeType(); 14787 14788 if (!PointeeType->isConstantSizeType()) 14789 return llvm::None; 14790 14791 auto P = getBaseAlignmentAndOffsetFromPtr(PtrE, Ctx); 14792 14793 if (!P) 14794 return llvm::None; 14795 14796 CharUnits EltSize = Ctx.getTypeSizeInChars(PointeeType); 14797 if (Optional<llvm::APSInt> IdxRes = IntE->getIntegerConstantExpr(Ctx)) { 14798 CharUnits Offset = EltSize * IdxRes->getExtValue(); 14799 if (IsSub) 14800 Offset = -Offset; 14801 return std::make_pair(P->first, P->second + Offset); 14802 } 14803 14804 // If the integer expression isn't a constant expression, compute the lower 14805 // bound of the alignment using the alignment and offset of the pointer 14806 // expression and the element size. 14807 return std::make_pair( 14808 P->first.alignmentAtOffset(P->second).alignmentAtOffset(EltSize), 14809 CharUnits::Zero()); 14810 } 14811 14812 /// This helper function takes an lvalue expression and returns the alignment of 14813 /// a VarDecl and a constant offset from the VarDecl. 
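// Illustrative example (hypothetical user code): for
//
//   struct { char c; int arr[4]; } s;
//
// the lvalue "s.arr[2]" yields the alignment of 's' itself together with the
// constant byte offset of arr[2] within it (typically 4 + 2 * 4 = 12).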
14814 Optional<std::pair<CharUnits, CharUnits>> 14815 static getBaseAlignmentAndOffsetFromLValue(const Expr *E, ASTContext &Ctx) { 14816 E = E->IgnoreParens(); 14817 switch (E->getStmtClass()) { 14818 default: 14819 break; 14820 case Stmt::CStyleCastExprClass: 14821 case Stmt::CXXStaticCastExprClass: 14822 case Stmt::ImplicitCastExprClass: { 14823 auto *CE = cast<CastExpr>(E); 14824 const Expr *From = CE->getSubExpr(); 14825 switch (CE->getCastKind()) { 14826 default: 14827 break; 14828 case CK_NoOp: 14829 return getBaseAlignmentAndOffsetFromLValue(From, Ctx); 14830 case CK_UncheckedDerivedToBase: 14831 case CK_DerivedToBase: { 14832 auto P = getBaseAlignmentAndOffsetFromLValue(From, Ctx); 14833 if (!P) 14834 break; 14835 return getDerivedToBaseAlignmentAndOffset(CE, From->getType(), P->first, 14836 P->second, Ctx); 14837 } 14838 } 14839 break; 14840 } 14841 case Stmt::ArraySubscriptExprClass: { 14842 auto *ASE = cast<ArraySubscriptExpr>(E); 14843 return getAlignmentAndOffsetFromBinAddOrSub(ASE->getBase(), ASE->getIdx(), 14844 false, Ctx); 14845 } 14846 case Stmt::DeclRefExprClass: { 14847 if (auto *VD = dyn_cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl())) { 14848 // FIXME: If VD is captured by copy or is an escaping __block variable, 14849 // use the alignment of VD's type. 14850 if (!VD->getType()->isReferenceType()) 14851 return std::make_pair(Ctx.getDeclAlign(VD), CharUnits::Zero()); 14852 if (VD->hasInit()) 14853 return getBaseAlignmentAndOffsetFromLValue(VD->getInit(), Ctx); 14854 } 14855 break; 14856 } 14857 case Stmt::MemberExprClass: { 14858 auto *ME = cast<MemberExpr>(E); 14859 auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl()); 14860 if (!FD || FD->getType()->isReferenceType() || 14861 FD->getParent()->isInvalidDecl()) 14862 break; 14863 Optional<std::pair<CharUnits, CharUnits>> P; 14864 if (ME->isArrow()) 14865 P = getBaseAlignmentAndOffsetFromPtr(ME->getBase(), Ctx); 14866 else 14867 P = getBaseAlignmentAndOffsetFromLValue(ME->getBase(), Ctx); 14868 if (!P) 14869 break; 14870 const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(FD->getParent()); 14871 uint64_t Offset = Layout.getFieldOffset(FD->getFieldIndex()); 14872 return std::make_pair(P->first, 14873 P->second + CharUnits::fromQuantity(Offset)); 14874 } 14875 case Stmt::UnaryOperatorClass: { 14876 auto *UO = cast<UnaryOperator>(E); 14877 switch (UO->getOpcode()) { 14878 default: 14879 break; 14880 case UO_Deref: 14881 return getBaseAlignmentAndOffsetFromPtr(UO->getSubExpr(), Ctx); 14882 } 14883 break; 14884 } 14885 case Stmt::BinaryOperatorClass: { 14886 auto *BO = cast<BinaryOperator>(E); 14887 auto Opcode = BO->getOpcode(); 14888 switch (Opcode) { 14889 default: 14890 break; 14891 case BO_Comma: 14892 return getBaseAlignmentAndOffsetFromLValue(BO->getRHS(), Ctx); 14893 } 14894 break; 14895 } 14896 } 14897 return llvm::None; 14898 } 14899 14900 /// This helper function takes a pointer expression and returns the alignment of 14901 /// a VarDecl and a constant offset from the VarDecl. 
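// Illustrative example (hypothetical user code): for "int buf[8];" the pointer
// expression "buf + 3" yields the alignment of 'buf' and a constant offset of
// 3 * sizeof(int) bytes; likewise "&s.x" yields the alignment of the variable
// 's' plus the constant offset of the member 'x'.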
14902 Optional<std::pair<CharUnits, CharUnits>> 14903 static getBaseAlignmentAndOffsetFromPtr(const Expr *E, ASTContext &Ctx) { 14904 E = E->IgnoreParens(); 14905 switch (E->getStmtClass()) { 14906 default: 14907 break; 14908 case Stmt::CStyleCastExprClass: 14909 case Stmt::CXXStaticCastExprClass: 14910 case Stmt::ImplicitCastExprClass: { 14911 auto *CE = cast<CastExpr>(E); 14912 const Expr *From = CE->getSubExpr(); 14913 switch (CE->getCastKind()) { 14914 default: 14915 break; 14916 case CK_NoOp: 14917 return getBaseAlignmentAndOffsetFromPtr(From, Ctx); 14918 case CK_ArrayToPointerDecay: 14919 return getBaseAlignmentAndOffsetFromLValue(From, Ctx); 14920 case CK_UncheckedDerivedToBase: 14921 case CK_DerivedToBase: { 14922 auto P = getBaseAlignmentAndOffsetFromPtr(From, Ctx); 14923 if (!P) 14924 break; 14925 return getDerivedToBaseAlignmentAndOffset( 14926 CE, From->getType()->getPointeeType(), P->first, P->second, Ctx); 14927 } 14928 } 14929 break; 14930 } 14931 case Stmt::CXXThisExprClass: { 14932 auto *RD = E->getType()->getPointeeType()->getAsCXXRecordDecl(); 14933 CharUnits Alignment = Ctx.getASTRecordLayout(RD).getNonVirtualAlignment(); 14934 return std::make_pair(Alignment, CharUnits::Zero()); 14935 } 14936 case Stmt::UnaryOperatorClass: { 14937 auto *UO = cast<UnaryOperator>(E); 14938 if (UO->getOpcode() == UO_AddrOf) 14939 return getBaseAlignmentAndOffsetFromLValue(UO->getSubExpr(), Ctx); 14940 break; 14941 } 14942 case Stmt::BinaryOperatorClass: { 14943 auto *BO = cast<BinaryOperator>(E); 14944 auto Opcode = BO->getOpcode(); 14945 switch (Opcode) { 14946 default: 14947 break; 14948 case BO_Add: 14949 case BO_Sub: { 14950 const Expr *LHS = BO->getLHS(), *RHS = BO->getRHS(); 14951 if (Opcode == BO_Add && !RHS->getType()->isIntegralOrEnumerationType()) 14952 std::swap(LHS, RHS); 14953 return getAlignmentAndOffsetFromBinAddOrSub(LHS, RHS, Opcode == BO_Sub, 14954 Ctx); 14955 } 14956 case BO_Comma: 14957 return getBaseAlignmentAndOffsetFromPtr(BO->getRHS(), Ctx); 14958 } 14959 break; 14960 } 14961 } 14962 return llvm::None; 14963 } 14964 14965 static CharUnits getPresumedAlignmentOfPointer(const Expr *E, Sema &S) { 14966 // See if we can compute the alignment of a VarDecl and an offset from it. 14967 Optional<std::pair<CharUnits, CharUnits>> P = 14968 getBaseAlignmentAndOffsetFromPtr(E, S.Context); 14969 14970 if (P) 14971 return P->first.alignmentAtOffset(P->second); 14972 14973 // If that failed, return the type's alignment. 14974 return S.Context.getTypeAlignInChars(E->getType()->getPointeeType()); 14975 } 14976 14977 /// CheckCastAlign - Implements -Wcast-align, which warns when a 14978 /// pointer cast increases the alignment requirements. 14979 void Sema::CheckCastAlign(Expr *Op, QualType T, SourceRange TRange) { 14980 // This is actually a lot of work to potentially be doing on every 14981 // cast; don't do it if we're ignoring -Wcast_align (as is the default). 14982 if (getDiagnostics().isIgnored(diag::warn_cast_align, TRange.getBegin())) 14983 return; 14984 14985 // Ignore dependent types. 14986 if (T->isDependentType() || Op->getType()->isDependentType()) 14987 return; 14988 14989 // Require that the destination be a pointer type. 14990 const PointerType *DestPtr = T->getAs<PointerType>(); 14991 if (!DestPtr) return; 14992 14993 // If the destination has alignment 1, we're done. 
14994 QualType DestPointee = DestPtr->getPointeeType(); 14995 if (DestPointee->isIncompleteType()) return; 14996 CharUnits DestAlign = Context.getTypeAlignInChars(DestPointee); 14997 if (DestAlign.isOne()) return; 14998 14999 // Require that the source be a pointer type. 15000 const PointerType *SrcPtr = Op->getType()->getAs<PointerType>(); 15001 if (!SrcPtr) return; 15002 QualType SrcPointee = SrcPtr->getPointeeType(); 15003 15004 // Explicitly allow casts from cv void*. We already implicitly 15005 // allowed casts to cv void*, since they have alignment 1. 15006 // Also allow casts involving incomplete types, which implicitly 15007 // includes 'void'. 15008 if (SrcPointee->isIncompleteType()) return; 15009 15010 CharUnits SrcAlign = getPresumedAlignmentOfPointer(Op, *this); 15011 15012 if (SrcAlign >= DestAlign) return; 15013 15014 Diag(TRange.getBegin(), diag::warn_cast_align) 15015 << Op->getType() << T 15016 << static_cast<unsigned>(SrcAlign.getQuantity()) 15017 << static_cast<unsigned>(DestAlign.getQuantity()) 15018 << TRange << Op->getSourceRange(); 15019 } 15020 15021 /// Check whether this array fits the idiom of a size-one tail padded 15022 /// array member of a struct. 15023 /// 15024 /// We avoid emitting out-of-bounds access warnings for such arrays as they are 15025 /// commonly used to emulate flexible arrays in C89 code. 15026 static bool IsTailPaddedMemberArray(Sema &S, const llvm::APInt &Size, 15027 const NamedDecl *ND) { 15028 if (Size != 1 || !ND) return false; 15029 15030 const FieldDecl *FD = dyn_cast<FieldDecl>(ND); 15031 if (!FD) return false; 15032 15033 // Don't consider sizes resulting from macro expansions or template argument 15034 // substitution to form C89 tail-padded arrays. 15035 15036 TypeSourceInfo *TInfo = FD->getTypeSourceInfo(); 15037 while (TInfo) { 15038 TypeLoc TL = TInfo->getTypeLoc(); 15039 // Look through typedefs. 15040 if (TypedefTypeLoc TTL = TL.getAs<TypedefTypeLoc>()) { 15041 const TypedefNameDecl *TDL = TTL.getTypedefNameDecl(); 15042 TInfo = TDL->getTypeSourceInfo(); 15043 continue; 15044 } 15045 if (ConstantArrayTypeLoc CTL = TL.getAs<ConstantArrayTypeLoc>()) { 15046 const Expr *SizeExpr = dyn_cast<IntegerLiteral>(CTL.getSizeExpr()); 15047 if (!SizeExpr || SizeExpr->getExprLoc().isMacroID()) 15048 return false; 15049 } 15050 break; 15051 } 15052 15053 const RecordDecl *RD = dyn_cast<RecordDecl>(FD->getDeclContext()); 15054 if (!RD) return false; 15055 if (RD->isUnion()) return false; 15056 if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) { 15057 if (!CRD->isStandardLayout()) return false; 15058 } 15059 15060 // See if this is the last field decl in the record. 15061 const Decl *D = FD; 15062 while ((D = D->getNextDeclInContext())) 15063 if (isa<FieldDecl>(D)) 15064 return false; 15065 return true; 15066 } 15067 15068 void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, 15069 const ArraySubscriptExpr *ASE, 15070 bool AllowOnePastEnd, bool IndexNegated) { 15071 // Already diagnosed by the constant evaluator. 15072 if (isConstantEvaluated()) 15073 return; 15074 15075 IndexExpr = IndexExpr->IgnoreParenImpCasts(); 15076 if (IndexExpr->isValueDependent()) 15077 return; 15078 15079 const Type *EffectiveType = 15080 BaseExpr->getType()->getPointeeOrArrayElementType(); 15081 BaseExpr = BaseExpr->IgnoreParenCasts(); 15082 const ConstantArrayType *ArrayTy = 15083 Context.getAsConstantArrayType(BaseExpr->getType()); 15084 15085 const Type *BaseType = 15086 ArrayTy == nullptr ? 
nullptr : ArrayTy->getElementType().getTypePtr(); 15087 bool IsUnboundedArray = (BaseType == nullptr); 15088 if (EffectiveType->isDependentType() || 15089 (!IsUnboundedArray && BaseType->isDependentType())) 15090 return; 15091 15092 Expr::EvalResult Result; 15093 if (!IndexExpr->EvaluateAsInt(Result, Context, Expr::SE_AllowSideEffects)) 15094 return; 15095 15096 llvm::APSInt index = Result.Val.getInt(); 15097 if (IndexNegated) { 15098 index.setIsUnsigned(false); 15099 index = -index; 15100 } 15101 15102 const NamedDecl *ND = nullptr; 15103 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 15104 ND = DRE->getDecl(); 15105 if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr)) 15106 ND = ME->getMemberDecl(); 15107 15108 if (IsUnboundedArray) { 15109 if (index.isUnsigned() || !index.isNegative()) { 15110 const auto &ASTC = getASTContext(); 15111 unsigned AddrBits = 15112 ASTC.getTargetInfo().getPointerWidth(ASTC.getTargetAddressSpace( 15113 EffectiveType->getCanonicalTypeInternal())); 15114 if (index.getBitWidth() < AddrBits) 15115 index = index.zext(AddrBits); 15116 Optional<CharUnits> ElemCharUnits = 15117 ASTC.getTypeSizeInCharsIfKnown(EffectiveType); 15118 // PR50741 - If EffectiveType has unknown size (e.g., if it's a void 15119 // pointer) bounds-checking isn't meaningful. 15120 if (!ElemCharUnits) 15121 return; 15122 llvm::APInt ElemBytes(index.getBitWidth(), ElemCharUnits->getQuantity()); 15123 // If index has more active bits than address space, we already know 15124 // we have a bounds violation to warn about. Otherwise, compute 15125 // address of (index + 1)th element, and warn about bounds violation 15126 // only if that address exceeds address space. 15127 if (index.getActiveBits() <= AddrBits) { 15128 bool Overflow; 15129 llvm::APInt Product(index); 15130 Product += 1; 15131 Product = Product.umul_ov(ElemBytes, Overflow); 15132 if (!Overflow && Product.getActiveBits() <= AddrBits) 15133 return; 15134 } 15135 15136 // Need to compute max possible elements in address space, since that 15137 // is included in diag message. 15138 llvm::APInt MaxElems = llvm::APInt::getMaxValue(AddrBits); 15139 MaxElems = MaxElems.zext(std::max(AddrBits + 1, ElemBytes.getBitWidth())); 15140 MaxElems += 1; 15141 ElemBytes = ElemBytes.zextOrTrunc(MaxElems.getBitWidth()); 15142 MaxElems = MaxElems.udiv(ElemBytes); 15143 15144 unsigned DiagID = 15145 ASE ? diag::warn_array_index_exceeds_max_addressable_bounds 15146 : diag::warn_ptr_arith_exceeds_max_addressable_bounds; 15147 15148 // Diag message shows element size in bits and in "bytes" (platform- 15149 // dependent CharUnits) 15150 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 15151 PDiag(DiagID) 15152 << toString(index, 10, true) << AddrBits 15153 << (unsigned)ASTC.toBits(*ElemCharUnits) 15154 << toString(ElemBytes, 10, false) 15155 << toString(MaxElems, 10, false) 15156 << (unsigned)MaxElems.getLimitedValue(~0U) 15157 << IndexExpr->getSourceRange()); 15158 15159 if (!ND) { 15160 // Try harder to find a NamedDecl to point at in the note. 
15161 while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(BaseExpr)) 15162 BaseExpr = ASE->getBase()->IgnoreParenCasts(); 15163 if (const auto *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 15164 ND = DRE->getDecl(); 15165 if (const auto *ME = dyn_cast<MemberExpr>(BaseExpr)) 15166 ND = ME->getMemberDecl(); 15167 } 15168 15169 if (ND) 15170 DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr, 15171 PDiag(diag::note_array_declared_here) << ND); 15172 } 15173 return; 15174 } 15175 15176 if (index.isUnsigned() || !index.isNegative()) { 15177 // It is possible that the type of the base expression after 15178 // IgnoreParenCasts is incomplete, even though the type of the base 15179 // expression before IgnoreParenCasts is complete (see PR39746 for an 15180 // example). In this case we have no information about whether the array 15181 // access exceeds the array bounds. However we can still diagnose an array 15182 // access which precedes the array bounds. 15183 if (BaseType->isIncompleteType()) 15184 return; 15185 15186 llvm::APInt size = ArrayTy->getSize(); 15187 if (!size.isStrictlyPositive()) 15188 return; 15189 15190 if (BaseType != EffectiveType) { 15191 // Make sure we're comparing apples to apples when comparing index to size 15192 uint64_t ptrarith_typesize = Context.getTypeSize(EffectiveType); 15193 uint64_t array_typesize = Context.getTypeSize(BaseType); 15194 // Handle ptrarith_typesize being zero, such as when casting to void* 15195 if (!ptrarith_typesize) ptrarith_typesize = 1; 15196 if (ptrarith_typesize != array_typesize) { 15197 // There's a cast to a different size type involved 15198 uint64_t ratio = array_typesize / ptrarith_typesize; 15199 // TODO: Be smarter about handling cases where array_typesize is not a 15200 // multiple of ptrarith_typesize 15201 if (ptrarith_typesize * ratio == array_typesize) 15202 size *= llvm::APInt(size.getBitWidth(), ratio); 15203 } 15204 } 15205 15206 if (size.getBitWidth() > index.getBitWidth()) 15207 index = index.zext(size.getBitWidth()); 15208 else if (size.getBitWidth() < index.getBitWidth()) 15209 size = size.zext(index.getBitWidth()); 15210 15211 // For array subscripting the index must be less than size, but for pointer 15212 // arithmetic also allow the index (offset) to be equal to size since 15213 // computing the next address after the end of the array is legal and 15214 // commonly done e.g. in C++ iterators and range-based for loops. 15215 if (AllowOnePastEnd ? index.ule(size) : index.ult(size)) 15216 return; 15217 15218 // Also don't warn for arrays of size 1 which are members of some 15219 // structure. These are often used to approximate flexible arrays in C89 15220 // code. 15221 if (IsTailPaddedMemberArray(*this, size, ND)) 15222 return; 15223 15224 // Suppress the warning if the subscript expression (as identified by the 15225 // ']' location) and the index expression are both from macro expansions 15226 // within a system header. 15227 if (ASE) { 15228 SourceLocation RBracketLoc = SourceMgr.getSpellingLoc( 15229 ASE->getRBracketLoc()); 15230 if (SourceMgr.isInSystemHeader(RBracketLoc)) { 15231 SourceLocation IndexLoc = 15232 SourceMgr.getSpellingLoc(IndexExpr->getBeginLoc()); 15233 if (SourceMgr.isWrittenInSameFile(RBracketLoc, IndexLoc)) 15234 return; 15235 } 15236 } 15237 15238 unsigned DiagID = ASE ? 
diag::warn_array_index_exceeds_bounds 15239 : diag::warn_ptr_arith_exceeds_bounds; 15240 15241 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 15242 PDiag(DiagID) << toString(index, 10, true) 15243 << toString(size, 10, true) 15244 << (unsigned)size.getLimitedValue(~0U) 15245 << IndexExpr->getSourceRange()); 15246 } else { 15247 unsigned DiagID = diag::warn_array_index_precedes_bounds; 15248 if (!ASE) { 15249 DiagID = diag::warn_ptr_arith_precedes_bounds; 15250 if (index.isNegative()) index = -index; 15251 } 15252 15253 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 15254 PDiag(DiagID) << toString(index, 10, true) 15255 << IndexExpr->getSourceRange()); 15256 } 15257 15258 if (!ND) { 15259 // Try harder to find a NamedDecl to point at in the note. 15260 while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(BaseExpr)) 15261 BaseExpr = ASE->getBase()->IgnoreParenCasts(); 15262 if (const auto *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 15263 ND = DRE->getDecl(); 15264 if (const auto *ME = dyn_cast<MemberExpr>(BaseExpr)) 15265 ND = ME->getMemberDecl(); 15266 } 15267 15268 if (ND) 15269 DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr, 15270 PDiag(diag::note_array_declared_here) << ND); 15271 } 15272 15273 void Sema::CheckArrayAccess(const Expr *expr) { 15274 int AllowOnePastEnd = 0; 15275 while (expr) { 15276 expr = expr->IgnoreParenImpCasts(); 15277 switch (expr->getStmtClass()) { 15278 case Stmt::ArraySubscriptExprClass: { 15279 const ArraySubscriptExpr *ASE = cast<ArraySubscriptExpr>(expr); 15280 CheckArrayAccess(ASE->getBase(), ASE->getIdx(), ASE, 15281 AllowOnePastEnd > 0); 15282 expr = ASE->getBase(); 15283 break; 15284 } 15285 case Stmt::MemberExprClass: { 15286 expr = cast<MemberExpr>(expr)->getBase(); 15287 break; 15288 } 15289 case Stmt::OMPArraySectionExprClass: { 15290 const OMPArraySectionExpr *ASE = cast<OMPArraySectionExpr>(expr); 15291 if (ASE->getLowerBound()) 15292 CheckArrayAccess(ASE->getBase(), ASE->getLowerBound(), 15293 /*ASE=*/nullptr, AllowOnePastEnd > 0); 15294 return; 15295 } 15296 case Stmt::UnaryOperatorClass: { 15297 // Only unwrap the * and & unary operators 15298 const UnaryOperator *UO = cast<UnaryOperator>(expr); 15299 expr = UO->getSubExpr(); 15300 switch (UO->getOpcode()) { 15301 case UO_AddrOf: 15302 AllowOnePastEnd++; 15303 break; 15304 case UO_Deref: 15305 AllowOnePastEnd--; 15306 break; 15307 default: 15308 return; 15309 } 15310 break; 15311 } 15312 case Stmt::ConditionalOperatorClass: { 15313 const ConditionalOperator *cond = cast<ConditionalOperator>(expr); 15314 if (const Expr *lhs = cond->getLHS()) 15315 CheckArrayAccess(lhs); 15316 if (const Expr *rhs = cond->getRHS()) 15317 CheckArrayAccess(rhs); 15318 return; 15319 } 15320 case Stmt::CXXOperatorCallExprClass: { 15321 const auto *OCE = cast<CXXOperatorCallExpr>(expr); 15322 for (const auto *Arg : OCE->arguments()) 15323 CheckArrayAccess(Arg); 15324 return; 15325 } 15326 default: 15327 return; 15328 } 15329 } 15330 } 15331 15332 //===--- CHECK: Objective-C retain cycles ----------------------------------// 15333 15334 namespace { 15335 15336 struct RetainCycleOwner { 15337 VarDecl *Variable = nullptr; 15338 SourceRange Range; 15339 SourceLocation Loc; 15340 bool Indirect = false; 15341 15342 RetainCycleOwner() = default; 15343 15344 void setLocsFrom(Expr *e) { 15345 Loc = e->getExprLoc(); 15346 Range = e->getSourceRange(); 15347 } 15348 }; 15349 15350 } // namespace 15351 15352 /// Consider whether capturing the given variable can possibly lead to 15353 /// a retain cycle. 
15354 static bool considerVariable(VarDecl *var, Expr *ref, RetainCycleOwner &owner) { 15355 // In ARC, it's captured strongly iff the variable has __strong 15356 // lifetime. In MRR, it's captured strongly if the variable is 15357 // __block and has an appropriate type. 15358 if (var->getType().getObjCLifetime() != Qualifiers::OCL_Strong) 15359 return false; 15360 15361 owner.Variable = var; 15362 if (ref) 15363 owner.setLocsFrom(ref); 15364 return true; 15365 } 15366 15367 static bool findRetainCycleOwner(Sema &S, Expr *e, RetainCycleOwner &owner) { 15368 while (true) { 15369 e = e->IgnoreParens(); 15370 if (CastExpr *cast = dyn_cast<CastExpr>(e)) { 15371 switch (cast->getCastKind()) { 15372 case CK_BitCast: 15373 case CK_LValueBitCast: 15374 case CK_LValueToRValue: 15375 case CK_ARCReclaimReturnedObject: 15376 e = cast->getSubExpr(); 15377 continue; 15378 15379 default: 15380 return false; 15381 } 15382 } 15383 15384 if (ObjCIvarRefExpr *ref = dyn_cast<ObjCIvarRefExpr>(e)) { 15385 ObjCIvarDecl *ivar = ref->getDecl(); 15386 if (ivar->getType().getObjCLifetime() != Qualifiers::OCL_Strong) 15387 return false; 15388 15389 // Try to find a retain cycle in the base. 15390 if (!findRetainCycleOwner(S, ref->getBase(), owner)) 15391 return false; 15392 15393 if (ref->isFreeIvar()) owner.setLocsFrom(ref); 15394 owner.Indirect = true; 15395 return true; 15396 } 15397 15398 if (DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e)) { 15399 VarDecl *var = dyn_cast<VarDecl>(ref->getDecl()); 15400 if (!var) return false; 15401 return considerVariable(var, ref, owner); 15402 } 15403 15404 if (MemberExpr *member = dyn_cast<MemberExpr>(e)) { 15405 if (member->isArrow()) return false; 15406 15407 // Don't count this as an indirect ownership. 15408 e = member->getBase(); 15409 continue; 15410 } 15411 15412 if (PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) { 15413 // Only pay attention to pseudo-objects on property references. 15414 ObjCPropertyRefExpr *pre 15415 = dyn_cast<ObjCPropertyRefExpr>(pseudo->getSyntacticForm() 15416 ->IgnoreParens()); 15417 if (!pre) return false; 15418 if (pre->isImplicitProperty()) return false; 15419 ObjCPropertyDecl *property = pre->getExplicitProperty(); 15420 if (!property->isRetaining() && 15421 !(property->getPropertyIvarDecl() && 15422 property->getPropertyIvarDecl()->getType() 15423 .getObjCLifetime() == Qualifiers::OCL_Strong)) 15424 return false; 15425 15426 owner.Indirect = true; 15427 if (pre->isSuperReceiver()) { 15428 owner.Variable = S.getCurMethodDecl()->getSelfDecl(); 15429 if (!owner.Variable) 15430 return false; 15431 owner.Loc = pre->getLocation(); 15432 owner.Range = pre->getSourceRange(); 15433 return true; 15434 } 15435 e = const_cast<Expr*>(cast<OpaqueValueExpr>(pre->getBase()) 15436 ->getSourceExpr()); 15437 continue; 15438 } 15439 15440 // Array ivars? 
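// Illustrative note (assumption, not from the original source): any other
// expression form, e.g. the result of a plain function call or message send,
// is not treated as a strongly-owned location, so we give up below.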
15441 15442 return false; 15443 } 15444 } 15445 15446 namespace { 15447 15448 struct FindCaptureVisitor : EvaluatedExprVisitor<FindCaptureVisitor> { 15449 ASTContext &Context; 15450 VarDecl *Variable; 15451 Expr *Capturer = nullptr; 15452 bool VarWillBeReased = false; 15453 15454 FindCaptureVisitor(ASTContext &Context, VarDecl *variable) 15455 : EvaluatedExprVisitor<FindCaptureVisitor>(Context), 15456 Context(Context), Variable(variable) {} 15457 15458 void VisitDeclRefExpr(DeclRefExpr *ref) { 15459 if (ref->getDecl() == Variable && !Capturer) 15460 Capturer = ref; 15461 } 15462 15463 void VisitObjCIvarRefExpr(ObjCIvarRefExpr *ref) { 15464 if (Capturer) return; 15465 Visit(ref->getBase()); 15466 if (Capturer && ref->isFreeIvar()) 15467 Capturer = ref; 15468 } 15469 15470 void VisitBlockExpr(BlockExpr *block) { 15471 // Look inside nested blocks 15472 if (block->getBlockDecl()->capturesVariable(Variable)) 15473 Visit(block->getBlockDecl()->getBody()); 15474 } 15475 15476 void VisitOpaqueValueExpr(OpaqueValueExpr *OVE) { 15477 if (Capturer) return; 15478 if (OVE->getSourceExpr()) 15479 Visit(OVE->getSourceExpr()); 15480 } 15481 15482 void VisitBinaryOperator(BinaryOperator *BinOp) { 15483 if (!Variable || VarWillBeReased || BinOp->getOpcode() != BO_Assign) 15484 return; 15485 Expr *LHS = BinOp->getLHS(); 15486 if (const DeclRefExpr *DRE = dyn_cast_or_null<DeclRefExpr>(LHS)) { 15487 if (DRE->getDecl() != Variable) 15488 return; 15489 if (Expr *RHS = BinOp->getRHS()) { 15490 RHS = RHS->IgnoreParenCasts(); 15491 Optional<llvm::APSInt> Value; 15492 VarWillBeReased = 15493 (RHS && (Value = RHS->getIntegerConstantExpr(Context)) && 15494 *Value == 0); 15495 } 15496 } 15497 } 15498 }; 15499 15500 } // namespace 15501 15502 /// Check whether the given argument is a block which captures a 15503 /// variable. 15504 static Expr *findCapturingExpr(Sema &S, Expr *e, RetainCycleOwner &owner) { 15505 assert(owner.Variable && owner.Loc.isValid()); 15506 15507 e = e->IgnoreParenCasts(); 15508 15509 // Look through [^{...} copy] and Block_copy(^{...}). 15510 if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(e)) { 15511 Selector Cmd = ME->getSelector(); 15512 if (Cmd.isUnarySelector() && Cmd.getNameForSlot(0) == "copy") { 15513 e = ME->getInstanceReceiver(); 15514 if (!e) 15515 return nullptr; 15516 e = e->IgnoreParenCasts(); 15517 } 15518 } else if (CallExpr *CE = dyn_cast<CallExpr>(e)) { 15519 if (CE->getNumArgs() == 1) { 15520 FunctionDecl *Fn = dyn_cast_or_null<FunctionDecl>(CE->getCalleeDecl()); 15521 if (Fn) { 15522 const IdentifierInfo *FnI = Fn->getIdentifier(); 15523 if (FnI && FnI->isStr("_Block_copy")) { 15524 e = CE->getArg(0)->IgnoreParenCasts(); 15525 } 15526 } 15527 } 15528 } 15529 15530 BlockExpr *block = dyn_cast<BlockExpr>(e); 15531 if (!block || !block->getBlockDecl()->capturesVariable(owner.Variable)) 15532 return nullptr; 15533 15534 FindCaptureVisitor visitor(S.Context, owner.Variable); 15535 visitor.Visit(block->getBlockDecl()->getBody()); 15536 return visitor.VarWillBeReased ? 
nullptr : visitor.Capturer; 15537 } 15538 15539 static void diagnoseRetainCycle(Sema &S, Expr *capturer, 15540 RetainCycleOwner &owner) { 15541 assert(capturer); 15542 assert(owner.Variable && owner.Loc.isValid()); 15543 15544 S.Diag(capturer->getExprLoc(), diag::warn_arc_retain_cycle) 15545 << owner.Variable << capturer->getSourceRange(); 15546 S.Diag(owner.Loc, diag::note_arc_retain_cycle_owner) 15547 << owner.Indirect << owner.Range; 15548 } 15549 15550 /// Check for a keyword selector that starts with the word 'add' or 15551 /// 'set'. 15552 static bool isSetterLikeSelector(Selector sel) { 15553 if (sel.isUnarySelector()) return false; 15554 15555 StringRef str = sel.getNameForSlot(0); 15556 while (!str.empty() && str.front() == '_') str = str.substr(1); 15557 if (str.startswith("set")) 15558 str = str.substr(3); 15559 else if (str.startswith("add")) { 15560 // Specially allow 'addOperationWithBlock:'. 15561 if (sel.getNumArgs() == 1 && str.startswith("addOperationWithBlock")) 15562 return false; 15563 str = str.substr(3); 15564 } 15565 else 15566 return false; 15567 15568 if (str.empty()) return true; 15569 return !isLowercase(str.front()); 15570 } 15571 15572 static Optional<int> GetNSMutableArrayArgumentIndex(Sema &S, 15573 ObjCMessageExpr *Message) { 15574 bool IsMutableArray = S.NSAPIObj->isSubclassOfNSClass( 15575 Message->getReceiverInterface(), 15576 NSAPI::ClassId_NSMutableArray); 15577 if (!IsMutableArray) { 15578 return None; 15579 } 15580 15581 Selector Sel = Message->getSelector(); 15582 15583 Optional<NSAPI::NSArrayMethodKind> MKOpt = 15584 S.NSAPIObj->getNSArrayMethodKind(Sel); 15585 if (!MKOpt) { 15586 return None; 15587 } 15588 15589 NSAPI::NSArrayMethodKind MK = *MKOpt; 15590 15591 switch (MK) { 15592 case NSAPI::NSMutableArr_addObject: 15593 case NSAPI::NSMutableArr_insertObjectAtIndex: 15594 case NSAPI::NSMutableArr_setObjectAtIndexedSubscript: 15595 return 0; 15596 case NSAPI::NSMutableArr_replaceObjectAtIndex: 15597 return 1; 15598 15599 default: 15600 return None; 15601 } 15602 15603 return None; 15604 } 15605 15606 static 15607 Optional<int> GetNSMutableDictionaryArgumentIndex(Sema &S, 15608 ObjCMessageExpr *Message) { 15609 bool IsMutableDictionary = S.NSAPIObj->isSubclassOfNSClass( 15610 Message->getReceiverInterface(), 15611 NSAPI::ClassId_NSMutableDictionary); 15612 if (!IsMutableDictionary) { 15613 return None; 15614 } 15615 15616 Selector Sel = Message->getSelector(); 15617 15618 Optional<NSAPI::NSDictionaryMethodKind> MKOpt = 15619 S.NSAPIObj->getNSDictionaryMethodKind(Sel); 15620 if (!MKOpt) { 15621 return None; 15622 } 15623 15624 NSAPI::NSDictionaryMethodKind MK = *MKOpt; 15625 15626 switch (MK) { 15627 case NSAPI::NSMutableDict_setObjectForKey: 15628 case NSAPI::NSMutableDict_setValueForKey: 15629 case NSAPI::NSMutableDict_setObjectForKeyedSubscript: 15630 return 0; 15631 15632 default: 15633 return None; 15634 } 15635 15636 return None; 15637 } 15638 15639 static Optional<int> GetNSSetArgumentIndex(Sema &S, ObjCMessageExpr *Message) { 15640 bool IsMutableSet = S.NSAPIObj->isSubclassOfNSClass( 15641 Message->getReceiverInterface(), 15642 NSAPI::ClassId_NSMutableSet); 15643 15644 bool IsMutableOrderedSet = S.NSAPIObj->isSubclassOfNSClass( 15645 Message->getReceiverInterface(), 15646 NSAPI::ClassId_NSMutableOrderedSet); 15647 if (!IsMutableSet && !IsMutableOrderedSet) { 15648 return None; 15649 } 15650 15651 Selector Sel = Message->getSelector(); 15652 15653 Optional<NSAPI::NSSetMethodKind> MKOpt = S.NSAPIObj->getNSSetMethodKind(Sel); 15654 if (!MKOpt) 
{ 15655 return None; 15656 } 15657 15658 NSAPI::NSSetMethodKind MK = *MKOpt; 15659 15660 switch (MK) { 15661 case NSAPI::NSMutableSet_addObject: 15662 case NSAPI::NSOrderedSet_setObjectAtIndex: 15663 case NSAPI::NSOrderedSet_setObjectAtIndexedSubscript: 15664 case NSAPI::NSOrderedSet_insertObjectAtIndex: 15665 return 0; 15666 case NSAPI::NSOrderedSet_replaceObjectAtIndexWithObject: 15667 return 1; 15668 } 15669 15670 return None; 15671 } 15672 15673 void Sema::CheckObjCCircularContainer(ObjCMessageExpr *Message) { 15674 if (!Message->isInstanceMessage()) { 15675 return; 15676 } 15677 15678 Optional<int> ArgOpt; 15679 15680 if (!(ArgOpt = GetNSMutableArrayArgumentIndex(*this, Message)) && 15681 !(ArgOpt = GetNSMutableDictionaryArgumentIndex(*this, Message)) && 15682 !(ArgOpt = GetNSSetArgumentIndex(*this, Message))) { 15683 return; 15684 } 15685 15686 int ArgIndex = *ArgOpt; 15687 15688 Expr *Arg = Message->getArg(ArgIndex)->IgnoreImpCasts(); 15689 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Arg)) { 15690 Arg = OE->getSourceExpr()->IgnoreImpCasts(); 15691 } 15692 15693 if (Message->getReceiverKind() == ObjCMessageExpr::SuperInstance) { 15694 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) { 15695 if (ArgRE->isObjCSelfExpr()) { 15696 Diag(Message->getSourceRange().getBegin(), 15697 diag::warn_objc_circular_container) 15698 << ArgRE->getDecl() << StringRef("'super'"); 15699 } 15700 } 15701 } else { 15702 Expr *Receiver = Message->getInstanceReceiver()->IgnoreImpCasts(); 15703 15704 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Receiver)) { 15705 Receiver = OE->getSourceExpr()->IgnoreImpCasts(); 15706 } 15707 15708 if (DeclRefExpr *ReceiverRE = dyn_cast<DeclRefExpr>(Receiver)) { 15709 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) { 15710 if (ReceiverRE->getDecl() == ArgRE->getDecl()) { 15711 ValueDecl *Decl = ReceiverRE->getDecl(); 15712 Diag(Message->getSourceRange().getBegin(), 15713 diag::warn_objc_circular_container) 15714 << Decl << Decl; 15715 if (!ArgRE->isObjCSelfExpr()) { 15716 Diag(Decl->getLocation(), 15717 diag::note_objc_circular_container_declared_here) 15718 << Decl; 15719 } 15720 } 15721 } 15722 } else if (ObjCIvarRefExpr *IvarRE = dyn_cast<ObjCIvarRefExpr>(Receiver)) { 15723 if (ObjCIvarRefExpr *IvarArgRE = dyn_cast<ObjCIvarRefExpr>(Arg)) { 15724 if (IvarRE->getDecl() == IvarArgRE->getDecl()) { 15725 ObjCIvarDecl *Decl = IvarRE->getDecl(); 15726 Diag(Message->getSourceRange().getBegin(), 15727 diag::warn_objc_circular_container) 15728 << Decl << Decl; 15729 Diag(Decl->getLocation(), 15730 diag::note_objc_circular_container_declared_here) 15731 << Decl; 15732 } 15733 } 15734 } 15735 } 15736 } 15737 15738 /// Check a message send to see if it's likely to cause a retain cycle. 15739 void Sema::checkRetainCycles(ObjCMessageExpr *msg) { 15740 // Only check instance methods whose selector looks like a setter. 15741 if (!msg->isInstanceMessage() || !isSetterLikeSelector(msg->getSelector())) 15742 return; 15743 15744 // Try to find a variable that the receiver is strongly owned by. 
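// Illustrative example (hypothetical user code, not from the original source):
//   [self setCompletionHandler:^{ [self finish]; }];
// The receiver resolves to 'self' as the strongly-owning variable, and the
// block argument captures it, which is the pattern diagnosed as a retain cycle.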
15745 RetainCycleOwner owner; 15746 if (msg->getReceiverKind() == ObjCMessageExpr::Instance) { 15747 if (!findRetainCycleOwner(*this, msg->getInstanceReceiver(), owner)) 15748 return; 15749 } else { 15750 assert(msg->getReceiverKind() == ObjCMessageExpr::SuperInstance); 15751 owner.Variable = getCurMethodDecl()->getSelfDecl(); 15752 owner.Loc = msg->getSuperLoc(); 15753 owner.Range = msg->getSuperLoc(); 15754 } 15755 15756 // Check whether the receiver is captured by any of the arguments. 15757 const ObjCMethodDecl *MD = msg->getMethodDecl(); 15758 for (unsigned i = 0, e = msg->getNumArgs(); i != e; ++i) { 15759 if (Expr *capturer = findCapturingExpr(*this, msg->getArg(i), owner)) { 15760 // noescape blocks should not be retained by the method. 15761 if (MD && MD->parameters()[i]->hasAttr<NoEscapeAttr>()) 15762 continue; 15763 return diagnoseRetainCycle(*this, capturer, owner); 15764 } 15765 } 15766 } 15767 15768 /// Check a property assign to see if it's likely to cause a retain cycle. 15769 void Sema::checkRetainCycles(Expr *receiver, Expr *argument) { 15770 RetainCycleOwner owner; 15771 if (!findRetainCycleOwner(*this, receiver, owner)) 15772 return; 15773 15774 if (Expr *capturer = findCapturingExpr(*this, argument, owner)) 15775 diagnoseRetainCycle(*this, capturer, owner); 15776 } 15777 15778 void Sema::checkRetainCycles(VarDecl *Var, Expr *Init) { 15779 RetainCycleOwner Owner; 15780 if (!considerVariable(Var, /*DeclRefExpr=*/nullptr, Owner)) 15781 return; 15782 15783 // Because we don't have an expression for the variable, we have to set the 15784 // location explicitly here. 15785 Owner.Loc = Var->getLocation(); 15786 Owner.Range = Var->getSourceRange(); 15787 15788 if (Expr *Capturer = findCapturingExpr(*this, Init, Owner)) 15789 diagnoseRetainCycle(*this, Capturer, Owner); 15790 } 15791 15792 static bool checkUnsafeAssignLiteral(Sema &S, SourceLocation Loc, 15793 Expr *RHS, bool isProperty) { 15794 // Check if RHS is an Objective-C object literal, which also can get 15795 // immediately zapped in a weak reference. Note that we explicitly 15796 // allow ObjCStringLiterals, since those are designed to never really die. 15797 RHS = RHS->IgnoreParenImpCasts(); 15798 15799 // This enum needs to match with the 'select' in 15800 // warn_objc_arc_literal_assign (off-by-1). 15801 Sema::ObjCLiteralKind Kind = S.CheckLiteralKind(RHS); 15802 if (Kind == Sema::LK_String || Kind == Sema::LK_None) 15803 return false; 15804 15805 S.Diag(Loc, diag::warn_arc_literal_assign) 15806 << (unsigned) Kind 15807 << (isProperty ? 0 : 1) 15808 << RHS->getSourceRange(); 15809 15810 return true; 15811 } 15812 15813 static bool checkUnsafeAssignObject(Sema &S, SourceLocation Loc, 15814 Qualifiers::ObjCLifetime LT, 15815 Expr *RHS, bool isProperty) { 15816 // Strip off any implicit cast added to get to the one ARC-specific. 15817 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { 15818 if (cast->getCastKind() == CK_ARCConsumeObject) { 15819 S.Diag(Loc, diag::warn_arc_retained_assign) 15820 << (LT == Qualifiers::OCL_ExplicitNone) 15821 << (isProperty ? 
0 : 1) 15822 << RHS->getSourceRange(); 15823 return true; 15824 } 15825 RHS = cast->getSubExpr(); 15826 } 15827 15828 if (LT == Qualifiers::OCL_Weak && 15829 checkUnsafeAssignLiteral(S, Loc, RHS, isProperty)) 15830 return true; 15831 15832 return false; 15833 } 15834 15835 bool Sema::checkUnsafeAssigns(SourceLocation Loc, 15836 QualType LHS, Expr *RHS) { 15837 Qualifiers::ObjCLifetime LT = LHS.getObjCLifetime(); 15838 15839 if (LT != Qualifiers::OCL_Weak && LT != Qualifiers::OCL_ExplicitNone) 15840 return false; 15841 15842 if (checkUnsafeAssignObject(*this, Loc, LT, RHS, false)) 15843 return true; 15844 15845 return false; 15846 } 15847 15848 void Sema::checkUnsafeExprAssigns(SourceLocation Loc, 15849 Expr *LHS, Expr *RHS) { 15850 QualType LHSType; 15851 // PropertyRef on LHS type need be directly obtained from 15852 // its declaration as it has a PseudoType. 15853 ObjCPropertyRefExpr *PRE 15854 = dyn_cast<ObjCPropertyRefExpr>(LHS->IgnoreParens()); 15855 if (PRE && !PRE->isImplicitProperty()) { 15856 const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); 15857 if (PD) 15858 LHSType = PD->getType(); 15859 } 15860 15861 if (LHSType.isNull()) 15862 LHSType = LHS->getType(); 15863 15864 Qualifiers::ObjCLifetime LT = LHSType.getObjCLifetime(); 15865 15866 if (LT == Qualifiers::OCL_Weak) { 15867 if (!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, Loc)) 15868 getCurFunction()->markSafeWeakUse(LHS); 15869 } 15870 15871 if (checkUnsafeAssigns(Loc, LHSType, RHS)) 15872 return; 15873 15874 // FIXME. Check for other life times. 15875 if (LT != Qualifiers::OCL_None) 15876 return; 15877 15878 if (PRE) { 15879 if (PRE->isImplicitProperty()) 15880 return; 15881 const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); 15882 if (!PD) 15883 return; 15884 15885 unsigned Attributes = PD->getPropertyAttributes(); 15886 if (Attributes & ObjCPropertyAttribute::kind_assign) { 15887 // when 'assign' attribute was not explicitly specified 15888 // by user, ignore it and rely on property type itself 15889 // for lifetime info. 15890 unsigned AsWrittenAttr = PD->getPropertyAttributesAsWritten(); 15891 if (!(AsWrittenAttr & ObjCPropertyAttribute::kind_assign) && 15892 LHSType->isObjCRetainableType()) 15893 return; 15894 15895 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { 15896 if (cast->getCastKind() == CK_ARCConsumeObject) { 15897 Diag(Loc, diag::warn_arc_retained_property_assign) 15898 << RHS->getSourceRange(); 15899 return; 15900 } 15901 RHS = cast->getSubExpr(); 15902 } 15903 } else if (Attributes & ObjCPropertyAttribute::kind_weak) { 15904 if (checkUnsafeAssignObject(*this, Loc, Qualifiers::OCL_Weak, RHS, true)) 15905 return; 15906 } 15907 } 15908 } 15909 15910 //===--- CHECK: Empty statement body (-Wempty-body) ---------------------===// 15911 15912 static bool ShouldDiagnoseEmptyStmtBody(const SourceManager &SourceMgr, 15913 SourceLocation StmtLoc, 15914 const NullStmt *Body) { 15915 // Do not warn if the body is a macro that expands to nothing, e.g: 15916 // 15917 // #define CALL(x) 15918 // if (condition) 15919 // CALL(0); 15920 if (Body->hasLeadingEmptyMacro()) 15921 return false; 15922 15923 // Get line numbers of statement and body. 
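// Illustrative example (hypothetical user code, not from the original source):
//   if (x); foo();
// The null statement shares a line with the 'if', which is the case warned
// about here; a ';' written alone on the following line is deliberately accepted.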
15924 bool StmtLineInvalid; 15925 unsigned StmtLine = SourceMgr.getPresumedLineNumber(StmtLoc, 15926 &StmtLineInvalid); 15927 if (StmtLineInvalid) 15928 return false; 15929 15930 bool BodyLineInvalid; 15931 unsigned BodyLine = SourceMgr.getSpellingLineNumber(Body->getSemiLoc(), 15932 &BodyLineInvalid); 15933 if (BodyLineInvalid) 15934 return false; 15935 15936 // Warn if null statement and body are on the same line. 15937 if (StmtLine != BodyLine) 15938 return false; 15939 15940 return true; 15941 } 15942 15943 void Sema::DiagnoseEmptyStmtBody(SourceLocation StmtLoc, 15944 const Stmt *Body, 15945 unsigned DiagID) { 15946 // Since this is a syntactic check, don't emit diagnostic for template 15947 // instantiations, this just adds noise. 15948 if (CurrentInstantiationScope) 15949 return; 15950 15951 // The body should be a null statement. 15952 const NullStmt *NBody = dyn_cast<NullStmt>(Body); 15953 if (!NBody) 15954 return; 15955 15956 // Do the usual checks. 15957 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody)) 15958 return; 15959 15960 Diag(NBody->getSemiLoc(), DiagID); 15961 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line); 15962 } 15963 15964 void Sema::DiagnoseEmptyLoopBody(const Stmt *S, 15965 const Stmt *PossibleBody) { 15966 assert(!CurrentInstantiationScope); // Ensured by caller 15967 15968 SourceLocation StmtLoc; 15969 const Stmt *Body; 15970 unsigned DiagID; 15971 if (const ForStmt *FS = dyn_cast<ForStmt>(S)) { 15972 StmtLoc = FS->getRParenLoc(); 15973 Body = FS->getBody(); 15974 DiagID = diag::warn_empty_for_body; 15975 } else if (const WhileStmt *WS = dyn_cast<WhileStmt>(S)) { 15976 StmtLoc = WS->getCond()->getSourceRange().getEnd(); 15977 Body = WS->getBody(); 15978 DiagID = diag::warn_empty_while_body; 15979 } else 15980 return; // Neither `for' nor `while'. 15981 15982 // The body should be a null statement. 15983 const NullStmt *NBody = dyn_cast<NullStmt>(Body); 15984 if (!NBody) 15985 return; 15986 15987 // Skip expensive checks if diagnostic is disabled. 15988 if (Diags.isIgnored(DiagID, NBody->getSemiLoc())) 15989 return; 15990 15991 // Do the usual checks. 15992 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody)) 15993 return; 15994 15995 // `for(...);' and `while(...);' are popular idioms, so in order to keep 15996 // noise level low, emit diagnostics only if for/while is followed by a 15997 // CompoundStmt, e.g.: 15998 // for (int i = 0; i < n; i++); 15999 // { 16000 // a(i); 16001 // } 16002 // or if for/while is followed by a statement with more indentation 16003 // than for/while itself: 16004 // for (int i = 0; i < n; i++); 16005 // a(i); 16006 bool ProbableTypo = isa<CompoundStmt>(PossibleBody); 16007 if (!ProbableTypo) { 16008 bool BodyColInvalid; 16009 unsigned BodyCol = SourceMgr.getPresumedColumnNumber( 16010 PossibleBody->getBeginLoc(), &BodyColInvalid); 16011 if (BodyColInvalid) 16012 return; 16013 16014 bool StmtColInvalid; 16015 unsigned StmtCol = 16016 SourceMgr.getPresumedColumnNumber(S->getBeginLoc(), &StmtColInvalid); 16017 if (StmtColInvalid) 16018 return; 16019 16020 if (BodyCol > StmtCol) 16021 ProbableTypo = true; 16022 } 16023 16024 if (ProbableTypo) { 16025 Diag(NBody->getSemiLoc(), DiagID); 16026 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line); 16027 } 16028 } 16029 16030 //===--- CHECK: Warn on self move with std::move. -------------------------===// 16031 16032 /// DiagnoseSelfMove - Emits a warning if a value is moved to itself. 
16033 void Sema::DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, 16034 SourceLocation OpLoc) { 16035 if (Diags.isIgnored(diag::warn_self_move, OpLoc)) 16036 return; 16037 16038 if (inTemplateInstantiation()) 16039 return; 16040 16041 // Strip parens and casts away. 16042 LHSExpr = LHSExpr->IgnoreParenImpCasts(); 16043 RHSExpr = RHSExpr->IgnoreParenImpCasts(); 16044 16045 // Check for a call expression 16046 const CallExpr *CE = dyn_cast<CallExpr>(RHSExpr); 16047 if (!CE || CE->getNumArgs() != 1) 16048 return; 16049 16050 // Check for a call to std::move 16051 if (!CE->isCallToStdMove()) 16052 return; 16053 16054 // Get argument from std::move 16055 RHSExpr = CE->getArg(0); 16056 16057 const DeclRefExpr *LHSDeclRef = dyn_cast<DeclRefExpr>(LHSExpr); 16058 const DeclRefExpr *RHSDeclRef = dyn_cast<DeclRefExpr>(RHSExpr); 16059 16060 // Two DeclRefExpr's, check that the decls are the same. 16061 if (LHSDeclRef && RHSDeclRef) { 16062 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl()) 16063 return; 16064 if (LHSDeclRef->getDecl()->getCanonicalDecl() != 16065 RHSDeclRef->getDecl()->getCanonicalDecl()) 16066 return; 16067 16068 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType() 16069 << LHSExpr->getSourceRange() 16070 << RHSExpr->getSourceRange(); 16071 return; 16072 } 16073 16074 // Member variables require a different approach to check for self moves. 16075 // MemberExpr's are the same if every nested MemberExpr refers to the same 16076 // Decl and the base Expr's are DeclRefExpr's with the same Decl, or 16077 // the base Expr's are CXXThisExpr's. 16078 const Expr *LHSBase = LHSExpr; 16079 const Expr *RHSBase = RHSExpr; 16080 const MemberExpr *LHSME = dyn_cast<MemberExpr>(LHSExpr); 16081 const MemberExpr *RHSME = dyn_cast<MemberExpr>(RHSExpr); 16082 if (!LHSME || !RHSME) 16083 return; 16084 16085 while (LHSME && RHSME) { 16086 if (LHSME->getMemberDecl()->getCanonicalDecl() != 16087 RHSME->getMemberDecl()->getCanonicalDecl()) 16088 return; 16089 16090 LHSBase = LHSME->getBase(); 16091 RHSBase = RHSME->getBase(); 16092 LHSME = dyn_cast<MemberExpr>(LHSBase); 16093 RHSME = dyn_cast<MemberExpr>(RHSBase); 16094 } 16095 16096 LHSDeclRef = dyn_cast<DeclRefExpr>(LHSBase); 16097 RHSDeclRef = dyn_cast<DeclRefExpr>(RHSBase); 16098 if (LHSDeclRef && RHSDeclRef) { 16099 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl()) 16100 return; 16101 if (LHSDeclRef->getDecl()->getCanonicalDecl() != 16102 RHSDeclRef->getDecl()->getCanonicalDecl()) 16103 return; 16104 16105 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType() 16106 << LHSExpr->getSourceRange() 16107 << RHSExpr->getSourceRange(); 16108 return; 16109 } 16110 16111 if (isa<CXXThisExpr>(LHSBase) && isa<CXXThisExpr>(RHSBase)) 16112 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType() 16113 << LHSExpr->getSourceRange() 16114 << RHSExpr->getSourceRange(); 16115 } 16116 16117 //===--- Layout compatibility ----------------------------------------------// 16118 16119 static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2); 16120 16121 /// Check if two enumeration types are layout-compatible. 16122 static bool isLayoutCompatible(ASTContext &C, EnumDecl *ED1, EnumDecl *ED2) { 16123 // C++11 [dcl.enum] p8: 16124 // Two enumeration types are layout-compatible if they have the same 16125 // underlying type. 16126 return ED1->isComplete() && ED2->isComplete() && 16127 C.hasSameType(ED1->getIntegerType(), ED2->getIntegerType()); 16128 } 16129 16130 /// Check if two fields are layout-compatible.
16131 static bool isLayoutCompatible(ASTContext &C, FieldDecl *Field1, 16132 FieldDecl *Field2) { 16133 if (!isLayoutCompatible(C, Field1->getType(), Field2->getType())) 16134 return false; 16135 16136 if (Field1->isBitField() != Field2->isBitField()) 16137 return false; 16138 16139 if (Field1->isBitField()) { 16140 // Make sure that the bit-fields are the same length. 16141 unsigned Bits1 = Field1->getBitWidthValue(C); 16142 unsigned Bits2 = Field2->getBitWidthValue(C); 16143 16144 if (Bits1 != Bits2) 16145 return false; 16146 } 16147 16148 return true; 16149 } 16150 16151 /// Check if two standard-layout structs are layout-compatible. 16152 /// (C++11 [class.mem] p17) 16153 static bool isLayoutCompatibleStruct(ASTContext &C, RecordDecl *RD1, 16154 RecordDecl *RD2) { 16155 // If both records are C++ classes, check that base classes match. 16156 if (const CXXRecordDecl *D1CXX = dyn_cast<CXXRecordDecl>(RD1)) { 16157 // If one of records is a CXXRecordDecl we are in C++ mode, 16158 // thus the other one is a CXXRecordDecl, too. 16159 const CXXRecordDecl *D2CXX = cast<CXXRecordDecl>(RD2); 16160 // Check number of base classes. 16161 if (D1CXX->getNumBases() != D2CXX->getNumBases()) 16162 return false; 16163 16164 // Check the base classes. 16165 for (CXXRecordDecl::base_class_const_iterator 16166 Base1 = D1CXX->bases_begin(), 16167 BaseEnd1 = D1CXX->bases_end(), 16168 Base2 = D2CXX->bases_begin(); 16169 Base1 != BaseEnd1; 16170 ++Base1, ++Base2) { 16171 if (!isLayoutCompatible(C, Base1->getType(), Base2->getType())) 16172 return false; 16173 } 16174 } else if (const CXXRecordDecl *D2CXX = dyn_cast<CXXRecordDecl>(RD2)) { 16175 // If only RD2 is a C++ class, it should have zero base classes. 16176 if (D2CXX->getNumBases() > 0) 16177 return false; 16178 } 16179 16180 // Check the fields. 16181 RecordDecl::field_iterator Field2 = RD2->field_begin(), 16182 Field2End = RD2->field_end(), 16183 Field1 = RD1->field_begin(), 16184 Field1End = RD1->field_end(); 16185 for ( ; Field1 != Field1End && Field2 != Field2End; ++Field1, ++Field2) { 16186 if (!isLayoutCompatible(C, *Field1, *Field2)) 16187 return false; 16188 } 16189 if (Field1 != Field1End || Field2 != Field2End) 16190 return false; 16191 16192 return true; 16193 } 16194 16195 /// Check if two standard-layout unions are layout-compatible. 16196 /// (C++11 [class.mem] p18) 16197 static bool isLayoutCompatibleUnion(ASTContext &C, RecordDecl *RD1, 16198 RecordDecl *RD2) { 16199 llvm::SmallPtrSet<FieldDecl *, 8> UnmatchedFields; 16200 for (auto *Field2 : RD2->fields()) 16201 UnmatchedFields.insert(Field2); 16202 16203 for (auto *Field1 : RD1->fields()) { 16204 llvm::SmallPtrSet<FieldDecl *, 8>::iterator 16205 I = UnmatchedFields.begin(), 16206 E = UnmatchedFields.end(); 16207 16208 for ( ; I != E; ++I) { 16209 if (isLayoutCompatible(C, Field1, *I)) { 16210 bool Result = UnmatchedFields.erase(*I); 16211 (void) Result; 16212 assert(Result); 16213 break; 16214 } 16215 } 16216 if (I == E) 16217 return false; 16218 } 16219 16220 return UnmatchedFields.empty(); 16221 } 16222 16223 static bool isLayoutCompatible(ASTContext &C, RecordDecl *RD1, 16224 RecordDecl *RD2) { 16225 if (RD1->isUnion() != RD2->isUnion()) 16226 return false; 16227 16228 if (RD1->isUnion()) 16229 return isLayoutCompatibleUnion(C, RD1, RD2); 16230 else 16231 return isLayoutCompatibleStruct(C, RD1, RD2); 16232 } 16233 16234 /// Check if two types are layout-compatible in C++11 sense. 
16235 static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2) { 16236 if (T1.isNull() || T2.isNull()) 16237 return false; 16238 16239 // C++11 [basic.types] p11: 16240 // If two types T1 and T2 are the same type, then T1 and T2 are 16241 // layout-compatible types. 16242 if (C.hasSameType(T1, T2)) 16243 return true; 16244 16245 T1 = T1.getCanonicalType().getUnqualifiedType(); 16246 T2 = T2.getCanonicalType().getUnqualifiedType(); 16247 16248 const Type::TypeClass TC1 = T1->getTypeClass(); 16249 const Type::TypeClass TC2 = T2->getTypeClass(); 16250 16251 if (TC1 != TC2) 16252 return false; 16253 16254 if (TC1 == Type::Enum) { 16255 return isLayoutCompatible(C, 16256 cast<EnumType>(T1)->getDecl(), 16257 cast<EnumType>(T2)->getDecl()); 16258 } else if (TC1 == Type::Record) { 16259 if (!T1->isStandardLayoutType() || !T2->isStandardLayoutType()) 16260 return false; 16261 16262 return isLayoutCompatible(C, 16263 cast<RecordType>(T1)->getDecl(), 16264 cast<RecordType>(T2)->getDecl()); 16265 } 16266 16267 return false; 16268 } 16269 16270 //===--- CHECK: pointer_with_type_tag attribute: datatypes should match ----// 16271 16272 /// Given a type tag expression find the type tag itself. 16273 /// 16274 /// \param TypeExpr Type tag expression, as it appears in user's code. 16275 /// 16276 /// \param VD Declaration of an identifier that appears in a type tag. 16277 /// 16278 /// \param MagicValue Type tag magic value. 16279 /// 16280 /// \param isConstantEvaluated whether the evalaution should be performed in 16281 16282 /// constant context. 16283 static bool FindTypeTagExpr(const Expr *TypeExpr, const ASTContext &Ctx, 16284 const ValueDecl **VD, uint64_t *MagicValue, 16285 bool isConstantEvaluated) { 16286 while(true) { 16287 if (!TypeExpr) 16288 return false; 16289 16290 TypeExpr = TypeExpr->IgnoreParenImpCasts()->IgnoreParenCasts(); 16291 16292 switch (TypeExpr->getStmtClass()) { 16293 case Stmt::UnaryOperatorClass: { 16294 const UnaryOperator *UO = cast<UnaryOperator>(TypeExpr); 16295 if (UO->getOpcode() == UO_AddrOf || UO->getOpcode() == UO_Deref) { 16296 TypeExpr = UO->getSubExpr(); 16297 continue; 16298 } 16299 return false; 16300 } 16301 16302 case Stmt::DeclRefExprClass: { 16303 const DeclRefExpr *DRE = cast<DeclRefExpr>(TypeExpr); 16304 *VD = DRE->getDecl(); 16305 return true; 16306 } 16307 16308 case Stmt::IntegerLiteralClass: { 16309 const IntegerLiteral *IL = cast<IntegerLiteral>(TypeExpr); 16310 llvm::APInt MagicValueAPInt = IL->getValue(); 16311 if (MagicValueAPInt.getActiveBits() <= 64) { 16312 *MagicValue = MagicValueAPInt.getZExtValue(); 16313 return true; 16314 } else 16315 return false; 16316 } 16317 16318 case Stmt::BinaryConditionalOperatorClass: 16319 case Stmt::ConditionalOperatorClass: { 16320 const AbstractConditionalOperator *ACO = 16321 cast<AbstractConditionalOperator>(TypeExpr); 16322 bool Result; 16323 if (ACO->getCond()->EvaluateAsBooleanCondition(Result, Ctx, 16324 isConstantEvaluated)) { 16325 if (Result) 16326 TypeExpr = ACO->getTrueExpr(); 16327 else 16328 TypeExpr = ACO->getFalseExpr(); 16329 continue; 16330 } 16331 return false; 16332 } 16333 16334 case Stmt::BinaryOperatorClass: { 16335 const BinaryOperator *BO = cast<BinaryOperator>(TypeExpr); 16336 if (BO->getOpcode() == BO_Comma) { 16337 TypeExpr = BO->getRHS(); 16338 continue; 16339 } 16340 return false; 16341 } 16342 16343 default: 16344 return false; 16345 } 16346 } 16347 } 16348 16349 /// Retrieve the C type corresponding to type tag TypeExpr. 
16350 /// 16351 /// \param TypeExpr Expression that specifies a type tag. 16352 /// 16353 /// \param MagicValues Registered magic values. 16354 /// 16355 /// \param FoundWrongKind Set to true if a type tag was found, but of a wrong 16356 /// kind. 16357 /// 16358 /// \param TypeInfo Information about the corresponding C type. 16359 /// 16360 /// \param isConstantEvaluated whether the evalaution should be performed in 16361 /// constant context. 16362 /// 16363 /// \returns true if the corresponding C type was found. 16364 static bool GetMatchingCType( 16365 const IdentifierInfo *ArgumentKind, const Expr *TypeExpr, 16366 const ASTContext &Ctx, 16367 const llvm::DenseMap<Sema::TypeTagMagicValue, Sema::TypeTagData> 16368 *MagicValues, 16369 bool &FoundWrongKind, Sema::TypeTagData &TypeInfo, 16370 bool isConstantEvaluated) { 16371 FoundWrongKind = false; 16372 16373 // Variable declaration that has type_tag_for_datatype attribute. 16374 const ValueDecl *VD = nullptr; 16375 16376 uint64_t MagicValue; 16377 16378 if (!FindTypeTagExpr(TypeExpr, Ctx, &VD, &MagicValue, isConstantEvaluated)) 16379 return false; 16380 16381 if (VD) { 16382 if (TypeTagForDatatypeAttr *I = VD->getAttr<TypeTagForDatatypeAttr>()) { 16383 if (I->getArgumentKind() != ArgumentKind) { 16384 FoundWrongKind = true; 16385 return false; 16386 } 16387 TypeInfo.Type = I->getMatchingCType(); 16388 TypeInfo.LayoutCompatible = I->getLayoutCompatible(); 16389 TypeInfo.MustBeNull = I->getMustBeNull(); 16390 return true; 16391 } 16392 return false; 16393 } 16394 16395 if (!MagicValues) 16396 return false; 16397 16398 llvm::DenseMap<Sema::TypeTagMagicValue, 16399 Sema::TypeTagData>::const_iterator I = 16400 MagicValues->find(std::make_pair(ArgumentKind, MagicValue)); 16401 if (I == MagicValues->end()) 16402 return false; 16403 16404 TypeInfo = I->second; 16405 return true; 16406 } 16407 16408 void Sema::RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, 16409 uint64_t MagicValue, QualType Type, 16410 bool LayoutCompatible, 16411 bool MustBeNull) { 16412 if (!TypeTagForDatatypeMagicValues) 16413 TypeTagForDatatypeMagicValues.reset( 16414 new llvm::DenseMap<TypeTagMagicValue, TypeTagData>); 16415 16416 TypeTagMagicValue Magic(ArgumentKind, MagicValue); 16417 (*TypeTagForDatatypeMagicValues)[Magic] = 16418 TypeTagData(Type, LayoutCompatible, MustBeNull); 16419 } 16420 16421 static bool IsSameCharType(QualType T1, QualType T2) { 16422 const BuiltinType *BT1 = T1->getAs<BuiltinType>(); 16423 if (!BT1) 16424 return false; 16425 16426 const BuiltinType *BT2 = T2->getAs<BuiltinType>(); 16427 if (!BT2) 16428 return false; 16429 16430 BuiltinType::Kind T1Kind = BT1->getKind(); 16431 BuiltinType::Kind T2Kind = BT2->getKind(); 16432 16433 return (T1Kind == BuiltinType::SChar && T2Kind == BuiltinType::Char_S) || 16434 (T1Kind == BuiltinType::UChar && T2Kind == BuiltinType::Char_U) || 16435 (T1Kind == BuiltinType::Char_U && T2Kind == BuiltinType::UChar) || 16436 (T1Kind == BuiltinType::Char_S && T2Kind == BuiltinType::SChar); 16437 } 16438 16439 void Sema::CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, 16440 const ArrayRef<const Expr *> ExprArgs, 16441 SourceLocation CallSiteLoc) { 16442 const IdentifierInfo *ArgumentKind = Attr->getArgumentKind(); 16443 bool IsPointerAttr = Attr->getIsPointer(); 16444 16445 // Retrieve the argument representing the 'type_tag'. 
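// Illustrative sketch of the attributes being checked (hypothetical
// declarations, not from the original source; indices are written 1-based):
//   static const int mpi_datatype_int
//       __attribute__((type_tag_for_datatype(mpi, int)));
//   int MPI_Send(void *buf, int count, int datatype)
//       __attribute__((pointer_with_type_tag(mpi, 1, 3)));
// For a call to MPI_Send, argument 3 carries the type tag and argument 1 is
// the pointer whose pointee type must match the type registered for that tag.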
16446 unsigned TypeTagIdxAST = Attr->getTypeTagIdx().getASTIndex(); 16447 if (TypeTagIdxAST >= ExprArgs.size()) { 16448 Diag(CallSiteLoc, diag::err_tag_index_out_of_range) 16449 << 0 << Attr->getTypeTagIdx().getSourceIndex(); 16450 return; 16451 } 16452 const Expr *TypeTagExpr = ExprArgs[TypeTagIdxAST]; 16453 bool FoundWrongKind; 16454 TypeTagData TypeInfo; 16455 if (!GetMatchingCType(ArgumentKind, TypeTagExpr, Context, 16456 TypeTagForDatatypeMagicValues.get(), FoundWrongKind, 16457 TypeInfo, isConstantEvaluated())) { 16458 if (FoundWrongKind) 16459 Diag(TypeTagExpr->getExprLoc(), 16460 diag::warn_type_tag_for_datatype_wrong_kind) 16461 << TypeTagExpr->getSourceRange(); 16462 return; 16463 } 16464 16465 // Retrieve the argument representing the 'arg_idx'. 16466 unsigned ArgumentIdxAST = Attr->getArgumentIdx().getASTIndex(); 16467 if (ArgumentIdxAST >= ExprArgs.size()) { 16468 Diag(CallSiteLoc, diag::err_tag_index_out_of_range) 16469 << 1 << Attr->getArgumentIdx().getSourceIndex(); 16470 return; 16471 } 16472 const Expr *ArgumentExpr = ExprArgs[ArgumentIdxAST]; 16473 if (IsPointerAttr) { 16474 // Skip implicit cast of pointer to `void *' (as a function argument). 16475 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(ArgumentExpr)) 16476 if (ICE->getType()->isVoidPointerType() && 16477 ICE->getCastKind() == CK_BitCast) 16478 ArgumentExpr = ICE->getSubExpr(); 16479 } 16480 QualType ArgumentType = ArgumentExpr->getType(); 16481 16482 // Passing a `void*' pointer shouldn't trigger a warning. 16483 if (IsPointerAttr && ArgumentType->isVoidPointerType()) 16484 return; 16485 16486 if (TypeInfo.MustBeNull) { 16487 // Type tag with matching void type requires a null pointer. 16488 if (!ArgumentExpr->isNullPointerConstant(Context, 16489 Expr::NPC_ValueDependentIsNotNull)) { 16490 Diag(ArgumentExpr->getExprLoc(), 16491 diag::warn_type_safety_null_pointer_required) 16492 << ArgumentKind->getName() 16493 << ArgumentExpr->getSourceRange() 16494 << TypeTagExpr->getSourceRange(); 16495 } 16496 return; 16497 } 16498 16499 QualType RequiredType = TypeInfo.Type; 16500 if (IsPointerAttr) 16501 RequiredType = Context.getPointerType(RequiredType); 16502 16503 bool mismatch = false; 16504 if (!TypeInfo.LayoutCompatible) { 16505 mismatch = !Context.hasSameType(ArgumentType, RequiredType); 16506 16507 // C++11 [basic.fundamental] p1: 16508 // Plain char, signed char, and unsigned char are three distinct types. 16509 // 16510 // But we treat plain `char' as equivalent to `signed char' or `unsigned 16511 // char' depending on the current char signedness mode. 
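// Illustrative example (assumption, not from the original source): if the tag
// requires 'signed char *' and the argument is 'char *', the IsSameCharType
// check below clears the mismatch on targets where plain char is signed.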
16512 if (mismatch) 16513 if ((IsPointerAttr && IsSameCharType(ArgumentType->getPointeeType(), 16514 RequiredType->getPointeeType())) || 16515 (!IsPointerAttr && IsSameCharType(ArgumentType, RequiredType))) 16516 mismatch = false; 16517 } else 16518 if (IsPointerAttr) 16519 mismatch = !isLayoutCompatible(Context, 16520 ArgumentType->getPointeeType(), 16521 RequiredType->getPointeeType()); 16522 else 16523 mismatch = !isLayoutCompatible(Context, ArgumentType, RequiredType); 16524 16525 if (mismatch) 16526 Diag(ArgumentExpr->getExprLoc(), diag::warn_type_safety_type_mismatch) 16527 << ArgumentType << ArgumentKind 16528 << TypeInfo.LayoutCompatible << RequiredType 16529 << ArgumentExpr->getSourceRange() 16530 << TypeTagExpr->getSourceRange(); 16531 } 16532 16533 void Sema::AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, 16534 CharUnits Alignment) { 16535 MisalignedMembers.emplace_back(E, RD, MD, Alignment); 16536 } 16537 16538 void Sema::DiagnoseMisalignedMembers() { 16539 for (MisalignedMember &m : MisalignedMembers) { 16540 const NamedDecl *ND = m.RD; 16541 if (ND->getName().empty()) { 16542 if (const TypedefNameDecl *TD = m.RD->getTypedefNameForAnonDecl()) 16543 ND = TD; 16544 } 16545 Diag(m.E->getBeginLoc(), diag::warn_taking_address_of_packed_member) 16546 << m.MD << ND << m.E->getSourceRange(); 16547 } 16548 MisalignedMembers.clear(); 16549 } 16550 16551 void Sema::DiscardMisalignedMemberAddress(const Type *T, Expr *E) { 16552 E = E->IgnoreParens(); 16553 if (!T->isPointerType() && !T->isIntegerType()) 16554 return; 16555 if (isa<UnaryOperator>(E) && 16556 cast<UnaryOperator>(E)->getOpcode() == UO_AddrOf) { 16557 auto *Op = cast<UnaryOperator>(E)->getSubExpr()->IgnoreParens(); 16558 if (isa<MemberExpr>(Op)) { 16559 auto MA = llvm::find(MisalignedMembers, MisalignedMember(Op)); 16560 if (MA != MisalignedMembers.end() && 16561 (T->isIntegerType() || 16562 (T->isPointerType() && (T->getPointeeType()->isIncompleteType() || 16563 Context.getTypeAlignInChars( 16564 T->getPointeeType()) <= MA->Alignment)))) 16565 MisalignedMembers.erase(MA); 16566 } 16567 } 16568 } 16569 16570 void Sema::RefersToMemberWithReducedAlignment( 16571 Expr *E, 16572 llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> 16573 Action) { 16574 const auto *ME = dyn_cast<MemberExpr>(E); 16575 if (!ME) 16576 return; 16577 16578 // No need to check expressions with an __unaligned-qualified type. 16579 if (E->getType().getQualifiers().hasUnaligned()) 16580 return; 16581 16582 // For a chain of MemberExpr like "a.b.c.d" this list 16583 // will keep FieldDecl's like [d, c, b]. 16584 SmallVector<FieldDecl *, 4> ReverseMemberChain; 16585 const MemberExpr *TopME = nullptr; 16586 bool AnyIsPacked = false; 16587 do { 16588 QualType BaseType = ME->getBase()->getType(); 16589 if (BaseType->isDependentType()) 16590 return; 16591 if (ME->isArrow()) 16592 BaseType = BaseType->getPointeeType(); 16593 RecordDecl *RD = BaseType->castAs<RecordType>()->getDecl(); 16594 if (RD->isInvalidDecl()) 16595 return; 16596 16597 ValueDecl *MD = ME->getMemberDecl(); 16598 auto *FD = dyn_cast<FieldDecl>(MD); 16599 // We do not care about non-data members. 
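// (Illustrative note, not from the original source: references to static data
// members or member functions are not FieldDecls, so the walk stops here.)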
16600 if (!FD || FD->isInvalidDecl()) 16601 return; 16602 16603 AnyIsPacked = 16604 AnyIsPacked || (RD->hasAttr<PackedAttr>() || MD->hasAttr<PackedAttr>()); 16605 ReverseMemberChain.push_back(FD); 16606 16607 TopME = ME; 16608 ME = dyn_cast<MemberExpr>(ME->getBase()->IgnoreParens()); 16609 } while (ME); 16610 assert(TopME && "We did not compute a topmost MemberExpr!"); 16611 16612 // Not the scope of this diagnostic. 16613 if (!AnyIsPacked) 16614 return; 16615 16616 const Expr *TopBase = TopME->getBase()->IgnoreParenImpCasts(); 16617 const auto *DRE = dyn_cast<DeclRefExpr>(TopBase); 16618 // TODO: The innermost base of the member expression may be too complicated. 16619 // For now, just disregard these cases. This is left for future 16620 // improvement. 16621 if (!DRE && !isa<CXXThisExpr>(TopBase)) 16622 return; 16623 16624 // Alignment expected by the whole expression. 16625 CharUnits ExpectedAlignment = Context.getTypeAlignInChars(E->getType()); 16626 16627 // No need to do anything else with this case. 16628 if (ExpectedAlignment.isOne()) 16629 return; 16630 16631 // Synthesize offset of the whole access. 16632 CharUnits Offset; 16633 for (const FieldDecl *FD : llvm::reverse(ReverseMemberChain)) 16634 Offset += Context.toCharUnitsFromBits(Context.getFieldOffset(FD)); 16635 16636 // Compute the CompleteObjectAlignment as the alignment of the whole chain. 16637 CharUnits CompleteObjectAlignment = Context.getTypeAlignInChars( 16638 ReverseMemberChain.back()->getParent()->getTypeForDecl()); 16639 16640 // The base expression of the innermost MemberExpr may give 16641 // stronger guarantees than the class containing the member. 16642 if (DRE && !TopME->isArrow()) { 16643 const ValueDecl *VD = DRE->getDecl(); 16644 if (!VD->getType()->isReferenceType()) 16645 CompleteObjectAlignment = 16646 std::max(CompleteObjectAlignment, Context.getDeclAlign(VD)); 16647 } 16648 16649 // Check if the synthesized offset fulfills the alignment. 16650 if (Offset % ExpectedAlignment != 0 || 16651 // It may fulfill the offset it but the effective alignment may still be 16652 // lower than the expected expression alignment. 16653 CompleteObjectAlignment < ExpectedAlignment) { 16654 // If this happens, we want to determine a sensible culprit of this. 16655 // Intuitively, watching the chain of member expressions from right to 16656 // left, we start with the required alignment (as required by the field 16657 // type) but some packed attribute in that chain has reduced the alignment. 16658 // It may happen that another packed structure increases it again. But if 16659 // we are here such increase has not been enough. So pointing the first 16660 // FieldDecl that either is packed or else its RecordDecl is, 16661 // seems reasonable. 
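// Illustrative example (hypothetical user code, not from the original source):
//   struct __attribute__((packed)) S { char c; int i; } s;
//   int *p = &s.i;
// 'i' is not packed itself, but its parent record is, so the loop below picks
// 'i' as the member to report, together with its reduced alignment.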
16662 FieldDecl *FD = nullptr; 16663 CharUnits Alignment; 16664 for (FieldDecl *FDI : ReverseMemberChain) { 16665 if (FDI->hasAttr<PackedAttr>() || 16666 FDI->getParent()->hasAttr<PackedAttr>()) { 16667 FD = FDI; 16668 Alignment = std::min( 16669 Context.getTypeAlignInChars(FD->getType()), 16670 Context.getTypeAlignInChars(FD->getParent()->getTypeForDecl())); 16671 break; 16672 } 16673 } 16674 assert(FD && "We did not find a packed FieldDecl!"); 16675 Action(E, FD->getParent(), FD, Alignment); 16676 } 16677 } 16678 16679 void Sema::CheckAddressOfPackedMember(Expr *rhs) { 16680 using namespace std::placeholders; 16681 16682 RefersToMemberWithReducedAlignment( 16683 rhs, std::bind(&Sema::AddPotentialMisalignedMembers, std::ref(*this), _1, 16684 _2, _3, _4)); 16685 } 16686 16687 // Check if \p Ty is a valid type for the elementwise math builtins. If it is 16688 // not a valid type, emit an error message and return true. Otherwise return 16689 // false. 16690 static bool checkMathBuiltinElementType(Sema &S, SourceLocation Loc, 16691 QualType Ty) { 16692 if (!Ty->getAs<VectorType>() && !ConstantMatrixType::isValidElementType(Ty)) { 16693 S.Diag(Loc, diag::err_builtin_invalid_arg_type) 16694 << 1 << /* vector, integer or float ty*/ 0 << Ty; 16695 return true; 16696 } 16697 return false; 16698 } 16699 16700 bool Sema::SemaBuiltinElementwiseMathOneArg(CallExpr *TheCall) { 16701 if (checkArgCount(*this, TheCall, 1)) 16702 return true; 16703 16704 ExprResult A = UsualUnaryConversions(TheCall->getArg(0)); 16705 SourceLocation ArgLoc = TheCall->getArg(0)->getBeginLoc(); 16706 if (A.isInvalid()) 16707 return true; 16708 16709 TheCall->setArg(0, A.get()); 16710 QualType TyA = A.get()->getType(); 16711 if (checkMathBuiltinElementType(*this, ArgLoc, TyA)) 16712 return true; 16713 16714 QualType EltTy = TyA; 16715 if (auto *VecTy = EltTy->getAs<VectorType>()) 16716 EltTy = VecTy->getElementType(); 16717 if (EltTy->isUnsignedIntegerType()) 16718 return Diag(ArgLoc, diag::err_builtin_invalid_arg_type) 16719 << 1 << /*signed integer or float ty*/ 3 << TyA; 16720 16721 TheCall->setType(TyA); 16722 return false; 16723 } 16724 16725 bool Sema::SemaBuiltinElementwiseMath(CallExpr *TheCall) { 16726 if (checkArgCount(*this, TheCall, 2)) 16727 return true; 16728 16729 ExprResult A = TheCall->getArg(0); 16730 ExprResult B = TheCall->getArg(1); 16731 // Do standard promotions between the two arguments, returning their common 16732 // type. 
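// (Illustrative, not from the original source: e.g. an 'int' and a 'float'
// scalar argument share 'float' as their common type, while two vectors with
// different element types do not converge and are rejected just below.)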
16733 QualType Res = 16734 UsualArithmeticConversions(A, B, TheCall->getExprLoc(), ACK_Comparison); 16735 if (A.isInvalid() || B.isInvalid()) 16736 return true; 16737 16738 QualType TyA = A.get()->getType(); 16739 QualType TyB = B.get()->getType(); 16740 16741 if (Res.isNull() || TyA.getCanonicalType() != TyB.getCanonicalType()) 16742 return Diag(A.get()->getBeginLoc(), 16743 diag::err_typecheck_call_different_arg_types) 16744 << TyA << TyB; 16745 16746 if (checkMathBuiltinElementType(*this, A.get()->getBeginLoc(), TyA)) 16747 return true; 16748 16749 TheCall->setArg(0, A.get()); 16750 TheCall->setArg(1, B.get()); 16751 TheCall->setType(Res); 16752 return false; 16753 } 16754 16755 bool Sema::SemaBuiltinReduceMath(CallExpr *TheCall) { 16756 if (checkArgCount(*this, TheCall, 1)) 16757 return true; 16758 16759 ExprResult A = UsualUnaryConversions(TheCall->getArg(0)); 16760 if (A.isInvalid()) 16761 return true; 16762 16763 TheCall->setArg(0, A.get()); 16764 const VectorType *TyA = A.get()->getType()->getAs<VectorType>(); 16765 if (!TyA) { 16766 SourceLocation ArgLoc = TheCall->getArg(0)->getBeginLoc(); 16767 return Diag(ArgLoc, diag::err_builtin_invalid_arg_type) 16768 << 1 << /* vector ty*/ 4 << A.get()->getType(); 16769 } 16770 16771 TheCall->setType(TyA->getElementType()); 16772 return false; 16773 } 16774 16775 ExprResult Sema::SemaBuiltinMatrixTranspose(CallExpr *TheCall, 16776 ExprResult CallResult) { 16777 if (checkArgCount(*this, TheCall, 1)) 16778 return ExprError(); 16779 16780 ExprResult MatrixArg = DefaultLvalueConversion(TheCall->getArg(0)); 16781 if (MatrixArg.isInvalid()) 16782 return MatrixArg; 16783 Expr *Matrix = MatrixArg.get(); 16784 16785 auto *MType = Matrix->getType()->getAs<ConstantMatrixType>(); 16786 if (!MType) { 16787 Diag(Matrix->getBeginLoc(), diag::err_builtin_invalid_arg_type) 16788 << 1 << /* matrix ty*/ 1 << Matrix->getType(); 16789 return ExprError(); 16790 } 16791 16792 // Create returned matrix type by swapping rows and columns of the argument 16793 // matrix type. 16794 QualType ResultType = Context.getConstantMatrixType( 16795 MType->getElementType(), MType->getNumColumns(), MType->getNumRows()); 16796 16797 // Change the return type to the type of the returned matrix. 16798 TheCall->setType(ResultType); 16799 16800 // Update call argument to use the possibly converted matrix argument. 16801 TheCall->setArg(0, Matrix); 16802 return CallResult; 16803 } 16804 16805 // Get and verify the matrix dimensions. 
16806 static llvm::Optional<unsigned> 16807 getAndVerifyMatrixDimension(Expr *Expr, StringRef Name, Sema &S) { 16808 SourceLocation ErrorPos; 16809 Optional<llvm::APSInt> Value = 16810 Expr->getIntegerConstantExpr(S.Context, &ErrorPos); 16811 if (!Value) { 16812 S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_scalar_unsigned_arg) 16813 << Name; 16814 return {}; 16815 } 16816 uint64_t Dim = Value->getZExtValue(); 16817 if (!ConstantMatrixType::isDimensionValid(Dim)) { 16818 S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_invalid_dimension) 16819 << Name << ConstantMatrixType::getMaxElementsPerDimension(); 16820 return {}; 16821 } 16822 return Dim; 16823 } 16824 16825 ExprResult Sema::SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall, 16826 ExprResult CallResult) { 16827 if (!getLangOpts().MatrixTypes) { 16828 Diag(TheCall->getBeginLoc(), diag::err_builtin_matrix_disabled); 16829 return ExprError(); 16830 } 16831 16832 if (checkArgCount(*this, TheCall, 4)) 16833 return ExprError(); 16834 16835 unsigned PtrArgIdx = 0; 16836 Expr *PtrExpr = TheCall->getArg(PtrArgIdx); 16837 Expr *RowsExpr = TheCall->getArg(1); 16838 Expr *ColumnsExpr = TheCall->getArg(2); 16839 Expr *StrideExpr = TheCall->getArg(3); 16840 16841 bool ArgError = false; 16842 16843 // Check pointer argument. 16844 { 16845 ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr); 16846 if (PtrConv.isInvalid()) 16847 return PtrConv; 16848 PtrExpr = PtrConv.get(); 16849 TheCall->setArg(0, PtrExpr); 16850 if (PtrExpr->isTypeDependent()) { 16851 TheCall->setType(Context.DependentTy); 16852 return TheCall; 16853 } 16854 } 16855 16856 auto *PtrTy = PtrExpr->getType()->getAs<PointerType>(); 16857 QualType ElementTy; 16858 if (!PtrTy) { 16859 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type) 16860 << PtrArgIdx + 1 << /*pointer to element ty*/ 2 << PtrExpr->getType(); 16861 ArgError = true; 16862 } else { 16863 ElementTy = PtrTy->getPointeeType().getUnqualifiedType(); 16864 16865 if (!ConstantMatrixType::isValidElementType(ElementTy)) { 16866 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type) 16867 << PtrArgIdx + 1 << /* pointer to element ty*/ 2 16868 << PtrExpr->getType(); 16869 ArgError = true; 16870 } 16871 } 16872 16873 // Apply default Lvalue conversions and convert the expression to size_t. 16874 auto ApplyArgumentConversions = [this](Expr *E) { 16875 ExprResult Conv = DefaultLvalueConversion(E); 16876 if (Conv.isInvalid()) 16877 return Conv; 16878 16879 return tryConvertExprToType(Conv.get(), Context.getSizeType()); 16880 }; 16881 16882 // Apply conversion to row and column expressions. 16883 ExprResult RowsConv = ApplyArgumentConversions(RowsExpr); 16884 if (!RowsConv.isInvalid()) { 16885 RowsExpr = RowsConv.get(); 16886 TheCall->setArg(1, RowsExpr); 16887 } else 16888 RowsExpr = nullptr; 16889 16890 ExprResult ColumnsConv = ApplyArgumentConversions(ColumnsExpr); 16891 if (!ColumnsConv.isInvalid()) { 16892 ColumnsExpr = ColumnsConv.get(); 16893 TheCall->setArg(2, ColumnsExpr); 16894 } else 16895 ColumnsExpr = nullptr; 16896 16897 // If any part of the result matrix type is still pending, just use 16898 // Context.DependentTy, until all parts are resolved. 16899 if ((RowsExpr && RowsExpr->isTypeDependent()) || 16900 (ColumnsExpr && ColumnsExpr->isTypeDependent())) { 16901 TheCall->setType(Context.DependentTy); 16902 return CallResult; 16903 } 16904 16905 // Check row and column dimensions.
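// Illustrative example (hypothetical user code, not from the original source):
//   __builtin_matrix_column_major_load(Ptr, 3, 2, Stride)
// loads a 3x2 matrix; both dimensions must be integer constants within the
// supported range, and a constant Stride smaller than the row count (3 here)
// is diagnosed further down.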
  // Check row and column dimensions.
  llvm::Optional<unsigned> MaybeRows;
  if (RowsExpr)
    MaybeRows = getAndVerifyMatrixDimension(RowsExpr, "row", *this);

  llvm::Optional<unsigned> MaybeColumns;
  if (ColumnsExpr)
    MaybeColumns = getAndVerifyMatrixDimension(ColumnsExpr, "column", *this);

  // Check stride argument.
  ExprResult StrideConv = ApplyArgumentConversions(StrideExpr);
  if (StrideConv.isInvalid())
    return ExprError();
  StrideExpr = StrideConv.get();
  TheCall->setArg(3, StrideExpr);

  if (MaybeRows) {
    if (Optional<llvm::APSInt> Value =
            StrideExpr->getIntegerConstantExpr(Context)) {
      uint64_t Stride = Value->getZExtValue();
      if (Stride < *MaybeRows) {
        Diag(StrideExpr->getBeginLoc(),
             diag::err_builtin_matrix_stride_too_small);
        ArgError = true;
      }
    }
  }

  if (ArgError || !MaybeRows || !MaybeColumns)
    return ExprError();

  TheCall->setType(
      Context.getConstantMatrixType(ElementTy, *MaybeRows, *MaybeColumns));
  return CallResult;
}

ExprResult Sema::SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall,
                                                   ExprResult CallResult) {
  if (checkArgCount(*this, TheCall, 3))
    return ExprError();

  unsigned PtrArgIdx = 1;
  Expr *MatrixExpr = TheCall->getArg(0);
  Expr *PtrExpr = TheCall->getArg(PtrArgIdx);
  Expr *StrideExpr = TheCall->getArg(2);

  bool ArgError = false;

  {
    ExprResult MatrixConv = DefaultLvalueConversion(MatrixExpr);
    if (MatrixConv.isInvalid())
      return MatrixConv;
    MatrixExpr = MatrixConv.get();
    TheCall->setArg(0, MatrixExpr);
  }
  if (MatrixExpr->isTypeDependent()) {
    TheCall->setType(Context.DependentTy);
    return TheCall;
  }

  auto *MatrixTy = MatrixExpr->getType()->getAs<ConstantMatrixType>();
  if (!MatrixTy) {
    Diag(MatrixExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type)
        << 1 << /*matrix ty */ 1 << MatrixExpr->getType();
    ArgError = true;
  }

  {
    ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr);
    if (PtrConv.isInvalid())
      return PtrConv;
    PtrExpr = PtrConv.get();
    TheCall->setArg(1, PtrExpr);
    if (PtrExpr->isTypeDependent()) {
      TheCall->setType(Context.DependentTy);
      return TheCall;
    }
  }

  // Check pointer argument.
  auto *PtrTy = PtrExpr->getType()->getAs<PointerType>();
  if (!PtrTy) {
    Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type)
        << PtrArgIdx + 1 << /*pointer to element ty*/ 2 << PtrExpr->getType();
    ArgError = true;
  } else {
    QualType ElementTy = PtrTy->getPointeeType();
    if (ElementTy.isConstQualified()) {
      Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_store_to_const);
      ArgError = true;
    }
    ElementTy = ElementTy.getUnqualifiedType().getCanonicalType();
    if (MatrixTy &&
        !Context.hasSameType(ElementTy, MatrixTy->getElementType())) {
      Diag(PtrExpr->getBeginLoc(),
           diag::err_builtin_matrix_pointer_arg_mismatch)
          << ElementTy << MatrixTy->getElementType();
      ArgError = true;
    }
  }
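  // Illustrative note (editor-added sketch, not upstream code): the element
  // type of the destination pointer must match the matrix element type up to
  // qualifiers, so storing a float __attribute__((matrix_type(4, 4))) value
  // through a double * is diagnosed with
  // err_builtin_matrix_pointer_arg_mismatch, while a pointer to a const
  // pointee is rejected separately with err_builtin_matrix_store_to_const.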
  // Apply default Lvalue conversions and convert the stride expression to
  // size_t.
  {
    ExprResult StrideConv = DefaultLvalueConversion(StrideExpr);
    if (StrideConv.isInvalid())
      return StrideConv;

    StrideConv = tryConvertExprToType(StrideConv.get(), Context.getSizeType());
    if (StrideConv.isInvalid())
      return StrideConv;
    StrideExpr = StrideConv.get();
    TheCall->setArg(2, StrideExpr);
  }

  // Check stride argument.
  if (MatrixTy) {
    if (Optional<llvm::APSInt> Value =
            StrideExpr->getIntegerConstantExpr(Context)) {
      uint64_t Stride = Value->getZExtValue();
      if (Stride < MatrixTy->getNumRows()) {
        Diag(StrideExpr->getBeginLoc(),
             diag::err_builtin_matrix_stride_too_small);
        ArgError = true;
      }
    }
  }

  if (ArgError)
    return ExprError();

  return CallResult;
}

/// CheckTCBEnforcement - Enforces that every function in a named TCB only
/// directly calls other functions in the same TCB, as marked by the
/// enforce_tcb and enforce_tcb_leaf attributes.
void Sema::CheckTCBEnforcement(const CallExpr *TheCall,
                               const FunctionDecl *Callee) {
  const FunctionDecl *Caller = getCurFunctionDecl();

  // Calls to builtins are not enforced.
  if (!Caller || !Caller->hasAttr<EnforceTCBAttr>() ||
      Callee->getBuiltinID() != 0)
    return;

  // Search through the enforce_tcb and enforce_tcb_leaf attributes to find
  // all TCBs the callee is a part of.
  llvm::StringSet<> CalleeTCBs;
  for_each(Callee->specific_attrs<EnforceTCBAttr>(),
           [&](const auto *A) { CalleeTCBs.insert(A->getTCBName()); });
  for_each(Callee->specific_attrs<EnforceTCBLeafAttr>(),
           [&](const auto *A) { CalleeTCBs.insert(A->getTCBName()); });

  // Go through the TCBs the caller is a part of and emit warnings if Caller
  // is in a TCB that the Callee is not.
  for_each(Caller->specific_attrs<EnforceTCBAttr>(), [&](const auto *A) {
    StringRef CallerTCB = A->getTCBName();
    if (CalleeTCBs.count(CallerTCB) == 0) {
      this->Diag(TheCall->getExprLoc(), diag::warn_tcb_enforcement_violation)
          << Callee << CallerTCB;
    }
  });
}
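// Illustrative example (editor-added sketch, not upstream code) of the
// behavior implemented by CheckTCBEnforcement above, using the enforce_tcb
// attribute spelling:
//
//   void outside(void);
//   void inside(void) __attribute__((enforce_tcb("net")));
//   void entry(void) __attribute__((enforce_tcb("net"))) {
//     inside();   // OK: callee is in TCB "net".
//     outside();  // warn_tcb_enforcement_violation: callee is not in "net".
//   }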