//===- SemaChecking.cpp - Extra Semantic Checking -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements extra semantic analysis beyond what is enforced
// by the C type system.
//
//===----------------------------------------------------------------------===//

#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/AttrIterator.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/FormatString.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/NonTrivialTypeVisitor.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/UnresolvedSet.h"
#include "clang/Basic/AddressSpaces.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/OpenCLOptions.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/SyncScope.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetCXXABI.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering.
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/Sema.h"
#include "clang/Sema/SemaInternal.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/Locale.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <bitset>
#include <cassert>
#include <cctype>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <limits>
#include <string>
#include <tuple>
#include <utility>

using namespace clang;
using namespace sema;

SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL,
                                                    unsigned ByteNo) const {
  return SL->getLocationOfByte(ByteNo, getSourceManager(), LangOpts,
                               Context.getTargetInfo());
}

/// Checks that a call expression's argument count is the desired number.
/// This is useful when doing custom type-checking. Returns true on error.
static bool checkArgCount(Sema &S, CallExpr *call, unsigned desiredArgCount) {
  unsigned argCount = call->getNumArgs();
  if (argCount == desiredArgCount) return false;

  if (argCount < desiredArgCount)
    return S.Diag(call->getEndLoc(), diag::err_typecheck_call_too_few_args)
           << 0 /*function call*/ << desiredArgCount << argCount
           << call->getSourceRange();

  // Highlight all the excess arguments.
  SourceRange range(call->getArg(desiredArgCount)->getBeginLoc(),
                    call->getArg(argCount - 1)->getEndLoc());

  return S.Diag(range.getBegin(), diag::err_typecheck_call_too_many_args)
         << 0 /*function call*/ << desiredArgCount << argCount
         << call->getArg(1)->getSourceRange();
}

/// Check that the first argument to __builtin_annotation is an integer
/// and the second argument is a non-wide string literal.
static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  // First argument should be an integer.
  Expr *ValArg = TheCall->getArg(0);
  QualType Ty = ValArg->getType();
  if (!Ty->isIntegerType()) {
    S.Diag(ValArg->getBeginLoc(), diag::err_builtin_annotation_first_arg)
        << ValArg->getSourceRange();
    return true;
  }

  // Second argument should be a constant string.
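  // For illustration, a well-formed call looks like
  //   __builtin_annotation(some_int, "my annotation");
  // where the string must be an ordinary (narrow, non-wide) literal.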
  Expr *StrArg = TheCall->getArg(1)->IgnoreParenCasts();
  StringLiteral *Literal = dyn_cast<StringLiteral>(StrArg);
  if (!Literal || !Literal->isAscii()) {
    S.Diag(StrArg->getBeginLoc(), diag::err_builtin_annotation_second_arg)
        << StrArg->getSourceRange();
    return true;
  }

  TheCall->setType(Ty);
  return false;
}

static bool SemaBuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) {
  // We need at least one argument.
  if (TheCall->getNumArgs() < 1) {
    S.Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 1 << TheCall->getNumArgs()
        << TheCall->getCallee()->getSourceRange();
    return true;
  }

  // All arguments should be wide string literals.
  for (Expr *Arg : TheCall->arguments()) {
    auto *Literal = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts());
    if (!Literal || !Literal->isWide()) {
      S.Diag(Arg->getBeginLoc(), diag::err_msvc_annotation_wide_str)
          << Arg->getSourceRange();
      return true;
    }
  }

  return false;
}

/// Check that the argument to __builtin_addressof is a glvalue, and set the
/// result type to the corresponding pointer type.
static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  ExprResult Arg(TheCall->getArg(0));
  QualType ResultType = S.CheckAddressOfOperand(Arg, TheCall->getBeginLoc());
  if (ResultType.isNull())
    return true;

  TheCall->setArg(0, Arg.get());
  TheCall->setType(ResultType);
  return false;
}

/// Check the number of arguments and set the result type to
/// the argument type.
static bool SemaBuiltinPreserveAI(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  TheCall->setType(TheCall->getArg(0)->getType());
  return false;
}

/// Check that the value argument for __builtin_is_aligned(value, alignment)
/// and __builtin_align_{up,down}(value, alignment) is an integer or a pointer
/// type (but not a function pointer) and that the alignment is a power-of-two.
static bool SemaBuiltinAlignment(Sema &S, CallExpr *TheCall, unsigned ID) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  clang::Expr *Source = TheCall->getArg(0);
  bool IsBooleanAlignBuiltin = ID == Builtin::BI__builtin_is_aligned;

  auto IsValidIntegerType = [](QualType Ty) {
    return Ty->isIntegerType() && !Ty->isEnumeralType() && !Ty->isBooleanType();
  };
  QualType SrcTy = Source->getType();
  // We should also be able to use it with arrays (but not functions!).
  if (SrcTy->canDecayToPointerType() && SrcTy->isArrayType()) {
    SrcTy = S.Context.getDecayedType(SrcTy);
  }
  if ((!SrcTy->isPointerType() && !IsValidIntegerType(SrcTy)) ||
      SrcTy->isFunctionPointerType()) {
    // FIXME: this is not quite the right error message since we don't allow
    // floating point types, or member pointers.
    S.Diag(Source->getExprLoc(), diag::err_typecheck_expect_scalar_operand)
        << SrcTy;
    return true;
  }

  clang::Expr *AlignOp = TheCall->getArg(1);
  if (!IsValidIntegerType(AlignOp->getType())) {
    S.Diag(AlignOp->getExprLoc(), diag::err_typecheck_expect_int)
        << AlignOp->getType();
    return true;
  }
  Expr::EvalResult AlignResult;
  unsigned MaxAlignmentBits = S.Context.getIntWidth(SrcTy) - 1;
  // We can't check the validity of the alignment if it is value dependent.
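  // (For example, an alignment expression that names a non-type template
  // parameter.)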
  if (!AlignOp->isValueDependent() &&
      AlignOp->EvaluateAsInt(AlignResult, S.Context,
                             Expr::SE_AllowSideEffects)) {
    llvm::APSInt AlignValue = AlignResult.Val.getInt();
    llvm::APSInt MaxValue(
        llvm::APInt::getOneBitSet(MaxAlignmentBits + 1, MaxAlignmentBits));
    if (AlignValue < 1) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_small) << 1;
      return true;
    }
    if (llvm::APSInt::compareValues(AlignValue, MaxValue) > 0) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_big)
          << MaxValue.toString(10);
      return true;
    }
    if (!AlignValue.isPowerOf2()) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_not_power_of_two);
      return true;
    }
    if (AlignValue == 1) {
      S.Diag(AlignOp->getExprLoc(), diag::warn_alignment_builtin_useless)
          << IsBooleanAlignBuiltin;
    }
  }

  ExprResult SrcArg = S.PerformCopyInitialization(
      InitializedEntity::InitializeParameter(S.Context, SrcTy, false),
      SourceLocation(), Source);
  if (SrcArg.isInvalid())
    return true;
  TheCall->setArg(0, SrcArg.get());
  ExprResult AlignArg =
      S.PerformCopyInitialization(InitializedEntity::InitializeParameter(
                                      S.Context, AlignOp->getType(), false),
                                  SourceLocation(), AlignOp);
  if (AlignArg.isInvalid())
    return true;
  TheCall->setArg(1, AlignArg.get());
  // For align_up/align_down, the return type is the same as the (potentially
  // decayed) argument type including qualifiers. For is_aligned(), the result
  // is always bool.
  TheCall->setType(IsBooleanAlignBuiltin ? S.Context.BoolTy : SrcTy);
  return false;
}

static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall,
                                unsigned BuiltinID) {
  if (checkArgCount(S, TheCall, 3))
    return true;

  // First two arguments should be integers.
  for (unsigned I = 0; I < 2; ++I) {
    ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(I));
    if (Arg.isInvalid()) return true;
    TheCall->setArg(I, Arg.get());

    QualType Ty = Arg.get()->getType();
    if (!Ty->isIntegerType()) {
      S.Diag(Arg.get()->getBeginLoc(), diag::err_overflow_builtin_must_be_int)
          << Ty << Arg.get()->getSourceRange();
      return true;
    }
  }

  // Third argument should be a pointer to a non-const integer.
  // IRGen correctly handles volatile, restrict, and address spaces, and
  // the other qualifiers aren't possible.
  {
    ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(2));
    if (Arg.isInvalid()) return true;
    TheCall->setArg(2, Arg.get());

    QualType Ty = Arg.get()->getType();
    const auto *PtrTy = Ty->getAs<PointerType>();
    if (!PtrTy ||
        !PtrTy->getPointeeType()->isIntegerType() ||
        PtrTy->getPointeeType().isConstQualified()) {
      S.Diag(Arg.get()->getBeginLoc(),
             diag::err_overflow_builtin_must_be_ptr_int)
          << Ty << Arg.get()->getSourceRange();
      return true;
    }
  }

  // Disallow signed ExtIntType args larger than 128 bits to the mul function
  // until we improve backend support.
  if (BuiltinID == Builtin::BI__builtin_mul_overflow) {
    for (unsigned I = 0; I < 3; ++I) {
      const auto Arg = TheCall->getArg(I);
      // Third argument will be a pointer.
      auto Ty = I < 2 ? Arg->getType() : Arg->getType()->getPointeeType();
      if (Ty->isExtIntType() && Ty->isSignedIntegerType() &&
          S.getASTContext().getIntWidth(Ty) > 128)
        return S.Diag(Arg->getBeginLoc(),
                      diag::err_overflow_builtin_ext_int_max_size)
               << 128;
    }
  }

  return false;
}

static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
  if (checkArgCount(S, BuiltinCall, 2))
    return true;

  SourceLocation BuiltinLoc = BuiltinCall->getBeginLoc();
  Expr *Builtin = BuiltinCall->getCallee()->IgnoreImpCasts();
  Expr *Call = BuiltinCall->getArg(0);
  Expr *Chain = BuiltinCall->getArg(1);

  if (Call->getStmtClass() != Stmt::CallExprClass) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_not_call)
        << Call->getSourceRange();
    return true;
  }

  auto CE = cast<CallExpr>(Call);
  if (CE->getCallee()->getType()->isBlockPointerType()) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_block_call)
        << Call->getSourceRange();
    return true;
  }

  const Decl *TargetDecl = CE->getCalleeDecl();
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
    if (FD->getBuiltinID()) {
      S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_builtin_call)
          << Call->getSourceRange();
      return true;
    }

  if (isa<CXXPseudoDestructorExpr>(CE->getCallee()->IgnoreParens())) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_pdtor_call)
        << Call->getSourceRange();
    return true;
  }

  ExprResult ChainResult = S.UsualUnaryConversions(Chain);
  if (ChainResult.isInvalid())
    return true;
  if (!ChainResult.get()->getType()->isPointerType()) {
    S.Diag(BuiltinLoc, diag::err_second_argument_to_cwsc_not_pointer)
        << Chain->getSourceRange();
    return true;
  }

  QualType ReturnTy = CE->getCallReturnType(S.Context);
  QualType ArgTys[2] = { ReturnTy, ChainResult.get()->getType() };
  QualType BuiltinTy = S.Context.getFunctionType(
      ReturnTy, ArgTys, FunctionProtoType::ExtProtoInfo());
  QualType BuiltinPtrTy = S.Context.getPointerType(BuiltinTy);

  Builtin =
      S.ImpCastExprToType(Builtin, BuiltinPtrTy, CK_BuiltinFnToFnPtr).get();

  BuiltinCall->setType(CE->getType());
  BuiltinCall->setValueKind(CE->getValueKind());
  BuiltinCall->setObjectKind(CE->getObjectKind());
  BuiltinCall->setCallee(Builtin);
  BuiltinCall->setArg(1, ChainResult.get());

  return false;
}

namespace {

class EstimateSizeFormatHandler
    : public analyze_format_string::FormatStringHandler {
  size_t Size;

public:
  EstimateSizeFormatHandler(StringRef Format)
      : Size(std::min(Format.find(0), Format.size()) +
             1 /* null byte always written by sprintf */) {}

  bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
                             const char *, unsigned SpecifierLen) override {

    const size_t FieldWidth = computeFieldWidth(FS);
    const size_t Precision = computePrecision(FS);

    // The actual format.
    switch (FS.getConversionSpecifier().getKind()) {
    // Just a char.
    case analyze_format_string::ConversionSpecifier::cArg:
    case analyze_format_string::ConversionSpecifier::CArg:
      Size += std::max(FieldWidth, (size_t)1);
      break;
    // Just an integer.
    case analyze_format_string::ConversionSpecifier::dArg:
    case analyze_format_string::ConversionSpecifier::DArg:
    case analyze_format_string::ConversionSpecifier::iArg:
    case analyze_format_string::ConversionSpecifier::oArg:
    case analyze_format_string::ConversionSpecifier::OArg:
    case analyze_format_string::ConversionSpecifier::uArg:
    case analyze_format_string::ConversionSpecifier::UArg:
    case analyze_format_string::ConversionSpecifier::xArg:
    case analyze_format_string::ConversionSpecifier::XArg:
      Size += std::max(FieldWidth, Precision);
      break;

    // %g style conversion switches between %f or %e style dynamically.
    // %f always takes less space, so default to it.
    case analyze_format_string::ConversionSpecifier::gArg:
    case analyze_format_string::ConversionSpecifier::GArg:

    // Floating point number in the form '[+]ddd.ddd'.
    case analyze_format_string::ConversionSpecifier::fArg:
    case analyze_format_string::ConversionSpecifier::FArg:
      Size += std::max(FieldWidth, 1 /* integer part */ +
                                       (Precision ? 1 + Precision
                                                  : 0) /* period + decimal */);
      break;

    // Floating point number in the form '[-]d.ddde[+-]dd'.
    case analyze_format_string::ConversionSpecifier::eArg:
    case analyze_format_string::ConversionSpecifier::EArg:
      Size +=
          std::max(FieldWidth,
                   1 /* integer part */ +
                       (Precision ? 1 + Precision : 0) /* period + decimal */ +
                       1 /* e or E letter */ + 2 /* exponent */);
      break;

    // Floating point number in the form '[-]0xh.hhhhp±dd'.
    case analyze_format_string::ConversionSpecifier::aArg:
    case analyze_format_string::ConversionSpecifier::AArg:
      Size +=
          std::max(FieldWidth,
                   2 /* 0x */ + 1 /* integer part */ +
                       (Precision ? 1 + Precision : 0) /* period + decimal */ +
                       1 /* p or P letter */ + 1 /* + or - */ + 1 /* value */);
      break;

    // Just a string.
    case analyze_format_string::ConversionSpecifier::sArg:
    case analyze_format_string::ConversionSpecifier::SArg:
      Size += FieldWidth;
      break;

    // Just a pointer in the form '0xddd'.
    case analyze_format_string::ConversionSpecifier::pArg:
      Size += std::max(FieldWidth, 2 /* leading 0x */ + Precision);
      break;

    // A plain percent.
    case analyze_format_string::ConversionSpecifier::PercentArg:
      Size += 1;
      break;

    default:
      break;
    }

    Size += FS.hasPlusPrefix() || FS.hasSpacePrefix();

    if (FS.hasAlternativeForm()) {
      switch (FS.getConversionSpecifier().getKind()) {
      default:
        break;
      // Force a leading '0'.
      case analyze_format_string::ConversionSpecifier::oArg:
        Size += 1;
        break;
      // Force a leading '0x'.
      case analyze_format_string::ConversionSpecifier::xArg:
      case analyze_format_string::ConversionSpecifier::XArg:
        Size += 2;
        break;
      // Force a period '.' before decimal, even if precision is 0.
      case analyze_format_string::ConversionSpecifier::aArg:
      case analyze_format_string::ConversionSpecifier::AArg:
      case analyze_format_string::ConversionSpecifier::eArg:
      case analyze_format_string::ConversionSpecifier::EArg:
      case analyze_format_string::ConversionSpecifier::fArg:
      case analyze_format_string::ConversionSpecifier::FArg:
      case analyze_format_string::ConversionSpecifier::gArg:
      case analyze_format_string::ConversionSpecifier::GArg:
        Size += (Precision ? 0 : 1);
        break;
      }
    }
    assert(SpecifierLen <= Size && "no underflow");
    Size -= SpecifierLen;
    return true;
  }

  size_t getSizeLowerBound() const { return Size; }

private:
  static size_t computeFieldWidth(const analyze_printf::PrintfSpecifier &FS) {
    const analyze_format_string::OptionalAmount &FW = FS.getFieldWidth();
    size_t FieldWidth = 0;
    if (FW.getHowSpecified() == analyze_format_string::OptionalAmount::Constant)
      FieldWidth = FW.getConstantAmount();
    return FieldWidth;
  }

  static size_t computePrecision(const analyze_printf::PrintfSpecifier &FS) {
    const analyze_format_string::OptionalAmount &FW = FS.getPrecision();
    size_t Precision = 0;

    // See man 3 printf for default precision value based on the specifier.
    switch (FW.getHowSpecified()) {
    case analyze_format_string::OptionalAmount::NotSpecified:
      switch (FS.getConversionSpecifier().getKind()) {
      default:
        break;
      case analyze_format_string::ConversionSpecifier::dArg: // %d
      case analyze_format_string::ConversionSpecifier::DArg: // %D
      case analyze_format_string::ConversionSpecifier::iArg: // %i
        Precision = 1;
        break;
      case analyze_format_string::ConversionSpecifier::oArg: // %o
      case analyze_format_string::ConversionSpecifier::OArg: // %O
      case analyze_format_string::ConversionSpecifier::uArg: // %u
      case analyze_format_string::ConversionSpecifier::UArg: // %U
      case analyze_format_string::ConversionSpecifier::xArg: // %x
      case analyze_format_string::ConversionSpecifier::XArg: // %X
        Precision = 1;
        break;
      case analyze_format_string::ConversionSpecifier::fArg: // %f
      case analyze_format_string::ConversionSpecifier::FArg: // %F
      case analyze_format_string::ConversionSpecifier::eArg: // %e
      case analyze_format_string::ConversionSpecifier::EArg: // %E
      case analyze_format_string::ConversionSpecifier::gArg: // %g
      case analyze_format_string::ConversionSpecifier::GArg: // %G
        Precision = 6;
        break;
      case analyze_format_string::ConversionSpecifier::pArg: // %p
        Precision = 1;
        break;
      }
      break;
    case analyze_format_string::OptionalAmount::Constant:
      Precision = FW.getConstantAmount();
      break;
    default:
      break;
    }
    return Precision;
  }
};

} // namespace

/// Check a call to BuiltinID for buffer overflows. If BuiltinID is a
/// __builtin_*_chk function, then use the object size argument specified in
/// the source. Otherwise, infer the object size using __builtin_object_size.
void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
                                               CallExpr *TheCall) {
  // FIXME: There are some more useful checks we could be doing here:
  //  - Evaluate strlen of strcpy arguments, use as object size.

  if (TheCall->isValueDependent() || TheCall->isTypeDependent() ||
      isConstantEvaluated())
    return;

  unsigned BuiltinID = FD->getBuiltinID(/*ConsiderWrappers=*/true);
  if (!BuiltinID)
    return;

  const TargetInfo &TI = getASTContext().getTargetInfo();
  unsigned SizeTypeWidth = TI.getTypeWidth(TI.getSizeType());

  unsigned DiagID = 0;
  bool IsChkVariant = false;
  Optional<llvm::APSInt> UsedSize;
  unsigned SizeIndex, ObjectIndex;
  switch (BuiltinID) {
  default:
    return;
  case Builtin::BIsprintf:
  case Builtin::BI__builtin___sprintf_chk: {
    size_t FormatIndex = BuiltinID == Builtin::BIsprintf ? 1 : 3;
    auto *FormatExpr = TheCall->getArg(FormatIndex)->IgnoreParenImpCasts();

    if (auto *Format = dyn_cast<StringLiteral>(FormatExpr)) {

      if (!Format->isAscii() && !Format->isUTF8())
        return;

      StringRef FormatStrRef = Format->getString();
      EstimateSizeFormatHandler H(FormatStrRef);
      const char *FormatBytes = FormatStrRef.data();
      const ConstantArrayType *T =
          Context.getAsConstantArrayType(Format->getType());
      assert(T && "String literal not of constant array type!");
      size_t TypeSize = T->getSize().getZExtValue();

      // In case there's a null byte somewhere.
      size_t StrLen =
          std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0));
      if (!analyze_format_string::ParsePrintfString(
              H, FormatBytes, FormatBytes + StrLen, getLangOpts(),
              Context.getTargetInfo(), false)) {
        DiagID = diag::warn_fortify_source_format_overflow;
        UsedSize = llvm::APSInt::getUnsigned(H.getSizeLowerBound())
                       .extOrTrunc(SizeTypeWidth);
        if (BuiltinID == Builtin::BI__builtin___sprintf_chk) {
          IsChkVariant = true;
          ObjectIndex = 2;
        } else {
          IsChkVariant = false;
          ObjectIndex = 0;
        }
        break;
      }
    }
    return;
  }
  case Builtin::BI__builtin___memcpy_chk:
  case Builtin::BI__builtin___memmove_chk:
  case Builtin::BI__builtin___memset_chk:
  case Builtin::BI__builtin___strlcat_chk:
  case Builtin::BI__builtin___strlcpy_chk:
  case Builtin::BI__builtin___strncat_chk:
  case Builtin::BI__builtin___strncpy_chk:
  case Builtin::BI__builtin___stpncpy_chk:
  case Builtin::BI__builtin___memccpy_chk:
  case Builtin::BI__builtin___mempcpy_chk: {
    DiagID = diag::warn_builtin_chk_overflow;
    IsChkVariant = true;
    SizeIndex = TheCall->getNumArgs() - 2;
    ObjectIndex = TheCall->getNumArgs() - 1;
    break;
  }

  case Builtin::BI__builtin___snprintf_chk:
  case Builtin::BI__builtin___vsnprintf_chk: {
    DiagID = diag::warn_builtin_chk_overflow;
    IsChkVariant = true;
    SizeIndex = 1;
    ObjectIndex = 3;
    break;
  }

  case Builtin::BIstrncat:
  case Builtin::BI__builtin_strncat:
  case Builtin::BIstrncpy:
  case Builtin::BI__builtin_strncpy:
  case Builtin::BIstpncpy:
  case Builtin::BI__builtin_stpncpy: {
    // Whether these functions overflow depends on the runtime strlen of the
    // string, not just the buffer size, so emitting the "always overflow"
    // diagnostic isn't quite right. We should still diagnose passing a buffer
    // size larger than the destination buffer though; this is a runtime abort
    // in _FORTIFY_SOURCE mode, and is quite suspicious otherwise.
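    // For example, a call such as
    //   char buf[4]; strncpy(buf, src, 10);
    // passes a bound larger than the destination and is diagnosed here.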
    DiagID = diag::warn_fortify_source_size_mismatch;
    SizeIndex = TheCall->getNumArgs() - 1;
    ObjectIndex = 0;
    break;
  }

  case Builtin::BImemcpy:
  case Builtin::BI__builtin_memcpy:
  case Builtin::BImemmove:
  case Builtin::BI__builtin_memmove:
  case Builtin::BImemset:
  case Builtin::BI__builtin_memset:
  case Builtin::BImempcpy:
  case Builtin::BI__builtin_mempcpy: {
    DiagID = diag::warn_fortify_source_overflow;
    SizeIndex = TheCall->getNumArgs() - 1;
    ObjectIndex = 0;
    break;
  }
  case Builtin::BIsnprintf:
  case Builtin::BI__builtin_snprintf:
  case Builtin::BIvsnprintf:
  case Builtin::BI__builtin_vsnprintf: {
    DiagID = diag::warn_fortify_source_size_mismatch;
    SizeIndex = 1;
    ObjectIndex = 0;
    break;
  }
  }

  llvm::APSInt ObjectSize;
  // For __builtin___*_chk, the object size is explicitly provided by the caller
  // (usually using __builtin_object_size). Use that value to check this call.
  if (IsChkVariant) {
    Expr::EvalResult Result;
    Expr *SizeArg = TheCall->getArg(ObjectIndex);
    if (!SizeArg->EvaluateAsInt(Result, getASTContext()))
      return;
    ObjectSize = Result.Val.getInt();

    // Otherwise, try to evaluate an imaginary call to __builtin_object_size.
  } else {
    // If the parameter has a pass_object_size attribute, then we should use its
    // (potentially) more strict checking mode. Otherwise, conservatively assume
    // type 0.
    int BOSType = 0;
    if (const auto *POS =
            FD->getParamDecl(ObjectIndex)->getAttr<PassObjectSizeAttr>())
      BOSType = POS->getType();

    Expr *ObjArg = TheCall->getArg(ObjectIndex);
    uint64_t Result;
    if (!ObjArg->tryEvaluateObjectSize(Result, getASTContext(), BOSType))
      return;
    // Get the object size in the target's size_t width.
    ObjectSize = llvm::APSInt::getUnsigned(Result).extOrTrunc(SizeTypeWidth);
  }

  // Evaluate the number of bytes of the object that this call will use.
  if (!UsedSize) {
    Expr::EvalResult Result;
    Expr *UsedSizeArg = TheCall->getArg(SizeIndex);
    if (!UsedSizeArg->EvaluateAsInt(Result, getASTContext()))
      return;
    UsedSize = Result.Val.getInt().extOrTrunc(SizeTypeWidth);
  }

  if (UsedSize.getValue().ule(ObjectSize))
    return;

  StringRef FunctionName = getASTContext().BuiltinInfo.getName(BuiltinID);
  // Skim off the details of whichever builtin was called to produce a better
  // diagnostic, as it's unlikely that the user wrote the __builtin explicitly.
  if (IsChkVariant) {
    FunctionName = FunctionName.drop_front(std::strlen("__builtin___"));
    FunctionName = FunctionName.drop_back(std::strlen("_chk"));
  } else if (FunctionName.startswith("__builtin_")) {
    FunctionName = FunctionName.drop_front(std::strlen("__builtin_"));
  }

  DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
                      PDiag(DiagID)
                          << FunctionName << ObjectSize.toString(/*Radix=*/10)
                          << UsedSize.getValue().toString(/*Radix=*/10));
}

static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall,
                                     Scope::ScopeFlags NeededScopeFlags,
                                     unsigned DiagID) {
  // Scopes aren't available during instantiation. Fortunately, builtin
  // functions cannot be template args so they cannot be formed through template
  // instantiation. Therefore checking once during the parse is sufficient.
  if (SemaRef.inTemplateInstantiation())
    return false;

  Scope *S = SemaRef.getCurScope();
  while (S && !S->isSEHExceptScope())
    S = S->getParent();
  if (!S || !(S->getFlags() & NeededScopeFlags)) {
    auto *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
    SemaRef.Diag(TheCall->getExprLoc(), DiagID)
        << DRE->getDecl()->getIdentifier();
    return true;
  }

  return false;
}

static inline bool isBlockPointer(Expr *Arg) {
  return Arg->getType()->isBlockPointerType();
}

/// OpenCL C v2.0, s6.13.17.2 - Checks that the block parameters are all local
/// void*, which is a requirement of device side enqueue.
static bool checkOpenCLBlockArgs(Sema &S, Expr *BlockArg) {
  const BlockPointerType *BPT =
      cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
  ArrayRef<QualType> Params =
      BPT->getPointeeType()->castAs<FunctionProtoType>()->getParamTypes();
  unsigned ArgCounter = 0;
  bool IllegalParams = false;
  // Iterate through the block parameters until either one is found that is not
  // a local void*, or the block is valid.
  for (ArrayRef<QualType>::iterator I = Params.begin(), E = Params.end();
       I != E; ++I, ++ArgCounter) {
    if (!(*I)->isPointerType() || !(*I)->getPointeeType()->isVoidType() ||
        (*I)->getPointeeType().getQualifiers().getAddressSpace() !=
            LangAS::opencl_local) {
      // Get the location of the error. If a block literal has been passed
      // (BlockExpr) then we can point straight to the offending argument,
      // else we just point to the variable reference.
      SourceLocation ErrorLoc;
      if (isa<BlockExpr>(BlockArg)) {
        BlockDecl *BD = cast<BlockExpr>(BlockArg)->getBlockDecl();
        ErrorLoc = BD->getParamDecl(ArgCounter)->getBeginLoc();
      } else if (isa<DeclRefExpr>(BlockArg)) {
        ErrorLoc = cast<DeclRefExpr>(BlockArg)->getBeginLoc();
      }
      S.Diag(ErrorLoc,
             diag::err_opencl_enqueue_kernel_blocks_non_local_void_args);
      IllegalParams = true;
    }
  }

  return IllegalParams;
}

static bool checkOpenCLSubgroupExt(Sema &S, CallExpr *Call) {
  if (!S.getOpenCLOptions().isSupported("cl_khr_subgroups", S.getLangOpts())) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_requires_extension)
        << 1 << Call->getDirectCallee() << "cl_khr_subgroups";
    return true;
  }
  return false;
}

static bool SemaOpenCLBuiltinNDRangeAndBlock(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  if (checkOpenCLSubgroupExt(S, TheCall))
    return true;

  // First argument is an ndrange_t type.
  Expr *NDRangeArg = TheCall->getArg(0);
  if (NDRangeArg->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
    S.Diag(NDRangeArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'ndrange_t'";
    return true;
  }

  Expr *BlockArg = TheCall->getArg(1);
  if (!isBlockPointer(BlockArg)) {
    S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "block";
    return true;
  }
  return checkOpenCLBlockArgs(S, BlockArg);
}

/// OpenCL C v2.0, s6.13.17.6 - Check the argument to the
/// get_kernel_work_group_size
/// and get_kernel_preferred_work_group_size_multiple builtin functions.
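/// For illustration, a typical call looks like
///   get_kernel_work_group_size(^(local void *p){ /* ... */ })
/// where the argument must be a block whose parameters (if any) are all
/// 'local void *'.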
static bool SemaOpenCLBuiltinKernelWorkGroupSize(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  Expr *BlockArg = TheCall->getArg(0);
  if (!isBlockPointer(BlockArg)) {
    S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "block";
    return true;
  }
  return checkOpenCLBlockArgs(S, BlockArg);
}

/// Diagnose integer type and any valid implicit conversion to it.
static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E,
                                      const QualType &IntType);

static bool checkOpenCLEnqueueLocalSizeArgs(Sema &S, CallExpr *TheCall,
                                            unsigned Start, unsigned End) {
  bool IllegalParams = false;
  for (unsigned I = Start; I <= End; ++I)
    IllegalParams |= checkOpenCLEnqueueIntType(S, TheCall->getArg(I),
                                               S.Context.getSizeType());
  return IllegalParams;
}

/// OpenCL v2.0, s6.13.17.1 - Check that sizes are provided for all
/// 'local void*' parameters of the passed block.
static bool checkOpenCLEnqueueVariadicArgs(Sema &S, CallExpr *TheCall,
                                           Expr *BlockArg,
                                           unsigned NumNonVarArgs) {
  const BlockPointerType *BPT =
      cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
  unsigned NumBlockParams =
      BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams();
  unsigned TotalNumArgs = TheCall->getNumArgs();

  // For each argument passed to the block, a corresponding uint needs to
  // be passed to describe the size of the local memory.
  if (TotalNumArgs != NumBlockParams + NumNonVarArgs) {
    S.Diag(TheCall->getBeginLoc(),
           diag::err_opencl_enqueue_kernel_local_size_args);
    return true;
  }

  // Check that the sizes of the local memory are specified by integers.
  return checkOpenCLEnqueueLocalSizeArgs(S, TheCall, NumNonVarArgs,
                                         TotalNumArgs - 1);
}

/// OpenCL C v2.0, s6.13.17 - Enqueue kernel function contains four different
/// overload formats specified in Table 6.13.17.1.
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    void (^block)(void))
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    uint num_events_in_wait_list,
///                    clk_event_t *event_wait_list,
///                    clk_event_t *event_ret,
///                    void (^block)(void))
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    void (^block)(local void*, ...),
///                    uint size0, ...)
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    uint num_events_in_wait_list,
///                    clk_event_t *event_wait_list,
///                    clk_event_t *event_ret,
///                    void (^block)(local void*, ...),
///                    uint size0, ...)
static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
  unsigned NumArgs = TheCall->getNumArgs();

  if (NumArgs < 4) {
    S.Diag(TheCall->getBeginLoc(),
           diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 4 << NumArgs;
    return true;
  }

  Expr *Arg0 = TheCall->getArg(0);
  Expr *Arg1 = TheCall->getArg(1);
  Expr *Arg2 = TheCall->getArg(2);
  Expr *Arg3 = TheCall->getArg(3);

  // First argument always needs to be a queue_t type.
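  // For illustration, a minimal call has the form
  //   enqueue_kernel(get_default_queue(), CLK_ENQUEUE_FLAGS_WAIT_KERNEL,
  //                  ndrange, ^{ /* kernel body */ });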
  if (!Arg0->getType()->isQueueT()) {
    S.Diag(TheCall->getArg(0)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << S.Context.OCLQueueTy;
    return true;
  }

  // Second argument always needs to be a kernel_enqueue_flags_t enum value.
  if (!Arg1->getType()->isIntegerType()) {
    S.Diag(TheCall->getArg(1)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'kernel_enqueue_flags_t' (i.e. uint)";
    return true;
  }

  // Third argument is always an ndrange_t type.
  if (Arg2->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
    S.Diag(TheCall->getArg(2)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'ndrange_t'";
    return true;
  }

  // With four arguments, there is only one form that the function could be
  // called in: no events and no variable arguments.
  if (NumArgs == 4) {
    // Check that the last argument is the right block type.
    if (!isBlockPointer(Arg3)) {
      S.Diag(Arg3->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    // We have a block type; check the prototype.
    const BlockPointerType *BPT =
        cast<BlockPointerType>(Arg3->getType().getCanonicalType());
    if (BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams() > 0) {
      S.Diag(Arg3->getBeginLoc(),
             diag::err_opencl_enqueue_kernel_blocks_no_args);
      return true;
    }
    return false;
  }
  // We can have block + varargs.
  if (isBlockPointer(Arg3))
    return (checkOpenCLBlockArgs(S, Arg3) ||
            checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg3, 4));
  // The last two cases have either exactly 7 args, or 7 args and varargs.
  if (NumArgs >= 7) {
    // Check the common block argument.
    Expr *Arg6 = TheCall->getArg(6);
    if (!isBlockPointer(Arg6)) {
      S.Diag(Arg6->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    if (checkOpenCLBlockArgs(S, Arg6))
      return true;

    // Fourth argument has to be any integer type.
    if (!Arg3->getType()->isIntegerType()) {
      S.Diag(TheCall->getArg(3)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "integer";
      return true;
    }
    // Check the remaining common arguments.
    Expr *Arg4 = TheCall->getArg(4);
    Expr *Arg5 = TheCall->getArg(5);

    // Fifth argument is always passed as a pointer to clk_event_t.
    if (!Arg4->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !Arg4->getType()->getPointeeOrArrayElementType()->isClkEventT()) {
      S.Diag(TheCall->getArg(4)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    // Sixth argument is always passed as a pointer to clk_event_t.
    if (!Arg5->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !(Arg5->getType()->isPointerType() &&
          Arg5->getType()->getPointeeType()->isClkEventT())) {
      S.Diag(TheCall->getArg(5)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    if (NumArgs == 7)
      return false;

    return checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg6, 7);
  }

  // None of the specific cases has been detected; give a generic error.
  S.Diag(TheCall->getBeginLoc(),
         diag::err_opencl_enqueue_kernel_incorrect_args);
  return true;
}

/// Returns the OpenCL access qualifier.
static OpenCLAccessAttr *getOpenCLArgAccess(const Decl *D) {
  return D->getAttr<OpenCLAccessAttr>();
}

/// Returns true if the first argument is not a pipe, or if its access
/// qualifier is incompatible with the builtin being called.
static bool checkOpenCLPipeArg(Sema &S, CallExpr *Call) {
  const Expr *Arg0 = Call->getArg(0);
  // First argument type should always be pipe.
  if (!Arg0->getType()->isPipeType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
        << Call->getDirectCallee() << Arg0->getSourceRange();
    return true;
  }
  OpenCLAccessAttr *AccessQual =
      getOpenCLArgAccess(cast<DeclRefExpr>(Arg0)->getDecl());
  // Validates the access qualifier is compatible with the call.
  // OpenCL v2.0 s6.13.16 - The access qualifiers for pipe should only be
  // read_only and write_only, and assumed to be read_only if no qualifier is
  // specified.
  switch (Call->getDirectCallee()->getBuiltinID()) {
  case Builtin::BIread_pipe:
  case Builtin::BIreserve_read_pipe:
  case Builtin::BIcommit_read_pipe:
  case Builtin::BIwork_group_reserve_read_pipe:
  case Builtin::BIsub_group_reserve_read_pipe:
  case Builtin::BIwork_group_commit_read_pipe:
  case Builtin::BIsub_group_commit_read_pipe:
    if (!(!AccessQual || AccessQual->isReadOnly())) {
      S.Diag(Arg0->getBeginLoc(),
             diag::err_opencl_builtin_pipe_invalid_access_modifier)
          << "read_only" << Arg0->getSourceRange();
      return true;
    }
    break;
  case Builtin::BIwrite_pipe:
  case Builtin::BIreserve_write_pipe:
  case Builtin::BIcommit_write_pipe:
  case Builtin::BIwork_group_reserve_write_pipe:
  case Builtin::BIsub_group_reserve_write_pipe:
  case Builtin::BIwork_group_commit_write_pipe:
  case Builtin::BIsub_group_commit_write_pipe:
    if (!(AccessQual && AccessQual->isWriteOnly())) {
      S.Diag(Arg0->getBeginLoc(),
             diag::err_opencl_builtin_pipe_invalid_access_modifier)
          << "write_only" << Arg0->getSourceRange();
      return true;
    }
    break;
  default:
    break;
  }
  return false;
}

/// Returns true if the pipe element type is different from the pointer.
static bool checkOpenCLPipePacketType(Sema &S, CallExpr *Call, unsigned Idx) {
  const Expr *Arg0 = Call->getArg(0);
  const Expr *ArgIdx = Call->getArg(Idx);
  const PipeType *PipeTy = cast<PipeType>(Arg0->getType());
  const QualType EltTy = PipeTy->getElementType();
  const PointerType *ArgTy = ArgIdx->getType()->getAs<PointerType>();
  // The Idx argument should be a pointer, and the type of the pointer and
  // the type of the pipe element should be the same.
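  // For example, with a 'pipe int p', the packet argument to read_pipe or
  // write_pipe must have type 'int *'.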
  if (!ArgTy ||
      !S.Context.hasSameType(
          EltTy, ArgTy->getPointeeType()->getCanonicalTypeInternal())) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.getPointerType(EltTy)
        << ArgIdx->getType() << ArgIdx->getSourceRange();
    return true;
  }
  return false;
}

// Performs semantic analysis for the read/write_pipe call.
// \param S Reference to the semantic analyzer.
// \param Call A pointer to the builtin call.
// \return True if a semantic error has been found, false otherwise.
static bool SemaBuiltinRWPipe(Sema &S, CallExpr *Call) {
  // OpenCL v2.0 s6.13.16.2 - The built-in read/write
  // functions have two forms.
  switch (Call->getNumArgs()) {
  case 2:
    if (checkOpenCLPipeArg(S, Call))
      return true;
    // The call with 2 arguments should be
    // read/write_pipe(pipe T, T*).
    // Check packet type T.
    if (checkOpenCLPipePacketType(S, Call, 1))
      return true;
    break;

  case 4: {
    if (checkOpenCLPipeArg(S, Call))
      return true;
    // The call with 4 arguments should be
    // read/write_pipe(pipe T, reserve_id_t, uint, T*).
    // Check reserve_id_t.
    if (!Call->getArg(1)->getType()->isReserveIDT()) {
      S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
          << Call->getDirectCallee() << S.Context.OCLReserveIDTy
          << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
      return true;
    }

    // Check the index.
    const Expr *Arg2 = Call->getArg(2);
    if (!Arg2->getType()->isIntegerType() &&
        !Arg2->getType()->isUnsignedIntegerType()) {
      S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
          << Call->getDirectCallee() << S.Context.UnsignedIntTy
          << Arg2->getType() << Arg2->getSourceRange();
      return true;
    }

    // Check packet type T.
    if (checkOpenCLPipePacketType(S, Call, 3))
      return true;
  } break;
  default:
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_arg_num)
        << Call->getDirectCallee() << Call->getSourceRange();
    return true;
  }

  return false;
}

// Performs a semantic analysis on the {work_group_/sub_group_
// /_}reserve_{read/write}_pipe
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinReserveRWPipe(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 2))
    return true;

  if (checkOpenCLPipeArg(S, Call))
    return true;

  // Check the reserve size.
  if (!Call->getArg(1)->getType()->isIntegerType() &&
      !Call->getArg(1)->getType()->isUnsignedIntegerType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.UnsignedIntTy
        << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
    return true;
  }

  // Since the return type of the reserve_read/write_pipe built-in functions is
  // reserve_id_t, which is not defined in the builtin def file, we use int as
  // the return type and need to override the return type of these functions.
  Call->setType(S.Context.OCLReserveIDTy);

  return false;
}

// Performs a semantic analysis on {work_group_/sub_group_
// /_}commit_{read/write}_pipe
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinCommitRWPipe(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 2))
    return true;

  if (checkOpenCLPipeArg(S, Call))
    return true;

  // Check reserve_id_t.
  if (!Call->getArg(1)->getType()->isReserveIDT()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.OCLReserveIDTy
        << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
    return true;
  }

  return false;
}

// Performs a semantic analysis on the call to built-in Pipe
// Query Functions.
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinPipePackets(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 1))
    return true;

  if (!Call->getArg(0)->getType()->isPipeType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
        << Call->getDirectCallee() << Call->getArg(0)->getSourceRange();
    return true;
  }

  return false;
}

// OpenCL v2.0 s6.13.9 - Address space qualifier functions.
// Performs semantic analysis for the to_global/local/private call.
// \param S Reference to the semantic analyzer.
// \param BuiltinID ID of the builtin function.
// \param Call A pointer to the builtin call.
// \return True if a semantic error has been found, false otherwise.
static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID,
                                    CallExpr *Call) {
  if (checkArgCount(S, Call, 1))
    return true;

  auto RT = Call->getArg(0)->getType();
  if (!RT->isPointerType() ||
      RT->getPointeeType().getAddressSpace() == LangAS::opencl_constant) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_invalid_arg)
        << Call->getArg(0) << Call->getDirectCallee() << Call->getSourceRange();
    return true;
  }

  if (RT->getPointeeType().getAddressSpace() != LangAS::opencl_generic) {
    S.Diag(Call->getArg(0)->getBeginLoc(),
           diag::warn_opencl_generic_address_space_arg)
        << Call->getDirectCallee()->getNameInfo().getAsString()
        << Call->getArg(0)->getSourceRange();
  }

  RT = RT->getPointeeType();
  auto Qual = RT.getQualifiers();
  switch (BuiltinID) {
  case Builtin::BIto_global:
    Qual.setAddressSpace(LangAS::opencl_global);
    break;
  case Builtin::BIto_local:
    Qual.setAddressSpace(LangAS::opencl_local);
    break;
  case Builtin::BIto_private:
    Qual.setAddressSpace(LangAS::opencl_private);
    break;
  default:
    llvm_unreachable("Invalid builtin function");
  }
  Call->setType(S.Context.getPointerType(
      S.Context.getQualifiedType(RT.getUnqualifiedType(), Qual)));

  return false;
}

static ExprResult SemaBuiltinLaunder(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return ExprError();

  // Compute __builtin_launder's parameter type from the argument.
  // The parameter type is:
  //  * The type of the argument if it's not an array or function type,
  //    Otherwise,
  //  * The decayed argument type.
  QualType ParamTy = [&]() {
    QualType ArgTy = TheCall->getArg(0)->getType();
    if (const ArrayType *Ty = ArgTy->getAsArrayTypeUnsafe())
      return S.Context.getPointerType(Ty->getElementType());
    if (ArgTy->isFunctionType()) {
      return S.Context.getPointerType(ArgTy);
    }
    return ArgTy;
  }();

  TheCall->setType(ParamTy);

  auto DiagSelect = [&]() -> llvm::Optional<unsigned> {
    if (!ParamTy->isPointerType())
      return 0;
    if (ParamTy->isFunctionPointerType())
      return 1;
    if (ParamTy->isVoidPointerType())
      return 2;
    return llvm::Optional<unsigned>{};
  }();
  if (DiagSelect.hasValue()) {
    S.Diag(TheCall->getBeginLoc(), diag::err_builtin_launder_invalid_arg)
        << DiagSelect.getValue() << TheCall->getSourceRange();
    return ExprError();
  }

  // We either have an incomplete class type, or we have a class template
  // whose instantiation has not been forced. Example:
  //
  //   template <class T> struct Foo { T value; };
  //   Foo<int> *p = nullptr;
  //   auto *d = __builtin_launder(p);
  if (S.RequireCompleteType(TheCall->getBeginLoc(), ParamTy->getPointeeType(),
                            diag::err_incomplete_type))
    return ExprError();

  assert(ParamTy->getPointeeType()->isObjectType() &&
         "Unhandled non-object pointer case");

  InitializedEntity Entity =
      InitializedEntity::InitializeParameter(S.Context, ParamTy, false);
  ExprResult Arg =
      S.PerformCopyInitialization(Entity, SourceLocation(), TheCall->getArg(0));
  if (Arg.isInvalid())
    return ExprError();
  TheCall->setArg(0, Arg.get());

  return TheCall;
}

// Emit an error and return true if the current architecture is not in the list
// of supported architectures.
static bool
CheckBuiltinTargetSupport(Sema &S, unsigned BuiltinID, CallExpr *TheCall,
                          ArrayRef<llvm::Triple::ArchType> SupportedArchs) {
  llvm::Triple::ArchType CurArch =
      S.getASTContext().getTargetInfo().getTriple().getArch();
  if (llvm::is_contained(SupportedArchs, CurArch))
    return false;
  S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
      << TheCall->getSourceRange();
  return true;
}

static void CheckNonNullArgument(Sema &S, const Expr *ArgExpr,
                                 SourceLocation CallSiteLoc);

bool Sema::CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                      CallExpr *TheCall) {
  switch (TI.getTriple().getArch()) {
  default:
    // Some builtins don't require additional checking, so just consider these
    // acceptable.
    return false;
  case llvm::Triple::arm:
  case llvm::Triple::armeb:
  case llvm::Triple::thumb:
  case llvm::Triple::thumbeb:
    return CheckARMBuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::aarch64:
  case llvm::Triple::aarch64_32:
  case llvm::Triple::aarch64_be:
    return CheckAArch64BuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::bpfeb:
  case llvm::Triple::bpfel:
    return CheckBPFBuiltinFunctionCall(BuiltinID, TheCall);
  case llvm::Triple::hexagon:
    return CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall);
  case llvm::Triple::mips:
  case llvm::Triple::mipsel:
  case llvm::Triple::mips64:
  case llvm::Triple::mips64el:
    return CheckMipsBuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::systemz:
    return CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall);
  case llvm::Triple::x86:
  case llvm::Triple::x86_64:
    return CheckX86BuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::ppc:
  case llvm::Triple::ppcle:
  case llvm::Triple::ppc64:
  case llvm::Triple::ppc64le:
    return CheckPPCBuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::amdgcn:
    return CheckAMDGCNBuiltinFunctionCall(BuiltinID, TheCall);
  case llvm::Triple::riscv32:
  case llvm::Triple::riscv64:
    return CheckRISCVBuiltinFunctionCall(TI, BuiltinID, TheCall);
  }
}

ExprResult
Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
                               CallExpr *TheCall) {
  ExprResult TheCallResult(TheCall);

  // Find out if any arguments are required to be integer constant expressions.
  unsigned ICEArguments = 0;
  ASTContext::GetBuiltinTypeError Error;
  Context.GetBuiltinType(BuiltinID, Error, &ICEArguments);
  if (Error != ASTContext::GE_None)
    ICEArguments = 0; // Don't diagnose previously diagnosed errors.

  // If any arguments are required to be ICE's, check and diagnose.
  for (unsigned ArgNo = 0; ICEArguments != 0; ++ArgNo) {
    // Skip arguments not required to be ICE's.
    if ((ICEArguments & (1 << ArgNo)) == 0) continue;

    llvm::APSInt Result;
    if (SemaBuiltinConstantArg(TheCall, ArgNo, Result))
      return true;
    ICEArguments &= ~(1 << ArgNo);
  }

  switch (BuiltinID) {
  case Builtin::BI__builtin___CFStringMakeConstantString:
    assert(TheCall->getNumArgs() == 1 &&
           "Wrong # arguments to builtin CFStringMakeConstantString");
    if (CheckObjCString(TheCall->getArg(0)))
      return ExprError();
    break;
  case Builtin::BI__builtin_ms_va_start:
  case Builtin::BI__builtin_stdarg_start:
  case Builtin::BI__builtin_va_start:
    if (SemaBuiltinVAStart(BuiltinID, TheCall))
      return ExprError();
    break;
  case Builtin::BI__va_start: {
    switch (Context.getTargetInfo().getTriple().getArch()) {
    case llvm::Triple::aarch64:
    case llvm::Triple::arm:
    case llvm::Triple::thumb:
      if (SemaBuiltinVAStartARMMicrosoft(TheCall))
        return ExprError();
      break;
    default:
      if (SemaBuiltinVAStart(BuiltinID, TheCall))
        return ExprError();
      break;
    }
    break;
  }

  // The acquire, release, and no fence variants are ARM and AArch64 only.
  case Builtin::BI_interlockedbittestandset_acq:
  case Builtin::BI_interlockedbittestandset_rel:
  case Builtin::BI_interlockedbittestandset_nf:
  case Builtin::BI_interlockedbittestandreset_acq:
  case Builtin::BI_interlockedbittestandreset_rel:
  case Builtin::BI_interlockedbittestandreset_nf:
    if (CheckBuiltinTargetSupport(
            *this, BuiltinID, TheCall,
            {llvm::Triple::arm, llvm::Triple::thumb, llvm::Triple::aarch64}))
      return ExprError();
    break;

  // The 64-bit bittest variants are x64, ARM, and AArch64 only.
  case Builtin::BI_bittest64:
  case Builtin::BI_bittestandcomplement64:
  case Builtin::BI_bittestandreset64:
  case Builtin::BI_bittestandset64:
  case Builtin::BI_interlockedbittestandreset64:
  case Builtin::BI_interlockedbittestandset64:
    if (CheckBuiltinTargetSupport(*this, BuiltinID, TheCall,
                                  {llvm::Triple::x86_64, llvm::Triple::arm,
                                   llvm::Triple::thumb, llvm::Triple::aarch64}))
      return ExprError();
    break;

  case Builtin::BI__builtin_isgreater:
  case Builtin::BI__builtin_isgreaterequal:
  case Builtin::BI__builtin_isless:
  case Builtin::BI__builtin_islessequal:
  case Builtin::BI__builtin_islessgreater:
  case Builtin::BI__builtin_isunordered:
    if (SemaBuiltinUnorderedCompare(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_fpclassify:
    if (SemaBuiltinFPClassification(TheCall, 6))
      return ExprError();
    break;
  case Builtin::BI__builtin_isfinite:
  case Builtin::BI__builtin_isinf:
  case Builtin::BI__builtin_isinf_sign:
  case Builtin::BI__builtin_isnan:
  case Builtin::BI__builtin_isnormal:
  case Builtin::BI__builtin_signbit:
  case Builtin::BI__builtin_signbitf:
  case Builtin::BI__builtin_signbitl:
    if (SemaBuiltinFPClassification(TheCall, 1))
      return ExprError();
    break;
  case Builtin::BI__builtin_shufflevector:
    return SemaBuiltinShuffleVector(TheCall);
    // TheCall will be freed by the smart pointer here, but that's fine, since
    // SemaBuiltinShuffleVector guts it, but then doesn't release it.
1545 case Builtin::BI__builtin_prefetch: 1546 if (SemaBuiltinPrefetch(TheCall)) 1547 return ExprError(); 1548 break; 1549 case Builtin::BI__builtin_alloca_with_align: 1550 if (SemaBuiltinAllocaWithAlign(TheCall)) 1551 return ExprError(); 1552 LLVM_FALLTHROUGH; 1553 case Builtin::BI__builtin_alloca: 1554 Diag(TheCall->getBeginLoc(), diag::warn_alloca) 1555 << TheCall->getDirectCallee(); 1556 break; 1557 case Builtin::BI__assume: 1558 case Builtin::BI__builtin_assume: 1559 if (SemaBuiltinAssume(TheCall)) 1560 return ExprError(); 1561 break; 1562 case Builtin::BI__builtin_assume_aligned: 1563 if (SemaBuiltinAssumeAligned(TheCall)) 1564 return ExprError(); 1565 break; 1566 case Builtin::BI__builtin_dynamic_object_size: 1567 case Builtin::BI__builtin_object_size: 1568 if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 3)) 1569 return ExprError(); 1570 break; 1571 case Builtin::BI__builtin_longjmp: 1572 if (SemaBuiltinLongjmp(TheCall)) 1573 return ExprError(); 1574 break; 1575 case Builtin::BI__builtin_setjmp: 1576 if (SemaBuiltinSetjmp(TheCall)) 1577 return ExprError(); 1578 break; 1579 case Builtin::BI__builtin_classify_type: 1580 if (checkArgCount(*this, TheCall, 1)) return true; 1581 TheCall->setType(Context.IntTy); 1582 break; 1583 case Builtin::BI__builtin_complex: 1584 if (SemaBuiltinComplex(TheCall)) 1585 return ExprError(); 1586 break; 1587 case Builtin::BI__builtin_constant_p: { 1588 if (checkArgCount(*this, TheCall, 1)) return true; 1589 ExprResult Arg = DefaultFunctionArrayLvalueConversion(TheCall->getArg(0)); 1590 if (Arg.isInvalid()) return true; 1591 TheCall->setArg(0, Arg.get()); 1592 TheCall->setType(Context.IntTy); 1593 break; 1594 } 1595 case Builtin::BI__builtin_launder: 1596 return SemaBuiltinLaunder(*this, TheCall); 1597 case Builtin::BI__sync_fetch_and_add: 1598 case Builtin::BI__sync_fetch_and_add_1: 1599 case Builtin::BI__sync_fetch_and_add_2: 1600 case Builtin::BI__sync_fetch_and_add_4: 1601 case Builtin::BI__sync_fetch_and_add_8: 1602 case Builtin::BI__sync_fetch_and_add_16: 1603 case Builtin::BI__sync_fetch_and_sub: 1604 case Builtin::BI__sync_fetch_and_sub_1: 1605 case Builtin::BI__sync_fetch_and_sub_2: 1606 case Builtin::BI__sync_fetch_and_sub_4: 1607 case Builtin::BI__sync_fetch_and_sub_8: 1608 case Builtin::BI__sync_fetch_and_sub_16: 1609 case Builtin::BI__sync_fetch_and_or: 1610 case Builtin::BI__sync_fetch_and_or_1: 1611 case Builtin::BI__sync_fetch_and_or_2: 1612 case Builtin::BI__sync_fetch_and_or_4: 1613 case Builtin::BI__sync_fetch_and_or_8: 1614 case Builtin::BI__sync_fetch_and_or_16: 1615 case Builtin::BI__sync_fetch_and_and: 1616 case Builtin::BI__sync_fetch_and_and_1: 1617 case Builtin::BI__sync_fetch_and_and_2: 1618 case Builtin::BI__sync_fetch_and_and_4: 1619 case Builtin::BI__sync_fetch_and_and_8: 1620 case Builtin::BI__sync_fetch_and_and_16: 1621 case Builtin::BI__sync_fetch_and_xor: 1622 case Builtin::BI__sync_fetch_and_xor_1: 1623 case Builtin::BI__sync_fetch_and_xor_2: 1624 case Builtin::BI__sync_fetch_and_xor_4: 1625 case Builtin::BI__sync_fetch_and_xor_8: 1626 case Builtin::BI__sync_fetch_and_xor_16: 1627 case Builtin::BI__sync_fetch_and_nand: 1628 case Builtin::BI__sync_fetch_and_nand_1: 1629 case Builtin::BI__sync_fetch_and_nand_2: 1630 case Builtin::BI__sync_fetch_and_nand_4: 1631 case Builtin::BI__sync_fetch_and_nand_8: 1632 case Builtin::BI__sync_fetch_and_nand_16: 1633 case Builtin::BI__sync_add_and_fetch: 1634 case Builtin::BI__sync_add_and_fetch_1: 1635 case Builtin::BI__sync_add_and_fetch_2: 1636 case Builtin::BI__sync_add_and_fetch_4: 1637 
case Builtin::BI__sync_add_and_fetch_8: 1638 case Builtin::BI__sync_add_and_fetch_16: 1639 case Builtin::BI__sync_sub_and_fetch: 1640 case Builtin::BI__sync_sub_and_fetch_1: 1641 case Builtin::BI__sync_sub_and_fetch_2: 1642 case Builtin::BI__sync_sub_and_fetch_4: 1643 case Builtin::BI__sync_sub_and_fetch_8: 1644 case Builtin::BI__sync_sub_and_fetch_16: 1645 case Builtin::BI__sync_and_and_fetch: 1646 case Builtin::BI__sync_and_and_fetch_1: 1647 case Builtin::BI__sync_and_and_fetch_2: 1648 case Builtin::BI__sync_and_and_fetch_4: 1649 case Builtin::BI__sync_and_and_fetch_8: 1650 case Builtin::BI__sync_and_and_fetch_16: 1651 case Builtin::BI__sync_or_and_fetch: 1652 case Builtin::BI__sync_or_and_fetch_1: 1653 case Builtin::BI__sync_or_and_fetch_2: 1654 case Builtin::BI__sync_or_and_fetch_4: 1655 case Builtin::BI__sync_or_and_fetch_8: 1656 case Builtin::BI__sync_or_and_fetch_16: 1657 case Builtin::BI__sync_xor_and_fetch: 1658 case Builtin::BI__sync_xor_and_fetch_1: 1659 case Builtin::BI__sync_xor_and_fetch_2: 1660 case Builtin::BI__sync_xor_and_fetch_4: 1661 case Builtin::BI__sync_xor_and_fetch_8: 1662 case Builtin::BI__sync_xor_and_fetch_16: 1663 case Builtin::BI__sync_nand_and_fetch: 1664 case Builtin::BI__sync_nand_and_fetch_1: 1665 case Builtin::BI__sync_nand_and_fetch_2: 1666 case Builtin::BI__sync_nand_and_fetch_4: 1667 case Builtin::BI__sync_nand_and_fetch_8: 1668 case Builtin::BI__sync_nand_and_fetch_16: 1669 case Builtin::BI__sync_val_compare_and_swap: 1670 case Builtin::BI__sync_val_compare_and_swap_1: 1671 case Builtin::BI__sync_val_compare_and_swap_2: 1672 case Builtin::BI__sync_val_compare_and_swap_4: 1673 case Builtin::BI__sync_val_compare_and_swap_8: 1674 case Builtin::BI__sync_val_compare_and_swap_16: 1675 case Builtin::BI__sync_bool_compare_and_swap: 1676 case Builtin::BI__sync_bool_compare_and_swap_1: 1677 case Builtin::BI__sync_bool_compare_and_swap_2: 1678 case Builtin::BI__sync_bool_compare_and_swap_4: 1679 case Builtin::BI__sync_bool_compare_and_swap_8: 1680 case Builtin::BI__sync_bool_compare_and_swap_16: 1681 case Builtin::BI__sync_lock_test_and_set: 1682 case Builtin::BI__sync_lock_test_and_set_1: 1683 case Builtin::BI__sync_lock_test_and_set_2: 1684 case Builtin::BI__sync_lock_test_and_set_4: 1685 case Builtin::BI__sync_lock_test_and_set_8: 1686 case Builtin::BI__sync_lock_test_and_set_16: 1687 case Builtin::BI__sync_lock_release: 1688 case Builtin::BI__sync_lock_release_1: 1689 case Builtin::BI__sync_lock_release_2: 1690 case Builtin::BI__sync_lock_release_4: 1691 case Builtin::BI__sync_lock_release_8: 1692 case Builtin::BI__sync_lock_release_16: 1693 case Builtin::BI__sync_swap: 1694 case Builtin::BI__sync_swap_1: 1695 case Builtin::BI__sync_swap_2: 1696 case Builtin::BI__sync_swap_4: 1697 case Builtin::BI__sync_swap_8: 1698 case Builtin::BI__sync_swap_16: 1699 return SemaBuiltinAtomicOverloaded(TheCallResult); 1700 case Builtin::BI__sync_synchronize: 1701 Diag(TheCall->getBeginLoc(), diag::warn_atomic_implicit_seq_cst) 1702 << TheCall->getCallee()->getSourceRange(); 1703 break; 1704 case Builtin::BI__builtin_nontemporal_load: 1705 case Builtin::BI__builtin_nontemporal_store: 1706 return SemaBuiltinNontemporalOverloaded(TheCallResult); 1707 case Builtin::BI__builtin_memcpy_inline: { 1708 clang::Expr *SizeOp = TheCall->getArg(2); 1709 // We warn about copying to or from `nullptr` pointers when `size` is 1710 // greater than 0. When `size` is value dependent we cannot evaluate its 1711 // value so we bail out. 
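    // (Illustrative, hypothetical call sites:
    //    char Dst[8], Src[8];
    //    __builtin_memcpy_inline(Dst, Src, 8);      // fine
    //    __builtin_memcpy_inline(nullptr, Src, 8);  // null argument diagnosed
    //  with a size of 0 the null arguments are not diagnosed.)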
1712 if (SizeOp->isValueDependent()) 1713 break; 1714 if (!SizeOp->EvaluateKnownConstInt(Context).isNullValue()) { 1715 CheckNonNullArgument(*this, TheCall->getArg(0), TheCall->getExprLoc()); 1716 CheckNonNullArgument(*this, TheCall->getArg(1), TheCall->getExprLoc()); 1717 } 1718 break; 1719 } 1720 #define BUILTIN(ID, TYPE, ATTRS) 1721 #define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \ 1722 case Builtin::BI##ID: \ 1723 return SemaAtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID); 1724 #include "clang/Basic/Builtins.def" 1725 case Builtin::BI__annotation: 1726 if (SemaBuiltinMSVCAnnotation(*this, TheCall)) 1727 return ExprError(); 1728 break; 1729 case Builtin::BI__builtin_annotation: 1730 if (SemaBuiltinAnnotation(*this, TheCall)) 1731 return ExprError(); 1732 break; 1733 case Builtin::BI__builtin_addressof: 1734 if (SemaBuiltinAddressof(*this, TheCall)) 1735 return ExprError(); 1736 break; 1737 case Builtin::BI__builtin_is_aligned: 1738 case Builtin::BI__builtin_align_up: 1739 case Builtin::BI__builtin_align_down: 1740 if (SemaBuiltinAlignment(*this, TheCall, BuiltinID)) 1741 return ExprError(); 1742 break; 1743 case Builtin::BI__builtin_add_overflow: 1744 case Builtin::BI__builtin_sub_overflow: 1745 case Builtin::BI__builtin_mul_overflow: 1746 if (SemaBuiltinOverflow(*this, TheCall, BuiltinID)) 1747 return ExprError(); 1748 break; 1749 case Builtin::BI__builtin_operator_new: 1750 case Builtin::BI__builtin_operator_delete: { 1751 bool IsDelete = BuiltinID == Builtin::BI__builtin_operator_delete; 1752 ExprResult Res = 1753 SemaBuiltinOperatorNewDeleteOverloaded(TheCallResult, IsDelete); 1754 if (Res.isInvalid()) 1755 CorrectDelayedTyposInExpr(TheCallResult.get()); 1756 return Res; 1757 } 1758 case Builtin::BI__builtin_dump_struct: { 1759 // We first want to ensure we are called with 2 arguments 1760 if (checkArgCount(*this, TheCall, 2)) 1761 return ExprError(); 1762 // Ensure that the first argument is of type 'struct XX *' 1763 const Expr *PtrArg = TheCall->getArg(0)->IgnoreParenImpCasts(); 1764 const QualType PtrArgType = PtrArg->getType(); 1765 if (!PtrArgType->isPointerType() || 1766 !PtrArgType->getPointeeType()->isRecordType()) { 1767 Diag(PtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 1768 << PtrArgType << "structure pointer" << 1 << 0 << 3 << 1 << PtrArgType 1769 << "structure pointer"; 1770 return ExprError(); 1771 } 1772 1773 // Ensure that the second argument is of type 'FunctionType' 1774 const Expr *FnPtrArg = TheCall->getArg(1)->IgnoreImpCasts(); 1775 const QualType FnPtrArgType = FnPtrArg->getType(); 1776 if (!FnPtrArgType->isPointerType()) { 1777 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 1778 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2 1779 << FnPtrArgType << "'int (*)(const char *, ...)'"; 1780 return ExprError(); 1781 } 1782 1783 const auto *FuncType = 1784 FnPtrArgType->getPointeeType()->getAs<FunctionType>(); 1785 1786 if (!FuncType) { 1787 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 1788 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2 1789 << FnPtrArgType << "'int (*)(const char *, ...)'"; 1790 return ExprError(); 1791 } 1792 1793 if (const auto *FT = dyn_cast<FunctionProtoType>(FuncType)) { 1794 if (!FT->getNumParams()) { 1795 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 1796 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 1797 << 2 << FnPtrArgType << "'int (*)(const char *, ...)'"; 1798 return ExprError(); 
1799 } 1800 QualType PT = FT->getParamType(0); 1801 if (!FT->isVariadic() || FT->getReturnType() != Context.IntTy || 1802 !PT->isPointerType() || !PT->getPointeeType()->isCharType() || 1803 !PT->getPointeeType().isConstQualified()) { 1804 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 1805 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 1806 << 2 << FnPtrArgType << "'int (*)(const char *, ...)'"; 1807 return ExprError(); 1808 } 1809 } 1810 1811 TheCall->setType(Context.IntTy); 1812 break; 1813 } 1814 case Builtin::BI__builtin_expect_with_probability: { 1815 // We first want to ensure we are called with 3 arguments 1816 if (checkArgCount(*this, TheCall, 3)) 1817 return ExprError(); 1818 // then check probability is constant float in range [0.0, 1.0] 1819 const Expr *ProbArg = TheCall->getArg(2); 1820 SmallVector<PartialDiagnosticAt, 8> Notes; 1821 Expr::EvalResult Eval; 1822 Eval.Diag = &Notes; 1823 if ((!ProbArg->EvaluateAsConstantExpr(Eval, Context)) || 1824 !Eval.Val.isFloat()) { 1825 Diag(ProbArg->getBeginLoc(), diag::err_probability_not_constant_float) 1826 << ProbArg->getSourceRange(); 1827 for (const PartialDiagnosticAt &PDiag : Notes) 1828 Diag(PDiag.first, PDiag.second); 1829 return ExprError(); 1830 } 1831 llvm::APFloat Probability = Eval.Val.getFloat(); 1832 bool LoseInfo = false; 1833 Probability.convert(llvm::APFloat::IEEEdouble(), 1834 llvm::RoundingMode::Dynamic, &LoseInfo); 1835 if (!(Probability >= llvm::APFloat(0.0) && 1836 Probability <= llvm::APFloat(1.0))) { 1837 Diag(ProbArg->getBeginLoc(), diag::err_probability_out_of_range) 1838 << ProbArg->getSourceRange(); 1839 return ExprError(); 1840 } 1841 break; 1842 } 1843 case Builtin::BI__builtin_preserve_access_index: 1844 if (SemaBuiltinPreserveAI(*this, TheCall)) 1845 return ExprError(); 1846 break; 1847 case Builtin::BI__builtin_call_with_static_chain: 1848 if (SemaBuiltinCallWithStaticChain(*this, TheCall)) 1849 return ExprError(); 1850 break; 1851 case Builtin::BI__exception_code: 1852 case Builtin::BI_exception_code: 1853 if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHExceptScope, 1854 diag::err_seh___except_block)) 1855 return ExprError(); 1856 break; 1857 case Builtin::BI__exception_info: 1858 case Builtin::BI_exception_info: 1859 if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHFilterScope, 1860 diag::err_seh___except_filter)) 1861 return ExprError(); 1862 break; 1863 case Builtin::BI__GetExceptionInfo: 1864 if (checkArgCount(*this, TheCall, 1)) 1865 return ExprError(); 1866 1867 if (CheckCXXThrowOperand( 1868 TheCall->getBeginLoc(), 1869 Context.getExceptionObjectType(FDecl->getParamDecl(0)->getType()), 1870 TheCall)) 1871 return ExprError(); 1872 1873 TheCall->setType(Context.VoidPtrTy); 1874 break; 1875 // OpenCL v2.0, s6.13.16 - Pipe functions 1876 case Builtin::BIread_pipe: 1877 case Builtin::BIwrite_pipe: 1878 // Since those two functions are declared with var args, we need a semantic 1879 // check for the argument. 
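    // (Illustrative OpenCL 2.0 usage of the pipe builtins handled here,
    //  assuming a reserve-less read:
    //    kernel void consume(read_only pipe int p) {
    //      int v;
    //      if (read_pipe(p, &v) == 0) { /* one packet consumed */ }
    //    }
    //  write_pipe is checked the same way with a write_only pipe.)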
1880 if (SemaBuiltinRWPipe(*this, TheCall)) 1881 return ExprError(); 1882 break; 1883 case Builtin::BIreserve_read_pipe: 1884 case Builtin::BIreserve_write_pipe: 1885 case Builtin::BIwork_group_reserve_read_pipe: 1886 case Builtin::BIwork_group_reserve_write_pipe: 1887 if (SemaBuiltinReserveRWPipe(*this, TheCall)) 1888 return ExprError(); 1889 break; 1890 case Builtin::BIsub_group_reserve_read_pipe: 1891 case Builtin::BIsub_group_reserve_write_pipe: 1892 if (checkOpenCLSubgroupExt(*this, TheCall) || 1893 SemaBuiltinReserveRWPipe(*this, TheCall)) 1894 return ExprError(); 1895 break; 1896 case Builtin::BIcommit_read_pipe: 1897 case Builtin::BIcommit_write_pipe: 1898 case Builtin::BIwork_group_commit_read_pipe: 1899 case Builtin::BIwork_group_commit_write_pipe: 1900 if (SemaBuiltinCommitRWPipe(*this, TheCall)) 1901 return ExprError(); 1902 break; 1903 case Builtin::BIsub_group_commit_read_pipe: 1904 case Builtin::BIsub_group_commit_write_pipe: 1905 if (checkOpenCLSubgroupExt(*this, TheCall) || 1906 SemaBuiltinCommitRWPipe(*this, TheCall)) 1907 return ExprError(); 1908 break; 1909 case Builtin::BIget_pipe_num_packets: 1910 case Builtin::BIget_pipe_max_packets: 1911 if (SemaBuiltinPipePackets(*this, TheCall)) 1912 return ExprError(); 1913 break; 1914 case Builtin::BIto_global: 1915 case Builtin::BIto_local: 1916 case Builtin::BIto_private: 1917 if (SemaOpenCLBuiltinToAddr(*this, BuiltinID, TheCall)) 1918 return ExprError(); 1919 break; 1920 // OpenCL v2.0, s6.13.17 - Enqueue kernel functions. 1921 case Builtin::BIenqueue_kernel: 1922 if (SemaOpenCLBuiltinEnqueueKernel(*this, TheCall)) 1923 return ExprError(); 1924 break; 1925 case Builtin::BIget_kernel_work_group_size: 1926 case Builtin::BIget_kernel_preferred_work_group_size_multiple: 1927 if (SemaOpenCLBuiltinKernelWorkGroupSize(*this, TheCall)) 1928 return ExprError(); 1929 break; 1930 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange: 1931 case Builtin::BIget_kernel_sub_group_count_for_ndrange: 1932 if (SemaOpenCLBuiltinNDRangeAndBlock(*this, TheCall)) 1933 return ExprError(); 1934 break; 1935 case Builtin::BI__builtin_os_log_format: 1936 Cleanup.setExprNeedsCleanups(true); 1937 LLVM_FALLTHROUGH; 1938 case Builtin::BI__builtin_os_log_format_buffer_size: 1939 if (SemaBuiltinOSLogFormat(TheCall)) 1940 return ExprError(); 1941 break; 1942 case Builtin::BI__builtin_frame_address: 1943 case Builtin::BI__builtin_return_address: { 1944 if (SemaBuiltinConstantArgRange(TheCall, 0, 0, 0xFFFF)) 1945 return ExprError(); 1946 1947 // -Wframe-address warning if non-zero passed to builtin 1948 // return/frame address. 1949 Expr::EvalResult Result; 1950 if (!TheCall->getArg(0)->isValueDependent() && 1951 TheCall->getArg(0)->EvaluateAsInt(Result, getASTContext()) && 1952 Result.Val.getInt() != 0) 1953 Diag(TheCall->getBeginLoc(), diag::warn_frame_address) 1954 << ((BuiltinID == Builtin::BI__builtin_return_address) 1955 ? 
"__builtin_return_address" 1956 : "__builtin_frame_address") 1957 << TheCall->getSourceRange(); 1958 break; 1959 } 1960 1961 case Builtin::BI__builtin_matrix_transpose: 1962 return SemaBuiltinMatrixTranspose(TheCall, TheCallResult); 1963 1964 case Builtin::BI__builtin_matrix_column_major_load: 1965 return SemaBuiltinMatrixColumnMajorLoad(TheCall, TheCallResult); 1966 1967 case Builtin::BI__builtin_matrix_column_major_store: 1968 return SemaBuiltinMatrixColumnMajorStore(TheCall, TheCallResult); 1969 1970 case Builtin::BI__builtin_get_device_side_mangled_name: { 1971 auto Check = [](CallExpr *TheCall) { 1972 if (TheCall->getNumArgs() != 1) 1973 return false; 1974 auto *DRE = dyn_cast<DeclRefExpr>(TheCall->getArg(0)->IgnoreImpCasts()); 1975 if (!DRE) 1976 return false; 1977 auto *D = DRE->getDecl(); 1978 if (!isa<FunctionDecl>(D) && !isa<VarDecl>(D)) 1979 return false; 1980 return D->hasAttr<CUDAGlobalAttr>() || D->hasAttr<CUDADeviceAttr>() || 1981 D->hasAttr<CUDAConstantAttr>() || D->hasAttr<HIPManagedAttr>(); 1982 }; 1983 if (!Check(TheCall)) { 1984 Diag(TheCall->getBeginLoc(), 1985 diag::err_hip_invalid_args_builtin_mangled_name); 1986 return ExprError(); 1987 } 1988 } 1989 } 1990 1991 // Since the target specific builtins for each arch overlap, only check those 1992 // of the arch we are compiling for. 1993 if (Context.BuiltinInfo.isTSBuiltin(BuiltinID)) { 1994 if (Context.BuiltinInfo.isAuxBuiltinID(BuiltinID)) { 1995 assert(Context.getAuxTargetInfo() && 1996 "Aux Target Builtin, but not an aux target?"); 1997 1998 if (CheckTSBuiltinFunctionCall( 1999 *Context.getAuxTargetInfo(), 2000 Context.BuiltinInfo.getAuxBuiltinID(BuiltinID), TheCall)) 2001 return ExprError(); 2002 } else { 2003 if (CheckTSBuiltinFunctionCall(Context.getTargetInfo(), BuiltinID, 2004 TheCall)) 2005 return ExprError(); 2006 } 2007 } 2008 2009 return TheCallResult; 2010 } 2011 2012 // Get the valid immediate range for the specified NEON type code. 2013 static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) { 2014 NeonTypeFlags Type(t); 2015 int IsQuad = ForceQuad ? true : Type.isQuad(); 2016 switch (Type.getEltType()) { 2017 case NeonTypeFlags::Int8: 2018 case NeonTypeFlags::Poly8: 2019 return shift ? 7 : (8 << IsQuad) - 1; 2020 case NeonTypeFlags::Int16: 2021 case NeonTypeFlags::Poly16: 2022 return shift ? 15 : (4 << IsQuad) - 1; 2023 case NeonTypeFlags::Int32: 2024 return shift ? 31 : (2 << IsQuad) - 1; 2025 case NeonTypeFlags::Int64: 2026 case NeonTypeFlags::Poly64: 2027 return shift ? 63 : (1 << IsQuad) - 1; 2028 case NeonTypeFlags::Poly128: 2029 return shift ? 127 : (1 << IsQuad) - 1; 2030 case NeonTypeFlags::Float16: 2031 assert(!shift && "cannot shift float types!"); 2032 return (4 << IsQuad) - 1; 2033 case NeonTypeFlags::Float32: 2034 assert(!shift && "cannot shift float types!"); 2035 return (2 << IsQuad) - 1; 2036 case NeonTypeFlags::Float64: 2037 assert(!shift && "cannot shift float types!"); 2038 return (1 << IsQuad) - 1; 2039 case NeonTypeFlags::BFloat16: 2040 assert(!shift && "cannot shift float types!"); 2041 return (4 << IsQuad) - 1; 2042 } 2043 llvm_unreachable("Invalid NeonTypeFlag!"); 2044 } 2045 2046 /// getNeonEltType - Return the QualType corresponding to the elements of 2047 /// the vector type specified by the NeonTypeFlags. This is used to check 2048 /// the pointer arguments for Neon load/store intrinsics. 
2049 static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context, 2050 bool IsPolyUnsigned, bool IsInt64Long) { 2051 switch (Flags.getEltType()) { 2052 case NeonTypeFlags::Int8: 2053 return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy; 2054 case NeonTypeFlags::Int16: 2055 return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy; 2056 case NeonTypeFlags::Int32: 2057 return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy; 2058 case NeonTypeFlags::Int64: 2059 if (IsInt64Long) 2060 return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy; 2061 else 2062 return Flags.isUnsigned() ? Context.UnsignedLongLongTy 2063 : Context.LongLongTy; 2064 case NeonTypeFlags::Poly8: 2065 return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy; 2066 case NeonTypeFlags::Poly16: 2067 return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy; 2068 case NeonTypeFlags::Poly64: 2069 if (IsInt64Long) 2070 return Context.UnsignedLongTy; 2071 else 2072 return Context.UnsignedLongLongTy; 2073 case NeonTypeFlags::Poly128: 2074 break; 2075 case NeonTypeFlags::Float16: 2076 return Context.HalfTy; 2077 case NeonTypeFlags::Float32: 2078 return Context.FloatTy; 2079 case NeonTypeFlags::Float64: 2080 return Context.DoubleTy; 2081 case NeonTypeFlags::BFloat16: 2082 return Context.BFloat16Ty; 2083 } 2084 llvm_unreachable("Invalid NeonTypeFlag!"); 2085 } 2086 2087 bool Sema::CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 2088 // Range check SVE intrinsics that take immediate values. 2089 SmallVector<std::tuple<int,int,int>, 3> ImmChecks; 2090 2091 switch (BuiltinID) { 2092 default: 2093 return false; 2094 #define GET_SVE_IMMEDIATE_CHECK 2095 #include "clang/Basic/arm_sve_sema_rangechecks.inc" 2096 #undef GET_SVE_IMMEDIATE_CHECK 2097 } 2098 2099 // Perform all the immediate checks for this builtin call. 2100 bool HasError = false; 2101 for (auto &I : ImmChecks) { 2102 int ArgNum, CheckTy, ElementSizeInBits; 2103 std::tie(ArgNum, CheckTy, ElementSizeInBits) = I; 2104 2105 typedef bool(*OptionSetCheckFnTy)(int64_t Value); 2106 2107 // Function that checks whether the operand (ArgNum) is an immediate 2108 // that is one of the predefined values. 2109 auto CheckImmediateInSet = [&](OptionSetCheckFnTy CheckImm, 2110 int ErrDiag) -> bool { 2111 // We can't check the value of a dependent argument. 2112 Expr *Arg = TheCall->getArg(ArgNum); 2113 if (Arg->isTypeDependent() || Arg->isValueDependent()) 2114 return false; 2115 2116 // Check constant-ness first. 
2117 llvm::APSInt Imm; 2118 if (SemaBuiltinConstantArg(TheCall, ArgNum, Imm)) 2119 return true; 2120 2121 if (!CheckImm(Imm.getSExtValue())) 2122 return Diag(TheCall->getBeginLoc(), ErrDiag) << Arg->getSourceRange(); 2123 return false; 2124 }; 2125 2126 switch ((SVETypeFlags::ImmCheckType)CheckTy) { 2127 case SVETypeFlags::ImmCheck0_31: 2128 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 31)) 2129 HasError = true; 2130 break; 2131 case SVETypeFlags::ImmCheck0_13: 2132 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 13)) 2133 HasError = true; 2134 break; 2135 case SVETypeFlags::ImmCheck1_16: 2136 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 16)) 2137 HasError = true; 2138 break; 2139 case SVETypeFlags::ImmCheck0_7: 2140 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 7)) 2141 HasError = true; 2142 break; 2143 case SVETypeFlags::ImmCheckExtract: 2144 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2145 (2048 / ElementSizeInBits) - 1)) 2146 HasError = true; 2147 break; 2148 case SVETypeFlags::ImmCheckShiftRight: 2149 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, ElementSizeInBits)) 2150 HasError = true; 2151 break; 2152 case SVETypeFlags::ImmCheckShiftRightNarrow: 2153 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 2154 ElementSizeInBits / 2)) 2155 HasError = true; 2156 break; 2157 case SVETypeFlags::ImmCheckShiftLeft: 2158 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2159 ElementSizeInBits - 1)) 2160 HasError = true; 2161 break; 2162 case SVETypeFlags::ImmCheckLaneIndex: 2163 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2164 (128 / (1 * ElementSizeInBits)) - 1)) 2165 HasError = true; 2166 break; 2167 case SVETypeFlags::ImmCheckLaneIndexCompRotate: 2168 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2169 (128 / (2 * ElementSizeInBits)) - 1)) 2170 HasError = true; 2171 break; 2172 case SVETypeFlags::ImmCheckLaneIndexDot: 2173 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2174 (128 / (4 * ElementSizeInBits)) - 1)) 2175 HasError = true; 2176 break; 2177 case SVETypeFlags::ImmCheckComplexRot90_270: 2178 if (CheckImmediateInSet([](int64_t V) { return V == 90 || V == 270; }, 2179 diag::err_rotation_argument_to_cadd)) 2180 HasError = true; 2181 break; 2182 case SVETypeFlags::ImmCheckComplexRotAll90: 2183 if (CheckImmediateInSet( 2184 [](int64_t V) { 2185 return V == 0 || V == 90 || V == 180 || V == 270; 2186 }, 2187 diag::err_rotation_argument_to_cmla)) 2188 HasError = true; 2189 break; 2190 case SVETypeFlags::ImmCheck0_1: 2191 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 1)) 2192 HasError = true; 2193 break; 2194 case SVETypeFlags::ImmCheck0_2: 2195 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2)) 2196 HasError = true; 2197 break; 2198 case SVETypeFlags::ImmCheck0_3: 2199 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 3)) 2200 HasError = true; 2201 break; 2202 } 2203 } 2204 2205 return HasError; 2206 } 2207 2208 bool Sema::CheckNeonBuiltinFunctionCall(const TargetInfo &TI, 2209 unsigned BuiltinID, CallExpr *TheCall) { 2210 llvm::APSInt Result; 2211 uint64_t mask = 0; 2212 unsigned TV = 0; 2213 int PtrArgNum = -1; 2214 bool HasConstPtr = false; 2215 switch (BuiltinID) { 2216 #define GET_NEON_OVERLOAD_CHECK 2217 #include "clang/Basic/arm_neon.inc" 2218 #include "clang/Basic/arm_fp16.inc" 2219 #undef GET_NEON_OVERLOAD_CHECK 2220 } 2221 2222 // For NEON intrinsics which are overloaded on vector element type, validate 2223 // the immediate which specifies which variant to emit. 
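  // (Illustrative, assuming the usual arm_neon.h lowering: a call such as
  //    int8x8_t v = vld1_s8(ptr);
  //  expands to an overloaded __builtin_neon_vld1_v call whose trailing
  //  constant encodes the element type; that trailing constant is what gets
  //  validated against 'mask' here.)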
2224 unsigned ImmArg = TheCall->getNumArgs()-1; 2225 if (mask) { 2226 if (SemaBuiltinConstantArg(TheCall, ImmArg, Result)) 2227 return true; 2228 2229 TV = Result.getLimitedValue(64); 2230 if ((TV > 63) || (mask & (1ULL << TV)) == 0) 2231 return Diag(TheCall->getBeginLoc(), diag::err_invalid_neon_type_code) 2232 << TheCall->getArg(ImmArg)->getSourceRange(); 2233 } 2234 2235 if (PtrArgNum >= 0) { 2236 // Check that pointer arguments have the specified type. 2237 Expr *Arg = TheCall->getArg(PtrArgNum); 2238 if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) 2239 Arg = ICE->getSubExpr(); 2240 ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg); 2241 QualType RHSTy = RHS.get()->getType(); 2242 2243 llvm::Triple::ArchType Arch = TI.getTriple().getArch(); 2244 bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 || 2245 Arch == llvm::Triple::aarch64_32 || 2246 Arch == llvm::Triple::aarch64_be; 2247 bool IsInt64Long = TI.getInt64Type() == TargetInfo::SignedLong; 2248 QualType EltTy = 2249 getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long); 2250 if (HasConstPtr) 2251 EltTy = EltTy.withConst(); 2252 QualType LHSTy = Context.getPointerType(EltTy); 2253 AssignConvertType ConvTy; 2254 ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS); 2255 if (RHS.isInvalid()) 2256 return true; 2257 if (DiagnoseAssignmentResult(ConvTy, Arg->getBeginLoc(), LHSTy, RHSTy, 2258 RHS.get(), AA_Assigning)) 2259 return true; 2260 } 2261 2262 // For NEON intrinsics which take an immediate value as part of the 2263 // instruction, range check them here. 2264 unsigned i = 0, l = 0, u = 0; 2265 switch (BuiltinID) { 2266 default: 2267 return false; 2268 #define GET_NEON_IMMEDIATE_CHECK 2269 #include "clang/Basic/arm_neon.inc" 2270 #include "clang/Basic/arm_fp16.inc" 2271 #undef GET_NEON_IMMEDIATE_CHECK 2272 } 2273 2274 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); 2275 } 2276 2277 bool Sema::CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 2278 switch (BuiltinID) { 2279 default: 2280 return false; 2281 #include "clang/Basic/arm_mve_builtin_sema.inc" 2282 } 2283 } 2284 2285 bool Sema::CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 2286 CallExpr *TheCall) { 2287 bool Err = false; 2288 switch (BuiltinID) { 2289 default: 2290 return false; 2291 #include "clang/Basic/arm_cde_builtin_sema.inc" 2292 } 2293 2294 if (Err) 2295 return true; 2296 2297 return CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), /*WantCDE*/ true); 2298 } 2299 2300 bool Sema::CheckARMCoprocessorImmediate(const TargetInfo &TI, 2301 const Expr *CoprocArg, bool WantCDE) { 2302 if (isConstantEvaluated()) 2303 return false; 2304 2305 // We can't check the value of a dependent argument. 
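  // (e.g., in a hypothetical
  //    template <int CP> void f() { __builtin_arm_mcr(CP, 0, 0, 0, 0, 0); }
  //  the coprocessor number is only known once the template is instantiated.)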
2306 if (CoprocArg->isTypeDependent() || CoprocArg->isValueDependent()) 2307 return false; 2308 2309 llvm::APSInt CoprocNoAP = *CoprocArg->getIntegerConstantExpr(Context); 2310 int64_t CoprocNo = CoprocNoAP.getExtValue(); 2311 assert(CoprocNo >= 0 && "Coprocessor immediate must be non-negative"); 2312 2313 uint32_t CDECoprocMask = TI.getARMCDECoprocMask(); 2314 bool IsCDECoproc = CoprocNo <= 7 && (CDECoprocMask & (1 << CoprocNo)); 2315 2316 if (IsCDECoproc != WantCDE) 2317 return Diag(CoprocArg->getBeginLoc(), diag::err_arm_invalid_coproc) 2318 << (int)CoprocNo << (int)WantCDE << CoprocArg->getSourceRange(); 2319 2320 return false; 2321 } 2322 2323 bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, 2324 unsigned MaxWidth) { 2325 assert((BuiltinID == ARM::BI__builtin_arm_ldrex || 2326 BuiltinID == ARM::BI__builtin_arm_ldaex || 2327 BuiltinID == ARM::BI__builtin_arm_strex || 2328 BuiltinID == ARM::BI__builtin_arm_stlex || 2329 BuiltinID == AArch64::BI__builtin_arm_ldrex || 2330 BuiltinID == AArch64::BI__builtin_arm_ldaex || 2331 BuiltinID == AArch64::BI__builtin_arm_strex || 2332 BuiltinID == AArch64::BI__builtin_arm_stlex) && 2333 "unexpected ARM builtin"); 2334 bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex || 2335 BuiltinID == ARM::BI__builtin_arm_ldaex || 2336 BuiltinID == AArch64::BI__builtin_arm_ldrex || 2337 BuiltinID == AArch64::BI__builtin_arm_ldaex; 2338 2339 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 2340 2341 // Ensure that we have the proper number of arguments. 2342 if (checkArgCount(*this, TheCall, IsLdrex ? 1 : 2)) 2343 return true; 2344 2345 // Inspect the pointer argument of the atomic builtin. This should always be 2346 // a pointer type, whose element is an integral scalar or pointer type. 2347 // Because it is a pointer type, we don't have to worry about any implicit 2348 // casts here. 2349 Expr *PointerArg = TheCall->getArg(IsLdrex ? 0 : 1); 2350 ExprResult PointerArgRes = DefaultFunctionArrayLvalueConversion(PointerArg); 2351 if (PointerArgRes.isInvalid()) 2352 return true; 2353 PointerArg = PointerArgRes.get(); 2354 2355 const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>(); 2356 if (!pointerType) { 2357 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer) 2358 << PointerArg->getType() << PointerArg->getSourceRange(); 2359 return true; 2360 } 2361 2362 // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next 2363 // task is to insert the appropriate casts into the AST. First work out just 2364 // what the appropriate type is. 2365 QualType ValType = pointerType->getPointeeType(); 2366 QualType AddrType = ValType.getUnqualifiedType().withVolatile(); 2367 if (IsLdrex) 2368 AddrType.addConst(); 2369 2370 // Issue a warning if the cast is dodgy. 2371 CastKind CastNeeded = CK_NoOp; 2372 if (!AddrType.isAtLeastAsQualifiedAs(ValType)) { 2373 CastNeeded = CK_BitCast; 2374 Diag(DRE->getBeginLoc(), diag::ext_typecheck_convert_discards_qualifiers) 2375 << PointerArg->getType() << Context.getPointerType(AddrType) 2376 << AA_Passing << PointerArg->getSourceRange(); 2377 } 2378 2379 // Finally, do the cast and replace the argument with the corrected version. 2380 AddrType = Context.getPointerType(AddrType); 2381 PointerArgRes = ImpCastExprToType(PointerArg, AddrType, CastNeeded); 2382 if (PointerArgRes.isInvalid()) 2383 return true; 2384 PointerArg = PointerArgRes.get(); 2385 2386 TheCall->setArg(IsLdrex ? 
0 : 1, PointerArg); 2387 2388 // In general, we allow ints, floats and pointers to be loaded and stored. 2389 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 2390 !ValType->isBlockPointerType() && !ValType->isFloatingType()) { 2391 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intfltptr) 2392 << PointerArg->getType() << PointerArg->getSourceRange(); 2393 return true; 2394 } 2395 2396 // But ARM doesn't have instructions to deal with 128-bit versions. 2397 if (Context.getTypeSize(ValType) > MaxWidth) { 2398 assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate"); 2399 Diag(DRE->getBeginLoc(), diag::err_atomic_exclusive_builtin_pointer_size) 2400 << PointerArg->getType() << PointerArg->getSourceRange(); 2401 return true; 2402 } 2403 2404 switch (ValType.getObjCLifetime()) { 2405 case Qualifiers::OCL_None: 2406 case Qualifiers::OCL_ExplicitNone: 2407 // okay 2408 break; 2409 2410 case Qualifiers::OCL_Weak: 2411 case Qualifiers::OCL_Strong: 2412 case Qualifiers::OCL_Autoreleasing: 2413 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership) 2414 << ValType << PointerArg->getSourceRange(); 2415 return true; 2416 } 2417 2418 if (IsLdrex) { 2419 TheCall->setType(ValType); 2420 return false; 2421 } 2422 2423 // Initialize the argument to be stored. 2424 ExprResult ValArg = TheCall->getArg(0); 2425 InitializedEntity Entity = InitializedEntity::InitializeParameter( 2426 Context, ValType, /*consume*/ false); 2427 ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg); 2428 if (ValArg.isInvalid()) 2429 return true; 2430 TheCall->setArg(0, ValArg.get()); 2431 2432 // __builtin_arm_strex always returns an int. It's marked as such in the .def, 2433 // but the custom checker bypasses all default analysis. 2434 TheCall->setType(Context.IntTy); 2435 return false; 2436 } 2437 2438 bool Sema::CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 2439 CallExpr *TheCall) { 2440 if (BuiltinID == ARM::BI__builtin_arm_ldrex || 2441 BuiltinID == ARM::BI__builtin_arm_ldaex || 2442 BuiltinID == ARM::BI__builtin_arm_strex || 2443 BuiltinID == ARM::BI__builtin_arm_stlex) { 2444 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64); 2445 } 2446 2447 if (BuiltinID == ARM::BI__builtin_arm_prefetch) { 2448 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 2449 SemaBuiltinConstantArgRange(TheCall, 2, 0, 1); 2450 } 2451 2452 if (BuiltinID == ARM::BI__builtin_arm_rsr64 || 2453 BuiltinID == ARM::BI__builtin_arm_wsr64) 2454 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 3, false); 2455 2456 if (BuiltinID == ARM::BI__builtin_arm_rsr || 2457 BuiltinID == ARM::BI__builtin_arm_rsrp || 2458 BuiltinID == ARM::BI__builtin_arm_wsr || 2459 BuiltinID == ARM::BI__builtin_arm_wsrp) 2460 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 2461 2462 if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall)) 2463 return true; 2464 if (CheckMVEBuiltinFunctionCall(BuiltinID, TheCall)) 2465 return true; 2466 if (CheckCDEBuiltinFunctionCall(TI, BuiltinID, TheCall)) 2467 return true; 2468 2469 // For intrinsics which take an immediate value as part of the instruction, 2470 // range check them here. 2471 // FIXME: VFP Intrinsics should error if VFP not present. 
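  // (Illustrative: given the ranges below, a hypothetical call
  //    __builtin_arm_ssat(x, 33);
  //  is rejected because the saturation position must lie in [1, 32].)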
2472 switch (BuiltinID) { 2473 default: return false; 2474 case ARM::BI__builtin_arm_ssat: 2475 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 32); 2476 case ARM::BI__builtin_arm_usat: 2477 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31); 2478 case ARM::BI__builtin_arm_ssat16: 2479 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16); 2480 case ARM::BI__builtin_arm_usat16: 2481 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 2482 case ARM::BI__builtin_arm_vcvtr_f: 2483 case ARM::BI__builtin_arm_vcvtr_d: 2484 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 2485 case ARM::BI__builtin_arm_dmb: 2486 case ARM::BI__builtin_arm_dsb: 2487 case ARM::BI__builtin_arm_isb: 2488 case ARM::BI__builtin_arm_dbg: 2489 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15); 2490 case ARM::BI__builtin_arm_cdp: 2491 case ARM::BI__builtin_arm_cdp2: 2492 case ARM::BI__builtin_arm_mcr: 2493 case ARM::BI__builtin_arm_mcr2: 2494 case ARM::BI__builtin_arm_mrc: 2495 case ARM::BI__builtin_arm_mrc2: 2496 case ARM::BI__builtin_arm_mcrr: 2497 case ARM::BI__builtin_arm_mcrr2: 2498 case ARM::BI__builtin_arm_mrrc: 2499 case ARM::BI__builtin_arm_mrrc2: 2500 case ARM::BI__builtin_arm_ldc: 2501 case ARM::BI__builtin_arm_ldcl: 2502 case ARM::BI__builtin_arm_ldc2: 2503 case ARM::BI__builtin_arm_ldc2l: 2504 case ARM::BI__builtin_arm_stc: 2505 case ARM::BI__builtin_arm_stcl: 2506 case ARM::BI__builtin_arm_stc2: 2507 case ARM::BI__builtin_arm_stc2l: 2508 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15) || 2509 CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), 2510 /*WantCDE*/ false); 2511 } 2512 } 2513 2514 bool Sema::CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, 2515 unsigned BuiltinID, 2516 CallExpr *TheCall) { 2517 if (BuiltinID == AArch64::BI__builtin_arm_ldrex || 2518 BuiltinID == AArch64::BI__builtin_arm_ldaex || 2519 BuiltinID == AArch64::BI__builtin_arm_strex || 2520 BuiltinID == AArch64::BI__builtin_arm_stlex) { 2521 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128); 2522 } 2523 2524 if (BuiltinID == AArch64::BI__builtin_arm_prefetch) { 2525 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 2526 SemaBuiltinConstantArgRange(TheCall, 2, 0, 2) || 2527 SemaBuiltinConstantArgRange(TheCall, 3, 0, 1) || 2528 SemaBuiltinConstantArgRange(TheCall, 4, 0, 1); 2529 } 2530 2531 if (BuiltinID == AArch64::BI__builtin_arm_rsr64 || 2532 BuiltinID == AArch64::BI__builtin_arm_wsr64) 2533 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 2534 2535 // Memory Tagging Extensions (MTE) Intrinsics 2536 if (BuiltinID == AArch64::BI__builtin_arm_irg || 2537 BuiltinID == AArch64::BI__builtin_arm_addg || 2538 BuiltinID == AArch64::BI__builtin_arm_gmi || 2539 BuiltinID == AArch64::BI__builtin_arm_ldg || 2540 BuiltinID == AArch64::BI__builtin_arm_stg || 2541 BuiltinID == AArch64::BI__builtin_arm_subp) { 2542 return SemaBuiltinARMMemoryTaggingCall(BuiltinID, TheCall); 2543 } 2544 2545 if (BuiltinID == AArch64::BI__builtin_arm_rsr || 2546 BuiltinID == AArch64::BI__builtin_arm_rsrp || 2547 BuiltinID == AArch64::BI__builtin_arm_wsr || 2548 BuiltinID == AArch64::BI__builtin_arm_wsrp) 2549 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 2550 2551 // Only check the valid encoding range. Any constant in this range would be 2552 // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw 2553 // an exception for incorrect registers. This matches MSVC behavior. 
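  // (Illustrative: any constant in [0, 0x7fff] is accepted here, e.g. a
  //  hypothetical _ReadStatusReg(0x5401); a value outside that range, such as
  //  0x8000, is diagnosed instead of being left to the hardware.)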
2554 if (BuiltinID == AArch64::BI_ReadStatusReg || 2555 BuiltinID == AArch64::BI_WriteStatusReg) 2556 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0x7fff); 2557 2558 if (BuiltinID == AArch64::BI__getReg) 2559 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31); 2560 2561 if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall)) 2562 return true; 2563 2564 if (CheckSVEBuiltinFunctionCall(BuiltinID, TheCall)) 2565 return true; 2566 2567 // For intrinsics which take an immediate value as part of the instruction, 2568 // range check them here. 2569 unsigned i = 0, l = 0, u = 0; 2570 switch (BuiltinID) { 2571 default: return false; 2572 case AArch64::BI__builtin_arm_dmb: 2573 case AArch64::BI__builtin_arm_dsb: 2574 case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break; 2575 case AArch64::BI__builtin_arm_tcancel: l = 0; u = 65535; break; 2576 } 2577 2578 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); 2579 } 2580 2581 static bool isValidBPFPreserveFieldInfoArg(Expr *Arg) { 2582 if (Arg->getType()->getAsPlaceholderType()) 2583 return false; 2584 2585 // The first argument needs to be a record field access. 2586 // If it is an array element access, we delay decision 2587 // to BPF backend to check whether the access is a 2588 // field access or not. 2589 return (Arg->IgnoreParens()->getObjectKind() == OK_BitField || 2590 dyn_cast<MemberExpr>(Arg->IgnoreParens()) || 2591 dyn_cast<ArraySubscriptExpr>(Arg->IgnoreParens())); 2592 } 2593 2594 static bool isEltOfVectorTy(ASTContext &Context, CallExpr *Call, Sema &S, 2595 QualType VectorTy, QualType EltTy) { 2596 QualType VectorEltTy = VectorTy->castAs<VectorType>()->getElementType(); 2597 if (!Context.hasSameType(VectorEltTy, EltTy)) { 2598 S.Diag(Call->getBeginLoc(), diag::err_typecheck_call_different_arg_types) 2599 << Call->getSourceRange() << VectorEltTy << EltTy; 2600 return false; 2601 } 2602 return true; 2603 } 2604 2605 static bool isValidBPFPreserveTypeInfoArg(Expr *Arg) { 2606 QualType ArgType = Arg->getType(); 2607 if (ArgType->getAsPlaceholderType()) 2608 return false; 2609 2610 // for TYPE_EXISTENCE/TYPE_SIZEOF reloc type 2611 // format: 2612 // 1. __builtin_preserve_type_info(*(<type> *)0, flag); 2613 // 2. <type> var; 2614 // __builtin_preserve_type_info(var, flag); 2615 if (!dyn_cast<DeclRefExpr>(Arg->IgnoreParens()) && 2616 !dyn_cast<UnaryOperator>(Arg->IgnoreParens())) 2617 return false; 2618 2619 // Typedef type. 2620 if (ArgType->getAs<TypedefType>()) 2621 return true; 2622 2623 // Record type or Enum type. 
2624 const Type *Ty = ArgType->getUnqualifiedDesugaredType(); 2625 if (const auto *RT = Ty->getAs<RecordType>()) { 2626 if (!RT->getDecl()->getDeclName().isEmpty()) 2627 return true; 2628 } else if (const auto *ET = Ty->getAs<EnumType>()) { 2629 if (!ET->getDecl()->getDeclName().isEmpty()) 2630 return true; 2631 } 2632 2633 return false; 2634 } 2635 2636 static bool isValidBPFPreserveEnumValueArg(Expr *Arg) { 2637 QualType ArgType = Arg->getType(); 2638 if (ArgType->getAsPlaceholderType()) 2639 return false; 2640 2641 // for ENUM_VALUE_EXISTENCE/ENUM_VALUE reloc type 2642 // format: 2643 // __builtin_preserve_enum_value(*(<enum_type> *)<enum_value>, 2644 // flag); 2645 const auto *UO = dyn_cast<UnaryOperator>(Arg->IgnoreParens()); 2646 if (!UO) 2647 return false; 2648 2649 const auto *CE = dyn_cast<CStyleCastExpr>(UO->getSubExpr()); 2650 if (!CE) 2651 return false; 2652 if (CE->getCastKind() != CK_IntegralToPointer && 2653 CE->getCastKind() != CK_NullToPointer) 2654 return false; 2655 2656 // The integer must be from an EnumConstantDecl. 2657 const auto *DR = dyn_cast<DeclRefExpr>(CE->getSubExpr()); 2658 if (!DR) 2659 return false; 2660 2661 const EnumConstantDecl *Enumerator = 2662 dyn_cast<EnumConstantDecl>(DR->getDecl()); 2663 if (!Enumerator) 2664 return false; 2665 2666 // The type must be EnumType. 2667 const Type *Ty = ArgType->getUnqualifiedDesugaredType(); 2668 const auto *ET = Ty->getAs<EnumType>(); 2669 if (!ET) 2670 return false; 2671 2672 // The enum value must be supported. 2673 for (auto *EDI : ET->getDecl()->enumerators()) { 2674 if (EDI == Enumerator) 2675 return true; 2676 } 2677 2678 return false; 2679 } 2680 2681 bool Sema::CheckBPFBuiltinFunctionCall(unsigned BuiltinID, 2682 CallExpr *TheCall) { 2683 assert((BuiltinID == BPF::BI__builtin_preserve_field_info || 2684 BuiltinID == BPF::BI__builtin_btf_type_id || 2685 BuiltinID == BPF::BI__builtin_preserve_type_info || 2686 BuiltinID == BPF::BI__builtin_preserve_enum_value) && 2687 "unexpected BPF builtin"); 2688 2689 if (checkArgCount(*this, TheCall, 2)) 2690 return true; 2691 2692 // The second argument needs to be a constant int 2693 Expr *Arg = TheCall->getArg(1); 2694 Optional<llvm::APSInt> Value = Arg->getIntegerConstantExpr(Context); 2695 diag::kind kind; 2696 if (!Value) { 2697 if (BuiltinID == BPF::BI__builtin_preserve_field_info) 2698 kind = diag::err_preserve_field_info_not_const; 2699 else if (BuiltinID == BPF::BI__builtin_btf_type_id) 2700 kind = diag::err_btf_type_id_not_const; 2701 else if (BuiltinID == BPF::BI__builtin_preserve_type_info) 2702 kind = diag::err_preserve_type_info_not_const; 2703 else 2704 kind = diag::err_preserve_enum_value_not_const; 2705 Diag(Arg->getBeginLoc(), kind) << 2 << Arg->getSourceRange(); 2706 return true; 2707 } 2708 2709 // The first argument 2710 Arg = TheCall->getArg(0); 2711 bool InvalidArg = false; 2712 bool ReturnUnsignedInt = true; 2713 if (BuiltinID == BPF::BI__builtin_preserve_field_info) { 2714 if (!isValidBPFPreserveFieldInfoArg(Arg)) { 2715 InvalidArg = true; 2716 kind = diag::err_preserve_field_info_not_field; 2717 } 2718 } else if (BuiltinID == BPF::BI__builtin_preserve_type_info) { 2719 if (!isValidBPFPreserveTypeInfoArg(Arg)) { 2720 InvalidArg = true; 2721 kind = diag::err_preserve_type_info_invalid; 2722 } 2723 } else if (BuiltinID == BPF::BI__builtin_preserve_enum_value) { 2724 if (!isValidBPFPreserveEnumValueArg(Arg)) { 2725 InvalidArg = true; 2726 kind = diag::err_preserve_enum_value_invalid; 2727 } 2728 ReturnUnsignedInt = false; 2729 } else if (BuiltinID 
== BPF::BI__builtin_btf_type_id) { 2730 ReturnUnsignedInt = false; 2731 } 2732 2733 if (InvalidArg) { 2734 Diag(Arg->getBeginLoc(), kind) << 1 << Arg->getSourceRange(); 2735 return true; 2736 } 2737 2738 if (ReturnUnsignedInt) 2739 TheCall->setType(Context.UnsignedIntTy); 2740 else 2741 TheCall->setType(Context.UnsignedLongTy); 2742 return false; 2743 } 2744 2745 bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) { 2746 struct ArgInfo { 2747 uint8_t OpNum; 2748 bool IsSigned; 2749 uint8_t BitWidth; 2750 uint8_t Align; 2751 }; 2752 struct BuiltinInfo { 2753 unsigned BuiltinID; 2754 ArgInfo Infos[2]; 2755 }; 2756 2757 static BuiltinInfo Infos[] = { 2758 { Hexagon::BI__builtin_circ_ldd, {{ 3, true, 4, 3 }} }, 2759 { Hexagon::BI__builtin_circ_ldw, {{ 3, true, 4, 2 }} }, 2760 { Hexagon::BI__builtin_circ_ldh, {{ 3, true, 4, 1 }} }, 2761 { Hexagon::BI__builtin_circ_lduh, {{ 3, true, 4, 1 }} }, 2762 { Hexagon::BI__builtin_circ_ldb, {{ 3, true, 4, 0 }} }, 2763 { Hexagon::BI__builtin_circ_ldub, {{ 3, true, 4, 0 }} }, 2764 { Hexagon::BI__builtin_circ_std, {{ 3, true, 4, 3 }} }, 2765 { Hexagon::BI__builtin_circ_stw, {{ 3, true, 4, 2 }} }, 2766 { Hexagon::BI__builtin_circ_sth, {{ 3, true, 4, 1 }} }, 2767 { Hexagon::BI__builtin_circ_sthhi, {{ 3, true, 4, 1 }} }, 2768 { Hexagon::BI__builtin_circ_stb, {{ 3, true, 4, 0 }} }, 2769 2770 { Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci, {{ 1, true, 4, 0 }} }, 2771 { Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci, {{ 1, true, 4, 0 }} }, 2772 { Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci, {{ 1, true, 4, 1 }} }, 2773 { Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci, {{ 1, true, 4, 1 }} }, 2774 { Hexagon::BI__builtin_HEXAGON_L2_loadri_pci, {{ 1, true, 4, 2 }} }, 2775 { Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci, {{ 1, true, 4, 3 }} }, 2776 { Hexagon::BI__builtin_HEXAGON_S2_storerb_pci, {{ 1, true, 4, 0 }} }, 2777 { Hexagon::BI__builtin_HEXAGON_S2_storerh_pci, {{ 1, true, 4, 1 }} }, 2778 { Hexagon::BI__builtin_HEXAGON_S2_storerf_pci, {{ 1, true, 4, 1 }} }, 2779 { Hexagon::BI__builtin_HEXAGON_S2_storeri_pci, {{ 1, true, 4, 2 }} }, 2780 { Hexagon::BI__builtin_HEXAGON_S2_storerd_pci, {{ 1, true, 4, 3 }} }, 2781 2782 { Hexagon::BI__builtin_HEXAGON_A2_combineii, {{ 1, true, 8, 0 }} }, 2783 { Hexagon::BI__builtin_HEXAGON_A2_tfrih, {{ 1, false, 16, 0 }} }, 2784 { Hexagon::BI__builtin_HEXAGON_A2_tfril, {{ 1, false, 16, 0 }} }, 2785 { Hexagon::BI__builtin_HEXAGON_A2_tfrpi, {{ 0, true, 8, 0 }} }, 2786 { Hexagon::BI__builtin_HEXAGON_A4_bitspliti, {{ 1, false, 5, 0 }} }, 2787 { Hexagon::BI__builtin_HEXAGON_A4_cmpbeqi, {{ 1, false, 8, 0 }} }, 2788 { Hexagon::BI__builtin_HEXAGON_A4_cmpbgti, {{ 1, true, 8, 0 }} }, 2789 { Hexagon::BI__builtin_HEXAGON_A4_cround_ri, {{ 1, false, 5, 0 }} }, 2790 { Hexagon::BI__builtin_HEXAGON_A4_round_ri, {{ 1, false, 5, 0 }} }, 2791 { Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat, {{ 1, false, 5, 0 }} }, 2792 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbeqi, {{ 1, false, 8, 0 }} }, 2793 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgti, {{ 1, true, 8, 0 }} }, 2794 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgtui, {{ 1, false, 7, 0 }} }, 2795 { Hexagon::BI__builtin_HEXAGON_A4_vcmpheqi, {{ 1, true, 8, 0 }} }, 2796 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgti, {{ 1, true, 8, 0 }} }, 2797 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgtui, {{ 1, false, 7, 0 }} }, 2798 { Hexagon::BI__builtin_HEXAGON_A4_vcmpweqi, {{ 1, true, 8, 0 }} }, 2799 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgti, {{ 1, true, 8, 0 }} }, 2800 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgtui, {{ 1, 
false, 7, 0 }} }, 2801 { Hexagon::BI__builtin_HEXAGON_C2_bitsclri, {{ 1, false, 6, 0 }} }, 2802 { Hexagon::BI__builtin_HEXAGON_C2_muxii, {{ 2, true, 8, 0 }} }, 2803 { Hexagon::BI__builtin_HEXAGON_C4_nbitsclri, {{ 1, false, 6, 0 }} }, 2804 { Hexagon::BI__builtin_HEXAGON_F2_dfclass, {{ 1, false, 5, 0 }} }, 2805 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_n, {{ 0, false, 10, 0 }} }, 2806 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_p, {{ 0, false, 10, 0 }} }, 2807 { Hexagon::BI__builtin_HEXAGON_F2_sfclass, {{ 1, false, 5, 0 }} }, 2808 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_n, {{ 0, false, 10, 0 }} }, 2809 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_p, {{ 0, false, 10, 0 }} }, 2810 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addi, {{ 2, false, 6, 0 }} }, 2811 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addr_u2, {{ 1, false, 6, 2 }} }, 2812 { Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri, {{ 2, false, 3, 0 }} }, 2813 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc, {{ 2, false, 6, 0 }} }, 2814 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and, {{ 2, false, 6, 0 }} }, 2815 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p, {{ 1, false, 6, 0 }} }, 2816 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac, {{ 2, false, 6, 0 }} }, 2817 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or, {{ 2, false, 6, 0 }} }, 2818 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc, {{ 2, false, 6, 0 }} }, 2819 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc, {{ 2, false, 5, 0 }} }, 2820 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and, {{ 2, false, 5, 0 }} }, 2821 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r, {{ 1, false, 5, 0 }} }, 2822 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac, {{ 2, false, 5, 0 }} }, 2823 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or, {{ 2, false, 5, 0 }} }, 2824 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat, {{ 1, false, 5, 0 }} }, 2825 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc, {{ 2, false, 5, 0 }} }, 2826 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh, {{ 1, false, 4, 0 }} }, 2827 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw, {{ 1, false, 5, 0 }} }, 2828 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc, {{ 2, false, 6, 0 }} }, 2829 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and, {{ 2, false, 6, 0 }} }, 2830 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p, {{ 1, false, 6, 0 }} }, 2831 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac, {{ 2, false, 6, 0 }} }, 2832 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or, {{ 2, false, 6, 0 }} }, 2833 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax, 2834 {{ 1, false, 6, 0 }} }, 2835 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd, {{ 1, false, 6, 0 }} }, 2836 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc, {{ 2, false, 5, 0 }} }, 2837 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and, {{ 2, false, 5, 0 }} }, 2838 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r, {{ 1, false, 5, 0 }} }, 2839 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac, {{ 2, false, 5, 0 }} }, 2840 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or, {{ 2, false, 5, 0 }} }, 2841 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax, 2842 {{ 1, false, 5, 0 }} }, 2843 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd, {{ 1, false, 5, 0 }} }, 2844 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun, {{ 1, false, 5, 0 }} }, 2845 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh, {{ 1, false, 4, 0 }} }, 2846 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw, {{ 1, false, 5, 0 }} }, 2847 { Hexagon::BI__builtin_HEXAGON_S2_clrbit_i, {{ 1, false, 5, 0 }} }, 2848 { Hexagon::BI__builtin_HEXAGON_S2_extractu, {{ 1, false, 5, 0 }, 2849 { 2, false, 5, 0 }} }, 2850 { 
Hexagon::BI__builtin_HEXAGON_S2_extractup, {{ 1, false, 6, 0 }, 2851 { 2, false, 6, 0 }} }, 2852 { Hexagon::BI__builtin_HEXAGON_S2_insert, {{ 2, false, 5, 0 }, 2853 { 3, false, 5, 0 }} }, 2854 { Hexagon::BI__builtin_HEXAGON_S2_insertp, {{ 2, false, 6, 0 }, 2855 { 3, false, 6, 0 }} }, 2856 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc, {{ 2, false, 6, 0 }} }, 2857 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and, {{ 2, false, 6, 0 }} }, 2858 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p, {{ 1, false, 6, 0 }} }, 2859 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac, {{ 2, false, 6, 0 }} }, 2860 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or, {{ 2, false, 6, 0 }} }, 2861 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc, {{ 2, false, 6, 0 }} }, 2862 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc, {{ 2, false, 5, 0 }} }, 2863 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and, {{ 2, false, 5, 0 }} }, 2864 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r, {{ 1, false, 5, 0 }} }, 2865 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac, {{ 2, false, 5, 0 }} }, 2866 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or, {{ 2, false, 5, 0 }} }, 2867 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc, {{ 2, false, 5, 0 }} }, 2868 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh, {{ 1, false, 4, 0 }} }, 2869 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw, {{ 1, false, 5, 0 }} }, 2870 { Hexagon::BI__builtin_HEXAGON_S2_setbit_i, {{ 1, false, 5, 0 }} }, 2871 { Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax, 2872 {{ 2, false, 4, 0 }, 2873 { 3, false, 5, 0 }} }, 2874 { Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax, 2875 {{ 2, false, 4, 0 }, 2876 { 3, false, 5, 0 }} }, 2877 { Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax, 2878 {{ 2, false, 4, 0 }, 2879 { 3, false, 5, 0 }} }, 2880 { Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax, 2881 {{ 2, false, 4, 0 }, 2882 { 3, false, 5, 0 }} }, 2883 { Hexagon::BI__builtin_HEXAGON_S2_togglebit_i, {{ 1, false, 5, 0 }} }, 2884 { Hexagon::BI__builtin_HEXAGON_S2_tstbit_i, {{ 1, false, 5, 0 }} }, 2885 { Hexagon::BI__builtin_HEXAGON_S2_valignib, {{ 2, false, 3, 0 }} }, 2886 { Hexagon::BI__builtin_HEXAGON_S2_vspliceib, {{ 2, false, 3, 0 }} }, 2887 { Hexagon::BI__builtin_HEXAGON_S4_addi_asl_ri, {{ 2, false, 5, 0 }} }, 2888 { Hexagon::BI__builtin_HEXAGON_S4_addi_lsr_ri, {{ 2, false, 5, 0 }} }, 2889 { Hexagon::BI__builtin_HEXAGON_S4_andi_asl_ri, {{ 2, false, 5, 0 }} }, 2890 { Hexagon::BI__builtin_HEXAGON_S4_andi_lsr_ri, {{ 2, false, 5, 0 }} }, 2891 { Hexagon::BI__builtin_HEXAGON_S4_clbaddi, {{ 1, true , 6, 0 }} }, 2892 { Hexagon::BI__builtin_HEXAGON_S4_clbpaddi, {{ 1, true, 6, 0 }} }, 2893 { Hexagon::BI__builtin_HEXAGON_S4_extract, {{ 1, false, 5, 0 }, 2894 { 2, false, 5, 0 }} }, 2895 { Hexagon::BI__builtin_HEXAGON_S4_extractp, {{ 1, false, 6, 0 }, 2896 { 2, false, 6, 0 }} }, 2897 { Hexagon::BI__builtin_HEXAGON_S4_lsli, {{ 0, true, 6, 0 }} }, 2898 { Hexagon::BI__builtin_HEXAGON_S4_ntstbit_i, {{ 1, false, 5, 0 }} }, 2899 { Hexagon::BI__builtin_HEXAGON_S4_ori_asl_ri, {{ 2, false, 5, 0 }} }, 2900 { Hexagon::BI__builtin_HEXAGON_S4_ori_lsr_ri, {{ 2, false, 5, 0 }} }, 2901 { Hexagon::BI__builtin_HEXAGON_S4_subi_asl_ri, {{ 2, false, 5, 0 }} }, 2902 { Hexagon::BI__builtin_HEXAGON_S4_subi_lsr_ri, {{ 2, false, 5, 0 }} }, 2903 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate_acc, {{ 3, false, 2, 0 }} }, 2904 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate, {{ 2, false, 2, 0 }} }, 2905 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax, 2906 {{ 1, false, 4, 0 }} }, 2907 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_sat, {{ 1, 
false, 4, 0 }} }, 2908 { Hexagon::BI__builtin_HEXAGON_S5_vasrhrnd_goodsyntax, 2909 {{ 1, false, 4, 0 }} }, 2910 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, {{ 1, false, 6, 0 }} }, 2911 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, {{ 2, false, 6, 0 }} }, 2912 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, {{ 2, false, 6, 0 }} }, 2913 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, {{ 2, false, 6, 0 }} }, 2914 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, {{ 2, false, 6, 0 }} }, 2915 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, {{ 2, false, 6, 0 }} }, 2916 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, {{ 1, false, 5, 0 }} }, 2917 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, {{ 2, false, 5, 0 }} }, 2918 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, {{ 2, false, 5, 0 }} }, 2919 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, {{ 2, false, 5, 0 }} }, 2920 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, {{ 2, false, 5, 0 }} }, 2921 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, {{ 2, false, 5, 0 }} }, 2922 { Hexagon::BI__builtin_HEXAGON_V6_valignbi, {{ 2, false, 3, 0 }} }, 2923 { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, {{ 2, false, 3, 0 }} }, 2924 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, {{ 2, false, 3, 0 }} }, 2925 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, {{ 2, false, 3, 0 }} }, 2926 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, {{ 2, false, 1, 0 }} }, 2927 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, {{ 2, false, 1, 0 }} }, 2928 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, {{ 3, false, 1, 0 }} }, 2929 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B, 2930 {{ 3, false, 1, 0 }} }, 2931 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, {{ 2, false, 1, 0 }} }, 2932 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, {{ 2, false, 1, 0 }} }, 2933 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, {{ 3, false, 1, 0 }} }, 2934 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B, 2935 {{ 3, false, 1, 0 }} }, 2936 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, {{ 2, false, 1, 0 }} }, 2937 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, {{ 2, false, 1, 0 }} }, 2938 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, {{ 3, false, 1, 0 }} }, 2939 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B, 2940 {{ 3, false, 1, 0 }} }, 2941 }; 2942 2943 // Use a dynamically initialized static to sort the table exactly once on 2944 // first run. 2945 static const bool SortOnce = 2946 (llvm::sort(Infos, 2947 [](const BuiltinInfo &LHS, const BuiltinInfo &RHS) { 2948 return LHS.BuiltinID < RHS.BuiltinID; 2949 }), 2950 true); 2951 (void)SortOnce; 2952 2953 const BuiltinInfo *F = llvm::partition_point( 2954 Infos, [=](const BuiltinInfo &BI) { return BI.BuiltinID < BuiltinID; }); 2955 if (F == std::end(Infos) || F->BuiltinID != BuiltinID) 2956 return false; 2957 2958 bool Error = false; 2959 2960 for (const ArgInfo &A : F->Infos) { 2961 // Ignore empty ArgInfo elements. 2962 if (A.BitWidth == 0) 2963 continue; 2964 2965 int32_t Min = A.IsSigned ? -(1 << (A.BitWidth - 1)) : 0; 2966 int32_t Max = (1 << (A.IsSigned ? 
A.BitWidth - 1 : A.BitWidth)) - 1;
    if (!A.Align) {
      Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max);
    } else {
      unsigned M = 1 << A.Align;
      Min *= M;
      Max *= M;
      Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max) |
               SemaBuiltinConstantArgMultiple(TheCall, A.OpNum, M);
    }
  }
  return Error;
}

bool Sema::CheckHexagonBuiltinFunctionCall(unsigned BuiltinID,
                                           CallExpr *TheCall) {
  return CheckHexagonBuiltinArgument(BuiltinID, TheCall);
}

bool Sema::CheckMipsBuiltinFunctionCall(const TargetInfo &TI,
                                        unsigned BuiltinID, CallExpr *TheCall) {
  return CheckMipsBuiltinCpu(TI, BuiltinID, TheCall) ||
         CheckMipsBuiltinArgument(BuiltinID, TheCall);
}

bool Sema::CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID,
                               CallExpr *TheCall) {

  if (Mips::BI__builtin_mips_addu_qb <= BuiltinID &&
      BuiltinID <= Mips::BI__builtin_mips_lwx) {
    if (!TI.hasFeature("dsp"))
      return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_dsp);
  }

  if (Mips::BI__builtin_mips_absq_s_qb <= BuiltinID &&
      BuiltinID <= Mips::BI__builtin_mips_subuh_r_qb) {
    if (!TI.hasFeature("dspr2"))
      return Diag(TheCall->getBeginLoc(),
                  diag::err_mips_builtin_requires_dspr2);
  }

  if (Mips::BI__builtin_msa_add_a_b <= BuiltinID &&
      BuiltinID <= Mips::BI__builtin_msa_xori_b) {
    if (!TI.hasFeature("msa"))
      return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_msa);
  }

  return false;
}

// CheckMipsBuiltinArgument - Checks that the constant value passed to the
// intrinsic is correct. The switch statement is ordered by DSP, then MSA. The
// ordering for DSP is unspecified. MSA is ordered by the data format used
// by the underlying instruction, i.e., df/m, df/n, and then by size.
//
// FIXME: The size tests here should instead be tablegen'd along with the
// definitions from include/clang/Basic/BuiltinsMips.def.
// FIXME: GCC is strict on signedness for some of these intrinsics; we should
// be too.
bool Sema::CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
  unsigned i = 0, l = 0, u = 0, m = 0;
  switch (BuiltinID) {
  default: return false;
  case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break;
  case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break;
  case Mips::BI__builtin_mips_append: i = 2; l = 0; u = 31; break;
  case Mips::BI__builtin_mips_balign: i = 2; l = 0; u = 3; break;
  case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break;
  case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break;
  case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break;
  // MSA intrinsics. Instructions (which the intrinsics map to) which use the
  // df/m field.
  // These intrinsics take an unsigned 3 bit immediate.
3039 case Mips::BI__builtin_msa_bclri_b: 3040 case Mips::BI__builtin_msa_bnegi_b: 3041 case Mips::BI__builtin_msa_bseti_b: 3042 case Mips::BI__builtin_msa_sat_s_b: 3043 case Mips::BI__builtin_msa_sat_u_b: 3044 case Mips::BI__builtin_msa_slli_b: 3045 case Mips::BI__builtin_msa_srai_b: 3046 case Mips::BI__builtin_msa_srari_b: 3047 case Mips::BI__builtin_msa_srli_b: 3048 case Mips::BI__builtin_msa_srlri_b: i = 1; l = 0; u = 7; break; 3049 case Mips::BI__builtin_msa_binsli_b: 3050 case Mips::BI__builtin_msa_binsri_b: i = 2; l = 0; u = 7; break; 3051 // These intrinsics take an unsigned 4 bit immediate. 3052 case Mips::BI__builtin_msa_bclri_h: 3053 case Mips::BI__builtin_msa_bnegi_h: 3054 case Mips::BI__builtin_msa_bseti_h: 3055 case Mips::BI__builtin_msa_sat_s_h: 3056 case Mips::BI__builtin_msa_sat_u_h: 3057 case Mips::BI__builtin_msa_slli_h: 3058 case Mips::BI__builtin_msa_srai_h: 3059 case Mips::BI__builtin_msa_srari_h: 3060 case Mips::BI__builtin_msa_srli_h: 3061 case Mips::BI__builtin_msa_srlri_h: i = 1; l = 0; u = 15; break; 3062 case Mips::BI__builtin_msa_binsli_h: 3063 case Mips::BI__builtin_msa_binsri_h: i = 2; l = 0; u = 15; break; 3064 // These intrinsics take an unsigned 5 bit immediate. 3065 // The first block of intrinsics actually have an unsigned 5 bit field, 3066 // not a df/n field. 3067 case Mips::BI__builtin_msa_cfcmsa: 3068 case Mips::BI__builtin_msa_ctcmsa: i = 0; l = 0; u = 31; break; 3069 case Mips::BI__builtin_msa_clei_u_b: 3070 case Mips::BI__builtin_msa_clei_u_h: 3071 case Mips::BI__builtin_msa_clei_u_w: 3072 case Mips::BI__builtin_msa_clei_u_d: 3073 case Mips::BI__builtin_msa_clti_u_b: 3074 case Mips::BI__builtin_msa_clti_u_h: 3075 case Mips::BI__builtin_msa_clti_u_w: 3076 case Mips::BI__builtin_msa_clti_u_d: 3077 case Mips::BI__builtin_msa_maxi_u_b: 3078 case Mips::BI__builtin_msa_maxi_u_h: 3079 case Mips::BI__builtin_msa_maxi_u_w: 3080 case Mips::BI__builtin_msa_maxi_u_d: 3081 case Mips::BI__builtin_msa_mini_u_b: 3082 case Mips::BI__builtin_msa_mini_u_h: 3083 case Mips::BI__builtin_msa_mini_u_w: 3084 case Mips::BI__builtin_msa_mini_u_d: 3085 case Mips::BI__builtin_msa_addvi_b: 3086 case Mips::BI__builtin_msa_addvi_h: 3087 case Mips::BI__builtin_msa_addvi_w: 3088 case Mips::BI__builtin_msa_addvi_d: 3089 case Mips::BI__builtin_msa_bclri_w: 3090 case Mips::BI__builtin_msa_bnegi_w: 3091 case Mips::BI__builtin_msa_bseti_w: 3092 case Mips::BI__builtin_msa_sat_s_w: 3093 case Mips::BI__builtin_msa_sat_u_w: 3094 case Mips::BI__builtin_msa_slli_w: 3095 case Mips::BI__builtin_msa_srai_w: 3096 case Mips::BI__builtin_msa_srari_w: 3097 case Mips::BI__builtin_msa_srli_w: 3098 case Mips::BI__builtin_msa_srlri_w: 3099 case Mips::BI__builtin_msa_subvi_b: 3100 case Mips::BI__builtin_msa_subvi_h: 3101 case Mips::BI__builtin_msa_subvi_w: 3102 case Mips::BI__builtin_msa_subvi_d: i = 1; l = 0; u = 31; break; 3103 case Mips::BI__builtin_msa_binsli_w: 3104 case Mips::BI__builtin_msa_binsri_w: i = 2; l = 0; u = 31; break; 3105 // These intrinsics take an unsigned 6 bit immediate. 
3106 case Mips::BI__builtin_msa_bclri_d: 3107 case Mips::BI__builtin_msa_bnegi_d: 3108 case Mips::BI__builtin_msa_bseti_d: 3109 case Mips::BI__builtin_msa_sat_s_d: 3110 case Mips::BI__builtin_msa_sat_u_d: 3111 case Mips::BI__builtin_msa_slli_d: 3112 case Mips::BI__builtin_msa_srai_d: 3113 case Mips::BI__builtin_msa_srari_d: 3114 case Mips::BI__builtin_msa_srli_d: 3115 case Mips::BI__builtin_msa_srlri_d: i = 1; l = 0; u = 63; break; 3116 case Mips::BI__builtin_msa_binsli_d: 3117 case Mips::BI__builtin_msa_binsri_d: i = 2; l = 0; u = 63; break; 3118 // These intrinsics take a signed 5 bit immediate. 3119 case Mips::BI__builtin_msa_ceqi_b: 3120 case Mips::BI__builtin_msa_ceqi_h: 3121 case Mips::BI__builtin_msa_ceqi_w: 3122 case Mips::BI__builtin_msa_ceqi_d: 3123 case Mips::BI__builtin_msa_clti_s_b: 3124 case Mips::BI__builtin_msa_clti_s_h: 3125 case Mips::BI__builtin_msa_clti_s_w: 3126 case Mips::BI__builtin_msa_clti_s_d: 3127 case Mips::BI__builtin_msa_clei_s_b: 3128 case Mips::BI__builtin_msa_clei_s_h: 3129 case Mips::BI__builtin_msa_clei_s_w: 3130 case Mips::BI__builtin_msa_clei_s_d: 3131 case Mips::BI__builtin_msa_maxi_s_b: 3132 case Mips::BI__builtin_msa_maxi_s_h: 3133 case Mips::BI__builtin_msa_maxi_s_w: 3134 case Mips::BI__builtin_msa_maxi_s_d: 3135 case Mips::BI__builtin_msa_mini_s_b: 3136 case Mips::BI__builtin_msa_mini_s_h: 3137 case Mips::BI__builtin_msa_mini_s_w: 3138 case Mips::BI__builtin_msa_mini_s_d: i = 1; l = -16; u = 15; break; 3139 // These intrinsics take an unsigned 8 bit immediate. 3140 case Mips::BI__builtin_msa_andi_b: 3141 case Mips::BI__builtin_msa_nori_b: 3142 case Mips::BI__builtin_msa_ori_b: 3143 case Mips::BI__builtin_msa_shf_b: 3144 case Mips::BI__builtin_msa_shf_h: 3145 case Mips::BI__builtin_msa_shf_w: 3146 case Mips::BI__builtin_msa_xori_b: i = 1; l = 0; u = 255; break; 3147 case Mips::BI__builtin_msa_bseli_b: 3148 case Mips::BI__builtin_msa_bmnzi_b: 3149 case Mips::BI__builtin_msa_bmzi_b: i = 2; l = 0; u = 255; break; 3150 // df/n format 3151 // These intrinsics take an unsigned 4 bit immediate. 3152 case Mips::BI__builtin_msa_copy_s_b: 3153 case Mips::BI__builtin_msa_copy_u_b: 3154 case Mips::BI__builtin_msa_insve_b: 3155 case Mips::BI__builtin_msa_splati_b: i = 1; l = 0; u = 15; break; 3156 case Mips::BI__builtin_msa_sldi_b: i = 2; l = 0; u = 15; break; 3157 // These intrinsics take an unsigned 3 bit immediate. 3158 case Mips::BI__builtin_msa_copy_s_h: 3159 case Mips::BI__builtin_msa_copy_u_h: 3160 case Mips::BI__builtin_msa_insve_h: 3161 case Mips::BI__builtin_msa_splati_h: i = 1; l = 0; u = 7; break; 3162 case Mips::BI__builtin_msa_sldi_h: i = 2; l = 0; u = 7; break; 3163 // These intrinsics take an unsigned 2 bit immediate. 3164 case Mips::BI__builtin_msa_copy_s_w: 3165 case Mips::BI__builtin_msa_copy_u_w: 3166 case Mips::BI__builtin_msa_insve_w: 3167 case Mips::BI__builtin_msa_splati_w: i = 1; l = 0; u = 3; break; 3168 case Mips::BI__builtin_msa_sldi_w: i = 2; l = 0; u = 3; break; 3169 // These intrinsics take an unsigned 1 bit immediate. 3170 case Mips::BI__builtin_msa_copy_s_d: 3171 case Mips::BI__builtin_msa_copy_u_d: 3172 case Mips::BI__builtin_msa_insve_d: 3173 case Mips::BI__builtin_msa_splati_d: i = 1; l = 0; u = 1; break; 3174 case Mips::BI__builtin_msa_sldi_d: i = 2; l = 0; u = 1; break; 3175 // Memory offsets and immediate loads. 3176 // These intrinsics take a signed 10 bit immediate. 
3177 case Mips::BI__builtin_msa_ldi_b: i = 0; l = -128; u = 255; break; 3178 case Mips::BI__builtin_msa_ldi_h: 3179 case Mips::BI__builtin_msa_ldi_w: 3180 case Mips::BI__builtin_msa_ldi_d: i = 0; l = -512; u = 511; break; 3181 case Mips::BI__builtin_msa_ld_b: i = 1; l = -512; u = 511; m = 1; break; 3182 case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 2; break; 3183 case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 4; break; 3184 case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 8; break; 3185 case Mips::BI__builtin_msa_ldr_d: i = 1; l = -4096; u = 4088; m = 8; break; 3186 case Mips::BI__builtin_msa_ldr_w: i = 1; l = -2048; u = 2044; m = 4; break; 3187 case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 1; break; 3188 case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 2; break; 3189 case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 4; break; 3190 case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 8; break; 3191 case Mips::BI__builtin_msa_str_d: i = 2; l = -4096; u = 4088; m = 8; break; 3192 case Mips::BI__builtin_msa_str_w: i = 2; l = -2048; u = 2044; m = 4; break; 3193 } 3194 3195 if (!m) 3196 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 3197 3198 return SemaBuiltinConstantArgRange(TheCall, i, l, u) || 3199 SemaBuiltinConstantArgMultiple(TheCall, i, m); 3200 } 3201 3202 /// DecodePPCMMATypeFromStr - This decodes one PPC MMA type descriptor from Str, 3203 /// advancing the pointer over the consumed characters. The decoded type is 3204 /// returned. If the decoded type represents a constant integer with a 3205 /// constraint on its value then Mask is set to that value. The type descriptors 3206 /// used in Str are specific to PPC MMA builtins and are documented in the file 3207 /// defining the PPC builtins. 
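///
/// Illustrative sketch (an addition for clarity; it assumes PPCTypes.def maps
/// the 256- and 512-bit MMA types to __vector_pair and __vector_quad): a
/// descriptor such as "W512*" would decode to "__vector_quad *", "V" to
/// "vector unsigned char", and "i2" to "int" with Mask set to 2 so the caller
/// can constrain that constant argument.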
3208 static QualType DecodePPCMMATypeFromStr(ASTContext &Context, const char *&Str, 3209 unsigned &Mask) { 3210 bool RequireICE = false; 3211 ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None; 3212 switch (*Str++) { 3213 case 'V': 3214 return Context.getVectorType(Context.UnsignedCharTy, 16, 3215 VectorType::VectorKind::AltiVecVector); 3216 case 'i': { 3217 char *End; 3218 unsigned size = strtoul(Str, &End, 10); 3219 assert(End != Str && "Missing constant parameter constraint"); 3220 Str = End; 3221 Mask = size; 3222 return Context.IntTy; 3223 } 3224 case 'W': { 3225 char *End; 3226 unsigned size = strtoul(Str, &End, 10); 3227 assert(End != Str && "Missing PowerPC MMA type size"); 3228 Str = End; 3229 QualType Type; 3230 switch (size) { 3231 #define PPC_VECTOR_TYPE(typeName, Id, size) \ 3232 case size: Type = Context.Id##Ty; break; 3233 #include "clang/Basic/PPCTypes.def" 3234 default: llvm_unreachable("Invalid PowerPC MMA vector type"); 3235 } 3236 bool CheckVectorArgs = false; 3237 while (!CheckVectorArgs) { 3238 switch (*Str++) { 3239 case '*': 3240 Type = Context.getPointerType(Type); 3241 break; 3242 case 'C': 3243 Type = Type.withConst(); 3244 break; 3245 default: 3246 CheckVectorArgs = true; 3247 --Str; 3248 break; 3249 } 3250 } 3251 return Type; 3252 } 3253 default: 3254 return Context.DecodeTypeStr(--Str, Context, Error, RequireICE, true); 3255 } 3256 } 3257 3258 bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 3259 CallExpr *TheCall) { 3260 unsigned i = 0, l = 0, u = 0; 3261 bool Is64BitBltin = BuiltinID == PPC::BI__builtin_divde || 3262 BuiltinID == PPC::BI__builtin_divdeu || 3263 BuiltinID == PPC::BI__builtin_bpermd; 3264 bool IsTarget64Bit = TI.getTypeWidth(TI.getIntPtrType()) == 64; 3265 bool IsBltinExtDiv = BuiltinID == PPC::BI__builtin_divwe || 3266 BuiltinID == PPC::BI__builtin_divweu || 3267 BuiltinID == PPC::BI__builtin_divde || 3268 BuiltinID == PPC::BI__builtin_divdeu; 3269 3270 if (Is64BitBltin && !IsTarget64Bit) 3271 return Diag(TheCall->getBeginLoc(), diag::err_64_bit_builtin_32_bit_tgt) 3272 << TheCall->getSourceRange(); 3273 3274 if ((IsBltinExtDiv && !TI.hasFeature("extdiv")) || 3275 (BuiltinID == PPC::BI__builtin_bpermd && !TI.hasFeature("bpermd"))) 3276 return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_only_on_pwr7) 3277 << TheCall->getSourceRange(); 3278 3279 auto SemaVSXCheck = [&](CallExpr *TheCall) -> bool { 3280 if (!TI.hasFeature("vsx")) 3281 return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_only_on_pwr7) 3282 << TheCall->getSourceRange(); 3283 return false; 3284 }; 3285 3286 switch (BuiltinID) { 3287 default: return false; 3288 case PPC::BI__builtin_altivec_crypto_vshasigmaw: 3289 case PPC::BI__builtin_altivec_crypto_vshasigmad: 3290 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 3291 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); 3292 case PPC::BI__builtin_altivec_dss: 3293 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3); 3294 case PPC::BI__builtin_tbegin: 3295 case PPC::BI__builtin_tend: i = 0; l = 0; u = 1; break; 3296 case PPC::BI__builtin_tsr: i = 0; l = 0; u = 7; break; 3297 case PPC::BI__builtin_tabortwc: 3298 case PPC::BI__builtin_tabortdc: i = 0; l = 0; u = 31; break; 3299 case PPC::BI__builtin_tabortwci: 3300 case PPC::BI__builtin_tabortdci: 3301 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) || 3302 SemaBuiltinConstantArgRange(TheCall, 2, 0, 31); 3303 case PPC::BI__builtin_altivec_dst: 3304 case PPC::BI__builtin_altivec_dstt: 3305 case 
PPC::BI__builtin_altivec_dstst:
  case PPC::BI__builtin_altivec_dststt:
    return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3);
  case PPC::BI__builtin_vsx_xxpermdi:
  case PPC::BI__builtin_vsx_xxsldwi:
    return SemaBuiltinVSX(TheCall);
  case PPC::BI__builtin_unpack_vector_int128:
    return SemaVSXCheck(TheCall) ||
           SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
  case PPC::BI__builtin_pack_vector_int128:
    return SemaVSXCheck(TheCall);
  case PPC::BI__builtin_altivec_vgnb:
    return SemaBuiltinConstantArgRange(TheCall, 1, 2, 7);
  case PPC::BI__builtin_altivec_vec_replace_elt:
  case PPC::BI__builtin_altivec_vec_replace_unaligned: {
    QualType VecTy = TheCall->getArg(0)->getType();
    QualType EltTy = TheCall->getArg(1)->getType();
    unsigned Width = Context.getIntWidth(EltTy);
    return SemaBuiltinConstantArgRange(TheCall, 2, 0, Width == 32 ? 12 : 8) ||
           !isEltOfVectorTy(Context, TheCall, *this, VecTy, EltTy);
  }
  case PPC::BI__builtin_vsx_xxeval:
    return SemaBuiltinConstantArgRange(TheCall, 3, 0, 255);
  case PPC::BI__builtin_altivec_vsldbi:
    return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7);
  case PPC::BI__builtin_altivec_vsrdbi:
    return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7);
  case PPC::BI__builtin_vsx_xxpermx:
    return SemaBuiltinConstantArgRange(TheCall, 3, 0, 7);
#define CUSTOM_BUILTIN(Name, Intr, Types, Acc) \
  case PPC::BI__builtin_##Name: \
    return SemaBuiltinPPCMMACall(TheCall, Types);
#include "clang/Basic/BuiltinsPPC.def"
  }
  return SemaBuiltinConstantArgRange(TheCall, i, l, u);
}

// Check if the given type is a non-pointer PPC MMA type. This function is used
// in Sema to prevent invalid uses of restricted PPC MMA types.
bool Sema::CheckPPCMMAType(QualType Type, SourceLocation TypeLoc) {
  if (Type->isPointerType() || Type->isArrayType())
    return false;

  QualType CoreType = Type.getCanonicalType().getUnqualifiedType();
#define PPC_VECTOR_TYPE(Name, Id, Size) || CoreType == Context.Id##Ty
  if (false
#include "clang/Basic/PPCTypes.def"
     ) {
    Diag(TypeLoc, diag::err_ppc_invalid_use_mma_type);
    return true;
  }
  return false;
}

bool Sema::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
                                          CallExpr *TheCall) {
  // Position of the memory order and scope arguments in the builtin.
  unsigned OrderIndex, ScopeIndex;
  switch (BuiltinID) {
  case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
  case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
  case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
  case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
    OrderIndex = 2;
    ScopeIndex = 3;
    break;
  case AMDGPU::BI__builtin_amdgcn_fence:
    OrderIndex = 0;
    ScopeIndex = 1;
    break;
  default:
    return false;
  }

  ExprResult Arg = TheCall->getArg(OrderIndex);
  auto ArgExpr = Arg.get();
  Expr::EvalResult ArgResult;

  if (!ArgExpr->EvaluateAsInt(ArgResult, Context))
    return Diag(ArgExpr->getExprLoc(), diag::err_typecheck_expect_int)
           << ArgExpr->getType();
  auto Ord = ArgResult.Val.getInt().getZExtValue();

  // Check the validity of the memory ordering as per the C11 / C++11 memory
  // model. Only the fence needs this check; atomic dec/inc allow all memory
  // orders.
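  //
  // Illustrative sketch (an added example, not from the original source):
  //   __builtin_amdgcn_fence(__ATOMIC_SEQ_CST, "workgroup");  // accepted
  //   __builtin_amdgcn_fence(__ATOMIC_RELAXED, "workgroup");  // diagnosed
  // while the atomic inc/dec builtins accept any valid C ABI ordering value.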
3390 if (!llvm::isValidAtomicOrderingCABI(Ord)) 3391 return Diag(ArgExpr->getBeginLoc(), 3392 diag::warn_atomic_op_has_invalid_memory_order) 3393 << ArgExpr->getSourceRange(); 3394 switch (static_cast<llvm::AtomicOrderingCABI>(Ord)) { 3395 case llvm::AtomicOrderingCABI::relaxed: 3396 case llvm::AtomicOrderingCABI::consume: 3397 if (BuiltinID == AMDGPU::BI__builtin_amdgcn_fence) 3398 return Diag(ArgExpr->getBeginLoc(), 3399 diag::warn_atomic_op_has_invalid_memory_order) 3400 << ArgExpr->getSourceRange(); 3401 break; 3402 case llvm::AtomicOrderingCABI::acquire: 3403 case llvm::AtomicOrderingCABI::release: 3404 case llvm::AtomicOrderingCABI::acq_rel: 3405 case llvm::AtomicOrderingCABI::seq_cst: 3406 break; 3407 } 3408 3409 Arg = TheCall->getArg(ScopeIndex); 3410 ArgExpr = Arg.get(); 3411 Expr::EvalResult ArgResult1; 3412 // Check that sync scope is a constant literal 3413 if (!ArgExpr->EvaluateAsConstantExpr(ArgResult1, Context)) 3414 return Diag(ArgExpr->getExprLoc(), diag::err_expr_not_string_literal) 3415 << ArgExpr->getType(); 3416 3417 return false; 3418 } 3419 3420 bool Sema::CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum) { 3421 llvm::APSInt Result; 3422 3423 // We can't check the value of a dependent argument. 3424 Expr *Arg = TheCall->getArg(ArgNum); 3425 if (Arg->isTypeDependent() || Arg->isValueDependent()) 3426 return false; 3427 3428 // Check constant-ness first. 3429 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 3430 return true; 3431 3432 int64_t Val = Result.getSExtValue(); 3433 if ((Val >= 0 && Val <= 3) || (Val >= 5 && Val <= 7)) 3434 return false; 3435 3436 return Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_invalid_lmul) 3437 << Arg->getSourceRange(); 3438 } 3439 3440 bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo &TI, 3441 unsigned BuiltinID, 3442 CallExpr *TheCall) { 3443 // CodeGenFunction can also detect this, but this gives a better error 3444 // message. 3445 bool FeatureMissing = false; 3446 SmallVector<StringRef> ReqFeatures; 3447 StringRef Features = Context.BuiltinInfo.getRequiredFeatures(BuiltinID); 3448 Features.split(ReqFeatures, ','); 3449 3450 // Check if each required feature is included 3451 for (StringRef F : ReqFeatures) { 3452 if (TI.hasFeature(F)) 3453 continue; 3454 3455 // If the feature is 64bit, alter the string so it will print better in 3456 // the diagnostic. 3457 if (F == "64bit") 3458 F = "RV64"; 3459 3460 // Convert features like "zbr" and "experimental-zbr" to "Zbr". 
3461 F.consume_front("experimental-"); 3462 std::string FeatureStr = F.str(); 3463 FeatureStr[0] = std::toupper(FeatureStr[0]); 3464 3465 // Error message 3466 FeatureMissing = true; 3467 Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_requires_extension) 3468 << TheCall->getSourceRange() << StringRef(FeatureStr); 3469 } 3470 3471 if (FeatureMissing) 3472 return true; 3473 3474 switch (BuiltinID) { 3475 case RISCV::BI__builtin_rvv_vsetvli: 3476 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3) || 3477 CheckRISCVLMUL(TheCall, 2); 3478 case RISCV::BI__builtin_rvv_vsetvlimax: 3479 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) || 3480 CheckRISCVLMUL(TheCall, 1); 3481 } 3482 3483 return false; 3484 } 3485 3486 bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, 3487 CallExpr *TheCall) { 3488 if (BuiltinID == SystemZ::BI__builtin_tabort) { 3489 Expr *Arg = TheCall->getArg(0); 3490 if (Optional<llvm::APSInt> AbortCode = Arg->getIntegerConstantExpr(Context)) 3491 if (AbortCode->getSExtValue() >= 0 && AbortCode->getSExtValue() < 256) 3492 return Diag(Arg->getBeginLoc(), diag::err_systemz_invalid_tabort_code) 3493 << Arg->getSourceRange(); 3494 } 3495 3496 // For intrinsics which take an immediate value as part of the instruction, 3497 // range check them here. 3498 unsigned i = 0, l = 0, u = 0; 3499 switch (BuiltinID) { 3500 default: return false; 3501 case SystemZ::BI__builtin_s390_lcbb: i = 1; l = 0; u = 15; break; 3502 case SystemZ::BI__builtin_s390_verimb: 3503 case SystemZ::BI__builtin_s390_verimh: 3504 case SystemZ::BI__builtin_s390_verimf: 3505 case SystemZ::BI__builtin_s390_verimg: i = 3; l = 0; u = 255; break; 3506 case SystemZ::BI__builtin_s390_vfaeb: 3507 case SystemZ::BI__builtin_s390_vfaeh: 3508 case SystemZ::BI__builtin_s390_vfaef: 3509 case SystemZ::BI__builtin_s390_vfaebs: 3510 case SystemZ::BI__builtin_s390_vfaehs: 3511 case SystemZ::BI__builtin_s390_vfaefs: 3512 case SystemZ::BI__builtin_s390_vfaezb: 3513 case SystemZ::BI__builtin_s390_vfaezh: 3514 case SystemZ::BI__builtin_s390_vfaezf: 3515 case SystemZ::BI__builtin_s390_vfaezbs: 3516 case SystemZ::BI__builtin_s390_vfaezhs: 3517 case SystemZ::BI__builtin_s390_vfaezfs: i = 2; l = 0; u = 15; break; 3518 case SystemZ::BI__builtin_s390_vfisb: 3519 case SystemZ::BI__builtin_s390_vfidb: 3520 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15) || 3521 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); 3522 case SystemZ::BI__builtin_s390_vftcisb: 3523 case SystemZ::BI__builtin_s390_vftcidb: i = 1; l = 0; u = 4095; break; 3524 case SystemZ::BI__builtin_s390_vlbb: i = 1; l = 0; u = 15; break; 3525 case SystemZ::BI__builtin_s390_vpdi: i = 2; l = 0; u = 15; break; 3526 case SystemZ::BI__builtin_s390_vsldb: i = 2; l = 0; u = 15; break; 3527 case SystemZ::BI__builtin_s390_vstrcb: 3528 case SystemZ::BI__builtin_s390_vstrch: 3529 case SystemZ::BI__builtin_s390_vstrcf: 3530 case SystemZ::BI__builtin_s390_vstrczb: 3531 case SystemZ::BI__builtin_s390_vstrczh: 3532 case SystemZ::BI__builtin_s390_vstrczf: 3533 case SystemZ::BI__builtin_s390_vstrcbs: 3534 case SystemZ::BI__builtin_s390_vstrchs: 3535 case SystemZ::BI__builtin_s390_vstrcfs: 3536 case SystemZ::BI__builtin_s390_vstrczbs: 3537 case SystemZ::BI__builtin_s390_vstrczhs: 3538 case SystemZ::BI__builtin_s390_vstrczfs: i = 3; l = 0; u = 15; break; 3539 case SystemZ::BI__builtin_s390_vmslg: i = 3; l = 0; u = 15; break; 3540 case SystemZ::BI__builtin_s390_vfminsb: 3541 case SystemZ::BI__builtin_s390_vfmaxsb: 3542 case SystemZ::BI__builtin_s390_vfmindb: 3543 case 
SystemZ::BI__builtin_s390_vfmaxdb: i = 2; l = 0; u = 15; break; 3544 case SystemZ::BI__builtin_s390_vsld: i = 2; l = 0; u = 7; break; 3545 case SystemZ::BI__builtin_s390_vsrd: i = 2; l = 0; u = 7; break; 3546 } 3547 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 3548 } 3549 3550 /// SemaBuiltinCpuSupports - Handle __builtin_cpu_supports(char *). 3551 /// This checks that the target supports __builtin_cpu_supports and 3552 /// that the string argument is constant and valid. 3553 static bool SemaBuiltinCpuSupports(Sema &S, const TargetInfo &TI, 3554 CallExpr *TheCall) { 3555 Expr *Arg = TheCall->getArg(0); 3556 3557 // Check if the argument is a string literal. 3558 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 3559 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 3560 << Arg->getSourceRange(); 3561 3562 // Check the contents of the string. 3563 StringRef Feature = 3564 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 3565 if (!TI.validateCpuSupports(Feature)) 3566 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_supports) 3567 << Arg->getSourceRange(); 3568 return false; 3569 } 3570 3571 /// SemaBuiltinCpuIs - Handle __builtin_cpu_is(char *). 3572 /// This checks that the target supports __builtin_cpu_is and 3573 /// that the string argument is constant and valid. 3574 static bool SemaBuiltinCpuIs(Sema &S, const TargetInfo &TI, CallExpr *TheCall) { 3575 Expr *Arg = TheCall->getArg(0); 3576 3577 // Check if the argument is a string literal. 3578 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 3579 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 3580 << Arg->getSourceRange(); 3581 3582 // Check the contents of the string. 3583 StringRef Feature = 3584 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 3585 if (!TI.validateCpuIs(Feature)) 3586 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_is) 3587 << Arg->getSourceRange(); 3588 return false; 3589 } 3590 3591 // Check if the rounding mode is legal. 3592 bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) { 3593 // Indicates if this instruction has rounding control or just SAE. 
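  // (Illustrative note added for clarity; the _MM_FROUND_* names assume the
  // usual <immintrin.h> macros, the numeric values come from the checks at
  // the end of this function: SAE-only builtins accept
  // _MM_FROUND_CUR_DIRECTION (4), _MM_FROUND_NO_EXC (8), or both (12), while
  // builtins with rounding control accept 4, 8, or a rounding mode 0-3 OR'ed
  // with _MM_FROUND_NO_EXC, e.g. _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC == 11.)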
3594 bool HasRC = false; 3595 3596 unsigned ArgNum = 0; 3597 switch (BuiltinID) { 3598 default: 3599 return false; 3600 case X86::BI__builtin_ia32_vcvttsd2si32: 3601 case X86::BI__builtin_ia32_vcvttsd2si64: 3602 case X86::BI__builtin_ia32_vcvttsd2usi32: 3603 case X86::BI__builtin_ia32_vcvttsd2usi64: 3604 case X86::BI__builtin_ia32_vcvttss2si32: 3605 case X86::BI__builtin_ia32_vcvttss2si64: 3606 case X86::BI__builtin_ia32_vcvttss2usi32: 3607 case X86::BI__builtin_ia32_vcvttss2usi64: 3608 ArgNum = 1; 3609 break; 3610 case X86::BI__builtin_ia32_maxpd512: 3611 case X86::BI__builtin_ia32_maxps512: 3612 case X86::BI__builtin_ia32_minpd512: 3613 case X86::BI__builtin_ia32_minps512: 3614 ArgNum = 2; 3615 break; 3616 case X86::BI__builtin_ia32_cvtps2pd512_mask: 3617 case X86::BI__builtin_ia32_cvttpd2dq512_mask: 3618 case X86::BI__builtin_ia32_cvttpd2qq512_mask: 3619 case X86::BI__builtin_ia32_cvttpd2udq512_mask: 3620 case X86::BI__builtin_ia32_cvttpd2uqq512_mask: 3621 case X86::BI__builtin_ia32_cvttps2dq512_mask: 3622 case X86::BI__builtin_ia32_cvttps2qq512_mask: 3623 case X86::BI__builtin_ia32_cvttps2udq512_mask: 3624 case X86::BI__builtin_ia32_cvttps2uqq512_mask: 3625 case X86::BI__builtin_ia32_exp2pd_mask: 3626 case X86::BI__builtin_ia32_exp2ps_mask: 3627 case X86::BI__builtin_ia32_getexppd512_mask: 3628 case X86::BI__builtin_ia32_getexpps512_mask: 3629 case X86::BI__builtin_ia32_rcp28pd_mask: 3630 case X86::BI__builtin_ia32_rcp28ps_mask: 3631 case X86::BI__builtin_ia32_rsqrt28pd_mask: 3632 case X86::BI__builtin_ia32_rsqrt28ps_mask: 3633 case X86::BI__builtin_ia32_vcomisd: 3634 case X86::BI__builtin_ia32_vcomiss: 3635 case X86::BI__builtin_ia32_vcvtph2ps512_mask: 3636 ArgNum = 3; 3637 break; 3638 case X86::BI__builtin_ia32_cmppd512_mask: 3639 case X86::BI__builtin_ia32_cmpps512_mask: 3640 case X86::BI__builtin_ia32_cmpsd_mask: 3641 case X86::BI__builtin_ia32_cmpss_mask: 3642 case X86::BI__builtin_ia32_cvtss2sd_round_mask: 3643 case X86::BI__builtin_ia32_getexpsd128_round_mask: 3644 case X86::BI__builtin_ia32_getexpss128_round_mask: 3645 case X86::BI__builtin_ia32_getmantpd512_mask: 3646 case X86::BI__builtin_ia32_getmantps512_mask: 3647 case X86::BI__builtin_ia32_maxsd_round_mask: 3648 case X86::BI__builtin_ia32_maxss_round_mask: 3649 case X86::BI__builtin_ia32_minsd_round_mask: 3650 case X86::BI__builtin_ia32_minss_round_mask: 3651 case X86::BI__builtin_ia32_rcp28sd_round_mask: 3652 case X86::BI__builtin_ia32_rcp28ss_round_mask: 3653 case X86::BI__builtin_ia32_reducepd512_mask: 3654 case X86::BI__builtin_ia32_reduceps512_mask: 3655 case X86::BI__builtin_ia32_rndscalepd_mask: 3656 case X86::BI__builtin_ia32_rndscaleps_mask: 3657 case X86::BI__builtin_ia32_rsqrt28sd_round_mask: 3658 case X86::BI__builtin_ia32_rsqrt28ss_round_mask: 3659 ArgNum = 4; 3660 break; 3661 case X86::BI__builtin_ia32_fixupimmpd512_mask: 3662 case X86::BI__builtin_ia32_fixupimmpd512_maskz: 3663 case X86::BI__builtin_ia32_fixupimmps512_mask: 3664 case X86::BI__builtin_ia32_fixupimmps512_maskz: 3665 case X86::BI__builtin_ia32_fixupimmsd_mask: 3666 case X86::BI__builtin_ia32_fixupimmsd_maskz: 3667 case X86::BI__builtin_ia32_fixupimmss_mask: 3668 case X86::BI__builtin_ia32_fixupimmss_maskz: 3669 case X86::BI__builtin_ia32_getmantsd_round_mask: 3670 case X86::BI__builtin_ia32_getmantss_round_mask: 3671 case X86::BI__builtin_ia32_rangepd512_mask: 3672 case X86::BI__builtin_ia32_rangeps512_mask: 3673 case X86::BI__builtin_ia32_rangesd128_round_mask: 3674 case X86::BI__builtin_ia32_rangess128_round_mask: 3675 case 
X86::BI__builtin_ia32_reducesd_mask: 3676 case X86::BI__builtin_ia32_reducess_mask: 3677 case X86::BI__builtin_ia32_rndscalesd_round_mask: 3678 case X86::BI__builtin_ia32_rndscaless_round_mask: 3679 ArgNum = 5; 3680 break; 3681 case X86::BI__builtin_ia32_vcvtsd2si64: 3682 case X86::BI__builtin_ia32_vcvtsd2si32: 3683 case X86::BI__builtin_ia32_vcvtsd2usi32: 3684 case X86::BI__builtin_ia32_vcvtsd2usi64: 3685 case X86::BI__builtin_ia32_vcvtss2si32: 3686 case X86::BI__builtin_ia32_vcvtss2si64: 3687 case X86::BI__builtin_ia32_vcvtss2usi32: 3688 case X86::BI__builtin_ia32_vcvtss2usi64: 3689 case X86::BI__builtin_ia32_sqrtpd512: 3690 case X86::BI__builtin_ia32_sqrtps512: 3691 ArgNum = 1; 3692 HasRC = true; 3693 break; 3694 case X86::BI__builtin_ia32_addpd512: 3695 case X86::BI__builtin_ia32_addps512: 3696 case X86::BI__builtin_ia32_divpd512: 3697 case X86::BI__builtin_ia32_divps512: 3698 case X86::BI__builtin_ia32_mulpd512: 3699 case X86::BI__builtin_ia32_mulps512: 3700 case X86::BI__builtin_ia32_subpd512: 3701 case X86::BI__builtin_ia32_subps512: 3702 case X86::BI__builtin_ia32_cvtsi2sd64: 3703 case X86::BI__builtin_ia32_cvtsi2ss32: 3704 case X86::BI__builtin_ia32_cvtsi2ss64: 3705 case X86::BI__builtin_ia32_cvtusi2sd64: 3706 case X86::BI__builtin_ia32_cvtusi2ss32: 3707 case X86::BI__builtin_ia32_cvtusi2ss64: 3708 ArgNum = 2; 3709 HasRC = true; 3710 break; 3711 case X86::BI__builtin_ia32_cvtdq2ps512_mask: 3712 case X86::BI__builtin_ia32_cvtudq2ps512_mask: 3713 case X86::BI__builtin_ia32_cvtpd2ps512_mask: 3714 case X86::BI__builtin_ia32_cvtpd2dq512_mask: 3715 case X86::BI__builtin_ia32_cvtpd2qq512_mask: 3716 case X86::BI__builtin_ia32_cvtpd2udq512_mask: 3717 case X86::BI__builtin_ia32_cvtpd2uqq512_mask: 3718 case X86::BI__builtin_ia32_cvtps2dq512_mask: 3719 case X86::BI__builtin_ia32_cvtps2qq512_mask: 3720 case X86::BI__builtin_ia32_cvtps2udq512_mask: 3721 case X86::BI__builtin_ia32_cvtps2uqq512_mask: 3722 case X86::BI__builtin_ia32_cvtqq2pd512_mask: 3723 case X86::BI__builtin_ia32_cvtqq2ps512_mask: 3724 case X86::BI__builtin_ia32_cvtuqq2pd512_mask: 3725 case X86::BI__builtin_ia32_cvtuqq2ps512_mask: 3726 ArgNum = 3; 3727 HasRC = true; 3728 break; 3729 case X86::BI__builtin_ia32_addss_round_mask: 3730 case X86::BI__builtin_ia32_addsd_round_mask: 3731 case X86::BI__builtin_ia32_divss_round_mask: 3732 case X86::BI__builtin_ia32_divsd_round_mask: 3733 case X86::BI__builtin_ia32_mulss_round_mask: 3734 case X86::BI__builtin_ia32_mulsd_round_mask: 3735 case X86::BI__builtin_ia32_subss_round_mask: 3736 case X86::BI__builtin_ia32_subsd_round_mask: 3737 case X86::BI__builtin_ia32_scalefpd512_mask: 3738 case X86::BI__builtin_ia32_scalefps512_mask: 3739 case X86::BI__builtin_ia32_scalefsd_round_mask: 3740 case X86::BI__builtin_ia32_scalefss_round_mask: 3741 case X86::BI__builtin_ia32_cvtsd2ss_round_mask: 3742 case X86::BI__builtin_ia32_sqrtsd_round_mask: 3743 case X86::BI__builtin_ia32_sqrtss_round_mask: 3744 case X86::BI__builtin_ia32_vfmaddsd3_mask: 3745 case X86::BI__builtin_ia32_vfmaddsd3_maskz: 3746 case X86::BI__builtin_ia32_vfmaddsd3_mask3: 3747 case X86::BI__builtin_ia32_vfmaddss3_mask: 3748 case X86::BI__builtin_ia32_vfmaddss3_maskz: 3749 case X86::BI__builtin_ia32_vfmaddss3_mask3: 3750 case X86::BI__builtin_ia32_vfmaddpd512_mask: 3751 case X86::BI__builtin_ia32_vfmaddpd512_maskz: 3752 case X86::BI__builtin_ia32_vfmaddpd512_mask3: 3753 case X86::BI__builtin_ia32_vfmsubpd512_mask3: 3754 case X86::BI__builtin_ia32_vfmaddps512_mask: 3755 case X86::BI__builtin_ia32_vfmaddps512_maskz: 3756 case 
X86::BI__builtin_ia32_vfmaddps512_mask3: 3757 case X86::BI__builtin_ia32_vfmsubps512_mask3: 3758 case X86::BI__builtin_ia32_vfmaddsubpd512_mask: 3759 case X86::BI__builtin_ia32_vfmaddsubpd512_maskz: 3760 case X86::BI__builtin_ia32_vfmaddsubpd512_mask3: 3761 case X86::BI__builtin_ia32_vfmsubaddpd512_mask3: 3762 case X86::BI__builtin_ia32_vfmaddsubps512_mask: 3763 case X86::BI__builtin_ia32_vfmaddsubps512_maskz: 3764 case X86::BI__builtin_ia32_vfmaddsubps512_mask3: 3765 case X86::BI__builtin_ia32_vfmsubaddps512_mask3: 3766 ArgNum = 4; 3767 HasRC = true; 3768 break; 3769 } 3770 3771 llvm::APSInt Result; 3772 3773 // We can't check the value of a dependent argument. 3774 Expr *Arg = TheCall->getArg(ArgNum); 3775 if (Arg->isTypeDependent() || Arg->isValueDependent()) 3776 return false; 3777 3778 // Check constant-ness first. 3779 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 3780 return true; 3781 3782 // Make sure rounding mode is either ROUND_CUR_DIRECTION or ROUND_NO_EXC bit 3783 // is set. If the intrinsic has rounding control(bits 1:0), make sure its only 3784 // combined with ROUND_NO_EXC. If the intrinsic does not have rounding 3785 // control, allow ROUND_NO_EXC and ROUND_CUR_DIRECTION together. 3786 if (Result == 4/*ROUND_CUR_DIRECTION*/ || 3787 Result == 8/*ROUND_NO_EXC*/ || 3788 (!HasRC && Result == 12/*ROUND_CUR_DIRECTION|ROUND_NO_EXC*/) || 3789 (HasRC && Result.getZExtValue() >= 8 && Result.getZExtValue() <= 11)) 3790 return false; 3791 3792 return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_rounding) 3793 << Arg->getSourceRange(); 3794 } 3795 3796 // Check if the gather/scatter scale is legal. 3797 bool Sema::CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, 3798 CallExpr *TheCall) { 3799 unsigned ArgNum = 0; 3800 switch (BuiltinID) { 3801 default: 3802 return false; 3803 case X86::BI__builtin_ia32_gatherpfdpd: 3804 case X86::BI__builtin_ia32_gatherpfdps: 3805 case X86::BI__builtin_ia32_gatherpfqpd: 3806 case X86::BI__builtin_ia32_gatherpfqps: 3807 case X86::BI__builtin_ia32_scatterpfdpd: 3808 case X86::BI__builtin_ia32_scatterpfdps: 3809 case X86::BI__builtin_ia32_scatterpfqpd: 3810 case X86::BI__builtin_ia32_scatterpfqps: 3811 ArgNum = 3; 3812 break; 3813 case X86::BI__builtin_ia32_gatherd_pd: 3814 case X86::BI__builtin_ia32_gatherd_pd256: 3815 case X86::BI__builtin_ia32_gatherq_pd: 3816 case X86::BI__builtin_ia32_gatherq_pd256: 3817 case X86::BI__builtin_ia32_gatherd_ps: 3818 case X86::BI__builtin_ia32_gatherd_ps256: 3819 case X86::BI__builtin_ia32_gatherq_ps: 3820 case X86::BI__builtin_ia32_gatherq_ps256: 3821 case X86::BI__builtin_ia32_gatherd_q: 3822 case X86::BI__builtin_ia32_gatherd_q256: 3823 case X86::BI__builtin_ia32_gatherq_q: 3824 case X86::BI__builtin_ia32_gatherq_q256: 3825 case X86::BI__builtin_ia32_gatherd_d: 3826 case X86::BI__builtin_ia32_gatherd_d256: 3827 case X86::BI__builtin_ia32_gatherq_d: 3828 case X86::BI__builtin_ia32_gatherq_d256: 3829 case X86::BI__builtin_ia32_gather3div2df: 3830 case X86::BI__builtin_ia32_gather3div2di: 3831 case X86::BI__builtin_ia32_gather3div4df: 3832 case X86::BI__builtin_ia32_gather3div4di: 3833 case X86::BI__builtin_ia32_gather3div4sf: 3834 case X86::BI__builtin_ia32_gather3div4si: 3835 case X86::BI__builtin_ia32_gather3div8sf: 3836 case X86::BI__builtin_ia32_gather3div8si: 3837 case X86::BI__builtin_ia32_gather3siv2df: 3838 case X86::BI__builtin_ia32_gather3siv2di: 3839 case X86::BI__builtin_ia32_gather3siv4df: 3840 case X86::BI__builtin_ia32_gather3siv4di: 3841 case 
X86::BI__builtin_ia32_gather3siv4sf: 3842 case X86::BI__builtin_ia32_gather3siv4si: 3843 case X86::BI__builtin_ia32_gather3siv8sf: 3844 case X86::BI__builtin_ia32_gather3siv8si: 3845 case X86::BI__builtin_ia32_gathersiv8df: 3846 case X86::BI__builtin_ia32_gathersiv16sf: 3847 case X86::BI__builtin_ia32_gatherdiv8df: 3848 case X86::BI__builtin_ia32_gatherdiv16sf: 3849 case X86::BI__builtin_ia32_gathersiv8di: 3850 case X86::BI__builtin_ia32_gathersiv16si: 3851 case X86::BI__builtin_ia32_gatherdiv8di: 3852 case X86::BI__builtin_ia32_gatherdiv16si: 3853 case X86::BI__builtin_ia32_scatterdiv2df: 3854 case X86::BI__builtin_ia32_scatterdiv2di: 3855 case X86::BI__builtin_ia32_scatterdiv4df: 3856 case X86::BI__builtin_ia32_scatterdiv4di: 3857 case X86::BI__builtin_ia32_scatterdiv4sf: 3858 case X86::BI__builtin_ia32_scatterdiv4si: 3859 case X86::BI__builtin_ia32_scatterdiv8sf: 3860 case X86::BI__builtin_ia32_scatterdiv8si: 3861 case X86::BI__builtin_ia32_scattersiv2df: 3862 case X86::BI__builtin_ia32_scattersiv2di: 3863 case X86::BI__builtin_ia32_scattersiv4df: 3864 case X86::BI__builtin_ia32_scattersiv4di: 3865 case X86::BI__builtin_ia32_scattersiv4sf: 3866 case X86::BI__builtin_ia32_scattersiv4si: 3867 case X86::BI__builtin_ia32_scattersiv8sf: 3868 case X86::BI__builtin_ia32_scattersiv8si: 3869 case X86::BI__builtin_ia32_scattersiv8df: 3870 case X86::BI__builtin_ia32_scattersiv16sf: 3871 case X86::BI__builtin_ia32_scatterdiv8df: 3872 case X86::BI__builtin_ia32_scatterdiv16sf: 3873 case X86::BI__builtin_ia32_scattersiv8di: 3874 case X86::BI__builtin_ia32_scattersiv16si: 3875 case X86::BI__builtin_ia32_scatterdiv8di: 3876 case X86::BI__builtin_ia32_scatterdiv16si: 3877 ArgNum = 4; 3878 break; 3879 } 3880 3881 llvm::APSInt Result; 3882 3883 // We can't check the value of a dependent argument. 3884 Expr *Arg = TheCall->getArg(ArgNum); 3885 if (Arg->isTypeDependent() || Arg->isValueDependent()) 3886 return false; 3887 3888 // Check constant-ness first. 3889 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 3890 return true; 3891 3892 if (Result == 1 || Result == 2 || Result == 4 || Result == 8) 3893 return false; 3894 3895 return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_scale) 3896 << Arg->getSourceRange(); 3897 } 3898 3899 enum { TileRegLow = 0, TileRegHigh = 7 }; 3900 3901 bool Sema::CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall, 3902 ArrayRef<int> ArgNums) { 3903 for (int ArgNum : ArgNums) { 3904 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, TileRegLow, TileRegHigh)) 3905 return true; 3906 } 3907 return false; 3908 } 3909 3910 bool Sema::CheckX86BuiltinTileDuplicate(CallExpr *TheCall, 3911 ArrayRef<int> ArgNums) { 3912 // Because the max number of tile register is TileRegHigh + 1, so here we use 3913 // each bit to represent the usage of them in bitset. 
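  //
  // Illustrative example (an added sketch, not from the original source): for
  // __builtin_ia32_tdpbssd the checked arguments are the three tile numbers
  // (see CheckX86BuiltinTileArguments below), so a call using tiles (0, 1, 2)
  // passes, while (1, 1, 2) is rejected here with
  // err_x86_builtin_tile_arg_duplicate because tile 1 is used twice.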
  std::bitset<TileRegHigh + 1> ArgValues;
  for (int ArgNum : ArgNums) {
    Expr *Arg = TheCall->getArg(ArgNum);
    if (Arg->isTypeDependent() || Arg->isValueDependent())
      continue;

    llvm::APSInt Result;
    if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
      return true;
    int ArgExtValue = Result.getExtValue();
    assert((ArgExtValue >= TileRegLow && ArgExtValue <= TileRegHigh) &&
           "Incorrect tile register num.");
    if (ArgValues.test(ArgExtValue))
      return Diag(TheCall->getBeginLoc(),
                  diag::err_x86_builtin_tile_arg_duplicate)
             << TheCall->getArg(ArgNum)->getSourceRange();
    ArgValues.set(ArgExtValue);
  }
  return false;
}

bool Sema::CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall,
                                                ArrayRef<int> ArgNums) {
  return CheckX86BuiltinTileArgumentsRange(TheCall, ArgNums) ||
         CheckX86BuiltinTileDuplicate(TheCall, ArgNums);
}

bool Sema::CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall) {
  switch (BuiltinID) {
  default:
    return false;
  case X86::BI__builtin_ia32_tileloadd64:
  case X86::BI__builtin_ia32_tileloaddt164:
  case X86::BI__builtin_ia32_tilestored64:
  case X86::BI__builtin_ia32_tilezero:
    return CheckX86BuiltinTileArgumentsRange(TheCall, 0);
  case X86::BI__builtin_ia32_tdpbssd:
  case X86::BI__builtin_ia32_tdpbsud:
  case X86::BI__builtin_ia32_tdpbusd:
  case X86::BI__builtin_ia32_tdpbuud:
  case X86::BI__builtin_ia32_tdpbf16ps:
    return CheckX86BuiltinTileRangeAndDuplicate(TheCall, {0, 1, 2});
  }
}

static bool isX86_32Builtin(unsigned BuiltinID) {
  // These builtins only work on x86-32 targets.
  switch (BuiltinID) {
  case X86::BI__builtin_ia32_readeflags_u32:
  case X86::BI__builtin_ia32_writeeflags_u32:
    return true;
  }

  return false;
}

bool Sema::CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                       CallExpr *TheCall) {
  if (BuiltinID == X86::BI__builtin_cpu_supports)
    return SemaBuiltinCpuSupports(*this, TI, TheCall);

  if (BuiltinID == X86::BI__builtin_cpu_is)
    return SemaBuiltinCpuIs(*this, TI, TheCall);

  // Check for 32-bit only builtins on a 64-bit target.
  const llvm::Triple &TT = TI.getTriple();
  if (TT.getArch() != llvm::Triple::x86 && isX86_32Builtin(BuiltinID))
    return Diag(TheCall->getCallee()->getBeginLoc(),
                diag::err_32_bit_builtin_64_bit_tgt);

  // If the intrinsic has rounding or SAE, make sure it's valid.
  if (CheckX86BuiltinRoundingOrSAE(BuiltinID, TheCall))
    return true;

  // If the intrinsic has a gather/scatter scale immediate, make sure it's
  // valid.
  if (CheckX86BuiltinGatherScatterScale(BuiltinID, TheCall))
    return true;

  // If the intrinsic has tile arguments, make sure they are valid.
  if (CheckX86BuiltinTileArguments(BuiltinID, TheCall))
    return true;

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here.
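  // In the switch below, "i" is the index of the immediate argument and
  // [l, u] is the inclusive range it must lie in; for instance,
  // __builtin_ia32_vec_ext_v4si takes its element index as argument 1 and
  // accepts only values 0 through 3.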
3997 int i = 0, l = 0, u = 0; 3998 switch (BuiltinID) { 3999 default: 4000 return false; 4001 case X86::BI__builtin_ia32_vec_ext_v2si: 4002 case X86::BI__builtin_ia32_vec_ext_v2di: 4003 case X86::BI__builtin_ia32_vextractf128_pd256: 4004 case X86::BI__builtin_ia32_vextractf128_ps256: 4005 case X86::BI__builtin_ia32_vextractf128_si256: 4006 case X86::BI__builtin_ia32_extract128i256: 4007 case X86::BI__builtin_ia32_extractf64x4_mask: 4008 case X86::BI__builtin_ia32_extracti64x4_mask: 4009 case X86::BI__builtin_ia32_extractf32x8_mask: 4010 case X86::BI__builtin_ia32_extracti32x8_mask: 4011 case X86::BI__builtin_ia32_extractf64x2_256_mask: 4012 case X86::BI__builtin_ia32_extracti64x2_256_mask: 4013 case X86::BI__builtin_ia32_extractf32x4_256_mask: 4014 case X86::BI__builtin_ia32_extracti32x4_256_mask: 4015 i = 1; l = 0; u = 1; 4016 break; 4017 case X86::BI__builtin_ia32_vec_set_v2di: 4018 case X86::BI__builtin_ia32_vinsertf128_pd256: 4019 case X86::BI__builtin_ia32_vinsertf128_ps256: 4020 case X86::BI__builtin_ia32_vinsertf128_si256: 4021 case X86::BI__builtin_ia32_insert128i256: 4022 case X86::BI__builtin_ia32_insertf32x8: 4023 case X86::BI__builtin_ia32_inserti32x8: 4024 case X86::BI__builtin_ia32_insertf64x4: 4025 case X86::BI__builtin_ia32_inserti64x4: 4026 case X86::BI__builtin_ia32_insertf64x2_256: 4027 case X86::BI__builtin_ia32_inserti64x2_256: 4028 case X86::BI__builtin_ia32_insertf32x4_256: 4029 case X86::BI__builtin_ia32_inserti32x4_256: 4030 i = 2; l = 0; u = 1; 4031 break; 4032 case X86::BI__builtin_ia32_vpermilpd: 4033 case X86::BI__builtin_ia32_vec_ext_v4hi: 4034 case X86::BI__builtin_ia32_vec_ext_v4si: 4035 case X86::BI__builtin_ia32_vec_ext_v4sf: 4036 case X86::BI__builtin_ia32_vec_ext_v4di: 4037 case X86::BI__builtin_ia32_extractf32x4_mask: 4038 case X86::BI__builtin_ia32_extracti32x4_mask: 4039 case X86::BI__builtin_ia32_extractf64x2_512_mask: 4040 case X86::BI__builtin_ia32_extracti64x2_512_mask: 4041 i = 1; l = 0; u = 3; 4042 break; 4043 case X86::BI_mm_prefetch: 4044 case X86::BI__builtin_ia32_vec_ext_v8hi: 4045 case X86::BI__builtin_ia32_vec_ext_v8si: 4046 i = 1; l = 0; u = 7; 4047 break; 4048 case X86::BI__builtin_ia32_sha1rnds4: 4049 case X86::BI__builtin_ia32_blendpd: 4050 case X86::BI__builtin_ia32_shufpd: 4051 case X86::BI__builtin_ia32_vec_set_v4hi: 4052 case X86::BI__builtin_ia32_vec_set_v4si: 4053 case X86::BI__builtin_ia32_vec_set_v4di: 4054 case X86::BI__builtin_ia32_shuf_f32x4_256: 4055 case X86::BI__builtin_ia32_shuf_f64x2_256: 4056 case X86::BI__builtin_ia32_shuf_i32x4_256: 4057 case X86::BI__builtin_ia32_shuf_i64x2_256: 4058 case X86::BI__builtin_ia32_insertf64x2_512: 4059 case X86::BI__builtin_ia32_inserti64x2_512: 4060 case X86::BI__builtin_ia32_insertf32x4: 4061 case X86::BI__builtin_ia32_inserti32x4: 4062 i = 2; l = 0; u = 3; 4063 break; 4064 case X86::BI__builtin_ia32_vpermil2pd: 4065 case X86::BI__builtin_ia32_vpermil2pd256: 4066 case X86::BI__builtin_ia32_vpermil2ps: 4067 case X86::BI__builtin_ia32_vpermil2ps256: 4068 i = 3; l = 0; u = 3; 4069 break; 4070 case X86::BI__builtin_ia32_cmpb128_mask: 4071 case X86::BI__builtin_ia32_cmpw128_mask: 4072 case X86::BI__builtin_ia32_cmpd128_mask: 4073 case X86::BI__builtin_ia32_cmpq128_mask: 4074 case X86::BI__builtin_ia32_cmpb256_mask: 4075 case X86::BI__builtin_ia32_cmpw256_mask: 4076 case X86::BI__builtin_ia32_cmpd256_mask: 4077 case X86::BI__builtin_ia32_cmpq256_mask: 4078 case X86::BI__builtin_ia32_cmpb512_mask: 4079 case X86::BI__builtin_ia32_cmpw512_mask: 4080 case X86::BI__builtin_ia32_cmpd512_mask: 
4081 case X86::BI__builtin_ia32_cmpq512_mask: 4082 case X86::BI__builtin_ia32_ucmpb128_mask: 4083 case X86::BI__builtin_ia32_ucmpw128_mask: 4084 case X86::BI__builtin_ia32_ucmpd128_mask: 4085 case X86::BI__builtin_ia32_ucmpq128_mask: 4086 case X86::BI__builtin_ia32_ucmpb256_mask: 4087 case X86::BI__builtin_ia32_ucmpw256_mask: 4088 case X86::BI__builtin_ia32_ucmpd256_mask: 4089 case X86::BI__builtin_ia32_ucmpq256_mask: 4090 case X86::BI__builtin_ia32_ucmpb512_mask: 4091 case X86::BI__builtin_ia32_ucmpw512_mask: 4092 case X86::BI__builtin_ia32_ucmpd512_mask: 4093 case X86::BI__builtin_ia32_ucmpq512_mask: 4094 case X86::BI__builtin_ia32_vpcomub: 4095 case X86::BI__builtin_ia32_vpcomuw: 4096 case X86::BI__builtin_ia32_vpcomud: 4097 case X86::BI__builtin_ia32_vpcomuq: 4098 case X86::BI__builtin_ia32_vpcomb: 4099 case X86::BI__builtin_ia32_vpcomw: 4100 case X86::BI__builtin_ia32_vpcomd: 4101 case X86::BI__builtin_ia32_vpcomq: 4102 case X86::BI__builtin_ia32_vec_set_v8hi: 4103 case X86::BI__builtin_ia32_vec_set_v8si: 4104 i = 2; l = 0; u = 7; 4105 break; 4106 case X86::BI__builtin_ia32_vpermilpd256: 4107 case X86::BI__builtin_ia32_roundps: 4108 case X86::BI__builtin_ia32_roundpd: 4109 case X86::BI__builtin_ia32_roundps256: 4110 case X86::BI__builtin_ia32_roundpd256: 4111 case X86::BI__builtin_ia32_getmantpd128_mask: 4112 case X86::BI__builtin_ia32_getmantpd256_mask: 4113 case X86::BI__builtin_ia32_getmantps128_mask: 4114 case X86::BI__builtin_ia32_getmantps256_mask: 4115 case X86::BI__builtin_ia32_getmantpd512_mask: 4116 case X86::BI__builtin_ia32_getmantps512_mask: 4117 case X86::BI__builtin_ia32_vec_ext_v16qi: 4118 case X86::BI__builtin_ia32_vec_ext_v16hi: 4119 i = 1; l = 0; u = 15; 4120 break; 4121 case X86::BI__builtin_ia32_pblendd128: 4122 case X86::BI__builtin_ia32_blendps: 4123 case X86::BI__builtin_ia32_blendpd256: 4124 case X86::BI__builtin_ia32_shufpd256: 4125 case X86::BI__builtin_ia32_roundss: 4126 case X86::BI__builtin_ia32_roundsd: 4127 case X86::BI__builtin_ia32_rangepd128_mask: 4128 case X86::BI__builtin_ia32_rangepd256_mask: 4129 case X86::BI__builtin_ia32_rangepd512_mask: 4130 case X86::BI__builtin_ia32_rangeps128_mask: 4131 case X86::BI__builtin_ia32_rangeps256_mask: 4132 case X86::BI__builtin_ia32_rangeps512_mask: 4133 case X86::BI__builtin_ia32_getmantsd_round_mask: 4134 case X86::BI__builtin_ia32_getmantss_round_mask: 4135 case X86::BI__builtin_ia32_vec_set_v16qi: 4136 case X86::BI__builtin_ia32_vec_set_v16hi: 4137 i = 2; l = 0; u = 15; 4138 break; 4139 case X86::BI__builtin_ia32_vec_ext_v32qi: 4140 i = 1; l = 0; u = 31; 4141 break; 4142 case X86::BI__builtin_ia32_cmpps: 4143 case X86::BI__builtin_ia32_cmpss: 4144 case X86::BI__builtin_ia32_cmppd: 4145 case X86::BI__builtin_ia32_cmpsd: 4146 case X86::BI__builtin_ia32_cmpps256: 4147 case X86::BI__builtin_ia32_cmppd256: 4148 case X86::BI__builtin_ia32_cmpps128_mask: 4149 case X86::BI__builtin_ia32_cmppd128_mask: 4150 case X86::BI__builtin_ia32_cmpps256_mask: 4151 case X86::BI__builtin_ia32_cmppd256_mask: 4152 case X86::BI__builtin_ia32_cmpps512_mask: 4153 case X86::BI__builtin_ia32_cmppd512_mask: 4154 case X86::BI__builtin_ia32_cmpsd_mask: 4155 case X86::BI__builtin_ia32_cmpss_mask: 4156 case X86::BI__builtin_ia32_vec_set_v32qi: 4157 i = 2; l = 0; u = 31; 4158 break; 4159 case X86::BI__builtin_ia32_permdf256: 4160 case X86::BI__builtin_ia32_permdi256: 4161 case X86::BI__builtin_ia32_permdf512: 4162 case X86::BI__builtin_ia32_permdi512: 4163 case X86::BI__builtin_ia32_vpermilps: 4164 case X86::BI__builtin_ia32_vpermilps256: 
4165 case X86::BI__builtin_ia32_vpermilpd512: 4166 case X86::BI__builtin_ia32_vpermilps512: 4167 case X86::BI__builtin_ia32_pshufd: 4168 case X86::BI__builtin_ia32_pshufd256: 4169 case X86::BI__builtin_ia32_pshufd512: 4170 case X86::BI__builtin_ia32_pshufhw: 4171 case X86::BI__builtin_ia32_pshufhw256: 4172 case X86::BI__builtin_ia32_pshufhw512: 4173 case X86::BI__builtin_ia32_pshuflw: 4174 case X86::BI__builtin_ia32_pshuflw256: 4175 case X86::BI__builtin_ia32_pshuflw512: 4176 case X86::BI__builtin_ia32_vcvtps2ph: 4177 case X86::BI__builtin_ia32_vcvtps2ph_mask: 4178 case X86::BI__builtin_ia32_vcvtps2ph256: 4179 case X86::BI__builtin_ia32_vcvtps2ph256_mask: 4180 case X86::BI__builtin_ia32_vcvtps2ph512_mask: 4181 case X86::BI__builtin_ia32_rndscaleps_128_mask: 4182 case X86::BI__builtin_ia32_rndscalepd_128_mask: 4183 case X86::BI__builtin_ia32_rndscaleps_256_mask: 4184 case X86::BI__builtin_ia32_rndscalepd_256_mask: 4185 case X86::BI__builtin_ia32_rndscaleps_mask: 4186 case X86::BI__builtin_ia32_rndscalepd_mask: 4187 case X86::BI__builtin_ia32_reducepd128_mask: 4188 case X86::BI__builtin_ia32_reducepd256_mask: 4189 case X86::BI__builtin_ia32_reducepd512_mask: 4190 case X86::BI__builtin_ia32_reduceps128_mask: 4191 case X86::BI__builtin_ia32_reduceps256_mask: 4192 case X86::BI__builtin_ia32_reduceps512_mask: 4193 case X86::BI__builtin_ia32_prold512: 4194 case X86::BI__builtin_ia32_prolq512: 4195 case X86::BI__builtin_ia32_prold128: 4196 case X86::BI__builtin_ia32_prold256: 4197 case X86::BI__builtin_ia32_prolq128: 4198 case X86::BI__builtin_ia32_prolq256: 4199 case X86::BI__builtin_ia32_prord512: 4200 case X86::BI__builtin_ia32_prorq512: 4201 case X86::BI__builtin_ia32_prord128: 4202 case X86::BI__builtin_ia32_prord256: 4203 case X86::BI__builtin_ia32_prorq128: 4204 case X86::BI__builtin_ia32_prorq256: 4205 case X86::BI__builtin_ia32_fpclasspd128_mask: 4206 case X86::BI__builtin_ia32_fpclasspd256_mask: 4207 case X86::BI__builtin_ia32_fpclassps128_mask: 4208 case X86::BI__builtin_ia32_fpclassps256_mask: 4209 case X86::BI__builtin_ia32_fpclassps512_mask: 4210 case X86::BI__builtin_ia32_fpclasspd512_mask: 4211 case X86::BI__builtin_ia32_fpclasssd_mask: 4212 case X86::BI__builtin_ia32_fpclassss_mask: 4213 case X86::BI__builtin_ia32_pslldqi128_byteshift: 4214 case X86::BI__builtin_ia32_pslldqi256_byteshift: 4215 case X86::BI__builtin_ia32_pslldqi512_byteshift: 4216 case X86::BI__builtin_ia32_psrldqi128_byteshift: 4217 case X86::BI__builtin_ia32_psrldqi256_byteshift: 4218 case X86::BI__builtin_ia32_psrldqi512_byteshift: 4219 case X86::BI__builtin_ia32_kshiftliqi: 4220 case X86::BI__builtin_ia32_kshiftlihi: 4221 case X86::BI__builtin_ia32_kshiftlisi: 4222 case X86::BI__builtin_ia32_kshiftlidi: 4223 case X86::BI__builtin_ia32_kshiftriqi: 4224 case X86::BI__builtin_ia32_kshiftrihi: 4225 case X86::BI__builtin_ia32_kshiftrisi: 4226 case X86::BI__builtin_ia32_kshiftridi: 4227 i = 1; l = 0; u = 255; 4228 break; 4229 case X86::BI__builtin_ia32_vperm2f128_pd256: 4230 case X86::BI__builtin_ia32_vperm2f128_ps256: 4231 case X86::BI__builtin_ia32_vperm2f128_si256: 4232 case X86::BI__builtin_ia32_permti256: 4233 case X86::BI__builtin_ia32_pblendw128: 4234 case X86::BI__builtin_ia32_pblendw256: 4235 case X86::BI__builtin_ia32_blendps256: 4236 case X86::BI__builtin_ia32_pblendd256: 4237 case X86::BI__builtin_ia32_palignr128: 4238 case X86::BI__builtin_ia32_palignr256: 4239 case X86::BI__builtin_ia32_palignr512: 4240 case X86::BI__builtin_ia32_alignq512: 4241 case X86::BI__builtin_ia32_alignd512: 4242 case 
X86::BI__builtin_ia32_alignd128: 4243 case X86::BI__builtin_ia32_alignd256: 4244 case X86::BI__builtin_ia32_alignq128: 4245 case X86::BI__builtin_ia32_alignq256: 4246 case X86::BI__builtin_ia32_vcomisd: 4247 case X86::BI__builtin_ia32_vcomiss: 4248 case X86::BI__builtin_ia32_shuf_f32x4: 4249 case X86::BI__builtin_ia32_shuf_f64x2: 4250 case X86::BI__builtin_ia32_shuf_i32x4: 4251 case X86::BI__builtin_ia32_shuf_i64x2: 4252 case X86::BI__builtin_ia32_shufpd512: 4253 case X86::BI__builtin_ia32_shufps: 4254 case X86::BI__builtin_ia32_shufps256: 4255 case X86::BI__builtin_ia32_shufps512: 4256 case X86::BI__builtin_ia32_dbpsadbw128: 4257 case X86::BI__builtin_ia32_dbpsadbw256: 4258 case X86::BI__builtin_ia32_dbpsadbw512: 4259 case X86::BI__builtin_ia32_vpshldd128: 4260 case X86::BI__builtin_ia32_vpshldd256: 4261 case X86::BI__builtin_ia32_vpshldd512: 4262 case X86::BI__builtin_ia32_vpshldq128: 4263 case X86::BI__builtin_ia32_vpshldq256: 4264 case X86::BI__builtin_ia32_vpshldq512: 4265 case X86::BI__builtin_ia32_vpshldw128: 4266 case X86::BI__builtin_ia32_vpshldw256: 4267 case X86::BI__builtin_ia32_vpshldw512: 4268 case X86::BI__builtin_ia32_vpshrdd128: 4269 case X86::BI__builtin_ia32_vpshrdd256: 4270 case X86::BI__builtin_ia32_vpshrdd512: 4271 case X86::BI__builtin_ia32_vpshrdq128: 4272 case X86::BI__builtin_ia32_vpshrdq256: 4273 case X86::BI__builtin_ia32_vpshrdq512: 4274 case X86::BI__builtin_ia32_vpshrdw128: 4275 case X86::BI__builtin_ia32_vpshrdw256: 4276 case X86::BI__builtin_ia32_vpshrdw512: 4277 i = 2; l = 0; u = 255; 4278 break; 4279 case X86::BI__builtin_ia32_fixupimmpd512_mask: 4280 case X86::BI__builtin_ia32_fixupimmpd512_maskz: 4281 case X86::BI__builtin_ia32_fixupimmps512_mask: 4282 case X86::BI__builtin_ia32_fixupimmps512_maskz: 4283 case X86::BI__builtin_ia32_fixupimmsd_mask: 4284 case X86::BI__builtin_ia32_fixupimmsd_maskz: 4285 case X86::BI__builtin_ia32_fixupimmss_mask: 4286 case X86::BI__builtin_ia32_fixupimmss_maskz: 4287 case X86::BI__builtin_ia32_fixupimmpd128_mask: 4288 case X86::BI__builtin_ia32_fixupimmpd128_maskz: 4289 case X86::BI__builtin_ia32_fixupimmpd256_mask: 4290 case X86::BI__builtin_ia32_fixupimmpd256_maskz: 4291 case X86::BI__builtin_ia32_fixupimmps128_mask: 4292 case X86::BI__builtin_ia32_fixupimmps128_maskz: 4293 case X86::BI__builtin_ia32_fixupimmps256_mask: 4294 case X86::BI__builtin_ia32_fixupimmps256_maskz: 4295 case X86::BI__builtin_ia32_pternlogd512_mask: 4296 case X86::BI__builtin_ia32_pternlogd512_maskz: 4297 case X86::BI__builtin_ia32_pternlogq512_mask: 4298 case X86::BI__builtin_ia32_pternlogq512_maskz: 4299 case X86::BI__builtin_ia32_pternlogd128_mask: 4300 case X86::BI__builtin_ia32_pternlogd128_maskz: 4301 case X86::BI__builtin_ia32_pternlogd256_mask: 4302 case X86::BI__builtin_ia32_pternlogd256_maskz: 4303 case X86::BI__builtin_ia32_pternlogq128_mask: 4304 case X86::BI__builtin_ia32_pternlogq128_maskz: 4305 case X86::BI__builtin_ia32_pternlogq256_mask: 4306 case X86::BI__builtin_ia32_pternlogq256_maskz: 4307 i = 3; l = 0; u = 255; 4308 break; 4309 case X86::BI__builtin_ia32_gatherpfdpd: 4310 case X86::BI__builtin_ia32_gatherpfdps: 4311 case X86::BI__builtin_ia32_gatherpfqpd: 4312 case X86::BI__builtin_ia32_gatherpfqps: 4313 case X86::BI__builtin_ia32_scatterpfdpd: 4314 case X86::BI__builtin_ia32_scatterpfdps: 4315 case X86::BI__builtin_ia32_scatterpfqpd: 4316 case X86::BI__builtin_ia32_scatterpfqps: 4317 i = 4; l = 2; u = 3; 4318 break; 4319 case X86::BI__builtin_ia32_reducesd_mask: 4320 case X86::BI__builtin_ia32_reducess_mask: 4321 case 
X86::BI__builtin_ia32_rndscalesd_round_mask: 4322 case X86::BI__builtin_ia32_rndscaless_round_mask: 4323 i = 4; l = 0; u = 255; 4324 break; 4325 } 4326
4327 // Note that we don't force a hard error on the range check here, allowing 4328 // template-generated or macro-generated dead code to potentially have out-of- 4329 // range values. These need to code generate, but don't necessarily need to 4330 // make any sense. We use a warning that defaults to an error. 4331 return SemaBuiltinConstantArgRange(TheCall, i, l, u, /*RangeIsError*/ false); 4332 } 4333
4334 /// Given a FunctionDecl's FormatAttr, attempts to populate the FormatStringInfo 4335 /// parameter with the FormatAttr's correct format_idx and firstDataArg. 4336 /// Returns true when the format fits the function and the FormatStringInfo has 4337 /// been populated. 4338 bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, 4339 FormatStringInfo *FSI) { 4340 FSI->HasVAListArg = Format->getFirstArg() == 0; 4341 FSI->FormatIdx = Format->getFormatIdx() - 1; 4342 FSI->FirstDataArg = FSI->HasVAListArg ? 0 : Format->getFirstArg() - 1; 4343
4344 // The way the format attribute works in GCC, the implicit this argument 4345 // of member functions is counted. However, it doesn't appear in our own 4346 // lists, so decrement format_idx in that case. 4347 if (IsCXXMember) { 4348 if (FSI->FormatIdx == 0) 4349 return false; 4350 --FSI->FormatIdx; 4351 if (FSI->FirstDataArg != 0) 4352 --FSI->FirstDataArg; 4353 } 4354 return true; 4355 } 4356
4357 /// Checks if the given expression evaluates to null. 4358 /// 4359 /// Returns true if the value evaluates to null. 4360 static bool CheckNonNullExpr(Sema &S, const Expr *Expr) { 4361 // If the expression has non-null type, it doesn't evaluate to null. 4362 if (auto nullability 4363 = Expr->IgnoreImplicit()->getType()->getNullability(S.Context)) { 4364 if (*nullability == NullabilityKind::NonNull) 4365 return false; 4366 } 4367
4368 // As a special case, transparent unions initialized with zero are 4369 // considered null for the purposes of the nonnull attribute. 4370 if (const RecordType *UT = Expr->getType()->getAsUnionType()) { 4371 if (UT->getDecl()->hasAttr<TransparentUnionAttr>()) 4372 if (const CompoundLiteralExpr *CLE = 4373 dyn_cast<CompoundLiteralExpr>(Expr)) 4374 if (const InitListExpr *ILE = 4375 dyn_cast<InitListExpr>(CLE->getInitializer())) 4376 Expr = ILE->getInit(0); 4377 } 4378
4379 bool Result; 4380 return (!Expr->isValueDependent() && 4381 Expr->EvaluateAsBooleanCondition(Result, S.Context) && 4382 !Result); 4383 } 4384
4385 static void CheckNonNullArgument(Sema &S, 4386 const Expr *ArgExpr, 4387 SourceLocation CallSiteLoc) { 4388 if (CheckNonNullExpr(S, ArgExpr)) 4389 S.DiagRuntimeBehavior(CallSiteLoc, ArgExpr, 4390 S.PDiag(diag::warn_null_arg) 4391 << ArgExpr->getSourceRange()); 4392 } 4393
4394 bool Sema::GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx) { 4395 FormatStringInfo FSI; 4396 if ((GetFormatStringType(Format) == FST_NSString) && 4397 getFormatStringInfo(Format, false, &FSI)) { 4398 Idx = FSI.FormatIdx; 4399 return true; 4400 } 4401 return false; 4402 } 4403
4404 /// Diagnose use of the %s directive in an NSString that is being passed 4405 /// as a format string to a formatting method.
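/// Illustrative sketch (not from the original source), assuming a typical
/// CoreFoundation call site with a 'const char *name'; hypothetical code this
/// would warn on:
///   CFStringCreateWithFormat(NULL, NULL, (CFStringRef)@"name: %s", name);
/// The CFString-family format argument contains a %s C-string directive,
/// which is reported via warn_objc_cdirective_format_string below.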
4406 static void 4407 DiagnoseCStringFormatDirectiveInCFAPI(Sema &S, 4408 const NamedDecl *FDecl, 4409 Expr **Args, 4410 unsigned NumArgs) { 4411 unsigned Idx = 0; 4412 bool Format = false; 4413 ObjCStringFormatFamily SFFamily = FDecl->getObjCFStringFormattingFamily(); 4414 if (SFFamily == ObjCStringFormatFamily::SFF_CFString) { 4415 Idx = 2; 4416 Format = true; 4417 } 4418 else 4419 for (const auto *I : FDecl->specific_attrs<FormatAttr>()) { 4420 if (S.GetFormatNSStringIdx(I, Idx)) { 4421 Format = true; 4422 break; 4423 } 4424 } 4425 if (!Format || NumArgs <= Idx) 4426 return; 4427 const Expr *FormatExpr = Args[Idx]; 4428 if (const CStyleCastExpr *CSCE = dyn_cast<CStyleCastExpr>(FormatExpr)) 4429 FormatExpr = CSCE->getSubExpr(); 4430 const StringLiteral *FormatString; 4431 if (const ObjCStringLiteral *OSL = 4432 dyn_cast<ObjCStringLiteral>(FormatExpr->IgnoreParenImpCasts())) 4433 FormatString = OSL->getString(); 4434 else 4435 FormatString = dyn_cast<StringLiteral>(FormatExpr->IgnoreParenImpCasts()); 4436 if (!FormatString) 4437 return; 4438 if (S.FormatStringHasSArg(FormatString)) { 4439 S.Diag(FormatExpr->getExprLoc(), diag::warn_objc_cdirective_format_string) 4440 << "%s" << 1 << 1; 4441 S.Diag(FDecl->getLocation(), diag::note_entity_declared_at) 4442 << FDecl->getDeclName(); 4443 } 4444 } 4445 4446 /// Determine whether the given type has a non-null nullability annotation. 4447 static bool isNonNullType(ASTContext &ctx, QualType type) { 4448 if (auto nullability = type->getNullability(ctx)) 4449 return *nullability == NullabilityKind::NonNull; 4450 4451 return false; 4452 } 4453 4454 static void CheckNonNullArguments(Sema &S, 4455 const NamedDecl *FDecl, 4456 const FunctionProtoType *Proto, 4457 ArrayRef<const Expr *> Args, 4458 SourceLocation CallSiteLoc) { 4459 assert((FDecl || Proto) && "Need a function declaration or prototype"); 4460 4461 // Already checked by by constant evaluator. 4462 if (S.isConstantEvaluated()) 4463 return; 4464 // Check the attributes attached to the method/function itself. 4465 llvm::SmallBitVector NonNullArgs; 4466 if (FDecl) { 4467 // Handle the nonnull attribute on the function/method declaration itself. 4468 for (const auto *NonNull : FDecl->specific_attrs<NonNullAttr>()) { 4469 if (!NonNull->args_size()) { 4470 // Easy case: all pointer arguments are nonnull. 4471 for (const auto *Arg : Args) 4472 if (S.isValidPointerAttrType(Arg->getType())) 4473 CheckNonNullArgument(S, Arg, CallSiteLoc); 4474 return; 4475 } 4476 4477 for (const ParamIdx &Idx : NonNull->args()) { 4478 unsigned IdxAST = Idx.getASTIndex(); 4479 if (IdxAST >= Args.size()) 4480 continue; 4481 if (NonNullArgs.empty()) 4482 NonNullArgs.resize(Args.size()); 4483 NonNullArgs.set(IdxAST); 4484 } 4485 } 4486 } 4487 4488 if (FDecl && (isa<FunctionDecl>(FDecl) || isa<ObjCMethodDecl>(FDecl))) { 4489 // Handle the nonnull attribute on the parameters of the 4490 // function/method. 
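// Illustrative sketch (hypothetical declarations, not from the original
// source): both parameter spellings below are collected here, so passing a
// null constant for 'p' is diagnosed via warn_null_arg:
//   void f(int *p __attribute__((nonnull)));
//   void g(int * _Nonnull p);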
4491 ArrayRef<ParmVarDecl*> parms; 4492 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(FDecl)) 4493 parms = FD->parameters(); 4494 else 4495 parms = cast<ObjCMethodDecl>(FDecl)->parameters(); 4496 4497 unsigned ParamIndex = 0; 4498 for (ArrayRef<ParmVarDecl*>::iterator I = parms.begin(), E = parms.end(); 4499 I != E; ++I, ++ParamIndex) { 4500 const ParmVarDecl *PVD = *I; 4501 if (PVD->hasAttr<NonNullAttr>() || 4502 isNonNullType(S.Context, PVD->getType())) { 4503 if (NonNullArgs.empty()) 4504 NonNullArgs.resize(Args.size()); 4505 4506 NonNullArgs.set(ParamIndex); 4507 } 4508 } 4509 } else { 4510 // If we have a non-function, non-method declaration but no 4511 // function prototype, try to dig out the function prototype. 4512 if (!Proto) { 4513 if (const ValueDecl *VD = dyn_cast<ValueDecl>(FDecl)) { 4514 QualType type = VD->getType().getNonReferenceType(); 4515 if (auto pointerType = type->getAs<PointerType>()) 4516 type = pointerType->getPointeeType(); 4517 else if (auto blockType = type->getAs<BlockPointerType>()) 4518 type = blockType->getPointeeType(); 4519 // FIXME: data member pointers? 4520 4521 // Dig out the function prototype, if there is one. 4522 Proto = type->getAs<FunctionProtoType>(); 4523 } 4524 } 4525 4526 // Fill in non-null argument information from the nullability 4527 // information on the parameter types (if we have them). 4528 if (Proto) { 4529 unsigned Index = 0; 4530 for (auto paramType : Proto->getParamTypes()) { 4531 if (isNonNullType(S.Context, paramType)) { 4532 if (NonNullArgs.empty()) 4533 NonNullArgs.resize(Args.size()); 4534 4535 NonNullArgs.set(Index); 4536 } 4537 4538 ++Index; 4539 } 4540 } 4541 } 4542 4543 // Check for non-null arguments. 4544 for (unsigned ArgIndex = 0, ArgIndexEnd = NonNullArgs.size(); 4545 ArgIndex != ArgIndexEnd; ++ArgIndex) { 4546 if (NonNullArgs[ArgIndex]) 4547 CheckNonNullArgument(S, Args[ArgIndex], CallSiteLoc); 4548 } 4549 } 4550 4551 /// Warn if a pointer or reference argument passed to a function points to an 4552 /// object that is less aligned than the parameter. This can happen when 4553 /// creating a typedef with a lower alignment than the original type and then 4554 /// calling functions defined in terms of the original type. 4555 void Sema::CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl, 4556 StringRef ParamName, QualType ArgTy, 4557 QualType ParamTy) { 4558 4559 // If a function accepts a pointer or reference type 4560 if (!ParamTy->isPointerType() && !ParamTy->isReferenceType()) 4561 return; 4562 4563 // If the parameter is a pointer type, get the pointee type for the 4564 // argument too. If the parameter is a reference type, don't try to get 4565 // the pointee type for the argument. 4566 if (ParamTy->isPointerType()) 4567 ArgTy = ArgTy->getPointeeType(); 4568 4569 // Remove reference or pointer 4570 ParamTy = ParamTy->getPointeeType(); 4571 4572 // Find expected alignment, and the actual alignment of the passed object. 4573 // getTypeAlignInChars requires complete types 4574 if (ParamTy->isIncompleteType() || ArgTy->isIncompleteType() || 4575 ParamTy->isUndeducedType() || ArgTy->isUndeducedType()) 4576 return; 4577 4578 CharUnits ParamAlign = Context.getTypeAlignInChars(ParamTy); 4579 CharUnits ArgAlign = Context.getTypeAlignInChars(ArgTy); 4580 4581 // If the argument is less aligned than the parameter, there is a 4582 // potential alignment issue. 
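// Illustrative sketch (hypothetical user code, not from the original source)
// of the situation diagnosed below:
//   typedef double UnderAligned __attribute__((aligned(2)));
//   void consume(double *p);   // pointee expects the natural alignment of 'double'
//   UnderAligned d;
//   consume(&d);               // 2-byte-aligned object passed -> warning below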
4583 if (ArgAlign < ParamAlign) 4584 Diag(Loc, diag::warn_param_mismatched_alignment) 4585 << (int)ArgAlign.getQuantity() << (int)ParamAlign.getQuantity() 4586 << ParamName << FDecl; 4587 } 4588 4589 /// Handles the checks for format strings, non-POD arguments to vararg 4590 /// functions, NULL arguments passed to non-NULL parameters, and diagnose_if 4591 /// attributes. 4592 void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, 4593 const Expr *ThisArg, ArrayRef<const Expr *> Args, 4594 bool IsMemberFunction, SourceLocation Loc, 4595 SourceRange Range, VariadicCallType CallType) { 4596 // FIXME: We should check as much as we can in the template definition. 4597 if (CurContext->isDependentContext()) 4598 return; 4599 4600 // Printf and scanf checking. 4601 llvm::SmallBitVector CheckedVarArgs; 4602 if (FDecl) { 4603 for (const auto *I : FDecl->specific_attrs<FormatAttr>()) { 4604 // Only create vector if there are format attributes. 4605 CheckedVarArgs.resize(Args.size()); 4606 4607 CheckFormatArguments(I, Args, IsMemberFunction, CallType, Loc, Range, 4608 CheckedVarArgs); 4609 } 4610 } 4611 4612 // Refuse POD arguments that weren't caught by the format string 4613 // checks above. 4614 auto *FD = dyn_cast_or_null<FunctionDecl>(FDecl); 4615 if (CallType != VariadicDoesNotApply && 4616 (!FD || FD->getBuiltinID() != Builtin::BI__noop)) { 4617 unsigned NumParams = Proto ? Proto->getNumParams() 4618 : FDecl && isa<FunctionDecl>(FDecl) 4619 ? cast<FunctionDecl>(FDecl)->getNumParams() 4620 : FDecl && isa<ObjCMethodDecl>(FDecl) 4621 ? cast<ObjCMethodDecl>(FDecl)->param_size() 4622 : 0; 4623 4624 for (unsigned ArgIdx = NumParams; ArgIdx < Args.size(); ++ArgIdx) { 4625 // Args[ArgIdx] can be null in malformed code. 4626 if (const Expr *Arg = Args[ArgIdx]) { 4627 if (CheckedVarArgs.empty() || !CheckedVarArgs[ArgIdx]) 4628 checkVariadicArgument(Arg, CallType); 4629 } 4630 } 4631 } 4632 4633 if (FDecl || Proto) { 4634 CheckNonNullArguments(*this, FDecl, Proto, Args, Loc); 4635 4636 // Type safety checking. 4637 if (FDecl) { 4638 for (const auto *I : FDecl->specific_attrs<ArgumentWithTypeTagAttr>()) 4639 CheckArgumentWithTypeTag(I, Args, Loc); 4640 } 4641 } 4642 4643 // Check that passed arguments match the alignment of original arguments. 4644 // Try to get the missing prototype from the declaration. 4645 if (!Proto && FDecl) { 4646 const auto *FT = FDecl->getFunctionType(); 4647 if (isa_and_nonnull<FunctionProtoType>(FT)) 4648 Proto = cast<FunctionProtoType>(FDecl->getFunctionType()); 4649 } 4650 if (Proto) { 4651 // For variadic functions, we may have more args than parameters. 4652 // For some K&R functions, we may have less args than parameters. 4653 const auto N = std::min<unsigned>(Proto->getNumParams(), Args.size()); 4654 for (unsigned ArgIdx = 0; ArgIdx < N; ++ArgIdx) { 4655 // Args[ArgIdx] can be null in malformed code. 
4656 if (const Expr *Arg = Args[ArgIdx]) { 4657 QualType ParamTy = Proto->getParamType(ArgIdx); 4658 QualType ArgTy = Arg->getType(); 4659 CheckArgAlignment(Arg->getExprLoc(), FDecl, std::to_string(ArgIdx + 1), 4660 ArgTy, ParamTy); 4661 } 4662 } 4663 } 4664 4665 if (FDecl && FDecl->hasAttr<AllocAlignAttr>()) { 4666 auto *AA = FDecl->getAttr<AllocAlignAttr>(); 4667 const Expr *Arg = Args[AA->getParamIndex().getASTIndex()]; 4668 if (!Arg->isValueDependent()) { 4669 Expr::EvalResult Align; 4670 if (Arg->EvaluateAsInt(Align, Context)) { 4671 const llvm::APSInt &I = Align.Val.getInt(); 4672 if (!I.isPowerOf2()) 4673 Diag(Arg->getExprLoc(), diag::warn_alignment_not_power_of_two) 4674 << Arg->getSourceRange(); 4675 4676 if (I > Sema::MaximumAlignment) 4677 Diag(Arg->getExprLoc(), diag::warn_assume_aligned_too_great) 4678 << Arg->getSourceRange() << Sema::MaximumAlignment; 4679 } 4680 } 4681 } 4682 4683 if (FD) 4684 diagnoseArgDependentDiagnoseIfAttrs(FD, ThisArg, Args, Loc); 4685 } 4686 4687 /// CheckConstructorCall - Check a constructor call for correctness and safety 4688 /// properties not enforced by the C type system. 4689 void Sema::CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType, 4690 ArrayRef<const Expr *> Args, 4691 const FunctionProtoType *Proto, 4692 SourceLocation Loc) { 4693 VariadicCallType CallType = 4694 Proto->isVariadic() ? VariadicConstructor : VariadicDoesNotApply; 4695 4696 auto *Ctor = cast<CXXConstructorDecl>(FDecl); 4697 CheckArgAlignment(Loc, FDecl, "'this'", Context.getPointerType(ThisType), 4698 Context.getPointerType(Ctor->getThisObjectType())); 4699 4700 checkCall(FDecl, Proto, /*ThisArg=*/nullptr, Args, /*IsMemberFunction=*/true, 4701 Loc, SourceRange(), CallType); 4702 } 4703 4704 /// CheckFunctionCall - Check a direct function call for various correctness 4705 /// and safety properties not strictly enforced by the C type system. 4706 bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, 4707 const FunctionProtoType *Proto) { 4708 bool IsMemberOperatorCall = isa<CXXOperatorCallExpr>(TheCall) && 4709 isa<CXXMethodDecl>(FDecl); 4710 bool IsMemberFunction = isa<CXXMemberCallExpr>(TheCall) || 4711 IsMemberOperatorCall; 4712 VariadicCallType CallType = getVariadicCallType(FDecl, Proto, 4713 TheCall->getCallee()); 4714 Expr** Args = TheCall->getArgs(); 4715 unsigned NumArgs = TheCall->getNumArgs(); 4716 4717 Expr *ImplicitThis = nullptr; 4718 if (IsMemberOperatorCall) { 4719 // If this is a call to a member operator, hide the first argument 4720 // from checkCall. 4721 // FIXME: Our choice of AST representation here is less than ideal. 4722 ImplicitThis = Args[0]; 4723 ++Args; 4724 --NumArgs; 4725 } else if (IsMemberFunction) 4726 ImplicitThis = 4727 cast<CXXMemberCallExpr>(TheCall)->getImplicitObjectArgument(); 4728 4729 if (ImplicitThis) { 4730 // ImplicitThis may or may not be a pointer, depending on whether . or -> is 4731 // used. 
4732 QualType ThisType = ImplicitThis->getType(); 4733 if (!ThisType->isPointerType()) { 4734 assert(!ThisType->isReferenceType()); 4735 ThisType = Context.getPointerType(ThisType); 4736 } 4737 4738 QualType ThisTypeFromDecl = 4739 Context.getPointerType(cast<CXXMethodDecl>(FDecl)->getThisObjectType()); 4740 4741 CheckArgAlignment(TheCall->getRParenLoc(), FDecl, "'this'", ThisType, 4742 ThisTypeFromDecl); 4743 } 4744 4745 checkCall(FDecl, Proto, ImplicitThis, llvm::makeArrayRef(Args, NumArgs), 4746 IsMemberFunction, TheCall->getRParenLoc(), 4747 TheCall->getCallee()->getSourceRange(), CallType); 4748 4749 IdentifierInfo *FnInfo = FDecl->getIdentifier(); 4750 // None of the checks below are needed for functions that don't have 4751 // simple names (e.g., C++ conversion functions). 4752 if (!FnInfo) 4753 return false; 4754 4755 CheckTCBEnforcement(TheCall, FDecl); 4756 4757 CheckAbsoluteValueFunction(TheCall, FDecl); 4758 CheckMaxUnsignedZero(TheCall, FDecl); 4759 4760 if (getLangOpts().ObjC) 4761 DiagnoseCStringFormatDirectiveInCFAPI(*this, FDecl, Args, NumArgs); 4762 4763 unsigned CMId = FDecl->getMemoryFunctionKind(); 4764 4765 // Handle memory setting and copying functions. 4766 switch (CMId) { 4767 case 0: 4768 return false; 4769 case Builtin::BIstrlcpy: // fallthrough 4770 case Builtin::BIstrlcat: 4771 CheckStrlcpycatArguments(TheCall, FnInfo); 4772 break; 4773 case Builtin::BIstrncat: 4774 CheckStrncatArguments(TheCall, FnInfo); 4775 break; 4776 case Builtin::BIfree: 4777 CheckFreeArguments(TheCall); 4778 break; 4779 default: 4780 CheckMemaccessArguments(TheCall, CMId, FnInfo); 4781 } 4782 4783 return false; 4784 } 4785 4786 bool Sema::CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation lbrac, 4787 ArrayRef<const Expr *> Args) { 4788 VariadicCallType CallType = 4789 Method->isVariadic() ? VariadicMethod : VariadicDoesNotApply; 4790 4791 checkCall(Method, nullptr, /*ThisArg=*/nullptr, Args, 4792 /*IsMemberFunction=*/false, lbrac, Method->getSourceRange(), 4793 CallType); 4794 4795 return false; 4796 } 4797 4798 bool Sema::CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, 4799 const FunctionProtoType *Proto) { 4800 QualType Ty; 4801 if (const auto *V = dyn_cast<VarDecl>(NDecl)) 4802 Ty = V->getType().getNonReferenceType(); 4803 else if (const auto *F = dyn_cast<FieldDecl>(NDecl)) 4804 Ty = F->getType().getNonReferenceType(); 4805 else 4806 return false; 4807 4808 if (!Ty->isBlockPointerType() && !Ty->isFunctionPointerType() && 4809 !Ty->isFunctionProtoType()) 4810 return false; 4811 4812 VariadicCallType CallType; 4813 if (!Proto || !Proto->isVariadic()) { 4814 CallType = VariadicDoesNotApply; 4815 } else if (Ty->isBlockPointerType()) { 4816 CallType = VariadicBlock; 4817 } else { // Ty->isFunctionPointerType() 4818 CallType = VariadicFunction; 4819 } 4820 4821 checkCall(NDecl, Proto, /*ThisArg=*/nullptr, 4822 llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()), 4823 /*IsMemberFunction=*/false, TheCall->getRParenLoc(), 4824 TheCall->getCallee()->getSourceRange(), CallType); 4825 4826 return false; 4827 } 4828 4829 /// Checks function calls when a FunctionDecl or a NamedDecl is not available, 4830 /// such as function pointers returned from functions. 
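/// Illustrative sketch (hypothetical): in a call such as
///   getHandler()(buf, n)
/// the callee is itself an expression with no named declaration, so only the
/// FunctionProtoType of the returned function pointer is available here.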
4831 bool Sema::CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto) { 4832 VariadicCallType CallType = getVariadicCallType(/*FDecl=*/nullptr, Proto, 4833 TheCall->getCallee()); 4834 checkCall(/*FDecl=*/nullptr, Proto, /*ThisArg=*/nullptr, 4835 llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()), 4836 /*IsMemberFunction=*/false, TheCall->getRParenLoc(), 4837 TheCall->getCallee()->getSourceRange(), CallType); 4838 4839 return false; 4840 } 4841 4842 static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) { 4843 if (!llvm::isValidAtomicOrderingCABI(Ordering)) 4844 return false; 4845 4846 auto OrderingCABI = (llvm::AtomicOrderingCABI)Ordering; 4847 switch (Op) { 4848 case AtomicExpr::AO__c11_atomic_init: 4849 case AtomicExpr::AO__opencl_atomic_init: 4850 llvm_unreachable("There is no ordering argument for an init"); 4851 4852 case AtomicExpr::AO__c11_atomic_load: 4853 case AtomicExpr::AO__opencl_atomic_load: 4854 case AtomicExpr::AO__atomic_load_n: 4855 case AtomicExpr::AO__atomic_load: 4856 return OrderingCABI != llvm::AtomicOrderingCABI::release && 4857 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel; 4858 4859 case AtomicExpr::AO__c11_atomic_store: 4860 case AtomicExpr::AO__opencl_atomic_store: 4861 case AtomicExpr::AO__atomic_store: 4862 case AtomicExpr::AO__atomic_store_n: 4863 return OrderingCABI != llvm::AtomicOrderingCABI::consume && 4864 OrderingCABI != llvm::AtomicOrderingCABI::acquire && 4865 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel; 4866 4867 default: 4868 return true; 4869 } 4870 } 4871 4872 ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult, 4873 AtomicExpr::AtomicOp Op) { 4874 CallExpr *TheCall = cast<CallExpr>(TheCallResult.get()); 4875 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 4876 MultiExprArg Args{TheCall->getArgs(), TheCall->getNumArgs()}; 4877 return BuildAtomicExpr({TheCall->getBeginLoc(), TheCall->getEndLoc()}, 4878 DRE->getSourceRange(), TheCall->getRParenLoc(), Args, 4879 Op); 4880 } 4881 4882 ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange, 4883 SourceLocation RParenLoc, MultiExprArg Args, 4884 AtomicExpr::AtomicOp Op, 4885 AtomicArgumentOrder ArgOrder) { 4886 // All the non-OpenCL operations take one of the following forms. 4887 // The OpenCL operations take the __c11 forms with one extra argument for 4888 // synchronization scope. 
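// Illustrative sketch (not from the original source): for example, the C11
// load form
//   C __c11_atomic_load(A *, int order)
// corresponds to the OpenCL form
//   C __opencl_atomic_load(A *, int order, int scope)
// with the trailing synchronization-scope argument appended.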
4889 enum { 4890 // C __c11_atomic_init(A *, C) 4891 Init, 4892 4893 // C __c11_atomic_load(A *, int) 4894 Load, 4895 4896 // void __atomic_load(A *, CP, int) 4897 LoadCopy, 4898 4899 // void __atomic_store(A *, CP, int) 4900 Copy, 4901 4902 // C __c11_atomic_add(A *, M, int) 4903 Arithmetic, 4904 4905 // C __atomic_exchange_n(A *, CP, int) 4906 Xchg, 4907 4908 // void __atomic_exchange(A *, C *, CP, int) 4909 GNUXchg, 4910 4911 // bool __c11_atomic_compare_exchange_strong(A *, C *, CP, int, int) 4912 C11CmpXchg, 4913 4914 // bool __atomic_compare_exchange(A *, C *, CP, bool, int, int) 4915 GNUCmpXchg 4916 } Form = Init; 4917 4918 const unsigned NumForm = GNUCmpXchg + 1; 4919 const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 3, 4, 5, 6 }; 4920 const unsigned NumVals[] = { 1, 0, 1, 1, 1, 1, 2, 2, 3 }; 4921 // where: 4922 // C is an appropriate type, 4923 // A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins, 4924 // CP is C for __c11 builtins and GNU _n builtins and is C * otherwise, 4925 // M is C if C is an integer, and ptrdiff_t if C is a pointer, and 4926 // the int parameters are for orderings. 4927 4928 static_assert(sizeof(NumArgs)/sizeof(NumArgs[0]) == NumForm 4929 && sizeof(NumVals)/sizeof(NumVals[0]) == NumForm, 4930 "need to update code for modified forms"); 4931 static_assert(AtomicExpr::AO__c11_atomic_init == 0 && 4932 AtomicExpr::AO__c11_atomic_fetch_min + 1 == 4933 AtomicExpr::AO__atomic_load, 4934 "need to update code for modified C11 atomics"); 4935 bool IsOpenCL = Op >= AtomicExpr::AO__opencl_atomic_init && 4936 Op <= AtomicExpr::AO__opencl_atomic_fetch_max; 4937 bool IsC11 = (Op >= AtomicExpr::AO__c11_atomic_init && 4938 Op <= AtomicExpr::AO__c11_atomic_fetch_min) || 4939 IsOpenCL; 4940 bool IsN = Op == AtomicExpr::AO__atomic_load_n || 4941 Op == AtomicExpr::AO__atomic_store_n || 4942 Op == AtomicExpr::AO__atomic_exchange_n || 4943 Op == AtomicExpr::AO__atomic_compare_exchange_n; 4944 bool IsAddSub = false; 4945 4946 switch (Op) { 4947 case AtomicExpr::AO__c11_atomic_init: 4948 case AtomicExpr::AO__opencl_atomic_init: 4949 Form = Init; 4950 break; 4951 4952 case AtomicExpr::AO__c11_atomic_load: 4953 case AtomicExpr::AO__opencl_atomic_load: 4954 case AtomicExpr::AO__atomic_load_n: 4955 Form = Load; 4956 break; 4957 4958 case AtomicExpr::AO__atomic_load: 4959 Form = LoadCopy; 4960 break; 4961 4962 case AtomicExpr::AO__c11_atomic_store: 4963 case AtomicExpr::AO__opencl_atomic_store: 4964 case AtomicExpr::AO__atomic_store: 4965 case AtomicExpr::AO__atomic_store_n: 4966 Form = Copy; 4967 break; 4968 4969 case AtomicExpr::AO__c11_atomic_fetch_add: 4970 case AtomicExpr::AO__c11_atomic_fetch_sub: 4971 case AtomicExpr::AO__opencl_atomic_fetch_add: 4972 case AtomicExpr::AO__opencl_atomic_fetch_sub: 4973 case AtomicExpr::AO__atomic_fetch_add: 4974 case AtomicExpr::AO__atomic_fetch_sub: 4975 case AtomicExpr::AO__atomic_add_fetch: 4976 case AtomicExpr::AO__atomic_sub_fetch: 4977 IsAddSub = true; 4978 Form = Arithmetic; 4979 break; 4980 case AtomicExpr::AO__c11_atomic_fetch_and: 4981 case AtomicExpr::AO__c11_atomic_fetch_or: 4982 case AtomicExpr::AO__c11_atomic_fetch_xor: 4983 case AtomicExpr::AO__opencl_atomic_fetch_and: 4984 case AtomicExpr::AO__opencl_atomic_fetch_or: 4985 case AtomicExpr::AO__opencl_atomic_fetch_xor: 4986 case AtomicExpr::AO__atomic_fetch_and: 4987 case AtomicExpr::AO__atomic_fetch_or: 4988 case AtomicExpr::AO__atomic_fetch_xor: 4989 case AtomicExpr::AO__atomic_fetch_nand: 4990 case AtomicExpr::AO__atomic_and_fetch: 4991 case 
AtomicExpr::AO__atomic_or_fetch: 4992 case AtomicExpr::AO__atomic_xor_fetch: 4993 case AtomicExpr::AO__atomic_nand_fetch: 4994 Form = Arithmetic; 4995 break; 4996 case AtomicExpr::AO__c11_atomic_fetch_min: 4997 case AtomicExpr::AO__c11_atomic_fetch_max: 4998 case AtomicExpr::AO__opencl_atomic_fetch_min: 4999 case AtomicExpr::AO__opencl_atomic_fetch_max: 5000 case AtomicExpr::AO__atomic_min_fetch: 5001 case AtomicExpr::AO__atomic_max_fetch: 5002 case AtomicExpr::AO__atomic_fetch_min: 5003 case AtomicExpr::AO__atomic_fetch_max: 5004 Form = Arithmetic; 5005 break; 5006 5007 case AtomicExpr::AO__c11_atomic_exchange: 5008 case AtomicExpr::AO__opencl_atomic_exchange: 5009 case AtomicExpr::AO__atomic_exchange_n: 5010 Form = Xchg; 5011 break; 5012 5013 case AtomicExpr::AO__atomic_exchange: 5014 Form = GNUXchg; 5015 break; 5016 5017 case AtomicExpr::AO__c11_atomic_compare_exchange_strong: 5018 case AtomicExpr::AO__c11_atomic_compare_exchange_weak: 5019 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong: 5020 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak: 5021 Form = C11CmpXchg; 5022 break; 5023 5024 case AtomicExpr::AO__atomic_compare_exchange: 5025 case AtomicExpr::AO__atomic_compare_exchange_n: 5026 Form = GNUCmpXchg; 5027 break; 5028 } 5029 5030 unsigned AdjustedNumArgs = NumArgs[Form]; 5031 if (IsOpenCL && Op != AtomicExpr::AO__opencl_atomic_init) 5032 ++AdjustedNumArgs; 5033 // Check we have the right number of arguments. 5034 if (Args.size() < AdjustedNumArgs) { 5035 Diag(CallRange.getEnd(), diag::err_typecheck_call_too_few_args) 5036 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size()) 5037 << ExprRange; 5038 return ExprError(); 5039 } else if (Args.size() > AdjustedNumArgs) { 5040 Diag(Args[AdjustedNumArgs]->getBeginLoc(), 5041 diag::err_typecheck_call_too_many_args) 5042 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size()) 5043 << ExprRange; 5044 return ExprError(); 5045 } 5046 5047 // Inspect the first argument of the atomic operation. 5048 Expr *Ptr = Args[0]; 5049 ExprResult ConvertedPtr = DefaultFunctionArrayLvalueConversion(Ptr); 5050 if (ConvertedPtr.isInvalid()) 5051 return ExprError(); 5052 5053 Ptr = ConvertedPtr.get(); 5054 const PointerType *pointerType = Ptr->getType()->getAs<PointerType>(); 5055 if (!pointerType) { 5056 Diag(ExprRange.getBegin(), diag::err_atomic_builtin_must_be_pointer) 5057 << Ptr->getType() << Ptr->getSourceRange(); 5058 return ExprError(); 5059 } 5060 5061 // For a __c11 builtin, this should be a pointer to an _Atomic type. 5062 QualType AtomTy = pointerType->getPointeeType(); // 'A' 5063 QualType ValType = AtomTy; // 'C' 5064 if (IsC11) { 5065 if (!AtomTy->isAtomicType()) { 5066 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic) 5067 << Ptr->getType() << Ptr->getSourceRange(); 5068 return ExprError(); 5069 } 5070 if ((Form != Load && Form != LoadCopy && AtomTy.isConstQualified()) || 5071 AtomTy.getAddressSpace() == LangAS::opencl_constant) { 5072 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_atomic) 5073 << (AtomTy.isConstQualified() ? 
0 : 1) << Ptr->getType() 5074 << Ptr->getSourceRange(); 5075 return ExprError(); 5076 } 5077 ValType = AtomTy->castAs<AtomicType>()->getValueType(); 5078 } else if (Form != Load && Form != LoadCopy) { 5079 if (ValType.isConstQualified()) { 5080 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_pointer) 5081 << Ptr->getType() << Ptr->getSourceRange(); 5082 return ExprError(); 5083 } 5084 } 5085 5086 // For an arithmetic operation, the implied arithmetic must be well-formed. 5087 if (Form == Arithmetic) { 5088 // gcc does not enforce these rules for GNU atomics, but we do so for 5089 // sanity. 5090 auto IsAllowedValueType = [&](QualType ValType) { 5091 if (ValType->isIntegerType()) 5092 return true; 5093 if (ValType->isPointerType()) 5094 return true; 5095 if (!ValType->isFloatingType()) 5096 return false; 5097 // LLVM Parser does not allow atomicrmw with x86_fp80 type. 5098 if (ValType->isSpecificBuiltinType(BuiltinType::LongDouble) && 5099 &Context.getTargetInfo().getLongDoubleFormat() == 5100 &llvm::APFloat::x87DoubleExtended()) 5101 return false; 5102 return true; 5103 }; 5104 if (IsAddSub && !IsAllowedValueType(ValType)) { 5105 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_ptr_or_fp) 5106 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 5107 return ExprError(); 5108 } 5109 if (!IsAddSub && !ValType->isIntegerType()) { 5110 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int) 5111 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 5112 return ExprError(); 5113 } 5114 if (IsC11 && ValType->isPointerType() && 5115 RequireCompleteType(Ptr->getBeginLoc(), ValType->getPointeeType(), 5116 diag::err_incomplete_type)) { 5117 return ExprError(); 5118 } 5119 } else if (IsN && !ValType->isIntegerType() && !ValType->isPointerType()) { 5120 // For __atomic_*_n operations, the value type must be a scalar integral or 5121 // pointer type which is 1, 2, 4, 8 or 16 bytes in length. 5122 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_or_ptr) 5123 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 5124 return ExprError(); 5125 } 5126 5127 if (!IsC11 && !AtomTy.isTriviallyCopyableType(Context) && 5128 !AtomTy->isScalarType()) { 5129 // For GNU atomics, require a trivially-copyable type. This is not part of 5130 // the GNU atomics specification, but we enforce it for sanity. 5131 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_trivial_copy) 5132 << Ptr->getType() << Ptr->getSourceRange(); 5133 return ExprError(); 5134 } 5135 5136 switch (ValType.getObjCLifetime()) { 5137 case Qualifiers::OCL_None: 5138 case Qualifiers::OCL_ExplicitNone: 5139 // okay 5140 break; 5141 5142 case Qualifiers::OCL_Weak: 5143 case Qualifiers::OCL_Strong: 5144 case Qualifiers::OCL_Autoreleasing: 5145 // FIXME: Can this happen? By this point, ValType should be known 5146 // to be trivially copyable. 5147 Diag(ExprRange.getBegin(), diag::err_arc_atomic_ownership) 5148 << ValType << Ptr->getSourceRange(); 5149 return ExprError(); 5150 } 5151 5152 // All atomic operations have an overload which takes a pointer to a volatile 5153 // 'A'. We shouldn't let the volatile-ness of the pointee-type inject itself 5154 // into the result or the other operands. Similarly atomic_load takes a 5155 // pointer to a const 'A'. 
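// Illustrative sketch (hypothetical): given
//   volatile _Atomic(int) *p;
//   int r = __c11_atomic_load(p, __ATOMIC_RELAXED);
// the operation is accepted, and both the result and the value operands use
// plain 'int' rather than 'volatile int'.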
5156 ValType.removeLocalVolatile(); 5157 ValType.removeLocalConst(); 5158 QualType ResultType = ValType; 5159 if (Form == Copy || Form == LoadCopy || Form == GNUXchg || 5160 Form == Init) 5161 ResultType = Context.VoidTy; 5162 else if (Form == C11CmpXchg || Form == GNUCmpXchg) 5163 ResultType = Context.BoolTy; 5164 5165 // The type of a parameter passed 'by value'. In the GNU atomics, such 5166 // arguments are actually passed as pointers. 5167 QualType ByValType = ValType; // 'CP' 5168 bool IsPassedByAddress = false; 5169 if (!IsC11 && !IsN) { 5170 ByValType = Ptr->getType(); 5171 IsPassedByAddress = true; 5172 } 5173 5174 SmallVector<Expr *, 5> APIOrderedArgs; 5175 if (ArgOrder == Sema::AtomicArgumentOrder::AST) { 5176 APIOrderedArgs.push_back(Args[0]); 5177 switch (Form) { 5178 case Init: 5179 case Load: 5180 APIOrderedArgs.push_back(Args[1]); // Val1/Order 5181 break; 5182 case LoadCopy: 5183 case Copy: 5184 case Arithmetic: 5185 case Xchg: 5186 APIOrderedArgs.push_back(Args[2]); // Val1 5187 APIOrderedArgs.push_back(Args[1]); // Order 5188 break; 5189 case GNUXchg: 5190 APIOrderedArgs.push_back(Args[2]); // Val1 5191 APIOrderedArgs.push_back(Args[3]); // Val2 5192 APIOrderedArgs.push_back(Args[1]); // Order 5193 break; 5194 case C11CmpXchg: 5195 APIOrderedArgs.push_back(Args[2]); // Val1 5196 APIOrderedArgs.push_back(Args[4]); // Val2 5197 APIOrderedArgs.push_back(Args[1]); // Order 5198 APIOrderedArgs.push_back(Args[3]); // OrderFail 5199 break; 5200 case GNUCmpXchg: 5201 APIOrderedArgs.push_back(Args[2]); // Val1 5202 APIOrderedArgs.push_back(Args[4]); // Val2 5203 APIOrderedArgs.push_back(Args[5]); // Weak 5204 APIOrderedArgs.push_back(Args[1]); // Order 5205 APIOrderedArgs.push_back(Args[3]); // OrderFail 5206 break; 5207 } 5208 } else 5209 APIOrderedArgs.append(Args.begin(), Args.end()); 5210 5211 // The first argument's non-CV pointer type is used to deduce the type of 5212 // subsequent arguments, except for: 5213 // - weak flag (always converted to bool) 5214 // - memory order (always converted to int) 5215 // - scope (always converted to int) 5216 for (unsigned i = 0; i != APIOrderedArgs.size(); ++i) { 5217 QualType Ty; 5218 if (i < NumVals[Form] + 1) { 5219 switch (i) { 5220 case 0: 5221 // The first argument is always a pointer. It has a fixed type. 5222 // It is always dereferenced, a nullptr is undefined. 5223 CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin()); 5224 // Nothing else to do: we already know all we want about this pointer. 5225 continue; 5226 case 1: 5227 // The second argument is the non-atomic operand. For arithmetic, this 5228 // is always passed by value, and for a compare_exchange it is always 5229 // passed by address. For the rest, GNU uses by-address and C11 uses 5230 // by-value. 5231 assert(Form != Load); 5232 if (Form == Arithmetic && ValType->isPointerType()) 5233 Ty = Context.getPointerDiffType(); 5234 else if (Form == Init || Form == Arithmetic) 5235 Ty = ValType; 5236 else if (Form == Copy || Form == Xchg) { 5237 if (IsPassedByAddress) { 5238 // The value pointer is always dereferenced, a nullptr is undefined. 5239 CheckNonNullArgument(*this, APIOrderedArgs[i], 5240 ExprRange.getBegin()); 5241 } 5242 Ty = ByValType; 5243 } else { 5244 Expr *ValArg = APIOrderedArgs[i]; 5245 // The value pointer is always dereferenced, a nullptr is undefined. 5246 CheckNonNullArgument(*this, ValArg, ExprRange.getBegin()); 5247 LangAS AS = LangAS::Default; 5248 // Keep address space of non-atomic pointer type. 
5249 if (const PointerType *PtrTy = 5250 ValArg->getType()->getAs<PointerType>()) { 5251 AS = PtrTy->getPointeeType().getAddressSpace(); 5252 } 5253 Ty = Context.getPointerType( 5254 Context.getAddrSpaceQualType(ValType.getUnqualifiedType(), AS)); 5255 } 5256 break; 5257 case 2: 5258 // The third argument to compare_exchange / GNU exchange is the desired 5259 // value, either by-value (for the C11 and *_n variant) or as a pointer. 5260 if (IsPassedByAddress) 5261 CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin()); 5262 Ty = ByValType; 5263 break; 5264 case 3: 5265 // The fourth argument to GNU compare_exchange is a 'weak' flag. 5266 Ty = Context.BoolTy; 5267 break; 5268 } 5269 } else { 5270 // The order(s) and scope are always converted to int. 5271 Ty = Context.IntTy; 5272 } 5273 5274 InitializedEntity Entity = 5275 InitializedEntity::InitializeParameter(Context, Ty, false); 5276 ExprResult Arg = APIOrderedArgs[i]; 5277 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 5278 if (Arg.isInvalid()) 5279 return true; 5280 APIOrderedArgs[i] = Arg.get(); 5281 } 5282 5283 // Permute the arguments into a 'consistent' order. 5284 SmallVector<Expr*, 5> SubExprs; 5285 SubExprs.push_back(Ptr); 5286 switch (Form) { 5287 case Init: 5288 // Note, AtomicExpr::getVal1() has a special case for this atomic. 5289 SubExprs.push_back(APIOrderedArgs[1]); // Val1 5290 break; 5291 case Load: 5292 SubExprs.push_back(APIOrderedArgs[1]); // Order 5293 break; 5294 case LoadCopy: 5295 case Copy: 5296 case Arithmetic: 5297 case Xchg: 5298 SubExprs.push_back(APIOrderedArgs[2]); // Order 5299 SubExprs.push_back(APIOrderedArgs[1]); // Val1 5300 break; 5301 case GNUXchg: 5302 // Note, AtomicExpr::getVal2() has a special case for this atomic. 5303 SubExprs.push_back(APIOrderedArgs[3]); // Order 5304 SubExprs.push_back(APIOrderedArgs[1]); // Val1 5305 SubExprs.push_back(APIOrderedArgs[2]); // Val2 5306 break; 5307 case C11CmpXchg: 5308 SubExprs.push_back(APIOrderedArgs[3]); // Order 5309 SubExprs.push_back(APIOrderedArgs[1]); // Val1 5310 SubExprs.push_back(APIOrderedArgs[4]); // OrderFail 5311 SubExprs.push_back(APIOrderedArgs[2]); // Val2 5312 break; 5313 case GNUCmpXchg: 5314 SubExprs.push_back(APIOrderedArgs[4]); // Order 5315 SubExprs.push_back(APIOrderedArgs[1]); // Val1 5316 SubExprs.push_back(APIOrderedArgs[5]); // OrderFail 5317 SubExprs.push_back(APIOrderedArgs[2]); // Val2 5318 SubExprs.push_back(APIOrderedArgs[3]); // Weak 5319 break; 5320 } 5321 5322 if (SubExprs.size() >= 2 && Form != Init) { 5323 if (Optional<llvm::APSInt> Result = 5324 SubExprs[1]->getIntegerConstantExpr(Context)) 5325 if (!isValidOrderingForOp(Result->getSExtValue(), Op)) 5326 Diag(SubExprs[1]->getBeginLoc(), 5327 diag::warn_atomic_op_has_invalid_memory_order) 5328 << SubExprs[1]->getSourceRange(); 5329 } 5330 5331 if (auto ScopeModel = AtomicExpr::getScopeModel(Op)) { 5332 auto *Scope = Args[Args.size() - 1]; 5333 if (Optional<llvm::APSInt> Result = 5334 Scope->getIntegerConstantExpr(Context)) { 5335 if (!ScopeModel->isValid(Result->getZExtValue())) 5336 Diag(Scope->getBeginLoc(), diag::err_atomic_op_has_invalid_synch_scope) 5337 << Scope->getSourceRange(); 5338 } 5339 SubExprs.push_back(Scope); 5340 } 5341 5342 AtomicExpr *AE = new (Context) 5343 AtomicExpr(ExprRange.getBegin(), SubExprs, ResultType, Op, RParenLoc); 5344 5345 if ((Op == AtomicExpr::AO__c11_atomic_load || 5346 Op == AtomicExpr::AO__c11_atomic_store || 5347 Op == AtomicExpr::AO__opencl_atomic_load || 5348 Op == 
AtomicExpr::AO__opencl_atomic_store ) && 5349 Context.AtomicUsesUnsupportedLibcall(AE)) 5350 Diag(AE->getBeginLoc(), diag::err_atomic_load_store_uses_lib) 5351 << ((Op == AtomicExpr::AO__c11_atomic_load || 5352 Op == AtomicExpr::AO__opencl_atomic_load) 5353 ? 0 5354 : 1); 5355 5356 if (ValType->isExtIntType()) { 5357 Diag(Ptr->getExprLoc(), diag::err_atomic_builtin_ext_int_prohibit); 5358 return ExprError(); 5359 } 5360 5361 return AE; 5362 } 5363 5364 /// checkBuiltinArgument - Given a call to a builtin function, perform 5365 /// normal type-checking on the given argument, updating the call in 5366 /// place. This is useful when a builtin function requires custom 5367 /// type-checking for some of its arguments but not necessarily all of 5368 /// them. 5369 /// 5370 /// Returns true on error. 5371 static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) { 5372 FunctionDecl *Fn = E->getDirectCallee(); 5373 assert(Fn && "builtin call without direct callee!"); 5374 5375 ParmVarDecl *Param = Fn->getParamDecl(ArgIndex); 5376 InitializedEntity Entity = 5377 InitializedEntity::InitializeParameter(S.Context, Param); 5378 5379 ExprResult Arg = E->getArg(0); 5380 Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg); 5381 if (Arg.isInvalid()) 5382 return true; 5383 5384 E->setArg(ArgIndex, Arg.get()); 5385 return false; 5386 } 5387 5388 /// We have a call to a function like __sync_fetch_and_add, which is an 5389 /// overloaded function based on the pointer type of its first argument. 5390 /// The main BuildCallExpr routines have already promoted the types of 5391 /// arguments because all of these calls are prototyped as void(...). 5392 /// 5393 /// This function goes through and does final semantic checking for these 5394 /// builtins, as well as generating any warnings. 5395 ExprResult 5396 Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) { 5397 CallExpr *TheCall = static_cast<CallExpr *>(TheCallResult.get()); 5398 Expr *Callee = TheCall->getCallee(); 5399 DeclRefExpr *DRE = cast<DeclRefExpr>(Callee->IgnoreParenCasts()); 5400 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 5401 5402 // Ensure that we have at least one argument to do type inference from. 5403 if (TheCall->getNumArgs() < 1) { 5404 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 5405 << 0 << 1 << TheCall->getNumArgs() << Callee->getSourceRange(); 5406 return ExprError(); 5407 } 5408 5409 // Inspect the first argument of the atomic builtin. This should always be 5410 // a pointer type, whose element is an integral scalar or pointer type. 5411 // Because it is a pointer type, we don't have to worry about any implicit 5412 // casts here. 5413 // FIXME: We don't allow floating point scalars as input. 
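// Illustrative sketch (hypothetical): for
//   long Counter;
//   __sync_fetch_and_add(&Counter, 1);
// the pointee type 'long' drives the checks below and the selection of the
// size-suffixed builtin (e.g. __sync_fetch_and_add_8 on an LP64 target).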
5414 Expr *FirstArg = TheCall->getArg(0); 5415 ExprResult FirstArgResult = DefaultFunctionArrayLvalueConversion(FirstArg); 5416 if (FirstArgResult.isInvalid()) 5417 return ExprError(); 5418 FirstArg = FirstArgResult.get(); 5419 TheCall->setArg(0, FirstArg); 5420 5421 const PointerType *pointerType = FirstArg->getType()->getAs<PointerType>(); 5422 if (!pointerType) { 5423 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer) 5424 << FirstArg->getType() << FirstArg->getSourceRange(); 5425 return ExprError(); 5426 } 5427 5428 QualType ValType = pointerType->getPointeeType(); 5429 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 5430 !ValType->isBlockPointerType()) { 5431 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intptr) 5432 << FirstArg->getType() << FirstArg->getSourceRange(); 5433 return ExprError(); 5434 } 5435 5436 if (ValType.isConstQualified()) { 5437 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_cannot_be_const) 5438 << FirstArg->getType() << FirstArg->getSourceRange(); 5439 return ExprError(); 5440 } 5441 5442 switch (ValType.getObjCLifetime()) { 5443 case Qualifiers::OCL_None: 5444 case Qualifiers::OCL_ExplicitNone: 5445 // okay 5446 break; 5447 5448 case Qualifiers::OCL_Weak: 5449 case Qualifiers::OCL_Strong: 5450 case Qualifiers::OCL_Autoreleasing: 5451 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership) 5452 << ValType << FirstArg->getSourceRange(); 5453 return ExprError(); 5454 } 5455 5456 // Strip any qualifiers off ValType. 5457 ValType = ValType.getUnqualifiedType(); 5458 5459 // The majority of builtins return a value, but a few have special return 5460 // types, so allow them to override appropriately below. 5461 QualType ResultType = ValType; 5462 5463 // We need to figure out which concrete builtin this maps onto. For example, 5464 // __sync_fetch_and_add with a 2 byte object turns into 5465 // __sync_fetch_and_add_2. 5466 #define BUILTIN_ROW(x) \ 5467 { Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \ 5468 Builtin::BI##x##_8, Builtin::BI##x##_16 } 5469 5470 static const unsigned BuiltinIndices[][5] = { 5471 BUILTIN_ROW(__sync_fetch_and_add), 5472 BUILTIN_ROW(__sync_fetch_and_sub), 5473 BUILTIN_ROW(__sync_fetch_and_or), 5474 BUILTIN_ROW(__sync_fetch_and_and), 5475 BUILTIN_ROW(__sync_fetch_and_xor), 5476 BUILTIN_ROW(__sync_fetch_and_nand), 5477 5478 BUILTIN_ROW(__sync_add_and_fetch), 5479 BUILTIN_ROW(__sync_sub_and_fetch), 5480 BUILTIN_ROW(__sync_and_and_fetch), 5481 BUILTIN_ROW(__sync_or_and_fetch), 5482 BUILTIN_ROW(__sync_xor_and_fetch), 5483 BUILTIN_ROW(__sync_nand_and_fetch), 5484 5485 BUILTIN_ROW(__sync_val_compare_and_swap), 5486 BUILTIN_ROW(__sync_bool_compare_and_swap), 5487 BUILTIN_ROW(__sync_lock_test_and_set), 5488 BUILTIN_ROW(__sync_lock_release), 5489 BUILTIN_ROW(__sync_swap) 5490 }; 5491 #undef BUILTIN_ROW 5492 5493 // Determine the index of the size. 5494 unsigned SizeIndex; 5495 switch (Context.getTypeSizeInChars(ValType).getQuantity()) { 5496 case 1: SizeIndex = 0; break; 5497 case 2: SizeIndex = 1; break; 5498 case 4: SizeIndex = 2; break; 5499 case 8: SizeIndex = 3; break; 5500 case 16: SizeIndex = 4; break; 5501 default: 5502 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_pointer_size) 5503 << FirstArg->getType() << FirstArg->getSourceRange(); 5504 return ExprError(); 5505 } 5506 5507 // Each of these builtins has one pointer argument, followed by some number of 5508 // values (0, 1 or 2) followed by a potentially empty varags list of stuff 5509 // that we ignore. 
Find out which row of BuiltinIndices to read from as well 5510 // as the number of fixed args. 5511 unsigned BuiltinID = FDecl->getBuiltinID(); 5512 unsigned BuiltinIndex, NumFixed = 1; 5513 bool WarnAboutSemanticsChange = false; 5514 switch (BuiltinID) { 5515 default: llvm_unreachable("Unknown overloaded atomic builtin!"); 5516 case Builtin::BI__sync_fetch_and_add: 5517 case Builtin::BI__sync_fetch_and_add_1: 5518 case Builtin::BI__sync_fetch_and_add_2: 5519 case Builtin::BI__sync_fetch_and_add_4: 5520 case Builtin::BI__sync_fetch_and_add_8: 5521 case Builtin::BI__sync_fetch_and_add_16: 5522 BuiltinIndex = 0; 5523 break; 5524 5525 case Builtin::BI__sync_fetch_and_sub: 5526 case Builtin::BI__sync_fetch_and_sub_1: 5527 case Builtin::BI__sync_fetch_and_sub_2: 5528 case Builtin::BI__sync_fetch_and_sub_4: 5529 case Builtin::BI__sync_fetch_and_sub_8: 5530 case Builtin::BI__sync_fetch_and_sub_16: 5531 BuiltinIndex = 1; 5532 break; 5533 5534 case Builtin::BI__sync_fetch_and_or: 5535 case Builtin::BI__sync_fetch_and_or_1: 5536 case Builtin::BI__sync_fetch_and_or_2: 5537 case Builtin::BI__sync_fetch_and_or_4: 5538 case Builtin::BI__sync_fetch_and_or_8: 5539 case Builtin::BI__sync_fetch_and_or_16: 5540 BuiltinIndex = 2; 5541 break; 5542 5543 case Builtin::BI__sync_fetch_and_and: 5544 case Builtin::BI__sync_fetch_and_and_1: 5545 case Builtin::BI__sync_fetch_and_and_2: 5546 case Builtin::BI__sync_fetch_and_and_4: 5547 case Builtin::BI__sync_fetch_and_and_8: 5548 case Builtin::BI__sync_fetch_and_and_16: 5549 BuiltinIndex = 3; 5550 break; 5551 5552 case Builtin::BI__sync_fetch_and_xor: 5553 case Builtin::BI__sync_fetch_and_xor_1: 5554 case Builtin::BI__sync_fetch_and_xor_2: 5555 case Builtin::BI__sync_fetch_and_xor_4: 5556 case Builtin::BI__sync_fetch_and_xor_8: 5557 case Builtin::BI__sync_fetch_and_xor_16: 5558 BuiltinIndex = 4; 5559 break; 5560 5561 case Builtin::BI__sync_fetch_and_nand: 5562 case Builtin::BI__sync_fetch_and_nand_1: 5563 case Builtin::BI__sync_fetch_and_nand_2: 5564 case Builtin::BI__sync_fetch_and_nand_4: 5565 case Builtin::BI__sync_fetch_and_nand_8: 5566 case Builtin::BI__sync_fetch_and_nand_16: 5567 BuiltinIndex = 5; 5568 WarnAboutSemanticsChange = true; 5569 break; 5570 5571 case Builtin::BI__sync_add_and_fetch: 5572 case Builtin::BI__sync_add_and_fetch_1: 5573 case Builtin::BI__sync_add_and_fetch_2: 5574 case Builtin::BI__sync_add_and_fetch_4: 5575 case Builtin::BI__sync_add_and_fetch_8: 5576 case Builtin::BI__sync_add_and_fetch_16: 5577 BuiltinIndex = 6; 5578 break; 5579 5580 case Builtin::BI__sync_sub_and_fetch: 5581 case Builtin::BI__sync_sub_and_fetch_1: 5582 case Builtin::BI__sync_sub_and_fetch_2: 5583 case Builtin::BI__sync_sub_and_fetch_4: 5584 case Builtin::BI__sync_sub_and_fetch_8: 5585 case Builtin::BI__sync_sub_and_fetch_16: 5586 BuiltinIndex = 7; 5587 break; 5588 5589 case Builtin::BI__sync_and_and_fetch: 5590 case Builtin::BI__sync_and_and_fetch_1: 5591 case Builtin::BI__sync_and_and_fetch_2: 5592 case Builtin::BI__sync_and_and_fetch_4: 5593 case Builtin::BI__sync_and_and_fetch_8: 5594 case Builtin::BI__sync_and_and_fetch_16: 5595 BuiltinIndex = 8; 5596 break; 5597 5598 case Builtin::BI__sync_or_and_fetch: 5599 case Builtin::BI__sync_or_and_fetch_1: 5600 case Builtin::BI__sync_or_and_fetch_2: 5601 case Builtin::BI__sync_or_and_fetch_4: 5602 case Builtin::BI__sync_or_and_fetch_8: 5603 case Builtin::BI__sync_or_and_fetch_16: 5604 BuiltinIndex = 9; 5605 break; 5606 5607 case Builtin::BI__sync_xor_and_fetch: 5608 case Builtin::BI__sync_xor_and_fetch_1: 5609 case 
Builtin::BI__sync_xor_and_fetch_2: 5610 case Builtin::BI__sync_xor_and_fetch_4: 5611 case Builtin::BI__sync_xor_and_fetch_8: 5612 case Builtin::BI__sync_xor_and_fetch_16: 5613 BuiltinIndex = 10; 5614 break; 5615 5616 case Builtin::BI__sync_nand_and_fetch: 5617 case Builtin::BI__sync_nand_and_fetch_1: 5618 case Builtin::BI__sync_nand_and_fetch_2: 5619 case Builtin::BI__sync_nand_and_fetch_4: 5620 case Builtin::BI__sync_nand_and_fetch_8: 5621 case Builtin::BI__sync_nand_and_fetch_16: 5622 BuiltinIndex = 11; 5623 WarnAboutSemanticsChange = true; 5624 break; 5625 5626 case Builtin::BI__sync_val_compare_and_swap: 5627 case Builtin::BI__sync_val_compare_and_swap_1: 5628 case Builtin::BI__sync_val_compare_and_swap_2: 5629 case Builtin::BI__sync_val_compare_and_swap_4: 5630 case Builtin::BI__sync_val_compare_and_swap_8: 5631 case Builtin::BI__sync_val_compare_and_swap_16: 5632 BuiltinIndex = 12; 5633 NumFixed = 2; 5634 break; 5635 5636 case Builtin::BI__sync_bool_compare_and_swap: 5637 case Builtin::BI__sync_bool_compare_and_swap_1: 5638 case Builtin::BI__sync_bool_compare_and_swap_2: 5639 case Builtin::BI__sync_bool_compare_and_swap_4: 5640 case Builtin::BI__sync_bool_compare_and_swap_8: 5641 case Builtin::BI__sync_bool_compare_and_swap_16: 5642 BuiltinIndex = 13; 5643 NumFixed = 2; 5644 ResultType = Context.BoolTy; 5645 break; 5646 5647 case Builtin::BI__sync_lock_test_and_set: 5648 case Builtin::BI__sync_lock_test_and_set_1: 5649 case Builtin::BI__sync_lock_test_and_set_2: 5650 case Builtin::BI__sync_lock_test_and_set_4: 5651 case Builtin::BI__sync_lock_test_and_set_8: 5652 case Builtin::BI__sync_lock_test_and_set_16: 5653 BuiltinIndex = 14; 5654 break; 5655 5656 case Builtin::BI__sync_lock_release: 5657 case Builtin::BI__sync_lock_release_1: 5658 case Builtin::BI__sync_lock_release_2: 5659 case Builtin::BI__sync_lock_release_4: 5660 case Builtin::BI__sync_lock_release_8: 5661 case Builtin::BI__sync_lock_release_16: 5662 BuiltinIndex = 15; 5663 NumFixed = 0; 5664 ResultType = Context.VoidTy; 5665 break; 5666 5667 case Builtin::BI__sync_swap: 5668 case Builtin::BI__sync_swap_1: 5669 case Builtin::BI__sync_swap_2: 5670 case Builtin::BI__sync_swap_4: 5671 case Builtin::BI__sync_swap_8: 5672 case Builtin::BI__sync_swap_16: 5673 BuiltinIndex = 16; 5674 break; 5675 } 5676 5677 // Now that we know how many fixed arguments we expect, first check that we 5678 // have at least that many. 5679 if (TheCall->getNumArgs() < 1+NumFixed) { 5680 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 5681 << 0 << 1 + NumFixed << TheCall->getNumArgs() 5682 << Callee->getSourceRange(); 5683 return ExprError(); 5684 } 5685 5686 Diag(TheCall->getEndLoc(), diag::warn_atomic_implicit_seq_cst) 5687 << Callee->getSourceRange(); 5688 5689 if (WarnAboutSemanticsChange) { 5690 Diag(TheCall->getEndLoc(), diag::warn_sync_fetch_and_nand_semantics_change) 5691 << Callee->getSourceRange(); 5692 } 5693 5694 // Get the decl for the concrete builtin from this, we can tell what the 5695 // concrete integer type we should convert to is. 5696 unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex]; 5697 const char *NewBuiltinName = Context.BuiltinInfo.getName(NewBuiltinID); 5698 FunctionDecl *NewBuiltinDecl; 5699 if (NewBuiltinID == BuiltinID) 5700 NewBuiltinDecl = FDecl; 5701 else { 5702 // Perform builtin lookup to avoid redeclaring it. 
5703 DeclarationName DN(&Context.Idents.get(NewBuiltinName)); 5704 LookupResult Res(*this, DN, DRE->getBeginLoc(), LookupOrdinaryName); 5705 LookupName(Res, TUScope, /*AllowBuiltinCreation=*/true); 5706 assert(Res.getFoundDecl()); 5707 NewBuiltinDecl = dyn_cast<FunctionDecl>(Res.getFoundDecl()); 5708 if (!NewBuiltinDecl) 5709 return ExprError(); 5710 } 5711
5712 // The first argument --- the pointer --- has a fixed type; we 5713 // deduce the types of the rest of the arguments accordingly. Walk 5714 // the remaining arguments, converting them to the deduced value type. 5715 for (unsigned i = 0; i != NumFixed; ++i) { 5716 ExprResult Arg = TheCall->getArg(i+1); 5717
5718 // GCC does an implicit conversion to the pointer or integer ValType. This 5719 // can fail in some cases (1i -> int**), check for this error case now. 5720 // Initialize the argument. 5721 InitializedEntity Entity = InitializedEntity::InitializeParameter(Context, 5722 ValType, /*consume*/ false); 5723 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 5724 if (Arg.isInvalid()) 5725 return ExprError(); 5726
5727 // Okay, we have something that *can* be converted to the right type. Check 5728 // to see if there is a potentially weird extension going on here. This can 5729 // happen when you do an atomic operation on something like a char* and 5730 // pass in 42. The 42 gets converted to char. This is even more strange 5731 // for things like 45.123 -> char, etc. 5732 // FIXME: Do this check. 5733 TheCall->setArg(i+1, Arg.get()); 5734 } 5735
5736 // Create a new DeclRefExpr to refer to the new decl. 5737 DeclRefExpr *NewDRE = DeclRefExpr::Create( 5738 Context, DRE->getQualifierLoc(), SourceLocation(), NewBuiltinDecl, 5739 /*enclosing*/ false, DRE->getLocation(), Context.BuiltinFnTy, 5740 DRE->getValueKind(), nullptr, nullptr, DRE->isNonOdrUse()); 5741
5742 // Set the callee in the CallExpr. 5743 // FIXME: This loses syntactic information. 5744 QualType CalleePtrTy = Context.getPointerType(NewBuiltinDecl->getType()); 5745 ExprResult PromotedCall = ImpCastExprToType(NewDRE, CalleePtrTy, 5746 CK_BuiltinFnToFnPtr); 5747 TheCall->setCallee(PromotedCall.get()); 5748
5749 // Change the result type of the call to match the original value type. This 5750 // is arbitrary, but the codegen for these builtins is designed to handle it 5751 // gracefully. 5752 TheCall->setType(ResultType); 5753
5754 // Prohibit use of _ExtInt with atomic builtins. 5755 // The arguments would have already been converted to the first argument's 5756 // type, so we only need to check the first argument. 5757 const auto *ExtIntValType = ValType->getAs<ExtIntType>(); 5758 if (ExtIntValType && !llvm::isPowerOf2_64(ExtIntValType->getNumBits())) { 5759 Diag(FirstArg->getExprLoc(), diag::err_atomic_builtin_ext_int_size); 5760 return ExprError(); 5761 } 5762
5763 return TheCallResult; 5764 } 5765
5766 /// SemaBuiltinNontemporalOverloaded - We have a call to 5767 /// __builtin_nontemporal_store or __builtin_nontemporal_load, which is an 5768 /// overloaded function based on the pointer type of its last argument. 5769 /// 5770 /// This function goes through and does final semantic checking for these 5771 /// builtins.
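/// Illustrative sketch (hypothetical): in
///   int *p = ...;
///   __builtin_nontemporal_store(42, p);
/// the last argument's pointee type 'int' is taken as the type of the memory
/// access, and the stored value is converted to it below.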
5772 ExprResult Sema::SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult) { 5773 CallExpr *TheCall = (CallExpr *)TheCallResult.get(); 5774 DeclRefExpr *DRE = 5775 cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 5776 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 5777 unsigned BuiltinID = FDecl->getBuiltinID(); 5778 assert((BuiltinID == Builtin::BI__builtin_nontemporal_store || 5779 BuiltinID == Builtin::BI__builtin_nontemporal_load) && 5780 "Unexpected nontemporal load/store builtin!"); 5781 bool isStore = BuiltinID == Builtin::BI__builtin_nontemporal_store; 5782 unsigned numArgs = isStore ? 2 : 1; 5783 5784 // Ensure that we have the proper number of arguments. 5785 if (checkArgCount(*this, TheCall, numArgs)) 5786 return ExprError(); 5787 5788 // Inspect the last argument of the nontemporal builtin. This should always 5789 // be a pointer type, from which we imply the type of the memory access. 5790 // Because it is a pointer type, we don't have to worry about any implicit 5791 // casts here. 5792 Expr *PointerArg = TheCall->getArg(numArgs - 1); 5793 ExprResult PointerArgResult = 5794 DefaultFunctionArrayLvalueConversion(PointerArg); 5795 5796 if (PointerArgResult.isInvalid()) 5797 return ExprError(); 5798 PointerArg = PointerArgResult.get(); 5799 TheCall->setArg(numArgs - 1, PointerArg); 5800 5801 const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>(); 5802 if (!pointerType) { 5803 Diag(DRE->getBeginLoc(), diag::err_nontemporal_builtin_must_be_pointer) 5804 << PointerArg->getType() << PointerArg->getSourceRange(); 5805 return ExprError(); 5806 } 5807 5808 QualType ValType = pointerType->getPointeeType(); 5809 5810 // Strip any qualifiers off ValType. 5811 ValType = ValType.getUnqualifiedType(); 5812 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 5813 !ValType->isBlockPointerType() && !ValType->isFloatingType() && 5814 !ValType->isVectorType()) { 5815 Diag(DRE->getBeginLoc(), 5816 diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector) 5817 << PointerArg->getType() << PointerArg->getSourceRange(); 5818 return ExprError(); 5819 } 5820 5821 if (!isStore) { 5822 TheCall->setType(ValType); 5823 return TheCallResult; 5824 } 5825 5826 ExprResult ValArg = TheCall->getArg(0); 5827 InitializedEntity Entity = InitializedEntity::InitializeParameter( 5828 Context, ValType, /*consume*/ false); 5829 ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg); 5830 if (ValArg.isInvalid()) 5831 return ExprError(); 5832 5833 TheCall->setArg(0, ValArg.get()); 5834 TheCall->setType(Context.VoidTy); 5835 return TheCallResult; 5836 } 5837 5838 /// CheckObjCString - Checks that the argument to the builtin 5839 /// CFString constructor is correct 5840 /// Note: It might also make sense to do the UTF-16 conversion here (would 5841 /// simplify the backend). 
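/// Illustrative sketch (hypothetical): CFSTR("plain ASCII") is accepted as-is,
/// while a literal containing non-ASCII bytes is additionally checked for a
/// lossless UTF-8 to UTF-16 conversion and may emit warn_cfstring_truncated.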
5842 bool Sema::CheckObjCString(Expr *Arg) { 5843 Arg = Arg->IgnoreParenCasts(); 5844 StringLiteral *Literal = dyn_cast<StringLiteral>(Arg); 5845 5846 if (!Literal || !Literal->isAscii()) { 5847 Diag(Arg->getBeginLoc(), diag::err_cfstring_literal_not_string_constant) 5848 << Arg->getSourceRange(); 5849 return true; 5850 } 5851 5852 if (Literal->containsNonAsciiOrNull()) { 5853 StringRef String = Literal->getString(); 5854 unsigned NumBytes = String.size(); 5855 SmallVector<llvm::UTF16, 128> ToBuf(NumBytes); 5856 const llvm::UTF8 *FromPtr = (const llvm::UTF8 *)String.data(); 5857 llvm::UTF16 *ToPtr = &ToBuf[0]; 5858 5859 llvm::ConversionResult Result = 5860 llvm::ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes, &ToPtr, 5861 ToPtr + NumBytes, llvm::strictConversion); 5862 // Check for conversion failure. 5863 if (Result != llvm::conversionOK) 5864 Diag(Arg->getBeginLoc(), diag::warn_cfstring_truncated) 5865 << Arg->getSourceRange(); 5866 } 5867 return false; 5868 } 5869 5870 /// CheckOSLogFormatStringArg - Checks that the format string argument to the 5871 /// os_log() and os_trace() functions is correct, and converts it to const char *. 5872 ExprResult Sema::CheckOSLogFormatStringArg(Expr *Arg) { 5873 Arg = Arg->IgnoreParenCasts(); 5874 auto *Literal = dyn_cast<StringLiteral>(Arg); 5875 if (!Literal) { 5876 if (auto *ObjcLiteral = dyn_cast<ObjCStringLiteral>(Arg)) { 5877 Literal = ObjcLiteral->getString(); 5878 } 5879 } 5880 5881 if (!Literal || (!Literal->isAscii() && !Literal->isUTF8())) { 5882 return ExprError( 5883 Diag(Arg->getBeginLoc(), diag::err_os_log_format_not_string_constant) 5884 << Arg->getSourceRange()); 5885 } 5886 5887 ExprResult Result(Literal); 5888 QualType ResultTy = Context.getPointerType(Context.CharTy.withConst()); 5889 InitializedEntity Entity = 5890 InitializedEntity::InitializeParameter(Context, ResultTy, false); 5891 Result = PerformCopyInitialization(Entity, SourceLocation(), Result); 5892 return Result; 5893 } 5894 5895 /// Check that the user is calling the appropriate va_start builtin for the 5896 /// target and calling convention. 5897 static bool checkVAStartABI(Sema &S, unsigned BuiltinID, Expr *Fn) { 5898 const llvm::Triple &TT = S.Context.getTargetInfo().getTriple(); 5899 bool IsX64 = TT.getArch() == llvm::Triple::x86_64; 5900 bool IsAArch64 = (TT.getArch() == llvm::Triple::aarch64 || 5901 TT.getArch() == llvm::Triple::aarch64_32); 5902 bool IsWindows = TT.isOSWindows(); 5903 bool IsMSVAStart = BuiltinID == Builtin::BI__builtin_ms_va_start; 5904 if (IsX64 || IsAArch64) { 5905 CallingConv CC = CC_C; 5906 if (const FunctionDecl *FD = S.getCurFunctionDecl()) 5907 CC = FD->getType()->castAs<FunctionType>()->getCallConv(); 5908 if (IsMSVAStart) { 5909 // Don't allow this in System V ABI functions. 5910 if (CC == CC_X86_64SysV || (!IsWindows && CC != CC_Win64)) 5911 return S.Diag(Fn->getBeginLoc(), 5912 diag::err_ms_va_start_used_in_sysv_function); 5913 } else { 5914 // On x86-64/AArch64 Unix, don't allow this in Win64 ABI functions. 5915 // On x64 Windows, don't allow this in System V ABI functions. 5916 // (Yes, that means there's no corresponding way to support variadic 5917 // System V ABI functions on Windows.)
5918 if ((IsWindows && CC == CC_X86_64SysV) || 5919 (!IsWindows && CC == CC_Win64)) 5920 return S.Diag(Fn->getBeginLoc(), 5921 diag::err_va_start_used_in_wrong_abi_function) 5922 << !IsWindows; 5923 } 5924 return false; 5925 } 5926 5927 if (IsMSVAStart) 5928 return S.Diag(Fn->getBeginLoc(), diag::err_builtin_x64_aarch64_only); 5929 return false; 5930 } 5931 5932 static bool checkVAStartIsInVariadicFunction(Sema &S, Expr *Fn, 5933 ParmVarDecl **LastParam = nullptr) { 5934 // Determine whether the current function, block, or obj-c method is variadic 5935 // and get its parameter list. 5936 bool IsVariadic = false; 5937 ArrayRef<ParmVarDecl *> Params; 5938 DeclContext *Caller = S.CurContext; 5939 if (auto *Block = dyn_cast<BlockDecl>(Caller)) { 5940 IsVariadic = Block->isVariadic(); 5941 Params = Block->parameters(); 5942 } else if (auto *FD = dyn_cast<FunctionDecl>(Caller)) { 5943 IsVariadic = FD->isVariadic(); 5944 Params = FD->parameters(); 5945 } else if (auto *MD = dyn_cast<ObjCMethodDecl>(Caller)) { 5946 IsVariadic = MD->isVariadic(); 5947 // FIXME: This isn't correct for methods (results in bogus warning). 5948 Params = MD->parameters(); 5949 } else if (isa<CapturedDecl>(Caller)) { 5950 // We don't support va_start in a CapturedDecl. 5951 S.Diag(Fn->getBeginLoc(), diag::err_va_start_captured_stmt); 5952 return true; 5953 } else { 5954 // This must be some other declcontext that parses exprs. 5955 S.Diag(Fn->getBeginLoc(), diag::err_va_start_outside_function); 5956 return true; 5957 } 5958 5959 if (!IsVariadic) { 5960 S.Diag(Fn->getBeginLoc(), diag::err_va_start_fixed_function); 5961 return true; 5962 } 5963 5964 if (LastParam) 5965 *LastParam = Params.empty() ? nullptr : Params.back(); 5966 5967 return false; 5968 } 5969 5970 /// Check the arguments to '__builtin_va_start' or '__builtin_ms_va_start' 5971 /// for validity. Emit an error and return true on failure; return false 5972 /// on success. 5973 bool Sema::SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) { 5974 Expr *Fn = TheCall->getCallee(); 5975 5976 if (checkVAStartABI(*this, BuiltinID, Fn)) 5977 return true; 5978 5979 if (checkArgCount(*this, TheCall, 2)) 5980 return true; 5981 5982 // Type-check the first argument normally. 5983 if (checkBuiltinArgument(*this, TheCall, 0)) 5984 return true; 5985 5986 // Check that the current function is variadic, and get its last parameter. 5987 ParmVarDecl *LastParam; 5988 if (checkVAStartIsInVariadicFunction(*this, Fn, &LastParam)) 5989 return true; 5990 5991 // Verify that the second argument to the builtin is the last argument of the 5992 // current function or method. 5993 bool SecondArgIsLastNamedArgument = false; 5994 const Expr *Arg = TheCall->getArg(1)->IgnoreParenCasts(); 5995 5996 // These are valid if SecondArgIsLastNamedArgument is false after the next 5997 // block. 
5998 QualType Type; 5999 SourceLocation ParamLoc; 6000 bool IsCRegister = false; 6001 6002 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Arg)) { 6003 if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(DR->getDecl())) { 6004 SecondArgIsLastNamedArgument = PV == LastParam; 6005 6006 Type = PV->getType(); 6007 ParamLoc = PV->getLocation(); 6008 IsCRegister = 6009 PV->getStorageClass() == SC_Register && !getLangOpts().CPlusPlus; 6010 } 6011 } 6012 6013 if (!SecondArgIsLastNamedArgument) 6014 Diag(TheCall->getArg(1)->getBeginLoc(), 6015 diag::warn_second_arg_of_va_start_not_last_named_param); 6016 else if (IsCRegister || Type->isReferenceType() || 6017 Type->isSpecificBuiltinType(BuiltinType::Float) || [=] { 6018 // Promotable integers are UB, but enumerations need a bit of 6019 // extra checking to see what their promotable type actually is. 6020 if (!Type->isPromotableIntegerType()) 6021 return false; 6022 if (!Type->isEnumeralType()) 6023 return true; 6024 const EnumDecl *ED = Type->castAs<EnumType>()->getDecl(); 6025 return !(ED && 6026 Context.typesAreCompatible(ED->getPromotionType(), Type)); 6027 }()) { 6028 unsigned Reason = 0; 6029 if (Type->isReferenceType()) Reason = 1; 6030 else if (IsCRegister) Reason = 2; 6031 Diag(Arg->getBeginLoc(), diag::warn_va_start_type_is_undefined) << Reason; 6032 Diag(ParamLoc, diag::note_parameter_type) << Type; 6033 } 6034 6035 TheCall->setType(Context.VoidTy); 6036 return false; 6037 } 6038 6039 bool Sema::SemaBuiltinVAStartARMMicrosoft(CallExpr *Call) { 6040 // void __va_start(va_list *ap, const char *named_addr, size_t slot_size, 6041 // const char *named_addr); 6042 6043 Expr *Func = Call->getCallee(); 6044 6045 if (Call->getNumArgs() < 3) 6046 return Diag(Call->getEndLoc(), 6047 diag::err_typecheck_call_too_few_args_at_least) 6048 << 0 /*function call*/ << 3 << Call->getNumArgs(); 6049 6050 // Type-check the first argument normally. 6051 if (checkBuiltinArgument(*this, Call, 0)) 6052 return true; 6053 6054 // Check that the current function is variadic. 6055 if (checkVAStartIsInVariadicFunction(*this, Func)) 6056 return true; 6057 6058 // __va_start on Windows does not validate the parameter qualifiers 6059 6060 const Expr *Arg1 = Call->getArg(1)->IgnoreParens(); 6061 const Type *Arg1Ty = Arg1->getType().getCanonicalType().getTypePtr(); 6062 6063 const Expr *Arg2 = Call->getArg(2)->IgnoreParens(); 6064 const Type *Arg2Ty = Arg2->getType().getCanonicalType().getTypePtr(); 6065 6066 const QualType &ConstCharPtrTy = 6067 Context.getPointerType(Context.CharTy.withConst()); 6068 if (!Arg1Ty->isPointerType() || 6069 Arg1Ty->getPointeeType().withoutLocalFastQualifiers() != Context.CharTy) 6070 Diag(Arg1->getBeginLoc(), diag::err_typecheck_convert_incompatible) 6071 << Arg1->getType() << ConstCharPtrTy << 1 /* different class */ 6072 << 0 /* qualifier difference */ 6073 << 3 /* parameter mismatch */ 6074 << 2 << Arg1->getType() << ConstCharPtrTy; 6075 6076 const QualType SizeTy = Context.getSizeType(); 6077 if (Arg2Ty->getCanonicalTypeInternal().withoutLocalFastQualifiers() != SizeTy) 6078 Diag(Arg2->getBeginLoc(), diag::err_typecheck_convert_incompatible) 6079 << Arg2->getType() << SizeTy << 1 /* different class */ 6080 << 0 /* qualifier difference */ 6081 << 3 /* parameter mismatch */ 6082 << 3 << Arg2->getType() << SizeTy; 6083 6084 return false; 6085 } 6086 6087 /// SemaBuiltinUnorderedCompare - Handle functions like __builtin_isgreater and 6088 /// friends. This is declared to take (...), so we have to check everything. 
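///
/// Illustrative usage of the builtins handled here (a sketch, not code from
/// this file); both operands are promoted to a common real floating type
/// before the comparison:
/// \code
///   int compare(double A, double B) {
///     return __builtin_isgreater(A, B) || __builtin_isunordered(A, B);
///   }
/// \endcode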
6089 bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall) { 6090 if (checkArgCount(*this, TheCall, 2)) 6091 return true; 6092 6093 ExprResult OrigArg0 = TheCall->getArg(0); 6094 ExprResult OrigArg1 = TheCall->getArg(1); 6095 6096 // Do standard promotions between the two arguments, returning their common 6097 // type. 6098 QualType Res = UsualArithmeticConversions( 6099 OrigArg0, OrigArg1, TheCall->getExprLoc(), ACK_Comparison); 6100 if (OrigArg0.isInvalid() || OrigArg1.isInvalid()) 6101 return true; 6102 6103 // Make sure any conversions are pushed back into the call; this is 6104 // type safe since unordered compare builtins are declared as "_Bool 6105 // foo(...)". 6106 TheCall->setArg(0, OrigArg0.get()); 6107 TheCall->setArg(1, OrigArg1.get()); 6108 6109 if (OrigArg0.get()->isTypeDependent() || OrigArg1.get()->isTypeDependent()) 6110 return false; 6111 6112 // If the common type isn't a real floating type, then the arguments were 6113 // invalid for this operation. 6114 if (Res.isNull() || !Res->isRealFloatingType()) 6115 return Diag(OrigArg0.get()->getBeginLoc(), 6116 diag::err_typecheck_call_invalid_ordered_compare) 6117 << OrigArg0.get()->getType() << OrigArg1.get()->getType() 6118 << SourceRange(OrigArg0.get()->getBeginLoc(), 6119 OrigArg1.get()->getEndLoc()); 6120 6121 return false; 6122 } 6123 6124 /// SemaBuiltinFPClassification - Handle functions like 6125 /// __builtin_isnan and friends. This is declared to take (...), so we have 6126 /// to check everything. We expect the last argument to be a floating point 6127 /// value. 6128 bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) { 6129 if (checkArgCount(*this, TheCall, NumArgs)) 6130 return true; 6131 6132 // __builtin_fpclassify is the only case where NumArgs != 1, so we can count 6133 // on all preceding parameters just being int. Try all of those. 6134 for (unsigned i = 0; i < NumArgs - 1; ++i) { 6135 Expr *Arg = TheCall->getArg(i); 6136 6137 if (Arg->isTypeDependent()) 6138 return false; 6139 6140 ExprResult Res = PerformImplicitConversion(Arg, Context.IntTy, AA_Passing); 6141 6142 if (Res.isInvalid()) 6143 return true; 6144 TheCall->setArg(i, Res.get()); 6145 } 6146 6147 Expr *OrigArg = TheCall->getArg(NumArgs-1); 6148 6149 if (OrigArg->isTypeDependent()) 6150 return false; 6151 6152 // Usual Unary Conversions will convert half to float, which we want for 6153 // machines that use fp16 conversion intrinsics. Else, we want to leave the 6154 // type as it is, but do normal L->Rvalue conversions. 6155 if (Context.getTargetInfo().useFP16ConversionIntrinsics()) 6156 OrigArg = UsualUnaryConversions(OrigArg).get(); 6157 else 6158 OrigArg = DefaultFunctionArrayLvalueConversion(OrigArg).get(); 6159 TheCall->setArg(NumArgs - 1, OrigArg); 6160 6161 // This operation requires a non-_Complex floating-point number. 6162 if (!OrigArg->getType()->isRealFloatingType()) 6163 return Diag(OrigArg->getBeginLoc(), 6164 diag::err_typecheck_call_invalid_unary_fp) 6165 << OrigArg->getType() << OrigArg->getSourceRange(); 6166 6167 return false; 6168 } 6169 6170 /// Perform semantic analysis for a call to __builtin_complex.
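///
/// Illustrative usage (a sketch, not code from this file); both arguments
/// must have the same real floating-point type, and the result is the
/// corresponding _Complex type:
/// \code
///   _Complex double make_z(double Re, double Im) {
///     return __builtin_complex(Re, Im);
///   }
/// \endcode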
6171 bool Sema::SemaBuiltinComplex(CallExpr *TheCall) { 6172 if (checkArgCount(*this, TheCall, 2)) 6173 return true; 6174 6175 bool Dependent = false; 6176 for (unsigned I = 0; I != 2; ++I) { 6177 Expr *Arg = TheCall->getArg(I); 6178 QualType T = Arg->getType(); 6179 if (T->isDependentType()) { 6180 Dependent = true; 6181 continue; 6182 } 6183 6184 // Despite supporting _Complex int, GCC requires a real floating point type 6185 // for the operands of __builtin_complex. 6186 if (!T->isRealFloatingType()) { 6187 return Diag(Arg->getBeginLoc(), diag::err_typecheck_call_requires_real_fp) 6188 << Arg->getType() << Arg->getSourceRange(); 6189 } 6190 6191 ExprResult Converted = DefaultLvalueConversion(Arg); 6192 if (Converted.isInvalid()) 6193 return true; 6194 TheCall->setArg(I, Converted.get()); 6195 } 6196 6197 if (Dependent) { 6198 TheCall->setType(Context.DependentTy); 6199 return false; 6200 } 6201 6202 Expr *Real = TheCall->getArg(0); 6203 Expr *Imag = TheCall->getArg(1); 6204 if (!Context.hasSameType(Real->getType(), Imag->getType())) { 6205 return Diag(Real->getBeginLoc(), 6206 diag::err_typecheck_call_different_arg_types) 6207 << Real->getType() << Imag->getType() 6208 << Real->getSourceRange() << Imag->getSourceRange(); 6209 } 6210 6211 // We don't allow _Complex _Float16 nor _Complex __fp16 as type specifiers; 6212 // don't allow this builtin to form those types either. 6213 // FIXME: Should we allow these types? 6214 if (Real->getType()->isFloat16Type()) 6215 return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec) 6216 << "_Float16"; 6217 if (Real->getType()->isHalfType()) 6218 return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec) 6219 << "half"; 6220 6221 TheCall->setType(Context.getComplexType(Real->getType())); 6222 return false; 6223 } 6224 6225 // Customized Sema Checking for VSX builtins that have the following signature: 6226 // vector [...] builtinName(vector [...], vector [...], const int); 6227 // Which takes the same type of vectors (any legal vector type) for the first 6228 // two arguments and takes compile time constant for the third argument. 6229 // Example builtins are : 6230 // vector double vec_xxpermdi(vector double, vector double, int); 6231 // vector short vec_xxsldwi(vector short, vector short, int); 6232 bool Sema::SemaBuiltinVSX(CallExpr *TheCall) { 6233 unsigned ExpectedNumArgs = 3; 6234 if (checkArgCount(*this, TheCall, ExpectedNumArgs)) 6235 return true; 6236 6237 // Check the third argument is a compile time constant 6238 if (!TheCall->getArg(2)->isIntegerConstantExpr(Context)) 6239 return Diag(TheCall->getBeginLoc(), 6240 diag::err_vsx_builtin_nonconstant_argument) 6241 << 3 /* argument index */ << TheCall->getDirectCallee() 6242 << SourceRange(TheCall->getArg(2)->getBeginLoc(), 6243 TheCall->getArg(2)->getEndLoc()); 6244 6245 QualType Arg1Ty = TheCall->getArg(0)->getType(); 6246 QualType Arg2Ty = TheCall->getArg(1)->getType(); 6247 6248 // Check the type of argument 1 and argument 2 are vectors. 6249 SourceLocation BuiltinLoc = TheCall->getBeginLoc(); 6250 if ((!Arg1Ty->isVectorType() && !Arg1Ty->isDependentType()) || 6251 (!Arg2Ty->isVectorType() && !Arg2Ty->isDependentType())) { 6252 return Diag(BuiltinLoc, diag::err_vec_builtin_non_vector) 6253 << TheCall->getDirectCallee() 6254 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 6255 TheCall->getArg(1)->getEndLoc()); 6256 } 6257 6258 // Check the first two arguments are the same type. 
6259 if (!Context.hasSameUnqualifiedType(Arg1Ty, Arg2Ty)) { 6260 return Diag(BuiltinLoc, diag::err_vec_builtin_incompatible_vector) 6261 << TheCall->getDirectCallee() 6262 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 6263 TheCall->getArg(1)->getEndLoc()); 6264 } 6265 6266 // When default clang type checking is turned off and the customized type 6267 // checking is used, the returning type of the function must be explicitly 6268 // set. Otherwise it is _Bool by default. 6269 TheCall->setType(Arg1Ty); 6270 6271 return false; 6272 } 6273 6274 /// SemaBuiltinShuffleVector - Handle __builtin_shufflevector. 6275 // This is declared to take (...), so we have to check everything. 6276 ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) { 6277 if (TheCall->getNumArgs() < 2) 6278 return ExprError(Diag(TheCall->getEndLoc(), 6279 diag::err_typecheck_call_too_few_args_at_least) 6280 << 0 /*function call*/ << 2 << TheCall->getNumArgs() 6281 << TheCall->getSourceRange()); 6282 6283 // Determine which of the following types of shufflevector we're checking: 6284 // 1) unary, vector mask: (lhs, mask) 6285 // 2) binary, scalar mask: (lhs, rhs, index, ..., index) 6286 QualType resType = TheCall->getArg(0)->getType(); 6287 unsigned numElements = 0; 6288 6289 if (!TheCall->getArg(0)->isTypeDependent() && 6290 !TheCall->getArg(1)->isTypeDependent()) { 6291 QualType LHSType = TheCall->getArg(0)->getType(); 6292 QualType RHSType = TheCall->getArg(1)->getType(); 6293 6294 if (!LHSType->isVectorType() || !RHSType->isVectorType()) 6295 return ExprError( 6296 Diag(TheCall->getBeginLoc(), diag::err_vec_builtin_non_vector) 6297 << TheCall->getDirectCallee() 6298 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 6299 TheCall->getArg(1)->getEndLoc())); 6300 6301 numElements = LHSType->castAs<VectorType>()->getNumElements(); 6302 unsigned numResElements = TheCall->getNumArgs() - 2; 6303 6304 // Check to see if we have a call with 2 vector arguments, the unary shuffle 6305 // with mask. If so, verify that RHS is an integer vector type with the 6306 // same number of elts as lhs. 6307 if (TheCall->getNumArgs() == 2) { 6308 if (!RHSType->hasIntegerRepresentation() || 6309 RHSType->castAs<VectorType>()->getNumElements() != numElements) 6310 return ExprError(Diag(TheCall->getBeginLoc(), 6311 diag::err_vec_builtin_incompatible_vector) 6312 << TheCall->getDirectCallee() 6313 << SourceRange(TheCall->getArg(1)->getBeginLoc(), 6314 TheCall->getArg(1)->getEndLoc())); 6315 } else if (!Context.hasSameUnqualifiedType(LHSType, RHSType)) { 6316 return ExprError(Diag(TheCall->getBeginLoc(), 6317 diag::err_vec_builtin_incompatible_vector) 6318 << TheCall->getDirectCallee() 6319 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 6320 TheCall->getArg(1)->getEndLoc())); 6321 } else if (numElements != numResElements) { 6322 QualType eltType = LHSType->castAs<VectorType>()->getElementType(); 6323 resType = Context.getVectorType(eltType, numResElements, 6324 VectorType::GenericVector); 6325 } 6326 } 6327 6328 for (unsigned i = 2; i < TheCall->getNumArgs(); i++) { 6329 if (TheCall->getArg(i)->isTypeDependent() || 6330 TheCall->getArg(i)->isValueDependent()) 6331 continue; 6332 6333 Optional<llvm::APSInt> Result; 6334 if (!(Result = TheCall->getArg(i)->getIntegerConstantExpr(Context))) 6335 return ExprError(Diag(TheCall->getBeginLoc(), 6336 diag::err_shufflevector_nonconstant_argument) 6337 << TheCall->getArg(i)->getSourceRange()); 6338 6339 // Allow -1 which will be translated to undef in the IR. 
6340 if (Result->isSigned() && Result->isAllOnesValue()) 6341 continue; 6342 6343 if (Result->getActiveBits() > 64 || 6344 Result->getZExtValue() >= numElements * 2) 6345 return ExprError(Diag(TheCall->getBeginLoc(), 6346 diag::err_shufflevector_argument_too_large) 6347 << TheCall->getArg(i)->getSourceRange()); 6348 } 6349 6350 SmallVector<Expr*, 32> exprs; 6351 6352 for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; i++) { 6353 exprs.push_back(TheCall->getArg(i)); 6354 TheCall->setArg(i, nullptr); 6355 } 6356 6357 return new (Context) ShuffleVectorExpr(Context, exprs, resType, 6358 TheCall->getCallee()->getBeginLoc(), 6359 TheCall->getRParenLoc()); 6360 } 6361 6362 /// SemaConvertVectorExpr - Handle __builtin_convertvector 6363 ExprResult Sema::SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, 6364 SourceLocation BuiltinLoc, 6365 SourceLocation RParenLoc) { 6366 ExprValueKind VK = VK_RValue; 6367 ExprObjectKind OK = OK_Ordinary; 6368 QualType DstTy = TInfo->getType(); 6369 QualType SrcTy = E->getType(); 6370 6371 if (!SrcTy->isVectorType() && !SrcTy->isDependentType()) 6372 return ExprError(Diag(BuiltinLoc, 6373 diag::err_convertvector_non_vector) 6374 << E->getSourceRange()); 6375 if (!DstTy->isVectorType() && !DstTy->isDependentType()) 6376 return ExprError(Diag(BuiltinLoc, 6377 diag::err_convertvector_non_vector_type)); 6378 6379 if (!SrcTy->isDependentType() && !DstTy->isDependentType()) { 6380 unsigned SrcElts = SrcTy->castAs<VectorType>()->getNumElements(); 6381 unsigned DstElts = DstTy->castAs<VectorType>()->getNumElements(); 6382 if (SrcElts != DstElts) 6383 return ExprError(Diag(BuiltinLoc, 6384 diag::err_convertvector_incompatible_vector) 6385 << E->getSourceRange()); 6386 } 6387 6388 return new (Context) 6389 ConvertVectorExpr(E, TInfo, DstTy, VK, OK, BuiltinLoc, RParenLoc); 6390 } 6391 6392 /// SemaBuiltinPrefetch - Handle __builtin_prefetch. 6393 // This is declared to take (const void*, ...) and can take two 6394 // optional constant int args. 6395 bool Sema::SemaBuiltinPrefetch(CallExpr *TheCall) { 6396 unsigned NumArgs = TheCall->getNumArgs(); 6397 6398 if (NumArgs > 3) 6399 return Diag(TheCall->getEndLoc(), 6400 diag::err_typecheck_call_too_many_args_at_most) 6401 << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange(); 6402 6403 // Argument 0 is checked for us and the remaining arguments must be 6404 // constant integers. 6405 for (unsigned i = 1; i != NumArgs; ++i) 6406 if (SemaBuiltinConstantArgRange(TheCall, i, 0, i == 1 ? 1 : 3)) 6407 return true; 6408 6409 return false; 6410 } 6411 6412 /// SemaBuiltinAssume - Handle __assume (MS Extension). 6413 // __assume does not evaluate its arguments, and should warn if its argument 6414 // has side effects. 6415 bool Sema::SemaBuiltinAssume(CallExpr *TheCall) { 6416 Expr *Arg = TheCall->getArg(0); 6417 if (Arg->isInstantiationDependent()) return false; 6418 6419 if (Arg->HasSideEffects(Context)) 6420 Diag(Arg->getBeginLoc(), diag::warn_assume_side_effects) 6421 << Arg->getSourceRange() 6422 << cast<FunctionDecl>(TheCall->getCalleeDecl())->getIdentifier(); 6423 6424 return false; 6425 } 6426 6427 /// Handle __builtin_alloca_with_align. This is declared 6428 /// as (size_t, size_t) where the second size_t must be a power of 2 greater 6429 /// than 8. 6430 bool Sema::SemaBuiltinAllocaWithAlign(CallExpr *TheCall) { 6431 // The alignment must be a constant integer. 6432 Expr *Arg = TheCall->getArg(1); 6433 6434 // We can't check the value of a dependent argument. 
6435 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) { 6436 if (const auto *UE = 6437 dyn_cast<UnaryExprOrTypeTraitExpr>(Arg->IgnoreParenImpCasts())) 6438 if (UE->getKind() == UETT_AlignOf || 6439 UE->getKind() == UETT_PreferredAlignOf) 6440 Diag(TheCall->getBeginLoc(), diag::warn_alloca_align_alignof) 6441 << Arg->getSourceRange(); 6442 6443 llvm::APSInt Result = Arg->EvaluateKnownConstInt(Context); 6444 6445 if (!Result.isPowerOf2()) 6446 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two) 6447 << Arg->getSourceRange(); 6448 6449 if (Result < Context.getCharWidth()) 6450 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_small) 6451 << (unsigned)Context.getCharWidth() << Arg->getSourceRange(); 6452 6453 if (Result > std::numeric_limits<int32_t>::max()) 6454 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_big) 6455 << std::numeric_limits<int32_t>::max() << Arg->getSourceRange(); 6456 } 6457 6458 return false; 6459 } 6460 6461 /// Handle __builtin_assume_aligned. This is declared 6462 /// as (const void*, size_t, ...) and can take one optional constant int arg. 6463 bool Sema::SemaBuiltinAssumeAligned(CallExpr *TheCall) { 6464 unsigned NumArgs = TheCall->getNumArgs(); 6465 6466 if (NumArgs > 3) 6467 return Diag(TheCall->getEndLoc(), 6468 diag::err_typecheck_call_too_many_args_at_most) 6469 << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange(); 6470 6471 // The alignment must be a constant integer. 6472 Expr *Arg = TheCall->getArg(1); 6473 6474 // We can't check the value of a dependent argument. 6475 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) { 6476 llvm::APSInt Result; 6477 if (SemaBuiltinConstantArg(TheCall, 1, Result)) 6478 return true; 6479 6480 if (!Result.isPowerOf2()) 6481 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two) 6482 << Arg->getSourceRange(); 6483 6484 if (Result > Sema::MaximumAlignment) 6485 Diag(TheCall->getBeginLoc(), diag::warn_assume_aligned_too_great) 6486 << Arg->getSourceRange() << Sema::MaximumAlignment; 6487 } 6488 6489 if (NumArgs > 2) { 6490 ExprResult Arg(TheCall->getArg(2)); 6491 InitializedEntity Entity = InitializedEntity::InitializeParameter(Context, 6492 Context.getSizeType(), false); 6493 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 6494 if (Arg.isInvalid()) return true; 6495 TheCall->setArg(2, Arg.get()); 6496 } 6497 6498 return false; 6499 } 6500 6501 bool Sema::SemaBuiltinOSLogFormat(CallExpr *TheCall) { 6502 unsigned BuiltinID = 6503 cast<FunctionDecl>(TheCall->getCalleeDecl())->getBuiltinID(); 6504 bool IsSizeCall = BuiltinID == Builtin::BI__builtin_os_log_format_buffer_size; 6505 6506 unsigned NumArgs = TheCall->getNumArgs(); 6507 unsigned NumRequiredArgs = IsSizeCall ? 1 : 2; 6508 if (NumArgs < NumRequiredArgs) { 6509 return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args) 6510 << 0 /* function call */ << NumRequiredArgs << NumArgs 6511 << TheCall->getSourceRange(); 6512 } 6513 if (NumArgs >= NumRequiredArgs + 0x100) { 6514 return Diag(TheCall->getEndLoc(), 6515 diag::err_typecheck_call_too_many_args_at_most) 6516 << 0 /* function call */ << (NumRequiredArgs + 0xff) << NumArgs 6517 << TheCall->getSourceRange(); 6518 } 6519 unsigned i = 0; 6520 6521 // For formatting call, check buffer arg. 
6522 if (!IsSizeCall) { 6523 ExprResult Arg(TheCall->getArg(i)); 6524 InitializedEntity Entity = InitializedEntity::InitializeParameter( 6525 Context, Context.VoidPtrTy, false); 6526 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 6527 if (Arg.isInvalid()) 6528 return true; 6529 TheCall->setArg(i, Arg.get()); 6530 i++; 6531 } 6532 6533 // Check string literal arg. 6534 unsigned FormatIdx = i; 6535 { 6536 ExprResult Arg = CheckOSLogFormatStringArg(TheCall->getArg(i)); 6537 if (Arg.isInvalid()) 6538 return true; 6539 TheCall->setArg(i, Arg.get()); 6540 i++; 6541 } 6542 6543 // Make sure variadic args are scalar. 6544 unsigned FirstDataArg = i; 6545 while (i < NumArgs) { 6546 ExprResult Arg = DefaultVariadicArgumentPromotion( 6547 TheCall->getArg(i), VariadicFunction, nullptr); 6548 if (Arg.isInvalid()) 6549 return true; 6550 CharUnits ArgSize = Context.getTypeSizeInChars(Arg.get()->getType()); 6551 if (ArgSize.getQuantity() >= 0x100) { 6552 return Diag(Arg.get()->getEndLoc(), diag::err_os_log_argument_too_big) 6553 << i << (int)ArgSize.getQuantity() << 0xff 6554 << TheCall->getSourceRange(); 6555 } 6556 TheCall->setArg(i, Arg.get()); 6557 i++; 6558 } 6559 6560 // Check formatting specifiers. NOTE: We're only doing this for the non-size 6561 // call to avoid duplicate diagnostics. 6562 if (!IsSizeCall) { 6563 llvm::SmallBitVector CheckedVarArgs(NumArgs, false); 6564 ArrayRef<const Expr *> Args(TheCall->getArgs(), TheCall->getNumArgs()); 6565 bool Success = CheckFormatArguments( 6566 Args, /*HasVAListArg*/ false, FormatIdx, FirstDataArg, FST_OSLog, 6567 VariadicFunction, TheCall->getBeginLoc(), SourceRange(), 6568 CheckedVarArgs); 6569 if (!Success) 6570 return true; 6571 } 6572 6573 if (IsSizeCall) { 6574 TheCall->setType(Context.getSizeType()); 6575 } else { 6576 TheCall->setType(Context.VoidPtrTy); 6577 } 6578 return false; 6579 } 6580 6581 /// SemaBuiltinConstantArg - Handle a check if argument ArgNum of CallExpr 6582 /// TheCall is a constant expression. 6583 bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, 6584 llvm::APSInt &Result) { 6585 Expr *Arg = TheCall->getArg(ArgNum); 6586 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 6587 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 6588 6589 if (Arg->isTypeDependent() || Arg->isValueDependent()) return false; 6590 6591 Optional<llvm::APSInt> R; 6592 if (!(R = Arg->getIntegerConstantExpr(Context))) 6593 return Diag(TheCall->getBeginLoc(), diag::err_constant_integer_arg_type) 6594 << FDecl->getDeclName() << Arg->getSourceRange(); 6595 Result = *R; 6596 return false; 6597 } 6598 6599 /// SemaBuiltinConstantArgRange - Handle a check if argument ArgNum of CallExpr 6600 /// TheCall is a constant expression in the range [Low, High]. 6601 bool Sema::SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, 6602 int Low, int High, bool RangeIsError) { 6603 if (isConstantEvaluated()) 6604 return false; 6605 llvm::APSInt Result; 6606 6607 // We can't check the value of a dependent argument. 6608 Expr *Arg = TheCall->getArg(ArgNum); 6609 if (Arg->isTypeDependent() || Arg->isValueDependent()) 6610 return false; 6611 6612 // Check constant-ness first. 
6613 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 6614 return true; 6615 6616 if (Result.getSExtValue() < Low || Result.getSExtValue() > High) { 6617 if (RangeIsError) 6618 return Diag(TheCall->getBeginLoc(), diag::err_argument_invalid_range) 6619 << Result.toString(10) << Low << High << Arg->getSourceRange(); 6620 else 6621 // Defer the warning until we know if the code will be emitted so that 6622 // dead code can ignore this. 6623 DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall, 6624 PDiag(diag::warn_argument_invalid_range) 6625 << Result.toString(10) << Low << High 6626 << Arg->getSourceRange()); 6627 } 6628 6629 return false; 6630 } 6631 6632 /// SemaBuiltinConstantArgMultiple - Handle a check if argument ArgNum of CallExpr 6633 /// TheCall is a constant expression that is a multiple of Num. 6634 bool Sema::SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, 6635 unsigned Num) { 6636 llvm::APSInt Result; 6637 6638 // We can't check the value of a dependent argument. 6639 Expr *Arg = TheCall->getArg(ArgNum); 6640 if (Arg->isTypeDependent() || Arg->isValueDependent()) 6641 return false; 6642 6643 // Check constant-ness first. 6644 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 6645 return true; 6646 6647 if (Result.getSExtValue() % Num != 0) 6648 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_multiple) 6649 << Num << Arg->getSourceRange(); 6650 6651 return false; 6652 } 6653 6654 /// SemaBuiltinConstantArgPower2 - Check if argument ArgNum of TheCall is a 6655 /// constant expression representing a power of 2. 6656 bool Sema::SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum) { 6657 llvm::APSInt Result; 6658 6659 // We can't check the value of a dependent argument. 6660 Expr *Arg = TheCall->getArg(ArgNum); 6661 if (Arg->isTypeDependent() || Arg->isValueDependent()) 6662 return false; 6663 6664 // Check constant-ness first. 6665 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 6666 return true; 6667 6668 // Bit-twiddling to test for a power of 2: for x > 0, x & (x-1) is zero if 6669 // and only if x is a power of 2. 6670 if (Result.isStrictlyPositive() && (Result & (Result - 1)) == 0) 6671 return false; 6672 6673 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_power_of_2) 6674 << Arg->getSourceRange(); 6675 } 6676 6677 static bool IsShiftedByte(llvm::APSInt Value) { 6678 if (Value.isNegative()) 6679 return false; 6680 6681 // Check if it's a shifted byte by shifting it down. 6682 while (true) { 6683 // If the value fits in the bottom byte, the check passes. 6684 if (Value < 0x100) 6685 return true; 6686 6687 // Otherwise, if the value has _any_ bits in the bottom byte, the check 6688 // fails. 6689 if ((Value & 0xFF) != 0) 6690 return false; 6691 6692 // If the bottom 8 bits are all 0, but something above that is nonzero, 6693 // then shifting the value right by 8 bits won't affect whether it's a 6694 // shifted byte or not. So do that, and go round again. 6695 Value >>= 8; 6696 } 6697 } 6698 6699 /// SemaBuiltinConstantArgShiftedByte - Check if argument ArgNum of TheCall is 6700 /// a constant expression representing an arbitrary byte value shifted left by 6701 /// a multiple of 8 bits. 6702 bool Sema::SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum, 6703 unsigned ArgBits) { 6704 llvm::APSInt Result; 6705 6706 // We can't check the value of a dependent argument.
6707 Expr *Arg = TheCall->getArg(ArgNum); 6708 if (Arg->isTypeDependent() || Arg->isValueDependent()) 6709 return false; 6710 6711 // Check constant-ness first. 6712 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 6713 return true; 6714 6715 // Truncate to the given size. 6716 Result = Result.getLoBits(ArgBits); 6717 Result.setIsUnsigned(true); 6718 6719 if (IsShiftedByte(Result)) 6720 return false; 6721 6722 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_shifted_byte) 6723 << Arg->getSourceRange(); 6724 } 6725 6726 /// SemaBuiltinConstantArgShiftedByteOr0xFF - Check if argument ArgNum of 6727 /// TheCall is a constant expression representing either a shifted byte value, 6728 /// or a value of the form 0x??FF (i.e. a member of the arithmetic progression 6729 /// 0x00FF, 0x01FF, ..., 0xFFFF). This strange range check is needed for some 6730 /// Arm MVE intrinsics. 6731 bool Sema::SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, 6732 int ArgNum, 6733 unsigned ArgBits) { 6734 llvm::APSInt Result; 6735 6736 // We can't check the value of a dependent argument. 6737 Expr *Arg = TheCall->getArg(ArgNum); 6738 if (Arg->isTypeDependent() || Arg->isValueDependent()) 6739 return false; 6740 6741 // Check constant-ness first. 6742 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 6743 return true; 6744 6745 // Truncate to the given size. 6746 Result = Result.getLoBits(ArgBits); 6747 Result.setIsUnsigned(true); 6748 6749 // Check to see if it's in either of the required forms. 6750 if (IsShiftedByte(Result) || 6751 (Result > 0 && Result < 0x10000 && (Result & 0xFF) == 0xFF)) 6752 return false; 6753 6754 return Diag(TheCall->getBeginLoc(), 6755 diag::err_argument_not_shifted_byte_or_xxff) 6756 << Arg->getSourceRange(); 6757 } 6758 6759 /// SemaBuiltinARMMemoryTaggingCall - Handle calls of memory tagging extensions 6760 bool Sema::SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall) { 6761 if (BuiltinID == AArch64::BI__builtin_arm_irg) { 6762 if (checkArgCount(*this, TheCall, 2)) 6763 return true; 6764 Expr *Arg0 = TheCall->getArg(0); 6765 Expr *Arg1 = TheCall->getArg(1); 6766 6767 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 6768 if (FirstArg.isInvalid()) 6769 return true; 6770 QualType FirstArgType = FirstArg.get()->getType(); 6771 if (!FirstArgType->isAnyPointerType()) 6772 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 6773 << "first" << FirstArgType << Arg0->getSourceRange(); 6774 TheCall->setArg(0, FirstArg.get()); 6775 6776 ExprResult SecArg = DefaultLvalueConversion(Arg1); 6777 if (SecArg.isInvalid()) 6778 return true; 6779 QualType SecArgType = SecArg.get()->getType(); 6780 if (!SecArgType->isIntegerType()) 6781 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer) 6782 << "second" << SecArgType << Arg1->getSourceRange(); 6783 6784 // Derive the return type from the pointer argument. 
6785 TheCall->setType(FirstArgType); 6786 return false; 6787 } 6788 6789 if (BuiltinID == AArch64::BI__builtin_arm_addg) { 6790 if (checkArgCount(*this, TheCall, 2)) 6791 return true; 6792 6793 Expr *Arg0 = TheCall->getArg(0); 6794 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 6795 if (FirstArg.isInvalid()) 6796 return true; 6797 QualType FirstArgType = FirstArg.get()->getType(); 6798 if (!FirstArgType->isAnyPointerType()) 6799 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 6800 << "first" << FirstArgType << Arg0->getSourceRange(); 6801 TheCall->setArg(0, FirstArg.get()); 6802 6803 // Derive the return type from the pointer argument. 6804 TheCall->setType(FirstArgType); 6805 6806 // Second arg must be a constant in range [0,15]. 6807 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 6808 } 6809 6810 if (BuiltinID == AArch64::BI__builtin_arm_gmi) { 6811 if (checkArgCount(*this, TheCall, 2)) 6812 return true; 6813 Expr *Arg0 = TheCall->getArg(0); 6814 Expr *Arg1 = TheCall->getArg(1); 6815 6816 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 6817 if (FirstArg.isInvalid()) 6818 return true; 6819 QualType FirstArgType = FirstArg.get()->getType(); 6820 if (!FirstArgType->isAnyPointerType()) 6821 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 6822 << "first" << FirstArgType << Arg0->getSourceRange(); 6823 6824 QualType SecArgType = Arg1->getType(); 6825 if (!SecArgType->isIntegerType()) 6826 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer) 6827 << "second" << SecArgType << Arg1->getSourceRange(); 6828 TheCall->setType(Context.IntTy); 6829 return false; 6830 } 6831 6832 if (BuiltinID == AArch64::BI__builtin_arm_ldg || 6833 BuiltinID == AArch64::BI__builtin_arm_stg) { 6834 if (checkArgCount(*this, TheCall, 1)) 6835 return true; 6836 Expr *Arg0 = TheCall->getArg(0); 6837 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 6838 if (FirstArg.isInvalid()) 6839 return true; 6840 6841 QualType FirstArgType = FirstArg.get()->getType(); 6842 if (!FirstArgType->isAnyPointerType()) 6843 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 6844 << "first" << FirstArgType << Arg0->getSourceRange(); 6845 TheCall->setArg(0, FirstArg.get()); 6846 6847 // Derive the return type from the pointer argument.
6848 if (BuiltinID == AArch64::BI__builtin_arm_ldg) 6849 TheCall->setType(FirstArgType); 6850 return false; 6851 } 6852 6853 if (BuiltinID == AArch64::BI__builtin_arm_subp) { 6854 Expr *ArgA = TheCall->getArg(0); 6855 Expr *ArgB = TheCall->getArg(1); 6856 6857 ExprResult ArgExprA = DefaultFunctionArrayLvalueConversion(ArgA); 6858 ExprResult ArgExprB = DefaultFunctionArrayLvalueConversion(ArgB); 6859 6860 if (ArgExprA.isInvalid() || ArgExprB.isInvalid()) 6861 return true; 6862 6863 QualType ArgTypeA = ArgExprA.get()->getType(); 6864 QualType ArgTypeB = ArgExprB.get()->getType(); 6865 6866 auto isNull = [&] (Expr *E) -> bool { 6867 return E->isNullPointerConstant( 6868 Context, Expr::NPC_ValueDependentIsNotNull); }; 6869 6870 // argument should be either a pointer or null 6871 if (!ArgTypeA->isAnyPointerType() && !isNull(ArgA)) 6872 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer) 6873 << "first" << ArgTypeA << ArgA->getSourceRange(); 6874 6875 if (!ArgTypeB->isAnyPointerType() && !isNull(ArgB)) 6876 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer) 6877 << "second" << ArgTypeB << ArgB->getSourceRange(); 6878 6879 // Ensure Pointee types are compatible 6880 if (ArgTypeA->isAnyPointerType() && !isNull(ArgA) && 6881 ArgTypeB->isAnyPointerType() && !isNull(ArgB)) { 6882 QualType pointeeA = ArgTypeA->getPointeeType(); 6883 QualType pointeeB = ArgTypeB->getPointeeType(); 6884 if (!Context.typesAreCompatible( 6885 Context.getCanonicalType(pointeeA).getUnqualifiedType(), 6886 Context.getCanonicalType(pointeeB).getUnqualifiedType())) { 6887 return Diag(TheCall->getBeginLoc(), diag::err_typecheck_sub_ptr_compatible) 6888 << ArgTypeA << ArgTypeB << ArgA->getSourceRange() 6889 << ArgB->getSourceRange(); 6890 } 6891 } 6892 6893 // at least one argument should be pointer type 6894 if (!ArgTypeA->isAnyPointerType() && !ArgTypeB->isAnyPointerType()) 6895 return Diag(TheCall->getBeginLoc(), diag::err_memtag_any2arg_pointer) 6896 << ArgTypeA << ArgTypeB << ArgA->getSourceRange(); 6897 6898 if (isNull(ArgA)) // adopt type of the other pointer 6899 ArgExprA = ImpCastExprToType(ArgExprA.get(), ArgTypeB, CK_NullToPointer); 6900 6901 if (isNull(ArgB)) 6902 ArgExprB = ImpCastExprToType(ArgExprB.get(), ArgTypeA, CK_NullToPointer); 6903 6904 TheCall->setArg(0, ArgExprA.get()); 6905 TheCall->setArg(1, ArgExprB.get()); 6906 TheCall->setType(Context.LongLongTy); 6907 return false; 6908 } 6909 assert(false && "Unhandled ARM MTE intrinsic"); 6910 return true; 6911 } 6912 6913 /// SemaBuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr 6914 /// TheCall is an ARM/AArch64 special register string literal. 
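///
/// Illustrative sketch of the accepted forms (not code from this file; the
/// register strings are only examples):
/// \code
///   unsigned CoProc = __builtin_arm_rsr("cp15:0:c13:c0:3");     // ARM coprocessor form
///   unsigned long long Cnt = __builtin_arm_rsr64("cntvct_el0"); // AArch64 register name
/// \endcode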
6915 bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, 6916 int ArgNum, unsigned ExpectedFieldNum, 6917 bool AllowName) { 6918 bool IsARMBuiltin = BuiltinID == ARM::BI__builtin_arm_rsr64 || 6919 BuiltinID == ARM::BI__builtin_arm_wsr64 || 6920 BuiltinID == ARM::BI__builtin_arm_rsr || 6921 BuiltinID == ARM::BI__builtin_arm_rsrp || 6922 BuiltinID == ARM::BI__builtin_arm_wsr || 6923 BuiltinID == ARM::BI__builtin_arm_wsrp; 6924 bool IsAArch64Builtin = BuiltinID == AArch64::BI__builtin_arm_rsr64 || 6925 BuiltinID == AArch64::BI__builtin_arm_wsr64 || 6926 BuiltinID == AArch64::BI__builtin_arm_rsr || 6927 BuiltinID == AArch64::BI__builtin_arm_rsrp || 6928 BuiltinID == AArch64::BI__builtin_arm_wsr || 6929 BuiltinID == AArch64::BI__builtin_arm_wsrp; 6930 assert((IsARMBuiltin || IsAArch64Builtin) && "Unexpected ARM builtin."); 6931 6932 // We can't check the value of a dependent argument. 6933 Expr *Arg = TheCall->getArg(ArgNum); 6934 if (Arg->isTypeDependent() || Arg->isValueDependent()) 6935 return false; 6936 6937 // Check if the argument is a string literal. 6938 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 6939 return Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 6940 << Arg->getSourceRange(); 6941 6942 // Check the type of special register given. 6943 StringRef Reg = cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 6944 SmallVector<StringRef, 6> Fields; 6945 Reg.split(Fields, ":"); 6946 6947 if (Fields.size() != ExpectedFieldNum && !(AllowName && Fields.size() == 1)) 6948 return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg) 6949 << Arg->getSourceRange(); 6950 6951 // If the string is the name of a register then we cannot check that it is 6952 // valid here, but if the string is of one of the forms described in ACLE then 6953 // we can check that the supplied fields are integers and within the valid 6954 // ranges. 6955 if (Fields.size() > 1) { 6956 bool FiveFields = Fields.size() == 5; 6957 6958 bool ValidString = true; 6959 if (IsARMBuiltin) { 6960 ValidString &= Fields[0].startswith_lower("cp") || 6961 Fields[0].startswith_lower("p"); 6962 if (ValidString) 6963 Fields[0] = 6964 Fields[0].drop_front(Fields[0].startswith_lower("cp") ? 2 : 1); 6965 6966 ValidString &= Fields[2].startswith_lower("c"); 6967 if (ValidString) 6968 Fields[2] = Fields[2].drop_front(1); 6969 6970 if (FiveFields) { 6971 ValidString &= Fields[3].startswith_lower("c"); 6972 if (ValidString) 6973 Fields[3] = Fields[3].drop_front(1); 6974 } 6975 } 6976 6977 SmallVector<int, 5> Ranges; 6978 if (FiveFields) 6979 Ranges.append({IsAArch64Builtin ? 1 : 15, 7, 15, 15, 7}); 6980 else 6981 Ranges.append({15, 7, 15}); 6982 6983 for (unsigned i=0; i<Fields.size(); ++i) { 6984 int IntField; 6985 ValidString &= !Fields[i].getAsInteger(10, IntField); 6986 ValidString &= (IntField >= 0 && IntField <= Ranges[i]); 6987 } 6988 6989 if (!ValidString) 6990 return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg) 6991 << Arg->getSourceRange(); 6992 } else if (IsAArch64Builtin && Fields.size() == 1) { 6993 // If the register name is one of those that appear in the condition below 6994 // and the special register builtin being used is one of the write builtins, 6995 // then we require that the argument provided for writing to the register 6996 // is an integer constant expression. This is because it will be lowered to 6997 // an MSR (immediate) instruction, so we need to know the immediate at 6998 // compile time.
6999 if (TheCall->getNumArgs() != 2) 7000 return false; 7001 7002 std::string RegLower = Reg.lower(); 7003 if (RegLower != "spsel" && RegLower != "daifset" && RegLower != "daifclr" && 7004 RegLower != "pan" && RegLower != "uao") 7005 return false; 7006 7007 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 7008 } 7009 7010 return false; 7011 } 7012 7013 /// SemaBuiltinPPCMMACall - Check the call to a PPC MMA builtin for validity. 7014 /// Emit an error and return true on failure; return false on success. 7015 /// TypeStr is a string containing the type descriptor of the value returned by 7016 /// the builtin and the descriptors of the expected type of the arguments. 7017 bool Sema::SemaBuiltinPPCMMACall(CallExpr *TheCall, const char *TypeStr) { 7018 7019 assert((TypeStr[0] != '\0') && 7020 "Invalid types in PPC MMA builtin declaration"); 7021 7022 unsigned Mask = 0; 7023 unsigned ArgNum = 0; 7024 7025 // The first type in TypeStr is the type of the value returned by the 7026 // builtin. So we first read that type and change the type of TheCall. 7027 QualType type = DecodePPCMMATypeFromStr(Context, TypeStr, Mask); 7028 TheCall->setType(type); 7029 7030 while (*TypeStr != '\0') { 7031 Mask = 0; 7032 QualType ExpectedType = DecodePPCMMATypeFromStr(Context, TypeStr, Mask); 7033 if (ArgNum >= TheCall->getNumArgs()) { 7034 ArgNum++; 7035 break; 7036 } 7037 7038 Expr *Arg = TheCall->getArg(ArgNum); 7039 QualType ArgType = Arg->getType(); 7040 7041 if ((ExpectedType->isVoidPointerType() && !ArgType->isPointerType()) || 7042 (!ExpectedType->isVoidPointerType() && 7043 ArgType.getCanonicalType() != ExpectedType)) 7044 return Diag(Arg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 7045 << ArgType << ExpectedType << 1 << 0 << 0; 7046 7047 // If the value of the Mask is not 0, we have a constraint in the size of 7048 // the integer argument so here we ensure the argument is a constant that 7049 // is in the valid range. 7050 if (Mask != 0 && 7051 SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, Mask, true)) 7052 return true; 7053 7054 ArgNum++; 7055 } 7056 7057 // In case we exited early from the previous loop, there are other types to 7058 // read from TypeStr. So we need to read them all to ensure we have the right 7059 // number of arguments in TheCall and if it is not the case, to display a 7060 // better error message. 7061 while (*TypeStr != '\0') { 7062 (void) DecodePPCMMATypeFromStr(Context, TypeStr, Mask); 7063 ArgNum++; 7064 } 7065 if (checkArgCount(*this, TheCall, ArgNum)) 7066 return true; 7067 7068 return false; 7069 } 7070 7071 /// SemaBuiltinLongjmp - Handle __builtin_longjmp(void *env[5], int val). 7072 /// This checks that the target supports __builtin_longjmp and 7073 /// that val is a constant 1. 7074 bool Sema::SemaBuiltinLongjmp(CallExpr *TheCall) { 7075 if (!Context.getTargetInfo().hasSjLjLowering()) 7076 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_unsupported) 7077 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 7078 7079 Expr *Arg = TheCall->getArg(1); 7080 llvm::APSInt Result; 7081 7082 // TODO: This is less than ideal. Overload this to take a value. 7083 if (SemaBuiltinConstantArg(TheCall, 1, Result)) 7084 return true; 7085 7086 if (Result != 1) 7087 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_invalid_val) 7088 << SourceRange(Arg->getBeginLoc(), Arg->getEndLoc()); 7089 7090 return false; 7091 } 7092 7093 /// SemaBuiltinSetjmp - Handle __builtin_setjmp(void *env[5]). 
7094 /// This checks that the target supports __builtin_setjmp. 7095 bool Sema::SemaBuiltinSetjmp(CallExpr *TheCall) { 7096 if (!Context.getTargetInfo().hasSjLjLowering()) 7097 return Diag(TheCall->getBeginLoc(), diag::err_builtin_setjmp_unsupported) 7098 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 7099 return false; 7100 } 7101 7102 namespace { 7103 7104 class UncoveredArgHandler { 7105 enum { Unknown = -1, AllCovered = -2 }; 7106 7107 signed FirstUncoveredArg = Unknown; 7108 SmallVector<const Expr *, 4> DiagnosticExprs; 7109 7110 public: 7111 UncoveredArgHandler() = default; 7112 7113 bool hasUncoveredArg() const { 7114 return (FirstUncoveredArg >= 0); 7115 } 7116 7117 unsigned getUncoveredArg() const { 7118 assert(hasUncoveredArg() && "no uncovered argument"); 7119 return FirstUncoveredArg; 7120 } 7121 7122 void setAllCovered() { 7123 // A string has been found with all arguments covered, so clear out 7124 // the diagnostics. 7125 DiagnosticExprs.clear(); 7126 FirstUncoveredArg = AllCovered; 7127 } 7128 7129 void Update(signed NewFirstUncoveredArg, const Expr *StrExpr) { 7130 assert(NewFirstUncoveredArg >= 0 && "Outside range"); 7131 7132 // Don't update if a previous string covers all arguments. 7133 if (FirstUncoveredArg == AllCovered) 7134 return; 7135 7136 // UncoveredArgHandler tracks the highest uncovered argument index 7137 // and with it all the strings that match this index. 7138 if (NewFirstUncoveredArg == FirstUncoveredArg) 7139 DiagnosticExprs.push_back(StrExpr); 7140 else if (NewFirstUncoveredArg > FirstUncoveredArg) { 7141 DiagnosticExprs.clear(); 7142 DiagnosticExprs.push_back(StrExpr); 7143 FirstUncoveredArg = NewFirstUncoveredArg; 7144 } 7145 } 7146 7147 void Diagnose(Sema &S, bool IsFunctionCall, const Expr *ArgExpr); 7148 }; 7149 7150 enum StringLiteralCheckType { 7151 SLCT_NotALiteral, 7152 SLCT_UncheckedLiteral, 7153 SLCT_CheckedLiteral 7154 }; 7155 7156 } // namespace 7157 7158 static void sumOffsets(llvm::APSInt &Offset, llvm::APSInt Addend, 7159 BinaryOperatorKind BinOpKind, 7160 bool AddendIsRight) { 7161 unsigned BitWidth = Offset.getBitWidth(); 7162 unsigned AddendBitWidth = Addend.getBitWidth(); 7163 // There might be negative interim results. 7164 if (Addend.isUnsigned()) { 7165 Addend = Addend.zext(++AddendBitWidth); 7166 Addend.setIsSigned(true); 7167 } 7168 // Adjust the bit width of the APSInts. 7169 if (AddendBitWidth > BitWidth) { 7170 Offset = Offset.sext(AddendBitWidth); 7171 BitWidth = AddendBitWidth; 7172 } else if (BitWidth > AddendBitWidth) { 7173 Addend = Addend.sext(BitWidth); 7174 } 7175 7176 bool Ov = false; 7177 llvm::APSInt ResOffset = Offset; 7178 if (BinOpKind == BO_Add) 7179 ResOffset = Offset.sadd_ov(Addend, Ov); 7180 else { 7181 assert(AddendIsRight && BinOpKind == BO_Sub && 7182 "operator must be add or sub with addend on the right"); 7183 ResOffset = Offset.ssub_ov(Addend, Ov); 7184 } 7185 7186 // We add an offset to a pointer here so we should support an offset as big as 7187 // possible. 7188 if (Ov) { 7189 assert(BitWidth <= std::numeric_limits<unsigned>::max() / 2 && 7190 "index (intermediate) result too big"); 7191 Offset = Offset.sext(2 * BitWidth); 7192 sumOffsets(Offset, Addend, BinOpKind, AddendIsRight); 7193 return; 7194 } 7195 7196 Offset = ResOffset; 7197 } 7198 7199 namespace { 7200 7201 // This is a wrapper class around StringLiteral to support offsetted string 7202 // literals as format strings. 
It takes the offset into account when returning 7203 // the string and its length or the source locations to display notes correctly. 7204 class FormatStringLiteral { 7205 const StringLiteral *FExpr; 7206 int64_t Offset; 7207 7208 public: 7209 FormatStringLiteral(const StringLiteral *fexpr, int64_t Offset = 0) 7210 : FExpr(fexpr), Offset(Offset) {} 7211 7212 StringRef getString() const { 7213 return FExpr->getString().drop_front(Offset); 7214 } 7215 7216 unsigned getByteLength() const { 7217 return FExpr->getByteLength() - getCharByteWidth() * Offset; 7218 } 7219 7220 unsigned getLength() const { return FExpr->getLength() - Offset; } 7221 unsigned getCharByteWidth() const { return FExpr->getCharByteWidth(); } 7222 7223 StringLiteral::StringKind getKind() const { return FExpr->getKind(); } 7224 7225 QualType getType() const { return FExpr->getType(); } 7226 7227 bool isAscii() const { return FExpr->isAscii(); } 7228 bool isWide() const { return FExpr->isWide(); } 7229 bool isUTF8() const { return FExpr->isUTF8(); } 7230 bool isUTF16() const { return FExpr->isUTF16(); } 7231 bool isUTF32() const { return FExpr->isUTF32(); } 7232 bool isPascal() const { return FExpr->isPascal(); } 7233 7234 SourceLocation getLocationOfByte( 7235 unsigned ByteNo, const SourceManager &SM, const LangOptions &Features, 7236 const TargetInfo &Target, unsigned *StartToken = nullptr, 7237 unsigned *StartTokenByteOffset = nullptr) const { 7238 return FExpr->getLocationOfByte(ByteNo + Offset, SM, Features, Target, 7239 StartToken, StartTokenByteOffset); 7240 } 7241 7242 SourceLocation getBeginLoc() const LLVM_READONLY { 7243 return FExpr->getBeginLoc().getLocWithOffset(Offset); 7244 } 7245 7246 SourceLocation getEndLoc() const LLVM_READONLY { return FExpr->getEndLoc(); } 7247 }; 7248 7249 } // namespace 7250 7251 static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr, 7252 const Expr *OrigFormatExpr, 7253 ArrayRef<const Expr *> Args, 7254 bool HasVAListArg, unsigned format_idx, 7255 unsigned firstDataArg, 7256 Sema::FormatStringType Type, 7257 bool inFunctionCall, 7258 Sema::VariadicCallType CallType, 7259 llvm::SmallBitVector &CheckedVarArgs, 7260 UncoveredArgHandler &UncoveredArg, 7261 bool IgnoreStringsWithoutSpecifiers); 7262 7263 // Determine if an expression is a string literal or constant string. 7264 // If this function returns false on the arguments to a function expecting a 7265 // format string, we will usually need to emit a warning. 7266 // True string literals are then checked by CheckFormatString. 7267 static StringLiteralCheckType 7268 checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args, 7269 bool HasVAListArg, unsigned format_idx, 7270 unsigned firstDataArg, Sema::FormatStringType Type, 7271 Sema::VariadicCallType CallType, bool InFunctionCall, 7272 llvm::SmallBitVector &CheckedVarArgs, 7273 UncoveredArgHandler &UncoveredArg, 7274 llvm::APSInt Offset, 7275 bool IgnoreStringsWithoutSpecifiers = false) { 7276 if (S.isConstantEvaluated()) 7277 return SLCT_NotALiteral; 7278 tryAgain: 7279 assert(Offset.isSigned() && "invalid offset"); 7280 7281 if (E->isTypeDependent() || E->isValueDependent()) 7282 return SLCT_NotALiteral; 7283 7284 E = E->IgnoreParenCasts(); 7285 7286 if (E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull)) 7287 // Technically -Wformat-nonliteral does not warn about this case. 7288 // The behavior of printf and friends in this case is implementation 7289 // dependent. 
Ideally if the format string cannot be null then 7290 // it should have a 'nonnull' attribute in the function prototype. 7291 return SLCT_UncheckedLiteral; 7292 7293 switch (E->getStmtClass()) { 7294 case Stmt::BinaryConditionalOperatorClass: 7295 case Stmt::ConditionalOperatorClass: { 7296 // The expression is a literal if both sub-expressions were, and it was 7297 // completely checked only if both sub-expressions were checked. 7298 const AbstractConditionalOperator *C = 7299 cast<AbstractConditionalOperator>(E); 7300 7301 // Determine whether it is necessary to check both sub-expressions, for 7302 // example, because the condition expression is a constant that can be 7303 // evaluated at compile time. 7304 bool CheckLeft = true, CheckRight = true; 7305 7306 bool Cond; 7307 if (C->getCond()->EvaluateAsBooleanCondition(Cond, S.getASTContext(), 7308 S.isConstantEvaluated())) { 7309 if (Cond) 7310 CheckRight = false; 7311 else 7312 CheckLeft = false; 7313 } 7314 7315 // We need to maintain the offsets for the right and the left hand side 7316 // separately to check if every possible indexed expression is a valid 7317 // string literal. They might have different offsets for different string 7318 // literals in the end. 7319 StringLiteralCheckType Left; 7320 if (!CheckLeft) 7321 Left = SLCT_UncheckedLiteral; 7322 else { 7323 Left = checkFormatStringExpr(S, C->getTrueExpr(), Args, 7324 HasVAListArg, format_idx, firstDataArg, 7325 Type, CallType, InFunctionCall, 7326 CheckedVarArgs, UncoveredArg, Offset, 7327 IgnoreStringsWithoutSpecifiers); 7328 if (Left == SLCT_NotALiteral || !CheckRight) { 7329 return Left; 7330 } 7331 } 7332 7333 StringLiteralCheckType Right = checkFormatStringExpr( 7334 S, C->getFalseExpr(), Args, HasVAListArg, format_idx, firstDataArg, 7335 Type, CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset, 7336 IgnoreStringsWithoutSpecifiers); 7337 7338 return (CheckLeft && Left < Right) ? Left : Right; 7339 } 7340 7341 case Stmt::ImplicitCastExprClass: 7342 E = cast<ImplicitCastExpr>(E)->getSubExpr(); 7343 goto tryAgain; 7344 7345 case Stmt::OpaqueValueExprClass: 7346 if (const Expr *src = cast<OpaqueValueExpr>(E)->getSourceExpr()) { 7347 E = src; 7348 goto tryAgain; 7349 } 7350 return SLCT_NotALiteral; 7351 7352 case Stmt::PredefinedExprClass: 7353 // While __func__, etc., are technically not string literals, they 7354 // cannot contain format specifiers and thus are not a security 7355 // liability. 7356 return SLCT_UncheckedLiteral; 7357 7358 case Stmt::DeclRefExprClass: { 7359 const DeclRefExpr *DR = cast<DeclRefExpr>(E); 7360 7361 // As an exception, do not flag errors for variables binding to 7362 // const string literals. 7363 if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) { 7364 bool isConstant = false; 7365 QualType T = DR->getType(); 7366 7367 if (const ArrayType *AT = S.Context.getAsArrayType(T)) { 7368 isConstant = AT->getElementType().isConstant(S.Context); 7369 } else if (const PointerType *PT = T->getAs<PointerType>()) { 7370 isConstant = T.isConstant(S.Context) && 7371 PT->getPointeeType().isConstant(S.Context); 7372 } else if (T->isObjCObjectPointerType()) { 7373 // In ObjC, there is usually no "const ObjectPointer" type, 7374 // so don't check if the pointee type is constant. 
7375 isConstant = T.isConstant(S.Context); 7376 } 7377 7378 if (isConstant) { 7379 if (const Expr *Init = VD->getAnyInitializer()) { 7380 // Look through initializers like const char c[] = { "foo" } 7381 if (const InitListExpr *InitList = dyn_cast<InitListExpr>(Init)) { 7382 if (InitList->isStringLiteralInit()) 7383 Init = InitList->getInit(0)->IgnoreParenImpCasts(); 7384 } 7385 return checkFormatStringExpr(S, Init, Args, 7386 HasVAListArg, format_idx, 7387 firstDataArg, Type, CallType, 7388 /*InFunctionCall*/ false, CheckedVarArgs, 7389 UncoveredArg, Offset); 7390 } 7391 } 7392 7393 // For vprintf* functions (i.e., HasVAListArg==true), we add a 7394 // special check to see if the format string is a function parameter 7395 // of the function calling the printf function. If the function 7396 // has an attribute indicating it is a printf-like function, then we 7397 // should suppress warnings concerning non-literals being used in a call 7398 // to a vprintf function. For example: 7399 // 7400 // void 7401 // logmessage(char const *fmt __attribute__ (format (printf, 1, 2)), ...){ 7402 // va_list ap; 7403 // va_start(ap, fmt); 7404 // vprintf(fmt, ap); // Do NOT emit a warning about "fmt". 7405 // ... 7406 // } 7407 if (HasVAListArg) { 7408 if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(VD)) { 7409 if (const NamedDecl *ND = dyn_cast<NamedDecl>(PV->getDeclContext())) { 7410 int PVIndex = PV->getFunctionScopeIndex() + 1; 7411 for (const auto *PVFormat : ND->specific_attrs<FormatAttr>()) { 7412 // adjust for implicit parameter 7413 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(ND)) 7414 if (MD->isInstance()) 7415 ++PVIndex; 7416 // We also check if the formats are compatible. 7417 // We can't pass a 'scanf' string to a 'printf' function. 7418 if (PVIndex == PVFormat->getFormatIdx() && 7419 Type == S.GetFormatStringType(PVFormat)) 7420 return SLCT_UncheckedLiteral; 7421 } 7422 } 7423 } 7424 } 7425 } 7426 7427 return SLCT_NotALiteral; 7428 } 7429 7430 case Stmt::CallExprClass: 7431 case Stmt::CXXMemberCallExprClass: { 7432 const CallExpr *CE = cast<CallExpr>(E); 7433 if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(CE->getCalleeDecl())) { 7434 bool IsFirst = true; 7435 StringLiteralCheckType CommonResult; 7436 for (const auto *FA : ND->specific_attrs<FormatArgAttr>()) { 7437 const Expr *Arg = CE->getArg(FA->getFormatIdx().getASTIndex()); 7438 StringLiteralCheckType Result = checkFormatStringExpr( 7439 S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type, 7440 CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset, 7441 IgnoreStringsWithoutSpecifiers); 7442 if (IsFirst) { 7443 CommonResult = Result; 7444 IsFirst = false; 7445 } 7446 } 7447 if (!IsFirst) 7448 return CommonResult; 7449 7450 if (const auto *FD = dyn_cast<FunctionDecl>(ND)) { 7451 unsigned BuiltinID = FD->getBuiltinID(); 7452 if (BuiltinID == Builtin::BI__builtin___CFStringMakeConstantString || 7453 BuiltinID == Builtin::BI__builtin___NSStringMakeConstantString) { 7454 const Expr *Arg = CE->getArg(0); 7455 return checkFormatStringExpr(S, Arg, Args, 7456 HasVAListArg, format_idx, 7457 firstDataArg, Type, CallType, 7458 InFunctionCall, CheckedVarArgs, 7459 UncoveredArg, Offset, 7460 IgnoreStringsWithoutSpecifiers); 7461 } 7462 } 7463 } 7464 7465 return SLCT_NotALiteral; 7466 } 7467 case Stmt::ObjCMessageExprClass: { 7468 const auto *ME = cast<ObjCMessageExpr>(E); 7469 if (const auto *MD = ME->getMethodDecl()) { 7470 if (const auto *FA = MD->getAttr<FormatArgAttr>()) { 7471 // As a special case heuristic, if 
we're using the method -[NSBundle
7472 // localizedStringForKey:value:table:], ignore any key strings that lack
7473 // format specifiers. The idea is that if the key doesn't have any
7474 // format specifiers then it's probably just a key to map to the
7475 // localized strings. If it does have format specifiers though, then it's
7476 // likely that the text of the key is the format string in the
7477 // programmer's language, and should be checked.
7478 const ObjCInterfaceDecl *IFace;
7479 if (MD->isInstanceMethod() && (IFace = MD->getClassInterface()) &&
7480 IFace->getIdentifier()->isStr("NSBundle") &&
7481 MD->getSelector().isKeywordSelector(
7482 {"localizedStringForKey", "value", "table"})) {
7483 IgnoreStringsWithoutSpecifiers = true;
7484 }
7485
7486 const Expr *Arg = ME->getArg(FA->getFormatIdx().getASTIndex());
7487 return checkFormatStringExpr(
7488 S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type,
7489 CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
7490 IgnoreStringsWithoutSpecifiers);
7491 }
7492 }
7493
7494 return SLCT_NotALiteral;
7495 }
7496 case Stmt::ObjCStringLiteralClass:
7497 case Stmt::StringLiteralClass: {
7498 const StringLiteral *StrE = nullptr;
7499
7500 if (const ObjCStringLiteral *ObjCFExpr = dyn_cast<ObjCStringLiteral>(E))
7501 StrE = ObjCFExpr->getString();
7502 else
7503 StrE = cast<StringLiteral>(E);
7504
7505 if (StrE) {
7506 if (Offset.isNegative() || Offset > StrE->getLength()) {
7507 // TODO: It would be better to have an explicit warning for out of
7508 // bounds literals.
7509 return SLCT_NotALiteral;
7510 }
7511 FormatStringLiteral FStr(StrE, Offset.sextOrTrunc(64).getSExtValue());
7512 CheckFormatString(S, &FStr, E, Args, HasVAListArg, format_idx,
7513 firstDataArg, Type, InFunctionCall, CallType,
7514 CheckedVarArgs, UncoveredArg,
7515 IgnoreStringsWithoutSpecifiers);
7516 return SLCT_CheckedLiteral;
7517 }
7518
7519 return SLCT_NotALiteral;
7520 }
7521 case Stmt::BinaryOperatorClass: {
7522 const BinaryOperator *BinOp = cast<BinaryOperator>(E);
7523
7524 // A string literal + an int offset is still a string literal.
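// For example (editor's sketch of calls this case is meant to handle; the
// variable names are hypothetical):
//
//   printf("%d and %d" + 7, y);   // offset 7 leaves "%d" to be checked
//   printf(7 + "%d and %d", y);   // same literal with the operands swapped
//
// In both calls the integer operand is folded into Offset via sumOffsets()
// and the remaining tail of the literal is then checked as usual.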
7525 if (BinOp->isAdditiveOp()) { 7526 Expr::EvalResult LResult, RResult; 7527 7528 bool LIsInt = BinOp->getLHS()->EvaluateAsInt( 7529 LResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated()); 7530 bool RIsInt = BinOp->getRHS()->EvaluateAsInt( 7531 RResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated()); 7532 7533 if (LIsInt != RIsInt) { 7534 BinaryOperatorKind BinOpKind = BinOp->getOpcode(); 7535 7536 if (LIsInt) { 7537 if (BinOpKind == BO_Add) { 7538 sumOffsets(Offset, LResult.Val.getInt(), BinOpKind, RIsInt); 7539 E = BinOp->getRHS(); 7540 goto tryAgain; 7541 } 7542 } else { 7543 sumOffsets(Offset, RResult.Val.getInt(), BinOpKind, RIsInt); 7544 E = BinOp->getLHS(); 7545 goto tryAgain; 7546 } 7547 } 7548 } 7549 7550 return SLCT_NotALiteral; 7551 } 7552 case Stmt::UnaryOperatorClass: { 7553 const UnaryOperator *UnaOp = cast<UnaryOperator>(E); 7554 auto ASE = dyn_cast<ArraySubscriptExpr>(UnaOp->getSubExpr()); 7555 if (UnaOp->getOpcode() == UO_AddrOf && ASE) { 7556 Expr::EvalResult IndexResult; 7557 if (ASE->getRHS()->EvaluateAsInt(IndexResult, S.Context, 7558 Expr::SE_NoSideEffects, 7559 S.isConstantEvaluated())) { 7560 sumOffsets(Offset, IndexResult.Val.getInt(), BO_Add, 7561 /*RHS is int*/ true); 7562 E = ASE->getBase(); 7563 goto tryAgain; 7564 } 7565 } 7566 7567 return SLCT_NotALiteral; 7568 } 7569 7570 default: 7571 return SLCT_NotALiteral; 7572 } 7573 } 7574 7575 Sema::FormatStringType Sema::GetFormatStringType(const FormatAttr *Format) { 7576 return llvm::StringSwitch<FormatStringType>(Format->getType()->getName()) 7577 .Case("scanf", FST_Scanf) 7578 .Cases("printf", "printf0", FST_Printf) 7579 .Cases("NSString", "CFString", FST_NSString) 7580 .Case("strftime", FST_Strftime) 7581 .Case("strfmon", FST_Strfmon) 7582 .Cases("kprintf", "cmn_err", "vcmn_err", "zcmn_err", FST_Kprintf) 7583 .Case("freebsd_kprintf", FST_FreeBSDKPrintf) 7584 .Case("os_trace", FST_OSLog) 7585 .Case("os_log", FST_OSLog) 7586 .Default(FST_Unknown); 7587 } 7588 7589 /// CheckFormatArguments - Check calls to printf and scanf (and similar 7590 /// functions) for correct use of format strings. 7591 /// Returns true if a format string has been fully checked. 7592 bool Sema::CheckFormatArguments(const FormatAttr *Format, 7593 ArrayRef<const Expr *> Args, 7594 bool IsCXXMember, 7595 VariadicCallType CallType, 7596 SourceLocation Loc, SourceRange Range, 7597 llvm::SmallBitVector &CheckedVarArgs) { 7598 FormatStringInfo FSI; 7599 if (getFormatStringInfo(Format, IsCXXMember, &FSI)) 7600 return CheckFormatArguments(Args, FSI.HasVAListArg, FSI.FormatIdx, 7601 FSI.FirstDataArg, GetFormatStringType(Format), 7602 CallType, Loc, Range, CheckedVarArgs); 7603 return false; 7604 } 7605 7606 bool Sema::CheckFormatArguments(ArrayRef<const Expr *> Args, 7607 bool HasVAListArg, unsigned format_idx, 7608 unsigned firstDataArg, FormatStringType Type, 7609 VariadicCallType CallType, 7610 SourceLocation Loc, SourceRange Range, 7611 llvm::SmallBitVector &CheckedVarArgs) { 7612 // CHECK: printf/scanf-like function is called with no format string. 7613 if (format_idx >= Args.size()) { 7614 Diag(Loc, diag::warn_missing_format_string) << Range; 7615 return false; 7616 } 7617 7618 const Expr *OrigFormatExpr = Args[format_idx]->IgnoreParenCasts(); 7619 7620 // CHECK: format string is not a string literal. 7621 // 7622 // Dynamically generated format strings are difficult to 7623 // automatically vet at compile time. 
Requiring that format strings 7624 // are string literals: (1) permits the checking of format strings by 7625 // the compiler and thereby (2) can practically remove the source of 7626 // many format string exploits. 7627 7628 // Format string can be either ObjC string (e.g. @"%d") or 7629 // C string (e.g. "%d") 7630 // ObjC string uses the same format specifiers as C string, so we can use 7631 // the same format string checking logic for both ObjC and C strings. 7632 UncoveredArgHandler UncoveredArg; 7633 StringLiteralCheckType CT = 7634 checkFormatStringExpr(*this, OrigFormatExpr, Args, HasVAListArg, 7635 format_idx, firstDataArg, Type, CallType, 7636 /*IsFunctionCall*/ true, CheckedVarArgs, 7637 UncoveredArg, 7638 /*no string offset*/ llvm::APSInt(64, false) = 0); 7639 7640 // Generate a diagnostic where an uncovered argument is detected. 7641 if (UncoveredArg.hasUncoveredArg()) { 7642 unsigned ArgIdx = UncoveredArg.getUncoveredArg() + firstDataArg; 7643 assert(ArgIdx < Args.size() && "ArgIdx outside bounds"); 7644 UncoveredArg.Diagnose(*this, /*IsFunctionCall*/true, Args[ArgIdx]); 7645 } 7646 7647 if (CT != SLCT_NotALiteral) 7648 // Literal format string found, check done! 7649 return CT == SLCT_CheckedLiteral; 7650 7651 // Strftime is particular as it always uses a single 'time' argument, 7652 // so it is safe to pass a non-literal string. 7653 if (Type == FST_Strftime) 7654 return false; 7655 7656 // Do not emit diag when the string param is a macro expansion and the 7657 // format is either NSString or CFString. This is a hack to prevent 7658 // diag when using the NSLocalizedString and CFCopyLocalizedString macros 7659 // which are usually used in place of NS and CF string literals. 7660 SourceLocation FormatLoc = Args[format_idx]->getBeginLoc(); 7661 if (Type == FST_NSString && SourceMgr.isInSystemMacro(FormatLoc)) 7662 return false; 7663 7664 // If there are no arguments specified, warn with -Wformat-security, otherwise 7665 // warn only with -Wformat-nonliteral. 7666 if (Args.size() == firstDataArg) { 7667 Diag(FormatLoc, diag::warn_format_nonliteral_noargs) 7668 << OrigFormatExpr->getSourceRange(); 7669 switch (Type) { 7670 default: 7671 break; 7672 case FST_Kprintf: 7673 case FST_FreeBSDKPrintf: 7674 case FST_Printf: 7675 Diag(FormatLoc, diag::note_format_security_fixit) 7676 << FixItHint::CreateInsertion(FormatLoc, "\"%s\", "); 7677 break; 7678 case FST_NSString: 7679 Diag(FormatLoc, diag::note_format_security_fixit) 7680 << FixItHint::CreateInsertion(FormatLoc, "@\"%@\", "); 7681 break; 7682 } 7683 } else { 7684 Diag(FormatLoc, diag::warn_format_nonliteral) 7685 << OrigFormatExpr->getSourceRange(); 7686 } 7687 return false; 7688 } 7689 7690 namespace { 7691 7692 class CheckFormatHandler : public analyze_format_string::FormatStringHandler { 7693 protected: 7694 Sema &S; 7695 const FormatStringLiteral *FExpr; 7696 const Expr *OrigFormatExpr; 7697 const Sema::FormatStringType FSType; 7698 const unsigned FirstDataArg; 7699 const unsigned NumDataArgs; 7700 const char *Beg; // Start of format string. 
7701 const bool HasVAListArg; 7702 ArrayRef<const Expr *> Args; 7703 unsigned FormatIdx; 7704 llvm::SmallBitVector CoveredArgs; 7705 bool usesPositionalArgs = false; 7706 bool atFirstArg = true; 7707 bool inFunctionCall; 7708 Sema::VariadicCallType CallType; 7709 llvm::SmallBitVector &CheckedVarArgs; 7710 UncoveredArgHandler &UncoveredArg; 7711 7712 public: 7713 CheckFormatHandler(Sema &s, const FormatStringLiteral *fexpr, 7714 const Expr *origFormatExpr, 7715 const Sema::FormatStringType type, unsigned firstDataArg, 7716 unsigned numDataArgs, const char *beg, bool hasVAListArg, 7717 ArrayRef<const Expr *> Args, unsigned formatIdx, 7718 bool inFunctionCall, Sema::VariadicCallType callType, 7719 llvm::SmallBitVector &CheckedVarArgs, 7720 UncoveredArgHandler &UncoveredArg) 7721 : S(s), FExpr(fexpr), OrigFormatExpr(origFormatExpr), FSType(type), 7722 FirstDataArg(firstDataArg), NumDataArgs(numDataArgs), Beg(beg), 7723 HasVAListArg(hasVAListArg), Args(Args), FormatIdx(formatIdx), 7724 inFunctionCall(inFunctionCall), CallType(callType), 7725 CheckedVarArgs(CheckedVarArgs), UncoveredArg(UncoveredArg) { 7726 CoveredArgs.resize(numDataArgs); 7727 CoveredArgs.reset(); 7728 } 7729 7730 void DoneProcessing(); 7731 7732 void HandleIncompleteSpecifier(const char *startSpecifier, 7733 unsigned specifierLen) override; 7734 7735 void HandleInvalidLengthModifier( 7736 const analyze_format_string::FormatSpecifier &FS, 7737 const analyze_format_string::ConversionSpecifier &CS, 7738 const char *startSpecifier, unsigned specifierLen, 7739 unsigned DiagID); 7740 7741 void HandleNonStandardLengthModifier( 7742 const analyze_format_string::FormatSpecifier &FS, 7743 const char *startSpecifier, unsigned specifierLen); 7744 7745 void HandleNonStandardConversionSpecifier( 7746 const analyze_format_string::ConversionSpecifier &CS, 7747 const char *startSpecifier, unsigned specifierLen); 7748 7749 void HandlePosition(const char *startPos, unsigned posLen) override; 7750 7751 void HandleInvalidPosition(const char *startSpecifier, 7752 unsigned specifierLen, 7753 analyze_format_string::PositionContext p) override; 7754 7755 void HandleZeroPosition(const char *startPos, unsigned posLen) override; 7756 7757 void HandleNullChar(const char *nullCharacter) override; 7758 7759 template <typename Range> 7760 static void 7761 EmitFormatDiagnostic(Sema &S, bool inFunctionCall, const Expr *ArgumentExpr, 7762 const PartialDiagnostic &PDiag, SourceLocation StringLoc, 7763 bool IsStringLocation, Range StringRange, 7764 ArrayRef<FixItHint> Fixit = None); 7765 7766 protected: 7767 bool HandleInvalidConversionSpecifier(unsigned argIndex, SourceLocation Loc, 7768 const char *startSpec, 7769 unsigned specifierLen, 7770 const char *csStart, unsigned csLen); 7771 7772 void HandlePositionalNonpositionalArgs(SourceLocation Loc, 7773 const char *startSpec, 7774 unsigned specifierLen); 7775 7776 SourceRange getFormatStringRange(); 7777 CharSourceRange getSpecifierRange(const char *startSpecifier, 7778 unsigned specifierLen); 7779 SourceLocation getLocationOfByte(const char *x); 7780 7781 const Expr *getDataArg(unsigned i) const; 7782 7783 bool CheckNumArgs(const analyze_format_string::FormatSpecifier &FS, 7784 const analyze_format_string::ConversionSpecifier &CS, 7785 const char *startSpecifier, unsigned specifierLen, 7786 unsigned argIndex); 7787 7788 template <typename Range> 7789 void EmitFormatDiagnostic(PartialDiagnostic PDiag, SourceLocation StringLoc, 7790 bool IsStringLocation, Range StringRange, 7791 ArrayRef<FixItHint> Fixit = None); 
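// Editor's note on the two EmitFormatDiagnostic overloads above: the public
// static one can be used without a handler instance (for example,
// UncoveredArgHandler::Diagnose() calls it directly), while the protected
// template overload simply forwards to it with this handler's Sema, its
// inFunctionCall flag, and the format argument expression.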
7792 }; 7793 7794 } // namespace 7795 7796 SourceRange CheckFormatHandler::getFormatStringRange() { 7797 return OrigFormatExpr->getSourceRange(); 7798 } 7799 7800 CharSourceRange CheckFormatHandler:: 7801 getSpecifierRange(const char *startSpecifier, unsigned specifierLen) { 7802 SourceLocation Start = getLocationOfByte(startSpecifier); 7803 SourceLocation End = getLocationOfByte(startSpecifier + specifierLen - 1); 7804 7805 // Advance the end SourceLocation by one due to half-open ranges. 7806 End = End.getLocWithOffset(1); 7807 7808 return CharSourceRange::getCharRange(Start, End); 7809 } 7810 7811 SourceLocation CheckFormatHandler::getLocationOfByte(const char *x) { 7812 return FExpr->getLocationOfByte(x - Beg, S.getSourceManager(), 7813 S.getLangOpts(), S.Context.getTargetInfo()); 7814 } 7815 7816 void CheckFormatHandler::HandleIncompleteSpecifier(const char *startSpecifier, 7817 unsigned specifierLen){ 7818 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_incomplete_specifier), 7819 getLocationOfByte(startSpecifier), 7820 /*IsStringLocation*/true, 7821 getSpecifierRange(startSpecifier, specifierLen)); 7822 } 7823 7824 void CheckFormatHandler::HandleInvalidLengthModifier( 7825 const analyze_format_string::FormatSpecifier &FS, 7826 const analyze_format_string::ConversionSpecifier &CS, 7827 const char *startSpecifier, unsigned specifierLen, unsigned DiagID) { 7828 using namespace analyze_format_string; 7829 7830 const LengthModifier &LM = FS.getLengthModifier(); 7831 CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength()); 7832 7833 // See if we know how to fix this length modifier. 7834 Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier(); 7835 if (FixedLM) { 7836 EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), 7837 getLocationOfByte(LM.getStart()), 7838 /*IsStringLocation*/true, 7839 getSpecifierRange(startSpecifier, specifierLen)); 7840 7841 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) 7842 << FixedLM->toString() 7843 << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); 7844 7845 } else { 7846 FixItHint Hint; 7847 if (DiagID == diag::warn_format_nonsensical_length) 7848 Hint = FixItHint::CreateRemoval(LMRange); 7849 7850 EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), 7851 getLocationOfByte(LM.getStart()), 7852 /*IsStringLocation*/true, 7853 getSpecifierRange(startSpecifier, specifierLen), 7854 Hint); 7855 } 7856 } 7857 7858 void CheckFormatHandler::HandleNonStandardLengthModifier( 7859 const analyze_format_string::FormatSpecifier &FS, 7860 const char *startSpecifier, unsigned specifierLen) { 7861 using namespace analyze_format_string; 7862 7863 const LengthModifier &LM = FS.getLengthModifier(); 7864 CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength()); 7865 7866 // See if we know how to fix this length modifier. 
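// For example, the BSD-style "%qd" uses a non-standard 'q' length modifier;
// when getCorrectedLengthModifier() knows a standard equivalent (here 'll',
// i.e. "%lld"), the note emitted below carries a fix-it replacement for it.
// (Editor's illustration only; the mapping itself comes from
// getCorrectedLengthModifier().)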
7867 Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier(); 7868 if (FixedLM) { 7869 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 7870 << LM.toString() << 0, 7871 getLocationOfByte(LM.getStart()), 7872 /*IsStringLocation*/true, 7873 getSpecifierRange(startSpecifier, specifierLen)); 7874 7875 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) 7876 << FixedLM->toString() 7877 << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); 7878 7879 } else { 7880 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 7881 << LM.toString() << 0, 7882 getLocationOfByte(LM.getStart()), 7883 /*IsStringLocation*/true, 7884 getSpecifierRange(startSpecifier, specifierLen)); 7885 } 7886 } 7887 7888 void CheckFormatHandler::HandleNonStandardConversionSpecifier( 7889 const analyze_format_string::ConversionSpecifier &CS, 7890 const char *startSpecifier, unsigned specifierLen) { 7891 using namespace analyze_format_string; 7892 7893 // See if we know how to fix this conversion specifier. 7894 Optional<ConversionSpecifier> FixedCS = CS.getStandardSpecifier(); 7895 if (FixedCS) { 7896 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 7897 << CS.toString() << /*conversion specifier*/1, 7898 getLocationOfByte(CS.getStart()), 7899 /*IsStringLocation*/true, 7900 getSpecifierRange(startSpecifier, specifierLen)); 7901 7902 CharSourceRange CSRange = getSpecifierRange(CS.getStart(), CS.getLength()); 7903 S.Diag(getLocationOfByte(CS.getStart()), diag::note_format_fix_specifier) 7904 << FixedCS->toString() 7905 << FixItHint::CreateReplacement(CSRange, FixedCS->toString()); 7906 } else { 7907 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 7908 << CS.toString() << /*conversion specifier*/1, 7909 getLocationOfByte(CS.getStart()), 7910 /*IsStringLocation*/true, 7911 getSpecifierRange(startSpecifier, specifierLen)); 7912 } 7913 } 7914 7915 void CheckFormatHandler::HandlePosition(const char *startPos, 7916 unsigned posLen) { 7917 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard_positional_arg), 7918 getLocationOfByte(startPos), 7919 /*IsStringLocation*/true, 7920 getSpecifierRange(startPos, posLen)); 7921 } 7922 7923 void 7924 CheckFormatHandler::HandleInvalidPosition(const char *startPos, unsigned posLen, 7925 analyze_format_string::PositionContext p) { 7926 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_positional_specifier) 7927 << (unsigned) p, 7928 getLocationOfByte(startPos), /*IsStringLocation*/true, 7929 getSpecifierRange(startPos, posLen)); 7930 } 7931 7932 void CheckFormatHandler::HandleZeroPosition(const char *startPos, 7933 unsigned posLen) { 7934 EmitFormatDiagnostic(S.PDiag(diag::warn_format_zero_positional_specifier), 7935 getLocationOfByte(startPos), 7936 /*IsStringLocation*/true, 7937 getSpecifierRange(startPos, posLen)); 7938 } 7939 7940 void CheckFormatHandler::HandleNullChar(const char *nullCharacter) { 7941 if (!isa<ObjCStringLiteral>(OrigFormatExpr)) { 7942 // The presence of a null character is likely an error. 7943 EmitFormatDiagnostic( 7944 S.PDiag(diag::warn_printf_format_string_contains_null_char), 7945 getLocationOfByte(nullCharacter), /*IsStringLocation*/true, 7946 getFormatStringRange()); 7947 } 7948 } 7949 7950 // Note that this may return NULL if there was an error parsing or building 7951 // one of the argument expressions. 
7952 const Expr *CheckFormatHandler::getDataArg(unsigned i) const {
7953 return Args[FirstDataArg + i];
7954 }
7955
7956 void CheckFormatHandler::DoneProcessing() {
7957 // Does the number of data arguments exceed the number of
7958 // format conversions in the format string?
7959 if (!HasVAListArg) {
7960 // Find any arguments that weren't covered.
7961 CoveredArgs.flip();
7962 signed notCoveredArg = CoveredArgs.find_first();
7963 if (notCoveredArg >= 0) {
7964 assert((unsigned)notCoveredArg < NumDataArgs);
7965 UncoveredArg.Update(notCoveredArg, OrigFormatExpr);
7966 } else {
7967 UncoveredArg.setAllCovered();
7968 }
7969 }
7970 }
7971
7972 void UncoveredArgHandler::Diagnose(Sema &S, bool IsFunctionCall,
7973 const Expr *ArgExpr) {
7974 assert(hasUncoveredArg() && DiagnosticExprs.size() > 0 &&
7975 "Invalid state");
7976
7977 if (!ArgExpr)
7978 return;
7979
7980 SourceLocation Loc = ArgExpr->getBeginLoc();
7981
7982 if (S.getSourceManager().isInSystemMacro(Loc))
7983 return;
7984
7985 PartialDiagnostic PDiag = S.PDiag(diag::warn_printf_data_arg_not_used);
7986 for (auto E : DiagnosticExprs)
7987 PDiag << E->getSourceRange();
7988
7989 CheckFormatHandler::EmitFormatDiagnostic(
7990 S, IsFunctionCall, DiagnosticExprs[0],
7991 PDiag, Loc, /*IsStringLocation*/false,
7992 DiagnosticExprs[0]->getSourceRange());
7993 }
7994
7995 bool
7996 CheckFormatHandler::HandleInvalidConversionSpecifier(unsigned argIndex,
7997 SourceLocation Loc,
7998 const char *startSpec,
7999 unsigned specifierLen,
8000 const char *csStart,
8001 unsigned csLen) {
8002 bool keepGoing = true;
8003 if (argIndex < NumDataArgs) {
8004 // Consider the argument covered, even though the specifier doesn't
8005 // make sense.
8006 CoveredArgs.set(argIndex);
8007 }
8008 else {
8009 // If argIndex exceeds the number of data arguments, we
8010 // don't issue a warning because that is just a cascade of warnings (and
8011 // they may have intended '%%' anyway). We don't want to continue processing
8012 // the format string after this point, however, as we will likely just get
8013 // gibberish when trying to match arguments.
8014 keepGoing = false;
8015 }
8016
8017 StringRef Specifier(csStart, csLen);
8018
8019 // If the specifier is non-printable, it could be the first byte of a UTF-8
8020 // sequence. In that case, print the UTF-8 code point. If not, print the byte
8021 // hex value.
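// For instance (editor's examples of the formatting below): a lone
// unprintable byte such as 0x01 is rendered as "\x01", while a specifier
// byte that starts a valid multi-byte UTF-8 sequence, say U+20AC, is
// rendered as its code point, "\u20ac".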
8022 std::string CodePointStr; 8023 if (!llvm::sys::locale::isPrint(*csStart)) { 8024 llvm::UTF32 CodePoint; 8025 const llvm::UTF8 **B = reinterpret_cast<const llvm::UTF8 **>(&csStart); 8026 const llvm::UTF8 *E = 8027 reinterpret_cast<const llvm::UTF8 *>(csStart + csLen); 8028 llvm::ConversionResult Result = 8029 llvm::convertUTF8Sequence(B, E, &CodePoint, llvm::strictConversion); 8030 8031 if (Result != llvm::conversionOK) { 8032 unsigned char FirstChar = *csStart; 8033 CodePoint = (llvm::UTF32)FirstChar; 8034 } 8035 8036 llvm::raw_string_ostream OS(CodePointStr); 8037 if (CodePoint < 256) 8038 OS << "\\x" << llvm::format("%02x", CodePoint); 8039 else if (CodePoint <= 0xFFFF) 8040 OS << "\\u" << llvm::format("%04x", CodePoint); 8041 else 8042 OS << "\\U" << llvm::format("%08x", CodePoint); 8043 OS.flush(); 8044 Specifier = CodePointStr; 8045 } 8046 8047 EmitFormatDiagnostic( 8048 S.PDiag(diag::warn_format_invalid_conversion) << Specifier, Loc, 8049 /*IsStringLocation*/ true, getSpecifierRange(startSpec, specifierLen)); 8050 8051 return keepGoing; 8052 } 8053 8054 void 8055 CheckFormatHandler::HandlePositionalNonpositionalArgs(SourceLocation Loc, 8056 const char *startSpec, 8057 unsigned specifierLen) { 8058 EmitFormatDiagnostic( 8059 S.PDiag(diag::warn_format_mix_positional_nonpositional_args), 8060 Loc, /*isStringLoc*/true, getSpecifierRange(startSpec, specifierLen)); 8061 } 8062 8063 bool 8064 CheckFormatHandler::CheckNumArgs( 8065 const analyze_format_string::FormatSpecifier &FS, 8066 const analyze_format_string::ConversionSpecifier &CS, 8067 const char *startSpecifier, unsigned specifierLen, unsigned argIndex) { 8068 8069 if (argIndex >= NumDataArgs) { 8070 PartialDiagnostic PDiag = FS.usesPositionalArg() 8071 ? (S.PDiag(diag::warn_printf_positional_arg_exceeds_data_args) 8072 << (argIndex+1) << NumDataArgs) 8073 : S.PDiag(diag::warn_printf_insufficient_data_args); 8074 EmitFormatDiagnostic( 8075 PDiag, getLocationOfByte(CS.getStart()), /*IsStringLocation*/true, 8076 getSpecifierRange(startSpecifier, specifierLen)); 8077 8078 // Since more arguments than conversion tokens are given, by extension 8079 // all arguments are covered, so mark this as so. 8080 UncoveredArg.setAllCovered(); 8081 return false; 8082 } 8083 return true; 8084 } 8085 8086 template<typename Range> 8087 void CheckFormatHandler::EmitFormatDiagnostic(PartialDiagnostic PDiag, 8088 SourceLocation Loc, 8089 bool IsStringLocation, 8090 Range StringRange, 8091 ArrayRef<FixItHint> FixIt) { 8092 EmitFormatDiagnostic(S, inFunctionCall, Args[FormatIdx], PDiag, 8093 Loc, IsStringLocation, StringRange, FixIt); 8094 } 8095 8096 /// If the format string is not within the function call, emit a note 8097 /// so that the function call and string are in diagnostic messages. 8098 /// 8099 /// \param InFunctionCall if true, the format string is within the function 8100 /// call and only one diagnostic message will be produced. Otherwise, an 8101 /// extra note will be emitted pointing to location of the format string. 8102 /// 8103 /// \param ArgumentExpr the expression that is passed as the format string 8104 /// argument in the function call. Used for getting locations when two 8105 /// diagnostics are emitted. 8106 /// 8107 /// \param PDiag the callee should already have provided any strings for the 8108 /// diagnostic message. This function only adds locations and fixits 8109 /// to diagnostics. 8110 /// 8111 /// \param Loc primary location for diagnostic. 
If two diagnostics are 8112 /// required, one will be at Loc and a new SourceLocation will be created for 8113 /// the other one. 8114 /// 8115 /// \param IsStringLocation if true, Loc points to the format string should be 8116 /// used for the note. Otherwise, Loc points to the argument list and will 8117 /// be used with PDiag. 8118 /// 8119 /// \param StringRange some or all of the string to highlight. This is 8120 /// templated so it can accept either a CharSourceRange or a SourceRange. 8121 /// 8122 /// \param FixIt optional fix it hint for the format string. 8123 template <typename Range> 8124 void CheckFormatHandler::EmitFormatDiagnostic( 8125 Sema &S, bool InFunctionCall, const Expr *ArgumentExpr, 8126 const PartialDiagnostic &PDiag, SourceLocation Loc, bool IsStringLocation, 8127 Range StringRange, ArrayRef<FixItHint> FixIt) { 8128 if (InFunctionCall) { 8129 const Sema::SemaDiagnosticBuilder &D = S.Diag(Loc, PDiag); 8130 D << StringRange; 8131 D << FixIt; 8132 } else { 8133 S.Diag(IsStringLocation ? ArgumentExpr->getExprLoc() : Loc, PDiag) 8134 << ArgumentExpr->getSourceRange(); 8135 8136 const Sema::SemaDiagnosticBuilder &Note = 8137 S.Diag(IsStringLocation ? Loc : StringRange.getBegin(), 8138 diag::note_format_string_defined); 8139 8140 Note << StringRange; 8141 Note << FixIt; 8142 } 8143 } 8144 8145 //===--- CHECK: Printf format string checking ------------------------------===// 8146 8147 namespace { 8148 8149 class CheckPrintfHandler : public CheckFormatHandler { 8150 public: 8151 CheckPrintfHandler(Sema &s, const FormatStringLiteral *fexpr, 8152 const Expr *origFormatExpr, 8153 const Sema::FormatStringType type, unsigned firstDataArg, 8154 unsigned numDataArgs, bool isObjC, const char *beg, 8155 bool hasVAListArg, ArrayRef<const Expr *> Args, 8156 unsigned formatIdx, bool inFunctionCall, 8157 Sema::VariadicCallType CallType, 8158 llvm::SmallBitVector &CheckedVarArgs, 8159 UncoveredArgHandler &UncoveredArg) 8160 : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg, 8161 numDataArgs, beg, hasVAListArg, Args, formatIdx, 8162 inFunctionCall, CallType, CheckedVarArgs, 8163 UncoveredArg) {} 8164 8165 bool isObjCContext() const { return FSType == Sema::FST_NSString; } 8166 8167 /// Returns true if '%@' specifiers are allowed in the format string. 
8168 bool allowsObjCArg() const { 8169 return FSType == Sema::FST_NSString || FSType == Sema::FST_OSLog || 8170 FSType == Sema::FST_OSTrace; 8171 } 8172 8173 bool HandleInvalidPrintfConversionSpecifier( 8174 const analyze_printf::PrintfSpecifier &FS, 8175 const char *startSpecifier, 8176 unsigned specifierLen) override; 8177 8178 void handleInvalidMaskType(StringRef MaskType) override; 8179 8180 bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS, 8181 const char *startSpecifier, 8182 unsigned specifierLen) override; 8183 bool checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, 8184 const char *StartSpecifier, 8185 unsigned SpecifierLen, 8186 const Expr *E); 8187 8188 bool HandleAmount(const analyze_format_string::OptionalAmount &Amt, unsigned k, 8189 const char *startSpecifier, unsigned specifierLen); 8190 void HandleInvalidAmount(const analyze_printf::PrintfSpecifier &FS, 8191 const analyze_printf::OptionalAmount &Amt, 8192 unsigned type, 8193 const char *startSpecifier, unsigned specifierLen); 8194 void HandleFlag(const analyze_printf::PrintfSpecifier &FS, 8195 const analyze_printf::OptionalFlag &flag, 8196 const char *startSpecifier, unsigned specifierLen); 8197 void HandleIgnoredFlag(const analyze_printf::PrintfSpecifier &FS, 8198 const analyze_printf::OptionalFlag &ignoredFlag, 8199 const analyze_printf::OptionalFlag &flag, 8200 const char *startSpecifier, unsigned specifierLen); 8201 bool checkForCStrMembers(const analyze_printf::ArgType &AT, 8202 const Expr *E); 8203 8204 void HandleEmptyObjCModifierFlag(const char *startFlag, 8205 unsigned flagLen) override; 8206 8207 void HandleInvalidObjCModifierFlag(const char *startFlag, 8208 unsigned flagLen) override; 8209 8210 void HandleObjCFlagsWithNonObjCConversion(const char *flagsStart, 8211 const char *flagsEnd, 8212 const char *conversionPosition) 8213 override; 8214 }; 8215 8216 } // namespace 8217 8218 bool CheckPrintfHandler::HandleInvalidPrintfConversionSpecifier( 8219 const analyze_printf::PrintfSpecifier &FS, 8220 const char *startSpecifier, 8221 unsigned specifierLen) { 8222 const analyze_printf::PrintfConversionSpecifier &CS = 8223 FS.getConversionSpecifier(); 8224 8225 return HandleInvalidConversionSpecifier(FS.getArgIndex(), 8226 getLocationOfByte(CS.getStart()), 8227 startSpecifier, specifierLen, 8228 CS.getStart(), CS.getLength()); 8229 } 8230 8231 void CheckPrintfHandler::handleInvalidMaskType(StringRef MaskType) { 8232 S.Diag(getLocationOfByte(MaskType.data()), diag::err_invalid_mask_type_size); 8233 } 8234 8235 bool CheckPrintfHandler::HandleAmount( 8236 const analyze_format_string::OptionalAmount &Amt, 8237 unsigned k, const char *startSpecifier, 8238 unsigned specifierLen) { 8239 if (Amt.hasDataArgument()) { 8240 if (!HasVAListArg) { 8241 unsigned argIndex = Amt.getArgIndex(); 8242 if (argIndex >= NumDataArgs) { 8243 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_missing_arg) 8244 << k, 8245 getLocationOfByte(Amt.getStart()), 8246 /*IsStringLocation*/true, 8247 getSpecifierRange(startSpecifier, specifierLen)); 8248 // Don't do any more checking. We will just emit 8249 // spurious errors. 8250 return false; 8251 } 8252 8253 // Type check the data argument. It should be an 'int'. 8254 // Although not in conformance with C99, we also allow the argument to be 8255 // an 'unsigned int' as that is a reasonably safe case. GCC also 8256 // doesn't emit a warning for that case. 
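// For example (illustrative call only):
//
//   printf("%*d", width, value);
//
// Here 'width' is the data argument consumed by the '*' and is expected to
// be an 'int' (or, as noted above, an 'unsigned int').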
8257 CoveredArgs.set(argIndex); 8258 const Expr *Arg = getDataArg(argIndex); 8259 if (!Arg) 8260 return false; 8261 8262 QualType T = Arg->getType(); 8263 8264 const analyze_printf::ArgType &AT = Amt.getArgType(S.Context); 8265 assert(AT.isValid()); 8266 8267 if (!AT.matchesType(S.Context, T)) { 8268 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_wrong_type) 8269 << k << AT.getRepresentativeTypeName(S.Context) 8270 << T << Arg->getSourceRange(), 8271 getLocationOfByte(Amt.getStart()), 8272 /*IsStringLocation*/true, 8273 getSpecifierRange(startSpecifier, specifierLen)); 8274 // Don't do any more checking. We will just emit 8275 // spurious errors. 8276 return false; 8277 } 8278 } 8279 } 8280 return true; 8281 } 8282 8283 void CheckPrintfHandler::HandleInvalidAmount( 8284 const analyze_printf::PrintfSpecifier &FS, 8285 const analyze_printf::OptionalAmount &Amt, 8286 unsigned type, 8287 const char *startSpecifier, 8288 unsigned specifierLen) { 8289 const analyze_printf::PrintfConversionSpecifier &CS = 8290 FS.getConversionSpecifier(); 8291 8292 FixItHint fixit = 8293 Amt.getHowSpecified() == analyze_printf::OptionalAmount::Constant 8294 ? FixItHint::CreateRemoval(getSpecifierRange(Amt.getStart(), 8295 Amt.getConstantLength())) 8296 : FixItHint(); 8297 8298 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_optional_amount) 8299 << type << CS.toString(), 8300 getLocationOfByte(Amt.getStart()), 8301 /*IsStringLocation*/true, 8302 getSpecifierRange(startSpecifier, specifierLen), 8303 fixit); 8304 } 8305 8306 void CheckPrintfHandler::HandleFlag(const analyze_printf::PrintfSpecifier &FS, 8307 const analyze_printf::OptionalFlag &flag, 8308 const char *startSpecifier, 8309 unsigned specifierLen) { 8310 // Warn about pointless flag with a fixit removal. 8311 const analyze_printf::PrintfConversionSpecifier &CS = 8312 FS.getConversionSpecifier(); 8313 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_flag) 8314 << flag.toString() << CS.toString(), 8315 getLocationOfByte(flag.getPosition()), 8316 /*IsStringLocation*/true, 8317 getSpecifierRange(startSpecifier, specifierLen), 8318 FixItHint::CreateRemoval( 8319 getSpecifierRange(flag.getPosition(), 1))); 8320 } 8321 8322 void CheckPrintfHandler::HandleIgnoredFlag( 8323 const analyze_printf::PrintfSpecifier &FS, 8324 const analyze_printf::OptionalFlag &ignoredFlag, 8325 const analyze_printf::OptionalFlag &flag, 8326 const char *startSpecifier, 8327 unsigned specifierLen) { 8328 // Warn about ignored flag with a fixit removal. 8329 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_ignored_flag) 8330 << ignoredFlag.toString() << flag.toString(), 8331 getLocationOfByte(ignoredFlag.getPosition()), 8332 /*IsStringLocation*/true, 8333 getSpecifierRange(startSpecifier, specifierLen), 8334 FixItHint::CreateRemoval( 8335 getSpecifierRange(ignoredFlag.getPosition(), 1))); 8336 } 8337 8338 void CheckPrintfHandler::HandleEmptyObjCModifierFlag(const char *startFlag, 8339 unsigned flagLen) { 8340 // Warn about an empty flag. 8341 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_empty_objc_flag), 8342 getLocationOfByte(startFlag), 8343 /*IsStringLocation*/true, 8344 getSpecifierRange(startFlag, flagLen)); 8345 } 8346 8347 void CheckPrintfHandler::HandleInvalidObjCModifierFlag(const char *startFlag, 8348 unsigned flagLen) { 8349 // Warn about an invalid flag. 
8350 auto Range = getSpecifierRange(startFlag, flagLen); 8351 StringRef flag(startFlag, flagLen); 8352 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_invalid_objc_flag) << flag, 8353 getLocationOfByte(startFlag), 8354 /*IsStringLocation*/true, 8355 Range, FixItHint::CreateRemoval(Range)); 8356 } 8357 8358 void CheckPrintfHandler::HandleObjCFlagsWithNonObjCConversion( 8359 const char *flagsStart, const char *flagsEnd, const char *conversionPosition) { 8360 // Warn about using '[...]' without a '@' conversion. 8361 auto Range = getSpecifierRange(flagsStart, flagsEnd - flagsStart + 1); 8362 auto diag = diag::warn_printf_ObjCflags_without_ObjCConversion; 8363 EmitFormatDiagnostic(S.PDiag(diag) << StringRef(conversionPosition, 1), 8364 getLocationOfByte(conversionPosition), 8365 /*IsStringLocation*/true, 8366 Range, FixItHint::CreateRemoval(Range)); 8367 } 8368 8369 // Determines if the specified is a C++ class or struct containing 8370 // a member with the specified name and kind (e.g. a CXXMethodDecl named 8371 // "c_str()"). 8372 template<typename MemberKind> 8373 static llvm::SmallPtrSet<MemberKind*, 1> 8374 CXXRecordMembersNamed(StringRef Name, Sema &S, QualType Ty) { 8375 const RecordType *RT = Ty->getAs<RecordType>(); 8376 llvm::SmallPtrSet<MemberKind*, 1> Results; 8377 8378 if (!RT) 8379 return Results; 8380 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()); 8381 if (!RD || !RD->getDefinition()) 8382 return Results; 8383 8384 LookupResult R(S, &S.Context.Idents.get(Name), SourceLocation(), 8385 Sema::LookupMemberName); 8386 R.suppressDiagnostics(); 8387 8388 // We just need to include all members of the right kind turned up by the 8389 // filter, at this point. 8390 if (S.LookupQualifiedName(R, RT->getDecl())) 8391 for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) { 8392 NamedDecl *decl = (*I)->getUnderlyingDecl(); 8393 if (MemberKind *FK = dyn_cast<MemberKind>(decl)) 8394 Results.insert(FK); 8395 } 8396 return Results; 8397 } 8398 8399 /// Check if we could call '.c_str()' on an object. 8400 /// 8401 /// FIXME: This returns the wrong results in some cases (if cv-qualifiers don't 8402 /// allow the call, or if it would be ambiguous). 8403 bool Sema::hasCStrMethod(const Expr *E) { 8404 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>; 8405 8406 MethodSet Results = 8407 CXXRecordMembersNamed<CXXMethodDecl>("c_str", *this, E->getType()); 8408 for (MethodSet::iterator MI = Results.begin(), ME = Results.end(); 8409 MI != ME; ++MI) 8410 if ((*MI)->getMinRequiredArguments() == 0) 8411 return true; 8412 return false; 8413 } 8414 8415 // Check if a (w)string was passed when a (w)char* was needed, and offer a 8416 // better diagnostic if so. AT is assumed to be valid. 8417 // Returns true when a c_str() conversion method is found. 8418 bool CheckPrintfHandler::checkForCStrMembers( 8419 const analyze_printf::ArgType &AT, const Expr *E) { 8420 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>; 8421 8422 MethodSet Results = 8423 CXXRecordMembersNamed<CXXMethodDecl>("c_str", S, E->getType()); 8424 8425 for (MethodSet::iterator MI = Results.begin(), ME = Results.end(); 8426 MI != ME; ++MI) { 8427 const CXXMethodDecl *Method = *MI; 8428 if (Method->getMinRequiredArguments() == 0 && 8429 AT.matchesType(S.Context, Method->getReturnType())) { 8430 // FIXME: Suggest parens if the expression needs them. 
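// e.g. for printf("%s", Str) where 'Str' is a std::string, the note below
// comes with a fix-it that appends ".c_str()", yielding
// printf("%s", Str.c_str()). ('Str' is a hypothetical name used only to
// illustrate the fix-it.)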
8431 SourceLocation EndLoc = S.getLocForEndOfToken(E->getEndLoc()); 8432 S.Diag(E->getBeginLoc(), diag::note_printf_c_str) 8433 << "c_str()" << FixItHint::CreateInsertion(EndLoc, ".c_str()"); 8434 return true; 8435 } 8436 } 8437 8438 return false; 8439 } 8440 8441 bool 8442 CheckPrintfHandler::HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier 8443 &FS, 8444 const char *startSpecifier, 8445 unsigned specifierLen) { 8446 using namespace analyze_format_string; 8447 using namespace analyze_printf; 8448 8449 const PrintfConversionSpecifier &CS = FS.getConversionSpecifier(); 8450 8451 if (FS.consumesDataArgument()) { 8452 if (atFirstArg) { 8453 atFirstArg = false; 8454 usesPositionalArgs = FS.usesPositionalArg(); 8455 } 8456 else if (usesPositionalArgs != FS.usesPositionalArg()) { 8457 HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()), 8458 startSpecifier, specifierLen); 8459 return false; 8460 } 8461 } 8462 8463 // First check if the field width, precision, and conversion specifier 8464 // have matching data arguments. 8465 if (!HandleAmount(FS.getFieldWidth(), /* field width */ 0, 8466 startSpecifier, specifierLen)) { 8467 return false; 8468 } 8469 8470 if (!HandleAmount(FS.getPrecision(), /* precision */ 1, 8471 startSpecifier, specifierLen)) { 8472 return false; 8473 } 8474 8475 if (!CS.consumesDataArgument()) { 8476 // FIXME: Technically specifying a precision or field width here 8477 // makes no sense. Worth issuing a warning at some point. 8478 return true; 8479 } 8480 8481 // Consume the argument. 8482 unsigned argIndex = FS.getArgIndex(); 8483 if (argIndex < NumDataArgs) { 8484 // The check to see if the argIndex is valid will come later. 8485 // We set the bit here because we may exit early from this 8486 // function if we encounter some other error. 8487 CoveredArgs.set(argIndex); 8488 } 8489 8490 // FreeBSD kernel extensions. 8491 if (CS.getKind() == ConversionSpecifier::FreeBSDbArg || 8492 CS.getKind() == ConversionSpecifier::FreeBSDDArg) { 8493 // We need at least two arguments. 8494 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex + 1)) 8495 return false; 8496 8497 // Claim the second argument. 8498 CoveredArgs.set(argIndex + 1); 8499 8500 // Type check the first argument (int for %b, pointer for %D) 8501 const Expr *Ex = getDataArg(argIndex); 8502 const analyze_printf::ArgType &AT = 8503 (CS.getKind() == ConversionSpecifier::FreeBSDbArg) ? 8504 ArgType(S.Context.IntTy) : ArgType::CPointerTy; 8505 if (AT.isValid() && !AT.matchesType(S.Context, Ex->getType())) 8506 EmitFormatDiagnostic( 8507 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 8508 << AT.getRepresentativeTypeName(S.Context) << Ex->getType() 8509 << false << Ex->getSourceRange(), 8510 Ex->getBeginLoc(), /*IsStringLocation*/ false, 8511 getSpecifierRange(startSpecifier, specifierLen)); 8512 8513 // Type check the second argument (char * for both %b and %D) 8514 Ex = getDataArg(argIndex + 1); 8515 const analyze_printf::ArgType &AT2 = ArgType::CStrTy; 8516 if (AT2.isValid() && !AT2.matchesType(S.Context, Ex->getType())) 8517 EmitFormatDiagnostic( 8518 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 8519 << AT2.getRepresentativeTypeName(S.Context) << Ex->getType() 8520 << false << Ex->getSourceRange(), 8521 Ex->getBeginLoc(), /*IsStringLocation*/ false, 8522 getSpecifierRange(startSpecifier, specifierLen)); 8523 8524 return true; 8525 } 8526 8527 // Check for using an Objective-C specific conversion specifier 8528 // in a non-ObjC literal. 
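// For example, "%@" in a plain printf() format is rejected by the check
// below, while the same specifier in an NSString/CFString, os_log or
// os_trace format is accepted (see allowsObjCArg() above). Illustrative
// example only.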
8529 if (!allowsObjCArg() && CS.isObjCArg()) { 8530 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 8531 specifierLen); 8532 } 8533 8534 // %P can only be used with os_log. 8535 if (FSType != Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::PArg) { 8536 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 8537 specifierLen); 8538 } 8539 8540 // %n is not allowed with os_log. 8541 if (FSType == Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::nArg) { 8542 EmitFormatDiagnostic(S.PDiag(diag::warn_os_log_format_narg), 8543 getLocationOfByte(CS.getStart()), 8544 /*IsStringLocation*/ false, 8545 getSpecifierRange(startSpecifier, specifierLen)); 8546 8547 return true; 8548 } 8549 8550 // Only scalars are allowed for os_trace. 8551 if (FSType == Sema::FST_OSTrace && 8552 (CS.getKind() == ConversionSpecifier::PArg || 8553 CS.getKind() == ConversionSpecifier::sArg || 8554 CS.getKind() == ConversionSpecifier::ObjCObjArg)) { 8555 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 8556 specifierLen); 8557 } 8558 8559 // Check for use of public/private annotation outside of os_log(). 8560 if (FSType != Sema::FST_OSLog) { 8561 if (FS.isPublic().isSet()) { 8562 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation) 8563 << "public", 8564 getLocationOfByte(FS.isPublic().getPosition()), 8565 /*IsStringLocation*/ false, 8566 getSpecifierRange(startSpecifier, specifierLen)); 8567 } 8568 if (FS.isPrivate().isSet()) { 8569 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation) 8570 << "private", 8571 getLocationOfByte(FS.isPrivate().getPosition()), 8572 /*IsStringLocation*/ false, 8573 getSpecifierRange(startSpecifier, specifierLen)); 8574 } 8575 } 8576 8577 // Check for invalid use of field width 8578 if (!FS.hasValidFieldWidth()) { 8579 HandleInvalidAmount(FS, FS.getFieldWidth(), /* field width */ 0, 8580 startSpecifier, specifierLen); 8581 } 8582 8583 // Check for invalid use of precision 8584 if (!FS.hasValidPrecision()) { 8585 HandleInvalidAmount(FS, FS.getPrecision(), /* precision */ 1, 8586 startSpecifier, specifierLen); 8587 } 8588 8589 // Precision is mandatory for %P specifier. 8590 if (CS.getKind() == ConversionSpecifier::PArg && 8591 FS.getPrecision().getHowSpecified() == OptionalAmount::NotSpecified) { 8592 EmitFormatDiagnostic(S.PDiag(diag::warn_format_P_no_precision), 8593 getLocationOfByte(startSpecifier), 8594 /*IsStringLocation*/ false, 8595 getSpecifierRange(startSpecifier, specifierLen)); 8596 } 8597 8598 // Check each flag does not conflict with any other component. 
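// A couple of illustrative cases (editor's examples) caught around here:
//
//   printf("%+s", s);    // '+' is meaningless with the 's' conversion
//   printf("%-05d", n);  // '0' is ignored because '-' is also present
//
// The first is handled by the flag-validity checks directly below, the
// second by the "ignored flag" checks that follow them.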
8599 if (!FS.hasValidThousandsGroupingPrefix()) 8600 HandleFlag(FS, FS.hasThousandsGrouping(), startSpecifier, specifierLen); 8601 if (!FS.hasValidLeadingZeros()) 8602 HandleFlag(FS, FS.hasLeadingZeros(), startSpecifier, specifierLen); 8603 if (!FS.hasValidPlusPrefix()) 8604 HandleFlag(FS, FS.hasPlusPrefix(), startSpecifier, specifierLen); 8605 if (!FS.hasValidSpacePrefix()) 8606 HandleFlag(FS, FS.hasSpacePrefix(), startSpecifier, specifierLen); 8607 if (!FS.hasValidAlternativeForm()) 8608 HandleFlag(FS, FS.hasAlternativeForm(), startSpecifier, specifierLen); 8609 if (!FS.hasValidLeftJustified()) 8610 HandleFlag(FS, FS.isLeftJustified(), startSpecifier, specifierLen); 8611 8612 // Check that flags are not ignored by another flag 8613 if (FS.hasSpacePrefix() && FS.hasPlusPrefix()) // ' ' ignored by '+' 8614 HandleIgnoredFlag(FS, FS.hasSpacePrefix(), FS.hasPlusPrefix(), 8615 startSpecifier, specifierLen); 8616 if (FS.hasLeadingZeros() && FS.isLeftJustified()) // '0' ignored by '-' 8617 HandleIgnoredFlag(FS, FS.hasLeadingZeros(), FS.isLeftJustified(), 8618 startSpecifier, specifierLen); 8619 8620 // Check the length modifier is valid with the given conversion specifier. 8621 if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(), 8622 S.getLangOpts())) 8623 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 8624 diag::warn_format_nonsensical_length); 8625 else if (!FS.hasStandardLengthModifier()) 8626 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen); 8627 else if (!FS.hasStandardLengthConversionCombination()) 8628 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 8629 diag::warn_format_non_standard_conversion_spec); 8630 8631 if (!FS.hasStandardConversionSpecifier(S.getLangOpts())) 8632 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen); 8633 8634 // The remaining checks depend on the data arguments. 8635 if (HasVAListArg) 8636 return true; 8637 8638 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex)) 8639 return false; 8640 8641 const Expr *Arg = getDataArg(argIndex); 8642 if (!Arg) 8643 return true; 8644 8645 return checkFormatExpr(FS, startSpecifier, specifierLen, Arg); 8646 } 8647 8648 static bool requiresParensToAddCast(const Expr *E) { 8649 // FIXME: We should have a general way to reason about operator 8650 // precedence and whether parens are actually needed here. 8651 // Take care of a few common cases where they aren't. 
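// Roughly (editor's illustration): a bare DeclRefExpr or call such as 'val'
// or 'count()' can take a cast like '(NSInteger)val' directly, whereas
// something like 'a + b' falls through to the conservative default and the
// caller wraps it as '(NSInteger)(a + b)'.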
8652 const Expr *Inside = E->IgnoreImpCasts(); 8653 if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(Inside)) 8654 Inside = POE->getSyntacticForm()->IgnoreImpCasts(); 8655 8656 switch (Inside->getStmtClass()) { 8657 case Stmt::ArraySubscriptExprClass: 8658 case Stmt::CallExprClass: 8659 case Stmt::CharacterLiteralClass: 8660 case Stmt::CXXBoolLiteralExprClass: 8661 case Stmt::DeclRefExprClass: 8662 case Stmt::FloatingLiteralClass: 8663 case Stmt::IntegerLiteralClass: 8664 case Stmt::MemberExprClass: 8665 case Stmt::ObjCArrayLiteralClass: 8666 case Stmt::ObjCBoolLiteralExprClass: 8667 case Stmt::ObjCBoxedExprClass: 8668 case Stmt::ObjCDictionaryLiteralClass: 8669 case Stmt::ObjCEncodeExprClass: 8670 case Stmt::ObjCIvarRefExprClass: 8671 case Stmt::ObjCMessageExprClass: 8672 case Stmt::ObjCPropertyRefExprClass: 8673 case Stmt::ObjCStringLiteralClass: 8674 case Stmt::ObjCSubscriptRefExprClass: 8675 case Stmt::ParenExprClass: 8676 case Stmt::StringLiteralClass: 8677 case Stmt::UnaryOperatorClass: 8678 return false; 8679 default: 8680 return true; 8681 } 8682 } 8683 8684 static std::pair<QualType, StringRef> 8685 shouldNotPrintDirectly(const ASTContext &Context, 8686 QualType IntendedTy, 8687 const Expr *E) { 8688 // Use a 'while' to peel off layers of typedefs. 8689 QualType TyTy = IntendedTy; 8690 while (const TypedefType *UserTy = TyTy->getAs<TypedefType>()) { 8691 StringRef Name = UserTy->getDecl()->getName(); 8692 QualType CastTy = llvm::StringSwitch<QualType>(Name) 8693 .Case("CFIndex", Context.getNSIntegerType()) 8694 .Case("NSInteger", Context.getNSIntegerType()) 8695 .Case("NSUInteger", Context.getNSUIntegerType()) 8696 .Case("SInt32", Context.IntTy) 8697 .Case("UInt32", Context.UnsignedIntTy) 8698 .Default(QualType()); 8699 8700 if (!CastTy.isNull()) 8701 return std::make_pair(CastTy, Name); 8702 8703 TyTy = UserTy->desugar(); 8704 } 8705 8706 // Strip parens if necessary. 8707 if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) 8708 return shouldNotPrintDirectly(Context, 8709 PE->getSubExpr()->getType(), 8710 PE->getSubExpr()); 8711 8712 // If this is a conditional expression, then its result type is constructed 8713 // via usual arithmetic conversions and thus there might be no necessary 8714 // typedef sugar there. Recurse to operands to check for NSInteger & 8715 // Co. usage condition. 8716 if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) { 8717 QualType TrueTy, FalseTy; 8718 StringRef TrueName, FalseName; 8719 8720 std::tie(TrueTy, TrueName) = 8721 shouldNotPrintDirectly(Context, 8722 CO->getTrueExpr()->getType(), 8723 CO->getTrueExpr()); 8724 std::tie(FalseTy, FalseName) = 8725 shouldNotPrintDirectly(Context, 8726 CO->getFalseExpr()->getType(), 8727 CO->getFalseExpr()); 8728 8729 if (TrueTy == FalseTy) 8730 return std::make_pair(TrueTy, TrueName); 8731 else if (TrueTy.isNull()) 8732 return std::make_pair(FalseTy, FalseName); 8733 else if (FalseTy.isNull()) 8734 return std::make_pair(TrueTy, TrueName); 8735 } 8736 8737 return std::make_pair(QualType(), StringRef()); 8738 } 8739 8740 /// Return true if \p ICE is an implicit argument promotion of an arithmetic 8741 /// type. Bit-field 'promotions' from a higher ranked type to a lower ranked 8742 /// type do not count. 8743 static bool 8744 isArithmeticArgumentPromotion(Sema &S, const ImplicitCastExpr *ICE) { 8745 QualType From = ICE->getSubExpr()->getType(); 8746 QualType To = ICE->getType(); 8747 // It's an integer promotion if the destination type is the promoted 8748 // source type. 
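// e.g. a 'short' or 'unsigned char' argument passed through printf's '...'
// is promoted to 'int'; recognizing that here lets the caller report the
// original, pre-promotion argument type. (Editor's example.)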
8749 if (ICE->getCastKind() == CK_IntegralCast && 8750 From->isPromotableIntegerType() && 8751 S.Context.getPromotedIntegerType(From) == To) 8752 return true; 8753 // Look through vector types, since we do default argument promotion for 8754 // those in OpenCL. 8755 if (const auto *VecTy = From->getAs<ExtVectorType>()) 8756 From = VecTy->getElementType(); 8757 if (const auto *VecTy = To->getAs<ExtVectorType>()) 8758 To = VecTy->getElementType(); 8759 // It's a floating promotion if the source type is a lower rank. 8760 return ICE->getCastKind() == CK_FloatingCast && 8761 S.Context.getFloatingTypeOrder(From, To) < 0; 8762 } 8763 8764 bool 8765 CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, 8766 const char *StartSpecifier, 8767 unsigned SpecifierLen, 8768 const Expr *E) { 8769 using namespace analyze_format_string; 8770 using namespace analyze_printf; 8771 8772 // Now type check the data expression that matches the 8773 // format specifier. 8774 const analyze_printf::ArgType &AT = FS.getArgType(S.Context, isObjCContext()); 8775 if (!AT.isValid()) 8776 return true; 8777 8778 QualType ExprTy = E->getType(); 8779 while (const TypeOfExprType *TET = dyn_cast<TypeOfExprType>(ExprTy)) { 8780 ExprTy = TET->getUnderlyingExpr()->getType(); 8781 } 8782 8783 // Diagnose attempts to print a boolean value as a character. Unlike other 8784 // -Wformat diagnostics, this is fine from a type perspective, but it still 8785 // doesn't make sense. 8786 if (FS.getConversionSpecifier().getKind() == ConversionSpecifier::cArg && 8787 E->isKnownToHaveBooleanValue()) { 8788 const CharSourceRange &CSR = 8789 getSpecifierRange(StartSpecifier, SpecifierLen); 8790 SmallString<4> FSString; 8791 llvm::raw_svector_ostream os(FSString); 8792 FS.toString(os); 8793 EmitFormatDiagnostic(S.PDiag(diag::warn_format_bool_as_character) 8794 << FSString, 8795 E->getExprLoc(), false, CSR); 8796 return true; 8797 } 8798 8799 analyze_printf::ArgType::MatchKind Match = AT.matchesType(S.Context, ExprTy); 8800 if (Match == analyze_printf::ArgType::Match) 8801 return true; 8802 8803 // Look through argument promotions for our error message's reported type. 8804 // This includes the integral and floating promotions, but excludes array 8805 // and function pointer decay (seeing that an argument intended to be a 8806 // string has type 'char [6]' is probably more confusing than 'char *') and 8807 // certain bitfield promotions (bitfields can be 'demoted' to a lesser type). 8808 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) { 8809 if (isArithmeticArgumentPromotion(S, ICE)) { 8810 E = ICE->getSubExpr(); 8811 ExprTy = E->getType(); 8812 8813 // Check if we didn't match because of an implicit cast from a 'char' 8814 // or 'short' to an 'int'. This is done because printf is a varargs 8815 // function. 8816 if (ICE->getType() == S.Context.IntTy || 8817 ICE->getType() == S.Context.UnsignedIntTy) { 8818 // All further checking is done on the subexpression 8819 const analyze_printf::ArgType::MatchKind ImplicitMatch = 8820 AT.matchesType(S.Context, ExprTy); 8821 if (ImplicitMatch == analyze_printf::ArgType::Match) 8822 return true; 8823 if (ImplicitMatch == ArgType::NoMatchPedantic || 8824 ImplicitMatch == ArgType::NoMatchTypeConfusion) 8825 Match = ImplicitMatch; 8826 } 8827 } 8828 } else if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E)) { 8829 // Special case for 'a', which has type 'int' in C. 
8830 // Note, however, that we do /not/ want to treat multibyte constants like 8831 // 'MooV' as characters! This form is deprecated but still exists. In 8832 // addition, don't treat expressions as of type 'char' if one byte length 8833 // modifier is provided. 8834 if (ExprTy == S.Context.IntTy && 8835 FS.getLengthModifier().getKind() != LengthModifier::AsChar) 8836 if (llvm::isUIntN(S.Context.getCharWidth(), CL->getValue())) 8837 ExprTy = S.Context.CharTy; 8838 } 8839 8840 // Look through enums to their underlying type. 8841 bool IsEnum = false; 8842 if (auto EnumTy = ExprTy->getAs<EnumType>()) { 8843 ExprTy = EnumTy->getDecl()->getIntegerType(); 8844 IsEnum = true; 8845 } 8846 8847 // %C in an Objective-C context prints a unichar, not a wchar_t. 8848 // If the argument is an integer of some kind, believe the %C and suggest 8849 // a cast instead of changing the conversion specifier. 8850 QualType IntendedTy = ExprTy; 8851 if (isObjCContext() && 8852 FS.getConversionSpecifier().getKind() == ConversionSpecifier::CArg) { 8853 if (ExprTy->isIntegralOrUnscopedEnumerationType() && 8854 !ExprTy->isCharType()) { 8855 // 'unichar' is defined as a typedef of unsigned short, but we should 8856 // prefer using the typedef if it is visible. 8857 IntendedTy = S.Context.UnsignedShortTy; 8858 8859 // While we are here, check if the value is an IntegerLiteral that happens 8860 // to be within the valid range. 8861 if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E)) { 8862 const llvm::APInt &V = IL->getValue(); 8863 if (V.getActiveBits() <= S.Context.getTypeSize(IntendedTy)) 8864 return true; 8865 } 8866 8867 LookupResult Result(S, &S.Context.Idents.get("unichar"), E->getBeginLoc(), 8868 Sema::LookupOrdinaryName); 8869 if (S.LookupName(Result, S.getCurScope())) { 8870 NamedDecl *ND = Result.getFoundDecl(); 8871 if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(ND)) 8872 if (TD->getUnderlyingType() == IntendedTy) 8873 IntendedTy = S.Context.getTypedefType(TD); 8874 } 8875 } 8876 } 8877 8878 // Special-case some of Darwin's platform-independence types by suggesting 8879 // casts to primitive types that are known to be large enough. 8880 bool ShouldNotPrintDirectly = false; StringRef CastTyName; 8881 if (S.Context.getTargetInfo().getTriple().isOSDarwin()) { 8882 QualType CastTy; 8883 std::tie(CastTy, CastTyName) = shouldNotPrintDirectly(S.Context, IntendedTy, E); 8884 if (!CastTy.isNull()) { 8885 // %zi/%zu and %td/%tu are OK to use for NSInteger/NSUInteger of type int 8886 // (long in ASTContext). Only complain to pedants. 8887 if ((CastTyName == "NSInteger" || CastTyName == "NSUInteger") && 8888 (AT.isSizeT() || AT.isPtrdiffT()) && 8889 AT.matchesType(S.Context, CastTy)) 8890 Match = ArgType::NoMatchPedantic; 8891 IntendedTy = CastTy; 8892 ShouldNotPrintDirectly = true; 8893 } 8894 } 8895 8896 // We may be able to offer a FixItHint if it is a supported type. 
8897 PrintfSpecifier fixedFS = FS; 8898 bool Success = 8899 fixedFS.fixType(IntendedTy, S.getLangOpts(), S.Context, isObjCContext()); 8900 8901 if (Success) { 8902 // Get the fix string from the fixed format specifier 8903 SmallString<16> buf; 8904 llvm::raw_svector_ostream os(buf); 8905 fixedFS.toString(os); 8906 8907 CharSourceRange SpecRange = getSpecifierRange(StartSpecifier, SpecifierLen); 8908 8909 if (IntendedTy == ExprTy && !ShouldNotPrintDirectly) { 8910 unsigned Diag; 8911 switch (Match) { 8912 case ArgType::Match: llvm_unreachable("expected non-matching"); 8913 case ArgType::NoMatchPedantic: 8914 Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic; 8915 break; 8916 case ArgType::NoMatchTypeConfusion: 8917 Diag = diag::warn_format_conversion_argument_type_mismatch_confusion; 8918 break; 8919 case ArgType::NoMatch: 8920 Diag = diag::warn_format_conversion_argument_type_mismatch; 8921 break; 8922 } 8923 8924 // In this case, the specifier is wrong and should be changed to match 8925 // the argument. 8926 EmitFormatDiagnostic(S.PDiag(Diag) 8927 << AT.getRepresentativeTypeName(S.Context) 8928 << IntendedTy << IsEnum << E->getSourceRange(), 8929 E->getBeginLoc(), 8930 /*IsStringLocation*/ false, SpecRange, 8931 FixItHint::CreateReplacement(SpecRange, os.str())); 8932 } else { 8933 // The canonical type for formatting this value is different from the 8934 // actual type of the expression. (This occurs, for example, with Darwin's 8935 // NSInteger on 32-bit platforms, where it is typedef'd as 'int', but 8936 // should be printed as 'long' for 64-bit compatibility.) 8937 // Rather than emitting a normal format/argument mismatch, we want to 8938 // add a cast to the recommended type (and correct the format string 8939 // if necessary). 8940 SmallString<16> CastBuf; 8941 llvm::raw_svector_ostream CastFix(CastBuf); 8942 CastFix << "("; 8943 IntendedTy.print(CastFix, S.Context.getPrintingPolicy()); 8944 CastFix << ")"; 8945 8946 SmallVector<FixItHint,4> Hints; 8947 if (!AT.matchesType(S.Context, IntendedTy) || ShouldNotPrintDirectly) 8948 Hints.push_back(FixItHint::CreateReplacement(SpecRange, os.str())); 8949 8950 if (const CStyleCastExpr *CCast = dyn_cast<CStyleCastExpr>(E)) { 8951 // If there's already a cast present, just replace it. 8952 SourceRange CastRange(CCast->getLParenLoc(), CCast->getRParenLoc()); 8953 Hints.push_back(FixItHint::CreateReplacement(CastRange, CastFix.str())); 8954 8955 } else if (!requiresParensToAddCast(E)) { 8956 // If the expression has high enough precedence, 8957 // just write the C-style cast. 8958 Hints.push_back( 8959 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str())); 8960 } else { 8961 // Otherwise, add parens around the expression as well as the cast. 8962 CastFix << "("; 8963 Hints.push_back( 8964 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str())); 8965 8966 SourceLocation After = S.getLocForEndOfToken(E->getEndLoc()); 8967 Hints.push_back(FixItHint::CreateInsertion(After, ")")); 8968 } 8969 8970 if (ShouldNotPrintDirectly) { 8971 // The expression has a type that should not be printed directly. 8972 // We extract the name from the typedef because we don't want to show 8973 // the underlying type in the diagnostic. 8974 StringRef Name; 8975 if (const TypedefType *TypedefTy = dyn_cast<TypedefType>(ExprTy)) 8976 Name = TypedefTy->getDecl()->getName(); 8977 else 8978 Name = CastTyName; 8979 unsigned Diag = Match == ArgType::NoMatchPedantic 8980 ? 
diag::warn_format_argument_needs_cast_pedantic 8981 : diag::warn_format_argument_needs_cast; 8982 EmitFormatDiagnostic(S.PDiag(Diag) << Name << IntendedTy << IsEnum 8983 << E->getSourceRange(), 8984 E->getBeginLoc(), /*IsStringLocation=*/false, 8985 SpecRange, Hints); 8986 } else { 8987 // In this case, the expression could be printed using a different 8988 // specifier, but we've decided that the specifier is probably correct 8989 // and we should cast instead. Just use the normal warning message. 8990 EmitFormatDiagnostic( 8991 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 8992 << AT.getRepresentativeTypeName(S.Context) << ExprTy << IsEnum 8993 << E->getSourceRange(), 8994 E->getBeginLoc(), /*IsStringLocation*/ false, SpecRange, Hints); 8995 } 8996 } 8997 } else { 8998 const CharSourceRange &CSR = getSpecifierRange(StartSpecifier, 8999 SpecifierLen); 9000 // Since the warning for passing non-POD types to variadic functions 9001 // was deferred until now, we emit a warning for non-POD 9002 // arguments here. 9003 switch (S.isValidVarArgType(ExprTy)) { 9004 case Sema::VAK_Valid: 9005 case Sema::VAK_ValidInCXX11: { 9006 unsigned Diag; 9007 switch (Match) { 9008 case ArgType::Match: llvm_unreachable("expected non-matching"); 9009 case ArgType::NoMatchPedantic: 9010 Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic; 9011 break; 9012 case ArgType::NoMatchTypeConfusion: 9013 Diag = diag::warn_format_conversion_argument_type_mismatch_confusion; 9014 break; 9015 case ArgType::NoMatch: 9016 Diag = diag::warn_format_conversion_argument_type_mismatch; 9017 break; 9018 } 9019 9020 EmitFormatDiagnostic( 9021 S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) << ExprTy 9022 << IsEnum << CSR << E->getSourceRange(), 9023 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 9024 break; 9025 } 9026 case Sema::VAK_Undefined: 9027 case Sema::VAK_MSVCUndefined: 9028 EmitFormatDiagnostic(S.PDiag(diag::warn_non_pod_vararg_with_format_string) 9029 << S.getLangOpts().CPlusPlus11 << ExprTy 9030 << CallType 9031 << AT.getRepresentativeTypeName(S.Context) << CSR 9032 << E->getSourceRange(), 9033 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 9034 checkForCStrMembers(AT, E); 9035 break; 9036 9037 case Sema::VAK_Invalid: 9038 if (ExprTy->isObjCObjectType()) 9039 EmitFormatDiagnostic( 9040 S.PDiag(diag::err_cannot_pass_objc_interface_to_vararg_format) 9041 << S.getLangOpts().CPlusPlus11 << ExprTy << CallType 9042 << AT.getRepresentativeTypeName(S.Context) << CSR 9043 << E->getSourceRange(), 9044 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 9045 else 9046 // FIXME: If this is an initializer list, suggest removing the braces 9047 // or inserting a cast to the target type. 
9048 S.Diag(E->getBeginLoc(), diag::err_cannot_pass_to_vararg_format) 9049 << isa<InitListExpr>(E) << ExprTy << CallType 9050 << AT.getRepresentativeTypeName(S.Context) << E->getSourceRange(); 9051 break; 9052 } 9053 9054 assert(FirstDataArg + FS.getArgIndex() < CheckedVarArgs.size() && 9055 "format string specifier index out of range"); 9056 CheckedVarArgs[FirstDataArg + FS.getArgIndex()] = true; 9057 } 9058 9059 return true; 9060 } 9061 9062 //===--- CHECK: Scanf format string checking ------------------------------===// 9063 9064 namespace { 9065 9066 class CheckScanfHandler : public CheckFormatHandler { 9067 public: 9068 CheckScanfHandler(Sema &s, const FormatStringLiteral *fexpr, 9069 const Expr *origFormatExpr, Sema::FormatStringType type, 9070 unsigned firstDataArg, unsigned numDataArgs, 9071 const char *beg, bool hasVAListArg, 9072 ArrayRef<const Expr *> Args, unsigned formatIdx, 9073 bool inFunctionCall, Sema::VariadicCallType CallType, 9074 llvm::SmallBitVector &CheckedVarArgs, 9075 UncoveredArgHandler &UncoveredArg) 9076 : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg, 9077 numDataArgs, beg, hasVAListArg, Args, formatIdx, 9078 inFunctionCall, CallType, CheckedVarArgs, 9079 UncoveredArg) {} 9080 9081 bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS, 9082 const char *startSpecifier, 9083 unsigned specifierLen) override; 9084 9085 bool HandleInvalidScanfConversionSpecifier( 9086 const analyze_scanf::ScanfSpecifier &FS, 9087 const char *startSpecifier, 9088 unsigned specifierLen) override; 9089 9090 void HandleIncompleteScanList(const char *start, const char *end) override; 9091 }; 9092 9093 } // namespace 9094 9095 void CheckScanfHandler::HandleIncompleteScanList(const char *start, 9096 const char *end) { 9097 EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_scanlist_incomplete), 9098 getLocationOfByte(end), /*IsStringLocation*/true, 9099 getSpecifierRange(start, end - start)); 9100 } 9101 9102 bool CheckScanfHandler::HandleInvalidScanfConversionSpecifier( 9103 const analyze_scanf::ScanfSpecifier &FS, 9104 const char *startSpecifier, 9105 unsigned specifierLen) { 9106 const analyze_scanf::ScanfConversionSpecifier &CS = 9107 FS.getConversionSpecifier(); 9108 9109 return HandleInvalidConversionSpecifier(FS.getArgIndex(), 9110 getLocationOfByte(CS.getStart()), 9111 startSpecifier, specifierLen, 9112 CS.getStart(), CS.getLength()); 9113 } 9114 9115 bool CheckScanfHandler::HandleScanfSpecifier( 9116 const analyze_scanf::ScanfSpecifier &FS, 9117 const char *startSpecifier, 9118 unsigned specifierLen) { 9119 using namespace analyze_scanf; 9120 using namespace analyze_format_string; 9121 9122 const ScanfConversionSpecifier &CS = FS.getConversionSpecifier(); 9123 9124 // Handle case where '%' and '*' don't consume an argument. These shouldn't 9125 // be used to decide if we are using positional arguments consistently. 9126 if (FS.consumesDataArgument()) { 9127 if (atFirstArg) { 9128 atFirstArg = false; 9129 usesPositionalArgs = FS.usesPositionalArg(); 9130 } 9131 else if (usesPositionalArgs != FS.usesPositionalArg()) { 9132 HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()), 9133 startSpecifier, specifierLen); 9134 return false; 9135 } 9136 } 9137 9138 // Check if the field with is non-zero. 
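  // For example (illustrative, hypothetical names), a zero width as in
  //   scanf("%0d", &n);
  // is meaningless, and the FixIt below simply removes the "0".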
9139 const OptionalAmount &Amt = FS.getFieldWidth(); 9140 if (Amt.getHowSpecified() == OptionalAmount::Constant) { 9141 if (Amt.getConstantAmount() == 0) { 9142 const CharSourceRange &R = getSpecifierRange(Amt.getStart(), 9143 Amt.getConstantLength()); 9144 EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_nonzero_width), 9145 getLocationOfByte(Amt.getStart()), 9146 /*IsStringLocation*/true, R, 9147 FixItHint::CreateRemoval(R)); 9148 } 9149 } 9150 9151 if (!FS.consumesDataArgument()) { 9152 // FIXME: Technically specifying a precision or field width here 9153 // makes no sense. Worth issuing a warning at some point. 9154 return true; 9155 } 9156 9157 // Consume the argument. 9158 unsigned argIndex = FS.getArgIndex(); 9159 if (argIndex < NumDataArgs) { 9160 // The check to see if the argIndex is valid will come later. 9161 // We set the bit here because we may exit early from this 9162 // function if we encounter some other error. 9163 CoveredArgs.set(argIndex); 9164 } 9165 9166 // Check the length modifier is valid with the given conversion specifier. 9167 if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(), 9168 S.getLangOpts())) 9169 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 9170 diag::warn_format_nonsensical_length); 9171 else if (!FS.hasStandardLengthModifier()) 9172 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen); 9173 else if (!FS.hasStandardLengthConversionCombination()) 9174 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 9175 diag::warn_format_non_standard_conversion_spec); 9176 9177 if (!FS.hasStandardConversionSpecifier(S.getLangOpts())) 9178 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen); 9179 9180 // The remaining checks depend on the data arguments. 9181 if (HasVAListArg) 9182 return true; 9183 9184 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex)) 9185 return false; 9186 9187 // Check that the argument type matches the format specifier. 9188 const Expr *Ex = getDataArg(argIndex); 9189 if (!Ex) 9190 return true; 9191 9192 const analyze_format_string::ArgType &AT = FS.getArgType(S.Context); 9193 9194 if (!AT.isValid()) { 9195 return true; 9196 } 9197 9198 analyze_format_string::ArgType::MatchKind Match = 9199 AT.matchesType(S.Context, Ex->getType()); 9200 bool Pedantic = Match == analyze_format_string::ArgType::NoMatchPedantic; 9201 if (Match == analyze_format_string::ArgType::Match) 9202 return true; 9203 9204 ScanfSpecifier fixedFS = FS; 9205 bool Success = fixedFS.fixType(Ex->getType(), Ex->IgnoreImpCasts()->getType(), 9206 S.getLangOpts(), S.Context); 9207 9208 unsigned Diag = 9209 Pedantic ? diag::warn_format_conversion_argument_type_mismatch_pedantic 9210 : diag::warn_format_conversion_argument_type_mismatch; 9211 9212 if (Success) { 9213 // Get the fix string from the fixed format specifier. 
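    // Illustrative sketch (hypothetical names): for
    //   scanf("%f", &someDouble);
    // fixType() above typically rewrites the specifier to "%lf", and the FixIt
    // below replaces the original specifier range with that string.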
9214 SmallString<128> buf; 9215 llvm::raw_svector_ostream os(buf); 9216 fixedFS.toString(os); 9217 9218 EmitFormatDiagnostic( 9219 S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) 9220 << Ex->getType() << false << Ex->getSourceRange(), 9221 Ex->getBeginLoc(), 9222 /*IsStringLocation*/ false, 9223 getSpecifierRange(startSpecifier, specifierLen), 9224 FixItHint::CreateReplacement( 9225 getSpecifierRange(startSpecifier, specifierLen), os.str())); 9226 } else { 9227 EmitFormatDiagnostic(S.PDiag(Diag) 9228 << AT.getRepresentativeTypeName(S.Context) 9229 << Ex->getType() << false << Ex->getSourceRange(), 9230 Ex->getBeginLoc(), 9231 /*IsStringLocation*/ false, 9232 getSpecifierRange(startSpecifier, specifierLen)); 9233 } 9234 9235 return true; 9236 } 9237 9238 static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr, 9239 const Expr *OrigFormatExpr, 9240 ArrayRef<const Expr *> Args, 9241 bool HasVAListArg, unsigned format_idx, 9242 unsigned firstDataArg, 9243 Sema::FormatStringType Type, 9244 bool inFunctionCall, 9245 Sema::VariadicCallType CallType, 9246 llvm::SmallBitVector &CheckedVarArgs, 9247 UncoveredArgHandler &UncoveredArg, 9248 bool IgnoreStringsWithoutSpecifiers) { 9249 // CHECK: is the format string a wide literal? 9250 if (!FExpr->isAscii() && !FExpr->isUTF8()) { 9251 CheckFormatHandler::EmitFormatDiagnostic( 9252 S, inFunctionCall, Args[format_idx], 9253 S.PDiag(diag::warn_format_string_is_wide_literal), FExpr->getBeginLoc(), 9254 /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange()); 9255 return; 9256 } 9257 9258 // Str - The format string. NOTE: this is NOT null-terminated! 9259 StringRef StrRef = FExpr->getString(); 9260 const char *Str = StrRef.data(); 9261 // Account for cases where the string literal is truncated in a declaration. 9262 const ConstantArrayType *T = 9263 S.Context.getAsConstantArrayType(FExpr->getType()); 9264 assert(T && "String literal not of constant array type!"); 9265 size_t TypeSize = T->getSize().getZExtValue(); 9266 size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size()); 9267 const unsigned numDataArgs = Args.size() - firstDataArg; 9268 9269 if (IgnoreStringsWithoutSpecifiers && 9270 !analyze_format_string::parseFormatStringHasFormattingSpecifiers( 9271 Str, Str + StrLen, S.getLangOpts(), S.Context.getTargetInfo())) 9272 return; 9273 9274 // Emit a warning if the string literal is truncated and does not contain an 9275 // embedded null character. 9276 if (TypeSize <= StrRef.size() && 9277 StrRef.substr(0, TypeSize).find('\0') == StringRef::npos) { 9278 CheckFormatHandler::EmitFormatDiagnostic( 9279 S, inFunctionCall, Args[format_idx], 9280 S.PDiag(diag::warn_printf_format_string_not_null_terminated), 9281 FExpr->getBeginLoc(), 9282 /*IsStringLocation=*/true, OrigFormatExpr->getSourceRange()); 9283 return; 9284 } 9285 9286 // CHECK: empty format string? 
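  // e.g. (illustrative) printf("", value) supplies a data argument to an
  // empty format string and is diagnosed here.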
  if (StrLen == 0 && numDataArgs > 0) {
    CheckFormatHandler::EmitFormatDiagnostic(
        S, inFunctionCall, Args[format_idx],
        S.PDiag(diag::warn_empty_format_string), FExpr->getBeginLoc(),
        /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange());
    return;
  }

  if (Type == Sema::FST_Printf || Type == Sema::FST_NSString ||
      Type == Sema::FST_FreeBSDKPrintf || Type == Sema::FST_OSLog ||
      Type == Sema::FST_OSTrace) {
    CheckPrintfHandler H(
        S, FExpr, OrigFormatExpr, Type, firstDataArg, numDataArgs,
        (Type == Sema::FST_NSString || Type == Sema::FST_OSTrace), Str,
        HasVAListArg, Args, format_idx, inFunctionCall, CallType,
        CheckedVarArgs, UncoveredArg);

    if (!analyze_format_string::ParsePrintfString(H, Str, Str + StrLen,
                                                  S.getLangOpts(),
                                                  S.Context.getTargetInfo(),
                                                  Type == Sema::FST_FreeBSDKPrintf))
      H.DoneProcessing();
  } else if (Type == Sema::FST_Scanf) {
    CheckScanfHandler H(S, FExpr, OrigFormatExpr, Type, firstDataArg,
                        numDataArgs, Str, HasVAListArg, Args, format_idx,
                        inFunctionCall, CallType, CheckedVarArgs, UncoveredArg);

    if (!analyze_format_string::ParseScanfString(H, Str, Str + StrLen,
                                                 S.getLangOpts(),
                                                 S.Context.getTargetInfo()))
      H.DoneProcessing();
  } // TODO: handle other formats
}

bool Sema::FormatStringHasSArg(const StringLiteral *FExpr) {
  // Str - The format string. NOTE: this is NOT null-terminated!
  StringRef StrRef = FExpr->getString();
  const char *Str = StrRef.data();
  // Account for cases where the string literal is truncated in a declaration.
  const ConstantArrayType *T = Context.getAsConstantArrayType(FExpr->getType());
  assert(T && "String literal not of constant array type!");
  size_t TypeSize = T->getSize().getZExtValue();
  size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size());
  return analyze_format_string::ParseFormatStringHasSArg(Str, Str + StrLen,
                                                         getLangOpts(),
                                                         Context.getTargetInfo());
}

//===--- CHECK: Warn on use of wrong absolute value function. -------------===//

// Returns the related absolute value function that is larger, or 0 if one
// does not exist.
9339 static unsigned getLargerAbsoluteValueFunction(unsigned AbsFunction) { 9340 switch (AbsFunction) { 9341 default: 9342 return 0; 9343 9344 case Builtin::BI__builtin_abs: 9345 return Builtin::BI__builtin_labs; 9346 case Builtin::BI__builtin_labs: 9347 return Builtin::BI__builtin_llabs; 9348 case Builtin::BI__builtin_llabs: 9349 return 0; 9350 9351 case Builtin::BI__builtin_fabsf: 9352 return Builtin::BI__builtin_fabs; 9353 case Builtin::BI__builtin_fabs: 9354 return Builtin::BI__builtin_fabsl; 9355 case Builtin::BI__builtin_fabsl: 9356 return 0; 9357 9358 case Builtin::BI__builtin_cabsf: 9359 return Builtin::BI__builtin_cabs; 9360 case Builtin::BI__builtin_cabs: 9361 return Builtin::BI__builtin_cabsl; 9362 case Builtin::BI__builtin_cabsl: 9363 return 0; 9364 9365 case Builtin::BIabs: 9366 return Builtin::BIlabs; 9367 case Builtin::BIlabs: 9368 return Builtin::BIllabs; 9369 case Builtin::BIllabs: 9370 return 0; 9371 9372 case Builtin::BIfabsf: 9373 return Builtin::BIfabs; 9374 case Builtin::BIfabs: 9375 return Builtin::BIfabsl; 9376 case Builtin::BIfabsl: 9377 return 0; 9378 9379 case Builtin::BIcabsf: 9380 return Builtin::BIcabs; 9381 case Builtin::BIcabs: 9382 return Builtin::BIcabsl; 9383 case Builtin::BIcabsl: 9384 return 0; 9385 } 9386 } 9387 9388 // Returns the argument type of the absolute value function. 9389 static QualType getAbsoluteValueArgumentType(ASTContext &Context, 9390 unsigned AbsType) { 9391 if (AbsType == 0) 9392 return QualType(); 9393 9394 ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None; 9395 QualType BuiltinType = Context.GetBuiltinType(AbsType, Error); 9396 if (Error != ASTContext::GE_None) 9397 return QualType(); 9398 9399 const FunctionProtoType *FT = BuiltinType->getAs<FunctionProtoType>(); 9400 if (!FT) 9401 return QualType(); 9402 9403 if (FT->getNumParams() != 1) 9404 return QualType(); 9405 9406 return FT->getParamType(0); 9407 } 9408 9409 // Returns the best absolute value function, or zero, based on type and 9410 // current absolute value function. 9411 static unsigned getBestAbsFunction(ASTContext &Context, QualType ArgType, 9412 unsigned AbsFunctionKind) { 9413 unsigned BestKind = 0; 9414 uint64_t ArgSize = Context.getTypeSize(ArgType); 9415 for (unsigned Kind = AbsFunctionKind; Kind != 0; 9416 Kind = getLargerAbsoluteValueFunction(Kind)) { 9417 QualType ParamType = getAbsoluteValueArgumentType(Context, Kind); 9418 if (Context.getTypeSize(ParamType) >= ArgSize) { 9419 if (BestKind == 0) 9420 BestKind = Kind; 9421 else if (Context.hasSameType(ParamType, ArgType)) { 9422 BestKind = Kind; 9423 break; 9424 } 9425 } 9426 } 9427 return BestKind; 9428 } 9429 9430 enum AbsoluteValueKind { 9431 AVK_Integer, 9432 AVK_Floating, 9433 AVK_Complex 9434 }; 9435 9436 static AbsoluteValueKind getAbsoluteValueKind(QualType T) { 9437 if (T->isIntegralOrEnumerationType()) 9438 return AVK_Integer; 9439 if (T->isRealFloatingType()) 9440 return AVK_Floating; 9441 if (T->isAnyComplexType()) 9442 return AVK_Complex; 9443 9444 llvm_unreachable("Type not integer, floating, or complex"); 9445 } 9446 9447 // Changes the absolute value function to a different type. Preserves whether 9448 // the function is a builtin. 
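// For instance (illustrative), changeAbsFunction(Builtin::BIabs, AVK_Floating)
// returns Builtin::BIfabsf, and getBestAbsFunction() then walks up to the
// 'fabs' variant whose parameter type is wide enough for the actual argument.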
9449 static unsigned changeAbsFunction(unsigned AbsKind, 9450 AbsoluteValueKind ValueKind) { 9451 switch (ValueKind) { 9452 case AVK_Integer: 9453 switch (AbsKind) { 9454 default: 9455 return 0; 9456 case Builtin::BI__builtin_fabsf: 9457 case Builtin::BI__builtin_fabs: 9458 case Builtin::BI__builtin_fabsl: 9459 case Builtin::BI__builtin_cabsf: 9460 case Builtin::BI__builtin_cabs: 9461 case Builtin::BI__builtin_cabsl: 9462 return Builtin::BI__builtin_abs; 9463 case Builtin::BIfabsf: 9464 case Builtin::BIfabs: 9465 case Builtin::BIfabsl: 9466 case Builtin::BIcabsf: 9467 case Builtin::BIcabs: 9468 case Builtin::BIcabsl: 9469 return Builtin::BIabs; 9470 } 9471 case AVK_Floating: 9472 switch (AbsKind) { 9473 default: 9474 return 0; 9475 case Builtin::BI__builtin_abs: 9476 case Builtin::BI__builtin_labs: 9477 case Builtin::BI__builtin_llabs: 9478 case Builtin::BI__builtin_cabsf: 9479 case Builtin::BI__builtin_cabs: 9480 case Builtin::BI__builtin_cabsl: 9481 return Builtin::BI__builtin_fabsf; 9482 case Builtin::BIabs: 9483 case Builtin::BIlabs: 9484 case Builtin::BIllabs: 9485 case Builtin::BIcabsf: 9486 case Builtin::BIcabs: 9487 case Builtin::BIcabsl: 9488 return Builtin::BIfabsf; 9489 } 9490 case AVK_Complex: 9491 switch (AbsKind) { 9492 default: 9493 return 0; 9494 case Builtin::BI__builtin_abs: 9495 case Builtin::BI__builtin_labs: 9496 case Builtin::BI__builtin_llabs: 9497 case Builtin::BI__builtin_fabsf: 9498 case Builtin::BI__builtin_fabs: 9499 case Builtin::BI__builtin_fabsl: 9500 return Builtin::BI__builtin_cabsf; 9501 case Builtin::BIabs: 9502 case Builtin::BIlabs: 9503 case Builtin::BIllabs: 9504 case Builtin::BIfabsf: 9505 case Builtin::BIfabs: 9506 case Builtin::BIfabsl: 9507 return Builtin::BIcabsf; 9508 } 9509 } 9510 llvm_unreachable("Unable to convert function"); 9511 } 9512 9513 static unsigned getAbsoluteValueFunctionKind(const FunctionDecl *FDecl) { 9514 const IdentifierInfo *FnInfo = FDecl->getIdentifier(); 9515 if (!FnInfo) 9516 return 0; 9517 9518 switch (FDecl->getBuiltinID()) { 9519 default: 9520 return 0; 9521 case Builtin::BI__builtin_abs: 9522 case Builtin::BI__builtin_fabs: 9523 case Builtin::BI__builtin_fabsf: 9524 case Builtin::BI__builtin_fabsl: 9525 case Builtin::BI__builtin_labs: 9526 case Builtin::BI__builtin_llabs: 9527 case Builtin::BI__builtin_cabs: 9528 case Builtin::BI__builtin_cabsf: 9529 case Builtin::BI__builtin_cabsl: 9530 case Builtin::BIabs: 9531 case Builtin::BIlabs: 9532 case Builtin::BIllabs: 9533 case Builtin::BIfabs: 9534 case Builtin::BIfabsf: 9535 case Builtin::BIfabsl: 9536 case Builtin::BIcabs: 9537 case Builtin::BIcabsf: 9538 case Builtin::BIcabsl: 9539 return FDecl->getBuiltinID(); 9540 } 9541 llvm_unreachable("Unknown Builtin type"); 9542 } 9543 9544 // If the replacement is valid, emit a note with replacement function. 9545 // Additionally, suggest including the proper header if not already included. 
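// e.g. (illustrative) for abs(someDouble) in C++ this suggests "std::abs",
// plus a note to include <cmath> if no suitable overload is already visible;
// in C it falls back to the matching builtin's name and header instead.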
9546 static void emitReplacement(Sema &S, SourceLocation Loc, SourceRange Range, 9547 unsigned AbsKind, QualType ArgType) { 9548 bool EmitHeaderHint = true; 9549 const char *HeaderName = nullptr; 9550 const char *FunctionName = nullptr; 9551 if (S.getLangOpts().CPlusPlus && !ArgType->isAnyComplexType()) { 9552 FunctionName = "std::abs"; 9553 if (ArgType->isIntegralOrEnumerationType()) { 9554 HeaderName = "cstdlib"; 9555 } else if (ArgType->isRealFloatingType()) { 9556 HeaderName = "cmath"; 9557 } else { 9558 llvm_unreachable("Invalid Type"); 9559 } 9560 9561 // Lookup all std::abs 9562 if (NamespaceDecl *Std = S.getStdNamespace()) { 9563 LookupResult R(S, &S.Context.Idents.get("abs"), Loc, Sema::LookupAnyName); 9564 R.suppressDiagnostics(); 9565 S.LookupQualifiedName(R, Std); 9566 9567 for (const auto *I : R) { 9568 const FunctionDecl *FDecl = nullptr; 9569 if (const UsingShadowDecl *UsingD = dyn_cast<UsingShadowDecl>(I)) { 9570 FDecl = dyn_cast<FunctionDecl>(UsingD->getTargetDecl()); 9571 } else { 9572 FDecl = dyn_cast<FunctionDecl>(I); 9573 } 9574 if (!FDecl) 9575 continue; 9576 9577 // Found std::abs(), check that they are the right ones. 9578 if (FDecl->getNumParams() != 1) 9579 continue; 9580 9581 // Check that the parameter type can handle the argument. 9582 QualType ParamType = FDecl->getParamDecl(0)->getType(); 9583 if (getAbsoluteValueKind(ArgType) == getAbsoluteValueKind(ParamType) && 9584 S.Context.getTypeSize(ArgType) <= 9585 S.Context.getTypeSize(ParamType)) { 9586 // Found a function, don't need the header hint. 9587 EmitHeaderHint = false; 9588 break; 9589 } 9590 } 9591 } 9592 } else { 9593 FunctionName = S.Context.BuiltinInfo.getName(AbsKind); 9594 HeaderName = S.Context.BuiltinInfo.getHeaderName(AbsKind); 9595 9596 if (HeaderName) { 9597 DeclarationName DN(&S.Context.Idents.get(FunctionName)); 9598 LookupResult R(S, DN, Loc, Sema::LookupAnyName); 9599 R.suppressDiagnostics(); 9600 S.LookupName(R, S.getCurScope()); 9601 9602 if (R.isSingleResult()) { 9603 FunctionDecl *FD = dyn_cast<FunctionDecl>(R.getFoundDecl()); 9604 if (FD && FD->getBuiltinID() == AbsKind) { 9605 EmitHeaderHint = false; 9606 } else { 9607 return; 9608 } 9609 } else if (!R.empty()) { 9610 return; 9611 } 9612 } 9613 } 9614 9615 S.Diag(Loc, diag::note_replace_abs_function) 9616 << FunctionName << FixItHint::CreateReplacement(Range, FunctionName); 9617 9618 if (!HeaderName) 9619 return; 9620 9621 if (!EmitHeaderHint) 9622 return; 9623 9624 S.Diag(Loc, diag::note_include_header_or_declare) << HeaderName 9625 << FunctionName; 9626 } 9627 9628 template <std::size_t StrLen> 9629 static bool IsStdFunction(const FunctionDecl *FDecl, 9630 const char (&Str)[StrLen]) { 9631 if (!FDecl) 9632 return false; 9633 if (!FDecl->getIdentifier() || !FDecl->getIdentifier()->isStr(Str)) 9634 return false; 9635 if (!FDecl->isInStdNamespace()) 9636 return false; 9637 9638 return true; 9639 } 9640 9641 // Warn when using the wrong abs() function. 9642 void Sema::CheckAbsoluteValueFunction(const CallExpr *Call, 9643 const FunctionDecl *FDecl) { 9644 if (Call->getNumArgs() != 1) 9645 return; 9646 9647 unsigned AbsKind = getAbsoluteValueFunctionKind(FDecl); 9648 bool IsStdAbs = IsStdFunction(FDecl, "abs"); 9649 if (AbsKind == 0 && !IsStdAbs) 9650 return; 9651 9652 QualType ArgType = Call->getArg(0)->IgnoreParenImpCasts()->getType(); 9653 QualType ParamType = Call->getArg(0)->getType(); 9654 9655 // Unsigned types cannot be negative. Suggest removing the absolute value 9656 // function call. 
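  // e.g. (illustrative) abs(someUnsignedValue) is a no-op, so the note below
  // carries a FixIt that removes the callee entirely.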
9657 if (ArgType->isUnsignedIntegerType()) { 9658 const char *FunctionName = 9659 IsStdAbs ? "std::abs" : Context.BuiltinInfo.getName(AbsKind); 9660 Diag(Call->getExprLoc(), diag::warn_unsigned_abs) << ArgType << ParamType; 9661 Diag(Call->getExprLoc(), diag::note_remove_abs) 9662 << FunctionName 9663 << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange()); 9664 return; 9665 } 9666 9667 // Taking the absolute value of a pointer is very suspicious, they probably 9668 // wanted to index into an array, dereference a pointer, call a function, etc. 9669 if (ArgType->isPointerType() || ArgType->canDecayToPointerType()) { 9670 unsigned DiagType = 0; 9671 if (ArgType->isFunctionType()) 9672 DiagType = 1; 9673 else if (ArgType->isArrayType()) 9674 DiagType = 2; 9675 9676 Diag(Call->getExprLoc(), diag::warn_pointer_abs) << DiagType << ArgType; 9677 return; 9678 } 9679 9680 // std::abs has overloads which prevent most of the absolute value problems 9681 // from occurring. 9682 if (IsStdAbs) 9683 return; 9684 9685 AbsoluteValueKind ArgValueKind = getAbsoluteValueKind(ArgType); 9686 AbsoluteValueKind ParamValueKind = getAbsoluteValueKind(ParamType); 9687 9688 // The argument and parameter are the same kind. Check if they are the right 9689 // size. 9690 if (ArgValueKind == ParamValueKind) { 9691 if (Context.getTypeSize(ArgType) <= Context.getTypeSize(ParamType)) 9692 return; 9693 9694 unsigned NewAbsKind = getBestAbsFunction(Context, ArgType, AbsKind); 9695 Diag(Call->getExprLoc(), diag::warn_abs_too_small) 9696 << FDecl << ArgType << ParamType; 9697 9698 if (NewAbsKind == 0) 9699 return; 9700 9701 emitReplacement(*this, Call->getExprLoc(), 9702 Call->getCallee()->getSourceRange(), NewAbsKind, ArgType); 9703 return; 9704 } 9705 9706 // ArgValueKind != ParamValueKind 9707 // The wrong type of absolute value function was used. Attempt to find the 9708 // proper one. 9709 unsigned NewAbsKind = changeAbsFunction(AbsKind, ArgValueKind); 9710 NewAbsKind = getBestAbsFunction(Context, ArgType, NewAbsKind); 9711 if (NewAbsKind == 0) 9712 return; 9713 9714 Diag(Call->getExprLoc(), diag::warn_wrong_absolute_value_type) 9715 << FDecl << ParamValueKind << ArgValueKind; 9716 9717 emitReplacement(*this, Call->getExprLoc(), 9718 Call->getCallee()->getSourceRange(), NewAbsKind, ArgType); 9719 } 9720 9721 //===--- CHECK: Warn on use of std::max and unsigned zero. r---------------===// 9722 void Sema::CheckMaxUnsignedZero(const CallExpr *Call, 9723 const FunctionDecl *FDecl) { 9724 if (!Call || !FDecl) return; 9725 9726 // Ignore template specializations and macros. 9727 if (inTemplateInstantiation()) return; 9728 if (Call->getExprLoc().isMacroID()) return; 9729 9730 // Only care about the one template argument, two function parameter std::max 9731 if (Call->getNumArgs() != 2) return; 9732 if (!IsStdFunction(FDecl, "max")) return; 9733 const auto * ArgList = FDecl->getTemplateSpecializationArgs(); 9734 if (!ArgList) return; 9735 if (ArgList->size() != 1) return; 9736 9737 // Check that template type argument is unsigned integer. 9738 const auto& TA = ArgList->get(0); 9739 if (TA.getKind() != TemplateArgument::Type) return; 9740 QualType ArgType = TA.getAsType(); 9741 if (!ArgType->isUnsignedIntegerType()) return; 9742 9743 // See if either argument is a literal zero. 
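  // e.g. (illustrative) std::max(0u, someUnsigned) can never select the
  // literal zero, so a single literal-zero operand is reported and the note
  // suggests dropping the call, leaving just "(someUnsigned)".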
9744 auto IsLiteralZeroArg = [](const Expr* E) -> bool { 9745 const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E); 9746 if (!MTE) return false; 9747 const auto *Num = dyn_cast<IntegerLiteral>(MTE->getSubExpr()); 9748 if (!Num) return false; 9749 if (Num->getValue() != 0) return false; 9750 return true; 9751 }; 9752 9753 const Expr *FirstArg = Call->getArg(0); 9754 const Expr *SecondArg = Call->getArg(1); 9755 const bool IsFirstArgZero = IsLiteralZeroArg(FirstArg); 9756 const bool IsSecondArgZero = IsLiteralZeroArg(SecondArg); 9757 9758 // Only warn when exactly one argument is zero. 9759 if (IsFirstArgZero == IsSecondArgZero) return; 9760 9761 SourceRange FirstRange = FirstArg->getSourceRange(); 9762 SourceRange SecondRange = SecondArg->getSourceRange(); 9763 9764 SourceRange ZeroRange = IsFirstArgZero ? FirstRange : SecondRange; 9765 9766 Diag(Call->getExprLoc(), diag::warn_max_unsigned_zero) 9767 << IsFirstArgZero << Call->getCallee()->getSourceRange() << ZeroRange; 9768 9769 // Deduce what parts to remove so that "std::max(0u, foo)" becomes "(foo)". 9770 SourceRange RemovalRange; 9771 if (IsFirstArgZero) { 9772 RemovalRange = SourceRange(FirstRange.getBegin(), 9773 SecondRange.getBegin().getLocWithOffset(-1)); 9774 } else { 9775 RemovalRange = SourceRange(getLocForEndOfToken(FirstRange.getEnd()), 9776 SecondRange.getEnd()); 9777 } 9778 9779 Diag(Call->getExprLoc(), diag::note_remove_max_call) 9780 << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange()) 9781 << FixItHint::CreateRemoval(RemovalRange); 9782 } 9783 9784 //===--- CHECK: Standard memory functions ---------------------------------===// 9785 9786 /// Takes the expression passed to the size_t parameter of functions 9787 /// such as memcmp, strncat, etc and warns if it's a comparison. 9788 /// 9789 /// This is to catch typos like `if (memcmp(&a, &b, sizeof(a) > 0))`. 9790 static bool CheckMemorySizeofForComparison(Sema &S, const Expr *E, 9791 IdentifierInfo *FnName, 9792 SourceLocation FnLoc, 9793 SourceLocation RParenLoc) { 9794 const BinaryOperator *Size = dyn_cast<BinaryOperator>(E); 9795 if (!Size) 9796 return false; 9797 9798 // if E is binop and op is <=>, >, <, >=, <=, ==, &&, ||: 9799 if (!Size->isComparisonOp() && !Size->isLogicalOp()) 9800 return false; 9801 9802 SourceRange SizeRange = Size->getSourceRange(); 9803 S.Diag(Size->getOperatorLoc(), diag::warn_memsize_comparison) 9804 << SizeRange << FnName; 9805 S.Diag(FnLoc, diag::note_memsize_comparison_paren) 9806 << FnName 9807 << FixItHint::CreateInsertion( 9808 S.getLocForEndOfToken(Size->getLHS()->getEndLoc()), ")") 9809 << FixItHint::CreateRemoval(RParenLoc); 9810 S.Diag(SizeRange.getBegin(), diag::note_memsize_comparison_cast_silence) 9811 << FixItHint::CreateInsertion(SizeRange.getBegin(), "(size_t)(") 9812 << FixItHint::CreateInsertion(S.getLocForEndOfToken(SizeRange.getEnd()), 9813 ")"); 9814 9815 return true; 9816 } 9817 9818 /// Determine whether the given type is or contains a dynamic class type 9819 /// (e.g., whether it has a vtable). 9820 static const CXXRecordDecl *getContainedDynamicClass(QualType T, 9821 bool &IsContained) { 9822 // Look through array types while ignoring qualifiers. 9823 const Type *Ty = T->getBaseElementTypeUnsafe(); 9824 IsContained = false; 9825 9826 const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl(); 9827 RD = RD ? RD->getDefinition() : nullptr; 9828 if (!RD || RD->isInvalidDecl()) 9829 return nullptr; 9830 9831 if (RD->isDynamicClass()) 9832 return RD; 9833 9834 // Check all the fields. 
If any bases were dynamic, the class is dynamic. 9835 // It's impossible for a class to transitively contain itself by value, so 9836 // infinite recursion is impossible. 9837 for (auto *FD : RD->fields()) { 9838 bool SubContained; 9839 if (const CXXRecordDecl *ContainedRD = 9840 getContainedDynamicClass(FD->getType(), SubContained)) { 9841 IsContained = true; 9842 return ContainedRD; 9843 } 9844 } 9845 9846 return nullptr; 9847 } 9848 9849 static const UnaryExprOrTypeTraitExpr *getAsSizeOfExpr(const Expr *E) { 9850 if (const auto *Unary = dyn_cast<UnaryExprOrTypeTraitExpr>(E)) 9851 if (Unary->getKind() == UETT_SizeOf) 9852 return Unary; 9853 return nullptr; 9854 } 9855 9856 /// If E is a sizeof expression, returns its argument expression, 9857 /// otherwise returns NULL. 9858 static const Expr *getSizeOfExprArg(const Expr *E) { 9859 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E)) 9860 if (!SizeOf->isArgumentType()) 9861 return SizeOf->getArgumentExpr()->IgnoreParenImpCasts(); 9862 return nullptr; 9863 } 9864 9865 /// If E is a sizeof expression, returns its argument type. 9866 static QualType getSizeOfArgType(const Expr *E) { 9867 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E)) 9868 return SizeOf->getTypeOfArgument(); 9869 return QualType(); 9870 } 9871 9872 namespace { 9873 9874 struct SearchNonTrivialToInitializeField 9875 : DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField> { 9876 using Super = 9877 DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField>; 9878 9879 SearchNonTrivialToInitializeField(const Expr *E, Sema &S) : E(E), S(S) {} 9880 9881 void visitWithKind(QualType::PrimitiveDefaultInitializeKind PDIK, QualType FT, 9882 SourceLocation SL) { 9883 if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) { 9884 asDerived().visitArray(PDIK, AT, SL); 9885 return; 9886 } 9887 9888 Super::visitWithKind(PDIK, FT, SL); 9889 } 9890 9891 void visitARCStrong(QualType FT, SourceLocation SL) { 9892 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1); 9893 } 9894 void visitARCWeak(QualType FT, SourceLocation SL) { 9895 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1); 9896 } 9897 void visitStruct(QualType FT, SourceLocation SL) { 9898 for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields()) 9899 visit(FD->getType(), FD->getLocation()); 9900 } 9901 void visitArray(QualType::PrimitiveDefaultInitializeKind PDIK, 9902 const ArrayType *AT, SourceLocation SL) { 9903 visit(getContext().getBaseElementType(AT), SL); 9904 } 9905 void visitTrivial(QualType FT, SourceLocation SL) {} 9906 9907 static void diag(QualType RT, const Expr *E, Sema &S) { 9908 SearchNonTrivialToInitializeField(E, S).visitStruct(RT, SourceLocation()); 9909 } 9910 9911 ASTContext &getContext() { return S.getASTContext(); } 9912 9913 const Expr *E; 9914 Sema &S; 9915 }; 9916 9917 struct SearchNonTrivialToCopyField 9918 : CopiedTypeVisitor<SearchNonTrivialToCopyField, false> { 9919 using Super = CopiedTypeVisitor<SearchNonTrivialToCopyField, false>; 9920 9921 SearchNonTrivialToCopyField(const Expr *E, Sema &S) : E(E), S(S) {} 9922 9923 void visitWithKind(QualType::PrimitiveCopyKind PCK, QualType FT, 9924 SourceLocation SL) { 9925 if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) { 9926 asDerived().visitArray(PCK, AT, SL); 9927 return; 9928 } 9929 9930 Super::visitWithKind(PCK, FT, SL); 9931 } 9932 9933 void visitARCStrong(QualType FT, SourceLocation SL) { 9934 S.DiagRuntimeBehavior(SL, E, 
S.PDiag(diag::note_nontrivial_field) << 0); 9935 } 9936 void visitARCWeak(QualType FT, SourceLocation SL) { 9937 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0); 9938 } 9939 void visitStruct(QualType FT, SourceLocation SL) { 9940 for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields()) 9941 visit(FD->getType(), FD->getLocation()); 9942 } 9943 void visitArray(QualType::PrimitiveCopyKind PCK, const ArrayType *AT, 9944 SourceLocation SL) { 9945 visit(getContext().getBaseElementType(AT), SL); 9946 } 9947 void preVisit(QualType::PrimitiveCopyKind PCK, QualType FT, 9948 SourceLocation SL) {} 9949 void visitTrivial(QualType FT, SourceLocation SL) {} 9950 void visitVolatileTrivial(QualType FT, SourceLocation SL) {} 9951 9952 static void diag(QualType RT, const Expr *E, Sema &S) { 9953 SearchNonTrivialToCopyField(E, S).visitStruct(RT, SourceLocation()); 9954 } 9955 9956 ASTContext &getContext() { return S.getASTContext(); } 9957 9958 const Expr *E; 9959 Sema &S; 9960 }; 9961 9962 } 9963 9964 /// Detect if \c SizeofExpr is likely to calculate the sizeof an object. 9965 static bool doesExprLikelyComputeSize(const Expr *SizeofExpr) { 9966 SizeofExpr = SizeofExpr->IgnoreParenImpCasts(); 9967 9968 if (const auto *BO = dyn_cast<BinaryOperator>(SizeofExpr)) { 9969 if (BO->getOpcode() != BO_Mul && BO->getOpcode() != BO_Add) 9970 return false; 9971 9972 return doesExprLikelyComputeSize(BO->getLHS()) || 9973 doesExprLikelyComputeSize(BO->getRHS()); 9974 } 9975 9976 return getAsSizeOfExpr(SizeofExpr) != nullptr; 9977 } 9978 9979 /// Check if the ArgLoc originated from a macro passed to the call at CallLoc. 9980 /// 9981 /// \code 9982 /// #define MACRO 0 9983 /// foo(MACRO); 9984 /// foo(0); 9985 /// \endcode 9986 /// 9987 /// This should return true for the first call to foo, but not for the second 9988 /// (regardless of whether foo is a macro or function). 9989 static bool isArgumentExpandedFromMacro(SourceManager &SM, 9990 SourceLocation CallLoc, 9991 SourceLocation ArgLoc) { 9992 if (!CallLoc.isMacroID()) 9993 return SM.getFileID(CallLoc) != SM.getFileID(ArgLoc); 9994 9995 return SM.getFileID(SM.getImmediateMacroCallerLoc(CallLoc)) != 9996 SM.getFileID(SM.getImmediateMacroCallerLoc(ArgLoc)); 9997 } 9998 9999 /// Diagnose cases like 'memset(buf, sizeof(buf), 0)', which should have the 10000 /// last two arguments transposed. 10001 static void CheckMemaccessSize(Sema &S, unsigned BId, const CallExpr *Call) { 10002 if (BId != Builtin::BImemset && BId != Builtin::BIbzero) 10003 return; 10004 10005 const Expr *SizeArg = 10006 Call->getArg(BId == Builtin::BImemset ? 2 : 1)->IgnoreImpCasts(); 10007 10008 auto isLiteralZero = [](const Expr *E) { 10009 return isa<IntegerLiteral>(E) && cast<IntegerLiteral>(E)->getValue() == 0; 10010 }; 10011 10012 // If we're memsetting or bzeroing 0 bytes, then this is likely an error. 10013 SourceLocation CallLoc = Call->getRParenLoc(); 10014 SourceManager &SM = S.getSourceManager(); 10015 if (isLiteralZero(SizeArg) && 10016 !isArgumentExpandedFromMacro(SM, CallLoc, SizeArg->getExprLoc())) { 10017 10018 SourceLocation DiagLoc = SizeArg->getExprLoc(); 10019 10020 // Some platforms #define bzero to __builtin_memset. See if this is the 10021 // case, and if so, emit a better diagnostic. 
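    // e.g. (illustrative) given "#define bzero(p, n) __builtin_memset(p, 0, n)",
    // a call bzero(buf, 0) arrives here as a memset with a literal-zero size,
    // and naming 'bzero' in the warning reads better than naming memset.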
10022 if (BId == Builtin::BIbzero || 10023 (CallLoc.isMacroID() && Lexer::getImmediateMacroName( 10024 CallLoc, SM, S.getLangOpts()) == "bzero")) { 10025 S.Diag(DiagLoc, diag::warn_suspicious_bzero_size); 10026 S.Diag(DiagLoc, diag::note_suspicious_bzero_size_silence); 10027 } else if (!isLiteralZero(Call->getArg(1)->IgnoreImpCasts())) { 10028 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 0; 10029 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 0; 10030 } 10031 return; 10032 } 10033 10034 // If the second argument to a memset is a sizeof expression and the third 10035 // isn't, this is also likely an error. This should catch 10036 // 'memset(buf, sizeof(buf), 0xff)'. 10037 if (BId == Builtin::BImemset && 10038 doesExprLikelyComputeSize(Call->getArg(1)) && 10039 !doesExprLikelyComputeSize(Call->getArg(2))) { 10040 SourceLocation DiagLoc = Call->getArg(1)->getExprLoc(); 10041 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 1; 10042 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 1; 10043 return; 10044 } 10045 } 10046 10047 /// Check for dangerous or invalid arguments to memset(). 10048 /// 10049 /// This issues warnings on known problematic, dangerous or unspecified 10050 /// arguments to the standard 'memset', 'memcpy', 'memmove', and 'memcmp' 10051 /// function calls. 10052 /// 10053 /// \param Call The call expression to diagnose. 10054 void Sema::CheckMemaccessArguments(const CallExpr *Call, 10055 unsigned BId, 10056 IdentifierInfo *FnName) { 10057 assert(BId != 0); 10058 10059 // It is possible to have a non-standard definition of memset. Validate 10060 // we have enough arguments, and if not, abort further checking. 10061 unsigned ExpectedNumArgs = 10062 (BId == Builtin::BIstrndup || BId == Builtin::BIbzero ? 2 : 3); 10063 if (Call->getNumArgs() < ExpectedNumArgs) 10064 return; 10065 10066 unsigned LastArg = (BId == Builtin::BImemset || BId == Builtin::BIbzero || 10067 BId == Builtin::BIstrndup ? 1 : 2); 10068 unsigned LenArg = 10069 (BId == Builtin::BIbzero || BId == Builtin::BIstrndup ? 1 : 2); 10070 const Expr *LenExpr = Call->getArg(LenArg)->IgnoreParenImpCasts(); 10071 10072 if (CheckMemorySizeofForComparison(*this, LenExpr, FnName, 10073 Call->getBeginLoc(), Call->getRParenLoc())) 10074 return; 10075 10076 // Catch cases like 'memset(buf, sizeof(buf), 0)'. 10077 CheckMemaccessSize(*this, BId, Call); 10078 10079 // We have special checking when the length is a sizeof expression. 10080 QualType SizeOfArgTy = getSizeOfArgType(LenExpr); 10081 const Expr *SizeOfArg = getSizeOfExprArg(LenExpr); 10082 llvm::FoldingSetNodeID SizeOfArgID; 10083 10084 // Although widely used, 'bzero' is not a standard function. Be more strict 10085 // with the argument types before allowing diagnostics and only allow the 10086 // form bzero(ptr, sizeof(...)). 10087 QualType FirstArgTy = Call->getArg(0)->IgnoreParenImpCasts()->getType(); 10088 if (BId == Builtin::BIbzero && !FirstArgTy->getAs<PointerType>()) 10089 return; 10090 10091 for (unsigned ArgIdx = 0; ArgIdx != LastArg; ++ArgIdx) { 10092 const Expr *Dest = Call->getArg(ArgIdx)->IgnoreParenImpCasts(); 10093 SourceRange ArgRange = Call->getArg(ArgIdx)->getSourceRange(); 10094 10095 QualType DestTy = Dest->getType(); 10096 QualType PointeeTy; 10097 if (const PointerType *DestPtrTy = DestTy->getAs<PointerType>()) { 10098 PointeeTy = DestPtrTy->getPointeeType(); 10099 10100 // Never warn about void type pointers. This can be used to suppress 10101 // false positives. 
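      // e.g. (illustrative) memset((void *)&someObj, 0, sizeof(someObj)) is
      // deliberately not diagnosed; the "(void*)" FixIt emitted further down
      // relies on exactly this escape hatch.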
10102 if (PointeeTy->isVoidType()) 10103 continue; 10104 10105 // Catch "memset(p, 0, sizeof(p))" -- needs to be sizeof(*p). Do this by 10106 // actually comparing the expressions for equality. Because computing the 10107 // expression IDs can be expensive, we only do this if the diagnostic is 10108 // enabled. 10109 if (SizeOfArg && 10110 !Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess, 10111 SizeOfArg->getExprLoc())) { 10112 // We only compute IDs for expressions if the warning is enabled, and 10113 // cache the sizeof arg's ID. 10114 if (SizeOfArgID == llvm::FoldingSetNodeID()) 10115 SizeOfArg->Profile(SizeOfArgID, Context, true); 10116 llvm::FoldingSetNodeID DestID; 10117 Dest->Profile(DestID, Context, true); 10118 if (DestID == SizeOfArgID) { 10119 // TODO: For strncpy() and friends, this could suggest sizeof(dst) 10120 // over sizeof(src) as well. 10121 unsigned ActionIdx = 0; // Default is to suggest dereferencing. 10122 StringRef ReadableName = FnName->getName(); 10123 10124 if (const UnaryOperator *UnaryOp = dyn_cast<UnaryOperator>(Dest)) 10125 if (UnaryOp->getOpcode() == UO_AddrOf) 10126 ActionIdx = 1; // If its an address-of operator, just remove it. 10127 if (!PointeeTy->isIncompleteType() && 10128 (Context.getTypeSize(PointeeTy) == Context.getCharWidth())) 10129 ActionIdx = 2; // If the pointee's size is sizeof(char), 10130 // suggest an explicit length. 10131 10132 // If the function is defined as a builtin macro, do not show macro 10133 // expansion. 10134 SourceLocation SL = SizeOfArg->getExprLoc(); 10135 SourceRange DSR = Dest->getSourceRange(); 10136 SourceRange SSR = SizeOfArg->getSourceRange(); 10137 SourceManager &SM = getSourceManager(); 10138 10139 if (SM.isMacroArgExpansion(SL)) { 10140 ReadableName = Lexer::getImmediateMacroName(SL, SM, LangOpts); 10141 SL = SM.getSpellingLoc(SL); 10142 DSR = SourceRange(SM.getSpellingLoc(DSR.getBegin()), 10143 SM.getSpellingLoc(DSR.getEnd())); 10144 SSR = SourceRange(SM.getSpellingLoc(SSR.getBegin()), 10145 SM.getSpellingLoc(SSR.getEnd())); 10146 } 10147 10148 DiagRuntimeBehavior(SL, SizeOfArg, 10149 PDiag(diag::warn_sizeof_pointer_expr_memaccess) 10150 << ReadableName 10151 << PointeeTy 10152 << DestTy 10153 << DSR 10154 << SSR); 10155 DiagRuntimeBehavior(SL, SizeOfArg, 10156 PDiag(diag::warn_sizeof_pointer_expr_memaccess_note) 10157 << ActionIdx 10158 << SSR); 10159 10160 break; 10161 } 10162 } 10163 10164 // Also check for cases where the sizeof argument is the exact same 10165 // type as the memory argument, and where it points to a user-defined 10166 // record type. 10167 if (SizeOfArgTy != QualType()) { 10168 if (PointeeTy->isRecordType() && 10169 Context.typesAreCompatible(SizeOfArgTy, DestTy)) { 10170 DiagRuntimeBehavior(LenExpr->getExprLoc(), Dest, 10171 PDiag(diag::warn_sizeof_pointer_type_memaccess) 10172 << FnName << SizeOfArgTy << ArgIdx 10173 << PointeeTy << Dest->getSourceRange() 10174 << LenExpr->getSourceRange()); 10175 break; 10176 } 10177 } 10178 } else if (DestTy->isArrayType()) { 10179 PointeeTy = DestTy; 10180 } 10181 10182 if (PointeeTy == QualType()) 10183 continue; 10184 10185 // Always complain about dynamic classes. 10186 bool IsContained; 10187 if (const CXXRecordDecl *ContainedRD = 10188 getContainedDynamicClass(PointeeTy, IsContained)) { 10189 10190 unsigned OperationType = 0; 10191 const bool IsCmp = BId == Builtin::BImemcmp || BId == Builtin::BIbcmp; 10192 // "overwritten" if we're warning about the destination for any call 10193 // but memcmp; otherwise a verb appropriate to the call. 
10194 if (ArgIdx != 0 || IsCmp) { 10195 if (BId == Builtin::BImemcpy) 10196 OperationType = 1; 10197 else if(BId == Builtin::BImemmove) 10198 OperationType = 2; 10199 else if (IsCmp) 10200 OperationType = 3; 10201 } 10202 10203 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 10204 PDiag(diag::warn_dyn_class_memaccess) 10205 << (IsCmp ? ArgIdx + 2 : ArgIdx) << FnName 10206 << IsContained << ContainedRD << OperationType 10207 << Call->getCallee()->getSourceRange()); 10208 } else if (PointeeTy.hasNonTrivialObjCLifetime() && 10209 BId != Builtin::BImemset) 10210 DiagRuntimeBehavior( 10211 Dest->getExprLoc(), Dest, 10212 PDiag(diag::warn_arc_object_memaccess) 10213 << ArgIdx << FnName << PointeeTy 10214 << Call->getCallee()->getSourceRange()); 10215 else if (const auto *RT = PointeeTy->getAs<RecordType>()) { 10216 if ((BId == Builtin::BImemset || BId == Builtin::BIbzero) && 10217 RT->getDecl()->isNonTrivialToPrimitiveDefaultInitialize()) { 10218 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 10219 PDiag(diag::warn_cstruct_memaccess) 10220 << ArgIdx << FnName << PointeeTy << 0); 10221 SearchNonTrivialToInitializeField::diag(PointeeTy, Dest, *this); 10222 } else if ((BId == Builtin::BImemcpy || BId == Builtin::BImemmove) && 10223 RT->getDecl()->isNonTrivialToPrimitiveCopy()) { 10224 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 10225 PDiag(diag::warn_cstruct_memaccess) 10226 << ArgIdx << FnName << PointeeTy << 1); 10227 SearchNonTrivialToCopyField::diag(PointeeTy, Dest, *this); 10228 } else { 10229 continue; 10230 } 10231 } else 10232 continue; 10233 10234 DiagRuntimeBehavior( 10235 Dest->getExprLoc(), Dest, 10236 PDiag(diag::note_bad_memaccess_silence) 10237 << FixItHint::CreateInsertion(ArgRange.getBegin(), "(void*)")); 10238 break; 10239 } 10240 } 10241 10242 // A little helper routine: ignore addition and subtraction of integer literals. 10243 // This intentionally does not ignore all integer constant expressions because 10244 // we don't want to remove sizeof(). 10245 static const Expr *ignoreLiteralAdditions(const Expr *Ex, ASTContext &Ctx) { 10246 Ex = Ex->IgnoreParenCasts(); 10247 10248 while (true) { 10249 const BinaryOperator * BO = dyn_cast<BinaryOperator>(Ex); 10250 if (!BO || !BO->isAdditiveOp()) 10251 break; 10252 10253 const Expr *RHS = BO->getRHS()->IgnoreParenCasts(); 10254 const Expr *LHS = BO->getLHS()->IgnoreParenCasts(); 10255 10256 if (isa<IntegerLiteral>(RHS)) 10257 Ex = LHS; 10258 else if (isa<IntegerLiteral>(LHS)) 10259 Ex = RHS; 10260 else 10261 break; 10262 } 10263 10264 return Ex; 10265 } 10266 10267 static bool isConstantSizeArrayWithMoreThanOneElement(QualType Ty, 10268 ASTContext &Context) { 10269 // Only handle constant-sized or VLAs, but not flexible members. 10270 if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(Ty)) { 10271 // Only issue the FIXIT for arrays of size > 1. 10272 if (CAT->getSize().getSExtValue() <= 1) 10273 return false; 10274 } else if (!Ty->isVariableArrayType()) { 10275 return false; 10276 } 10277 return true; 10278 } 10279 10280 // Warn if the user has made the 'size' argument to strlcpy or strlcat 10281 // be the size of the source, instead of the destination. 
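// e.g. (illustrative) strlcpy(dst, src, sizeof(src)) is flagged, and when
// 'dst' is a constant-size array the note offers "sizeof(dst)" as a FixIt.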
10282 void Sema::CheckStrlcpycatArguments(const CallExpr *Call, 10283 IdentifierInfo *FnName) { 10284 10285 // Don't crash if the user has the wrong number of arguments 10286 unsigned NumArgs = Call->getNumArgs(); 10287 if ((NumArgs != 3) && (NumArgs != 4)) 10288 return; 10289 10290 const Expr *SrcArg = ignoreLiteralAdditions(Call->getArg(1), Context); 10291 const Expr *SizeArg = ignoreLiteralAdditions(Call->getArg(2), Context); 10292 const Expr *CompareWithSrc = nullptr; 10293 10294 if (CheckMemorySizeofForComparison(*this, SizeArg, FnName, 10295 Call->getBeginLoc(), Call->getRParenLoc())) 10296 return; 10297 10298 // Look for 'strlcpy(dst, x, sizeof(x))' 10299 if (const Expr *Ex = getSizeOfExprArg(SizeArg)) 10300 CompareWithSrc = Ex; 10301 else { 10302 // Look for 'strlcpy(dst, x, strlen(x))' 10303 if (const CallExpr *SizeCall = dyn_cast<CallExpr>(SizeArg)) { 10304 if (SizeCall->getBuiltinCallee() == Builtin::BIstrlen && 10305 SizeCall->getNumArgs() == 1) 10306 CompareWithSrc = ignoreLiteralAdditions(SizeCall->getArg(0), Context); 10307 } 10308 } 10309 10310 if (!CompareWithSrc) 10311 return; 10312 10313 // Determine if the argument to sizeof/strlen is equal to the source 10314 // argument. In principle there's all kinds of things you could do 10315 // here, for instance creating an == expression and evaluating it with 10316 // EvaluateAsBooleanCondition, but this uses a more direct technique: 10317 const DeclRefExpr *SrcArgDRE = dyn_cast<DeclRefExpr>(SrcArg); 10318 if (!SrcArgDRE) 10319 return; 10320 10321 const DeclRefExpr *CompareWithSrcDRE = dyn_cast<DeclRefExpr>(CompareWithSrc); 10322 if (!CompareWithSrcDRE || 10323 SrcArgDRE->getDecl() != CompareWithSrcDRE->getDecl()) 10324 return; 10325 10326 const Expr *OriginalSizeArg = Call->getArg(2); 10327 Diag(CompareWithSrcDRE->getBeginLoc(), diag::warn_strlcpycat_wrong_size) 10328 << OriginalSizeArg->getSourceRange() << FnName; 10329 10330 // Output a FIXIT hint if the destination is an array (rather than a 10331 // pointer to an array). This could be enhanced to handle some 10332 // pointers if we know the actual size, like if DstArg is 'array+2' 10333 // we could say 'sizeof(array)-2'. 10334 const Expr *DstArg = Call->getArg(0)->IgnoreParenImpCasts(); 10335 if (!isConstantSizeArrayWithMoreThanOneElement(DstArg->getType(), Context)) 10336 return; 10337 10338 SmallString<128> sizeString; 10339 llvm::raw_svector_ostream OS(sizeString); 10340 OS << "sizeof("; 10341 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 10342 OS << ")"; 10343 10344 Diag(OriginalSizeArg->getBeginLoc(), diag::note_strlcpycat_wrong_size) 10345 << FixItHint::CreateReplacement(OriginalSizeArg->getSourceRange(), 10346 OS.str()); 10347 } 10348 10349 /// Check if two expressions refer to the same declaration. 10350 static bool referToTheSameDecl(const Expr *E1, const Expr *E2) { 10351 if (const DeclRefExpr *D1 = dyn_cast_or_null<DeclRefExpr>(E1)) 10352 if (const DeclRefExpr *D2 = dyn_cast_or_null<DeclRefExpr>(E2)) 10353 return D1->getDecl() == D2->getDecl(); 10354 return false; 10355 } 10356 10357 static const Expr *getStrlenExprArg(const Expr *E) { 10358 if (const CallExpr *CE = dyn_cast<CallExpr>(E)) { 10359 const FunctionDecl *FD = CE->getDirectCallee(); 10360 if (!FD || FD->getMemoryFunctionKind() != Builtin::BIstrlen) 10361 return nullptr; 10362 return CE->getArg(0)->IgnoreParenCasts(); 10363 } 10364 return nullptr; 10365 } 10366 10367 // Warn on anti-patterns as the 'size' argument to strncat. 
// The correct size argument should look like the following:
//   strncat(dst, src, sizeof(dst) - strlen(dst) - 1);
void Sema::CheckStrncatArguments(const CallExpr *CE,
                                 IdentifierInfo *FnName) {
  // Don't crash if the user has the wrong number of arguments.
  if (CE->getNumArgs() < 3)
    return;
  const Expr *DstArg = CE->getArg(0)->IgnoreParenCasts();
  const Expr *SrcArg = CE->getArg(1)->IgnoreParenCasts();
  const Expr *LenArg = CE->getArg(2)->IgnoreParenCasts();

  if (CheckMemorySizeofForComparison(*this, LenArg, FnName, CE->getBeginLoc(),
                                     CE->getRParenLoc()))
    return;

  // Identify common expressions, which are wrongly used as the size argument
  // to strncat and may lead to buffer overflows.
  unsigned PatternType = 0;
  if (const Expr *SizeOfArg = getSizeOfExprArg(LenArg)) {
    // - sizeof(dst)
    if (referToTheSameDecl(SizeOfArg, DstArg))
      PatternType = 1;
    // - sizeof(src)
    else if (referToTheSameDecl(SizeOfArg, SrcArg))
      PatternType = 2;
  } else if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(LenArg)) {
    if (BE->getOpcode() == BO_Sub) {
      const Expr *L = BE->getLHS()->IgnoreParenCasts();
      const Expr *R = BE->getRHS()->IgnoreParenCasts();
      // - sizeof(dst) - strlen(dst)
      if (referToTheSameDecl(DstArg, getSizeOfExprArg(L)) &&
          referToTheSameDecl(DstArg, getStrlenExprArg(R)))
        PatternType = 1;
      // - sizeof(src) - (anything)
      else if (referToTheSameDecl(SrcArg, getSizeOfExprArg(L)))
        PatternType = 2;
    }
  }

  if (PatternType == 0)
    return;

  // Generate the diagnostic.
  SourceLocation SL = LenArg->getBeginLoc();
  SourceRange SR = LenArg->getSourceRange();
  SourceManager &SM = getSourceManager();

  // If the function is defined as a builtin macro, do not show macro expansion.
  if (SM.isMacroArgExpansion(SL)) {
    SL = SM.getSpellingLoc(SL);
    SR = SourceRange(SM.getSpellingLoc(SR.getBegin()),
                     SM.getSpellingLoc(SR.getEnd()));
  }

  // Check if the destination is an array (rather than a pointer to an array).
10423 QualType DstTy = DstArg->getType(); 10424 bool isKnownSizeArray = isConstantSizeArrayWithMoreThanOneElement(DstTy, 10425 Context); 10426 if (!isKnownSizeArray) { 10427 if (PatternType == 1) 10428 Diag(SL, diag::warn_strncat_wrong_size) << SR; 10429 else 10430 Diag(SL, diag::warn_strncat_src_size) << SR; 10431 return; 10432 } 10433 10434 if (PatternType == 1) 10435 Diag(SL, diag::warn_strncat_large_size) << SR; 10436 else 10437 Diag(SL, diag::warn_strncat_src_size) << SR; 10438 10439 SmallString<128> sizeString; 10440 llvm::raw_svector_ostream OS(sizeString); 10441 OS << "sizeof("; 10442 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 10443 OS << ") - "; 10444 OS << "strlen("; 10445 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 10446 OS << ") - 1"; 10447 10448 Diag(SL, diag::note_strncat_wrong_size) 10449 << FixItHint::CreateReplacement(SR, OS.str()); 10450 } 10451 10452 namespace { 10453 void CheckFreeArgumentsOnLvalue(Sema &S, const std::string &CalleeName, 10454 const UnaryOperator *UnaryExpr, const Decl *D) { 10455 if (isa<FieldDecl, FunctionDecl, VarDecl>(D)) { 10456 S.Diag(UnaryExpr->getBeginLoc(), diag::warn_free_nonheap_object) 10457 << CalleeName << 0 /*object: */ << cast<NamedDecl>(D); 10458 return; 10459 } 10460 } 10461 10462 void CheckFreeArgumentsAddressof(Sema &S, const std::string &CalleeName, 10463 const UnaryOperator *UnaryExpr) { 10464 if (const auto *Lvalue = dyn_cast<DeclRefExpr>(UnaryExpr->getSubExpr())) { 10465 const Decl *D = Lvalue->getDecl(); 10466 if (isa<VarDecl, FunctionDecl>(D)) 10467 return CheckFreeArgumentsOnLvalue(S, CalleeName, UnaryExpr, D); 10468 } 10469 10470 if (const auto *Lvalue = dyn_cast<MemberExpr>(UnaryExpr->getSubExpr())) 10471 return CheckFreeArgumentsOnLvalue(S, CalleeName, UnaryExpr, 10472 Lvalue->getMemberDecl()); 10473 } 10474 10475 void CheckFreeArgumentsPlus(Sema &S, const std::string &CalleeName, 10476 const UnaryOperator *UnaryExpr) { 10477 const auto *Lambda = dyn_cast<LambdaExpr>( 10478 UnaryExpr->getSubExpr()->IgnoreImplicitAsWritten()->IgnoreParens()); 10479 if (!Lambda) 10480 return; 10481 10482 S.Diag(Lambda->getBeginLoc(), diag::warn_free_nonheap_object) 10483 << CalleeName << 2 /*object: lambda expression*/; 10484 } 10485 10486 void CheckFreeArgumentsStackArray(Sema &S, const std::string &CalleeName, 10487 const DeclRefExpr *Lvalue) { 10488 const auto *Var = dyn_cast<VarDecl>(Lvalue->getDecl()); 10489 if (Var == nullptr) 10490 return; 10491 10492 S.Diag(Lvalue->getBeginLoc(), diag::warn_free_nonheap_object) 10493 << CalleeName << 0 /*object: */ << Var; 10494 } 10495 10496 void CheckFreeArgumentsCast(Sema &S, const std::string &CalleeName, 10497 const CastExpr *Cast) { 10498 SmallString<128> SizeString; 10499 llvm::raw_svector_ostream OS(SizeString); 10500 10501 clang::CastKind Kind = Cast->getCastKind(); 10502 if (Kind == clang::CK_BitCast && 10503 !Cast->getSubExpr()->getType()->isFunctionPointerType()) 10504 return; 10505 if (Kind == clang::CK_IntegralToPointer && 10506 !isa<IntegerLiteral>( 10507 Cast->getSubExpr()->IgnoreParenImpCasts()->IgnoreParens())) 10508 return; 10509 10510 switch (Cast->getCastKind()) { 10511 case clang::CK_BitCast: 10512 case clang::CK_IntegralToPointer: 10513 case clang::CK_FunctionToPointerDecay: 10514 OS << '\''; 10515 Cast->printPretty(OS, nullptr, S.getPrintingPolicy()); 10516 OS << '\''; 10517 break; 10518 default: 10519 return; 10520 } 10521 10522 S.Diag(Cast->getBeginLoc(), diag::warn_free_nonheap_object) 10523 << CalleeName << 0 /*object: */ << OS.str(); 10524 } 10525 } 
// namespace
10526 
10527 /// Alerts the user that they are attempting to free a non-malloc'd object.
10528 void Sema::CheckFreeArguments(const CallExpr *E) {
10529   const std::string CalleeName =
10530       dyn_cast<FunctionDecl>(E->getCalleeDecl())->getQualifiedNameAsString();
10531 
10532   { // Prefer something that doesn't involve a cast to make things simpler.
10533     const Expr *Arg = E->getArg(0)->IgnoreParenCasts();
10534     if (const auto *UnaryExpr = dyn_cast<UnaryOperator>(Arg))
10535       switch (UnaryExpr->getOpcode()) {
10536       case UnaryOperator::Opcode::UO_AddrOf:
10537         return CheckFreeArgumentsAddressof(*this, CalleeName, UnaryExpr);
10538       case UnaryOperator::Opcode::UO_Plus:
10539         return CheckFreeArgumentsPlus(*this, CalleeName, UnaryExpr);
10540       default:
10541         break;
10542       }
10543 
10544     if (const auto *Lvalue = dyn_cast<DeclRefExpr>(Arg))
10545       if (Lvalue->getType()->isArrayType())
10546         return CheckFreeArgumentsStackArray(*this, CalleeName, Lvalue);
10547 
10548     if (const auto *Label = dyn_cast<AddrLabelExpr>(Arg)) {
10549       Diag(Label->getBeginLoc(), diag::warn_free_nonheap_object)
10550           << CalleeName << 0 /*object: */ << Label->getLabel()->getIdentifier();
10551       return;
10552     }
10553 
10554     if (isa<BlockExpr>(Arg)) {
10555       Diag(Arg->getBeginLoc(), diag::warn_free_nonheap_object)
10556           << CalleeName << 1 /*object: block*/;
10557       return;
10558     }
10559   }
10560   // Maybe the cast was important, check after the other cases.
10561   if (const auto *Cast = dyn_cast<CastExpr>(E->getArg(0)))
10562     return CheckFreeArgumentsCast(*this, CalleeName, Cast);
10563 }
10564 
10565 void
10566 Sema::CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
10567                          SourceLocation ReturnLoc,
10568                          bool isObjCMethod,
10569                          const AttrVec *Attrs,
10570                          const FunctionDecl *FD) {
10571   // Check if the return value is null but should not be.
10572   if (((Attrs && hasSpecificAttr<ReturnsNonNullAttr>(*Attrs)) ||
10573        (!isObjCMethod && isNonNullType(Context, lhsType))) &&
10574       CheckNonNullExpr(*this, RetValExp))
10575     Diag(ReturnLoc, diag::warn_null_ret)
10576         << (isObjCMethod ? 1 : 0) << RetValExp->getSourceRange();
10577 
10578   // C++11 [basic.stc.dynamic.allocation]p4:
10579   //   If an allocation function declared with a non-throwing
10580   //   exception-specification fails to allocate storage, it shall return
10581   //   a null pointer. Any other allocation function that fails to allocate
10582   //   storage shall indicate failure only by throwing an exception [...]
10583   if (FD) {
10584     OverloadedOperatorKind Op = FD->getOverloadedOperator();
10585     if (Op == OO_New || Op == OO_Array_New) {
10586       const FunctionProtoType *Proto
10587         = FD->getType()->castAs<FunctionProtoType>();
10588       if (!Proto->isNothrow(/*ResultIfDependent*/true) &&
10589           CheckNonNullExpr(*this, RetValExp))
10590         Diag(ReturnLoc, diag::warn_operator_new_returns_null)
10591             << FD << getLangOpts().CPlusPlus11;
10592     }
10593   }
10594 
10595   // PPC MMA non-pointer types are not allowed as a return type. Checking the
10596   // type here prevents the user from using a PPC MMA type as a trailing return type.
10597   if (Context.getTargetInfo().getTriple().isPPC64())
10598     CheckPPCMMAType(RetValExp->getType(), ReturnLoc);
10599 }
10600 
10601 //===--- CHECK: Floating-Point comparisons (-Wfloat-equal) ---------------===//
10602 
10603 /// Check for comparisons of floating point operands using != and ==.
10604 /// Issue a warning if these are not self-comparisons, as they are not likely
10605 /// to do what the programmer intended.
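/// For example (illustrative):
///   double D = ...; if (D == 0.3) { ... }  // warned: 0.3 is not exact
/// Self-comparisons such as 'D == D' and comparisons against exactly
/// representable literals such as 1.0 are deliberately left alone (see below).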
10606 void Sema::CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr *RHS) { 10607 Expr* LeftExprSansParen = LHS->IgnoreParenImpCasts(); 10608 Expr* RightExprSansParen = RHS->IgnoreParenImpCasts(); 10609 10610 // Special case: check for x == x (which is OK). 10611 // Do not emit warnings for such cases. 10612 if (DeclRefExpr* DRL = dyn_cast<DeclRefExpr>(LeftExprSansParen)) 10613 if (DeclRefExpr* DRR = dyn_cast<DeclRefExpr>(RightExprSansParen)) 10614 if (DRL->getDecl() == DRR->getDecl()) 10615 return; 10616 10617 // Special case: check for comparisons against literals that can be exactly 10618 // represented by APFloat. In such cases, do not emit a warning. This 10619 // is a heuristic: often comparison against such literals are used to 10620 // detect if a value in a variable has not changed. This clearly can 10621 // lead to false negatives. 10622 if (FloatingLiteral* FLL = dyn_cast<FloatingLiteral>(LeftExprSansParen)) { 10623 if (FLL->isExact()) 10624 return; 10625 } else 10626 if (FloatingLiteral* FLR = dyn_cast<FloatingLiteral>(RightExprSansParen)) 10627 if (FLR->isExact()) 10628 return; 10629 10630 // Check for comparisons with builtin types. 10631 if (CallExpr* CL = dyn_cast<CallExpr>(LeftExprSansParen)) 10632 if (CL->getBuiltinCallee()) 10633 return; 10634 10635 if (CallExpr* CR = dyn_cast<CallExpr>(RightExprSansParen)) 10636 if (CR->getBuiltinCallee()) 10637 return; 10638 10639 // Emit the diagnostic. 10640 Diag(Loc, diag::warn_floatingpoint_eq) 10641 << LHS->getSourceRange() << RHS->getSourceRange(); 10642 } 10643 10644 //===--- CHECK: Integer mixed-sign comparisons (-Wsign-compare) --------===// 10645 //===--- CHECK: Lossy implicit conversions (-Wconversion) --------------===// 10646 10647 namespace { 10648 10649 /// Structure recording the 'active' range of an integer-valued 10650 /// expression. 10651 struct IntRange { 10652 /// The number of bits active in the int. Note that this includes exactly one 10653 /// sign bit if !NonNegative. 10654 unsigned Width; 10655 10656 /// True if the int is known not to have negative values. If so, all leading 10657 /// bits before Width are known zero, otherwise they are known to be the 10658 /// same as the MSB within Width. 10659 bool NonNegative; 10660 10661 IntRange(unsigned Width, bool NonNegative) 10662 : Width(Width), NonNegative(NonNegative) {} 10663 10664 /// Number of bits excluding the sign bit. 10665 unsigned valueBits() const { 10666 return NonNegative ? Width : Width - 1; 10667 } 10668 10669 /// Returns the range of the bool type. 10670 static IntRange forBoolType() { 10671 return IntRange(1, true); 10672 } 10673 10674 /// Returns the range of an opaque value of the given integral type. 10675 static IntRange forValueOfType(ASTContext &C, QualType T) { 10676 return forValueOfCanonicalType(C, 10677 T->getCanonicalTypeInternal().getTypePtr()); 10678 } 10679 10680 /// Returns the range of an opaque value of a canonical integral type. 10681 static IntRange forValueOfCanonicalType(ASTContext &C, const Type *T) { 10682 assert(T->isCanonicalUnqualified()); 10683 10684 if (const VectorType *VT = dyn_cast<VectorType>(T)) 10685 T = VT->getElementType().getTypePtr(); 10686 if (const ComplexType *CT = dyn_cast<ComplexType>(T)) 10687 T = CT->getElementType().getTypePtr(); 10688 if (const AtomicType *AT = dyn_cast<AtomicType>(T)) 10689 T = AT->getValueType().getTypePtr(); 10690 10691 if (!C.getLangOpts().CPlusPlus) { 10692 // For enum types in C code, use the underlying datatype. 
10693 if (const EnumType *ET = dyn_cast<EnumType>(T)) 10694 T = ET->getDecl()->getIntegerType().getDesugaredType(C).getTypePtr(); 10695 } else if (const EnumType *ET = dyn_cast<EnumType>(T)) { 10696 // For enum types in C++, use the known bit width of the enumerators. 10697 EnumDecl *Enum = ET->getDecl(); 10698 // In C++11, enums can have a fixed underlying type. Use this type to 10699 // compute the range. 10700 if (Enum->isFixed()) { 10701 return IntRange(C.getIntWidth(QualType(T, 0)), 10702 !ET->isSignedIntegerOrEnumerationType()); 10703 } 10704 10705 unsigned NumPositive = Enum->getNumPositiveBits(); 10706 unsigned NumNegative = Enum->getNumNegativeBits(); 10707 10708 if (NumNegative == 0) 10709 return IntRange(NumPositive, true/*NonNegative*/); 10710 else 10711 return IntRange(std::max(NumPositive + 1, NumNegative), 10712 false/*NonNegative*/); 10713 } 10714 10715 if (const auto *EIT = dyn_cast<ExtIntType>(T)) 10716 return IntRange(EIT->getNumBits(), EIT->isUnsigned()); 10717 10718 const BuiltinType *BT = cast<BuiltinType>(T); 10719 assert(BT->isInteger()); 10720 10721 return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger()); 10722 } 10723 10724 /// Returns the "target" range of a canonical integral type, i.e. 10725 /// the range of values expressible in the type. 10726 /// 10727 /// This matches forValueOfCanonicalType except that enums have the 10728 /// full range of their type, not the range of their enumerators. 10729 static IntRange forTargetOfCanonicalType(ASTContext &C, const Type *T) { 10730 assert(T->isCanonicalUnqualified()); 10731 10732 if (const VectorType *VT = dyn_cast<VectorType>(T)) 10733 T = VT->getElementType().getTypePtr(); 10734 if (const ComplexType *CT = dyn_cast<ComplexType>(T)) 10735 T = CT->getElementType().getTypePtr(); 10736 if (const AtomicType *AT = dyn_cast<AtomicType>(T)) 10737 T = AT->getValueType().getTypePtr(); 10738 if (const EnumType *ET = dyn_cast<EnumType>(T)) 10739 T = C.getCanonicalType(ET->getDecl()->getIntegerType()).getTypePtr(); 10740 10741 if (const auto *EIT = dyn_cast<ExtIntType>(T)) 10742 return IntRange(EIT->getNumBits(), EIT->isUnsigned()); 10743 10744 const BuiltinType *BT = cast<BuiltinType>(T); 10745 assert(BT->isInteger()); 10746 10747 return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger()); 10748 } 10749 10750 /// Returns the supremum of two ranges: i.e. their conservative merge. 10751 static IntRange join(IntRange L, IntRange R) { 10752 bool Unsigned = L.NonNegative && R.NonNegative; 10753 return IntRange(std::max(L.valueBits(), R.valueBits()) + !Unsigned, 10754 L.NonNegative && R.NonNegative); 10755 } 10756 10757 /// Return the range of a bitwise-AND of the two ranges. 10758 static IntRange bit_and(IntRange L, IntRange R) { 10759 unsigned Bits = std::max(L.Width, R.Width); 10760 bool NonNegative = false; 10761 if (L.NonNegative) { 10762 Bits = std::min(Bits, L.Width); 10763 NonNegative = true; 10764 } 10765 if (R.NonNegative) { 10766 Bits = std::min(Bits, R.Width); 10767 NonNegative = true; 10768 } 10769 return IntRange(Bits, NonNegative); 10770 } 10771 10772 /// Return the range of a sum of the two ranges. 10773 static IntRange sum(IntRange L, IntRange R) { 10774 bool Unsigned = L.NonNegative && R.NonNegative; 10775 return IntRange(std::max(L.valueBits(), R.valueBits()) + 1 + !Unsigned, 10776 Unsigned); 10777 } 10778 10779 /// Return the range of a difference of the two ranges. 
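  /// Worked example (illustrative): for L = R = [0, 255] (Width 8,
  /// NonNegative), the difference can reach -255, so CanWiden is false but
  /// !Unsigned adds a sign bit, giving Width 9 and NonNegative == false.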
10780 static IntRange difference(IntRange L, IntRange R) { 10781 // We need a 1-bit-wider range if: 10782 // 1) LHS can be negative: least value can be reduced. 10783 // 2) RHS can be negative: greatest value can be increased. 10784 bool CanWiden = !L.NonNegative || !R.NonNegative; 10785 bool Unsigned = L.NonNegative && R.Width == 0; 10786 return IntRange(std::max(L.valueBits(), R.valueBits()) + CanWiden + 10787 !Unsigned, 10788 Unsigned); 10789 } 10790 10791 /// Return the range of a product of the two ranges. 10792 static IntRange product(IntRange L, IntRange R) { 10793 // If both LHS and RHS can be negative, we can form 10794 // -2^L * -2^R = 2^(L + R) 10795 // which requires L + R + 1 value bits to represent. 10796 bool CanWiden = !L.NonNegative && !R.NonNegative; 10797 bool Unsigned = L.NonNegative && R.NonNegative; 10798 return IntRange(L.valueBits() + R.valueBits() + CanWiden + !Unsigned, 10799 Unsigned); 10800 } 10801 10802 /// Return the range of a remainder operation between the two ranges. 10803 static IntRange rem(IntRange L, IntRange R) { 10804 // The result of a remainder can't be larger than the result of 10805 // either side. The sign of the result is the sign of the LHS. 10806 bool Unsigned = L.NonNegative; 10807 return IntRange(std::min(L.valueBits(), R.valueBits()) + !Unsigned, 10808 Unsigned); 10809 } 10810 }; 10811 10812 } // namespace 10813 10814 static IntRange GetValueRange(ASTContext &C, llvm::APSInt &value, 10815 unsigned MaxWidth) { 10816 if (value.isSigned() && value.isNegative()) 10817 return IntRange(value.getMinSignedBits(), false); 10818 10819 if (value.getBitWidth() > MaxWidth) 10820 value = value.trunc(MaxWidth); 10821 10822 // isNonNegative() just checks the sign bit without considering 10823 // signedness. 10824 return IntRange(value.getActiveBits(), true); 10825 } 10826 10827 static IntRange GetValueRange(ASTContext &C, APValue &result, QualType Ty, 10828 unsigned MaxWidth) { 10829 if (result.isInt()) 10830 return GetValueRange(C, result.getInt(), MaxWidth); 10831 10832 if (result.isVector()) { 10833 IntRange R = GetValueRange(C, result.getVectorElt(0), Ty, MaxWidth); 10834 for (unsigned i = 1, e = result.getVectorLength(); i != e; ++i) { 10835 IntRange El = GetValueRange(C, result.getVectorElt(i), Ty, MaxWidth); 10836 R = IntRange::join(R, El); 10837 } 10838 return R; 10839 } 10840 10841 if (result.isComplexInt()) { 10842 IntRange R = GetValueRange(C, result.getComplexIntReal(), MaxWidth); 10843 IntRange I = GetValueRange(C, result.getComplexIntImag(), MaxWidth); 10844 return IntRange::join(R, I); 10845 } 10846 10847 // This can happen with lossless casts to intptr_t of "based" lvalues. 10848 // Assume it might use arbitrary bits. 10849 // FIXME: The only reason we need to pass the type in here is to get 10850 // the sign right on this one case. It would be nice if APValue 10851 // preserved this. 10852 assert(result.isLValue() || result.isAddrLabelDiff()); 10853 return IntRange(MaxWidth, Ty->isUnsignedIntegerOrEnumerationType()); 10854 } 10855 10856 static QualType GetExprType(const Expr *E) { 10857 QualType Ty = E->getType(); 10858 if (const AtomicType *AtomicRHS = Ty->getAs<AtomicType>()) 10859 Ty = AtomicRHS->getValueType(); 10860 return Ty; 10861 } 10862 10863 /// Pseudo-evaluate the given integer expression, estimating the 10864 /// range of values it might take. 10865 /// 10866 /// \param MaxWidth The width to which the value will be truncated. 
10867 /// \param Approximate If \c true, return a likely range for the result: in
10868 /// particular, assume that arithmetic on narrower types doesn't leave
10869 /// those types. If \c false, return a range including all possible
10870 /// result values.
10871 static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth,
10872                              bool InConstantContext, bool Approximate) {
10873   E = E->IgnoreParens();
10874 
10875   // Try a full evaluation first.
10876   Expr::EvalResult result;
10877   if (E->EvaluateAsRValue(result, C, InConstantContext))
10878     return GetValueRange(C, result.Val, GetExprType(E), MaxWidth);
10879 
10880   // I think we only want to look through implicit casts here; if the
10881   // user has an explicit widening cast, we should treat the value as
10882   // being of the new, wider type.
10883   if (const auto *CE = dyn_cast<ImplicitCastExpr>(E)) {
10884     if (CE->getCastKind() == CK_NoOp || CE->getCastKind() == CK_LValueToRValue)
10885       return GetExprRange(C, CE->getSubExpr(), MaxWidth, InConstantContext,
10886                           Approximate);
10887 
10888     IntRange OutputTypeRange = IntRange::forValueOfType(C, GetExprType(CE));
10889 
10890     bool isIntegerCast = CE->getCastKind() == CK_IntegralCast ||
10891                          CE->getCastKind() == CK_BooleanToSignedIntegral;
10892 
10893     // Assume that non-integer casts can span the full range of the type.
10894     if (!isIntegerCast)
10895       return OutputTypeRange;
10896 
10897     IntRange SubRange = GetExprRange(C, CE->getSubExpr(),
10898                                      std::min(MaxWidth, OutputTypeRange.Width),
10899                                      InConstantContext, Approximate);
10900 
10901     // Bail out if the subexpr's range is as wide as the cast type.
10902     if (SubRange.Width >= OutputTypeRange.Width)
10903       return OutputTypeRange;
10904 
10905     // Otherwise, we take the smaller width, and we're non-negative if
10906     // either the output type or the subexpr is.
10907     return IntRange(SubRange.Width,
10908                     SubRange.NonNegative || OutputTypeRange.NonNegative);
10909   }
10910 
10911   if (const auto *CO = dyn_cast<ConditionalOperator>(E)) {
10912     // If we can fold the condition, just take that operand.
10913     bool CondResult;
10914     if (CO->getCond()->EvaluateAsBooleanCondition(CondResult, C))
10915       return GetExprRange(C,
10916                           CondResult ? CO->getTrueExpr() : CO->getFalseExpr(),
10917                           MaxWidth, InConstantContext, Approximate);
10918 
10919     // Otherwise, conservatively merge.
10920     // GetExprRange requires an integer expression, but a throw expression
10921     // results in a void type.
10922     Expr *E = CO->getTrueExpr();
10923     IntRange L = E->getType()->isVoidType()
10924                      ? IntRange{0, true}
10925                      : GetExprRange(C, E, MaxWidth, InConstantContext, Approximate);
10926     E = CO->getFalseExpr();
10927     IntRange R = E->getType()->isVoidType()
10928                      ? IntRange{0, true}
10929                      : GetExprRange(C, E, MaxWidth, InConstantContext, Approximate);
10930     return IntRange::join(L, R);
10931   }
10932 
10933   if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
10934     IntRange (*Combine)(IntRange, IntRange) = IntRange::join;
10935 
10936     switch (BO->getOpcode()) {
10937     case BO_Cmp:
10938       llvm_unreachable("builtin <=> should have class type");
10939 
10940     // Boolean-valued operations are single-bit and positive.
10941     case BO_LAnd:
10942     case BO_LOr:
10943     case BO_LT:
10944     case BO_GT:
10945     case BO_LE:
10946     case BO_GE:
10947     case BO_EQ:
10948     case BO_NE:
10949       return IntRange::forBoolType();
10950 
10951     // The type of the assignments is the type of the LHS, so the RHS
10952     // is not necessarily the same type.
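    // For example (illustrative), in 'short S; S *= SomeWiderInt;' the range
    // of the compound assignment is that of 'short' (the LHS type), which is
    // what forValueOfType(C, GetExprType(E)) below reports.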
10953 case BO_MulAssign: 10954 case BO_DivAssign: 10955 case BO_RemAssign: 10956 case BO_AddAssign: 10957 case BO_SubAssign: 10958 case BO_XorAssign: 10959 case BO_OrAssign: 10960 // TODO: bitfields? 10961 return IntRange::forValueOfType(C, GetExprType(E)); 10962 10963 // Simple assignments just pass through the RHS, which will have 10964 // been coerced to the LHS type. 10965 case BO_Assign: 10966 // TODO: bitfields? 10967 return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext, 10968 Approximate); 10969 10970 // Operations with opaque sources are black-listed. 10971 case BO_PtrMemD: 10972 case BO_PtrMemI: 10973 return IntRange::forValueOfType(C, GetExprType(E)); 10974 10975 // Bitwise-and uses the *infinum* of the two source ranges. 10976 case BO_And: 10977 case BO_AndAssign: 10978 Combine = IntRange::bit_and; 10979 break; 10980 10981 // Left shift gets black-listed based on a judgement call. 10982 case BO_Shl: 10983 // ...except that we want to treat '1 << (blah)' as logically 10984 // positive. It's an important idiom. 10985 if (IntegerLiteral *I 10986 = dyn_cast<IntegerLiteral>(BO->getLHS()->IgnoreParenCasts())) { 10987 if (I->getValue() == 1) { 10988 IntRange R = IntRange::forValueOfType(C, GetExprType(E)); 10989 return IntRange(R.Width, /*NonNegative*/ true); 10990 } 10991 } 10992 LLVM_FALLTHROUGH; 10993 10994 case BO_ShlAssign: 10995 return IntRange::forValueOfType(C, GetExprType(E)); 10996 10997 // Right shift by a constant can narrow its left argument. 10998 case BO_Shr: 10999 case BO_ShrAssign: { 11000 IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth, InConstantContext, 11001 Approximate); 11002 11003 // If the shift amount is a positive constant, drop the width by 11004 // that much. 11005 if (Optional<llvm::APSInt> shift = 11006 BO->getRHS()->getIntegerConstantExpr(C)) { 11007 if (shift->isNonNegative()) { 11008 unsigned zext = shift->getZExtValue(); 11009 if (zext >= L.Width) 11010 L.Width = (L.NonNegative ? 0 : 1); 11011 else 11012 L.Width -= zext; 11013 } 11014 } 11015 11016 return L; 11017 } 11018 11019 // Comma acts as its right operand. 11020 case BO_Comma: 11021 return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext, 11022 Approximate); 11023 11024 case BO_Add: 11025 if (!Approximate) 11026 Combine = IntRange::sum; 11027 break; 11028 11029 case BO_Sub: 11030 if (BO->getLHS()->getType()->isPointerType()) 11031 return IntRange::forValueOfType(C, GetExprType(E)); 11032 if (!Approximate) 11033 Combine = IntRange::difference; 11034 break; 11035 11036 case BO_Mul: 11037 if (!Approximate) 11038 Combine = IntRange::product; 11039 break; 11040 11041 // The width of a division result is mostly determined by the size 11042 // of the LHS. 11043 case BO_Div: { 11044 // Don't 'pre-truncate' the operands. 11045 unsigned opWidth = C.getIntWidth(GetExprType(E)); 11046 IntRange L = GetExprRange(C, BO->getLHS(), opWidth, InConstantContext, 11047 Approximate); 11048 11049 // If the divisor is constant, use that. 11050 if (Optional<llvm::APSInt> divisor = 11051 BO->getRHS()->getIntegerConstantExpr(C)) { 11052 unsigned log2 = divisor->logBase2(); // floor(log_2(divisor)) 11053 if (log2 >= L.Width) 11054 L.Width = (L.NonNegative ? 0 : 1); 11055 else 11056 L.Width = std::min(L.Width - log2, MaxWidth); 11057 return L; 11058 } 11059 11060 // Otherwise, just use the LHS's width. 11061 // FIXME: This is wrong if the LHS could be its minimal value and the RHS 11062 // could be -1. 
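    // For instance (illustrative), 'INT_MIN / -1' overflows and would need one
    // more bit than the LHS provides, which this estimate does not add.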
11063 IntRange R = GetExprRange(C, BO->getRHS(), opWidth, InConstantContext, 11064 Approximate); 11065 return IntRange(L.Width, L.NonNegative && R.NonNegative); 11066 } 11067 11068 case BO_Rem: 11069 Combine = IntRange::rem; 11070 break; 11071 11072 // The default behavior is okay for these. 11073 case BO_Xor: 11074 case BO_Or: 11075 break; 11076 } 11077 11078 // Combine the two ranges, but limit the result to the type in which we 11079 // performed the computation. 11080 QualType T = GetExprType(E); 11081 unsigned opWidth = C.getIntWidth(T); 11082 IntRange L = 11083 GetExprRange(C, BO->getLHS(), opWidth, InConstantContext, Approximate); 11084 IntRange R = 11085 GetExprRange(C, BO->getRHS(), opWidth, InConstantContext, Approximate); 11086 IntRange C = Combine(L, R); 11087 C.NonNegative |= T->isUnsignedIntegerOrEnumerationType(); 11088 C.Width = std::min(C.Width, MaxWidth); 11089 return C; 11090 } 11091 11092 if (const auto *UO = dyn_cast<UnaryOperator>(E)) { 11093 switch (UO->getOpcode()) { 11094 // Boolean-valued operations are white-listed. 11095 case UO_LNot: 11096 return IntRange::forBoolType(); 11097 11098 // Operations with opaque sources are black-listed. 11099 case UO_Deref: 11100 case UO_AddrOf: // should be impossible 11101 return IntRange::forValueOfType(C, GetExprType(E)); 11102 11103 default: 11104 return GetExprRange(C, UO->getSubExpr(), MaxWidth, InConstantContext, 11105 Approximate); 11106 } 11107 } 11108 11109 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E)) 11110 return GetExprRange(C, OVE->getSourceExpr(), MaxWidth, InConstantContext, 11111 Approximate); 11112 11113 if (const auto *BitField = E->getSourceBitField()) 11114 return IntRange(BitField->getBitWidthValue(C), 11115 BitField->getType()->isUnsignedIntegerOrEnumerationType()); 11116 11117 return IntRange::forValueOfType(C, GetExprType(E)); 11118 } 11119 11120 static IntRange GetExprRange(ASTContext &C, const Expr *E, 11121 bool InConstantContext, bool Approximate) { 11122 return GetExprRange(C, E, C.getIntWidth(GetExprType(E)), InConstantContext, 11123 Approximate); 11124 } 11125 11126 /// Checks whether the given value, which currently has the given 11127 /// source semantics, has the same value when coerced through the 11128 /// target semantics. 11129 static bool IsSameFloatAfterCast(const llvm::APFloat &value, 11130 const llvm::fltSemantics &Src, 11131 const llvm::fltSemantics &Tgt) { 11132 llvm::APFloat truncated = value; 11133 11134 bool ignored; 11135 truncated.convert(Src, llvm::APFloat::rmNearestTiesToEven, &ignored); 11136 truncated.convert(Tgt, llvm::APFloat::rmNearestTiesToEven, &ignored); 11137 11138 return truncated.bitwiseIsEqual(value); 11139 } 11140 11141 /// Checks whether the given value, which currently has the given 11142 /// source semantics, has the same value when coerced through the 11143 /// target semantics. 11144 /// 11145 /// The value might be a vector of floats (or a complex number). 
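/// For example (illustrative), the value 1.5 survives a double -> float ->
/// double round trip bitwise-unchanged, while 0.1 does not.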
11146 static bool IsSameFloatAfterCast(const APValue &value, 11147 const llvm::fltSemantics &Src, 11148 const llvm::fltSemantics &Tgt) { 11149 if (value.isFloat()) 11150 return IsSameFloatAfterCast(value.getFloat(), Src, Tgt); 11151 11152 if (value.isVector()) { 11153 for (unsigned i = 0, e = value.getVectorLength(); i != e; ++i) 11154 if (!IsSameFloatAfterCast(value.getVectorElt(i), Src, Tgt)) 11155 return false; 11156 return true; 11157 } 11158 11159 assert(value.isComplexFloat()); 11160 return (IsSameFloatAfterCast(value.getComplexFloatReal(), Src, Tgt) && 11161 IsSameFloatAfterCast(value.getComplexFloatImag(), Src, Tgt)); 11162 } 11163 11164 static void AnalyzeImplicitConversions(Sema &S, Expr *E, SourceLocation CC, 11165 bool IsListInit = false); 11166 11167 static bool IsEnumConstOrFromMacro(Sema &S, Expr *E) { 11168 // Suppress cases where we are comparing against an enum constant. 11169 if (const DeclRefExpr *DR = 11170 dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) 11171 if (isa<EnumConstantDecl>(DR->getDecl())) 11172 return true; 11173 11174 // Suppress cases where the value is expanded from a macro, unless that macro 11175 // is how a language represents a boolean literal. This is the case in both C 11176 // and Objective-C. 11177 SourceLocation BeginLoc = E->getBeginLoc(); 11178 if (BeginLoc.isMacroID()) { 11179 StringRef MacroName = Lexer::getImmediateMacroName( 11180 BeginLoc, S.getSourceManager(), S.getLangOpts()); 11181 return MacroName != "YES" && MacroName != "NO" && 11182 MacroName != "true" && MacroName != "false"; 11183 } 11184 11185 return false; 11186 } 11187 11188 static bool isKnownToHaveUnsignedValue(Expr *E) { 11189 return E->getType()->isIntegerType() && 11190 (!E->getType()->isSignedIntegerType() || 11191 !E->IgnoreParenImpCasts()->getType()->isSignedIntegerType()); 11192 } 11193 11194 namespace { 11195 /// The promoted range of values of a type. In general this has the 11196 /// following structure: 11197 /// 11198 /// |-----------| . . . |-----------| 11199 /// ^ ^ ^ ^ 11200 /// Min HoleMin HoleMax Max 11201 /// 11202 /// ... where there is only a hole if a signed type is promoted to unsigned 11203 /// (in which case Min and Max are the smallest and largest representable 11204 /// values). 11205 struct PromotedRange { 11206 // Min, or HoleMax if there is a hole. 11207 llvm::APSInt PromotedMin; 11208 // Max, or HoleMin if there is a hole. 11209 llvm::APSInt PromotedMax; 11210 11211 PromotedRange(IntRange R, unsigned BitWidth, bool Unsigned) { 11212 if (R.Width == 0) 11213 PromotedMin = PromotedMax = llvm::APSInt(BitWidth, Unsigned); 11214 else if (R.Width >= BitWidth && !Unsigned) { 11215 // Promotion made the type *narrower*. This happens when promoting 11216 // a < 32-bit unsigned / <= 32-bit signed bit-field to 'signed int'. 11217 // Treat all values of 'signed int' as being in range for now. 11218 PromotedMin = llvm::APSInt::getMinValue(BitWidth, Unsigned); 11219 PromotedMax = llvm::APSInt::getMaxValue(BitWidth, Unsigned); 11220 } else { 11221 PromotedMin = llvm::APSInt::getMinValue(R.Width, R.NonNegative) 11222 .extOrTrunc(BitWidth); 11223 PromotedMin.setIsUnsigned(Unsigned); 11224 11225 PromotedMax = llvm::APSInt::getMaxValue(R.Width, R.NonNegative) 11226 .extOrTrunc(BitWidth); 11227 PromotedMax.setIsUnsigned(Unsigned); 11228 } 11229 } 11230 11231 // Determine whether this range is contiguous (has no hole). 11232 bool isContiguous() const { return PromotedMin <= PromotedMax; } 11233 11234 // Where a constant value is within the range. 
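  // The result is a bit-mask of the relations between the constant and every
  // value in the range that are known to hold; for example (illustrative), a
  // constant strictly below PromotedMin maps to Less = LE | LT | NE.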
11235 enum ComparisonResult { 11236 LT = 0x1, 11237 LE = 0x2, 11238 GT = 0x4, 11239 GE = 0x8, 11240 EQ = 0x10, 11241 NE = 0x20, 11242 InRangeFlag = 0x40, 11243 11244 Less = LE | LT | NE, 11245 Min = LE | InRangeFlag, 11246 InRange = InRangeFlag, 11247 Max = GE | InRangeFlag, 11248 Greater = GE | GT | NE, 11249 11250 OnlyValue = LE | GE | EQ | InRangeFlag, 11251 InHole = NE 11252 }; 11253 11254 ComparisonResult compare(const llvm::APSInt &Value) const { 11255 assert(Value.getBitWidth() == PromotedMin.getBitWidth() && 11256 Value.isUnsigned() == PromotedMin.isUnsigned()); 11257 if (!isContiguous()) { 11258 assert(Value.isUnsigned() && "discontiguous range for signed compare"); 11259 if (Value.isMinValue()) return Min; 11260 if (Value.isMaxValue()) return Max; 11261 if (Value >= PromotedMin) return InRange; 11262 if (Value <= PromotedMax) return InRange; 11263 return InHole; 11264 } 11265 11266 switch (llvm::APSInt::compareValues(Value, PromotedMin)) { 11267 case -1: return Less; 11268 case 0: return PromotedMin == PromotedMax ? OnlyValue : Min; 11269 case 1: 11270 switch (llvm::APSInt::compareValues(Value, PromotedMax)) { 11271 case -1: return InRange; 11272 case 0: return Max; 11273 case 1: return Greater; 11274 } 11275 } 11276 11277 llvm_unreachable("impossible compare result"); 11278 } 11279 11280 static llvm::Optional<StringRef> 11281 constantValue(BinaryOperatorKind Op, ComparisonResult R, bool ConstantOnRHS) { 11282 if (Op == BO_Cmp) { 11283 ComparisonResult LTFlag = LT, GTFlag = GT; 11284 if (ConstantOnRHS) std::swap(LTFlag, GTFlag); 11285 11286 if (R & EQ) return StringRef("'std::strong_ordering::equal'"); 11287 if (R & LTFlag) return StringRef("'std::strong_ordering::less'"); 11288 if (R & GTFlag) return StringRef("'std::strong_ordering::greater'"); 11289 return llvm::None; 11290 } 11291 11292 ComparisonResult TrueFlag, FalseFlag; 11293 if (Op == BO_EQ) { 11294 TrueFlag = EQ; 11295 FalseFlag = NE; 11296 } else if (Op == BO_NE) { 11297 TrueFlag = NE; 11298 FalseFlag = EQ; 11299 } else { 11300 if ((Op == BO_LT || Op == BO_GE) ^ ConstantOnRHS) { 11301 TrueFlag = LT; 11302 FalseFlag = GE; 11303 } else { 11304 TrueFlag = GT; 11305 FalseFlag = LE; 11306 } 11307 if (Op == BO_GE || Op == BO_LE) 11308 std::swap(TrueFlag, FalseFlag); 11309 } 11310 if (R & TrueFlag) 11311 return StringRef("true"); 11312 if (R & FalseFlag) 11313 return StringRef("false"); 11314 return llvm::None; 11315 } 11316 }; 11317 } 11318 11319 static bool HasEnumType(Expr *E) { 11320 // Strip off implicit integral promotions. 11321 while (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) { 11322 if (ICE->getCastKind() != CK_IntegralCast && 11323 ICE->getCastKind() != CK_NoOp) 11324 break; 11325 E = ICE->getSubExpr(); 11326 } 11327 11328 return E->getType()->isEnumeralType(); 11329 } 11330 11331 static int classifyConstantValue(Expr *Constant) { 11332 // The values of this enumeration are used in the diagnostics 11333 // diag::warn_out_of_range_compare and diag::warn_tautological_bool_compare. 11334 enum ConstantValueKind { 11335 Miscellaneous = 0, 11336 LiteralTrue, 11337 LiteralFalse 11338 }; 11339 if (auto *BL = dyn_cast<CXXBoolLiteralExpr>(Constant)) 11340 return BL->getValue() ? 
ConstantValueKind::LiteralTrue
11341                          : ConstantValueKind::LiteralFalse;
11342   return ConstantValueKind::Miscellaneous;
11343 }
11344 
11345 static bool CheckTautologicalComparison(Sema &S, BinaryOperator *E,
11346                                         Expr *Constant, Expr *Other,
11347                                         const llvm::APSInt &Value,
11348                                         bool RhsConstant) {
11349   if (S.inTemplateInstantiation())
11350     return false;
11351 
11352   Expr *OriginalOther = Other;
11353 
11354   Constant = Constant->IgnoreParenImpCasts();
11355   Other = Other->IgnoreParenImpCasts();
11356 
11357   // Suppress warnings on tautological comparisons between values of the same
11358   // enumeration type. There are only two ways we could warn on this:
11359   //  - If the constant is outside the range of representable values of
11360   //    the enumeration. In such a case, we should warn about the cast
11361   //    to enumeration type, not about the comparison.
11362   //  - If the constant is the maximum / minimum in-range value. For an
11363   //    enumeration type, such comparisons can be meaningful and useful.
11364   if (Constant->getType()->isEnumeralType() &&
11365       S.Context.hasSameUnqualifiedType(Constant->getType(), Other->getType()))
11366     return false;
11367 
11368   IntRange OtherValueRange = GetExprRange(
11369       S.Context, Other, S.isConstantEvaluated(), /*Approximate*/ false);
11370 
11371   QualType OtherT = Other->getType();
11372   if (const auto *AT = OtherT->getAs<AtomicType>())
11373     OtherT = AT->getValueType();
11374   IntRange OtherTypeRange = IntRange::forValueOfType(S.Context, OtherT);
11375 
11376   // Special case for ObjC BOOL on targets where it's a typedef for a signed char
11377   // (namely, macOS). FIXME: IntRange::forValueOfType should do this.
11378   bool IsObjCSignedCharBool = S.getLangOpts().ObjC &&
11379                               S.NSAPIObj->isObjCBOOLType(OtherT) &&
11380                               OtherT->isSpecificBuiltinType(BuiltinType::SChar);
11381 
11382   // Whether we're treating Other as being a bool because of the form of
11383   // expression despite it having another type (typically 'int' in C).
11384   bool OtherIsBooleanDespiteType =
11385       !OtherT->isBooleanType() && Other->isKnownToHaveBooleanValue();
11386   if (OtherIsBooleanDespiteType || IsObjCSignedCharBool)
11387     OtherTypeRange = OtherValueRange = IntRange::forBoolType();
11388 
11389   // Check if all values in the range of possible values of this expression
11390   // lead to the same comparison outcome.
11391   PromotedRange OtherPromotedValueRange(OtherValueRange, Value.getBitWidth(),
11392                                         Value.isUnsigned());
11393   auto Cmp = OtherPromotedValueRange.compare(Value);
11394   auto Result = PromotedRange::constantValue(E->getOpcode(), Cmp, RhsConstant);
11395   if (!Result)
11396     return false;
11397 
11398   // Also consider the range determined by the type alone. This allows us to
11399   // classify the warning under the proper diagnostic group.
11400   bool TautologicalTypeCompare = false;
11401   {
11402     PromotedRange OtherPromotedTypeRange(OtherTypeRange, Value.getBitWidth(),
11403                                          Value.isUnsigned());
11404     auto TypeCmp = OtherPromotedTypeRange.compare(Value);
11405     if (auto TypeResult = PromotedRange::constantValue(E->getOpcode(), TypeCmp,
11406                                                        RhsConstant)) {
11407       TautologicalTypeCompare = true;
11408       Cmp = TypeCmp;
11409       Result = TypeResult;
11410     }
11411   }
11412 
11413   // Don't warn if the non-constant operand actually always evaluates to the
11414   // same value.
11415   if (!TautologicalTypeCompare && OtherValueRange.Width == 0)
11416     return false;
11417 
11418   // Suppress the diagnostic for an in-range comparison if the constant comes
11419   // from a macro or enumerator.
We don't want to diagnose 11420 // 11421 // some_long_value <= INT_MAX 11422 // 11423 // when sizeof(int) == sizeof(long). 11424 bool InRange = Cmp & PromotedRange::InRangeFlag; 11425 if (InRange && IsEnumConstOrFromMacro(S, Constant)) 11426 return false; 11427 11428 // A comparison of an unsigned bit-field against 0 is really a type problem, 11429 // even though at the type level the bit-field might promote to 'signed int'. 11430 if (Other->refersToBitField() && InRange && Value == 0 && 11431 Other->getType()->isUnsignedIntegerOrEnumerationType()) 11432 TautologicalTypeCompare = true; 11433 11434 // If this is a comparison to an enum constant, include that 11435 // constant in the diagnostic. 11436 const EnumConstantDecl *ED = nullptr; 11437 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Constant)) 11438 ED = dyn_cast<EnumConstantDecl>(DR->getDecl()); 11439 11440 // Should be enough for uint128 (39 decimal digits) 11441 SmallString<64> PrettySourceValue; 11442 llvm::raw_svector_ostream OS(PrettySourceValue); 11443 if (ED) { 11444 OS << '\'' << *ED << "' (" << Value << ")"; 11445 } else if (auto *BL = dyn_cast<ObjCBoolLiteralExpr>( 11446 Constant->IgnoreParenImpCasts())) { 11447 OS << (BL->getValue() ? "YES" : "NO"); 11448 } else { 11449 OS << Value; 11450 } 11451 11452 if (!TautologicalTypeCompare) { 11453 S.Diag(E->getOperatorLoc(), diag::warn_tautological_compare_value_range) 11454 << RhsConstant << OtherValueRange.Width << OtherValueRange.NonNegative 11455 << E->getOpcodeStr() << OS.str() << *Result 11456 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange(); 11457 return true; 11458 } 11459 11460 if (IsObjCSignedCharBool) { 11461 S.DiagRuntimeBehavior(E->getOperatorLoc(), E, 11462 S.PDiag(diag::warn_tautological_compare_objc_bool) 11463 << OS.str() << *Result); 11464 return true; 11465 } 11466 11467 // FIXME: We use a somewhat different formatting for the in-range cases and 11468 // cases involving boolean values for historical reasons. We should pick a 11469 // consistent way of presenting these diagnostics. 11470 if (!InRange || Other->isKnownToHaveBooleanValue()) { 11471 11472 S.DiagRuntimeBehavior( 11473 E->getOperatorLoc(), E, 11474 S.PDiag(!InRange ? diag::warn_out_of_range_compare 11475 : diag::warn_tautological_bool_compare) 11476 << OS.str() << classifyConstantValue(Constant) << OtherT 11477 << OtherIsBooleanDespiteType << *Result 11478 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange()); 11479 } else { 11480 bool IsCharTy = OtherT.withoutLocalFastQualifiers() == S.Context.CharTy; 11481 unsigned Diag = 11482 (isKnownToHaveUnsignedValue(OriginalOther) && Value == 0) 11483 ? (HasEnumType(OriginalOther) 11484 ? diag::warn_unsigned_enum_always_true_comparison 11485 : IsCharTy ? diag::warn_unsigned_char_always_true_comparison 11486 : diag::warn_unsigned_always_true_comparison) 11487 : diag::warn_tautological_constant_compare; 11488 11489 S.Diag(E->getOperatorLoc(), Diag) 11490 << RhsConstant << OtherT << E->getOpcodeStr() << OS.str() << *Result 11491 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange(); 11492 } 11493 11494 return true; 11495 } 11496 11497 /// Analyze the operands of the given comparison. Implements the 11498 /// fallback case from AnalyzeComparison. 11499 static void AnalyzeImpConvsInComparison(Sema &S, BinaryOperator *E) { 11500 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 11501 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 11502 } 11503 11504 /// Implements -Wsign-compare. 
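/// For example (illustrative): given 'unsigned U; int I;', the comparison
/// 'U < I' is warned about when I's range may be negative, because I is
/// converted to unsigned before the comparison.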
11505 /// 11506 /// \param E the binary operator to check for warnings 11507 static void AnalyzeComparison(Sema &S, BinaryOperator *E) { 11508 // The type the comparison is being performed in. 11509 QualType T = E->getLHS()->getType(); 11510 11511 // Only analyze comparison operators where both sides have been converted to 11512 // the same type. 11513 if (!S.Context.hasSameUnqualifiedType(T, E->getRHS()->getType())) 11514 return AnalyzeImpConvsInComparison(S, E); 11515 11516 // Don't analyze value-dependent comparisons directly. 11517 if (E->isValueDependent()) 11518 return AnalyzeImpConvsInComparison(S, E); 11519 11520 Expr *LHS = E->getLHS(); 11521 Expr *RHS = E->getRHS(); 11522 11523 if (T->isIntegralType(S.Context)) { 11524 Optional<llvm::APSInt> RHSValue = RHS->getIntegerConstantExpr(S.Context); 11525 Optional<llvm::APSInt> LHSValue = LHS->getIntegerConstantExpr(S.Context); 11526 11527 // We don't care about expressions whose result is a constant. 11528 if (RHSValue && LHSValue) 11529 return AnalyzeImpConvsInComparison(S, E); 11530 11531 // We only care about expressions where just one side is literal 11532 if ((bool)RHSValue ^ (bool)LHSValue) { 11533 // Is the constant on the RHS or LHS? 11534 const bool RhsConstant = (bool)RHSValue; 11535 Expr *Const = RhsConstant ? RHS : LHS; 11536 Expr *Other = RhsConstant ? LHS : RHS; 11537 const llvm::APSInt &Value = RhsConstant ? *RHSValue : *LHSValue; 11538 11539 // Check whether an integer constant comparison results in a value 11540 // of 'true' or 'false'. 11541 if (CheckTautologicalComparison(S, E, Const, Other, Value, RhsConstant)) 11542 return AnalyzeImpConvsInComparison(S, E); 11543 } 11544 } 11545 11546 if (!T->hasUnsignedIntegerRepresentation()) { 11547 // We don't do anything special if this isn't an unsigned integral 11548 // comparison: we're only interested in integral comparisons, and 11549 // signed comparisons only happen in cases we don't care to warn about. 11550 return AnalyzeImpConvsInComparison(S, E); 11551 } 11552 11553 LHS = LHS->IgnoreParenImpCasts(); 11554 RHS = RHS->IgnoreParenImpCasts(); 11555 11556 if (!S.getLangOpts().CPlusPlus) { 11557 // Avoid warning about comparison of integers with different signs when 11558 // RHS/LHS has a `typeof(E)` type whose sign is different from the sign of 11559 // the type of `E`. 11560 if (const auto *TET = dyn_cast<TypeOfExprType>(LHS->getType())) 11561 LHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts(); 11562 if (const auto *TET = dyn_cast<TypeOfExprType>(RHS->getType())) 11563 RHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts(); 11564 } 11565 11566 // Check to see if one of the (unmodified) operands is of different 11567 // signedness. 11568 Expr *signedOperand, *unsignedOperand; 11569 if (LHS->getType()->hasSignedIntegerRepresentation()) { 11570 assert(!RHS->getType()->hasSignedIntegerRepresentation() && 11571 "unsigned comparison between two signed integer expressions?"); 11572 signedOperand = LHS; 11573 unsignedOperand = RHS; 11574 } else if (RHS->getType()->hasSignedIntegerRepresentation()) { 11575 signedOperand = RHS; 11576 unsignedOperand = LHS; 11577 } else { 11578 return AnalyzeImpConvsInComparison(S, E); 11579 } 11580 11581 // Otherwise, calculate the effective range of the signed operand. 11582 IntRange signedRange = GetExprRange( 11583 S.Context, signedOperand, S.isConstantEvaluated(), /*Approximate*/ true); 11584 11585 // Go ahead and analyze implicit conversions in the operands. Note 11586 // that we skip the implicit conversions on both sides. 
11587   AnalyzeImplicitConversions(S, LHS, E->getOperatorLoc());
11588   AnalyzeImplicitConversions(S, RHS, E->getOperatorLoc());
11589 
11590   // If the signed range is non-negative, -Wsign-compare won't fire.
11591   if (signedRange.NonNegative)
11592     return;
11593 
11594   // For (in)equality comparisons, if the unsigned operand is a
11595   // constant which cannot collide with an overflowed signed operand,
11596   // then reinterpreting the signed operand as unsigned will not
11597   // change the result of the comparison.
11598   if (E->isEqualityOp()) {
11599     unsigned comparisonWidth = S.Context.getIntWidth(T);
11600     IntRange unsignedRange =
11601         GetExprRange(S.Context, unsignedOperand, S.isConstantEvaluated(),
11602                      /*Approximate*/ true);
11603 
11604     // We should never be unable to prove that the unsigned operand is
11605     // non-negative.
11606     assert(unsignedRange.NonNegative && "unsigned range includes negative?");
11607 
11608     if (unsignedRange.Width < comparisonWidth)
11609       return;
11610   }
11611 
11612   S.DiagRuntimeBehavior(E->getOperatorLoc(), E,
11613                         S.PDiag(diag::warn_mixed_sign_comparison)
11614                             << LHS->getType() << RHS->getType()
11615                             << LHS->getSourceRange() << RHS->getSourceRange());
11616 }
11617 
11618 /// Analyzes an attempt to assign the given value to a bitfield.
11619 ///
11620 /// Returns true if there was something fishy about the attempt.
11621 static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init,
11622                                       SourceLocation InitLoc) {
11623   assert(Bitfield->isBitField());
11624   if (Bitfield->isInvalidDecl())
11625     return false;
11626 
11627   // White-list bool bitfields.
11628   QualType BitfieldType = Bitfield->getType();
11629   if (BitfieldType->isBooleanType())
11630     return false;
11631 
11632   if (BitfieldType->isEnumeralType()) {
11633     EnumDecl *BitfieldEnumDecl = BitfieldType->castAs<EnumType>()->getDecl();
11634     // If the underlying enum type was not explicitly specified as an unsigned
11635     // type and the enum contains only positive values, MSVC++ will cause an
11636     // inconsistency by storing this as a signed type.
11637     if (S.getLangOpts().CPlusPlus11 &&
11638         !BitfieldEnumDecl->getIntegerTypeSourceInfo() &&
11639         BitfieldEnumDecl->getNumPositiveBits() > 0 &&
11640         BitfieldEnumDecl->getNumNegativeBits() == 0) {
11641       S.Diag(InitLoc, diag::warn_no_underlying_type_specified_for_enum_bitfield)
11642           << BitfieldEnumDecl;
11643     }
11644   }
11645 
11646   if (Bitfield->getType()->isBooleanType())
11647     return false;
11648 
11649   // Ignore value- or type-dependent expressions.
11650   if (Bitfield->getBitWidth()->isValueDependent() ||
11651       Bitfield->getBitWidth()->isTypeDependent() ||
11652       Init->isValueDependent() ||
11653       Init->isTypeDependent())
11654     return false;
11655 
11656   Expr *OriginalInit = Init->IgnoreParenImpCasts();
11657   unsigned FieldWidth = Bitfield->getBitWidthValue(S.Context);
11658 
11659   Expr::EvalResult Result;
11660   if (!OriginalInit->EvaluateAsInt(Result, S.Context,
11661                                    Expr::SE_AllowSideEffects)) {
11662     // The RHS is not constant. If the RHS has an enum type, make sure the
11663     // bitfield is wide enough to hold all the values of the enum without
11664     // truncation.
11665     if (const auto *EnumTy = OriginalInit->getType()->getAs<EnumType>()) {
11666       EnumDecl *ED = EnumTy->getDecl();
11667       bool SignedBitfield = BitfieldType->isSignedIntegerType();
11668 
11669       // Enum types are implicitly signed on Windows, so check if there are any
11670       // negative enumerators to see if the enum was intended to be signed or
11671       // not.
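      // For example (illustrative), 'enum E { A = -1, B };' has negative
      // enumerators and so is treated as intentionally signed, while
      // 'enum F { X, Y };' is not.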
11672 bool SignedEnum = ED->getNumNegativeBits() > 0; 11673 11674 // Check for surprising sign changes when assigning enum values to a 11675 // bitfield of different signedness. If the bitfield is signed and we 11676 // have exactly the right number of bits to store this unsigned enum, 11677 // suggest changing the enum to an unsigned type. This typically happens 11678 // on Windows where unfixed enums always use an underlying type of 'int'. 11679 unsigned DiagID = 0; 11680 if (SignedEnum && !SignedBitfield) { 11681 DiagID = diag::warn_unsigned_bitfield_assigned_signed_enum; 11682 } else if (SignedBitfield && !SignedEnum && 11683 ED->getNumPositiveBits() == FieldWidth) { 11684 DiagID = diag::warn_signed_bitfield_enum_conversion; 11685 } 11686 11687 if (DiagID) { 11688 S.Diag(InitLoc, DiagID) << Bitfield << ED; 11689 TypeSourceInfo *TSI = Bitfield->getTypeSourceInfo(); 11690 SourceRange TypeRange = 11691 TSI ? TSI->getTypeLoc().getSourceRange() : SourceRange(); 11692 S.Diag(Bitfield->getTypeSpecStartLoc(), diag::note_change_bitfield_sign) 11693 << SignedEnum << TypeRange; 11694 } 11695 11696 // Compute the required bitwidth. If the enum has negative values, we need 11697 // one more bit than the normal number of positive bits to represent the 11698 // sign bit. 11699 unsigned BitsNeeded = SignedEnum ? std::max(ED->getNumPositiveBits() + 1, 11700 ED->getNumNegativeBits()) 11701 : ED->getNumPositiveBits(); 11702 11703 // Check the bitwidth. 11704 if (BitsNeeded > FieldWidth) { 11705 Expr *WidthExpr = Bitfield->getBitWidth(); 11706 S.Diag(InitLoc, diag::warn_bitfield_too_small_for_enum) 11707 << Bitfield << ED; 11708 S.Diag(WidthExpr->getExprLoc(), diag::note_widen_bitfield) 11709 << BitsNeeded << ED << WidthExpr->getSourceRange(); 11710 } 11711 } 11712 11713 return false; 11714 } 11715 11716 llvm::APSInt Value = Result.Val.getInt(); 11717 11718 unsigned OriginalWidth = Value.getBitWidth(); 11719 11720 if (!Value.isSigned() || Value.isNegative()) 11721 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(OriginalInit)) 11722 if (UO->getOpcode() == UO_Minus || UO->getOpcode() == UO_Not) 11723 OriginalWidth = Value.getMinSignedBits(); 11724 11725 if (OriginalWidth <= FieldWidth) 11726 return false; 11727 11728 // Compute the value which the bitfield will contain. 11729 llvm::APSInt TruncatedValue = Value.trunc(FieldWidth); 11730 TruncatedValue.setIsSigned(BitfieldType->isSignedIntegerType()); 11731 11732 // Check whether the stored value is equal to the original value. 11733 TruncatedValue = TruncatedValue.extend(OriginalWidth); 11734 if (llvm::APSInt::isSameValue(Value, TruncatedValue)) 11735 return false; 11736 11737 // Special-case bitfields of width 1: booleans are naturally 0/1, and 11738 // therefore don't strictly fit into a signed bitfield of width 1. 11739 if (FieldWidth == 1 && Value == 1) 11740 return false; 11741 11742 std::string PrettyValue = Value.toString(10); 11743 std::string PrettyTrunc = TruncatedValue.toString(10); 11744 11745 S.Diag(InitLoc, diag::warn_impcast_bitfield_precision_constant) 11746 << PrettyValue << PrettyTrunc << OriginalInit->getType() 11747 << Init->getSourceRange(); 11748 11749 return true; 11750 } 11751 11752 /// Analyze the given simple or compound assignment for warning-worthy 11753 /// operations. 11754 static void AnalyzeAssignment(Sema &S, BinaryOperator *E) { 11755 // Just recurse on the LHS. 
11756 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 11757 11758 // We want to recurse on the RHS as normal unless we're assigning to 11759 // a bitfield. 11760 if (FieldDecl *Bitfield = E->getLHS()->getSourceBitField()) { 11761 if (AnalyzeBitFieldAssignment(S, Bitfield, E->getRHS(), 11762 E->getOperatorLoc())) { 11763 // Recurse, ignoring any implicit conversions on the RHS. 11764 return AnalyzeImplicitConversions(S, E->getRHS()->IgnoreParenImpCasts(), 11765 E->getOperatorLoc()); 11766 } 11767 } 11768 11769 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 11770 11771 // Diagnose implicitly sequentially-consistent atomic assignment. 11772 if (E->getLHS()->getType()->isAtomicType()) 11773 S.Diag(E->getRHS()->getBeginLoc(), diag::warn_atomic_implicit_seq_cst); 11774 } 11775 11776 /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. 11777 static void DiagnoseImpCast(Sema &S, Expr *E, QualType SourceType, QualType T, 11778 SourceLocation CContext, unsigned diag, 11779 bool pruneControlFlow = false) { 11780 if (pruneControlFlow) { 11781 S.DiagRuntimeBehavior(E->getExprLoc(), E, 11782 S.PDiag(diag) 11783 << SourceType << T << E->getSourceRange() 11784 << SourceRange(CContext)); 11785 return; 11786 } 11787 S.Diag(E->getExprLoc(), diag) 11788 << SourceType << T << E->getSourceRange() << SourceRange(CContext); 11789 } 11790 11791 /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. 11792 static void DiagnoseImpCast(Sema &S, Expr *E, QualType T, 11793 SourceLocation CContext, 11794 unsigned diag, bool pruneControlFlow = false) { 11795 DiagnoseImpCast(S, E, E->getType(), T, CContext, diag, pruneControlFlow); 11796 } 11797 11798 static bool isObjCSignedCharBool(Sema &S, QualType Ty) { 11799 return Ty->isSpecificBuiltinType(BuiltinType::SChar) && 11800 S.getLangOpts().ObjC && S.NSAPIObj->isObjCBOOLType(Ty); 11801 } 11802 11803 static void adornObjCBoolConversionDiagWithTernaryFixit( 11804 Sema &S, Expr *SourceExpr, const Sema::SemaDiagnosticBuilder &Builder) { 11805 Expr *Ignored = SourceExpr->IgnoreImplicit(); 11806 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(Ignored)) 11807 Ignored = OVE->getSourceExpr(); 11808 bool NeedsParens = isa<AbstractConditionalOperator>(Ignored) || 11809 isa<BinaryOperator>(Ignored) || 11810 isa<CXXOperatorCallExpr>(Ignored); 11811 SourceLocation EndLoc = S.getLocForEndOfToken(SourceExpr->getEndLoc()); 11812 if (NeedsParens) 11813 Builder << FixItHint::CreateInsertion(SourceExpr->getBeginLoc(), "(") 11814 << FixItHint::CreateInsertion(EndLoc, ")"); 11815 Builder << FixItHint::CreateInsertion(EndLoc, " ? YES : NO"); 11816 } 11817 11818 /// Diagnose an implicit cast from a floating point value to an integer value. 
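/// For example (illustrative): 'int I = 0.9;' truncates the value to 0, and
/// 'char C = 1e5;' cannot represent the value at all (the conversion is
/// undefined); both forms are diagnosed below.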
11819 static void DiagnoseFloatingImpCast(Sema &S, Expr *E, QualType T, 11820 SourceLocation CContext) { 11821 const bool IsBool = T->isSpecificBuiltinType(BuiltinType::Bool); 11822 const bool PruneWarnings = S.inTemplateInstantiation(); 11823 11824 Expr *InnerE = E->IgnoreParenImpCasts(); 11825 // We also want to warn on, e.g., "int i = -1.234" 11826 if (UnaryOperator *UOp = dyn_cast<UnaryOperator>(InnerE)) 11827 if (UOp->getOpcode() == UO_Minus || UOp->getOpcode() == UO_Plus) 11828 InnerE = UOp->getSubExpr()->IgnoreParenImpCasts(); 11829 11830 const bool IsLiteral = 11831 isa<FloatingLiteral>(E) || isa<FloatingLiteral>(InnerE); 11832 11833 llvm::APFloat Value(0.0); 11834 bool IsConstant = 11835 E->EvaluateAsFloat(Value, S.Context, Expr::SE_AllowSideEffects); 11836 if (!IsConstant) { 11837 if (isObjCSignedCharBool(S, T)) { 11838 return adornObjCBoolConversionDiagWithTernaryFixit( 11839 S, E, 11840 S.Diag(CContext, diag::warn_impcast_float_to_objc_signed_char_bool) 11841 << E->getType()); 11842 } 11843 11844 return DiagnoseImpCast(S, E, T, CContext, 11845 diag::warn_impcast_float_integer, PruneWarnings); 11846 } 11847 11848 bool isExact = false; 11849 11850 llvm::APSInt IntegerValue(S.Context.getIntWidth(T), 11851 T->hasUnsignedIntegerRepresentation()); 11852 llvm::APFloat::opStatus Result = Value.convertToInteger( 11853 IntegerValue, llvm::APFloat::rmTowardZero, &isExact); 11854 11855 // FIXME: Force the precision of the source value down so we don't print 11856 // digits which are usually useless (we don't really care here if we 11857 // truncate a digit by accident in edge cases). Ideally, APFloat::toString 11858 // would automatically print the shortest representation, but it's a bit 11859 // tricky to implement. 11860 SmallString<16> PrettySourceValue; 11861 unsigned precision = llvm::APFloat::semanticsPrecision(Value.getSemantics()); 11862 precision = (precision * 59 + 195) / 196; 11863 Value.toString(PrettySourceValue, precision); 11864 11865 if (isObjCSignedCharBool(S, T) && IntegerValue != 0 && IntegerValue != 1) { 11866 return adornObjCBoolConversionDiagWithTernaryFixit( 11867 S, E, 11868 S.Diag(CContext, diag::warn_impcast_constant_value_to_objc_bool) 11869 << PrettySourceValue); 11870 } 11871 11872 if (Result == llvm::APFloat::opOK && isExact) { 11873 if (IsLiteral) return; 11874 return DiagnoseImpCast(S, E, T, CContext, diag::warn_impcast_float_integer, 11875 PruneWarnings); 11876 } 11877 11878 // Conversion of a floating-point value to a non-bool integer where the 11879 // integral part cannot be represented by the integer type is undefined. 11880 if (!IsBool && Result == llvm::APFloat::opInvalidOp) 11881 return DiagnoseImpCast( 11882 S, E, T, CContext, 11883 IsLiteral ? diag::warn_impcast_literal_float_to_integer_out_of_range 11884 : diag::warn_impcast_float_to_integer_out_of_range, 11885 PruneWarnings); 11886 11887 unsigned DiagID = 0; 11888 if (IsLiteral) { 11889 // Warn on floating point literal to integer. 11890 DiagID = diag::warn_impcast_literal_float_to_integer; 11891 } else if (IntegerValue == 0) { 11892 if (Value.isZero()) { // Skip -0.0 to 0 conversion. 11893 return DiagnoseImpCast(S, E, T, CContext, 11894 diag::warn_impcast_float_integer, PruneWarnings); 11895 } 11896 // Warn on non-zero to zero conversion. 
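    // For instance (illustrative): 'constexpr double D = 0.5; int I = D;'
    // yields 0 even though the source value is non-zero.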
11897 DiagID = diag::warn_impcast_float_to_integer_zero; 11898 } else { 11899 if (IntegerValue.isUnsigned()) { 11900 if (!IntegerValue.isMaxValue()) { 11901 return DiagnoseImpCast(S, E, T, CContext, 11902 diag::warn_impcast_float_integer, PruneWarnings); 11903 } 11904 } else { // IntegerValue.isSigned() 11905 if (!IntegerValue.isMaxSignedValue() && 11906 !IntegerValue.isMinSignedValue()) { 11907 return DiagnoseImpCast(S, E, T, CContext, 11908 diag::warn_impcast_float_integer, PruneWarnings); 11909 } 11910 } 11911 // Warn on evaluatable floating point expression to integer conversion. 11912 DiagID = diag::warn_impcast_float_to_integer; 11913 } 11914 11915 SmallString<16> PrettyTargetValue; 11916 if (IsBool) 11917 PrettyTargetValue = Value.isZero() ? "false" : "true"; 11918 else 11919 IntegerValue.toString(PrettyTargetValue); 11920 11921 if (PruneWarnings) { 11922 S.DiagRuntimeBehavior(E->getExprLoc(), E, 11923 S.PDiag(DiagID) 11924 << E->getType() << T.getUnqualifiedType() 11925 << PrettySourceValue << PrettyTargetValue 11926 << E->getSourceRange() << SourceRange(CContext)); 11927 } else { 11928 S.Diag(E->getExprLoc(), DiagID) 11929 << E->getType() << T.getUnqualifiedType() << PrettySourceValue 11930 << PrettyTargetValue << E->getSourceRange() << SourceRange(CContext); 11931 } 11932 } 11933 11934 /// Analyze the given compound assignment for the possible losing of 11935 /// floating-point precision. 11936 static void AnalyzeCompoundAssignment(Sema &S, BinaryOperator *E) { 11937 assert(isa<CompoundAssignOperator>(E) && 11938 "Must be compound assignment operation"); 11939 // Recurse on the LHS and RHS in here 11940 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 11941 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 11942 11943 if (E->getLHS()->getType()->isAtomicType()) 11944 S.Diag(E->getOperatorLoc(), diag::warn_atomic_implicit_seq_cst); 11945 11946 // Now check the outermost expression 11947 const auto *ResultBT = E->getLHS()->getType()->getAs<BuiltinType>(); 11948 const auto *RBT = cast<CompoundAssignOperator>(E) 11949 ->getComputationResultType() 11950 ->getAs<BuiltinType>(); 11951 11952 // The below checks assume source is floating point. 11953 if (!ResultBT || !RBT || !RBT->isFloatingPoint()) return; 11954 11955 // If source is floating point but target is an integer. 11956 if (ResultBT->isInteger()) 11957 return DiagnoseImpCast(S, E, E->getRHS()->getType(), E->getLHS()->getType(), 11958 E->getExprLoc(), diag::warn_impcast_float_integer); 11959 11960 if (!ResultBT->isFloatingPoint()) 11961 return; 11962 11963 // If both source and target are floating points, warn about losing precision. 11964 int Order = S.getASTContext().getFloatingTypeSemanticOrder( 11965 QualType(ResultBT, 0), QualType(RBT, 0)); 11966 if (Order < 0 && !S.SourceMgr.isInSystemMacro(E->getOperatorLoc())) 11967 // warn about dropping FP rank. 
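    // For example (illustrative), 'float F = ...; F += 1.0;' computes in
    // 'double' and then rounds the result back to 'float', losing precision.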
11968 DiagnoseImpCast(S, E->getRHS(), E->getLHS()->getType(), E->getOperatorLoc(), 11969 diag::warn_impcast_float_result_precision); 11970 } 11971 11972 static std::string PrettyPrintInRange(const llvm::APSInt &Value, 11973 IntRange Range) { 11974 if (!Range.Width) return "0"; 11975 11976 llvm::APSInt ValueInRange = Value; 11977 ValueInRange.setIsSigned(!Range.NonNegative); 11978 ValueInRange = ValueInRange.trunc(Range.Width); 11979 return ValueInRange.toString(10); 11980 } 11981 11982 static bool IsImplicitBoolFloatConversion(Sema &S, Expr *Ex, bool ToBool) { 11983 if (!isa<ImplicitCastExpr>(Ex)) 11984 return false; 11985 11986 Expr *InnerE = Ex->IgnoreParenImpCasts(); 11987 const Type *Target = S.Context.getCanonicalType(Ex->getType()).getTypePtr(); 11988 const Type *Source = 11989 S.Context.getCanonicalType(InnerE->getType()).getTypePtr(); 11990 if (Target->isDependentType()) 11991 return false; 11992 11993 const BuiltinType *FloatCandidateBT = 11994 dyn_cast<BuiltinType>(ToBool ? Source : Target); 11995 const Type *BoolCandidateType = ToBool ? Target : Source; 11996 11997 return (BoolCandidateType->isSpecificBuiltinType(BuiltinType::Bool) && 11998 FloatCandidateBT && (FloatCandidateBT->isFloatingPoint())); 11999 } 12000 12001 static void CheckImplicitArgumentConversions(Sema &S, CallExpr *TheCall, 12002 SourceLocation CC) { 12003 unsigned NumArgs = TheCall->getNumArgs(); 12004 for (unsigned i = 0; i < NumArgs; ++i) { 12005 Expr *CurrA = TheCall->getArg(i); 12006 if (!IsImplicitBoolFloatConversion(S, CurrA, true)) 12007 continue; 12008 12009 bool IsSwapped = ((i > 0) && 12010 IsImplicitBoolFloatConversion(S, TheCall->getArg(i - 1), false)); 12011 IsSwapped |= ((i < (NumArgs - 1)) && 12012 IsImplicitBoolFloatConversion(S, TheCall->getArg(i + 1), false)); 12013 if (IsSwapped) { 12014 // Warn on this floating-point to bool conversion. 12015 DiagnoseImpCast(S, CurrA->IgnoreParenImpCasts(), 12016 CurrA->getType(), CC, 12017 diag::warn_impcast_floating_point_to_bool); 12018 } 12019 } 12020 } 12021 12022 static void DiagnoseNullConversion(Sema &S, Expr *E, QualType T, 12023 SourceLocation CC) { 12024 if (S.Diags.isIgnored(diag::warn_impcast_null_pointer_to_integer, 12025 E->getExprLoc())) 12026 return; 12027 12028 // Don't warn on functions which have return type nullptr_t. 12029 if (isa<CallExpr>(E)) 12030 return; 12031 12032 // Check for NULL (GNUNull) or nullptr (CXX11_nullptr). 12033 const Expr::NullPointerConstantKind NullKind = 12034 E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull); 12035 if (NullKind != Expr::NPCK_GNUNull && NullKind != Expr::NPCK_CXX11_nullptr) 12036 return; 12037 12038 // Return if target type is a safe conversion. 12039 if (T->isAnyPointerType() || T->isBlockPointerType() || 12040 T->isMemberPointerType() || !T->isScalarType() || T->isNullPtrType()) 12041 return; 12042 12043 SourceLocation Loc = E->getSourceRange().getBegin(); 12044 12045 // Venture through the macro stacks to get to the source of macro arguments. 12046 // The new location is a better location than the complete location that was 12047 // passed in. 12048 Loc = S.SourceMgr.getTopMacroCallerLoc(Loc); 12049 CC = S.SourceMgr.getTopMacroCallerLoc(CC); 12050 12051 // __null is usually wrapped in a macro. Go up a macro if that is the case. 
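// (For example, with the common "#define NULL __null", an initialization
// such as "int fd = NULL;" should be reported at the NULL spelling in user
// code rather than at __null inside the macro body.)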
12052 if (NullKind == Expr::NPCK_GNUNull && Loc.isMacroID()) { 12053 StringRef MacroName = Lexer::getImmediateMacroNameForDiagnostics( 12054 Loc, S.SourceMgr, S.getLangOpts()); 12055 if (MacroName == "NULL") 12056 Loc = S.SourceMgr.getImmediateExpansionRange(Loc).getBegin(); 12057 } 12058 12059 // Only warn if the null and context location are in the same macro expansion. 12060 if (S.SourceMgr.getFileID(Loc) != S.SourceMgr.getFileID(CC)) 12061 return; 12062 12063 S.Diag(Loc, diag::warn_impcast_null_pointer_to_integer) 12064 << (NullKind == Expr::NPCK_CXX11_nullptr) << T << SourceRange(CC) 12065 << FixItHint::CreateReplacement(Loc, 12066 S.getFixItZeroLiteralForType(T, Loc)); 12067 } 12068 12069 static void checkObjCArrayLiteral(Sema &S, QualType TargetType, 12070 ObjCArrayLiteral *ArrayLiteral); 12071 12072 static void 12073 checkObjCDictionaryLiteral(Sema &S, QualType TargetType, 12074 ObjCDictionaryLiteral *DictionaryLiteral); 12075 12076 /// Check a single element within a collection literal against the 12077 /// target element type. 12078 static void checkObjCCollectionLiteralElement(Sema &S, 12079 QualType TargetElementType, 12080 Expr *Element, 12081 unsigned ElementKind) { 12082 // Skip a bitcast to 'id' or qualified 'id'. 12083 if (auto ICE = dyn_cast<ImplicitCastExpr>(Element)) { 12084 if (ICE->getCastKind() == CK_BitCast && 12085 ICE->getSubExpr()->getType()->getAs<ObjCObjectPointerType>()) 12086 Element = ICE->getSubExpr(); 12087 } 12088 12089 QualType ElementType = Element->getType(); 12090 ExprResult ElementResult(Element); 12091 if (ElementType->getAs<ObjCObjectPointerType>() && 12092 S.CheckSingleAssignmentConstraints(TargetElementType, 12093 ElementResult, 12094 false, false) 12095 != Sema::Compatible) { 12096 S.Diag(Element->getBeginLoc(), diag::warn_objc_collection_literal_element) 12097 << ElementType << ElementKind << TargetElementType 12098 << Element->getSourceRange(); 12099 } 12100 12101 if (auto ArrayLiteral = dyn_cast<ObjCArrayLiteral>(Element)) 12102 checkObjCArrayLiteral(S, TargetElementType, ArrayLiteral); 12103 else if (auto DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(Element)) 12104 checkObjCDictionaryLiteral(S, TargetElementType, DictionaryLiteral); 12105 } 12106 12107 /// Check an Objective-C array literal being converted to the given 12108 /// target type. 12109 static void checkObjCArrayLiteral(Sema &S, QualType TargetType, 12110 ObjCArrayLiteral *ArrayLiteral) { 12111 if (!S.NSArrayDecl) 12112 return; 12113 12114 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>(); 12115 if (!TargetObjCPtr) 12116 return; 12117 12118 if (TargetObjCPtr->isUnspecialized() || 12119 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() 12120 != S.NSArrayDecl->getCanonicalDecl()) 12121 return; 12122 12123 auto TypeArgs = TargetObjCPtr->getTypeArgs(); 12124 if (TypeArgs.size() != 1) 12125 return; 12126 12127 QualType TargetElementType = TypeArgs[0]; 12128 for (unsigned I = 0, N = ArrayLiteral->getNumElements(); I != N; ++I) { 12129 checkObjCCollectionLiteralElement(S, TargetElementType, 12130 ArrayLiteral->getElement(I), 12131 0); 12132 } 12133 } 12134 12135 /// Check an Objective-C dictionary literal being converted to the given 12136 /// target type. 
12137 static void 12138 checkObjCDictionaryLiteral(Sema &S, QualType TargetType, 12139 ObjCDictionaryLiteral *DictionaryLiteral) { 12140 if (!S.NSDictionaryDecl) 12141 return; 12142 12143 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>(); 12144 if (!TargetObjCPtr) 12145 return; 12146 12147 if (TargetObjCPtr->isUnspecialized() || 12148 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() 12149 != S.NSDictionaryDecl->getCanonicalDecl()) 12150 return; 12151 12152 auto TypeArgs = TargetObjCPtr->getTypeArgs(); 12153 if (TypeArgs.size() != 2) 12154 return; 12155 12156 QualType TargetKeyType = TypeArgs[0]; 12157 QualType TargetObjectType = TypeArgs[1]; 12158 for (unsigned I = 0, N = DictionaryLiteral->getNumElements(); I != N; ++I) { 12159 auto Element = DictionaryLiteral->getKeyValueElement(I); 12160 checkObjCCollectionLiteralElement(S, TargetKeyType, Element.Key, 1); 12161 checkObjCCollectionLiteralElement(S, TargetObjectType, Element.Value, 2); 12162 } 12163 } 12164 12165 // Helper function to filter out cases for constant width constant conversion. 12166 // Don't warn on char array initialization or for non-decimal values. 12167 static bool isSameWidthConstantConversion(Sema &S, Expr *E, QualType T, 12168 SourceLocation CC) { 12169 // If initializing from a constant, and the constant starts with '0', 12170 // then it is a binary, octal, or hexadecimal. Allow these constants 12171 // to fill all the bits, even if there is a sign change. 12172 if (auto *IntLit = dyn_cast<IntegerLiteral>(E->IgnoreParenImpCasts())) { 12173 const char FirstLiteralCharacter = 12174 S.getSourceManager().getCharacterData(IntLit->getBeginLoc())[0]; 12175 if (FirstLiteralCharacter == '0') 12176 return false; 12177 } 12178 12179 // If the CC location points to a '{', and the type is char, then assume 12180 // assume it is an array initialization. 12181 if (CC.isValid() && T->isCharType()) { 12182 const char FirstContextCharacter = 12183 S.getSourceManager().getCharacterData(CC)[0]; 12184 if (FirstContextCharacter == '{') 12185 return false; 12186 } 12187 12188 return true; 12189 } 12190 12191 static const IntegerLiteral *getIntegerLiteral(Expr *E) { 12192 const auto *IL = dyn_cast<IntegerLiteral>(E); 12193 if (!IL) { 12194 if (auto *UO = dyn_cast<UnaryOperator>(E)) { 12195 if (UO->getOpcode() == UO_Minus) 12196 return dyn_cast<IntegerLiteral>(UO->getSubExpr()); 12197 } 12198 } 12199 12200 return IL; 12201 } 12202 12203 static void DiagnoseIntInBoolContext(Sema &S, Expr *E) { 12204 E = E->IgnoreParenImpCasts(); 12205 SourceLocation ExprLoc = E->getExprLoc(); 12206 12207 if (const auto *BO = dyn_cast<BinaryOperator>(E)) { 12208 BinaryOperator::Opcode Opc = BO->getOpcode(); 12209 Expr::EvalResult Result; 12210 // Do not diagnose unsigned shifts. 
12211 if (Opc == BO_Shl) { 12212 const auto *LHS = getIntegerLiteral(BO->getLHS()); 12213 const auto *RHS = getIntegerLiteral(BO->getRHS()); 12214 if (LHS && LHS->getValue() == 0) 12215 S.Diag(ExprLoc, diag::warn_left_shift_always) << 0; 12216 else if (!E->isValueDependent() && LHS && RHS && 12217 RHS->getValue().isNonNegative() && 12218 E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects)) 12219 S.Diag(ExprLoc, diag::warn_left_shift_always) 12220 << (Result.Val.getInt() != 0); 12221 else if (E->getType()->isSignedIntegerType()) 12222 S.Diag(ExprLoc, diag::warn_left_shift_in_bool_context) << E; 12223 } 12224 } 12225 12226 if (const auto *CO = dyn_cast<ConditionalOperator>(E)) { 12227 const auto *LHS = getIntegerLiteral(CO->getTrueExpr()); 12228 const auto *RHS = getIntegerLiteral(CO->getFalseExpr()); 12229 if (!LHS || !RHS) 12230 return; 12231 if ((LHS->getValue() == 0 || LHS->getValue() == 1) && 12232 (RHS->getValue() == 0 || RHS->getValue() == 1)) 12233 // Do not diagnose common idioms. 12234 return; 12235 if (LHS->getValue() != 0 && RHS->getValue() != 0) 12236 S.Diag(ExprLoc, diag::warn_integer_constants_in_conditional_always_true); 12237 } 12238 } 12239 12240 static void CheckImplicitConversion(Sema &S, Expr *E, QualType T, 12241 SourceLocation CC, 12242 bool *ICContext = nullptr, 12243 bool IsListInit = false) { 12244 if (E->isTypeDependent() || E->isValueDependent()) return; 12245 12246 const Type *Source = S.Context.getCanonicalType(E->getType()).getTypePtr(); 12247 const Type *Target = S.Context.getCanonicalType(T).getTypePtr(); 12248 if (Source == Target) return; 12249 if (Target->isDependentType()) return; 12250 12251 // If the conversion context location is invalid don't complain. We also 12252 // don't want to emit a warning if the issue occurs from the expansion of 12253 // a system macro. The problem is that 'getSpellingLoc()' is slow, so we 12254 // delay this check as long as possible. Once we detect we are in that 12255 // scenario, we just return. 12256 if (CC.isInvalid()) 12257 return; 12258 12259 if (Source->isAtomicType()) 12260 S.Diag(E->getExprLoc(), diag::warn_atomic_implicit_seq_cst); 12261 12262 // Diagnose implicit casts to bool. 12263 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) { 12264 if (isa<StringLiteral>(E)) 12265 // Warn on string literal to bool. Checks for string literals in logical 12266 // and expressions, for instance, assert(0 && "error here"), are 12267 // prevented by a check in AnalyzeImplicitConversions(). 12268 return DiagnoseImpCast(S, E, T, CC, 12269 diag::warn_impcast_string_literal_to_bool); 12270 if (isa<ObjCStringLiteral>(E) || isa<ObjCArrayLiteral>(E) || 12271 isa<ObjCDictionaryLiteral>(E) || isa<ObjCBoxedExpr>(E)) { 12272 // This covers the literal expressions that evaluate to Objective-C 12273 // objects. 12274 return DiagnoseImpCast(S, E, T, CC, 12275 diag::warn_impcast_objective_c_literal_to_bool); 12276 } 12277 if (Source->isPointerType() || Source->canDecayToPointerType()) { 12278 // Warn on pointer to bool conversion that is always true. 12279 S.DiagnoseAlwaysNonNullPointer(E, Expr::NPCK_NotNull, /*IsEqual*/ false, 12280 SourceRange(CC)); 12281 } 12282 } 12283 12284 // If the we're converting a constant to an ObjC BOOL on a platform where BOOL 12285 // is a typedef for signed char (macOS), then that constant value has to be 1 12286 // or 0. 
12287 if (isObjCSignedCharBool(S, T) && Source->isIntegralType(S.Context)) { 12288 Expr::EvalResult Result; 12289 if (E->EvaluateAsInt(Result, S.getASTContext(), 12290 Expr::SE_AllowSideEffects)) { 12291 if (Result.Val.getInt() != 1 && Result.Val.getInt() != 0) { 12292 adornObjCBoolConversionDiagWithTernaryFixit( 12293 S, E, 12294 S.Diag(CC, diag::warn_impcast_constant_value_to_objc_bool) 12295 << Result.Val.getInt().toString(10)); 12296 } 12297 return; 12298 } 12299 } 12300 12301 // Check implicit casts from Objective-C collection literals to specialized 12302 // collection types, e.g., NSArray<NSString *> *. 12303 if (auto *ArrayLiteral = dyn_cast<ObjCArrayLiteral>(E)) 12304 checkObjCArrayLiteral(S, QualType(Target, 0), ArrayLiteral); 12305 else if (auto *DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(E)) 12306 checkObjCDictionaryLiteral(S, QualType(Target, 0), DictionaryLiteral); 12307 12308 // Strip vector types. 12309 if (const auto *SourceVT = dyn_cast<VectorType>(Source)) { 12310 if (Target->isVLSTBuiltinType()) { 12311 auto SourceVectorKind = SourceVT->getVectorKind(); 12312 if (SourceVectorKind == VectorType::SveFixedLengthDataVector || 12313 SourceVectorKind == VectorType::SveFixedLengthPredicateVector || 12314 (SourceVectorKind == VectorType::GenericVector && 12315 S.Context.getTypeSize(Source) == S.getLangOpts().ArmSveVectorBits)) 12316 return; 12317 } 12318 12319 if (!isa<VectorType>(Target)) { 12320 if (S.SourceMgr.isInSystemMacro(CC)) 12321 return; 12322 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_vector_scalar); 12323 } 12324 12325 // If the vector cast is cast between two vectors of the same size, it is 12326 // a bitcast, not a conversion. 12327 if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target)) 12328 return; 12329 12330 Source = cast<VectorType>(Source)->getElementType().getTypePtr(); 12331 Target = cast<VectorType>(Target)->getElementType().getTypePtr(); 12332 } 12333 if (auto VecTy = dyn_cast<VectorType>(Target)) 12334 Target = VecTy->getElementType().getTypePtr(); 12335 12336 // Strip complex types. 12337 if (isa<ComplexType>(Source)) { 12338 if (!isa<ComplexType>(Target)) { 12339 if (S.SourceMgr.isInSystemMacro(CC) || Target->isBooleanType()) 12340 return; 12341 12342 return DiagnoseImpCast(S, E, T, CC, 12343 S.getLangOpts().CPlusPlus 12344 ? diag::err_impcast_complex_scalar 12345 : diag::warn_impcast_complex_scalar); 12346 } 12347 12348 Source = cast<ComplexType>(Source)->getElementType().getTypePtr(); 12349 Target = cast<ComplexType>(Target)->getElementType().getTypePtr(); 12350 } 12351 12352 const BuiltinType *SourceBT = dyn_cast<BuiltinType>(Source); 12353 const BuiltinType *TargetBT = dyn_cast<BuiltinType>(Target); 12354 12355 // If the source is floating point... 12356 if (SourceBT && SourceBT->isFloatingPoint()) { 12357 // ...and the target is floating point... 12358 if (TargetBT && TargetBT->isFloatingPoint()) { 12359 // ...then warn if we're dropping FP rank. 12360 12361 int Order = S.getASTContext().getFloatingTypeSemanticOrder( 12362 QualType(SourceBT, 0), QualType(TargetBT, 0)); 12363 if (Order > 0) { 12364 // Don't warn about float constants that are precisely 12365 // representable in the target type. 12366 Expr::EvalResult result; 12367 if (E->EvaluateAsRValue(result, S.Context)) { 12368 // Value might be a float, a float vector, or a float complex. 
12369 if (IsSameFloatAfterCast(result.Val,
12370 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0)),
12371 S.Context.getFloatTypeSemantics(QualType(SourceBT, 0))))
12372 return;
12373 }
12374 
12375 if (S.SourceMgr.isInSystemMacro(CC))
12376 return;
12377 
12378 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_float_precision);
12379 }
12380 // ... or possibly if we're increasing rank, too
12381 else if (Order < 0) {
12382 if (S.SourceMgr.isInSystemMacro(CC))
12383 return;
12384 
12385 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_double_promotion);
12386 }
12387 return;
12388 }
12389 
12390 // If the target is integral, always warn.
12391 if (TargetBT && TargetBT->isInteger()) {
12392 if (S.SourceMgr.isInSystemMacro(CC))
12393 return;
12394 
12395 DiagnoseFloatingImpCast(S, E, T, CC);
12396 }
12397 
12398 // Detect the case where a call result is converted from floating-point
12399 // to bool, and the final argument to the call is converted from bool, to
12400 // discover this typo:
12401 //
12402 // bool b = fabs(x < 1.0); // should be "bool b = fabs(x) < 1.0;"
12403 //
12404 // FIXME: This is an incredibly special case; is there some more general
12405 // way to detect this class of misplaced-parentheses bug?
12406 if (Target->isBooleanType() && isa<CallExpr>(E)) {
12407 // Check last argument of function call to see if it is an
12408 // implicit cast from a type matching the type the result
12409 // is being cast to.
12410 CallExpr *CEx = cast<CallExpr>(E);
12411 if (unsigned NumArgs = CEx->getNumArgs()) {
12412 Expr *LastA = CEx->getArg(NumArgs - 1);
12413 Expr *InnerE = LastA->IgnoreParenImpCasts();
12414 if (isa<ImplicitCastExpr>(LastA) &&
12415 InnerE->getType()->isBooleanType()) {
12416 // Warn on this floating-point to bool conversion.
12417 DiagnoseImpCast(S, E, T, CC,
12418 diag::warn_impcast_floating_point_to_bool);
12419 }
12420 }
12421 }
12422 return;
12423 }
12424 
12425 // Valid casts involving fixed point types should be accounted for here.
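// (For example, with -ffixed-point, initializing a short _Accum from an
// _Accum constant outside the short type's representable range follows the
// warn_impcast_fixed_point_range path below.)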
12426 if (Source->isFixedPointType()) { 12427 if (Target->isUnsaturatedFixedPointType()) { 12428 Expr::EvalResult Result; 12429 if (E->EvaluateAsFixedPoint(Result, S.Context, Expr::SE_AllowSideEffects, 12430 S.isConstantEvaluated())) { 12431 llvm::APFixedPoint Value = Result.Val.getFixedPoint(); 12432 llvm::APFixedPoint MaxVal = S.Context.getFixedPointMax(T); 12433 llvm::APFixedPoint MinVal = S.Context.getFixedPointMin(T); 12434 if (Value > MaxVal || Value < MinVal) { 12435 S.DiagRuntimeBehavior(E->getExprLoc(), E, 12436 S.PDiag(diag::warn_impcast_fixed_point_range) 12437 << Value.toString() << T 12438 << E->getSourceRange() 12439 << clang::SourceRange(CC)); 12440 return; 12441 } 12442 } 12443 } else if (Target->isIntegerType()) { 12444 Expr::EvalResult Result; 12445 if (!S.isConstantEvaluated() && 12446 E->EvaluateAsFixedPoint(Result, S.Context, 12447 Expr::SE_AllowSideEffects)) { 12448 llvm::APFixedPoint FXResult = Result.Val.getFixedPoint(); 12449 12450 bool Overflowed; 12451 llvm::APSInt IntResult = FXResult.convertToInt( 12452 S.Context.getIntWidth(T), 12453 Target->isSignedIntegerOrEnumerationType(), &Overflowed); 12454 12455 if (Overflowed) { 12456 S.DiagRuntimeBehavior(E->getExprLoc(), E, 12457 S.PDiag(diag::warn_impcast_fixed_point_range) 12458 << FXResult.toString() << T 12459 << E->getSourceRange() 12460 << clang::SourceRange(CC)); 12461 return; 12462 } 12463 } 12464 } 12465 } else if (Target->isUnsaturatedFixedPointType()) { 12466 if (Source->isIntegerType()) { 12467 Expr::EvalResult Result; 12468 if (!S.isConstantEvaluated() && 12469 E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects)) { 12470 llvm::APSInt Value = Result.Val.getInt(); 12471 12472 bool Overflowed; 12473 llvm::APFixedPoint IntResult = llvm::APFixedPoint::getFromIntValue( 12474 Value, S.Context.getFixedPointSemantics(T), &Overflowed); 12475 12476 if (Overflowed) { 12477 S.DiagRuntimeBehavior(E->getExprLoc(), E, 12478 S.PDiag(diag::warn_impcast_fixed_point_range) 12479 << Value.toString(/*Radix=*/10) << T 12480 << E->getSourceRange() 12481 << clang::SourceRange(CC)); 12482 return; 12483 } 12484 } 12485 } 12486 } 12487 12488 // If we are casting an integer type to a floating point type without 12489 // initialization-list syntax, we might lose accuracy if the floating 12490 // point type has a narrower significand than the integer type. 12491 if (SourceBT && TargetBT && SourceBT->isIntegerType() && 12492 TargetBT->isFloatingType() && !IsListInit) { 12493 // Determine the number of precision bits in the source integer type. 12494 IntRange SourceRange = GetExprRange(S.Context, E, S.isConstantEvaluated(), 12495 /*Approximate*/ true); 12496 unsigned int SourcePrecision = SourceRange.Width; 12497 12498 // Determine the number of precision bits in the 12499 // target floating point type. 12500 unsigned int TargetPrecision = llvm::APFloatBase::semanticsPrecision( 12501 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0))); 12502 12503 if (SourcePrecision > 0 && TargetPrecision > 0 && 12504 SourcePrecision > TargetPrecision) { 12505 12506 if (Optional<llvm::APSInt> SourceInt = 12507 E->getIntegerConstantExpr(S.Context)) { 12508 // If the source integer is a constant, convert it to the target 12509 // floating point type. Issue a warning if the value changes 12510 // during the whole conversion. 
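// (For example, "float f = 16777217;" cannot be represented exactly with
// float's 24-bit significand, so both the source value and the rounded
// target value are printed in the diagnostic.)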
12511 llvm::APFloat TargetFloatValue( 12512 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0))); 12513 llvm::APFloat::opStatus ConversionStatus = 12514 TargetFloatValue.convertFromAPInt( 12515 *SourceInt, SourceBT->isSignedInteger(), 12516 llvm::APFloat::rmNearestTiesToEven); 12517 12518 if (ConversionStatus != llvm::APFloat::opOK) { 12519 std::string PrettySourceValue = SourceInt->toString(10); 12520 SmallString<32> PrettyTargetValue; 12521 TargetFloatValue.toString(PrettyTargetValue, TargetPrecision); 12522 12523 S.DiagRuntimeBehavior( 12524 E->getExprLoc(), E, 12525 S.PDiag(diag::warn_impcast_integer_float_precision_constant) 12526 << PrettySourceValue << PrettyTargetValue << E->getType() << T 12527 << E->getSourceRange() << clang::SourceRange(CC)); 12528 } 12529 } else { 12530 // Otherwise, the implicit conversion may lose precision. 12531 DiagnoseImpCast(S, E, T, CC, 12532 diag::warn_impcast_integer_float_precision); 12533 } 12534 } 12535 } 12536 12537 DiagnoseNullConversion(S, E, T, CC); 12538 12539 S.DiscardMisalignedMemberAddress(Target, E); 12540 12541 if (Target->isBooleanType()) 12542 DiagnoseIntInBoolContext(S, E); 12543 12544 if (!Source->isIntegerType() || !Target->isIntegerType()) 12545 return; 12546 12547 // TODO: remove this early return once the false positives for constant->bool 12548 // in templates, macros, etc, are reduced or removed. 12549 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) 12550 return; 12551 12552 if (isObjCSignedCharBool(S, T) && !Source->isCharType() && 12553 !E->isKnownToHaveBooleanValue(/*Semantic=*/false)) { 12554 return adornObjCBoolConversionDiagWithTernaryFixit( 12555 S, E, 12556 S.Diag(CC, diag::warn_impcast_int_to_objc_signed_char_bool) 12557 << E->getType()); 12558 } 12559 12560 IntRange SourceTypeRange = 12561 IntRange::forTargetOfCanonicalType(S.Context, Source); 12562 IntRange LikelySourceRange = 12563 GetExprRange(S.Context, E, S.isConstantEvaluated(), /*Approximate*/ true); 12564 IntRange TargetRange = IntRange::forTargetOfCanonicalType(S.Context, Target); 12565 12566 if (LikelySourceRange.Width > TargetRange.Width) { 12567 // If the source is a constant, use a default-on diagnostic. 12568 // TODO: this should happen for bitfield stores, too. 12569 Expr::EvalResult Result; 12570 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects, 12571 S.isConstantEvaluated())) { 12572 llvm::APSInt Value(32); 12573 Value = Result.Val.getInt(); 12574 12575 if (S.SourceMgr.isInSystemMacro(CC)) 12576 return; 12577 12578 std::string PrettySourceValue = Value.toString(10); 12579 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange); 12580 12581 S.DiagRuntimeBehavior( 12582 E->getExprLoc(), E, 12583 S.PDiag(diag::warn_impcast_integer_precision_constant) 12584 << PrettySourceValue << PrettyTargetValue << E->getType() << T 12585 << E->getSourceRange() << SourceRange(CC)); 12586 return; 12587 } 12588 12589 // People want to build with -Wshorten-64-to-32 and not -Wconversion. 
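// (For example, on LP64 targets an implicit "long" to "int" assignment is
// reported via warn_impcast_integer_64_32 below so it can be enabled
// separately from the general precision warning.)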
12590 if (S.SourceMgr.isInSystemMacro(CC)) 12591 return; 12592 12593 if (TargetRange.Width == 32 && S.Context.getIntWidth(E->getType()) == 64) 12594 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_64_32, 12595 /* pruneControlFlow */ true); 12596 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_precision); 12597 } 12598 12599 if (TargetRange.Width > SourceTypeRange.Width) { 12600 if (auto *UO = dyn_cast<UnaryOperator>(E)) 12601 if (UO->getOpcode() == UO_Minus) 12602 if (Source->isUnsignedIntegerType()) { 12603 if (Target->isUnsignedIntegerType()) 12604 return DiagnoseImpCast(S, E, T, CC, 12605 diag::warn_impcast_high_order_zero_bits); 12606 if (Target->isSignedIntegerType()) 12607 return DiagnoseImpCast(S, E, T, CC, 12608 diag::warn_impcast_nonnegative_result); 12609 } 12610 } 12611 12612 if (TargetRange.Width == LikelySourceRange.Width && 12613 !TargetRange.NonNegative && LikelySourceRange.NonNegative && 12614 Source->isSignedIntegerType()) { 12615 // Warn when doing a signed to signed conversion, warn if the positive 12616 // source value is exactly the width of the target type, which will 12617 // cause a negative value to be stored. 12618 12619 Expr::EvalResult Result; 12620 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects) && 12621 !S.SourceMgr.isInSystemMacro(CC)) { 12622 llvm::APSInt Value = Result.Val.getInt(); 12623 if (isSameWidthConstantConversion(S, E, T, CC)) { 12624 std::string PrettySourceValue = Value.toString(10); 12625 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange); 12626 12627 S.DiagRuntimeBehavior( 12628 E->getExprLoc(), E, 12629 S.PDiag(diag::warn_impcast_integer_precision_constant) 12630 << PrettySourceValue << PrettyTargetValue << E->getType() << T 12631 << E->getSourceRange() << SourceRange(CC)); 12632 return; 12633 } 12634 } 12635 12636 // Fall through for non-constants to give a sign conversion warning. 12637 } 12638 12639 if ((TargetRange.NonNegative && !LikelySourceRange.NonNegative) || 12640 (!TargetRange.NonNegative && LikelySourceRange.NonNegative && 12641 LikelySourceRange.Width == TargetRange.Width)) { 12642 if (S.SourceMgr.isInSystemMacro(CC)) 12643 return; 12644 12645 unsigned DiagID = diag::warn_impcast_integer_sign; 12646 12647 // Traditionally, gcc has warned about this under -Wsign-compare. 12648 // We also want to warn about it in -Wconversion. 12649 // So if -Wconversion is off, use a completely identical diagnostic 12650 // in the sign-compare group. 12651 // The conditional-checking code will 12652 if (ICContext) { 12653 DiagID = diag::warn_impcast_integer_sign_conditional; 12654 *ICContext = true; 12655 } 12656 12657 return DiagnoseImpCast(S, E, T, CC, DiagID); 12658 } 12659 12660 // Diagnose conversions between different enumeration types. 12661 // In C, we pretend that the type of an EnumConstantDecl is its enumeration 12662 // type, to give us better diagnostics. 
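// (For example, in C "enum Fruit f = Red;" where Red belongs to a different
// named enum is still diagnosed even though the enumerator's formal type is
// int; unnamed enums are skipped by the hasNameForLinkage() checks below.)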
12663 QualType SourceType = E->getType(); 12664 if (!S.getLangOpts().CPlusPlus) { 12665 if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) 12666 if (EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(DRE->getDecl())) { 12667 EnumDecl *Enum = cast<EnumDecl>(ECD->getDeclContext()); 12668 SourceType = S.Context.getTypeDeclType(Enum); 12669 Source = S.Context.getCanonicalType(SourceType).getTypePtr(); 12670 } 12671 } 12672 12673 if (const EnumType *SourceEnum = Source->getAs<EnumType>()) 12674 if (const EnumType *TargetEnum = Target->getAs<EnumType>()) 12675 if (SourceEnum->getDecl()->hasNameForLinkage() && 12676 TargetEnum->getDecl()->hasNameForLinkage() && 12677 SourceEnum != TargetEnum) { 12678 if (S.SourceMgr.isInSystemMacro(CC)) 12679 return; 12680 12681 return DiagnoseImpCast(S, E, SourceType, T, CC, 12682 diag::warn_impcast_different_enum_types); 12683 } 12684 } 12685 12686 static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E, 12687 SourceLocation CC, QualType T); 12688 12689 static void CheckConditionalOperand(Sema &S, Expr *E, QualType T, 12690 SourceLocation CC, bool &ICContext) { 12691 E = E->IgnoreParenImpCasts(); 12692 12693 if (auto *CO = dyn_cast<AbstractConditionalOperator>(E)) 12694 return CheckConditionalOperator(S, CO, CC, T); 12695 12696 AnalyzeImplicitConversions(S, E, CC); 12697 if (E->getType() != T) 12698 return CheckImplicitConversion(S, E, T, CC, &ICContext); 12699 } 12700 12701 static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E, 12702 SourceLocation CC, QualType T) { 12703 AnalyzeImplicitConversions(S, E->getCond(), E->getQuestionLoc()); 12704 12705 Expr *TrueExpr = E->getTrueExpr(); 12706 if (auto *BCO = dyn_cast<BinaryConditionalOperator>(E)) 12707 TrueExpr = BCO->getCommon(); 12708 12709 bool Suspicious = false; 12710 CheckConditionalOperand(S, TrueExpr, T, CC, Suspicious); 12711 CheckConditionalOperand(S, E->getFalseExpr(), T, CC, Suspicious); 12712 12713 if (T->isBooleanType()) 12714 DiagnoseIntInBoolContext(S, E); 12715 12716 // If -Wconversion would have warned about either of the candidates 12717 // for a signedness conversion to the context type... 12718 if (!Suspicious) return; 12719 12720 // ...but it's currently ignored... 12721 if (!S.Diags.isIgnored(diag::warn_impcast_integer_sign_conditional, CC)) 12722 return; 12723 12724 // ...then check whether it would have warned about either of the 12725 // candidates for a signedness conversion to the condition type. 12726 if (E->getType() == T) return; 12727 12728 Suspicious = false; 12729 CheckImplicitConversion(S, TrueExpr->IgnoreParenImpCasts(), 12730 E->getType(), CC, &Suspicious); 12731 if (!Suspicious) 12732 CheckImplicitConversion(S, E->getFalseExpr()->IgnoreParenImpCasts(), 12733 E->getType(), CC, &Suspicious); 12734 } 12735 12736 /// Check conversion of given expression to boolean. 12737 /// Input argument E is a logical expression. 12738 static void CheckBoolLikeConversion(Sema &S, Expr *E, SourceLocation CC) { 12739 if (S.getLangOpts().Bool) 12740 return; 12741 if (E->IgnoreParenImpCasts()->getType()->isAtomicType()) 12742 return; 12743 CheckImplicitConversion(S, E->IgnoreParenImpCasts(), S.Context.BoolTy, CC); 12744 } 12745 12746 namespace { 12747 struct AnalyzeImplicitConversionsWorkItem { 12748 Expr *E; 12749 SourceLocation CC; 12750 bool IsListInit; 12751 }; 12752 } 12753 12754 /// Data recursive variant of AnalyzeImplicitConversions. Subexpressions 12755 /// that should be visited are added to WorkList. 
12756 static void AnalyzeImplicitConversions( 12757 Sema &S, AnalyzeImplicitConversionsWorkItem Item, 12758 llvm::SmallVectorImpl<AnalyzeImplicitConversionsWorkItem> &WorkList) { 12759 Expr *OrigE = Item.E; 12760 SourceLocation CC = Item.CC; 12761 12762 QualType T = OrigE->getType(); 12763 Expr *E = OrigE->IgnoreParenImpCasts(); 12764 12765 // Propagate whether we are in a C++ list initialization expression. 12766 // If so, we do not issue warnings for implicit int-float conversion 12767 // precision loss, because C++11 narrowing already handles it. 12768 bool IsListInit = Item.IsListInit || 12769 (isa<InitListExpr>(OrigE) && S.getLangOpts().CPlusPlus); 12770 12771 if (E->isTypeDependent() || E->isValueDependent()) 12772 return; 12773 12774 Expr *SourceExpr = E; 12775 // Examine, but don't traverse into the source expression of an 12776 // OpaqueValueExpr, since it may have multiple parents and we don't want to 12777 // emit duplicate diagnostics. Its fine to examine the form or attempt to 12778 // evaluate it in the context of checking the specific conversion to T though. 12779 if (auto *OVE = dyn_cast<OpaqueValueExpr>(E)) 12780 if (auto *Src = OVE->getSourceExpr()) 12781 SourceExpr = Src; 12782 12783 if (const auto *UO = dyn_cast<UnaryOperator>(SourceExpr)) 12784 if (UO->getOpcode() == UO_Not && 12785 UO->getSubExpr()->isKnownToHaveBooleanValue()) 12786 S.Diag(UO->getBeginLoc(), diag::warn_bitwise_negation_bool) 12787 << OrigE->getSourceRange() << T->isBooleanType() 12788 << FixItHint::CreateReplacement(UO->getBeginLoc(), "!"); 12789 12790 // For conditional operators, we analyze the arguments as if they 12791 // were being fed directly into the output. 12792 if (auto *CO = dyn_cast<AbstractConditionalOperator>(SourceExpr)) { 12793 CheckConditionalOperator(S, CO, CC, T); 12794 return; 12795 } 12796 12797 // Check implicit argument conversions for function calls. 12798 if (CallExpr *Call = dyn_cast<CallExpr>(SourceExpr)) 12799 CheckImplicitArgumentConversions(S, Call, CC); 12800 12801 // Go ahead and check any implicit conversions we might have skipped. 12802 // The non-canonical typecheck is just an optimization; 12803 // CheckImplicitConversion will filter out dead implicit conversions. 12804 if (SourceExpr->getType() != T) 12805 CheckImplicitConversion(S, SourceExpr, T, CC, nullptr, IsListInit); 12806 12807 // Now continue drilling into this expression. 12808 12809 if (PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E)) { 12810 // The bound subexpressions in a PseudoObjectExpr are not reachable 12811 // as transitive children. 12812 // FIXME: Use a more uniform representation for this. 12813 for (auto *SE : POE->semantics()) 12814 if (auto *OVE = dyn_cast<OpaqueValueExpr>(SE)) 12815 WorkList.push_back({OVE->getSourceExpr(), CC, IsListInit}); 12816 } 12817 12818 // Skip past explicit casts. 12819 if (auto *CE = dyn_cast<ExplicitCastExpr>(E)) { 12820 E = CE->getSubExpr()->IgnoreParenImpCasts(); 12821 if (!CE->getType()->isVoidType() && E->getType()->isAtomicType()) 12822 S.Diag(E->getBeginLoc(), diag::warn_atomic_implicit_seq_cst); 12823 WorkList.push_back({E, CC, IsListInit}); 12824 return; 12825 } 12826 12827 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { 12828 // Do a somewhat different check with comparison operators. 12829 if (BO->isComparisonOp()) 12830 return AnalyzeComparison(S, BO); 12831 12832 // And with simple assignments. 12833 if (BO->getOpcode() == BO_Assign) 12834 return AnalyzeAssignment(S, BO); 12835 // And with compound assignments. 
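// (For example, "f += d" with float f and double d is routed to
// AnalyzeCompoundAssignment, which also checks the double-to-float
// conversion of the computation result.)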
12836 if (BO->isAssignmentOp()) 12837 return AnalyzeCompoundAssignment(S, BO); 12838 } 12839 12840 // These break the otherwise-useful invariant below. Fortunately, 12841 // we don't really need to recurse into them, because any internal 12842 // expressions should have been analyzed already when they were 12843 // built into statements. 12844 if (isa<StmtExpr>(E)) return; 12845 12846 // Don't descend into unevaluated contexts. 12847 if (isa<UnaryExprOrTypeTraitExpr>(E)) return; 12848 12849 // Now just recurse over the expression's children. 12850 CC = E->getExprLoc(); 12851 BinaryOperator *BO = dyn_cast<BinaryOperator>(E); 12852 bool IsLogicalAndOperator = BO && BO->getOpcode() == BO_LAnd; 12853 for (Stmt *SubStmt : E->children()) { 12854 Expr *ChildExpr = dyn_cast_or_null<Expr>(SubStmt); 12855 if (!ChildExpr) 12856 continue; 12857 12858 if (IsLogicalAndOperator && 12859 isa<StringLiteral>(ChildExpr->IgnoreParenImpCasts())) 12860 // Ignore checking string literals that are in logical and operators. 12861 // This is a common pattern for asserts. 12862 continue; 12863 WorkList.push_back({ChildExpr, CC, IsListInit}); 12864 } 12865 12866 if (BO && BO->isLogicalOp()) { 12867 Expr *SubExpr = BO->getLHS()->IgnoreParenImpCasts(); 12868 if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr)) 12869 ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc()); 12870 12871 SubExpr = BO->getRHS()->IgnoreParenImpCasts(); 12872 if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr)) 12873 ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc()); 12874 } 12875 12876 if (const UnaryOperator *U = dyn_cast<UnaryOperator>(E)) { 12877 if (U->getOpcode() == UO_LNot) { 12878 ::CheckBoolLikeConversion(S, U->getSubExpr(), CC); 12879 } else if (U->getOpcode() != UO_AddrOf) { 12880 if (U->getSubExpr()->getType()->isAtomicType()) 12881 S.Diag(U->getSubExpr()->getBeginLoc(), 12882 diag::warn_atomic_implicit_seq_cst); 12883 } 12884 } 12885 } 12886 12887 /// AnalyzeImplicitConversions - Find and report any interesting 12888 /// implicit conversions in the given expression. There are a couple 12889 /// of competing diagnostics here, -Wconversion and -Wsign-compare. 12890 static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC, 12891 bool IsListInit/*= false*/) { 12892 llvm::SmallVector<AnalyzeImplicitConversionsWorkItem, 16> WorkList; 12893 WorkList.push_back({OrigE, CC, IsListInit}); 12894 while (!WorkList.empty()) 12895 AnalyzeImplicitConversions(S, WorkList.pop_back_val(), WorkList); 12896 } 12897 12898 /// Diagnose integer type and any valid implicit conversion to it. 12899 static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E, const QualType &IntT) { 12900 // Taking into account implicit conversions, 12901 // allow any integer. 12902 if (!E->getType()->isIntegerType()) { 12903 S.Diag(E->getBeginLoc(), 12904 diag::err_opencl_enqueue_kernel_invalid_local_size_type); 12905 return true; 12906 } 12907 // Potentially emit standard warnings for implicit conversions if enabled 12908 // using -Wconversion. 12909 CheckImplicitConversion(S, E, IntT, E->getBeginLoc()); 12910 return false; 12911 } 12912 12913 // Helper function for Sema::DiagnoseAlwaysNonNullPointer. 12914 // Returns true when emitting a warning about taking the address of a reference. 
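// For example, given "int &r = n;", a comparison like "&r == nullptr" is
// always false; when the reference comes from a call, a note additionally
// points at the function whose return type is a reference.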
12915 static bool CheckForReference(Sema &SemaRef, const Expr *E, 12916 const PartialDiagnostic &PD) { 12917 E = E->IgnoreParenImpCasts(); 12918 12919 const FunctionDecl *FD = nullptr; 12920 12921 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) { 12922 if (!DRE->getDecl()->getType()->isReferenceType()) 12923 return false; 12924 } else if (const MemberExpr *M = dyn_cast<MemberExpr>(E)) { 12925 if (!M->getMemberDecl()->getType()->isReferenceType()) 12926 return false; 12927 } else if (const CallExpr *Call = dyn_cast<CallExpr>(E)) { 12928 if (!Call->getCallReturnType(SemaRef.Context)->isReferenceType()) 12929 return false; 12930 FD = Call->getDirectCallee(); 12931 } else { 12932 return false; 12933 } 12934 12935 SemaRef.Diag(E->getExprLoc(), PD); 12936 12937 // If possible, point to location of function. 12938 if (FD) { 12939 SemaRef.Diag(FD->getLocation(), diag::note_reference_is_return_value) << FD; 12940 } 12941 12942 return true; 12943 } 12944 12945 // Returns true if the SourceLocation is expanded from any macro body. 12946 // Returns false if the SourceLocation is invalid, is from not in a macro 12947 // expansion, or is from expanded from a top-level macro argument. 12948 static bool IsInAnyMacroBody(const SourceManager &SM, SourceLocation Loc) { 12949 if (Loc.isInvalid()) 12950 return false; 12951 12952 while (Loc.isMacroID()) { 12953 if (SM.isMacroBodyExpansion(Loc)) 12954 return true; 12955 Loc = SM.getImmediateMacroCallerLoc(Loc); 12956 } 12957 12958 return false; 12959 } 12960 12961 /// Diagnose pointers that are always non-null. 12962 /// \param E the expression containing the pointer 12963 /// \param NullKind NPCK_NotNull if E is a cast to bool, otherwise, E is 12964 /// compared to a null pointer 12965 /// \param IsEqual True when the comparison is equal to a null pointer 12966 /// \param Range Extra SourceRange to highlight in the diagnostic 12967 void Sema::DiagnoseAlwaysNonNullPointer(Expr *E, 12968 Expr::NullPointerConstantKind NullKind, 12969 bool IsEqual, SourceRange Range) { 12970 if (!E) 12971 return; 12972 12973 // Don't warn inside macros. 12974 if (E->getExprLoc().isMacroID()) { 12975 const SourceManager &SM = getSourceManager(); 12976 if (IsInAnyMacroBody(SM, E->getExprLoc()) || 12977 IsInAnyMacroBody(SM, Range.getBegin())) 12978 return; 12979 } 12980 E = E->IgnoreImpCasts(); 12981 12982 const bool IsCompare = NullKind != Expr::NPCK_NotNull; 12983 12984 if (isa<CXXThisExpr>(E)) { 12985 unsigned DiagID = IsCompare ? diag::warn_this_null_compare 12986 : diag::warn_this_bool_conversion; 12987 Diag(E->getExprLoc(), DiagID) << E->getSourceRange() << Range << IsEqual; 12988 return; 12989 } 12990 12991 bool IsAddressOf = false; 12992 12993 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) { 12994 if (UO->getOpcode() != UO_AddrOf) 12995 return; 12996 IsAddressOf = true; 12997 E = UO->getSubExpr(); 12998 } 12999 13000 if (IsAddressOf) { 13001 unsigned DiagID = IsCompare 13002 ? diag::warn_address_of_reference_null_compare 13003 : diag::warn_address_of_reference_bool_conversion; 13004 PartialDiagnostic PD = PDiag(DiagID) << E->getSourceRange() << Range 13005 << IsEqual; 13006 if (CheckForReference(*this, E, PD)) { 13007 return; 13008 } 13009 } 13010 13011 auto ComplainAboutNonnullParamOrCall = [&](const Attr *NonnullAttr) { 13012 bool IsParam = isa<NonNullAttr>(NonnullAttr); 13013 std::string Str; 13014 llvm::raw_string_ostream S(Str); 13015 E->printPretty(S, nullptr, getPrintingPolicy()); 13016 unsigned DiagID = IsCompare ? 
diag::warn_nonnull_expr_compare 13017 : diag::warn_cast_nonnull_to_bool; 13018 Diag(E->getExprLoc(), DiagID) << IsParam << S.str() 13019 << E->getSourceRange() << Range << IsEqual; 13020 Diag(NonnullAttr->getLocation(), diag::note_declared_nonnull) << IsParam; 13021 }; 13022 13023 // If we have a CallExpr that is tagged with returns_nonnull, we can complain. 13024 if (auto *Call = dyn_cast<CallExpr>(E->IgnoreParenImpCasts())) { 13025 if (auto *Callee = Call->getDirectCallee()) { 13026 if (const Attr *A = Callee->getAttr<ReturnsNonNullAttr>()) { 13027 ComplainAboutNonnullParamOrCall(A); 13028 return; 13029 } 13030 } 13031 } 13032 13033 // Expect to find a single Decl. Skip anything more complicated. 13034 ValueDecl *D = nullptr; 13035 if (DeclRefExpr *R = dyn_cast<DeclRefExpr>(E)) { 13036 D = R->getDecl(); 13037 } else if (MemberExpr *M = dyn_cast<MemberExpr>(E)) { 13038 D = M->getMemberDecl(); 13039 } 13040 13041 // Weak Decls can be null. 13042 if (!D || D->isWeak()) 13043 return; 13044 13045 // Check for parameter decl with nonnull attribute 13046 if (const auto* PV = dyn_cast<ParmVarDecl>(D)) { 13047 if (getCurFunction() && 13048 !getCurFunction()->ModifiedNonNullParams.count(PV)) { 13049 if (const Attr *A = PV->getAttr<NonNullAttr>()) { 13050 ComplainAboutNonnullParamOrCall(A); 13051 return; 13052 } 13053 13054 if (const auto *FD = dyn_cast<FunctionDecl>(PV->getDeclContext())) { 13055 // Skip function template not specialized yet. 13056 if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate) 13057 return; 13058 auto ParamIter = llvm::find(FD->parameters(), PV); 13059 assert(ParamIter != FD->param_end()); 13060 unsigned ParamNo = std::distance(FD->param_begin(), ParamIter); 13061 13062 for (const auto *NonNull : FD->specific_attrs<NonNullAttr>()) { 13063 if (!NonNull->args_size()) { 13064 ComplainAboutNonnullParamOrCall(NonNull); 13065 return; 13066 } 13067 13068 for (const ParamIdx &ArgNo : NonNull->args()) { 13069 if (ArgNo.getASTIndex() == ParamNo) { 13070 ComplainAboutNonnullParamOrCall(NonNull); 13071 return; 13072 } 13073 } 13074 } 13075 } 13076 } 13077 } 13078 13079 QualType T = D->getType(); 13080 const bool IsArray = T->isArrayType(); 13081 const bool IsFunction = T->isFunctionType(); 13082 13083 // Address of function is used to silence the function warning. 13084 if (IsAddressOf && IsFunction) { 13085 return; 13086 } 13087 13088 // Found nothing. 13089 if (!IsAddressOf && !IsFunction && !IsArray) 13090 return; 13091 13092 // Pretty print the expression for the diagnostic. 13093 std::string Str; 13094 llvm::raw_string_ostream S(Str); 13095 E->printPretty(S, nullptr, getPrintingPolicy()); 13096 13097 unsigned DiagID = IsCompare ? diag::warn_null_pointer_compare 13098 : diag::warn_impcast_pointer_to_bool; 13099 enum { 13100 AddressOf, 13101 FunctionPointer, 13102 ArrayPointer 13103 } DiagType; 13104 if (IsAddressOf) 13105 DiagType = AddressOf; 13106 else if (IsFunction) 13107 DiagType = FunctionPointer; 13108 else if (IsArray) 13109 DiagType = ArrayPointer; 13110 else 13111 llvm_unreachable("Could not determine diagnostic."); 13112 Diag(E->getExprLoc(), DiagID) << DiagType << S.str() << E->getSourceRange() 13113 << Range << IsEqual; 13114 13115 if (!IsFunction) 13116 return; 13117 13118 // Suggest '&' to silence the function warning. 13119 Diag(E->getExprLoc(), diag::note_function_warning_silence) 13120 << FixItHint::CreateInsertion(E->getBeginLoc(), "&"); 13121 13122 // Check to see if '()' fixit should be emitted. 
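// (For example, for "if (getenv == NULL)" the function pointer is never
// null; since getenv returns a pointer, a "getenv()" call fix-it makes
// sense, while for a plain bool conversion the fix-it is only useful when
// the function actually returns bool.)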
13123 QualType ReturnType; 13124 UnresolvedSet<4> NonTemplateOverloads; 13125 tryExprAsCall(*E, ReturnType, NonTemplateOverloads); 13126 if (ReturnType.isNull()) 13127 return; 13128 13129 if (IsCompare) { 13130 // There are two cases here. If there is null constant, the only suggest 13131 // for a pointer return type. If the null is 0, then suggest if the return 13132 // type is a pointer or an integer type. 13133 if (!ReturnType->isPointerType()) { 13134 if (NullKind == Expr::NPCK_ZeroExpression || 13135 NullKind == Expr::NPCK_ZeroLiteral) { 13136 if (!ReturnType->isIntegerType()) 13137 return; 13138 } else { 13139 return; 13140 } 13141 } 13142 } else { // !IsCompare 13143 // For function to bool, only suggest if the function pointer has bool 13144 // return type. 13145 if (!ReturnType->isSpecificBuiltinType(BuiltinType::Bool)) 13146 return; 13147 } 13148 Diag(E->getExprLoc(), diag::note_function_to_function_call) 13149 << FixItHint::CreateInsertion(getLocForEndOfToken(E->getEndLoc()), "()"); 13150 } 13151 13152 /// Diagnoses "dangerous" implicit conversions within the given 13153 /// expression (which is a full expression). Implements -Wconversion 13154 /// and -Wsign-compare. 13155 /// 13156 /// \param CC the "context" location of the implicit conversion, i.e. 13157 /// the most location of the syntactic entity requiring the implicit 13158 /// conversion 13159 void Sema::CheckImplicitConversions(Expr *E, SourceLocation CC) { 13160 // Don't diagnose in unevaluated contexts. 13161 if (isUnevaluatedContext()) 13162 return; 13163 13164 // Don't diagnose for value- or type-dependent expressions. 13165 if (E->isTypeDependent() || E->isValueDependent()) 13166 return; 13167 13168 // Check for array bounds violations in cases where the check isn't triggered 13169 // elsewhere for other Expr types (like BinaryOperators), e.g. when an 13170 // ArraySubscriptExpr is on the RHS of a variable initialization. 13171 CheckArrayAccess(E); 13172 13173 // This is not the right CC for (e.g.) a variable initialization. 13174 AnalyzeImplicitConversions(*this, E, CC); 13175 } 13176 13177 /// CheckBoolLikeConversion - Check conversion of given expression to boolean. 13178 /// Input argument E is a logical expression. 13179 void Sema::CheckBoolLikeConversion(Expr *E, SourceLocation CC) { 13180 ::CheckBoolLikeConversion(*this, E, CC); 13181 } 13182 13183 /// Diagnose when expression is an integer constant expression and its evaluation 13184 /// results in integer overflow 13185 void Sema::CheckForIntOverflow (Expr *E) { 13186 // Use a work list to deal with nested struct initializers. 13187 SmallVector<Expr *, 2> Exprs(1, E); 13188 13189 do { 13190 Expr *OriginalE = Exprs.pop_back_val(); 13191 Expr *E = OriginalE->IgnoreParenCasts(); 13192 13193 if (isa<BinaryOperator>(E)) { 13194 E->EvaluateForOverflow(Context); 13195 continue; 13196 } 13197 13198 if (auto InitList = dyn_cast<InitListExpr>(OriginalE)) 13199 Exprs.append(InitList->inits().begin(), InitList->inits().end()); 13200 else if (isa<ObjCBoxedExpr>(OriginalE)) 13201 E->EvaluateForOverflow(Context); 13202 else if (auto Call = dyn_cast<CallExpr>(E)) 13203 Exprs.append(Call->arg_begin(), Call->arg_end()); 13204 else if (auto Message = dyn_cast<ObjCMessageExpr>(E)) 13205 Exprs.append(Message->arg_begin(), Message->arg_end()); 13206 } while (!Exprs.empty()); 13207 } 13208 13209 namespace { 13210 13211 /// Visitor for expressions which looks for unsequenced operations on the 13212 /// same object. 
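/// For example, before C++17 both "i = i++ + 1" and "f(i++, i)" contain
/// unsequenced operations on 'i' and are diagnosed under -Wunsequenced.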
13213 class SequenceChecker : public ConstEvaluatedExprVisitor<SequenceChecker> { 13214 using Base = ConstEvaluatedExprVisitor<SequenceChecker>; 13215 13216 /// A tree of sequenced regions within an expression. Two regions are 13217 /// unsequenced if one is an ancestor or a descendent of the other. When we 13218 /// finish processing an expression with sequencing, such as a comma 13219 /// expression, we fold its tree nodes into its parent, since they are 13220 /// unsequenced with respect to nodes we will visit later. 13221 class SequenceTree { 13222 struct Value { 13223 explicit Value(unsigned Parent) : Parent(Parent), Merged(false) {} 13224 unsigned Parent : 31; 13225 unsigned Merged : 1; 13226 }; 13227 SmallVector<Value, 8> Values; 13228 13229 public: 13230 /// A region within an expression which may be sequenced with respect 13231 /// to some other region. 13232 class Seq { 13233 friend class SequenceTree; 13234 13235 unsigned Index; 13236 13237 explicit Seq(unsigned N) : Index(N) {} 13238 13239 public: 13240 Seq() : Index(0) {} 13241 }; 13242 13243 SequenceTree() { Values.push_back(Value(0)); } 13244 Seq root() const { return Seq(0); } 13245 13246 /// Create a new sequence of operations, which is an unsequenced 13247 /// subset of \p Parent. This sequence of operations is sequenced with 13248 /// respect to other children of \p Parent. 13249 Seq allocate(Seq Parent) { 13250 Values.push_back(Value(Parent.Index)); 13251 return Seq(Values.size() - 1); 13252 } 13253 13254 /// Merge a sequence of operations into its parent. 13255 void merge(Seq S) { 13256 Values[S.Index].Merged = true; 13257 } 13258 13259 /// Determine whether two operations are unsequenced. This operation 13260 /// is asymmetric: \p Cur should be the more recent sequence, and \p Old 13261 /// should have been merged into its parent as appropriate. 13262 bool isUnsequenced(Seq Cur, Seq Old) { 13263 unsigned C = representative(Cur.Index); 13264 unsigned Target = representative(Old.Index); 13265 while (C >= Target) { 13266 if (C == Target) 13267 return true; 13268 C = Values[C].Parent; 13269 } 13270 return false; 13271 } 13272 13273 private: 13274 /// Pick a representative for a sequence. 13275 unsigned representative(unsigned K) { 13276 if (Values[K].Merged) 13277 // Perform path compression as we go. 13278 return Values[K].Parent = representative(Values[K].Parent); 13279 return K; 13280 } 13281 }; 13282 13283 /// An object for which we can track unsequenced uses. 13284 using Object = const NamedDecl *; 13285 13286 /// Different flavors of object usage which we track. We only track the 13287 /// least-sequenced usage of each kind. 13288 enum UsageKind { 13289 /// A read of an object. Multiple unsequenced reads are OK. 13290 UK_Use, 13291 13292 /// A modification of an object which is sequenced before the value 13293 /// computation of the expression, such as ++n in C++. 13294 UK_ModAsValue, 13295 13296 /// A modification of an object which is not sequenced before the value 13297 /// computation of the expression, such as n++. 13298 UK_ModAsSideEffect, 13299 13300 UK_Count = UK_ModAsSideEffect + 1 13301 }; 13302 13303 /// Bundle together a sequencing region and the expression corresponding 13304 /// to a specific usage. One Usage is stored for each usage kind in UsageInfo. 
13305 struct Usage { 13306 const Expr *UsageExpr; 13307 SequenceTree::Seq Seq; 13308 13309 Usage() : UsageExpr(nullptr), Seq() {} 13310 }; 13311 13312 struct UsageInfo { 13313 Usage Uses[UK_Count]; 13314 13315 /// Have we issued a diagnostic for this object already? 13316 bool Diagnosed; 13317 13318 UsageInfo() : Uses(), Diagnosed(false) {} 13319 }; 13320 using UsageInfoMap = llvm::SmallDenseMap<Object, UsageInfo, 16>; 13321 13322 Sema &SemaRef; 13323 13324 /// Sequenced regions within the expression. 13325 SequenceTree Tree; 13326 13327 /// Declaration modifications and references which we have seen. 13328 UsageInfoMap UsageMap; 13329 13330 /// The region we are currently within. 13331 SequenceTree::Seq Region; 13332 13333 /// Filled in with declarations which were modified as a side-effect 13334 /// (that is, post-increment operations). 13335 SmallVectorImpl<std::pair<Object, Usage>> *ModAsSideEffect = nullptr; 13336 13337 /// Expressions to check later. We defer checking these to reduce 13338 /// stack usage. 13339 SmallVectorImpl<const Expr *> &WorkList; 13340 13341 /// RAII object wrapping the visitation of a sequenced subexpression of an 13342 /// expression. At the end of this process, the side-effects of the evaluation 13343 /// become sequenced with respect to the value computation of the result, so 13344 /// we downgrade any UK_ModAsSideEffect within the evaluation to 13345 /// UK_ModAsValue. 13346 struct SequencedSubexpression { 13347 SequencedSubexpression(SequenceChecker &Self) 13348 : Self(Self), OldModAsSideEffect(Self.ModAsSideEffect) { 13349 Self.ModAsSideEffect = &ModAsSideEffect; 13350 } 13351 13352 ~SequencedSubexpression() { 13353 for (const std::pair<Object, Usage> &M : llvm::reverse(ModAsSideEffect)) { 13354 // Add a new usage with usage kind UK_ModAsValue, and then restore 13355 // the previous usage with UK_ModAsSideEffect (thus clearing it if 13356 // the previous one was empty). 13357 UsageInfo &UI = Self.UsageMap[M.first]; 13358 auto &SideEffectUsage = UI.Uses[UK_ModAsSideEffect]; 13359 Self.addUsage(M.first, UI, SideEffectUsage.UsageExpr, UK_ModAsValue); 13360 SideEffectUsage = M.second; 13361 } 13362 Self.ModAsSideEffect = OldModAsSideEffect; 13363 } 13364 13365 SequenceChecker &Self; 13366 SmallVector<std::pair<Object, Usage>, 4> ModAsSideEffect; 13367 SmallVectorImpl<std::pair<Object, Usage>> *OldModAsSideEffect; 13368 }; 13369 13370 /// RAII object wrapping the visitation of a subexpression which we might 13371 /// choose to evaluate as a constant. If any subexpression is evaluated and 13372 /// found to be non-constant, this allows us to suppress the evaluation of 13373 /// the outer expression. 13374 class EvaluationTracker { 13375 public: 13376 EvaluationTracker(SequenceChecker &Self) 13377 : Self(Self), Prev(Self.EvalTracker) { 13378 Self.EvalTracker = this; 13379 } 13380 13381 ~EvaluationTracker() { 13382 Self.EvalTracker = Prev; 13383 if (Prev) 13384 Prev->EvalOK &= EvalOK; 13385 } 13386 13387 bool evaluate(const Expr *E, bool &Result) { 13388 if (!EvalOK || E->isValueDependent()) 13389 return false; 13390 EvalOK = E->EvaluateAsBooleanCondition( 13391 Result, Self.SemaRef.Context, Self.SemaRef.isConstantEvaluated()); 13392 return EvalOK; 13393 } 13394 13395 private: 13396 SequenceChecker &Self; 13397 EvaluationTracker *Prev; 13398 bool EvalOK = true; 13399 } *EvalTracker = nullptr; 13400 13401 /// Find the object which is produced by the specified expression, 13402 /// if any. 
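/// For example, for "++x" (with \p Mod set) this is the declaration of 'x',
/// for a comma expression it is the object produced by the right-hand side,
/// and for "this->n" it is the member declaration of 'n'.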
13403 Object getObject(const Expr *E, bool Mod) const { 13404 E = E->IgnoreParenCasts(); 13405 if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) { 13406 if (Mod && (UO->getOpcode() == UO_PreInc || UO->getOpcode() == UO_PreDec)) 13407 return getObject(UO->getSubExpr(), Mod); 13408 } else if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { 13409 if (BO->getOpcode() == BO_Comma) 13410 return getObject(BO->getRHS(), Mod); 13411 if (Mod && BO->isAssignmentOp()) 13412 return getObject(BO->getLHS(), Mod); 13413 } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) { 13414 // FIXME: Check for more interesting cases, like "x.n = ++x.n". 13415 if (isa<CXXThisExpr>(ME->getBase()->IgnoreParenCasts())) 13416 return ME->getMemberDecl(); 13417 } else if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) 13418 // FIXME: If this is a reference, map through to its value. 13419 return DRE->getDecl(); 13420 return nullptr; 13421 } 13422 13423 /// Note that an object \p O was modified or used by an expression 13424 /// \p UsageExpr with usage kind \p UK. \p UI is the \p UsageInfo for 13425 /// the object \p O as obtained via the \p UsageMap. 13426 void addUsage(Object O, UsageInfo &UI, const Expr *UsageExpr, UsageKind UK) { 13427 // Get the old usage for the given object and usage kind. 13428 Usage &U = UI.Uses[UK]; 13429 if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq)) { 13430 // If we have a modification as side effect and are in a sequenced 13431 // subexpression, save the old Usage so that we can restore it later 13432 // in SequencedSubexpression::~SequencedSubexpression. 13433 if (UK == UK_ModAsSideEffect && ModAsSideEffect) 13434 ModAsSideEffect->push_back(std::make_pair(O, U)); 13435 // Then record the new usage with the current sequencing region. 13436 U.UsageExpr = UsageExpr; 13437 U.Seq = Region; 13438 } 13439 } 13440 13441 /// Check whether a modification or use of an object \p O in an expression 13442 /// \p UsageExpr conflicts with a prior usage of kind \p OtherKind. \p UI is 13443 /// the \p UsageInfo for the object \p O as obtained via the \p UsageMap. 13444 /// \p IsModMod is true when we are checking for a mod-mod unsequenced 13445 /// usage and false we are checking for a mod-use unsequenced usage. 13446 void checkUsage(Object O, UsageInfo &UI, const Expr *UsageExpr, 13447 UsageKind OtherKind, bool IsModMod) { 13448 if (UI.Diagnosed) 13449 return; 13450 13451 const Usage &U = UI.Uses[OtherKind]; 13452 if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq)) 13453 return; 13454 13455 const Expr *Mod = U.UsageExpr; 13456 const Expr *ModOrUse = UsageExpr; 13457 if (OtherKind == UK_Use) 13458 std::swap(Mod, ModOrUse); 13459 13460 SemaRef.DiagRuntimeBehavior( 13461 Mod->getExprLoc(), {Mod, ModOrUse}, 13462 SemaRef.PDiag(IsModMod ? diag::warn_unsequenced_mod_mod 13463 : diag::warn_unsequenced_mod_use) 13464 << O << SourceRange(ModOrUse->getExprLoc())); 13465 UI.Diagnosed = true; 13466 } 13467 13468 // A note on note{Pre, Post}{Use, Mod}: 13469 // 13470 // (It helps to follow the algorithm with an expression such as 13471 // "((++k)++, k) = k" or "k = (k++, k++)". Both contain unsequenced 13472 // operations before C++17 and both are well-defined in C++17). 13473 // 13474 // When visiting a node which uses/modify an object we first call notePreUse 13475 // or notePreMod before visiting its sub-expression(s). 
  // At this point the children of the current node have not yet been visited
  // and so the eventual uses/modifications resulting from the children of the
  // current node have not been recorded yet.
  //
  // We then visit the children of the current node. After that, notePostUse
  // or notePostMod is called. These will 1) detect an unsequenced
  // modification as side effect (as in "k++ + k") and 2) add a new usage with
  // the appropriate usage kind.
  //
  // We also have to be careful that some operations sequence modifications as
  // side effects as well (for example: || or ,). To account for this we wrap
  // the visitation of such a sub-expression (for example: the LHS of || or ,)
  // with SequencedSubexpression. SequencedSubexpression is an RAII object
  // which records usages which are modifications as side effects, and then
  // downgrades them (or more accurately restores the previous usage which was
  // a modification as side effect) when exiting the scope of the sequenced
  // subexpression.

  void notePreUse(Object O, const Expr *UseExpr) {
    UsageInfo &UI = UsageMap[O];
    // Uses conflict with other modifications.
    checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/false);
  }

  void notePostUse(Object O, const Expr *UseExpr) {
    UsageInfo &UI = UsageMap[O];
    checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsSideEffect,
               /*IsModMod=*/false);
    addUsage(O, UI, UseExpr, /*UsageKind=*/UK_Use);
  }

  void notePreMod(Object O, const Expr *ModExpr) {
    UsageInfo &UI = UsageMap[O];
    // Modifications conflict with other modifications and with uses.
    checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/true);
    checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_Use, /*IsModMod=*/false);
  }

  void notePostMod(Object O, const Expr *ModExpr, UsageKind UK) {
    UsageInfo &UI = UsageMap[O];
    checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsSideEffect,
               /*IsModMod=*/true);
    addUsage(O, UI, ModExpr, /*UsageKind=*/UK);
  }

public:
  SequenceChecker(Sema &S, const Expr *E,
                  SmallVectorImpl<const Expr *> &WorkList)
      : Base(S.Context), SemaRef(S), Region(Tree.root()), WorkList(WorkList) {
    Visit(E);
    // Silence a -Wunused-private-field warning since WorkList is now unused.
    // TODO: Evaluate if it can be used, and if not remove it.
    (void)this->WorkList;
  }

  void VisitStmt(const Stmt *S) {
    // Skip all statements which aren't expressions for now.
  }

  void VisitExpr(const Expr *E) {
    // By default, just recurse to evaluated subexpressions.
13537 Base::VisitStmt(E); 13538 } 13539 13540 void VisitCastExpr(const CastExpr *E) { 13541 Object O = Object(); 13542 if (E->getCastKind() == CK_LValueToRValue) 13543 O = getObject(E->getSubExpr(), false); 13544 13545 if (O) 13546 notePreUse(O, E); 13547 VisitExpr(E); 13548 if (O) 13549 notePostUse(O, E); 13550 } 13551 13552 void VisitSequencedExpressions(const Expr *SequencedBefore, 13553 const Expr *SequencedAfter) { 13554 SequenceTree::Seq BeforeRegion = Tree.allocate(Region); 13555 SequenceTree::Seq AfterRegion = Tree.allocate(Region); 13556 SequenceTree::Seq OldRegion = Region; 13557 13558 { 13559 SequencedSubexpression SeqBefore(*this); 13560 Region = BeforeRegion; 13561 Visit(SequencedBefore); 13562 } 13563 13564 Region = AfterRegion; 13565 Visit(SequencedAfter); 13566 13567 Region = OldRegion; 13568 13569 Tree.merge(BeforeRegion); 13570 Tree.merge(AfterRegion); 13571 } 13572 13573 void VisitArraySubscriptExpr(const ArraySubscriptExpr *ASE) { 13574 // C++17 [expr.sub]p1: 13575 // The expression E1[E2] is identical (by definition) to *((E1)+(E2)). The 13576 // expression E1 is sequenced before the expression E2. 13577 if (SemaRef.getLangOpts().CPlusPlus17) 13578 VisitSequencedExpressions(ASE->getLHS(), ASE->getRHS()); 13579 else { 13580 Visit(ASE->getLHS()); 13581 Visit(ASE->getRHS()); 13582 } 13583 } 13584 13585 void VisitBinPtrMemD(const BinaryOperator *BO) { VisitBinPtrMem(BO); } 13586 void VisitBinPtrMemI(const BinaryOperator *BO) { VisitBinPtrMem(BO); } 13587 void VisitBinPtrMem(const BinaryOperator *BO) { 13588 // C++17 [expr.mptr.oper]p4: 13589 // Abbreviating pm-expression.*cast-expression as E1.*E2, [...] 13590 // the expression E1 is sequenced before the expression E2. 13591 if (SemaRef.getLangOpts().CPlusPlus17) 13592 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 13593 else { 13594 Visit(BO->getLHS()); 13595 Visit(BO->getRHS()); 13596 } 13597 } 13598 13599 void VisitBinShl(const BinaryOperator *BO) { VisitBinShlShr(BO); } 13600 void VisitBinShr(const BinaryOperator *BO) { VisitBinShlShr(BO); } 13601 void VisitBinShlShr(const BinaryOperator *BO) { 13602 // C++17 [expr.shift]p4: 13603 // The expression E1 is sequenced before the expression E2. 13604 if (SemaRef.getLangOpts().CPlusPlus17) 13605 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 13606 else { 13607 Visit(BO->getLHS()); 13608 Visit(BO->getRHS()); 13609 } 13610 } 13611 13612 void VisitBinComma(const BinaryOperator *BO) { 13613 // C++11 [expr.comma]p1: 13614 // Every value computation and side effect associated with the left 13615 // expression is sequenced before every value computation and side 13616 // effect associated with the right expression. 13617 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 13618 } 13619 13620 void VisitBinAssign(const BinaryOperator *BO) { 13621 SequenceTree::Seq RHSRegion; 13622 SequenceTree::Seq LHSRegion; 13623 if (SemaRef.getLangOpts().CPlusPlus17) { 13624 RHSRegion = Tree.allocate(Region); 13625 LHSRegion = Tree.allocate(Region); 13626 } else { 13627 RHSRegion = Region; 13628 LHSRegion = Region; 13629 } 13630 SequenceTree::Seq OldRegion = Region; 13631 13632 // C++11 [expr.ass]p1: 13633 // [...] the assignment is sequenced after the value computation 13634 // of the right and left operands, [...] 13635 // 13636 // so check it before inspecting the operands and update the 13637 // map afterwards. 
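    // For example (an illustration of the intended behaviour, not an extra
    // check): "i = i++ + 1" is diagnosed before C++17 because the side effect
    // of "i++" is unsequenced with the assignment to 'i'; in C++17 the right
    // operand is visited in its own sequenced region, so the expression is
    // not diagnosed.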
13638 Object O = getObject(BO->getLHS(), /*Mod=*/true); 13639 if (O) 13640 notePreMod(O, BO); 13641 13642 if (SemaRef.getLangOpts().CPlusPlus17) { 13643 // C++17 [expr.ass]p1: 13644 // [...] The right operand is sequenced before the left operand. [...] 13645 { 13646 SequencedSubexpression SeqBefore(*this); 13647 Region = RHSRegion; 13648 Visit(BO->getRHS()); 13649 } 13650 13651 Region = LHSRegion; 13652 Visit(BO->getLHS()); 13653 13654 if (O && isa<CompoundAssignOperator>(BO)) 13655 notePostUse(O, BO); 13656 13657 } else { 13658 // C++11 does not specify any sequencing between the LHS and RHS. 13659 Region = LHSRegion; 13660 Visit(BO->getLHS()); 13661 13662 if (O && isa<CompoundAssignOperator>(BO)) 13663 notePostUse(O, BO); 13664 13665 Region = RHSRegion; 13666 Visit(BO->getRHS()); 13667 } 13668 13669 // C++11 [expr.ass]p1: 13670 // the assignment is sequenced [...] before the value computation of the 13671 // assignment expression. 13672 // C11 6.5.16/3 has no such rule. 13673 Region = OldRegion; 13674 if (O) 13675 notePostMod(O, BO, 13676 SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue 13677 : UK_ModAsSideEffect); 13678 if (SemaRef.getLangOpts().CPlusPlus17) { 13679 Tree.merge(RHSRegion); 13680 Tree.merge(LHSRegion); 13681 } 13682 } 13683 13684 void VisitCompoundAssignOperator(const CompoundAssignOperator *CAO) { 13685 VisitBinAssign(CAO); 13686 } 13687 13688 void VisitUnaryPreInc(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); } 13689 void VisitUnaryPreDec(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); } 13690 void VisitUnaryPreIncDec(const UnaryOperator *UO) { 13691 Object O = getObject(UO->getSubExpr(), true); 13692 if (!O) 13693 return VisitExpr(UO); 13694 13695 notePreMod(O, UO); 13696 Visit(UO->getSubExpr()); 13697 // C++11 [expr.pre.incr]p1: 13698 // the expression ++x is equivalent to x+=1 13699 notePostMod(O, UO, 13700 SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue 13701 : UK_ModAsSideEffect); 13702 } 13703 13704 void VisitUnaryPostInc(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); } 13705 void VisitUnaryPostDec(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); } 13706 void VisitUnaryPostIncDec(const UnaryOperator *UO) { 13707 Object O = getObject(UO->getSubExpr(), true); 13708 if (!O) 13709 return VisitExpr(UO); 13710 13711 notePreMod(O, UO); 13712 Visit(UO->getSubExpr()); 13713 notePostMod(O, UO, UK_ModAsSideEffect); 13714 } 13715 13716 void VisitBinLOr(const BinaryOperator *BO) { 13717 // C++11 [expr.log.or]p2: 13718 // If the second expression is evaluated, every value computation and 13719 // side effect associated with the first expression is sequenced before 13720 // every value computation and side effect associated with the 13721 // second expression. 13722 SequenceTree::Seq LHSRegion = Tree.allocate(Region); 13723 SequenceTree::Seq RHSRegion = Tree.allocate(Region); 13724 SequenceTree::Seq OldRegion = Region; 13725 13726 EvaluationTracker Eval(*this); 13727 { 13728 SequencedSubexpression Sequenced(*this); 13729 Region = LHSRegion; 13730 Visit(BO->getLHS()); 13731 } 13732 13733 // C++11 [expr.log.or]p1: 13734 // [...] the second operand is not evaluated if the first operand 13735 // evaluates to true. 
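    // Accordingly, if the left operand constant-folds to true we skip the
    // right operand entirely (it would never be evaluated); otherwise we
    // still visit it, so that e.g. "b || (i++ + i)" is diagnosed.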
    bool EvalResult = false;
    bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult);
    bool ShouldVisitRHS = !EvalOK || (EvalOK && !EvalResult);
    if (ShouldVisitRHS) {
      Region = RHSRegion;
      Visit(BO->getRHS());
    }

    Region = OldRegion;
    Tree.merge(LHSRegion);
    Tree.merge(RHSRegion);
  }

  void VisitBinLAnd(const BinaryOperator *BO) {
    // C++11 [expr.log.and]p2:
    //   If the second expression is evaluated, every value computation and
    //   side effect associated with the first expression is sequenced before
    //   every value computation and side effect associated with the
    //   second expression.
    SequenceTree::Seq LHSRegion = Tree.allocate(Region);
    SequenceTree::Seq RHSRegion = Tree.allocate(Region);
    SequenceTree::Seq OldRegion = Region;

    EvaluationTracker Eval(*this);
    {
      SequencedSubexpression Sequenced(*this);
      Region = LHSRegion;
      Visit(BO->getLHS());
    }

    // C++11 [expr.log.and]p1:
    //   [...] the second operand is not evaluated if the first operand is
    //   false.
    bool EvalResult = false;
    bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult);
    bool ShouldVisitRHS = !EvalOK || (EvalOK && EvalResult);
    if (ShouldVisitRHS) {
      Region = RHSRegion;
      Visit(BO->getRHS());
    }

    Region = OldRegion;
    Tree.merge(LHSRegion);
    Tree.merge(RHSRegion);
  }

  void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO) {
    // C++11 [expr.cond]p1:
    //   [...] Every value computation and side effect associated with the
    //   first expression is sequenced before every value computation and side
    //   effect associated with the second or third expression.
    SequenceTree::Seq ConditionRegion = Tree.allocate(Region);

    // No sequencing is specified between the true and false expressions.
    // However, since exactly one of the two is going to be evaluated, we can
    // consider them to be sequenced. This is needed to avoid warning on
    // something like "x ? y += 1 : y += 2;" in the case where we will visit
    // both the true and false expressions because we can't evaluate x.
    // This will still allow us to detect an expression like (pre C++17)
    // "(x ? y += 1 : y += 2) = y".
    //
    // We don't wrap the visitation of the true and false expressions with
    // SequencedSubexpression because we don't want to downgrade modifications
    // as side effect in the true and false expressions after the visitation
    // is done. (For example, in the expression "(x ? y++ : y++) + y" we
    // should not warn between the two "y++", but we should warn between the
    // "y++" and the "y".)
    SequenceTree::Seq TrueRegion = Tree.allocate(Region);
    SequenceTree::Seq FalseRegion = Tree.allocate(Region);
    SequenceTree::Seq OldRegion = Region;

    EvaluationTracker Eval(*this);
    {
      SequencedSubexpression Sequenced(*this);
      Region = ConditionRegion;
      Visit(CO->getCond());
    }

    // C++11 [expr.cond]p1:
    //   [...] The first expression is contextually converted to bool
    //   (Clause 4). It is evaluated and if it is true, the result of the
    //   conditional expression is the value of the second expression,
    //   otherwise that of the third expression. Only one of the second and
    //   third expressions is evaluated. [...]
13819 bool EvalResult = false; 13820 bool EvalOK = Eval.evaluate(CO->getCond(), EvalResult); 13821 bool ShouldVisitTrueExpr = !EvalOK || (EvalOK && EvalResult); 13822 bool ShouldVisitFalseExpr = !EvalOK || (EvalOK && !EvalResult); 13823 if (ShouldVisitTrueExpr) { 13824 Region = TrueRegion; 13825 Visit(CO->getTrueExpr()); 13826 } 13827 if (ShouldVisitFalseExpr) { 13828 Region = FalseRegion; 13829 Visit(CO->getFalseExpr()); 13830 } 13831 13832 Region = OldRegion; 13833 Tree.merge(ConditionRegion); 13834 Tree.merge(TrueRegion); 13835 Tree.merge(FalseRegion); 13836 } 13837 13838 void VisitCallExpr(const CallExpr *CE) { 13839 // FIXME: CXXNewExpr and CXXDeleteExpr implicitly call functions. 13840 13841 if (CE->isUnevaluatedBuiltinCall(Context)) 13842 return; 13843 13844 // C++11 [intro.execution]p15: 13845 // When calling a function [...], every value computation and side effect 13846 // associated with any argument expression, or with the postfix expression 13847 // designating the called function, is sequenced before execution of every 13848 // expression or statement in the body of the function [and thus before 13849 // the value computation of its result]. 13850 SequencedSubexpression Sequenced(*this); 13851 SemaRef.runWithSufficientStackSpace(CE->getExprLoc(), [&] { 13852 // C++17 [expr.call]p5 13853 // The postfix-expression is sequenced before each expression in the 13854 // expression-list and any default argument. [...] 13855 SequenceTree::Seq CalleeRegion; 13856 SequenceTree::Seq OtherRegion; 13857 if (SemaRef.getLangOpts().CPlusPlus17) { 13858 CalleeRegion = Tree.allocate(Region); 13859 OtherRegion = Tree.allocate(Region); 13860 } else { 13861 CalleeRegion = Region; 13862 OtherRegion = Region; 13863 } 13864 SequenceTree::Seq OldRegion = Region; 13865 13866 // Visit the callee expression first. 13867 Region = CalleeRegion; 13868 if (SemaRef.getLangOpts().CPlusPlus17) { 13869 SequencedSubexpression Sequenced(*this); 13870 Visit(CE->getCallee()); 13871 } else { 13872 Visit(CE->getCallee()); 13873 } 13874 13875 // Then visit the argument expressions. 13876 Region = OtherRegion; 13877 for (const Expr *Argument : CE->arguments()) 13878 Visit(Argument); 13879 13880 Region = OldRegion; 13881 if (SemaRef.getLangOpts().CPlusPlus17) { 13882 Tree.merge(CalleeRegion); 13883 Tree.merge(OtherRegion); 13884 } 13885 }); 13886 } 13887 13888 void VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *CXXOCE) { 13889 // C++17 [over.match.oper]p2: 13890 // [...] the operator notation is first transformed to the equivalent 13891 // function-call notation as summarized in Table 12 (where @ denotes one 13892 // of the operators covered in the specified subclause). However, the 13893 // operands are sequenced in the order prescribed for the built-in 13894 // operator (Clause 8). 13895 // 13896 // From the above only overloaded binary operators and overloaded call 13897 // operators have sequencing rules in C++17 that we need to handle 13898 // separately. 
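    // For example (illustration only): with a user-provided operator<<,
    // "out << i++ << i" follows the built-in operator's left-to-right
    // sequencing in C++17, so the two accesses to 'i' must not be reported
    // as unsequenced; before C++17 such a call is handled like any other
    // call expression.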
13899 if (!SemaRef.getLangOpts().CPlusPlus17 || 13900 (CXXOCE->getNumArgs() != 2 && CXXOCE->getOperator() != OO_Call)) 13901 return VisitCallExpr(CXXOCE); 13902 13903 enum { 13904 NoSequencing, 13905 LHSBeforeRHS, 13906 RHSBeforeLHS, 13907 LHSBeforeRest 13908 } SequencingKind; 13909 switch (CXXOCE->getOperator()) { 13910 case OO_Equal: 13911 case OO_PlusEqual: 13912 case OO_MinusEqual: 13913 case OO_StarEqual: 13914 case OO_SlashEqual: 13915 case OO_PercentEqual: 13916 case OO_CaretEqual: 13917 case OO_AmpEqual: 13918 case OO_PipeEqual: 13919 case OO_LessLessEqual: 13920 case OO_GreaterGreaterEqual: 13921 SequencingKind = RHSBeforeLHS; 13922 break; 13923 13924 case OO_LessLess: 13925 case OO_GreaterGreater: 13926 case OO_AmpAmp: 13927 case OO_PipePipe: 13928 case OO_Comma: 13929 case OO_ArrowStar: 13930 case OO_Subscript: 13931 SequencingKind = LHSBeforeRHS; 13932 break; 13933 13934 case OO_Call: 13935 SequencingKind = LHSBeforeRest; 13936 break; 13937 13938 default: 13939 SequencingKind = NoSequencing; 13940 break; 13941 } 13942 13943 if (SequencingKind == NoSequencing) 13944 return VisitCallExpr(CXXOCE); 13945 13946 // This is a call, so all subexpressions are sequenced before the result. 13947 SequencedSubexpression Sequenced(*this); 13948 13949 SemaRef.runWithSufficientStackSpace(CXXOCE->getExprLoc(), [&] { 13950 assert(SemaRef.getLangOpts().CPlusPlus17 && 13951 "Should only get there with C++17 and above!"); 13952 assert((CXXOCE->getNumArgs() == 2 || CXXOCE->getOperator() == OO_Call) && 13953 "Should only get there with an overloaded binary operator" 13954 " or an overloaded call operator!"); 13955 13956 if (SequencingKind == LHSBeforeRest) { 13957 assert(CXXOCE->getOperator() == OO_Call && 13958 "We should only have an overloaded call operator here!"); 13959 13960 // This is very similar to VisitCallExpr, except that we only have the 13961 // C++17 case. The postfix-expression is the first argument of the 13962 // CXXOperatorCallExpr. The expressions in the expression-list, if any, 13963 // are in the following arguments. 13964 // 13965 // Note that we intentionally do not visit the callee expression since 13966 // it is just a decayed reference to a function. 13967 SequenceTree::Seq PostfixExprRegion = Tree.allocate(Region); 13968 SequenceTree::Seq ArgsRegion = Tree.allocate(Region); 13969 SequenceTree::Seq OldRegion = Region; 13970 13971 assert(CXXOCE->getNumArgs() >= 1 && 13972 "An overloaded call operator must have at least one argument" 13973 " for the postfix-expression!"); 13974 const Expr *PostfixExpr = CXXOCE->getArgs()[0]; 13975 llvm::ArrayRef<const Expr *> Args(CXXOCE->getArgs() + 1, 13976 CXXOCE->getNumArgs() - 1); 13977 13978 // Visit the postfix-expression first. 13979 { 13980 Region = PostfixExprRegion; 13981 SequencedSubexpression Sequenced(*this); 13982 Visit(PostfixExpr); 13983 } 13984 13985 // Then visit the argument expressions. 13986 Region = ArgsRegion; 13987 for (const Expr *Arg : Args) 13988 Visit(Arg); 13989 13990 Region = OldRegion; 13991 Tree.merge(PostfixExprRegion); 13992 Tree.merge(ArgsRegion); 13993 } else { 13994 assert(CXXOCE->getNumArgs() == 2 && 13995 "Should only have two arguments here!"); 13996 assert((SequencingKind == LHSBeforeRHS || 13997 SequencingKind == RHSBeforeLHS) && 13998 "Unexpected sequencing kind!"); 13999 14000 // We do not visit the callee expression since it is just a decayed 14001 // reference to a function. 
14002 const Expr *E1 = CXXOCE->getArg(0); 14003 const Expr *E2 = CXXOCE->getArg(1); 14004 if (SequencingKind == RHSBeforeLHS) 14005 std::swap(E1, E2); 14006 14007 return VisitSequencedExpressions(E1, E2); 14008 } 14009 }); 14010 } 14011 14012 void VisitCXXConstructExpr(const CXXConstructExpr *CCE) { 14013 // This is a call, so all subexpressions are sequenced before the result. 14014 SequencedSubexpression Sequenced(*this); 14015 14016 if (!CCE->isListInitialization()) 14017 return VisitExpr(CCE); 14018 14019 // In C++11, list initializations are sequenced. 14020 SmallVector<SequenceTree::Seq, 32> Elts; 14021 SequenceTree::Seq Parent = Region; 14022 for (CXXConstructExpr::const_arg_iterator I = CCE->arg_begin(), 14023 E = CCE->arg_end(); 14024 I != E; ++I) { 14025 Region = Tree.allocate(Parent); 14026 Elts.push_back(Region); 14027 Visit(*I); 14028 } 14029 14030 // Forget that the initializers are sequenced. 14031 Region = Parent; 14032 for (unsigned I = 0; I < Elts.size(); ++I) 14033 Tree.merge(Elts[I]); 14034 } 14035 14036 void VisitInitListExpr(const InitListExpr *ILE) { 14037 if (!SemaRef.getLangOpts().CPlusPlus11) 14038 return VisitExpr(ILE); 14039 14040 // In C++11, list initializations are sequenced. 14041 SmallVector<SequenceTree::Seq, 32> Elts; 14042 SequenceTree::Seq Parent = Region; 14043 for (unsigned I = 0; I < ILE->getNumInits(); ++I) { 14044 const Expr *E = ILE->getInit(I); 14045 if (!E) 14046 continue; 14047 Region = Tree.allocate(Parent); 14048 Elts.push_back(Region); 14049 Visit(E); 14050 } 14051 14052 // Forget that the initializers are sequenced. 14053 Region = Parent; 14054 for (unsigned I = 0; I < Elts.size(); ++I) 14055 Tree.merge(Elts[I]); 14056 } 14057 }; 14058 14059 } // namespace 14060 14061 void Sema::CheckUnsequencedOperations(const Expr *E) { 14062 SmallVector<const Expr *, 8> WorkList; 14063 WorkList.push_back(E); 14064 while (!WorkList.empty()) { 14065 const Expr *Item = WorkList.pop_back_val(); 14066 SequenceChecker(*this, Item, WorkList); 14067 } 14068 } 14069 14070 void Sema::CheckCompletedExpr(Expr *E, SourceLocation CheckLoc, 14071 bool IsConstexpr) { 14072 llvm::SaveAndRestore<bool> ConstantContext( 14073 isConstantEvaluatedOverride, IsConstexpr || isa<ConstantExpr>(E)); 14074 CheckImplicitConversions(E, CheckLoc); 14075 if (!E->isInstantiationDependent()) 14076 CheckUnsequencedOperations(E); 14077 if (!IsConstexpr && !E->isValueDependent()) 14078 CheckForIntOverflow(E); 14079 DiagnoseMisalignedMembers(); 14080 } 14081 14082 void Sema::CheckBitFieldInitialization(SourceLocation InitLoc, 14083 FieldDecl *BitField, 14084 Expr *Init) { 14085 (void) AnalyzeBitFieldAssignment(*this, BitField, Init, InitLoc); 14086 } 14087 14088 static void diagnoseArrayStarInParamType(Sema &S, QualType PType, 14089 SourceLocation Loc) { 14090 if (!PType->isVariablyModifiedType()) 14091 return; 14092 if (const auto *PointerTy = dyn_cast<PointerType>(PType)) { 14093 diagnoseArrayStarInParamType(S, PointerTy->getPointeeType(), Loc); 14094 return; 14095 } 14096 if (const auto *ReferenceTy = dyn_cast<ReferenceType>(PType)) { 14097 diagnoseArrayStarInParamType(S, ReferenceTy->getPointeeType(), Loc); 14098 return; 14099 } 14100 if (const auto *ParenTy = dyn_cast<ParenType>(PType)) { 14101 diagnoseArrayStarInParamType(S, ParenTy->getInnerType(), Loc); 14102 return; 14103 } 14104 14105 const ArrayType *AT = S.Context.getAsArrayType(PType); 14106 if (!AT) 14107 return; 14108 14109 if (AT->getSizeModifier() != ArrayType::Star) { 14110 diagnoseArrayStarInParamType(S, 
AT->getElementType(), Loc); 14111 return; 14112 } 14113 14114 S.Diag(Loc, diag::err_array_star_in_function_definition); 14115 } 14116 14117 /// CheckParmsForFunctionDef - Check that the parameters of the given 14118 /// function are appropriate for the definition of a function. This 14119 /// takes care of any checks that cannot be performed on the 14120 /// declaration itself, e.g., that the types of each of the function 14121 /// parameters are complete. 14122 bool Sema::CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, 14123 bool CheckParameterNames) { 14124 bool HasInvalidParm = false; 14125 for (ParmVarDecl *Param : Parameters) { 14126 // C99 6.7.5.3p4: the parameters in a parameter type list in a 14127 // function declarator that is part of a function definition of 14128 // that function shall not have incomplete type. 14129 // 14130 // This is also C++ [dcl.fct]p6. 14131 if (!Param->isInvalidDecl() && 14132 RequireCompleteType(Param->getLocation(), Param->getType(), 14133 diag::err_typecheck_decl_incomplete_type)) { 14134 Param->setInvalidDecl(); 14135 HasInvalidParm = true; 14136 } 14137 14138 // C99 6.9.1p5: If the declarator includes a parameter type list, the 14139 // declaration of each parameter shall include an identifier. 14140 if (CheckParameterNames && Param->getIdentifier() == nullptr && 14141 !Param->isImplicit() && !getLangOpts().CPlusPlus) { 14142 // Diagnose this as an extension in C17 and earlier. 14143 if (!getLangOpts().C2x) 14144 Diag(Param->getLocation(), diag::ext_parameter_name_omitted_c2x); 14145 } 14146 14147 // C99 6.7.5.3p12: 14148 // If the function declarator is not part of a definition of that 14149 // function, parameters may have incomplete type and may use the [*] 14150 // notation in their sequences of declarator specifiers to specify 14151 // variable length array types. 14152 QualType PType = Param->getOriginalType(); 14153 // FIXME: This diagnostic should point the '[*]' if source-location 14154 // information is added for it. 14155 diagnoseArrayStarInParamType(*this, PType, Param->getLocation()); 14156 14157 // If the parameter is a c++ class type and it has to be destructed in the 14158 // callee function, declare the destructor so that it can be called by the 14159 // callee function. Do not perform any direct access check on the dtor here. 14160 if (!Param->isInvalidDecl()) { 14161 if (CXXRecordDecl *ClassDecl = Param->getType()->getAsCXXRecordDecl()) { 14162 if (!ClassDecl->isInvalidDecl() && 14163 !ClassDecl->hasIrrelevantDestructor() && 14164 !ClassDecl->isDependentContext() && 14165 ClassDecl->isParamDestroyedInCallee()) { 14166 CXXDestructorDecl *Destructor = LookupDestructor(ClassDecl); 14167 MarkFunctionReferenced(Param->getLocation(), Destructor); 14168 DiagnoseUseOfDecl(Destructor, Param->getLocation()); 14169 } 14170 } 14171 } 14172 14173 // Parameters with the pass_object_size attribute only need to be marked 14174 // constant at function definitions. Because we lack information about 14175 // whether we're on a declaration or definition when we're instantiating the 14176 // attribute, we need to check for constness here. 14177 if (const auto *Attr = Param->getAttr<PassObjectSizeAttr>()) 14178 if (!Param->getType().isConstQualified()) 14179 Diag(Param->getLocation(), diag::err_attribute_pointers_only) 14180 << Attr->getSpelling() << 1; 14181 14182 // Check for parameter names shadowing fields from the class. 
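    // (For instance, a parameter that reuses the name of a field inherited
    // from a base class of the enclosing record.)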
14183 if (LangOpts.CPlusPlus && !Param->isInvalidDecl()) { 14184 // The owning context for the parameter should be the function, but we 14185 // want to see if this function's declaration context is a record. 14186 DeclContext *DC = Param->getDeclContext(); 14187 if (DC && DC->isFunctionOrMethod()) { 14188 if (auto *RD = dyn_cast<CXXRecordDecl>(DC->getParent())) 14189 CheckShadowInheritedFields(Param->getLocation(), Param->getDeclName(), 14190 RD, /*DeclIsField*/ false); 14191 } 14192 } 14193 } 14194 14195 return HasInvalidParm; 14196 } 14197 14198 Optional<std::pair<CharUnits, CharUnits>> 14199 static getBaseAlignmentAndOffsetFromPtr(const Expr *E, ASTContext &Ctx); 14200 14201 /// Compute the alignment and offset of the base class object given the 14202 /// derived-to-base cast expression and the alignment and offset of the derived 14203 /// class object. 14204 static std::pair<CharUnits, CharUnits> 14205 getDerivedToBaseAlignmentAndOffset(const CastExpr *CE, QualType DerivedType, 14206 CharUnits BaseAlignment, CharUnits Offset, 14207 ASTContext &Ctx) { 14208 for (auto PathI = CE->path_begin(), PathE = CE->path_end(); PathI != PathE; 14209 ++PathI) { 14210 const CXXBaseSpecifier *Base = *PathI; 14211 const CXXRecordDecl *BaseDecl = Base->getType()->getAsCXXRecordDecl(); 14212 if (Base->isVirtual()) { 14213 // The complete object may have a lower alignment than the non-virtual 14214 // alignment of the base, in which case the base may be misaligned. Choose 14215 // the smaller of the non-virtual alignment and BaseAlignment, which is a 14216 // conservative lower bound of the complete object alignment. 14217 CharUnits NonVirtualAlignment = 14218 Ctx.getASTRecordLayout(BaseDecl).getNonVirtualAlignment(); 14219 BaseAlignment = std::min(BaseAlignment, NonVirtualAlignment); 14220 Offset = CharUnits::Zero(); 14221 } else { 14222 const ASTRecordLayout &RL = 14223 Ctx.getASTRecordLayout(DerivedType->getAsCXXRecordDecl()); 14224 Offset += RL.getBaseClassOffset(BaseDecl); 14225 } 14226 DerivedType = Base->getType(); 14227 } 14228 14229 return std::make_pair(BaseAlignment, Offset); 14230 } 14231 14232 /// Compute the alignment and offset of a binary additive operator. 14233 static Optional<std::pair<CharUnits, CharUnits>> 14234 getAlignmentAndOffsetFromBinAddOrSub(const Expr *PtrE, const Expr *IntE, 14235 bool IsSub, ASTContext &Ctx) { 14236 QualType PointeeType = PtrE->getType()->getPointeeType(); 14237 14238 if (!PointeeType->isConstantSizeType()) 14239 return llvm::None; 14240 14241 auto P = getBaseAlignmentAndOffsetFromPtr(PtrE, Ctx); 14242 14243 if (!P) 14244 return llvm::None; 14245 14246 CharUnits EltSize = Ctx.getTypeSizeInChars(PointeeType); 14247 if (Optional<llvm::APSInt> IdxRes = IntE->getIntegerConstantExpr(Ctx)) { 14248 CharUnits Offset = EltSize * IdxRes->getExtValue(); 14249 if (IsSub) 14250 Offset = -Offset; 14251 return std::make_pair(P->first, P->second + Offset); 14252 } 14253 14254 // If the integer expression isn't a constant expression, compute the lower 14255 // bound of the alignment using the alignment and offset of the pointer 14256 // expression and the element size. 14257 return std::make_pair( 14258 P->first.alignmentAtOffset(P->second).alignmentAtOffset(EltSize), 14259 CharUnits::Zero()); 14260 } 14261 14262 /// This helper function takes an lvalue expression and returns the alignment of 14263 /// a VarDecl and a constant offset from the VarDecl. 
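/// For example, given "struct S { char c; int i; } s;", the lvalue "s.i"
/// would map to the declared alignment of 's' plus the byte offset of the
/// field 'i' (4 bytes on a typical target where int is 4 bytes wide and
/// 4-byte aligned).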
14264 Optional<std::pair<CharUnits, CharUnits>> 14265 static getBaseAlignmentAndOffsetFromLValue(const Expr *E, ASTContext &Ctx) { 14266 E = E->IgnoreParens(); 14267 switch (E->getStmtClass()) { 14268 default: 14269 break; 14270 case Stmt::CStyleCastExprClass: 14271 case Stmt::CXXStaticCastExprClass: 14272 case Stmt::ImplicitCastExprClass: { 14273 auto *CE = cast<CastExpr>(E); 14274 const Expr *From = CE->getSubExpr(); 14275 switch (CE->getCastKind()) { 14276 default: 14277 break; 14278 case CK_NoOp: 14279 return getBaseAlignmentAndOffsetFromLValue(From, Ctx); 14280 case CK_UncheckedDerivedToBase: 14281 case CK_DerivedToBase: { 14282 auto P = getBaseAlignmentAndOffsetFromLValue(From, Ctx); 14283 if (!P) 14284 break; 14285 return getDerivedToBaseAlignmentAndOffset(CE, From->getType(), P->first, 14286 P->second, Ctx); 14287 } 14288 } 14289 break; 14290 } 14291 case Stmt::ArraySubscriptExprClass: { 14292 auto *ASE = cast<ArraySubscriptExpr>(E); 14293 return getAlignmentAndOffsetFromBinAddOrSub(ASE->getBase(), ASE->getIdx(), 14294 false, Ctx); 14295 } 14296 case Stmt::DeclRefExprClass: { 14297 if (auto *VD = dyn_cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl())) { 14298 // FIXME: If VD is captured by copy or is an escaping __block variable, 14299 // use the alignment of VD's type. 14300 if (!VD->getType()->isReferenceType()) 14301 return std::make_pair(Ctx.getDeclAlign(VD), CharUnits::Zero()); 14302 if (VD->hasInit()) 14303 return getBaseAlignmentAndOffsetFromLValue(VD->getInit(), Ctx); 14304 } 14305 break; 14306 } 14307 case Stmt::MemberExprClass: { 14308 auto *ME = cast<MemberExpr>(E); 14309 auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl()); 14310 if (!FD || FD->getType()->isReferenceType()) 14311 break; 14312 Optional<std::pair<CharUnits, CharUnits>> P; 14313 if (ME->isArrow()) 14314 P = getBaseAlignmentAndOffsetFromPtr(ME->getBase(), Ctx); 14315 else 14316 P = getBaseAlignmentAndOffsetFromLValue(ME->getBase(), Ctx); 14317 if (!P) 14318 break; 14319 const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(FD->getParent()); 14320 uint64_t Offset = Layout.getFieldOffset(FD->getFieldIndex()); 14321 return std::make_pair(P->first, 14322 P->second + CharUnits::fromQuantity(Offset)); 14323 } 14324 case Stmt::UnaryOperatorClass: { 14325 auto *UO = cast<UnaryOperator>(E); 14326 switch (UO->getOpcode()) { 14327 default: 14328 break; 14329 case UO_Deref: 14330 return getBaseAlignmentAndOffsetFromPtr(UO->getSubExpr(), Ctx); 14331 } 14332 break; 14333 } 14334 case Stmt::BinaryOperatorClass: { 14335 auto *BO = cast<BinaryOperator>(E); 14336 auto Opcode = BO->getOpcode(); 14337 switch (Opcode) { 14338 default: 14339 break; 14340 case BO_Comma: 14341 return getBaseAlignmentAndOffsetFromLValue(BO->getRHS(), Ctx); 14342 } 14343 break; 14344 } 14345 } 14346 return llvm::None; 14347 } 14348 14349 /// This helper function takes a pointer expression and returns the alignment of 14350 /// a VarDecl and a constant offset from the VarDecl. 
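/// For example, "&s.i" is handled via the lvalue path above, and pointer
/// arithmetic such as "p + idx" folds a constant index into the offset or,
/// when the index is not a constant, falls back to a conservative alignment
/// with a zero offset.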
14351 Optional<std::pair<CharUnits, CharUnits>> 14352 static getBaseAlignmentAndOffsetFromPtr(const Expr *E, ASTContext &Ctx) { 14353 E = E->IgnoreParens(); 14354 switch (E->getStmtClass()) { 14355 default: 14356 break; 14357 case Stmt::CStyleCastExprClass: 14358 case Stmt::CXXStaticCastExprClass: 14359 case Stmt::ImplicitCastExprClass: { 14360 auto *CE = cast<CastExpr>(E); 14361 const Expr *From = CE->getSubExpr(); 14362 switch (CE->getCastKind()) { 14363 default: 14364 break; 14365 case CK_NoOp: 14366 return getBaseAlignmentAndOffsetFromPtr(From, Ctx); 14367 case CK_ArrayToPointerDecay: 14368 return getBaseAlignmentAndOffsetFromLValue(From, Ctx); 14369 case CK_UncheckedDerivedToBase: 14370 case CK_DerivedToBase: { 14371 auto P = getBaseAlignmentAndOffsetFromPtr(From, Ctx); 14372 if (!P) 14373 break; 14374 return getDerivedToBaseAlignmentAndOffset( 14375 CE, From->getType()->getPointeeType(), P->first, P->second, Ctx); 14376 } 14377 } 14378 break; 14379 } 14380 case Stmt::CXXThisExprClass: { 14381 auto *RD = E->getType()->getPointeeType()->getAsCXXRecordDecl(); 14382 CharUnits Alignment = Ctx.getASTRecordLayout(RD).getNonVirtualAlignment(); 14383 return std::make_pair(Alignment, CharUnits::Zero()); 14384 } 14385 case Stmt::UnaryOperatorClass: { 14386 auto *UO = cast<UnaryOperator>(E); 14387 if (UO->getOpcode() == UO_AddrOf) 14388 return getBaseAlignmentAndOffsetFromLValue(UO->getSubExpr(), Ctx); 14389 break; 14390 } 14391 case Stmt::BinaryOperatorClass: { 14392 auto *BO = cast<BinaryOperator>(E); 14393 auto Opcode = BO->getOpcode(); 14394 switch (Opcode) { 14395 default: 14396 break; 14397 case BO_Add: 14398 case BO_Sub: { 14399 const Expr *LHS = BO->getLHS(), *RHS = BO->getRHS(); 14400 if (Opcode == BO_Add && !RHS->getType()->isIntegralOrEnumerationType()) 14401 std::swap(LHS, RHS); 14402 return getAlignmentAndOffsetFromBinAddOrSub(LHS, RHS, Opcode == BO_Sub, 14403 Ctx); 14404 } 14405 case BO_Comma: 14406 return getBaseAlignmentAndOffsetFromPtr(BO->getRHS(), Ctx); 14407 } 14408 break; 14409 } 14410 } 14411 return llvm::None; 14412 } 14413 14414 static CharUnits getPresumedAlignmentOfPointer(const Expr *E, Sema &S) { 14415 // See if we can compute the alignment of a VarDecl and an offset from it. 14416 Optional<std::pair<CharUnits, CharUnits>> P = 14417 getBaseAlignmentAndOffsetFromPtr(E, S.Context); 14418 14419 if (P) 14420 return P->first.alignmentAtOffset(P->second); 14421 14422 // If that failed, return the type's alignment. 14423 return S.Context.getTypeAlignInChars(E->getType()->getPointeeType()); 14424 } 14425 14426 /// CheckCastAlign - Implements -Wcast-align, which warns when a 14427 /// pointer cast increases the alignment requirements. 14428 void Sema::CheckCastAlign(Expr *Op, QualType T, SourceRange TRange) { 14429 // This is actually a lot of work to potentially be doing on every 14430 // cast; don't do it if we're ignoring -Wcast_align (as is the default). 14431 if (getDiagnostics().isIgnored(diag::warn_cast_align, TRange.getBegin())) 14432 return; 14433 14434 // Ignore dependent types. 14435 if (T->isDependentType() || Op->getType()->isDependentType()) 14436 return; 14437 14438 // Require that the destination be a pointer type. 14439 const PointerType *DestPtr = T->getAs<PointerType>(); 14440 if (!DestPtr) return; 14441 14442 // If the destination has alignment 1, we're done. 
14443 QualType DestPointee = DestPtr->getPointeeType(); 14444 if (DestPointee->isIncompleteType()) return; 14445 CharUnits DestAlign = Context.getTypeAlignInChars(DestPointee); 14446 if (DestAlign.isOne()) return; 14447 14448 // Require that the source be a pointer type. 14449 const PointerType *SrcPtr = Op->getType()->getAs<PointerType>(); 14450 if (!SrcPtr) return; 14451 QualType SrcPointee = SrcPtr->getPointeeType(); 14452 14453 // Explicitly allow casts from cv void*. We already implicitly 14454 // allowed casts to cv void*, since they have alignment 1. 14455 // Also allow casts involving incomplete types, which implicitly 14456 // includes 'void'. 14457 if (SrcPointee->isIncompleteType()) return; 14458 14459 CharUnits SrcAlign = getPresumedAlignmentOfPointer(Op, *this); 14460 14461 if (SrcAlign >= DestAlign) return; 14462 14463 Diag(TRange.getBegin(), diag::warn_cast_align) 14464 << Op->getType() << T 14465 << static_cast<unsigned>(SrcAlign.getQuantity()) 14466 << static_cast<unsigned>(DestAlign.getQuantity()) 14467 << TRange << Op->getSourceRange(); 14468 } 14469 14470 /// Check whether this array fits the idiom of a size-one tail padded 14471 /// array member of a struct. 14472 /// 14473 /// We avoid emitting out-of-bounds access warnings for such arrays as they are 14474 /// commonly used to emulate flexible arrays in C89 code. 14475 static bool IsTailPaddedMemberArray(Sema &S, const llvm::APInt &Size, 14476 const NamedDecl *ND) { 14477 if (Size != 1 || !ND) return false; 14478 14479 const FieldDecl *FD = dyn_cast<FieldDecl>(ND); 14480 if (!FD) return false; 14481 14482 // Don't consider sizes resulting from macro expansions or template argument 14483 // substitution to form C89 tail-padded arrays. 14484 14485 TypeSourceInfo *TInfo = FD->getTypeSourceInfo(); 14486 while (TInfo) { 14487 TypeLoc TL = TInfo->getTypeLoc(); 14488 // Look through typedefs. 14489 if (TypedefTypeLoc TTL = TL.getAs<TypedefTypeLoc>()) { 14490 const TypedefNameDecl *TDL = TTL.getTypedefNameDecl(); 14491 TInfo = TDL->getTypeSourceInfo(); 14492 continue; 14493 } 14494 if (ConstantArrayTypeLoc CTL = TL.getAs<ConstantArrayTypeLoc>()) { 14495 const Expr *SizeExpr = dyn_cast<IntegerLiteral>(CTL.getSizeExpr()); 14496 if (!SizeExpr || SizeExpr->getExprLoc().isMacroID()) 14497 return false; 14498 } 14499 break; 14500 } 14501 14502 const RecordDecl *RD = dyn_cast<RecordDecl>(FD->getDeclContext()); 14503 if (!RD) return false; 14504 if (RD->isUnion()) return false; 14505 if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) { 14506 if (!CRD->isStandardLayout()) return false; 14507 } 14508 14509 // See if this is the last field decl in the record. 14510 const Decl *D = FD; 14511 while ((D = D->getNextDeclInContext())) 14512 if (isa<FieldDecl>(D)) 14513 return false; 14514 return true; 14515 } 14516 14517 void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, 14518 const ArraySubscriptExpr *ASE, 14519 bool AllowOnePastEnd, bool IndexNegated) { 14520 // Already diagnosed by the constant evaluator. 
14521 if (isConstantEvaluated()) 14522 return; 14523 14524 IndexExpr = IndexExpr->IgnoreParenImpCasts(); 14525 if (IndexExpr->isValueDependent()) 14526 return; 14527 14528 const Type *EffectiveType = 14529 BaseExpr->getType()->getPointeeOrArrayElementType(); 14530 BaseExpr = BaseExpr->IgnoreParenCasts(); 14531 const ConstantArrayType *ArrayTy = 14532 Context.getAsConstantArrayType(BaseExpr->getType()); 14533 14534 if (!ArrayTy) 14535 return; 14536 14537 const Type *BaseType = ArrayTy->getElementType().getTypePtr(); 14538 if (EffectiveType->isDependentType() || BaseType->isDependentType()) 14539 return; 14540 14541 Expr::EvalResult Result; 14542 if (!IndexExpr->EvaluateAsInt(Result, Context, Expr::SE_AllowSideEffects)) 14543 return; 14544 14545 llvm::APSInt index = Result.Val.getInt(); 14546 if (IndexNegated) 14547 index = -index; 14548 14549 const NamedDecl *ND = nullptr; 14550 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 14551 ND = DRE->getDecl(); 14552 if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr)) 14553 ND = ME->getMemberDecl(); 14554 14555 if (index.isUnsigned() || !index.isNegative()) { 14556 // It is possible that the type of the base expression after 14557 // IgnoreParenCasts is incomplete, even though the type of the base 14558 // expression before IgnoreParenCasts is complete (see PR39746 for an 14559 // example). In this case we have no information about whether the array 14560 // access exceeds the array bounds. However we can still diagnose an array 14561 // access which precedes the array bounds. 14562 if (BaseType->isIncompleteType()) 14563 return; 14564 14565 llvm::APInt size = ArrayTy->getSize(); 14566 if (!size.isStrictlyPositive()) 14567 return; 14568 14569 if (BaseType != EffectiveType) { 14570 // Make sure we're comparing apples to apples when comparing index to size 14571 uint64_t ptrarith_typesize = Context.getTypeSize(EffectiveType); 14572 uint64_t array_typesize = Context.getTypeSize(BaseType); 14573 // Handle ptrarith_typesize being zero, such as when casting to void* 14574 if (!ptrarith_typesize) ptrarith_typesize = 1; 14575 if (ptrarith_typesize != array_typesize) { 14576 // There's a cast to a different size type involved 14577 uint64_t ratio = array_typesize / ptrarith_typesize; 14578 // TODO: Be smarter about handling cases where array_typesize is not a 14579 // multiple of ptrarith_typesize 14580 if (ptrarith_typesize * ratio == array_typesize) 14581 size *= llvm::APInt(size.getBitWidth(), ratio); 14582 } 14583 } 14584 14585 if (size.getBitWidth() > index.getBitWidth()) 14586 index = index.zext(size.getBitWidth()); 14587 else if (size.getBitWidth() < index.getBitWidth()) 14588 size = size.zext(index.getBitWidth()); 14589 14590 // For array subscripting the index must be less than size, but for pointer 14591 // arithmetic also allow the index (offset) to be equal to size since 14592 // computing the next address after the end of the array is legal and 14593 // commonly done e.g. in C++ iterators and range-based for loops. 14594 if (AllowOnePastEnd ? index.ule(size) : index.ult(size)) 14595 return; 14596 14597 // Also don't warn for arrays of size 1 which are members of some 14598 // structure. These are often used to approximate flexible arrays in C89 14599 // code. 14600 if (IsTailPaddedMemberArray(*this, size, ND)) 14601 return; 14602 14603 // Suppress the warning if the subscript expression (as identified by the 14604 // ']' location) and the index expression are both from macro expansions 14605 // within a system header. 
14606 if (ASE) { 14607 SourceLocation RBracketLoc = SourceMgr.getSpellingLoc( 14608 ASE->getRBracketLoc()); 14609 if (SourceMgr.isInSystemHeader(RBracketLoc)) { 14610 SourceLocation IndexLoc = 14611 SourceMgr.getSpellingLoc(IndexExpr->getBeginLoc()); 14612 if (SourceMgr.isWrittenInSameFile(RBracketLoc, IndexLoc)) 14613 return; 14614 } 14615 } 14616 14617 unsigned DiagID = diag::warn_ptr_arith_exceeds_bounds; 14618 if (ASE) 14619 DiagID = diag::warn_array_index_exceeds_bounds; 14620 14621 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 14622 PDiag(DiagID) << index.toString(10, true) 14623 << size.toString(10, true) 14624 << (unsigned)size.getLimitedValue(~0U) 14625 << IndexExpr->getSourceRange()); 14626 } else { 14627 unsigned DiagID = diag::warn_array_index_precedes_bounds; 14628 if (!ASE) { 14629 DiagID = diag::warn_ptr_arith_precedes_bounds; 14630 if (index.isNegative()) index = -index; 14631 } 14632 14633 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 14634 PDiag(DiagID) << index.toString(10, true) 14635 << IndexExpr->getSourceRange()); 14636 } 14637 14638 if (!ND) { 14639 // Try harder to find a NamedDecl to point at in the note. 14640 while (const ArraySubscriptExpr *ASE = 14641 dyn_cast<ArraySubscriptExpr>(BaseExpr)) 14642 BaseExpr = ASE->getBase()->IgnoreParenCasts(); 14643 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 14644 ND = DRE->getDecl(); 14645 if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr)) 14646 ND = ME->getMemberDecl(); 14647 } 14648 14649 if (ND) 14650 DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr, 14651 PDiag(diag::note_array_declared_here) << ND); 14652 } 14653 14654 void Sema::CheckArrayAccess(const Expr *expr) { 14655 int AllowOnePastEnd = 0; 14656 while (expr) { 14657 expr = expr->IgnoreParenImpCasts(); 14658 switch (expr->getStmtClass()) { 14659 case Stmt::ArraySubscriptExprClass: { 14660 const ArraySubscriptExpr *ASE = cast<ArraySubscriptExpr>(expr); 14661 CheckArrayAccess(ASE->getBase(), ASE->getIdx(), ASE, 14662 AllowOnePastEnd > 0); 14663 expr = ASE->getBase(); 14664 break; 14665 } 14666 case Stmt::MemberExprClass: { 14667 expr = cast<MemberExpr>(expr)->getBase(); 14668 break; 14669 } 14670 case Stmt::OMPArraySectionExprClass: { 14671 const OMPArraySectionExpr *ASE = cast<OMPArraySectionExpr>(expr); 14672 if (ASE->getLowerBound()) 14673 CheckArrayAccess(ASE->getBase(), ASE->getLowerBound(), 14674 /*ASE=*/nullptr, AllowOnePastEnd > 0); 14675 return; 14676 } 14677 case Stmt::UnaryOperatorClass: { 14678 // Only unwrap the * and & unary operators 14679 const UnaryOperator *UO = cast<UnaryOperator>(expr); 14680 expr = UO->getSubExpr(); 14681 switch (UO->getOpcode()) { 14682 case UO_AddrOf: 14683 AllowOnePastEnd++; 14684 break; 14685 case UO_Deref: 14686 AllowOnePastEnd--; 14687 break; 14688 default: 14689 return; 14690 } 14691 break; 14692 } 14693 case Stmt::ConditionalOperatorClass: { 14694 const ConditionalOperator *cond = cast<ConditionalOperator>(expr); 14695 if (const Expr *lhs = cond->getLHS()) 14696 CheckArrayAccess(lhs); 14697 if (const Expr *rhs = cond->getRHS()) 14698 CheckArrayAccess(rhs); 14699 return; 14700 } 14701 case Stmt::CXXOperatorCallExprClass: { 14702 const auto *OCE = cast<CXXOperatorCallExpr>(expr); 14703 for (const auto *Arg : OCE->arguments()) 14704 CheckArrayAccess(Arg); 14705 return; 14706 } 14707 default: 14708 return; 14709 } 14710 } 14711 } 14712 14713 //===--- CHECK: Objective-C retain cycles ----------------------------------// 14714 14715 namespace { 14716 14717 struct RetainCycleOwner { 
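  /// The variable which (directly or, if Indirect is set, transitively)
  /// holds a strong reference to the object in question; null until an
  /// owner has been identified.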
14718 VarDecl *Variable = nullptr; 14719 SourceRange Range; 14720 SourceLocation Loc; 14721 bool Indirect = false; 14722 14723 RetainCycleOwner() = default; 14724 14725 void setLocsFrom(Expr *e) { 14726 Loc = e->getExprLoc(); 14727 Range = e->getSourceRange(); 14728 } 14729 }; 14730 14731 } // namespace 14732 14733 /// Consider whether capturing the given variable can possibly lead to 14734 /// a retain cycle. 14735 static bool considerVariable(VarDecl *var, Expr *ref, RetainCycleOwner &owner) { 14736 // In ARC, it's captured strongly iff the variable has __strong 14737 // lifetime. In MRR, it's captured strongly if the variable is 14738 // __block and has an appropriate type. 14739 if (var->getType().getObjCLifetime() != Qualifiers::OCL_Strong) 14740 return false; 14741 14742 owner.Variable = var; 14743 if (ref) 14744 owner.setLocsFrom(ref); 14745 return true; 14746 } 14747 14748 static bool findRetainCycleOwner(Sema &S, Expr *e, RetainCycleOwner &owner) { 14749 while (true) { 14750 e = e->IgnoreParens(); 14751 if (CastExpr *cast = dyn_cast<CastExpr>(e)) { 14752 switch (cast->getCastKind()) { 14753 case CK_BitCast: 14754 case CK_LValueBitCast: 14755 case CK_LValueToRValue: 14756 case CK_ARCReclaimReturnedObject: 14757 e = cast->getSubExpr(); 14758 continue; 14759 14760 default: 14761 return false; 14762 } 14763 } 14764 14765 if (ObjCIvarRefExpr *ref = dyn_cast<ObjCIvarRefExpr>(e)) { 14766 ObjCIvarDecl *ivar = ref->getDecl(); 14767 if (ivar->getType().getObjCLifetime() != Qualifiers::OCL_Strong) 14768 return false; 14769 14770 // Try to find a retain cycle in the base. 14771 if (!findRetainCycleOwner(S, ref->getBase(), owner)) 14772 return false; 14773 14774 if (ref->isFreeIvar()) owner.setLocsFrom(ref); 14775 owner.Indirect = true; 14776 return true; 14777 } 14778 14779 if (DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e)) { 14780 VarDecl *var = dyn_cast<VarDecl>(ref->getDecl()); 14781 if (!var) return false; 14782 return considerVariable(var, ref, owner); 14783 } 14784 14785 if (MemberExpr *member = dyn_cast<MemberExpr>(e)) { 14786 if (member->isArrow()) return false; 14787 14788 // Don't count this as an indirect ownership. 14789 e = member->getBase(); 14790 continue; 14791 } 14792 14793 if (PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) { 14794 // Only pay attention to pseudo-objects on property references. 14795 ObjCPropertyRefExpr *pre 14796 = dyn_cast<ObjCPropertyRefExpr>(pseudo->getSyntacticForm() 14797 ->IgnoreParens()); 14798 if (!pre) return false; 14799 if (pre->isImplicitProperty()) return false; 14800 ObjCPropertyDecl *property = pre->getExplicitProperty(); 14801 if (!property->isRetaining() && 14802 !(property->getPropertyIvarDecl() && 14803 property->getPropertyIvarDecl()->getType() 14804 .getObjCLifetime() == Qualifiers::OCL_Strong)) 14805 return false; 14806 14807 owner.Indirect = true; 14808 if (pre->isSuperReceiver()) { 14809 owner.Variable = S.getCurMethodDecl()->getSelfDecl(); 14810 if (!owner.Variable) 14811 return false; 14812 owner.Loc = pre->getLocation(); 14813 owner.Range = pre->getSourceRange(); 14814 return true; 14815 } 14816 e = const_cast<Expr*>(cast<OpaqueValueExpr>(pre->getBase()) 14817 ->getSourceExpr()); 14818 continue; 14819 } 14820 14821 // Array ivars? 
14822 14823 return false; 14824 } 14825 } 14826 14827 namespace { 14828 14829 struct FindCaptureVisitor : EvaluatedExprVisitor<FindCaptureVisitor> { 14830 ASTContext &Context; 14831 VarDecl *Variable; 14832 Expr *Capturer = nullptr; 14833 bool VarWillBeReased = false; 14834 14835 FindCaptureVisitor(ASTContext &Context, VarDecl *variable) 14836 : EvaluatedExprVisitor<FindCaptureVisitor>(Context), 14837 Context(Context), Variable(variable) {} 14838 14839 void VisitDeclRefExpr(DeclRefExpr *ref) { 14840 if (ref->getDecl() == Variable && !Capturer) 14841 Capturer = ref; 14842 } 14843 14844 void VisitObjCIvarRefExpr(ObjCIvarRefExpr *ref) { 14845 if (Capturer) return; 14846 Visit(ref->getBase()); 14847 if (Capturer && ref->isFreeIvar()) 14848 Capturer = ref; 14849 } 14850 14851 void VisitBlockExpr(BlockExpr *block) { 14852 // Look inside nested blocks 14853 if (block->getBlockDecl()->capturesVariable(Variable)) 14854 Visit(block->getBlockDecl()->getBody()); 14855 } 14856 14857 void VisitOpaqueValueExpr(OpaqueValueExpr *OVE) { 14858 if (Capturer) return; 14859 if (OVE->getSourceExpr()) 14860 Visit(OVE->getSourceExpr()); 14861 } 14862 14863 void VisitBinaryOperator(BinaryOperator *BinOp) { 14864 if (!Variable || VarWillBeReased || BinOp->getOpcode() != BO_Assign) 14865 return; 14866 Expr *LHS = BinOp->getLHS(); 14867 if (const DeclRefExpr *DRE = dyn_cast_or_null<DeclRefExpr>(LHS)) { 14868 if (DRE->getDecl() != Variable) 14869 return; 14870 if (Expr *RHS = BinOp->getRHS()) { 14871 RHS = RHS->IgnoreParenCasts(); 14872 Optional<llvm::APSInt> Value; 14873 VarWillBeReased = 14874 (RHS && (Value = RHS->getIntegerConstantExpr(Context)) && 14875 *Value == 0); 14876 } 14877 } 14878 } 14879 }; 14880 14881 } // namespace 14882 14883 /// Check whether the given argument is a block which captures a 14884 /// variable. 14885 static Expr *findCapturingExpr(Sema &S, Expr *e, RetainCycleOwner &owner) { 14886 assert(owner.Variable && owner.Loc.isValid()); 14887 14888 e = e->IgnoreParenCasts(); 14889 14890 // Look through [^{...} copy] and Block_copy(^{...}). 14891 if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(e)) { 14892 Selector Cmd = ME->getSelector(); 14893 if (Cmd.isUnarySelector() && Cmd.getNameForSlot(0) == "copy") { 14894 e = ME->getInstanceReceiver(); 14895 if (!e) 14896 return nullptr; 14897 e = e->IgnoreParenCasts(); 14898 } 14899 } else if (CallExpr *CE = dyn_cast<CallExpr>(e)) { 14900 if (CE->getNumArgs() == 1) { 14901 FunctionDecl *Fn = dyn_cast_or_null<FunctionDecl>(CE->getCalleeDecl()); 14902 if (Fn) { 14903 const IdentifierInfo *FnI = Fn->getIdentifier(); 14904 if (FnI && FnI->isStr("_Block_copy")) { 14905 e = CE->getArg(0)->IgnoreParenCasts(); 14906 } 14907 } 14908 } 14909 } 14910 14911 BlockExpr *block = dyn_cast<BlockExpr>(e); 14912 if (!block || !block->getBlockDecl()->capturesVariable(owner.Variable)) 14913 return nullptr; 14914 14915 FindCaptureVisitor visitor(S.Context, owner.Variable); 14916 visitor.Visit(block->getBlockDecl()->getBody()); 14917 return visitor.VarWillBeReased ? 
nullptr : visitor.Capturer; 14918 } 14919 14920 static void diagnoseRetainCycle(Sema &S, Expr *capturer, 14921 RetainCycleOwner &owner) { 14922 assert(capturer); 14923 assert(owner.Variable && owner.Loc.isValid()); 14924 14925 S.Diag(capturer->getExprLoc(), diag::warn_arc_retain_cycle) 14926 << owner.Variable << capturer->getSourceRange(); 14927 S.Diag(owner.Loc, diag::note_arc_retain_cycle_owner) 14928 << owner.Indirect << owner.Range; 14929 } 14930 14931 /// Check for a keyword selector that starts with the word 'add' or 14932 /// 'set'. 14933 static bool isSetterLikeSelector(Selector sel) { 14934 if (sel.isUnarySelector()) return false; 14935 14936 StringRef str = sel.getNameForSlot(0); 14937 while (!str.empty() && str.front() == '_') str = str.substr(1); 14938 if (str.startswith("set")) 14939 str = str.substr(3); 14940 else if (str.startswith("add")) { 14941 // Specially allow 'addOperationWithBlock:'. 14942 if (sel.getNumArgs() == 1 && str.startswith("addOperationWithBlock")) 14943 return false; 14944 str = str.substr(3); 14945 } 14946 else 14947 return false; 14948 14949 if (str.empty()) return true; 14950 return !isLowercase(str.front()); 14951 } 14952 14953 static Optional<int> GetNSMutableArrayArgumentIndex(Sema &S, 14954 ObjCMessageExpr *Message) { 14955 bool IsMutableArray = S.NSAPIObj->isSubclassOfNSClass( 14956 Message->getReceiverInterface(), 14957 NSAPI::ClassId_NSMutableArray); 14958 if (!IsMutableArray) { 14959 return None; 14960 } 14961 14962 Selector Sel = Message->getSelector(); 14963 14964 Optional<NSAPI::NSArrayMethodKind> MKOpt = 14965 S.NSAPIObj->getNSArrayMethodKind(Sel); 14966 if (!MKOpt) { 14967 return None; 14968 } 14969 14970 NSAPI::NSArrayMethodKind MK = *MKOpt; 14971 14972 switch (MK) { 14973 case NSAPI::NSMutableArr_addObject: 14974 case NSAPI::NSMutableArr_insertObjectAtIndex: 14975 case NSAPI::NSMutableArr_setObjectAtIndexedSubscript: 14976 return 0; 14977 case NSAPI::NSMutableArr_replaceObjectAtIndex: 14978 return 1; 14979 14980 default: 14981 return None; 14982 } 14983 14984 return None; 14985 } 14986 14987 static 14988 Optional<int> GetNSMutableDictionaryArgumentIndex(Sema &S, 14989 ObjCMessageExpr *Message) { 14990 bool IsMutableDictionary = S.NSAPIObj->isSubclassOfNSClass( 14991 Message->getReceiverInterface(), 14992 NSAPI::ClassId_NSMutableDictionary); 14993 if (!IsMutableDictionary) { 14994 return None; 14995 } 14996 14997 Selector Sel = Message->getSelector(); 14998 14999 Optional<NSAPI::NSDictionaryMethodKind> MKOpt = 15000 S.NSAPIObj->getNSDictionaryMethodKind(Sel); 15001 if (!MKOpt) { 15002 return None; 15003 } 15004 15005 NSAPI::NSDictionaryMethodKind MK = *MKOpt; 15006 15007 switch (MK) { 15008 case NSAPI::NSMutableDict_setObjectForKey: 15009 case NSAPI::NSMutableDict_setValueForKey: 15010 case NSAPI::NSMutableDict_setObjectForKeyedSubscript: 15011 return 0; 15012 15013 default: 15014 return None; 15015 } 15016 15017 return None; 15018 } 15019 15020 static Optional<int> GetNSSetArgumentIndex(Sema &S, ObjCMessageExpr *Message) { 15021 bool IsMutableSet = S.NSAPIObj->isSubclassOfNSClass( 15022 Message->getReceiverInterface(), 15023 NSAPI::ClassId_NSMutableSet); 15024 15025 bool IsMutableOrderedSet = S.NSAPIObj->isSubclassOfNSClass( 15026 Message->getReceiverInterface(), 15027 NSAPI::ClassId_NSMutableOrderedSet); 15028 if (!IsMutableSet && !IsMutableOrderedSet) { 15029 return None; 15030 } 15031 15032 Selector Sel = Message->getSelector(); 15033 15034 Optional<NSAPI::NSSetMethodKind> MKOpt = S.NSAPIObj->getNSSetMethodKind(Sel); 15035 if (!MKOpt) 
{ 15036 return None; 15037 } 15038 15039 NSAPI::NSSetMethodKind MK = *MKOpt; 15040 15041 switch (MK) { 15042 case NSAPI::NSMutableSet_addObject: 15043 case NSAPI::NSOrderedSet_setObjectAtIndex: 15044 case NSAPI::NSOrderedSet_setObjectAtIndexedSubscript: 15045 case NSAPI::NSOrderedSet_insertObjectAtIndex: 15046 return 0; 15047 case NSAPI::NSOrderedSet_replaceObjectAtIndexWithObject: 15048 return 1; 15049 } 15050 15051 return None; 15052 } 15053 15054 void Sema::CheckObjCCircularContainer(ObjCMessageExpr *Message) { 15055 if (!Message->isInstanceMessage()) { 15056 return; 15057 } 15058 15059 Optional<int> ArgOpt; 15060 15061 if (!(ArgOpt = GetNSMutableArrayArgumentIndex(*this, Message)) && 15062 !(ArgOpt = GetNSMutableDictionaryArgumentIndex(*this, Message)) && 15063 !(ArgOpt = GetNSSetArgumentIndex(*this, Message))) { 15064 return; 15065 } 15066 15067 int ArgIndex = *ArgOpt; 15068 15069 Expr *Arg = Message->getArg(ArgIndex)->IgnoreImpCasts(); 15070 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Arg)) { 15071 Arg = OE->getSourceExpr()->IgnoreImpCasts(); 15072 } 15073 15074 if (Message->getReceiverKind() == ObjCMessageExpr::SuperInstance) { 15075 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) { 15076 if (ArgRE->isObjCSelfExpr()) { 15077 Diag(Message->getSourceRange().getBegin(), 15078 diag::warn_objc_circular_container) 15079 << ArgRE->getDecl() << StringRef("'super'"); 15080 } 15081 } 15082 } else { 15083 Expr *Receiver = Message->getInstanceReceiver()->IgnoreImpCasts(); 15084 15085 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Receiver)) { 15086 Receiver = OE->getSourceExpr()->IgnoreImpCasts(); 15087 } 15088 15089 if (DeclRefExpr *ReceiverRE = dyn_cast<DeclRefExpr>(Receiver)) { 15090 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) { 15091 if (ReceiverRE->getDecl() == ArgRE->getDecl()) { 15092 ValueDecl *Decl = ReceiverRE->getDecl(); 15093 Diag(Message->getSourceRange().getBegin(), 15094 diag::warn_objc_circular_container) 15095 << Decl << Decl; 15096 if (!ArgRE->isObjCSelfExpr()) { 15097 Diag(Decl->getLocation(), 15098 diag::note_objc_circular_container_declared_here) 15099 << Decl; 15100 } 15101 } 15102 } 15103 } else if (ObjCIvarRefExpr *IvarRE = dyn_cast<ObjCIvarRefExpr>(Receiver)) { 15104 if (ObjCIvarRefExpr *IvarArgRE = dyn_cast<ObjCIvarRefExpr>(Arg)) { 15105 if (IvarRE->getDecl() == IvarArgRE->getDecl()) { 15106 ObjCIvarDecl *Decl = IvarRE->getDecl(); 15107 Diag(Message->getSourceRange().getBegin(), 15108 diag::warn_objc_circular_container) 15109 << Decl << Decl; 15110 Diag(Decl->getLocation(), 15111 diag::note_objc_circular_container_declared_here) 15112 << Decl; 15113 } 15114 } 15115 } 15116 } 15117 } 15118 15119 /// Check a message send to see if it's likely to cause a retain cycle. 15120 void Sema::checkRetainCycles(ObjCMessageExpr *msg) { 15121 // Only check instance methods whose selector looks like a setter. 15122 if (!msg->isInstanceMessage() || !isSetterLikeSelector(msg->getSelector())) 15123 return; 15124 15125 // Try to find a variable that the receiver is strongly owned by. 
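  // For example (illustrative only), under ARC:
  //
  //   Foo *obj = getFoo();
  //   [obj setCompletionHandler:^{ [obj finish]; }];
  //
  // Here the receiver is the __strong local 'obj', the selector looks like a
  // setter, and the block argument captures 'obj' strongly, so we warn that
  // this is likely to create a retain cycle.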
15126 RetainCycleOwner owner; 15127 if (msg->getReceiverKind() == ObjCMessageExpr::Instance) { 15128 if (!findRetainCycleOwner(*this, msg->getInstanceReceiver(), owner)) 15129 return; 15130 } else { 15131 assert(msg->getReceiverKind() == ObjCMessageExpr::SuperInstance); 15132 owner.Variable = getCurMethodDecl()->getSelfDecl(); 15133 owner.Loc = msg->getSuperLoc(); 15134 owner.Range = msg->getSuperLoc(); 15135 } 15136 15137 // Check whether the receiver is captured by any of the arguments. 15138 const ObjCMethodDecl *MD = msg->getMethodDecl(); 15139 for (unsigned i = 0, e = msg->getNumArgs(); i != e; ++i) { 15140 if (Expr *capturer = findCapturingExpr(*this, msg->getArg(i), owner)) { 15141 // noescape blocks should not be retained by the method. 15142 if (MD && MD->parameters()[i]->hasAttr<NoEscapeAttr>()) 15143 continue; 15144 return diagnoseRetainCycle(*this, capturer, owner); 15145 } 15146 } 15147 } 15148 15149 /// Check a property assign to see if it's likely to cause a retain cycle. 15150 void Sema::checkRetainCycles(Expr *receiver, Expr *argument) { 15151 RetainCycleOwner owner; 15152 if (!findRetainCycleOwner(*this, receiver, owner)) 15153 return; 15154 15155 if (Expr *capturer = findCapturingExpr(*this, argument, owner)) 15156 diagnoseRetainCycle(*this, capturer, owner); 15157 } 15158 15159 void Sema::checkRetainCycles(VarDecl *Var, Expr *Init) { 15160 RetainCycleOwner Owner; 15161 if (!considerVariable(Var, /*DeclRefExpr=*/nullptr, Owner)) 15162 return; 15163 15164 // Because we don't have an expression for the variable, we have to set the 15165 // location explicitly here. 15166 Owner.Loc = Var->getLocation(); 15167 Owner.Range = Var->getSourceRange(); 15168 15169 if (Expr *Capturer = findCapturingExpr(*this, Init, Owner)) 15170 diagnoseRetainCycle(*this, Capturer, Owner); 15171 } 15172 15173 static bool checkUnsafeAssignLiteral(Sema &S, SourceLocation Loc, 15174 Expr *RHS, bool isProperty) { 15175 // Check if RHS is an Objective-C object literal, which also can get 15176 // immediately zapped in a weak reference. Note that we explicitly 15177 // allow ObjCStringLiterals, since those are designed to never really die. 15178 RHS = RHS->IgnoreParenImpCasts(); 15179 15180 // This enum needs to match with the 'select' in 15181 // warn_objc_arc_literal_assign (off-by-1). 15182 Sema::ObjCLiteralKind Kind = S.CheckLiteralKind(RHS); 15183 if (Kind == Sema::LK_String || Kind == Sema::LK_None) 15184 return false; 15185 15186 S.Diag(Loc, diag::warn_arc_literal_assign) 15187 << (unsigned) Kind 15188 << (isProperty ? 0 : 1) 15189 << RHS->getSourceRange(); 15190 15191 return true; 15192 } 15193 15194 static bool checkUnsafeAssignObject(Sema &S, SourceLocation Loc, 15195 Qualifiers::ObjCLifetime LT, 15196 Expr *RHS, bool isProperty) { 15197 // Strip off any implicit cast added to get to the one ARC-specific. 15198 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { 15199 if (cast->getCastKind() == CK_ARCConsumeObject) { 15200 S.Diag(Loc, diag::warn_arc_retained_assign) 15201 << (LT == Qualifiers::OCL_ExplicitNone) 15202 << (isProperty ? 
0 : 1) 15203 << RHS->getSourceRange(); 15204 return true; 15205 } 15206 RHS = cast->getSubExpr(); 15207 } 15208 15209 if (LT == Qualifiers::OCL_Weak && 15210 checkUnsafeAssignLiteral(S, Loc, RHS, isProperty)) 15211 return true; 15212 15213 return false; 15214 } 15215 15216 bool Sema::checkUnsafeAssigns(SourceLocation Loc, 15217 QualType LHS, Expr *RHS) { 15218 Qualifiers::ObjCLifetime LT = LHS.getObjCLifetime(); 15219 15220 if (LT != Qualifiers::OCL_Weak && LT != Qualifiers::OCL_ExplicitNone) 15221 return false; 15222 15223 if (checkUnsafeAssignObject(*this, Loc, LT, RHS, false)) 15224 return true; 15225 15226 return false; 15227 } 15228 15229 void Sema::checkUnsafeExprAssigns(SourceLocation Loc, 15230 Expr *LHS, Expr *RHS) { 15231 QualType LHSType; 15232 // PropertyRef on LHS type need be directly obtained from 15233 // its declaration as it has a PseudoType. 15234 ObjCPropertyRefExpr *PRE 15235 = dyn_cast<ObjCPropertyRefExpr>(LHS->IgnoreParens()); 15236 if (PRE && !PRE->isImplicitProperty()) { 15237 const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); 15238 if (PD) 15239 LHSType = PD->getType(); 15240 } 15241 15242 if (LHSType.isNull()) 15243 LHSType = LHS->getType(); 15244 15245 Qualifiers::ObjCLifetime LT = LHSType.getObjCLifetime(); 15246 15247 if (LT == Qualifiers::OCL_Weak) { 15248 if (!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, Loc)) 15249 getCurFunction()->markSafeWeakUse(LHS); 15250 } 15251 15252 if (checkUnsafeAssigns(Loc, LHSType, RHS)) 15253 return; 15254 15255 // FIXME. Check for other life times. 15256 if (LT != Qualifiers::OCL_None) 15257 return; 15258 15259 if (PRE) { 15260 if (PRE->isImplicitProperty()) 15261 return; 15262 const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); 15263 if (!PD) 15264 return; 15265 15266 unsigned Attributes = PD->getPropertyAttributes(); 15267 if (Attributes & ObjCPropertyAttribute::kind_assign) { 15268 // when 'assign' attribute was not explicitly specified 15269 // by user, ignore it and rely on property type itself 15270 // for lifetime info. 15271 unsigned AsWrittenAttr = PD->getPropertyAttributesAsWritten(); 15272 if (!(AsWrittenAttr & ObjCPropertyAttribute::kind_assign) && 15273 LHSType->isObjCRetainableType()) 15274 return; 15275 15276 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { 15277 if (cast->getCastKind() == CK_ARCConsumeObject) { 15278 Diag(Loc, diag::warn_arc_retained_property_assign) 15279 << RHS->getSourceRange(); 15280 return; 15281 } 15282 RHS = cast->getSubExpr(); 15283 } 15284 } else if (Attributes & ObjCPropertyAttribute::kind_weak) { 15285 if (checkUnsafeAssignObject(*this, Loc, Qualifiers::OCL_Weak, RHS, true)) 15286 return; 15287 } 15288 } 15289 } 15290 15291 //===--- CHECK: Empty statement body (-Wempty-body) ---------------------===// 15292 15293 static bool ShouldDiagnoseEmptyStmtBody(const SourceManager &SourceMgr, 15294 SourceLocation StmtLoc, 15295 const NullStmt *Body) { 15296 // Do not warn if the body is a macro that expands to nothing, e.g: 15297 // 15298 // #define CALL(x) 15299 // if (condition) 15300 // CALL(0); 15301 if (Body->hasLeadingEmptyMacro()) 15302 return false; 15303 15304 // Get line numbers of statement and body. 
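  // Note that the statement's line is computed from its presumed location,
  // while the null statement's semicolon uses its spelling location.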
15305 bool StmtLineInvalid; 15306 unsigned StmtLine = SourceMgr.getPresumedLineNumber(StmtLoc, 15307 &StmtLineInvalid); 15308 if (StmtLineInvalid) 15309 return false; 15310 15311 bool BodyLineInvalid; 15312 unsigned BodyLine = SourceMgr.getSpellingLineNumber(Body->getSemiLoc(), 15313 &BodyLineInvalid); 15314 if (BodyLineInvalid) 15315 return false; 15316 15317 // Warn if null statement and body are on the same line. 15318 if (StmtLine != BodyLine) 15319 return false; 15320 15321 return true; 15322 } 15323 15324 void Sema::DiagnoseEmptyStmtBody(SourceLocation StmtLoc, 15325 const Stmt *Body, 15326 unsigned DiagID) { 15327 // Since this is a syntactic check, don't emit diagnostic for template 15328 // instantiations, this just adds noise. 15329 if (CurrentInstantiationScope) 15330 return; 15331 15332 // The body should be a null statement. 15333 const NullStmt *NBody = dyn_cast<NullStmt>(Body); 15334 if (!NBody) 15335 return; 15336 15337 // Do the usual checks. 15338 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody)) 15339 return; 15340 15341 Diag(NBody->getSemiLoc(), DiagID); 15342 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line); 15343 } 15344 15345 void Sema::DiagnoseEmptyLoopBody(const Stmt *S, 15346 const Stmt *PossibleBody) { 15347 assert(!CurrentInstantiationScope); // Ensured by caller 15348 15349 SourceLocation StmtLoc; 15350 const Stmt *Body; 15351 unsigned DiagID; 15352 if (const ForStmt *FS = dyn_cast<ForStmt>(S)) { 15353 StmtLoc = FS->getRParenLoc(); 15354 Body = FS->getBody(); 15355 DiagID = diag::warn_empty_for_body; 15356 } else if (const WhileStmt *WS = dyn_cast<WhileStmt>(S)) { 15357 StmtLoc = WS->getCond()->getSourceRange().getEnd(); 15358 Body = WS->getBody(); 15359 DiagID = diag::warn_empty_while_body; 15360 } else 15361 return; // Neither `for' nor `while'. 15362 15363 // The body should be a null statement. 15364 const NullStmt *NBody = dyn_cast<NullStmt>(Body); 15365 if (!NBody) 15366 return; 15367 15368 // Skip expensive checks if diagnostic is disabled. 15369 if (Diags.isIgnored(DiagID, NBody->getSemiLoc())) 15370 return; 15371 15372 // Do the usual checks. 15373 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody)) 15374 return; 15375 15376 // `for(...);' and `while(...);' are popular idioms, so in order to keep 15377 // noise level low, emit diagnostics only if for/while is followed by a 15378 // CompoundStmt, e.g.: 15379 // for (int i = 0; i < n; i++); 15380 // { 15381 // a(i); 15382 // } 15383 // or if for/while is followed by a statement with more indentation 15384 // than for/while itself: 15385 // for (int i = 0; i < n; i++); 15386 // a(i); 15387 bool ProbableTypo = isa<CompoundStmt>(PossibleBody); 15388 if (!ProbableTypo) { 15389 bool BodyColInvalid; 15390 unsigned BodyCol = SourceMgr.getPresumedColumnNumber( 15391 PossibleBody->getBeginLoc(), &BodyColInvalid); 15392 if (BodyColInvalid) 15393 return; 15394 15395 bool StmtColInvalid; 15396 unsigned StmtCol = 15397 SourceMgr.getPresumedColumnNumber(S->getBeginLoc(), &StmtColInvalid); 15398 if (StmtColInvalid) 15399 return; 15400 15401 if (BodyCol > StmtCol) 15402 ProbableTypo = true; 15403 } 15404 15405 if (ProbableTypo) { 15406 Diag(NBody->getSemiLoc(), DiagID); 15407 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line); 15408 } 15409 } 15410 15411 //===--- CHECK: Warn on self move with std::move. -------------------------===// 15412 15413 /// DiagnoseSelfMove - Emits a warning if a value is moved to itself. 
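/// For example (illustrative), 'x = std::move(x)' is diagnosed, as is
/// 's.a = std::move(s.a)' when both sides refer to the same member of the
/// same object.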
15414 void Sema::DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, 15415 SourceLocation OpLoc) { 15416 if (Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess, OpLoc)) 15417 return; 15418 15419 if (inTemplateInstantiation()) 15420 return; 15421 15422 // Strip parens and casts away. 15423 LHSExpr = LHSExpr->IgnoreParenImpCasts(); 15424 RHSExpr = RHSExpr->IgnoreParenImpCasts(); 15425 15426 // Check for a call expression 15427 const CallExpr *CE = dyn_cast<CallExpr>(RHSExpr); 15428 if (!CE || CE->getNumArgs() != 1) 15429 return; 15430 15431 // Check for a call to std::move 15432 if (!CE->isCallToStdMove()) 15433 return; 15434 15435 // Get argument from std::move 15436 RHSExpr = CE->getArg(0); 15437 15438 const DeclRefExpr *LHSDeclRef = dyn_cast<DeclRefExpr>(LHSExpr); 15439 const DeclRefExpr *RHSDeclRef = dyn_cast<DeclRefExpr>(RHSExpr); 15440 15441 // Two DeclRefExpr's, check that the decls are the same. 15442 if (LHSDeclRef && RHSDeclRef) { 15443 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl()) 15444 return; 15445 if (LHSDeclRef->getDecl()->getCanonicalDecl() != 15446 RHSDeclRef->getDecl()->getCanonicalDecl()) 15447 return; 15448 15449 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType() 15450 << LHSExpr->getSourceRange() 15451 << RHSExpr->getSourceRange(); 15452 return; 15453 } 15454 15455 // Member variables require a different approach to check for self moves. 15456 // MemberExpr's are the same if every nested MemberExpr refers to the same 15457 // Decl and that the base Expr's are DeclRefExpr's with the same Decl or 15458 // the base Expr's are CXXThisExpr's. 15459 const Expr *LHSBase = LHSExpr; 15460 const Expr *RHSBase = RHSExpr; 15461 const MemberExpr *LHSME = dyn_cast<MemberExpr>(LHSExpr); 15462 const MemberExpr *RHSME = dyn_cast<MemberExpr>(RHSExpr); 15463 if (!LHSME || !RHSME) 15464 return; 15465 15466 while (LHSME && RHSME) { 15467 if (LHSME->getMemberDecl()->getCanonicalDecl() != 15468 RHSME->getMemberDecl()->getCanonicalDecl()) 15469 return; 15470 15471 LHSBase = LHSME->getBase(); 15472 RHSBase = RHSME->getBase(); 15473 LHSME = dyn_cast<MemberExpr>(LHSBase); 15474 RHSME = dyn_cast<MemberExpr>(RHSBase); 15475 } 15476 15477 LHSDeclRef = dyn_cast<DeclRefExpr>(LHSBase); 15478 RHSDeclRef = dyn_cast<DeclRefExpr>(RHSBase); 15479 if (LHSDeclRef && RHSDeclRef) { 15480 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl()) 15481 return; 15482 if (LHSDeclRef->getDecl()->getCanonicalDecl() != 15483 RHSDeclRef->getDecl()->getCanonicalDecl()) 15484 return; 15485 15486 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType() 15487 << LHSExpr->getSourceRange() 15488 << RHSExpr->getSourceRange(); 15489 return; 15490 } 15491 15492 if (isa<CXXThisExpr>(LHSBase) && isa<CXXThisExpr>(RHSBase)) 15493 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType() 15494 << LHSExpr->getSourceRange() 15495 << RHSExpr->getSourceRange(); 15496 } 15497 15498 //===--- Layout compatibility ----------------------------------------------// 15499 15500 static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2); 15501 15502 /// Check if two enumeration types are layout-compatible. 15503 static bool isLayoutCompatible(ASTContext &C, EnumDecl *ED1, EnumDecl *ED2) { 15504 // C++11 [dcl.enum] p8: 15505 // Two enumeration types are layout-compatible if they have the same 15506 // underlying type. 15507 return ED1->isComplete() && ED2->isComplete() && 15508 C.hasSameType(ED1->getIntegerType(), ED2->getIntegerType()); 15509 } 15510 15511 /// Check if two fields are layout-compatible. 
15512 static bool isLayoutCompatible(ASTContext &C, FieldDecl *Field1, 15513 FieldDecl *Field2) { 15514 if (!isLayoutCompatible(C, Field1->getType(), Field2->getType())) 15515 return false; 15516 15517 if (Field1->isBitField() != Field2->isBitField()) 15518 return false; 15519 15520 if (Field1->isBitField()) { 15521 // Make sure that the bit-fields are the same length. 15522 unsigned Bits1 = Field1->getBitWidthValue(C); 15523 unsigned Bits2 = Field2->getBitWidthValue(C); 15524 15525 if (Bits1 != Bits2) 15526 return false; 15527 } 15528 15529 return true; 15530 } 15531 15532 /// Check if two standard-layout structs are layout-compatible. 15533 /// (C++11 [class.mem] p17) 15534 static bool isLayoutCompatibleStruct(ASTContext &C, RecordDecl *RD1, 15535 RecordDecl *RD2) { 15536 // If both records are C++ classes, check that base classes match. 15537 if (const CXXRecordDecl *D1CXX = dyn_cast<CXXRecordDecl>(RD1)) { 15538 // If one of records is a CXXRecordDecl we are in C++ mode, 15539 // thus the other one is a CXXRecordDecl, too. 15540 const CXXRecordDecl *D2CXX = cast<CXXRecordDecl>(RD2); 15541 // Check number of base classes. 15542 if (D1CXX->getNumBases() != D2CXX->getNumBases()) 15543 return false; 15544 15545 // Check the base classes. 15546 for (CXXRecordDecl::base_class_const_iterator 15547 Base1 = D1CXX->bases_begin(), 15548 BaseEnd1 = D1CXX->bases_end(), 15549 Base2 = D2CXX->bases_begin(); 15550 Base1 != BaseEnd1; 15551 ++Base1, ++Base2) { 15552 if (!isLayoutCompatible(C, Base1->getType(), Base2->getType())) 15553 return false; 15554 } 15555 } else if (const CXXRecordDecl *D2CXX = dyn_cast<CXXRecordDecl>(RD2)) { 15556 // If only RD2 is a C++ class, it should have zero base classes. 15557 if (D2CXX->getNumBases() > 0) 15558 return false; 15559 } 15560 15561 // Check the fields. 15562 RecordDecl::field_iterator Field2 = RD2->field_begin(), 15563 Field2End = RD2->field_end(), 15564 Field1 = RD1->field_begin(), 15565 Field1End = RD1->field_end(); 15566 for ( ; Field1 != Field1End && Field2 != Field2End; ++Field1, ++Field2) { 15567 if (!isLayoutCompatible(C, *Field1, *Field2)) 15568 return false; 15569 } 15570 if (Field1 != Field1End || Field2 != Field2End) 15571 return false; 15572 15573 return true; 15574 } 15575 15576 /// Check if two standard-layout unions are layout-compatible. 15577 /// (C++11 [class.mem] p18) 15578 static bool isLayoutCompatibleUnion(ASTContext &C, RecordDecl *RD1, 15579 RecordDecl *RD2) { 15580 llvm::SmallPtrSet<FieldDecl *, 8> UnmatchedFields; 15581 for (auto *Field2 : RD2->fields()) 15582 UnmatchedFields.insert(Field2); 15583 15584 for (auto *Field1 : RD1->fields()) { 15585 llvm::SmallPtrSet<FieldDecl *, 8>::iterator 15586 I = UnmatchedFields.begin(), 15587 E = UnmatchedFields.end(); 15588 15589 for ( ; I != E; ++I) { 15590 if (isLayoutCompatible(C, Field1, *I)) { 15591 bool Result = UnmatchedFields.erase(*I); 15592 (void) Result; 15593 assert(Result); 15594 break; 15595 } 15596 } 15597 if (I == E) 15598 return false; 15599 } 15600 15601 return UnmatchedFields.empty(); 15602 } 15603 15604 static bool isLayoutCompatible(ASTContext &C, RecordDecl *RD1, 15605 RecordDecl *RD2) { 15606 if (RD1->isUnion() != RD2->isUnion()) 15607 return false; 15608 15609 if (RD1->isUnion()) 15610 return isLayoutCompatibleUnion(C, RD1, RD2); 15611 else 15612 return isLayoutCompatibleStruct(C, RD1, RD2); 15613 } 15614 15615 /// Check if two types are layout-compatible in C++11 sense. 
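/// For example (illustrative), two complete enumerations with the same
/// underlying type are layout-compatible, as are two standard-layout structs
/// whose corresponding fields have layout-compatible types.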
static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2) {
  if (T1.isNull() || T2.isNull())
    return false;

  // C++11 [basic.types] p11:
  //   If two types T1 and T2 are the same type, then T1 and T2 are
  //   layout-compatible types.
  if (C.hasSameType(T1, T2))
    return true;

  T1 = T1.getCanonicalType().getUnqualifiedType();
  T2 = T2.getCanonicalType().getUnqualifiedType();

  const Type::TypeClass TC1 = T1->getTypeClass();
  const Type::TypeClass TC2 = T2->getTypeClass();

  if (TC1 != TC2)
    return false;

  if (TC1 == Type::Enum) {
    return isLayoutCompatible(C,
                              cast<EnumType>(T1)->getDecl(),
                              cast<EnumType>(T2)->getDecl());
  } else if (TC1 == Type::Record) {
    if (!T1->isStandardLayoutType() || !T2->isStandardLayoutType())
      return false;

    return isLayoutCompatible(C,
                              cast<RecordType>(T1)->getDecl(),
                              cast<RecordType>(T2)->getDecl());
  }

  return false;
}

//===--- CHECK: pointer_with_type_tag attribute: datatypes should match ----//

/// Given a type tag expression find the type tag itself.
///
/// \param TypeExpr Type tag expression, as it appears in user's code.
///
/// \param VD Declaration of an identifier that appears in a type tag.
///
/// \param MagicValue Type tag magic value.
///
/// \param isConstantEvaluated whether the evaluation should be performed in
/// constant context.
static bool FindTypeTagExpr(const Expr *TypeExpr, const ASTContext &Ctx,
                            const ValueDecl **VD, uint64_t *MagicValue,
                            bool isConstantEvaluated) {
  while (true) {
    if (!TypeExpr)
      return false;

    TypeExpr = TypeExpr->IgnoreParenImpCasts()->IgnoreParenCasts();

    switch (TypeExpr->getStmtClass()) {
    case Stmt::UnaryOperatorClass: {
      const UnaryOperator *UO = cast<UnaryOperator>(TypeExpr);
      if (UO->getOpcode() == UO_AddrOf || UO->getOpcode() == UO_Deref) {
        TypeExpr = UO->getSubExpr();
        continue;
      }
      return false;
    }

    case Stmt::DeclRefExprClass: {
      const DeclRefExpr *DRE = cast<DeclRefExpr>(TypeExpr);
      *VD = DRE->getDecl();
      return true;
    }

    case Stmt::IntegerLiteralClass: {
      const IntegerLiteral *IL = cast<IntegerLiteral>(TypeExpr);
      llvm::APInt MagicValueAPInt = IL->getValue();
      if (MagicValueAPInt.getActiveBits() <= 64) {
        *MagicValue = MagicValueAPInt.getZExtValue();
        return true;
      } else
        return false;
    }

    case Stmt::BinaryConditionalOperatorClass:
    case Stmt::ConditionalOperatorClass: {
      const AbstractConditionalOperator *ACO =
          cast<AbstractConditionalOperator>(TypeExpr);
      bool Result;
      if (ACO->getCond()->EvaluateAsBooleanCondition(Result, Ctx,
                                                     isConstantEvaluated)) {
        if (Result)
          TypeExpr = ACO->getTrueExpr();
        else
          TypeExpr = ACO->getFalseExpr();
        continue;
      }
      return false;
    }

    case Stmt::BinaryOperatorClass: {
      const BinaryOperator *BO = cast<BinaryOperator>(TypeExpr);
      if (BO->getOpcode() == BO_Comma) {
        TypeExpr = BO->getRHS();
        continue;
      }
      return false;
    }

    default:
      return false;
    }
  }
}

/// Retrieve the C type corresponding to type tag TypeExpr.
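/// The tag is resolved either through a 'type_tag_for_datatype' attribute on
/// the declaration that the tag expression refers to, or through a magic
/// value previously registered via RegisterTypeTagForDatatype.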
15731 /// 15732 /// \param TypeExpr Expression that specifies a type tag. 15733 /// 15734 /// \param MagicValues Registered magic values. 15735 /// 15736 /// \param FoundWrongKind Set to true if a type tag was found, but of a wrong 15737 /// kind. 15738 /// 15739 /// \param TypeInfo Information about the corresponding C type. 15740 /// 15741 /// \param isConstantEvaluated wether the evalaution should be performed in 15742 /// constant context. 15743 /// 15744 /// \returns true if the corresponding C type was found. 15745 static bool GetMatchingCType( 15746 const IdentifierInfo *ArgumentKind, const Expr *TypeExpr, 15747 const ASTContext &Ctx, 15748 const llvm::DenseMap<Sema::TypeTagMagicValue, Sema::TypeTagData> 15749 *MagicValues, 15750 bool &FoundWrongKind, Sema::TypeTagData &TypeInfo, 15751 bool isConstantEvaluated) { 15752 FoundWrongKind = false; 15753 15754 // Variable declaration that has type_tag_for_datatype attribute. 15755 const ValueDecl *VD = nullptr; 15756 15757 uint64_t MagicValue; 15758 15759 if (!FindTypeTagExpr(TypeExpr, Ctx, &VD, &MagicValue, isConstantEvaluated)) 15760 return false; 15761 15762 if (VD) { 15763 if (TypeTagForDatatypeAttr *I = VD->getAttr<TypeTagForDatatypeAttr>()) { 15764 if (I->getArgumentKind() != ArgumentKind) { 15765 FoundWrongKind = true; 15766 return false; 15767 } 15768 TypeInfo.Type = I->getMatchingCType(); 15769 TypeInfo.LayoutCompatible = I->getLayoutCompatible(); 15770 TypeInfo.MustBeNull = I->getMustBeNull(); 15771 return true; 15772 } 15773 return false; 15774 } 15775 15776 if (!MagicValues) 15777 return false; 15778 15779 llvm::DenseMap<Sema::TypeTagMagicValue, 15780 Sema::TypeTagData>::const_iterator I = 15781 MagicValues->find(std::make_pair(ArgumentKind, MagicValue)); 15782 if (I == MagicValues->end()) 15783 return false; 15784 15785 TypeInfo = I->second; 15786 return true; 15787 } 15788 15789 void Sema::RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, 15790 uint64_t MagicValue, QualType Type, 15791 bool LayoutCompatible, 15792 bool MustBeNull) { 15793 if (!TypeTagForDatatypeMagicValues) 15794 TypeTagForDatatypeMagicValues.reset( 15795 new llvm::DenseMap<TypeTagMagicValue, TypeTagData>); 15796 15797 TypeTagMagicValue Magic(ArgumentKind, MagicValue); 15798 (*TypeTagForDatatypeMagicValues)[Magic] = 15799 TypeTagData(Type, LayoutCompatible, MustBeNull); 15800 } 15801 15802 static bool IsSameCharType(QualType T1, QualType T2) { 15803 const BuiltinType *BT1 = T1->getAs<BuiltinType>(); 15804 if (!BT1) 15805 return false; 15806 15807 const BuiltinType *BT2 = T2->getAs<BuiltinType>(); 15808 if (!BT2) 15809 return false; 15810 15811 BuiltinType::Kind T1Kind = BT1->getKind(); 15812 BuiltinType::Kind T2Kind = BT2->getKind(); 15813 15814 return (T1Kind == BuiltinType::SChar && T2Kind == BuiltinType::Char_S) || 15815 (T1Kind == BuiltinType::UChar && T2Kind == BuiltinType::Char_U) || 15816 (T1Kind == BuiltinType::Char_U && T2Kind == BuiltinType::UChar) || 15817 (T1Kind == BuiltinType::Char_S && T2Kind == BuiltinType::SChar); 15818 } 15819 15820 void Sema::CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, 15821 const ArrayRef<const Expr *> ExprArgs, 15822 SourceLocation CallSiteLoc) { 15823 const IdentifierInfo *ArgumentKind = Attr->getArgumentKind(); 15824 bool IsPointerAttr = Attr->getIsPointer(); 15825 15826 // Retrieve the argument representing the 'type_tag'. 
15827 unsigned TypeTagIdxAST = Attr->getTypeTagIdx().getASTIndex(); 15828 if (TypeTagIdxAST >= ExprArgs.size()) { 15829 Diag(CallSiteLoc, diag::err_tag_index_out_of_range) 15830 << 0 << Attr->getTypeTagIdx().getSourceIndex(); 15831 return; 15832 } 15833 const Expr *TypeTagExpr = ExprArgs[TypeTagIdxAST]; 15834 bool FoundWrongKind; 15835 TypeTagData TypeInfo; 15836 if (!GetMatchingCType(ArgumentKind, TypeTagExpr, Context, 15837 TypeTagForDatatypeMagicValues.get(), FoundWrongKind, 15838 TypeInfo, isConstantEvaluated())) { 15839 if (FoundWrongKind) 15840 Diag(TypeTagExpr->getExprLoc(), 15841 diag::warn_type_tag_for_datatype_wrong_kind) 15842 << TypeTagExpr->getSourceRange(); 15843 return; 15844 } 15845 15846 // Retrieve the argument representing the 'arg_idx'. 15847 unsigned ArgumentIdxAST = Attr->getArgumentIdx().getASTIndex(); 15848 if (ArgumentIdxAST >= ExprArgs.size()) { 15849 Diag(CallSiteLoc, diag::err_tag_index_out_of_range) 15850 << 1 << Attr->getArgumentIdx().getSourceIndex(); 15851 return; 15852 } 15853 const Expr *ArgumentExpr = ExprArgs[ArgumentIdxAST]; 15854 if (IsPointerAttr) { 15855 // Skip implicit cast of pointer to `void *' (as a function argument). 15856 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(ArgumentExpr)) 15857 if (ICE->getType()->isVoidPointerType() && 15858 ICE->getCastKind() == CK_BitCast) 15859 ArgumentExpr = ICE->getSubExpr(); 15860 } 15861 QualType ArgumentType = ArgumentExpr->getType(); 15862 15863 // Passing a `void*' pointer shouldn't trigger a warning. 15864 if (IsPointerAttr && ArgumentType->isVoidPointerType()) 15865 return; 15866 15867 if (TypeInfo.MustBeNull) { 15868 // Type tag with matching void type requires a null pointer. 15869 if (!ArgumentExpr->isNullPointerConstant(Context, 15870 Expr::NPC_ValueDependentIsNotNull)) { 15871 Diag(ArgumentExpr->getExprLoc(), 15872 diag::warn_type_safety_null_pointer_required) 15873 << ArgumentKind->getName() 15874 << ArgumentExpr->getSourceRange() 15875 << TypeTagExpr->getSourceRange(); 15876 } 15877 return; 15878 } 15879 15880 QualType RequiredType = TypeInfo.Type; 15881 if (IsPointerAttr) 15882 RequiredType = Context.getPointerType(RequiredType); 15883 15884 bool mismatch = false; 15885 if (!TypeInfo.LayoutCompatible) { 15886 mismatch = !Context.hasSameType(ArgumentType, RequiredType); 15887 15888 // C++11 [basic.fundamental] p1: 15889 // Plain char, signed char, and unsigned char are three distinct types. 15890 // 15891 // But we treat plain `char' as equivalent to `signed char' or `unsigned 15892 // char' depending on the current char signedness mode. 
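    // For example (illustrative), an argument of type 'char *' matches a
    // required type of 'signed char *' on targets where plain 'char' is
    // signed, and 'unsigned char *' on targets where it is unsigned.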
15893 if (mismatch) 15894 if ((IsPointerAttr && IsSameCharType(ArgumentType->getPointeeType(), 15895 RequiredType->getPointeeType())) || 15896 (!IsPointerAttr && IsSameCharType(ArgumentType, RequiredType))) 15897 mismatch = false; 15898 } else 15899 if (IsPointerAttr) 15900 mismatch = !isLayoutCompatible(Context, 15901 ArgumentType->getPointeeType(), 15902 RequiredType->getPointeeType()); 15903 else 15904 mismatch = !isLayoutCompatible(Context, ArgumentType, RequiredType); 15905 15906 if (mismatch) 15907 Diag(ArgumentExpr->getExprLoc(), diag::warn_type_safety_type_mismatch) 15908 << ArgumentType << ArgumentKind 15909 << TypeInfo.LayoutCompatible << RequiredType 15910 << ArgumentExpr->getSourceRange() 15911 << TypeTagExpr->getSourceRange(); 15912 } 15913 15914 void Sema::AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, 15915 CharUnits Alignment) { 15916 MisalignedMembers.emplace_back(E, RD, MD, Alignment); 15917 } 15918 15919 void Sema::DiagnoseMisalignedMembers() { 15920 for (MisalignedMember &m : MisalignedMembers) { 15921 const NamedDecl *ND = m.RD; 15922 if (ND->getName().empty()) { 15923 if (const TypedefNameDecl *TD = m.RD->getTypedefNameForAnonDecl()) 15924 ND = TD; 15925 } 15926 Diag(m.E->getBeginLoc(), diag::warn_taking_address_of_packed_member) 15927 << m.MD << ND << m.E->getSourceRange(); 15928 } 15929 MisalignedMembers.clear(); 15930 } 15931 15932 void Sema::DiscardMisalignedMemberAddress(const Type *T, Expr *E) { 15933 E = E->IgnoreParens(); 15934 if (!T->isPointerType() && !T->isIntegerType()) 15935 return; 15936 if (isa<UnaryOperator>(E) && 15937 cast<UnaryOperator>(E)->getOpcode() == UO_AddrOf) { 15938 auto *Op = cast<UnaryOperator>(E)->getSubExpr()->IgnoreParens(); 15939 if (isa<MemberExpr>(Op)) { 15940 auto MA = llvm::find(MisalignedMembers, MisalignedMember(Op)); 15941 if (MA != MisalignedMembers.end() && 15942 (T->isIntegerType() || 15943 (T->isPointerType() && (T->getPointeeType()->isIncompleteType() || 15944 Context.getTypeAlignInChars( 15945 T->getPointeeType()) <= MA->Alignment)))) 15946 MisalignedMembers.erase(MA); 15947 } 15948 } 15949 } 15950 15951 void Sema::RefersToMemberWithReducedAlignment( 15952 Expr *E, 15953 llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> 15954 Action) { 15955 const auto *ME = dyn_cast<MemberExpr>(E); 15956 if (!ME) 15957 return; 15958 15959 // No need to check expressions with an __unaligned-qualified type. 15960 if (E->getType().getQualifiers().hasUnaligned()) 15961 return; 15962 15963 // For a chain of MemberExpr like "a.b.c.d" this list 15964 // will keep FieldDecl's like [d, c, b]. 15965 SmallVector<FieldDecl *, 4> ReverseMemberChain; 15966 const MemberExpr *TopME = nullptr; 15967 bool AnyIsPacked = false; 15968 do { 15969 QualType BaseType = ME->getBase()->getType(); 15970 if (BaseType->isDependentType()) 15971 return; 15972 if (ME->isArrow()) 15973 BaseType = BaseType->getPointeeType(); 15974 RecordDecl *RD = BaseType->castAs<RecordType>()->getDecl(); 15975 if (RD->isInvalidDecl()) 15976 return; 15977 15978 ValueDecl *MD = ME->getMemberDecl(); 15979 auto *FD = dyn_cast<FieldDecl>(MD); 15980 // We do not care about non-data members. 
    if (!FD || FD->isInvalidDecl())
      return;

    AnyIsPacked =
        AnyIsPacked || (RD->hasAttr<PackedAttr>() || MD->hasAttr<PackedAttr>());
    ReverseMemberChain.push_back(FD);

    TopME = ME;
    ME = dyn_cast<MemberExpr>(ME->getBase()->IgnoreParens());
  } while (ME);
  assert(TopME && "We did not compute a topmost MemberExpr!");

  // Not the scope of this diagnostic.
  if (!AnyIsPacked)
    return;

  const Expr *TopBase = TopME->getBase()->IgnoreParenImpCasts();
  const auto *DRE = dyn_cast<DeclRefExpr>(TopBase);
  // TODO: The innermost base of the member expression may be too complicated.
  // For now, just disregard these cases. This is left for future
  // improvement.
  if (!DRE && !isa<CXXThisExpr>(TopBase))
    return;

  // Alignment expected by the whole expression.
  CharUnits ExpectedAlignment = Context.getTypeAlignInChars(E->getType());

  // No need to do anything else with this case.
  if (ExpectedAlignment.isOne())
    return;

  // Synthesize offset of the whole access.
  CharUnits Offset;
  for (auto I = ReverseMemberChain.rbegin(); I != ReverseMemberChain.rend();
       I++) {
    Offset += Context.toCharUnitsFromBits(Context.getFieldOffset(*I));
  }

  // Compute the CompleteObjectAlignment as the alignment of the whole chain.
  CharUnits CompleteObjectAlignment = Context.getTypeAlignInChars(
      ReverseMemberChain.back()->getParent()->getTypeForDecl());

  // The base expression of the innermost MemberExpr may give
  // stronger guarantees than the class containing the member.
  if (DRE && !TopME->isArrow()) {
    const ValueDecl *VD = DRE->getDecl();
    if (!VD->getType()->isReferenceType())
      CompleteObjectAlignment =
          std::max(CompleteObjectAlignment, Context.getDeclAlign(VD));
  }

  // Check if the synthesized offset fulfills the alignment.
  if (Offset % ExpectedAlignment != 0 ||
      // The offset may be fine, but the effective alignment may still be
      // lower than the expected expression alignment.
      CompleteObjectAlignment < ExpectedAlignment) {
    // If this happens, we want to determine a sensible culprit for it.
    // Intuitively, walking the chain of member expressions from right to
    // left, we start with the required alignment (as required by the field
    // type), but some packed attribute in that chain has reduced the
    // alignment. It may happen that another packed structure increases it
    // again, but if we got here, that increase has not been enough. So
    // pointing at the first FieldDecl that either is packed itself or whose
    // RecordDecl is packed seems reasonable.
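    // For example (illustrative):
    //   struct __attribute__((packed)) Inner { int X; };
    //   struct Outer { char C; struct Inner I; } O;
    //   int *P = &O.I.X;  // diagnosed: 'X' sits in a packed record
    // Here 'X' is the field we point at.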
16045 FieldDecl *FD = nullptr; 16046 CharUnits Alignment; 16047 for (FieldDecl *FDI : ReverseMemberChain) { 16048 if (FDI->hasAttr<PackedAttr>() || 16049 FDI->getParent()->hasAttr<PackedAttr>()) { 16050 FD = FDI; 16051 Alignment = std::min( 16052 Context.getTypeAlignInChars(FD->getType()), 16053 Context.getTypeAlignInChars(FD->getParent()->getTypeForDecl())); 16054 break; 16055 } 16056 } 16057 assert(FD && "We did not find a packed FieldDecl!"); 16058 Action(E, FD->getParent(), FD, Alignment); 16059 } 16060 } 16061 16062 void Sema::CheckAddressOfPackedMember(Expr *rhs) { 16063 using namespace std::placeholders; 16064 16065 RefersToMemberWithReducedAlignment( 16066 rhs, std::bind(&Sema::AddPotentialMisalignedMembers, std::ref(*this), _1, 16067 _2, _3, _4)); 16068 } 16069 16070 ExprResult Sema::SemaBuiltinMatrixTranspose(CallExpr *TheCall, 16071 ExprResult CallResult) { 16072 if (checkArgCount(*this, TheCall, 1)) 16073 return ExprError(); 16074 16075 ExprResult MatrixArg = DefaultLvalueConversion(TheCall->getArg(0)); 16076 if (MatrixArg.isInvalid()) 16077 return MatrixArg; 16078 Expr *Matrix = MatrixArg.get(); 16079 16080 auto *MType = Matrix->getType()->getAs<ConstantMatrixType>(); 16081 if (!MType) { 16082 Diag(Matrix->getBeginLoc(), diag::err_builtin_matrix_arg); 16083 return ExprError(); 16084 } 16085 16086 // Create returned matrix type by swapping rows and columns of the argument 16087 // matrix type. 16088 QualType ResultType = Context.getConstantMatrixType( 16089 MType->getElementType(), MType->getNumColumns(), MType->getNumRows()); 16090 16091 // Change the return type to the type of the returned matrix. 16092 TheCall->setType(ResultType); 16093 16094 // Update call argument to use the possibly converted matrix argument. 16095 TheCall->setArg(0, Matrix); 16096 return CallResult; 16097 } 16098 16099 // Get and verify the matrix dimensions. 16100 static llvm::Optional<unsigned> 16101 getAndVerifyMatrixDimension(Expr *Expr, StringRef Name, Sema &S) { 16102 SourceLocation ErrorPos; 16103 Optional<llvm::APSInt> Value = 16104 Expr->getIntegerConstantExpr(S.Context, &ErrorPos); 16105 if (!Value) { 16106 S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_scalar_unsigned_arg) 16107 << Name; 16108 return {}; 16109 } 16110 uint64_t Dim = Value->getZExtValue(); 16111 if (!ConstantMatrixType::isDimensionValid(Dim)) { 16112 S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_invalid_dimension) 16113 << Name << ConstantMatrixType::getMaxElementsPerDimension(); 16114 return {}; 16115 } 16116 return Dim; 16117 } 16118 16119 ExprResult Sema::SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall, 16120 ExprResult CallResult) { 16121 if (!getLangOpts().MatrixTypes) { 16122 Diag(TheCall->getBeginLoc(), diag::err_builtin_matrix_disabled); 16123 return ExprError(); 16124 } 16125 16126 if (checkArgCount(*this, TheCall, 4)) 16127 return ExprError(); 16128 16129 unsigned PtrArgIdx = 0; 16130 Expr *PtrExpr = TheCall->getArg(PtrArgIdx); 16131 Expr *RowsExpr = TheCall->getArg(1); 16132 Expr *ColumnsExpr = TheCall->getArg(2); 16133 Expr *StrideExpr = TheCall->getArg(3); 16134 16135 bool ArgError = false; 16136 16137 // Check pointer argument. 
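  // In a call of the form (illustrative)
  //   __builtin_matrix_column_major_load(Ptr, Rows, Cols, Stride)
  // the unqualified pointee type of 'Ptr' becomes the element type of the
  // resulting matrix.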
16138 { 16139 ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr); 16140 if (PtrConv.isInvalid()) 16141 return PtrConv; 16142 PtrExpr = PtrConv.get(); 16143 TheCall->setArg(0, PtrExpr); 16144 if (PtrExpr->isTypeDependent()) { 16145 TheCall->setType(Context.DependentTy); 16146 return TheCall; 16147 } 16148 } 16149 16150 auto *PtrTy = PtrExpr->getType()->getAs<PointerType>(); 16151 QualType ElementTy; 16152 if (!PtrTy) { 16153 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_pointer_arg) 16154 << PtrArgIdx + 1; 16155 ArgError = true; 16156 } else { 16157 ElementTy = PtrTy->getPointeeType().getUnqualifiedType(); 16158 16159 if (!ConstantMatrixType::isValidElementType(ElementTy)) { 16160 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_pointer_arg) 16161 << PtrArgIdx + 1; 16162 ArgError = true; 16163 } 16164 } 16165 16166 // Apply default Lvalue conversions and convert the expression to size_t. 16167 auto ApplyArgumentConversions = [this](Expr *E) { 16168 ExprResult Conv = DefaultLvalueConversion(E); 16169 if (Conv.isInvalid()) 16170 return Conv; 16171 16172 return tryConvertExprToType(Conv.get(), Context.getSizeType()); 16173 }; 16174 16175 // Apply conversion to row and column expressions. 16176 ExprResult RowsConv = ApplyArgumentConversions(RowsExpr); 16177 if (!RowsConv.isInvalid()) { 16178 RowsExpr = RowsConv.get(); 16179 TheCall->setArg(1, RowsExpr); 16180 } else 16181 RowsExpr = nullptr; 16182 16183 ExprResult ColumnsConv = ApplyArgumentConversions(ColumnsExpr); 16184 if (!ColumnsConv.isInvalid()) { 16185 ColumnsExpr = ColumnsConv.get(); 16186 TheCall->setArg(2, ColumnsExpr); 16187 } else 16188 ColumnsExpr = nullptr; 16189 16190 // If any any part of the result matrix type is still pending, just use 16191 // Context.DependentTy, until all parts are resolved. 16192 if ((RowsExpr && RowsExpr->isTypeDependent()) || 16193 (ColumnsExpr && ColumnsExpr->isTypeDependent())) { 16194 TheCall->setType(Context.DependentTy); 16195 return CallResult; 16196 } 16197 16198 // Check row and column dimenions. 16199 llvm::Optional<unsigned> MaybeRows; 16200 if (RowsExpr) 16201 MaybeRows = getAndVerifyMatrixDimension(RowsExpr, "row", *this); 16202 16203 llvm::Optional<unsigned> MaybeColumns; 16204 if (ColumnsExpr) 16205 MaybeColumns = getAndVerifyMatrixDimension(ColumnsExpr, "column", *this); 16206 16207 // Check stride argument. 
16208 ExprResult StrideConv = ApplyArgumentConversions(StrideExpr); 16209 if (StrideConv.isInvalid()) 16210 return ExprError(); 16211 StrideExpr = StrideConv.get(); 16212 TheCall->setArg(3, StrideExpr); 16213 16214 if (MaybeRows) { 16215 if (Optional<llvm::APSInt> Value = 16216 StrideExpr->getIntegerConstantExpr(Context)) { 16217 uint64_t Stride = Value->getZExtValue(); 16218 if (Stride < *MaybeRows) { 16219 Diag(StrideExpr->getBeginLoc(), 16220 diag::err_builtin_matrix_stride_too_small); 16221 ArgError = true; 16222 } 16223 } 16224 } 16225 16226 if (ArgError || !MaybeRows || !MaybeColumns) 16227 return ExprError(); 16228 16229 TheCall->setType( 16230 Context.getConstantMatrixType(ElementTy, *MaybeRows, *MaybeColumns)); 16231 return CallResult; 16232 } 16233 16234 ExprResult Sema::SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall, 16235 ExprResult CallResult) { 16236 if (checkArgCount(*this, TheCall, 3)) 16237 return ExprError(); 16238 16239 unsigned PtrArgIdx = 1; 16240 Expr *MatrixExpr = TheCall->getArg(0); 16241 Expr *PtrExpr = TheCall->getArg(PtrArgIdx); 16242 Expr *StrideExpr = TheCall->getArg(2); 16243 16244 bool ArgError = false; 16245 16246 { 16247 ExprResult MatrixConv = DefaultLvalueConversion(MatrixExpr); 16248 if (MatrixConv.isInvalid()) 16249 return MatrixConv; 16250 MatrixExpr = MatrixConv.get(); 16251 TheCall->setArg(0, MatrixExpr); 16252 } 16253 if (MatrixExpr->isTypeDependent()) { 16254 TheCall->setType(Context.DependentTy); 16255 return TheCall; 16256 } 16257 16258 auto *MatrixTy = MatrixExpr->getType()->getAs<ConstantMatrixType>(); 16259 if (!MatrixTy) { 16260 Diag(MatrixExpr->getBeginLoc(), diag::err_builtin_matrix_arg) << 0; 16261 ArgError = true; 16262 } 16263 16264 { 16265 ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr); 16266 if (PtrConv.isInvalid()) 16267 return PtrConv; 16268 PtrExpr = PtrConv.get(); 16269 TheCall->setArg(1, PtrExpr); 16270 if (PtrExpr->isTypeDependent()) { 16271 TheCall->setType(Context.DependentTy); 16272 return TheCall; 16273 } 16274 } 16275 16276 // Check pointer argument. 16277 auto *PtrTy = PtrExpr->getType()->getAs<PointerType>(); 16278 if (!PtrTy) { 16279 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_pointer_arg) 16280 << PtrArgIdx + 1; 16281 ArgError = true; 16282 } else { 16283 QualType ElementTy = PtrTy->getPointeeType(); 16284 if (ElementTy.isConstQualified()) { 16285 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_store_to_const); 16286 ArgError = true; 16287 } 16288 ElementTy = ElementTy.getUnqualifiedType().getCanonicalType(); 16289 if (MatrixTy && 16290 !Context.hasSameType(ElementTy, MatrixTy->getElementType())) { 16291 Diag(PtrExpr->getBeginLoc(), 16292 diag::err_builtin_matrix_pointer_arg_mismatch) 16293 << ElementTy << MatrixTy->getElementType(); 16294 ArgError = true; 16295 } 16296 } 16297 16298 // Apply default Lvalue conversions and convert the stride expression to 16299 // size_t. 16300 { 16301 ExprResult StrideConv = DefaultLvalueConversion(StrideExpr); 16302 if (StrideConv.isInvalid()) 16303 return StrideConv; 16304 16305 StrideConv = tryConvertExprToType(StrideConv.get(), Context.getSizeType()); 16306 if (StrideConv.isInvalid()) 16307 return StrideConv; 16308 StrideExpr = StrideConv.get(); 16309 TheCall->setArg(2, StrideExpr); 16310 } 16311 16312 // Check stride argument. 
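  // As with the column-major load, a constant stride smaller than the number
  // of rows of the matrix being stored cannot be valid.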
16313 if (MatrixTy) { 16314 if (Optional<llvm::APSInt> Value = 16315 StrideExpr->getIntegerConstantExpr(Context)) { 16316 uint64_t Stride = Value->getZExtValue(); 16317 if (Stride < MatrixTy->getNumRows()) { 16318 Diag(StrideExpr->getBeginLoc(), 16319 diag::err_builtin_matrix_stride_too_small); 16320 ArgError = true; 16321 } 16322 } 16323 } 16324 16325 if (ArgError) 16326 return ExprError(); 16327 16328 return CallResult; 16329 } 16330 16331 /// \brief Enforce the bounds of a TCB 16332 /// CheckTCBEnforcement - Enforces that every function in a named TCB only 16333 /// directly calls other functions in the same TCB as marked by the enforce_tcb 16334 /// and enforce_tcb_leaf attributes. 16335 void Sema::CheckTCBEnforcement(const CallExpr *TheCall, 16336 const FunctionDecl *Callee) { 16337 const FunctionDecl *Caller = getCurFunctionDecl(); 16338 16339 // Calls to builtins are not enforced. 16340 if (!Caller || !Caller->hasAttr<EnforceTCBAttr>() || 16341 Callee->getBuiltinID() != 0) 16342 return; 16343 16344 // Search through the enforce_tcb and enforce_tcb_leaf attributes to find 16345 // all TCBs the callee is a part of. 16346 llvm::StringSet<> CalleeTCBs; 16347 for_each(Callee->specific_attrs<EnforceTCBAttr>(), 16348 [&](const auto *A) { CalleeTCBs.insert(A->getTCBName()); }); 16349 for_each(Callee->specific_attrs<EnforceTCBLeafAttr>(), 16350 [&](const auto *A) { CalleeTCBs.insert(A->getTCBName()); }); 16351 16352 // Go through the TCBs the caller is a part of and emit warnings if Caller 16353 // is in a TCB that the Callee is not. 16354 for_each( 16355 Caller->specific_attrs<EnforceTCBAttr>(), 16356 [&](const auto *A) { 16357 StringRef CallerTCB = A->getTCBName(); 16358 if (CalleeTCBs.count(CallerTCB) == 0) { 16359 this->Diag(TheCall->getExprLoc(), 16360 diag::warn_tcb_enforcement_violation) << Callee 16361 << CallerTCB; 16362 } 16363 }); 16364 } 16365