//===- SemaChecking.cpp - Extra Semantic Checking -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//  This file implements extra semantic analysis beyond what is enforced
//  by the C type system.
//
//===----------------------------------------------------------------------===//

#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/AttrIterator.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/FormatString.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/NonTrivialTypeVisitor.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/UnresolvedSet.h"
#include "clang/Basic/AddressSpaces.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/OpenCLOptions.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/SyncScope.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetCXXABI.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering.
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/Sema.h"
#include "clang/Sema/SemaInternal.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/Locale.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <limits>
#include <string>
#include <tuple>
#include <utility>

using namespace clang;
using namespace sema;

SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL,
                                                    unsigned ByteNo) const {
  return SL->getLocationOfByte(ByteNo, getSourceManager(), LangOpts,
                               Context.getTargetInfo());
}

/// Checks that a call expression's argument count is the desired number.
/// This is useful when doing custom type-checking. Returns true on error.
static bool checkArgCount(Sema &S, CallExpr *call, unsigned desiredArgCount) {
  unsigned argCount = call->getNumArgs();
  if (argCount == desiredArgCount) return false;

  if (argCount < desiredArgCount)
    return S.Diag(call->getEndLoc(), diag::err_typecheck_call_too_few_args)
           << 0 /*function call*/ << desiredArgCount << argCount
           << call->getSourceRange();

  // Highlight all the excess arguments.
  SourceRange range(call->getArg(desiredArgCount)->getBeginLoc(),
                    call->getArg(argCount - 1)->getEndLoc());

  return S.Diag(range.getBegin(), diag::err_typecheck_call_too_many_args)
         << 0 /*function call*/ << desiredArgCount << argCount
         << call->getArg(1)->getSourceRange();
}

/// Check that the first argument to __builtin_annotation is an integer
/// and the second argument is a non-wide string literal.
static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  // First argument should be an integer.
  Expr *ValArg = TheCall->getArg(0);
  QualType Ty = ValArg->getType();
  if (!Ty->isIntegerType()) {
    S.Diag(ValArg->getBeginLoc(), diag::err_builtin_annotation_first_arg)
        << ValArg->getSourceRange();
    return true;
  }

  // Second argument should be a constant string.
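  // Illustrative usage (not from the original source): a call such as
  //   __builtin_annotation(x, "bounds-checked")
  // passes both checks, while a wide literal like L"note" is rejected by the
  // isAscii() test below.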
  Expr *StrArg = TheCall->getArg(1)->IgnoreParenCasts();
  StringLiteral *Literal = dyn_cast<StringLiteral>(StrArg);
  if (!Literal || !Literal->isAscii()) {
    S.Diag(StrArg->getBeginLoc(), diag::err_builtin_annotation_second_arg)
        << StrArg->getSourceRange();
    return true;
  }

  TheCall->setType(Ty);
  return false;
}

static bool SemaBuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) {
  // We need at least one argument.
  if (TheCall->getNumArgs() < 1) {
    S.Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 1 << TheCall->getNumArgs()
        << TheCall->getCallee()->getSourceRange();
    return true;
  }

  // All arguments should be wide string literals.
  for (Expr *Arg : TheCall->arguments()) {
    auto *Literal = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts());
    if (!Literal || !Literal->isWide()) {
      S.Diag(Arg->getBeginLoc(), diag::err_msvc_annotation_wide_str)
          << Arg->getSourceRange();
      return true;
    }
  }

  return false;
}

/// Check that the argument to __builtin_addressof is a glvalue, and set the
/// result type to the corresponding pointer type.
static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  ExprResult Arg(TheCall->getArg(0));
  QualType ResultType = S.CheckAddressOfOperand(Arg, TheCall->getBeginLoc());
  if (ResultType.isNull())
    return true;

  TheCall->setArg(0, Arg.get());
  TheCall->setType(ResultType);
  return false;
}

/// Check the number of arguments and set the result type to
/// the argument type.
static bool SemaBuiltinPreserveAI(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  TheCall->setType(TheCall->getArg(0)->getType());
  return false;
}

static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 3))
    return true;

  // First two arguments should be integers.
  for (unsigned I = 0; I < 2; ++I) {
    ExprResult Arg = TheCall->getArg(I);
    QualType Ty = Arg.get()->getType();
    if (!Ty->isIntegerType()) {
      S.Diag(Arg.get()->getBeginLoc(), diag::err_overflow_builtin_must_be_int)
          << Ty << Arg.get()->getSourceRange();
      return true;
    }
    InitializedEntity Entity = InitializedEntity::InitializeParameter(
        S.getASTContext(), Ty, /*consume*/ false);
    Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg);
    if (Arg.isInvalid())
      return true;
    TheCall->setArg(I, Arg.get());
  }

  // Third argument should be a pointer to a non-const integer.
  // IRGen correctly handles volatile, restrict, and address spaces, and
  // the other qualifiers aren't possible.
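  // Illustrative accepted call (hypothetical): given 'int a, b, r;',
  //   bool Overflowed = __builtin_add_overflow(a, b, &r);
  // A third argument of type 'const int *' or with a non-integer pointee is
  // rejected by the check below.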
  {
    ExprResult Arg = TheCall->getArg(2);
    QualType Ty = Arg.get()->getType();
    const auto *PtrTy = Ty->getAs<PointerType>();
    if (!(PtrTy && PtrTy->getPointeeType()->isIntegerType() &&
          !PtrTy->getPointeeType().isConstQualified())) {
      S.Diag(Arg.get()->getBeginLoc(),
             diag::err_overflow_builtin_must_be_ptr_int)
          << Ty << Arg.get()->getSourceRange();
      return true;
    }
    InitializedEntity Entity = InitializedEntity::InitializeParameter(
        S.getASTContext(), Ty, /*consume*/ false);
    Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg);
    if (Arg.isInvalid())
      return true;
    TheCall->setArg(2, Arg.get());
  }
  return false;
}

static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
  if (checkArgCount(S, BuiltinCall, 2))
    return true;

  SourceLocation BuiltinLoc = BuiltinCall->getBeginLoc();
  Expr *Builtin = BuiltinCall->getCallee()->IgnoreImpCasts();
  Expr *Call = BuiltinCall->getArg(0);
  Expr *Chain = BuiltinCall->getArg(1);

  if (Call->getStmtClass() != Stmt::CallExprClass) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_not_call)
        << Call->getSourceRange();
    return true;
  }

  auto CE = cast<CallExpr>(Call);
  if (CE->getCallee()->getType()->isBlockPointerType()) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_block_call)
        << Call->getSourceRange();
    return true;
  }

  const Decl *TargetDecl = CE->getCalleeDecl();
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
    if (FD->getBuiltinID()) {
      S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_builtin_call)
          << Call->getSourceRange();
      return true;
    }

  if (isa<CXXPseudoDestructorExpr>(CE->getCallee()->IgnoreParens())) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_pdtor_call)
        << Call->getSourceRange();
    return true;
  }

  ExprResult ChainResult = S.UsualUnaryConversions(Chain);
  if (ChainResult.isInvalid())
    return true;
  if (!ChainResult.get()->getType()->isPointerType()) {
    S.Diag(BuiltinLoc, diag::err_second_argument_to_cwsc_not_pointer)
        << Chain->getSourceRange();
    return true;
  }

  QualType ReturnTy = CE->getCallReturnType(S.Context);
  QualType ArgTys[2] = { ReturnTy, ChainResult.get()->getType() };
  QualType BuiltinTy = S.Context.getFunctionType(
      ReturnTy, ArgTys, FunctionProtoType::ExtProtoInfo());
  QualType BuiltinPtrTy = S.Context.getPointerType(BuiltinTy);

  Builtin =
      S.ImpCastExprToType(Builtin, BuiltinPtrTy, CK_BuiltinFnToFnPtr).get();

  BuiltinCall->setType(CE->getType());
  BuiltinCall->setValueKind(CE->getValueKind());
  BuiltinCall->setObjectKind(CE->getObjectKind());
  BuiltinCall->setCallee(Builtin);
  BuiltinCall->setArg(1, ChainResult.get());

  return false;
}

/// Check a call to BuiltinID for buffer overflows. If BuiltinID is a
/// __builtin_*_chk function, then use the object size argument specified in
/// the source. Otherwise, infer the object size using __builtin_object_size.
void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
                                               CallExpr *TheCall) {
  // FIXME: There are some more useful checks we could be doing here:
  //  - Analyze the format string of sprintf to see how much of buffer is used.
  //  - Evaluate strlen of strcpy arguments, use as object size.
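  // Illustrative case this check is meant to catch (hypothetical user code):
  //   char Buf[10];
  //   memcpy(Buf, Src, 20);  // diagnosed via warn_fortify_source_overflow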

  if (TheCall->isValueDependent() || TheCall->isTypeDependent() ||
      isConstantEvaluated())
    return;

  unsigned BuiltinID = FD->getBuiltinID(/*ConsiderWrappers=*/true);
  if (!BuiltinID)
    return;

  unsigned DiagID = 0;
  bool IsChkVariant = false;
  unsigned SizeIndex, ObjectIndex;
  switch (BuiltinID) {
  default:
    return;
  case Builtin::BI__builtin___memcpy_chk:
  case Builtin::BI__builtin___memmove_chk:
  case Builtin::BI__builtin___memset_chk:
  case Builtin::BI__builtin___strlcat_chk:
  case Builtin::BI__builtin___strlcpy_chk:
  case Builtin::BI__builtin___strncat_chk:
  case Builtin::BI__builtin___strncpy_chk:
  case Builtin::BI__builtin___stpncpy_chk:
  case Builtin::BI__builtin___memccpy_chk: {
    DiagID = diag::warn_builtin_chk_overflow;
    IsChkVariant = true;
    SizeIndex = TheCall->getNumArgs() - 2;
    ObjectIndex = TheCall->getNumArgs() - 1;
    break;
  }

  case Builtin::BI__builtin___snprintf_chk:
  case Builtin::BI__builtin___vsnprintf_chk: {
    DiagID = diag::warn_builtin_chk_overflow;
    IsChkVariant = true;
    SizeIndex = 1;
    ObjectIndex = 3;
    break;
  }

  case Builtin::BIstrncat:
  case Builtin::BI__builtin_strncat:
  case Builtin::BIstrncpy:
  case Builtin::BI__builtin_strncpy:
  case Builtin::BIstpncpy:
  case Builtin::BI__builtin_stpncpy: {
    // Whether these functions overflow depends on the runtime strlen of the
    // string, not just the buffer size, so emitting the "always overflow"
    // diagnostic isn't quite right. We should still diagnose passing a buffer
    // size larger than the destination buffer though; this is a runtime abort
    // in _FORTIFY_SOURCE mode, and is quite suspicious otherwise.
    DiagID = diag::warn_fortify_source_size_mismatch;
    SizeIndex = TheCall->getNumArgs() - 1;
    ObjectIndex = 0;
    break;
  }

  case Builtin::BImemcpy:
  case Builtin::BI__builtin_memcpy:
  case Builtin::BImemmove:
  case Builtin::BI__builtin_memmove:
  case Builtin::BImemset:
  case Builtin::BI__builtin_memset: {
    DiagID = diag::warn_fortify_source_overflow;
    SizeIndex = TheCall->getNumArgs() - 1;
    ObjectIndex = 0;
    break;
  }
  case Builtin::BIsnprintf:
  case Builtin::BI__builtin_snprintf:
  case Builtin::BIvsnprintf:
  case Builtin::BI__builtin_vsnprintf: {
    DiagID = diag::warn_fortify_source_size_mismatch;
    SizeIndex = 1;
    ObjectIndex = 0;
    break;
  }
  }

  llvm::APSInt ObjectSize;
  // For __builtin___*_chk, the object size is explicitly provided by the
  // caller (usually using __builtin_object_size). Use that value to check
  // this call.
  if (IsChkVariant) {
    Expr::EvalResult Result;
    Expr *SizeArg = TheCall->getArg(ObjectIndex);
    if (!SizeArg->EvaluateAsInt(Result, getASTContext()))
      return;
    ObjectSize = Result.Val.getInt();

    // Otherwise, try to evaluate an imaginary call to __builtin_object_size.
  } else {
    // If the parameter has a pass_object_size attribute, then we should use
    // its (potentially) more strict checking mode. Otherwise, conservatively
    // assume type 0.
    int BOSType = 0;
    if (const auto *POS =
            FD->getParamDecl(ObjectIndex)->getAttr<PassObjectSizeAttr>())
      BOSType = POS->getType();

    Expr *ObjArg = TheCall->getArg(ObjectIndex);
    uint64_t Result;
    if (!ObjArg->tryEvaluateObjectSize(Result, getASTContext(), BOSType))
      return;
    // Get the object size in the target's size_t width.
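    // (For example, on a typical 64-bit target size_t is 64 bits wide, so the
    // evaluated size is zero-extended or truncated to 64 bits below.)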
    const TargetInfo &TI = getASTContext().getTargetInfo();
    unsigned SizeTypeWidth = TI.getTypeWidth(TI.getSizeType());
    ObjectSize = llvm::APSInt::getUnsigned(Result).extOrTrunc(SizeTypeWidth);
  }

  // Evaluate the number of bytes of the object that this call will use.
  Expr::EvalResult Result;
  Expr *UsedSizeArg = TheCall->getArg(SizeIndex);
  if (!UsedSizeArg->EvaluateAsInt(Result, getASTContext()))
    return;
  llvm::APSInt UsedSize = Result.Val.getInt();

  if (UsedSize.ule(ObjectSize))
    return;

  StringRef FunctionName = getASTContext().BuiltinInfo.getName(BuiltinID);
  // Skim off the details of whichever builtin was called to produce a better
  // diagnostic, as it's unlikely that the user wrote the __builtin explicitly.
  if (IsChkVariant) {
    FunctionName = FunctionName.drop_front(std::strlen("__builtin___"));
    FunctionName = FunctionName.drop_back(std::strlen("_chk"));
  } else if (FunctionName.startswith("__builtin_")) {
    FunctionName = FunctionName.drop_front(std::strlen("__builtin_"));
  }

  DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
                      PDiag(DiagID)
                          << FunctionName << ObjectSize.toString(/*Radix=*/10)
                          << UsedSize.toString(/*Radix=*/10));
}

static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall,
                                     Scope::ScopeFlags NeededScopeFlags,
                                     unsigned DiagID) {
  // Scopes aren't available during instantiation. Fortunately, builtin
  // functions cannot be template args so they cannot be formed through
  // template instantiation. Therefore checking once during the parse is
  // sufficient.
  if (SemaRef.inTemplateInstantiation())
    return false;

  Scope *S = SemaRef.getCurScope();
  while (S && !S->isSEHExceptScope())
    S = S->getParent();
  if (!S || !(S->getFlags() & NeededScopeFlags)) {
    auto *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
    SemaRef.Diag(TheCall->getExprLoc(), DiagID)
        << DRE->getDecl()->getIdentifier();
    return true;
  }

  return false;
}

static inline bool isBlockPointer(Expr *Arg) {
  return Arg->getType()->isBlockPointerType();
}

/// OpenCL C v2.0, s6.13.17.2 - Checks that the block parameters are all local
/// void*, which is a requirement of device side enqueue.
static bool checkOpenCLBlockArgs(Sema &S, Expr *BlockArg) {
  const BlockPointerType *BPT =
      cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
  ArrayRef<QualType> Params =
      BPT->getPointeeType()->castAs<FunctionProtoType>()->getParamTypes();
  unsigned ArgCounter = 0;
  bool IllegalParams = false;
  // Iterate through the block parameters until either one is found that is not
  // a local void*, or the block is valid.
  for (ArrayRef<QualType>::iterator I = Params.begin(), E = Params.end();
       I != E; ++I, ++ArgCounter) {
    if (!(*I)->isPointerType() || !(*I)->getPointeeType()->isVoidType() ||
        (*I)->getPointeeType().getQualifiers().getAddressSpace() !=
            LangAS::opencl_local) {
      // Get the location of the error. If a block literal has been passed
      // (BlockExpr) then we can point straight to the offending argument,
      // else we just point to the variable reference.
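      // Illustrative (hypothetical): a block parameter declared as 'int *' or
      // as a non-local 'global void *' reaches this point; only 'local void *'
      // parameters are accepted for device-side enqueue.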
      SourceLocation ErrorLoc;
      if (isa<BlockExpr>(BlockArg)) {
        BlockDecl *BD = cast<BlockExpr>(BlockArg)->getBlockDecl();
        ErrorLoc = BD->getParamDecl(ArgCounter)->getBeginLoc();
      } else if (isa<DeclRefExpr>(BlockArg)) {
        ErrorLoc = cast<DeclRefExpr>(BlockArg)->getBeginLoc();
      }
      S.Diag(ErrorLoc,
             diag::err_opencl_enqueue_kernel_blocks_non_local_void_args);
      IllegalParams = true;
    }
  }

  return IllegalParams;
}

static bool checkOpenCLSubgroupExt(Sema &S, CallExpr *Call) {
  if (!S.getOpenCLOptions().isEnabled("cl_khr_subgroups")) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_requires_extension)
        << 1 << Call->getDirectCallee() << "cl_khr_subgroups";
    return true;
  }
  return false;
}

static bool SemaOpenCLBuiltinNDRangeAndBlock(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  if (checkOpenCLSubgroupExt(S, TheCall))
    return true;

  // First argument is an ndrange_t type.
  Expr *NDRangeArg = TheCall->getArg(0);
  if (NDRangeArg->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
    S.Diag(NDRangeArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'ndrange_t'";
    return true;
  }

  Expr *BlockArg = TheCall->getArg(1);
  if (!isBlockPointer(BlockArg)) {
    S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "block";
    return true;
  }
  return checkOpenCLBlockArgs(S, BlockArg);
}

/// OpenCL C v2.0, s6.13.17.6 - Check the argument to the
/// get_kernel_work_group_size
/// and get_kernel_preferred_work_group_size_multiple builtin functions.
static bool SemaOpenCLBuiltinKernelWorkGroupSize(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  Expr *BlockArg = TheCall->getArg(0);
  if (!isBlockPointer(BlockArg)) {
    S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "block";
    return true;
  }
  return checkOpenCLBlockArgs(S, BlockArg);
}

/// Diagnose integer type and any valid implicit conversion to it.
static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E,
                                      const QualType &IntType);

static bool checkOpenCLEnqueueLocalSizeArgs(Sema &S, CallExpr *TheCall,
                                            unsigned Start, unsigned End) {
  bool IllegalParams = false;
  for (unsigned I = Start; I <= End; ++I)
    IllegalParams |= checkOpenCLEnqueueIntType(S, TheCall->getArg(I),
                                               S.Context.getSizeType());
  return IllegalParams;
}

/// OpenCL v2.0, s6.13.17.1 - Check that sizes are provided for all
/// 'local void*' parameters of the passed block.
static bool checkOpenCLEnqueueVariadicArgs(Sema &S, CallExpr *TheCall,
                                           Expr *BlockArg,
                                           unsigned NumNonVarArgs) {
  const BlockPointerType *BPT =
      cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
  unsigned NumBlockParams =
      BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams();
  unsigned TotalNumArgs = TheCall->getNumArgs();

  // For each argument passed to the block, a corresponding uint needs to
  // be passed to describe the size of the local memory.
  if (TotalNumArgs != NumBlockParams + NumNonVarArgs) {
    S.Diag(TheCall->getBeginLoc(),
           diag::err_opencl_enqueue_kernel_local_size_args);
    return true;
  }

  // Check that the sizes of the local memory are specified by integers.
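  // Illustrative call shape (hypothetical): for a block taking two
  // 'local void *' parameters, two trailing size arguments are expected, e.g.
  //   enqueue_kernel(Q, Flags, NDR, ^(local void *A, local void *B){ ... },
  //                  64, 128);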
  return checkOpenCLEnqueueLocalSizeArgs(S, TheCall, NumNonVarArgs,
                                         TotalNumArgs - 1);
}

/// OpenCL C v2.0, s6.13.17 - The enqueue_kernel function has four different
/// overload formats, specified in Table 6.13.17.1.
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    void (^block)(void))
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    uint num_events_in_wait_list,
///                    clk_event_t *event_wait_list,
///                    clk_event_t *event_ret,
///                    void (^block)(void))
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    void (^block)(local void*, ...),
///                    uint size0, ...)
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    uint num_events_in_wait_list,
///                    clk_event_t *event_wait_list,
///                    clk_event_t *event_ret,
///                    void (^block)(local void*, ...),
///                    uint size0, ...)
static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
  unsigned NumArgs = TheCall->getNumArgs();

  if (NumArgs < 4) {
    S.Diag(TheCall->getBeginLoc(),
           diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 4 << NumArgs;
    return true;
  }

  Expr *Arg0 = TheCall->getArg(0);
  Expr *Arg1 = TheCall->getArg(1);
  Expr *Arg2 = TheCall->getArg(2);
  Expr *Arg3 = TheCall->getArg(3);

  // First argument always needs to be a queue_t type.
  if (!Arg0->getType()->isQueueT()) {
    S.Diag(TheCall->getArg(0)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << S.Context.OCLQueueTy;
    return true;
  }

  // Second argument always needs to be a kernel_enqueue_flags_t enum value.
  if (!Arg1->getType()->isIntegerType()) {
    S.Diag(TheCall->getArg(1)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'kernel_enqueue_flags_t' (i.e. uint)";
    return true;
  }

  // Third argument is always an ndrange_t type.
  if (Arg2->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
    S.Diag(TheCall->getArg(2)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'ndrange_t'";
    return true;
  }

  // With four arguments, there is only one form that the function could be
  // called in: no events and no variable arguments.
  if (NumArgs == 4) {
    // check that the last argument is the right block type.
    if (!isBlockPointer(Arg3)) {
      S.Diag(Arg3->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    // we have a block type, check the prototype
    const BlockPointerType *BPT =
        cast<BlockPointerType>(Arg3->getType().getCanonicalType());
    if (BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams() > 0) {
      S.Diag(Arg3->getBeginLoc(),
             diag::err_opencl_enqueue_kernel_blocks_no_args);
      return true;
    }
    return false;
  }
  // we can have block + varargs.
  if (isBlockPointer(Arg3))
    return (checkOpenCLBlockArgs(S, Arg3) ||
            checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg3, 4));
  // last two cases with either exactly 7 args or 7 args and varargs.
  if (NumArgs >= 7) {
    // check common block argument.
    Expr *Arg6 = TheCall->getArg(6);
    if (!isBlockPointer(Arg6)) {
      S.Diag(Arg6->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    if (checkOpenCLBlockArgs(S, Arg6))
      return true;

    // Fourth argument has to be any integer type.
    if (!Arg3->getType()->isIntegerType()) {
      S.Diag(TheCall->getArg(3)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "integer";
      return true;
    }
    // check remaining common arguments.
    Expr *Arg4 = TheCall->getArg(4);
    Expr *Arg5 = TheCall->getArg(5);

    // Fifth argument is always passed as a pointer to clk_event_t.
    if (!Arg4->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !Arg4->getType()->getPointeeOrArrayElementType()->isClkEventT()) {
      S.Diag(TheCall->getArg(4)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    // Sixth argument is always passed as a pointer to clk_event_t.
    if (!Arg5->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !(Arg5->getType()->isPointerType() &&
          Arg5->getType()->getPointeeType()->isClkEventT())) {
      S.Diag(TheCall->getArg(5)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    if (NumArgs == 7)
      return false;

    return checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg6, 7);
  }

  // None of the specific cases has been detected; give a generic error.
  S.Diag(TheCall->getBeginLoc(),
         diag::err_opencl_enqueue_kernel_incorrect_args);
  return true;
}

/// Returns the OpenCL access qualifier attribute, if any.
static OpenCLAccessAttr *getOpenCLArgAccess(const Decl *D) {
  return D->getAttr<OpenCLAccessAttr>();
}

/// Returns true if the first argument is not a pipe, or if its access
/// qualifier is incompatible with the called pipe builtin.
static bool checkOpenCLPipeArg(Sema &S, CallExpr *Call) {
  const Expr *Arg0 = Call->getArg(0);
  // First argument type should always be pipe.
  if (!Arg0->getType()->isPipeType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
        << Call->getDirectCallee() << Arg0->getSourceRange();
    return true;
  }
  OpenCLAccessAttr *AccessQual =
      getOpenCLArgAccess(cast<DeclRefExpr>(Arg0)->getDecl());
  // Validates the access qualifier is compatible with the call.
  // OpenCL v2.0 s6.13.16 - The access qualifiers for pipe should only be
  // read_only and write_only, and assumed to be read_only if no qualifier is
  // specified.
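  // Illustrative (hypothetical): given 'read_only pipe int P;', a call to
  // write_pipe(P, &V) is rejected by the write_only cases below, while
  // read_pipe(P, &V) is accepted.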
  switch (Call->getDirectCallee()->getBuiltinID()) {
  case Builtin::BIread_pipe:
  case Builtin::BIreserve_read_pipe:
  case Builtin::BIcommit_read_pipe:
  case Builtin::BIwork_group_reserve_read_pipe:
  case Builtin::BIsub_group_reserve_read_pipe:
  case Builtin::BIwork_group_commit_read_pipe:
  case Builtin::BIsub_group_commit_read_pipe:
    if (!(!AccessQual || AccessQual->isReadOnly())) {
      S.Diag(Arg0->getBeginLoc(),
             diag::err_opencl_builtin_pipe_invalid_access_modifier)
          << "read_only" << Arg0->getSourceRange();
      return true;
    }
    break;
  case Builtin::BIwrite_pipe:
  case Builtin::BIreserve_write_pipe:
  case Builtin::BIcommit_write_pipe:
  case Builtin::BIwork_group_reserve_write_pipe:
  case Builtin::BIsub_group_reserve_write_pipe:
  case Builtin::BIwork_group_commit_write_pipe:
  case Builtin::BIsub_group_commit_write_pipe:
    if (!(AccessQual && AccessQual->isWriteOnly())) {
      S.Diag(Arg0->getBeginLoc(),
             diag::err_opencl_builtin_pipe_invalid_access_modifier)
          << "write_only" << Arg0->getSourceRange();
      return true;
    }
    break;
  default:
    break;
  }
  return false;
}

/// Returns true if the pipe element type is different from the pointee type
/// of the pointer argument.
static bool checkOpenCLPipePacketType(Sema &S, CallExpr *Call, unsigned Idx) {
  const Expr *Arg0 = Call->getArg(0);
  const Expr *ArgIdx = Call->getArg(Idx);
  const PipeType *PipeTy = cast<PipeType>(Arg0->getType());
  const QualType EltTy = PipeTy->getElementType();
  const PointerType *ArgTy = ArgIdx->getType()->getAs<PointerType>();
  // The Idx argument should be a pointer and the type of the pointer and
  // the type of pipe element should also be the same.
  if (!ArgTy ||
      !S.Context.hasSameType(
          EltTy, ArgTy->getPointeeType()->getCanonicalTypeInternal())) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.getPointerType(EltTy)
        << ArgIdx->getType() << ArgIdx->getSourceRange();
    return true;
  }
  return false;
}

// Performs semantic analysis for the read/write_pipe call.
// \param S Reference to the semantic analyzer.
// \param Call A pointer to the builtin call.
// \return True if a semantic error has been found, false otherwise.
static bool SemaBuiltinRWPipe(Sema &S, CallExpr *Call) {
  // OpenCL v2.0 s6.13.16.2 - The built-in read/write
  // functions have two forms.
  switch (Call->getNumArgs()) {
  case 2:
    if (checkOpenCLPipeArg(S, Call))
      return true;
    // The call with 2 arguments should be
    // read/write_pipe(pipe T, T*).
    // Check packet type T.
    if (checkOpenCLPipePacketType(S, Call, 1))
      return true;
    break;

  case 4: {
    if (checkOpenCLPipeArg(S, Call))
      return true;
    // The call with 4 arguments should be
    // read/write_pipe(pipe T, reserve_id_t, uint, T*).
    // Check reserve_id_t.
    if (!Call->getArg(1)->getType()->isReserveIDT()) {
      S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
          << Call->getDirectCallee() << S.Context.OCLReserveIDTy
          << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
      return true;
    }

    // Check the index.
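    // Illustrative 4-argument form (hypothetical):
    //   read_pipe(P, Rid, Idx, &Val);
    // where 'Idx' selects the packet within the reservation and must be an
    // integer.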
    const Expr *Arg2 = Call->getArg(2);
    if (!Arg2->getType()->isIntegerType() &&
        !Arg2->getType()->isUnsignedIntegerType()) {
      S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
          << Call->getDirectCallee() << S.Context.UnsignedIntTy
          << Arg2->getType() << Arg2->getSourceRange();
      return true;
    }

    // Check packet type T.
    if (checkOpenCLPipePacketType(S, Call, 3))
      return true;
  } break;
  default:
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_arg_num)
        << Call->getDirectCallee() << Call->getSourceRange();
    return true;
  }

  return false;
}

// Performs a semantic analysis on the {work_group_/sub_group_
// /_}reserve_{read/write}_pipe builtins.
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinReserveRWPipe(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 2))
    return true;

  if (checkOpenCLPipeArg(S, Call))
    return true;

  // Check the reserve size.
  if (!Call->getArg(1)->getType()->isIntegerType() &&
      !Call->getArg(1)->getType()->isUnsignedIntegerType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.UnsignedIntTy
        << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
    return true;
  }

  // Since the return type of the reserve_read/write_pipe built-in functions is
  // reserve_id_t, which is not defined in the builtin def file, we use int as
  // the return type there and need to override the return type of these
  // functions here.
  Call->setType(S.Context.OCLReserveIDTy);

  return false;
}

// Performs a semantic analysis on the {work_group_/sub_group_
// /_}commit_{read/write}_pipe builtins.
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinCommitRWPipe(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 2))
    return true;

  if (checkOpenCLPipeArg(S, Call))
    return true;

  // Check reserve_id_t.
  if (!Call->getArg(1)->getType()->isReserveIDT()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.OCLReserveIDTy
        << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
    return true;
  }

  return false;
}

// Performs a semantic analysis on the call to the built-in Pipe
// Query Functions.
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinPipePackets(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 1))
    return true;

  if (!Call->getArg(0)->getType()->isPipeType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
        << Call->getDirectCallee() << Call->getArg(0)->getSourceRange();
    return true;
  }

  return false;
}

// OpenCL v2.0 s6.13.9 - Address space qualifier functions.
// Performs semantic analysis for the to_global/local/private call.
// \param S Reference to the semantic analyzer.
// \param BuiltinID ID of the builtin function.
// \param Call A pointer to the builtin call.
// \return True if a semantic error has been found, false otherwise.
static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID,
                                    CallExpr *Call) {
  if (Call->getNumArgs() != 1) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_arg_num)
        << Call->getDirectCallee() << Call->getSourceRange();
    return true;
  }

  auto RT = Call->getArg(0)->getType();
  if (!RT->isPointerType() ||
      RT->getPointeeType().getAddressSpace() == LangAS::opencl_constant) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_invalid_arg)
        << Call->getArg(0) << Call->getDirectCallee() << Call->getSourceRange();
    return true;
  }

  if (RT->getPointeeType().getAddressSpace() != LangAS::opencl_generic) {
    S.Diag(Call->getArg(0)->getBeginLoc(),
           diag::warn_opencl_generic_address_space_arg)
        << Call->getDirectCallee()->getNameInfo().getAsString()
        << Call->getArg(0)->getSourceRange();
  }

  RT = RT->getPointeeType();
  auto Qual = RT.getQualifiers();
  switch (BuiltinID) {
  case Builtin::BIto_global:
    Qual.setAddressSpace(LangAS::opencl_global);
    break;
  case Builtin::BIto_local:
    Qual.setAddressSpace(LangAS::opencl_local);
    break;
  case Builtin::BIto_private:
    Qual.setAddressSpace(LangAS::opencl_private);
    break;
  default:
    llvm_unreachable("Invalid builtin function");
  }
  Call->setType(S.Context.getPointerType(S.Context.getQualifiedType(
      RT.getUnqualifiedType(), Qual)));

  return false;
}

static ExprResult SemaBuiltinLaunder(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return ExprError();

  // Compute __builtin_launder's parameter type from the argument.
  // The parameter type is:
  //  * The type of the argument if it's not an array or function type,
  //  Otherwise,
  //  * The decayed argument type.
  QualType ParamTy = [&]() {
    QualType ArgTy = TheCall->getArg(0)->getType();
    if (const ArrayType *Ty = ArgTy->getAsArrayTypeUnsafe())
      return S.Context.getPointerType(Ty->getElementType());
    if (ArgTy->isFunctionType()) {
      return S.Context.getPointerType(ArgTy);
    }
    return ArgTy;
  }();

  TheCall->setType(ParamTy);

  auto DiagSelect = [&]() -> llvm::Optional<unsigned> {
    if (!ParamTy->isPointerType())
      return 0;
    if (ParamTy->isFunctionPointerType())
      return 1;
    if (ParamTy->isVoidPointerType())
      return 2;
    return llvm::Optional<unsigned>{};
  }();
  if (DiagSelect.hasValue()) {
    S.Diag(TheCall->getBeginLoc(), diag::err_builtin_launder_invalid_arg)
        << DiagSelect.getValue() << TheCall->getSourceRange();
    return ExprError();
  }

  // We either have an incomplete class type, or we have a class template
  // whose instantiation has not been forced. Example:
  //
  //   template <class T> struct Foo { T value; };
  //   Foo<int> *p = nullptr;
  //   auto *d = __builtin_launder(p);
  if (S.RequireCompleteType(TheCall->getBeginLoc(), ParamTy->getPointeeType(),
                            diag::err_incomplete_type))
    return ExprError();

  assert(ParamTy->getPointeeType()->isObjectType() &&
         "Unhandled non-object pointer case");

  InitializedEntity Entity =
      InitializedEntity::InitializeParameter(S.Context, ParamTy, false);
  ExprResult Arg =
      S.PerformCopyInitialization(Entity, SourceLocation(), TheCall->getArg(0));
  if (Arg.isInvalid())
    return ExprError();
  TheCall->setArg(0, Arg.get());

  return TheCall;
}

// Emit an error and return true if the current architecture is not in the list
// of supported architectures.
static bool
CheckBuiltinTargetSupport(Sema &S, unsigned BuiltinID, CallExpr *TheCall,
                          ArrayRef<llvm::Triple::ArchType> SupportedArchs) {
  llvm::Triple::ArchType CurArch =
      S.getASTContext().getTargetInfo().getTriple().getArch();
  if (llvm::is_contained(SupportedArchs, CurArch))
    return false;
  S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
      << TheCall->getSourceRange();
  return true;
}

ExprResult
Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
                               CallExpr *TheCall) {
  ExprResult TheCallResult(TheCall);

  // Find out if any arguments are required to be integer constant expressions.
  unsigned ICEArguments = 0;
  ASTContext::GetBuiltinTypeError Error;
  Context.GetBuiltinType(BuiltinID, Error, &ICEArguments);
  if (Error != ASTContext::GE_None)
    ICEArguments = 0;  // Don't diagnose previously diagnosed errors.

  // If any arguments are required to be ICE's, check and diagnose.
  for (unsigned ArgNo = 0; ICEArguments != 0; ++ArgNo) {
    // Skip arguments not required to be ICE's.
    if ((ICEArguments & (1 << ArgNo)) == 0) continue;

    llvm::APSInt Result;
    if (SemaBuiltinConstantArg(TheCall, ArgNo, Result))
      return true;
    ICEArguments &= ~(1 << ArgNo);
  }

  switch (BuiltinID) {
  case Builtin::BI__builtin___CFStringMakeConstantString:
    assert(TheCall->getNumArgs() == 1 &&
           "Wrong # arguments to builtin CFStringMakeConstantString");
    if (CheckObjCString(TheCall->getArg(0)))
      return ExprError();
    break;
  case Builtin::BI__builtin_ms_va_start:
  case Builtin::BI__builtin_stdarg_start:
  case Builtin::BI__builtin_va_start:
    if (SemaBuiltinVAStart(BuiltinID, TheCall))
      return ExprError();
    break;
  case Builtin::BI__va_start: {
    switch (Context.getTargetInfo().getTriple().getArch()) {
    case llvm::Triple::aarch64:
    case llvm::Triple::arm:
    case llvm::Triple::thumb:
      if (SemaBuiltinVAStartARMMicrosoft(TheCall))
        return ExprError();
      break;
    default:
      if (SemaBuiltinVAStart(BuiltinID, TheCall))
        return ExprError();
      break;
    }
    break;
  }

  // The acquire, release, and no fence variants are ARM and AArch64 only.
  case Builtin::BI_interlockedbittestandset_acq:
  case Builtin::BI_interlockedbittestandset_rel:
  case Builtin::BI_interlockedbittestandset_nf:
  case Builtin::BI_interlockedbittestandreset_acq:
  case Builtin::BI_interlockedbittestandreset_rel:
  case Builtin::BI_interlockedbittestandreset_nf:
    if (CheckBuiltinTargetSupport(
            *this, BuiltinID, TheCall,
            {llvm::Triple::arm, llvm::Triple::thumb, llvm::Triple::aarch64}))
      return ExprError();
    break;

  // The 64-bit bittest variants are x64, ARM, and AArch64 only.
  case Builtin::BI_bittest64:
  case Builtin::BI_bittestandcomplement64:
  case Builtin::BI_bittestandreset64:
  case Builtin::BI_bittestandset64:
  case Builtin::BI_interlockedbittestandreset64:
  case Builtin::BI_interlockedbittestandset64:
    if (CheckBuiltinTargetSupport(*this, BuiltinID, TheCall,
                                  {llvm::Triple::x86_64, llvm::Triple::arm,
                                   llvm::Triple::thumb, llvm::Triple::aarch64}))
      return ExprError();
    break;

  case Builtin::BI__builtin_isgreater:
  case Builtin::BI__builtin_isgreaterequal:
  case Builtin::BI__builtin_isless:
  case Builtin::BI__builtin_islessequal:
  case Builtin::BI__builtin_islessgreater:
  case Builtin::BI__builtin_isunordered:
    if (SemaBuiltinUnorderedCompare(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_fpclassify:
    if (SemaBuiltinFPClassification(TheCall, 6))
      return ExprError();
    break;
  case Builtin::BI__builtin_isfinite:
  case Builtin::BI__builtin_isinf:
  case Builtin::BI__builtin_isinf_sign:
  case Builtin::BI__builtin_isnan:
  case Builtin::BI__builtin_isnormal:
  case Builtin::BI__builtin_signbit:
  case Builtin::BI__builtin_signbitf:
  case Builtin::BI__builtin_signbitl:
    if (SemaBuiltinFPClassification(TheCall, 1))
      return ExprError();
    break;
  case Builtin::BI__builtin_shufflevector:
    return SemaBuiltinShuffleVector(TheCall);
    // TheCall will be freed by the smart pointer here, but that's fine, since
    // SemaBuiltinShuffleVector guts it, but then doesn't release it.
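    // Illustrative __builtin_shufflevector use (hypothetical):
    //   V4 Even = __builtin_shufflevector(A, B, 0, 2, 4, 6);
    // where the trailing indices must be integer constant expressions
    // (enforced by SemaBuiltinShuffleVector).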
  case Builtin::BI__builtin_prefetch:
    if (SemaBuiltinPrefetch(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_alloca_with_align:
    if (SemaBuiltinAllocaWithAlign(TheCall))
      return ExprError();
    LLVM_FALLTHROUGH;
  case Builtin::BI__builtin_alloca:
    Diag(TheCall->getBeginLoc(), diag::warn_alloca)
        << TheCall->getDirectCallee();
    break;
  case Builtin::BI__assume:
  case Builtin::BI__builtin_assume:
    if (SemaBuiltinAssume(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_assume_aligned:
    if (SemaBuiltinAssumeAligned(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_dynamic_object_size:
  case Builtin::BI__builtin_object_size:
    if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 3))
      return ExprError();
    break;
  case Builtin::BI__builtin_longjmp:
    if (SemaBuiltinLongjmp(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_setjmp:
    if (SemaBuiltinSetjmp(TheCall))
      return ExprError();
    break;
  case Builtin::BI_setjmp:
  case Builtin::BI_setjmpex:
    if (checkArgCount(*this, TheCall, 1))
      return true;
    break;
  case Builtin::BI__builtin_classify_type:
    if (checkArgCount(*this, TheCall, 1)) return true;
    TheCall->setType(Context.IntTy);
    break;
  case Builtin::BI__builtin_constant_p: {
    if (checkArgCount(*this, TheCall, 1)) return true;
    ExprResult Arg = DefaultFunctionArrayLvalueConversion(TheCall->getArg(0));
    if (Arg.isInvalid()) return true;
    TheCall->setArg(0, Arg.get());
    TheCall->setType(Context.IntTy);
    break;
  }
  case Builtin::BI__builtin_launder:
    return SemaBuiltinLaunder(*this, TheCall);
  case Builtin::BI__sync_fetch_and_add:
  case Builtin::BI__sync_fetch_and_add_1:
  case Builtin::BI__sync_fetch_and_add_2:
  case Builtin::BI__sync_fetch_and_add_4:
  case Builtin::BI__sync_fetch_and_add_8:
  case Builtin::BI__sync_fetch_and_add_16:
  case Builtin::BI__sync_fetch_and_sub:
  case Builtin::BI__sync_fetch_and_sub_1:
  case Builtin::BI__sync_fetch_and_sub_2:
  case Builtin::BI__sync_fetch_and_sub_4:
  case Builtin::BI__sync_fetch_and_sub_8:
  case Builtin::BI__sync_fetch_and_sub_16:
  case Builtin::BI__sync_fetch_and_or:
  case Builtin::BI__sync_fetch_and_or_1:
  case Builtin::BI__sync_fetch_and_or_2:
  case Builtin::BI__sync_fetch_and_or_4:
  case Builtin::BI__sync_fetch_and_or_8:
  case Builtin::BI__sync_fetch_and_or_16:
  case Builtin::BI__sync_fetch_and_and:
  case Builtin::BI__sync_fetch_and_and_1:
  case Builtin::BI__sync_fetch_and_and_2:
  case Builtin::BI__sync_fetch_and_and_4:
  case Builtin::BI__sync_fetch_and_and_8:
  case Builtin::BI__sync_fetch_and_and_16:
  case Builtin::BI__sync_fetch_and_xor:
  case Builtin::BI__sync_fetch_and_xor_1:
  case Builtin::BI__sync_fetch_and_xor_2:
  case Builtin::BI__sync_fetch_and_xor_4:
  case Builtin::BI__sync_fetch_and_xor_8:
  case Builtin::BI__sync_fetch_and_xor_16:
  case Builtin::BI__sync_fetch_and_nand:
  case Builtin::BI__sync_fetch_and_nand_1:
  case Builtin::BI__sync_fetch_and_nand_2:
  case Builtin::BI__sync_fetch_and_nand_4:
  case Builtin::BI__sync_fetch_and_nand_8:
  case Builtin::BI__sync_fetch_and_nand_16:
  case Builtin::BI__sync_add_and_fetch:
  case Builtin::BI__sync_add_and_fetch_1:
  case Builtin::BI__sync_add_and_fetch_2:
  case Builtin::BI__sync_add_and_fetch_4:
  case Builtin::BI__sync_add_and_fetch_8:
  case Builtin::BI__sync_add_and_fetch_16:
  case Builtin::BI__sync_sub_and_fetch:
  case Builtin::BI__sync_sub_and_fetch_1:
  case Builtin::BI__sync_sub_and_fetch_2:
  case Builtin::BI__sync_sub_and_fetch_4:
  case Builtin::BI__sync_sub_and_fetch_8:
  case Builtin::BI__sync_sub_and_fetch_16:
  case Builtin::BI__sync_and_and_fetch:
  case Builtin::BI__sync_and_and_fetch_1:
  case Builtin::BI__sync_and_and_fetch_2:
  case Builtin::BI__sync_and_and_fetch_4:
  case Builtin::BI__sync_and_and_fetch_8:
  case Builtin::BI__sync_and_and_fetch_16:
  case Builtin::BI__sync_or_and_fetch:
  case Builtin::BI__sync_or_and_fetch_1:
  case Builtin::BI__sync_or_and_fetch_2:
  case Builtin::BI__sync_or_and_fetch_4:
  case Builtin::BI__sync_or_and_fetch_8:
  case Builtin::BI__sync_or_and_fetch_16:
  case Builtin::BI__sync_xor_and_fetch:
  case Builtin::BI__sync_xor_and_fetch_1:
  case Builtin::BI__sync_xor_and_fetch_2:
  case Builtin::BI__sync_xor_and_fetch_4:
  case Builtin::BI__sync_xor_and_fetch_8:
  case Builtin::BI__sync_xor_and_fetch_16:
  case Builtin::BI__sync_nand_and_fetch:
  case Builtin::BI__sync_nand_and_fetch_1:
  case Builtin::BI__sync_nand_and_fetch_2:
  case Builtin::BI__sync_nand_and_fetch_4:
  case Builtin::BI__sync_nand_and_fetch_8:
  case Builtin::BI__sync_nand_and_fetch_16:
  case Builtin::BI__sync_val_compare_and_swap:
  case Builtin::BI__sync_val_compare_and_swap_1:
  case Builtin::BI__sync_val_compare_and_swap_2:
  case Builtin::BI__sync_val_compare_and_swap_4:
  case Builtin::BI__sync_val_compare_and_swap_8:
  case Builtin::BI__sync_val_compare_and_swap_16:
  case Builtin::BI__sync_bool_compare_and_swap:
  case Builtin::BI__sync_bool_compare_and_swap_1:
  case Builtin::BI__sync_bool_compare_and_swap_2:
  case Builtin::BI__sync_bool_compare_and_swap_4:
  case Builtin::BI__sync_bool_compare_and_swap_8:
  case Builtin::BI__sync_bool_compare_and_swap_16:
  case Builtin::BI__sync_lock_test_and_set:
  case Builtin::BI__sync_lock_test_and_set_1:
  case Builtin::BI__sync_lock_test_and_set_2:
  case Builtin::BI__sync_lock_test_and_set_4:
  case Builtin::BI__sync_lock_test_and_set_8:
  case Builtin::BI__sync_lock_test_and_set_16:
  case Builtin::BI__sync_lock_release:
  case Builtin::BI__sync_lock_release_1:
  case Builtin::BI__sync_lock_release_2:
  case Builtin::BI__sync_lock_release_4:
  case Builtin::BI__sync_lock_release_8:
  case Builtin::BI__sync_lock_release_16:
  case Builtin::BI__sync_swap:
  case Builtin::BI__sync_swap_1:
  case Builtin::BI__sync_swap_2:
  case Builtin::BI__sync_swap_4:
  case Builtin::BI__sync_swap_8:
  case Builtin::BI__sync_swap_16:
    return SemaBuiltinAtomicOverloaded(TheCallResult);
  case Builtin::BI__sync_synchronize:
    Diag(TheCall->getBeginLoc(), diag::warn_atomic_implicit_seq_cst)
        << TheCall->getCallee()->getSourceRange();
    break;
  case Builtin::BI__builtin_nontemporal_load:
  case Builtin::BI__builtin_nontemporal_store:
    return SemaBuiltinNontemporalOverloaded(TheCallResult);
#define BUILTIN(ID, TYPE, ATTRS)
#define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \
  case Builtin::BI##ID: \
    return SemaAtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID);
#include "clang/Basic/Builtins.def"
  case Builtin::BI__annotation:
    if (SemaBuiltinMSVCAnnotation(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_annotation:
    if (SemaBuiltinAnnotation(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_addressof:
    if (SemaBuiltinAddressof(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_add_overflow:
  case Builtin::BI__builtin_sub_overflow:
  case Builtin::BI__builtin_mul_overflow:
    if (SemaBuiltinOverflow(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_operator_new:
  case Builtin::BI__builtin_operator_delete: {
    bool IsDelete = BuiltinID == Builtin::BI__builtin_operator_delete;
    ExprResult Res =
        SemaBuiltinOperatorNewDeleteOverloaded(TheCallResult, IsDelete);
    if (Res.isInvalid())
      CorrectDelayedTyposInExpr(TheCallResult.get());
    return Res;
  }
  case Builtin::BI__builtin_dump_struct: {
    // We first want to ensure we are called with 2 arguments
    if (checkArgCount(*this, TheCall, 2))
      return ExprError();
    // Ensure that the first argument is of type 'struct XX *'
    const Expr *PtrArg = TheCall->getArg(0)->IgnoreParenImpCasts();
    const QualType PtrArgType = PtrArg->getType();
    if (!PtrArgType->isPointerType() ||
        !PtrArgType->getPointeeType()->isRecordType()) {
      Diag(PtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
          << PtrArgType << "structure pointer" << 1 << 0 << 3 << 1 << PtrArgType
          << "structure pointer";
      return ExprError();
    }

    // Ensure that the second argument is of type 'FunctionType'
    const Expr *FnPtrArg = TheCall->getArg(1)->IgnoreImpCasts();
    const QualType FnPtrArgType = FnPtrArg->getType();
    if (!FnPtrArgType->isPointerType()) {
      Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
          << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2
          << FnPtrArgType << "'int (*)(const char *, ...)'";
      return ExprError();
    }

    const auto *FuncType =
        FnPtrArgType->getPointeeType()->getAs<FunctionType>();

    if (!FuncType) {
      Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
          << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2
          << FnPtrArgType << "'int (*)(const char *, ...)'";
      return ExprError();
    }

    if (const auto *FT = dyn_cast<FunctionProtoType>(FuncType)) {
      if (!FT->getNumParams()) {
        Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
            << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3
            << 2 << FnPtrArgType << "'int (*)(const char *, ...)'";
        return ExprError();
      }
      QualType PT = FT->getParamType(0);
      if (!FT->isVariadic() || FT->getReturnType() != Context.IntTy ||
          !PT->isPointerType() || !PT->getPointeeType()->isCharType() ||
          !PT->getPointeeType().isConstQualified()) {
        Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
            << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3
            << 2 << FnPtrArgType << "'int (*)(const char *, ...)'";
        return ExprError();
      }
    }

    TheCall->setType(Context.IntTy);
    break;
  }
  case Builtin::BI__builtin_preserve_access_index:
    if (SemaBuiltinPreserveAI(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_call_with_static_chain:
    if (SemaBuiltinCallWithStaticChain(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__exception_code:
  case Builtin::BI_exception_code:
    if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHExceptScope,
                                 diag::err_seh___except_block))
      return ExprError();
    break;
  case Builtin::BI__exception_info:
  case Builtin::BI_exception_info:
    if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHFilterScope,
                                 diag::err_seh___except_filter))
      return ExprError();
    break;
  case Builtin::BI__GetExceptionInfo:
    if (checkArgCount(*this, TheCall, 1))
      return ExprError();

    if (CheckCXXThrowOperand(
            TheCall->getBeginLoc(),
            Context.getExceptionObjectType(FDecl->getParamDecl(0)->getType()),
            TheCall))
      return ExprError();

    TheCall->setType(Context.VoidPtrTy);
    break;
  // OpenCL v2.0, s6.13.16 - Pipe functions
  case Builtin::BIread_pipe:
  case Builtin::BIwrite_pipe:
    // Since those two functions are declared with var args, we need a semantic
    // check for the argument.
    if (SemaBuiltinRWPipe(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIreserve_read_pipe:
  case Builtin::BIreserve_write_pipe:
  case Builtin::BIwork_group_reserve_read_pipe:
  case Builtin::BIwork_group_reserve_write_pipe:
    if (SemaBuiltinReserveRWPipe(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIsub_group_reserve_read_pipe:
  case Builtin::BIsub_group_reserve_write_pipe:
    if (checkOpenCLSubgroupExt(*this, TheCall) ||
        SemaBuiltinReserveRWPipe(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIcommit_read_pipe:
  case Builtin::BIcommit_write_pipe:
  case Builtin::BIwork_group_commit_read_pipe:
  case Builtin::BIwork_group_commit_write_pipe:
    if (SemaBuiltinCommitRWPipe(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIsub_group_commit_read_pipe:
  case Builtin::BIsub_group_commit_write_pipe:
    if (checkOpenCLSubgroupExt(*this, TheCall) ||
        SemaBuiltinCommitRWPipe(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIget_pipe_num_packets:
  case Builtin::BIget_pipe_max_packets:
    if (SemaBuiltinPipePackets(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIto_global:
  case Builtin::BIto_local:
  case Builtin::BIto_private:
    if (SemaOpenCLBuiltinToAddr(*this, BuiltinID, TheCall))
      return ExprError();
    break;
  // OpenCL v2.0, s6.13.17 - Enqueue kernel functions.
  case Builtin::BIenqueue_kernel:
    if (SemaOpenCLBuiltinEnqueueKernel(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIget_kernel_work_group_size:
  case Builtin::BIget_kernel_preferred_work_group_size_multiple:
    if (SemaOpenCLBuiltinKernelWorkGroupSize(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
  case Builtin::BIget_kernel_sub_group_count_for_ndrange:
    if (SemaOpenCLBuiltinNDRangeAndBlock(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_os_log_format:
  case Builtin::BI__builtin_os_log_format_buffer_size:
    if (SemaBuiltinOSLogFormat(TheCall))
      return ExprError();
    break;
  }

  // Since the target specific builtins for each arch overlap, only check those
  // of the arch we are compiling for.
1529 if (Context.BuiltinInfo.isTSBuiltin(BuiltinID)) { 1530 switch (Context.getTargetInfo().getTriple().getArch()) { 1531 case llvm::Triple::arm: 1532 case llvm::Triple::armeb: 1533 case llvm::Triple::thumb: 1534 case llvm::Triple::thumbeb: 1535 if (CheckARMBuiltinFunctionCall(BuiltinID, TheCall)) 1536 return ExprError(); 1537 break; 1538 case llvm::Triple::aarch64: 1539 case llvm::Triple::aarch64_be: 1540 if (CheckAArch64BuiltinFunctionCall(BuiltinID, TheCall)) 1541 return ExprError(); 1542 break; 1543 case llvm::Triple::bpfeb: 1544 case llvm::Triple::bpfel: 1545 if (CheckBPFBuiltinFunctionCall(BuiltinID, TheCall)) 1546 return ExprError(); 1547 break; 1548 case llvm::Triple::hexagon: 1549 if (CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall)) 1550 return ExprError(); 1551 break; 1552 case llvm::Triple::mips: 1553 case llvm::Triple::mipsel: 1554 case llvm::Triple::mips64: 1555 case llvm::Triple::mips64el: 1556 if (CheckMipsBuiltinFunctionCall(BuiltinID, TheCall)) 1557 return ExprError(); 1558 break; 1559 case llvm::Triple::systemz: 1560 if (CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall)) 1561 return ExprError(); 1562 break; 1563 case llvm::Triple::x86: 1564 case llvm::Triple::x86_64: 1565 if (CheckX86BuiltinFunctionCall(BuiltinID, TheCall)) 1566 return ExprError(); 1567 break; 1568 case llvm::Triple::ppc: 1569 case llvm::Triple::ppc64: 1570 case llvm::Triple::ppc64le: 1571 if (CheckPPCBuiltinFunctionCall(BuiltinID, TheCall)) 1572 return ExprError(); 1573 break; 1574 default: 1575 break; 1576 } 1577 } 1578 1579 return TheCallResult; 1580 } 1581 1582 // Get the valid immediate range for the specified NEON type code. 1583 static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) { 1584 NeonTypeFlags Type(t); 1585 int IsQuad = ForceQuad ? true : Type.isQuad(); 1586 switch (Type.getEltType()) { 1587 case NeonTypeFlags::Int8: 1588 case NeonTypeFlags::Poly8: 1589 return shift ? 7 : (8 << IsQuad) - 1; 1590 case NeonTypeFlags::Int16: 1591 case NeonTypeFlags::Poly16: 1592 return shift ? 15 : (4 << IsQuad) - 1; 1593 case NeonTypeFlags::Int32: 1594 return shift ? 31 : (2 << IsQuad) - 1; 1595 case NeonTypeFlags::Int64: 1596 case NeonTypeFlags::Poly64: 1597 return shift ? 63 : (1 << IsQuad) - 1; 1598 case NeonTypeFlags::Poly128: 1599 return shift ? 127 : (1 << IsQuad) - 1; 1600 case NeonTypeFlags::Float16: 1601 assert(!shift && "cannot shift float types!"); 1602 return (4 << IsQuad) - 1; 1603 case NeonTypeFlags::Float32: 1604 assert(!shift && "cannot shift float types!"); 1605 return (2 << IsQuad) - 1; 1606 case NeonTypeFlags::Float64: 1607 assert(!shift && "cannot shift float types!"); 1608 return (1 << IsQuad) - 1; 1609 } 1610 llvm_unreachable("Invalid NeonTypeFlag!"); 1611 } 1612 1613 /// getNeonEltType - Return the QualType corresponding to the elements of 1614 /// the vector type specified by the NeonTypeFlags. This is used to check 1615 /// the pointer arguments for Neon load/store intrinsics. 1616 static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context, 1617 bool IsPolyUnsigned, bool IsInt64Long) { 1618 switch (Flags.getEltType()) { 1619 case NeonTypeFlags::Int8: 1620 return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy; 1621 case NeonTypeFlags::Int16: 1622 return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy; 1623 case NeonTypeFlags::Int32: 1624 return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy; 1625 case NeonTypeFlags::Int64: 1626 if (IsInt64Long) 1627 return Flags.isUnsigned() ? 
Context.UnsignedLongTy : Context.LongTy; 1628 else 1629 return Flags.isUnsigned() ? Context.UnsignedLongLongTy 1630 : Context.LongLongTy; 1631 case NeonTypeFlags::Poly8: 1632 return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy; 1633 case NeonTypeFlags::Poly16: 1634 return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy; 1635 case NeonTypeFlags::Poly64: 1636 if (IsInt64Long) 1637 return Context.UnsignedLongTy; 1638 else 1639 return Context.UnsignedLongLongTy; 1640 case NeonTypeFlags::Poly128: 1641 break; 1642 case NeonTypeFlags::Float16: 1643 return Context.HalfTy; 1644 case NeonTypeFlags::Float32: 1645 return Context.FloatTy; 1646 case NeonTypeFlags::Float64: 1647 return Context.DoubleTy; 1648 } 1649 llvm_unreachable("Invalid NeonTypeFlag!"); 1650 } 1651 1652 bool Sema::CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 1653 llvm::APSInt Result; 1654 uint64_t mask = 0; 1655 unsigned TV = 0; 1656 int PtrArgNum = -1; 1657 bool HasConstPtr = false; 1658 switch (BuiltinID) { 1659 #define GET_NEON_OVERLOAD_CHECK 1660 #include "clang/Basic/arm_neon.inc" 1661 #include "clang/Basic/arm_fp16.inc" 1662 #undef GET_NEON_OVERLOAD_CHECK 1663 } 1664 1665 // For NEON intrinsics which are overloaded on vector element type, validate 1666 // the immediate which specifies which variant to emit. 1667 unsigned ImmArg = TheCall->getNumArgs()-1; 1668 if (mask) { 1669 if (SemaBuiltinConstantArg(TheCall, ImmArg, Result)) 1670 return true; 1671 1672 TV = Result.getLimitedValue(64); 1673 if ((TV > 63) || (mask & (1ULL << TV)) == 0) 1674 return Diag(TheCall->getBeginLoc(), diag::err_invalid_neon_type_code) 1675 << TheCall->getArg(ImmArg)->getSourceRange(); 1676 } 1677 1678 if (PtrArgNum >= 0) { 1679 // Check that pointer arguments have the specified type. 1680 Expr *Arg = TheCall->getArg(PtrArgNum); 1681 if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) 1682 Arg = ICE->getSubExpr(); 1683 ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg); 1684 QualType RHSTy = RHS.get()->getType(); 1685 1686 llvm::Triple::ArchType Arch = Context.getTargetInfo().getTriple().getArch(); 1687 bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 || 1688 Arch == llvm::Triple::aarch64_be; 1689 bool IsInt64Long = 1690 Context.getTargetInfo().getInt64Type() == TargetInfo::SignedLong; 1691 QualType EltTy = 1692 getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long); 1693 if (HasConstPtr) 1694 EltTy = EltTy.withConst(); 1695 QualType LHSTy = Context.getPointerType(EltTy); 1696 AssignConvertType ConvTy; 1697 ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS); 1698 if (RHS.isInvalid()) 1699 return true; 1700 if (DiagnoseAssignmentResult(ConvTy, Arg->getBeginLoc(), LHSTy, RHSTy, 1701 RHS.get(), AA_Assigning)) 1702 return true; 1703 } 1704 1705 // For NEON intrinsics which take an immediate value as part of the 1706 // instruction, range check them here. 
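// As a hedged sketch of the kind of call this covers (the vector shape and
// lane numbers are assumptions picked only to illustrate the rule):
//
//   int32x2_t v = vdup_n_s32(7);
//   int32_t a = vget_lane_s32(v, 1);   // OK: lane constant is within [0, 1]
//   int32_t b = vget_lane_s32(v, 2);   // rejected: lane constant out of range
//
// The i/l/u values filled in by the generated includes below identify the
// immediate argument and the constant range it must satisfy.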
1707 unsigned i = 0, l = 0, u = 0; 1708 switch (BuiltinID) { 1709 default: 1710 return false; 1711 #define GET_NEON_IMMEDIATE_CHECK 1712 #include "clang/Basic/arm_neon.inc" 1713 #include "clang/Basic/arm_fp16.inc" 1714 #undef GET_NEON_IMMEDIATE_CHECK 1715 } 1716 1717 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); 1718 } 1719 1720 bool Sema::CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 1721 switch (BuiltinID) { 1722 default: 1723 return false; 1724 #include "clang/Basic/arm_mve_builtin_sema.inc" 1725 } 1726 } 1727 1728 bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, 1729 unsigned MaxWidth) { 1730 assert((BuiltinID == ARM::BI__builtin_arm_ldrex || 1731 BuiltinID == ARM::BI__builtin_arm_ldaex || 1732 BuiltinID == ARM::BI__builtin_arm_strex || 1733 BuiltinID == ARM::BI__builtin_arm_stlex || 1734 BuiltinID == AArch64::BI__builtin_arm_ldrex || 1735 BuiltinID == AArch64::BI__builtin_arm_ldaex || 1736 BuiltinID == AArch64::BI__builtin_arm_strex || 1737 BuiltinID == AArch64::BI__builtin_arm_stlex) && 1738 "unexpected ARM builtin"); 1739 bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex || 1740 BuiltinID == ARM::BI__builtin_arm_ldaex || 1741 BuiltinID == AArch64::BI__builtin_arm_ldrex || 1742 BuiltinID == AArch64::BI__builtin_arm_ldaex; 1743 1744 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 1745 1746 // Ensure that we have the proper number of arguments. 1747 if (checkArgCount(*this, TheCall, IsLdrex ? 1 : 2)) 1748 return true; 1749 1750 // Inspect the pointer argument of the atomic builtin. This should always be 1751 // a pointer type, whose element is an integral scalar or pointer type. 1752 // Because it is a pointer type, we don't have to worry about any implicit 1753 // casts here. 1754 Expr *PointerArg = TheCall->getArg(IsLdrex ? 0 : 1); 1755 ExprResult PointerArgRes = DefaultFunctionArrayLvalueConversion(PointerArg); 1756 if (PointerArgRes.isInvalid()) 1757 return true; 1758 PointerArg = PointerArgRes.get(); 1759 1760 const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>(); 1761 if (!pointerType) { 1762 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer) 1763 << PointerArg->getType() << PointerArg->getSourceRange(); 1764 return true; 1765 } 1766 1767 // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next 1768 // task is to insert the appropriate casts into the AST. First work out just 1769 // what the appropriate type is. 1770 QualType ValType = pointerType->getPointeeType(); 1771 QualType AddrType = ValType.getUnqualifiedType().withVolatile(); 1772 if (IsLdrex) 1773 AddrType.addConst(); 1774 1775 // Issue a warning if the cast is dodgy. 1776 CastKind CastNeeded = CK_NoOp; 1777 if (!AddrType.isAtLeastAsQualifiedAs(ValType)) { 1778 CastNeeded = CK_BitCast; 1779 Diag(DRE->getBeginLoc(), diag::ext_typecheck_convert_discards_qualifiers) 1780 << PointerArg->getType() << Context.getPointerType(AddrType) 1781 << AA_Passing << PointerArg->getSourceRange(); 1782 } 1783 1784 // Finally, do the cast and replace the argument with the corrected version. 1785 AddrType = Context.getPointerType(AddrType); 1786 PointerArgRes = ImpCastExprToType(PointerArg, AddrType, CastNeeded); 1787 if (PointerArgRes.isInvalid()) 1788 return true; 1789 PointerArg = PointerArgRes.get(); 1790 1791 TheCall->setArg(IsLdrex ? 0 : 1, PointerArg); 1792 1793 // In general, we allow ints, floats and pointers to be loaded and stored. 
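// A minimal sketch of that rule at the call site (the types are assumptions
// chosen only for illustration):
//
//   int *ip; struct S { int a[4]; } *sp;
//   int x = __builtin_arm_ldrex(ip);   // OK: integer element type
//   __builtin_arm_ldrex(sp);           // rejected below: aggregate element type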
1794 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 1795 !ValType->isBlockPointerType() && !ValType->isFloatingType()) { 1796 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intfltptr) 1797 << PointerArg->getType() << PointerArg->getSourceRange(); 1798 return true; 1799 } 1800 1801 // But ARM doesn't have instructions to deal with 128-bit versions. 1802 if (Context.getTypeSize(ValType) > MaxWidth) { 1803 assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate"); 1804 Diag(DRE->getBeginLoc(), diag::err_atomic_exclusive_builtin_pointer_size) 1805 << PointerArg->getType() << PointerArg->getSourceRange(); 1806 return true; 1807 } 1808 1809 switch (ValType.getObjCLifetime()) { 1810 case Qualifiers::OCL_None: 1811 case Qualifiers::OCL_ExplicitNone: 1812 // okay 1813 break; 1814 1815 case Qualifiers::OCL_Weak: 1816 case Qualifiers::OCL_Strong: 1817 case Qualifiers::OCL_Autoreleasing: 1818 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership) 1819 << ValType << PointerArg->getSourceRange(); 1820 return true; 1821 } 1822 1823 if (IsLdrex) { 1824 TheCall->setType(ValType); 1825 return false; 1826 } 1827 1828 // Initialize the argument to be stored. 1829 ExprResult ValArg = TheCall->getArg(0); 1830 InitializedEntity Entity = InitializedEntity::InitializeParameter( 1831 Context, ValType, /*consume*/ false); 1832 ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg); 1833 if (ValArg.isInvalid()) 1834 return true; 1835 TheCall->setArg(0, ValArg.get()); 1836 1837 // __builtin_arm_strex always returns an int. It's marked as such in the .def, 1838 // but the custom checker bypasses all default analysis. 1839 TheCall->setType(Context.IntTy); 1840 return false; 1841 } 1842 1843 bool Sema::CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 1844 if (BuiltinID == ARM::BI__builtin_arm_ldrex || 1845 BuiltinID == ARM::BI__builtin_arm_ldaex || 1846 BuiltinID == ARM::BI__builtin_arm_strex || 1847 BuiltinID == ARM::BI__builtin_arm_stlex) { 1848 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64); 1849 } 1850 1851 if (BuiltinID == ARM::BI__builtin_arm_prefetch) { 1852 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 1853 SemaBuiltinConstantArgRange(TheCall, 2, 0, 1); 1854 } 1855 1856 if (BuiltinID == ARM::BI__builtin_arm_rsr64 || 1857 BuiltinID == ARM::BI__builtin_arm_wsr64) 1858 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 3, false); 1859 1860 if (BuiltinID == ARM::BI__builtin_arm_rsr || 1861 BuiltinID == ARM::BI__builtin_arm_rsrp || 1862 BuiltinID == ARM::BI__builtin_arm_wsr || 1863 BuiltinID == ARM::BI__builtin_arm_wsrp) 1864 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 1865 1866 if (CheckNeonBuiltinFunctionCall(BuiltinID, TheCall)) 1867 return true; 1868 if (CheckMVEBuiltinFunctionCall(BuiltinID, TheCall)) 1869 return true; 1870 1871 // For intrinsics which take an immediate value as part of the instruction, 1872 // range check them here. 1873 // FIXME: VFP Intrinsics should error if VFP not present. 
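// For example (illustrative only; the accepted ranges are exactly the ones
// encoded in the switch that follows):
//
//   int s = __builtin_arm_ssat(x, 8);    // OK: 8 is within [1, 32]
//   int u = __builtin_arm_usat(x, 32);   // rejected: 32 is outside [0, 31]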
1874 switch (BuiltinID) { 1875 default: return false; 1876 case ARM::BI__builtin_arm_ssat: 1877 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 32); 1878 case ARM::BI__builtin_arm_usat: 1879 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31); 1880 case ARM::BI__builtin_arm_ssat16: 1881 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16); 1882 case ARM::BI__builtin_arm_usat16: 1883 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 1884 case ARM::BI__builtin_arm_vcvtr_f: 1885 case ARM::BI__builtin_arm_vcvtr_d: 1886 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 1887 case ARM::BI__builtin_arm_dmb: 1888 case ARM::BI__builtin_arm_dsb: 1889 case ARM::BI__builtin_arm_isb: 1890 case ARM::BI__builtin_arm_dbg: 1891 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15); 1892 } 1893 } 1894 1895 bool Sema::CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, 1896 CallExpr *TheCall) { 1897 if (BuiltinID == AArch64::BI__builtin_arm_ldrex || 1898 BuiltinID == AArch64::BI__builtin_arm_ldaex || 1899 BuiltinID == AArch64::BI__builtin_arm_strex || 1900 BuiltinID == AArch64::BI__builtin_arm_stlex) { 1901 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128); 1902 } 1903 1904 if (BuiltinID == AArch64::BI__builtin_arm_prefetch) { 1905 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 1906 SemaBuiltinConstantArgRange(TheCall, 2, 0, 2) || 1907 SemaBuiltinConstantArgRange(TheCall, 3, 0, 1) || 1908 SemaBuiltinConstantArgRange(TheCall, 4, 0, 1); 1909 } 1910 1911 if (BuiltinID == AArch64::BI__builtin_arm_rsr64 || 1912 BuiltinID == AArch64::BI__builtin_arm_wsr64) 1913 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 1914 1915 // Memory Tagging Extensions (MTE) Intrinsics 1916 if (BuiltinID == AArch64::BI__builtin_arm_irg || 1917 BuiltinID == AArch64::BI__builtin_arm_addg || 1918 BuiltinID == AArch64::BI__builtin_arm_gmi || 1919 BuiltinID == AArch64::BI__builtin_arm_ldg || 1920 BuiltinID == AArch64::BI__builtin_arm_stg || 1921 BuiltinID == AArch64::BI__builtin_arm_subp) { 1922 return SemaBuiltinARMMemoryTaggingCall(BuiltinID, TheCall); 1923 } 1924 1925 if (BuiltinID == AArch64::BI__builtin_arm_rsr || 1926 BuiltinID == AArch64::BI__builtin_arm_rsrp || 1927 BuiltinID == AArch64::BI__builtin_arm_wsr || 1928 BuiltinID == AArch64::BI__builtin_arm_wsrp) 1929 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 1930 1931 // Only check the valid encoding range. Any constant in this range would be 1932 // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw 1933 // an exception for incorrect registers. This matches MSVC behavior. 1934 if (BuiltinID == AArch64::BI_ReadStatusReg || 1935 BuiltinID == AArch64::BI_WriteStatusReg) 1936 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0x7fff); 1937 1938 if (BuiltinID == AArch64::BI__getReg) 1939 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31); 1940 1941 if (CheckNeonBuiltinFunctionCall(BuiltinID, TheCall)) 1942 return true; 1943 1944 // For intrinsics which take an immediate value as part of the instruction, 1945 // range check them here. 
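// For example (a hedged sketch; only the builtins listed in the switch below
// are range-checked this way):
//
//   __builtin_arm_isb(15);   // OK: barrier option within [0, 15]
//   __builtin_arm_dsb(16);   // rejected: barrier option out of range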
1946 unsigned i = 0, l = 0, u = 0; 1947 switch (BuiltinID) { 1948 default: return false; 1949 case AArch64::BI__builtin_arm_dmb: 1950 case AArch64::BI__builtin_arm_dsb: 1951 case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break; 1952 case AArch64::BI__builtin_arm_tcancel: l = 0; u = 65535; break; 1953 } 1954 1955 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); 1956 } 1957 1958 bool Sema::CheckBPFBuiltinFunctionCall(unsigned BuiltinID, 1959 CallExpr *TheCall) { 1960 assert(BuiltinID == BPF::BI__builtin_preserve_field_info && 1961 "unexpected BPF builtin"); 1962 1963 if (checkArgCount(*this, TheCall, 2)) 1964 return true; 1965 1966 // The first argument needs to be a record field access. 1967 // If it is an array element access, we delay decision 1968 // to BPF backend to check whether the access is a 1969 // field access or not. 1970 Expr *Arg = TheCall->getArg(0); 1971 if (Arg->getType()->getAsPlaceholderType() || 1972 (Arg->IgnoreParens()->getObjectKind() != OK_BitField && 1973 !dyn_cast<MemberExpr>(Arg->IgnoreParens()) && 1974 !dyn_cast<ArraySubscriptExpr>(Arg->IgnoreParens()))) { 1975 Diag(Arg->getBeginLoc(), diag::err_preserve_field_info_not_field) 1976 << 1 << Arg->getSourceRange(); 1977 return true; 1978 } 1979 1980 // The second argument needs to be a constant int 1981 llvm::APSInt Value; 1982 if (!TheCall->getArg(1)->isIntegerConstantExpr(Value, Context)) { 1983 Diag(Arg->getBeginLoc(), diag::err_preserve_field_info_not_const) 1984 << 2 << Arg->getSourceRange(); 1985 return true; 1986 } 1987 1988 TheCall->setType(Context.UnsignedIntTy); 1989 return false; 1990 } 1991 1992 bool Sema::CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall) { 1993 struct BuiltinAndString { 1994 unsigned BuiltinID; 1995 const char *Str; 1996 }; 1997 1998 static BuiltinAndString ValidCPU[] = { 1999 { Hexagon::BI__builtin_HEXAGON_A6_vcmpbeq_notany, "v65,v66" }, 2000 { Hexagon::BI__builtin_HEXAGON_A6_vminub_RdP, "v62,v65,v66" }, 2001 { Hexagon::BI__builtin_HEXAGON_F2_dfadd, "v66" }, 2002 { Hexagon::BI__builtin_HEXAGON_F2_dfsub, "v66" }, 2003 { Hexagon::BI__builtin_HEXAGON_M2_mnaci, "v66" }, 2004 { Hexagon::BI__builtin_HEXAGON_M6_vabsdiffb, "v62,v65,v66" }, 2005 { Hexagon::BI__builtin_HEXAGON_M6_vabsdiffub, "v62,v65,v66" }, 2006 { Hexagon::BI__builtin_HEXAGON_S2_mask, "v66" }, 2007 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, "v60,v62,v65,v66" }, 2008 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, "v60,v62,v65,v66" }, 2009 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, "v60,v62,v65,v66" }, 2010 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, "v60,v62,v65,v66" }, 2011 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, "v60,v62,v65,v66" }, 2012 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, "v60,v62,v65,v66" }, 2013 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, "v60,v62,v65,v66" }, 2014 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, "v60,v62,v65,v66" }, 2015 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, "v60,v62,v65,v66" }, 2016 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, "v60,v62,v65,v66" }, 2017 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, "v60,v62,v65,v66" }, 2018 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, "v60,v62,v65,v66" }, 2019 { Hexagon::BI__builtin_HEXAGON_S6_vsplatrbp, "v62,v65,v66" }, 2020 { Hexagon::BI__builtin_HEXAGON_S6_vtrunehb_ppp, "v62,v65,v66" }, 2021 { Hexagon::BI__builtin_HEXAGON_S6_vtrunohb_ppp, "v62,v65,v66" }, 2022 }; 2023 2024 static BuiltinAndString ValidHVX[] = { 2025 { Hexagon::BI__builtin_HEXAGON_V6_hi, "v60,v62,v65,v66" }, 2026 {
Hexagon::BI__builtin_HEXAGON_V6_hi_128B, "v60,v62,v65,v66" }, 2027 { Hexagon::BI__builtin_HEXAGON_V6_lo, "v60,v62,v65,v66" }, 2028 { Hexagon::BI__builtin_HEXAGON_V6_lo_128B, "v60,v62,v65,v66" }, 2029 { Hexagon::BI__builtin_HEXAGON_V6_extractw, "v60,v62,v65,v66" }, 2030 { Hexagon::BI__builtin_HEXAGON_V6_extractw_128B, "v60,v62,v65,v66" }, 2031 { Hexagon::BI__builtin_HEXAGON_V6_lvsplatb, "v62,v65,v66" }, 2032 { Hexagon::BI__builtin_HEXAGON_V6_lvsplatb_128B, "v62,v65,v66" }, 2033 { Hexagon::BI__builtin_HEXAGON_V6_lvsplath, "v62,v65,v66" }, 2034 { Hexagon::BI__builtin_HEXAGON_V6_lvsplath_128B, "v62,v65,v66" }, 2035 { Hexagon::BI__builtin_HEXAGON_V6_lvsplatw, "v60,v62,v65,v66" }, 2036 { Hexagon::BI__builtin_HEXAGON_V6_lvsplatw_128B, "v60,v62,v65,v66" }, 2037 { Hexagon::BI__builtin_HEXAGON_V6_pred_and, "v60,v62,v65,v66" }, 2038 { Hexagon::BI__builtin_HEXAGON_V6_pred_and_128B, "v60,v62,v65,v66" }, 2039 { Hexagon::BI__builtin_HEXAGON_V6_pred_and_n, "v60,v62,v65,v66" }, 2040 { Hexagon::BI__builtin_HEXAGON_V6_pred_and_n_128B, "v60,v62,v65,v66" }, 2041 { Hexagon::BI__builtin_HEXAGON_V6_pred_not, "v60,v62,v65,v66" }, 2042 { Hexagon::BI__builtin_HEXAGON_V6_pred_not_128B, "v60,v62,v65,v66" }, 2043 { Hexagon::BI__builtin_HEXAGON_V6_pred_or, "v60,v62,v65,v66" }, 2044 { Hexagon::BI__builtin_HEXAGON_V6_pred_or_128B, "v60,v62,v65,v66" }, 2045 { Hexagon::BI__builtin_HEXAGON_V6_pred_or_n, "v60,v62,v65,v66" }, 2046 { Hexagon::BI__builtin_HEXAGON_V6_pred_or_n_128B, "v60,v62,v65,v66" }, 2047 { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2, "v60,v62,v65,v66" }, 2048 { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2_128B, "v60,v62,v65,v66" }, 2049 { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2v2, "v62,v65,v66" }, 2050 { Hexagon::BI__builtin_HEXAGON_V6_pred_scalar2v2_128B, "v62,v65,v66" }, 2051 { Hexagon::BI__builtin_HEXAGON_V6_pred_xor, "v60,v62,v65,v66" }, 2052 { Hexagon::BI__builtin_HEXAGON_V6_pred_xor_128B, "v60,v62,v65,v66" }, 2053 { Hexagon::BI__builtin_HEXAGON_V6_shuffeqh, "v62,v65,v66" }, 2054 { Hexagon::BI__builtin_HEXAGON_V6_shuffeqh_128B, "v62,v65,v66" }, 2055 { Hexagon::BI__builtin_HEXAGON_V6_shuffeqw, "v62,v65,v66" }, 2056 { Hexagon::BI__builtin_HEXAGON_V6_shuffeqw_128B, "v62,v65,v66" }, 2057 { Hexagon::BI__builtin_HEXAGON_V6_vabsb, "v65,v66" }, 2058 { Hexagon::BI__builtin_HEXAGON_V6_vabsb_128B, "v65,v66" }, 2059 { Hexagon::BI__builtin_HEXAGON_V6_vabsb_sat, "v65,v66" }, 2060 { Hexagon::BI__builtin_HEXAGON_V6_vabsb_sat_128B, "v65,v66" }, 2061 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffh, "v60,v62,v65,v66" }, 2062 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffh_128B, "v60,v62,v65,v66" }, 2063 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffub, "v60,v62,v65,v66" }, 2064 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffub_128B, "v60,v62,v65,v66" }, 2065 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffuh, "v60,v62,v65,v66" }, 2066 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffuh_128B, "v60,v62,v65,v66" }, 2067 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffw, "v60,v62,v65,v66" }, 2068 { Hexagon::BI__builtin_HEXAGON_V6_vabsdiffw_128B, "v60,v62,v65,v66" }, 2069 { Hexagon::BI__builtin_HEXAGON_V6_vabsh, "v60,v62,v65,v66" }, 2070 { Hexagon::BI__builtin_HEXAGON_V6_vabsh_128B, "v60,v62,v65,v66" }, 2071 { Hexagon::BI__builtin_HEXAGON_V6_vabsh_sat, "v60,v62,v65,v66" }, 2072 { Hexagon::BI__builtin_HEXAGON_V6_vabsh_sat_128B, "v60,v62,v65,v66" }, 2073 { Hexagon::BI__builtin_HEXAGON_V6_vabsw, "v60,v62,v65,v66" }, 2074 { Hexagon::BI__builtin_HEXAGON_V6_vabsw_128B, "v60,v62,v65,v66" }, 2075 { Hexagon::BI__builtin_HEXAGON_V6_vabsw_sat, "v60,v62,v65,v66" }, 
2076 { Hexagon::BI__builtin_HEXAGON_V6_vabsw_sat_128B, "v60,v62,v65,v66" }, 2077 { Hexagon::BI__builtin_HEXAGON_V6_vaddb, "v60,v62,v65,v66" }, 2078 { Hexagon::BI__builtin_HEXAGON_V6_vaddb_128B, "v60,v62,v65,v66" }, 2079 { Hexagon::BI__builtin_HEXAGON_V6_vaddb_dv, "v60,v62,v65,v66" }, 2080 { Hexagon::BI__builtin_HEXAGON_V6_vaddb_dv_128B, "v60,v62,v65,v66" }, 2081 { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat, "v62,v65,v66" }, 2082 { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat_128B, "v62,v65,v66" }, 2083 { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat_dv, "v62,v65,v66" }, 2084 { Hexagon::BI__builtin_HEXAGON_V6_vaddbsat_dv_128B, "v62,v65,v66" }, 2085 { Hexagon::BI__builtin_HEXAGON_V6_vaddcarry, "v62,v65,v66" }, 2086 { Hexagon::BI__builtin_HEXAGON_V6_vaddcarry_128B, "v62,v65,v66" }, 2087 { Hexagon::BI__builtin_HEXAGON_V6_vaddcarrysat, "v66" }, 2088 { Hexagon::BI__builtin_HEXAGON_V6_vaddcarrysat_128B, "v66" }, 2089 { Hexagon::BI__builtin_HEXAGON_V6_vaddclbh, "v62,v65,v66" }, 2090 { Hexagon::BI__builtin_HEXAGON_V6_vaddclbh_128B, "v62,v65,v66" }, 2091 { Hexagon::BI__builtin_HEXAGON_V6_vaddclbw, "v62,v65,v66" }, 2092 { Hexagon::BI__builtin_HEXAGON_V6_vaddclbw_128B, "v62,v65,v66" }, 2093 { Hexagon::BI__builtin_HEXAGON_V6_vaddh, "v60,v62,v65,v66" }, 2094 { Hexagon::BI__builtin_HEXAGON_V6_vaddh_128B, "v60,v62,v65,v66" }, 2095 { Hexagon::BI__builtin_HEXAGON_V6_vaddh_dv, "v60,v62,v65,v66" }, 2096 { Hexagon::BI__builtin_HEXAGON_V6_vaddh_dv_128B, "v60,v62,v65,v66" }, 2097 { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat, "v60,v62,v65,v66" }, 2098 { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat_128B, "v60,v62,v65,v66" }, 2099 { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat_dv, "v60,v62,v65,v66" }, 2100 { Hexagon::BI__builtin_HEXAGON_V6_vaddhsat_dv_128B, "v60,v62,v65,v66" }, 2101 { Hexagon::BI__builtin_HEXAGON_V6_vaddhw, "v60,v62,v65,v66" }, 2102 { Hexagon::BI__builtin_HEXAGON_V6_vaddhw_128B, "v60,v62,v65,v66" }, 2103 { Hexagon::BI__builtin_HEXAGON_V6_vaddhw_acc, "v62,v65,v66" }, 2104 { Hexagon::BI__builtin_HEXAGON_V6_vaddhw_acc_128B, "v62,v65,v66" }, 2105 { Hexagon::BI__builtin_HEXAGON_V6_vaddubh, "v60,v62,v65,v66" }, 2106 { Hexagon::BI__builtin_HEXAGON_V6_vaddubh_128B, "v60,v62,v65,v66" }, 2107 { Hexagon::BI__builtin_HEXAGON_V6_vaddubh_acc, "v62,v65,v66" }, 2108 { Hexagon::BI__builtin_HEXAGON_V6_vaddubh_acc_128B, "v62,v65,v66" }, 2109 { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat, "v60,v62,v65,v66" }, 2110 { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat_128B, "v60,v62,v65,v66" }, 2111 { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat_dv, "v60,v62,v65,v66" }, 2112 { Hexagon::BI__builtin_HEXAGON_V6_vaddubsat_dv_128B, "v60,v62,v65,v66" }, 2113 { Hexagon::BI__builtin_HEXAGON_V6_vaddububb_sat, "v62,v65,v66" }, 2114 { Hexagon::BI__builtin_HEXAGON_V6_vaddububb_sat_128B, "v62,v65,v66" }, 2115 { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat, "v60,v62,v65,v66" }, 2116 { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat_128B, "v60,v62,v65,v66" }, 2117 { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat_dv, "v60,v62,v65,v66" }, 2118 { Hexagon::BI__builtin_HEXAGON_V6_vadduhsat_dv_128B, "v60,v62,v65,v66" }, 2119 { Hexagon::BI__builtin_HEXAGON_V6_vadduhw, "v60,v62,v65,v66" }, 2120 { Hexagon::BI__builtin_HEXAGON_V6_vadduhw_128B, "v60,v62,v65,v66" }, 2121 { Hexagon::BI__builtin_HEXAGON_V6_vadduhw_acc, "v62,v65,v66" }, 2122 { Hexagon::BI__builtin_HEXAGON_V6_vadduhw_acc_128B, "v62,v65,v66" }, 2123 { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat, "v62,v65,v66" }, 2124 { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat_128B, "v62,v65,v66" }, 2125 { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat_dv, 
"v62,v65,v66" }, 2126 { Hexagon::BI__builtin_HEXAGON_V6_vadduwsat_dv_128B, "v62,v65,v66" }, 2127 { Hexagon::BI__builtin_HEXAGON_V6_vaddw, "v60,v62,v65,v66" }, 2128 { Hexagon::BI__builtin_HEXAGON_V6_vaddw_128B, "v60,v62,v65,v66" }, 2129 { Hexagon::BI__builtin_HEXAGON_V6_vaddw_dv, "v60,v62,v65,v66" }, 2130 { Hexagon::BI__builtin_HEXAGON_V6_vaddw_dv_128B, "v60,v62,v65,v66" }, 2131 { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat, "v60,v62,v65,v66" }, 2132 { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat_128B, "v60,v62,v65,v66" }, 2133 { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat_dv, "v60,v62,v65,v66" }, 2134 { Hexagon::BI__builtin_HEXAGON_V6_vaddwsat_dv_128B, "v60,v62,v65,v66" }, 2135 { Hexagon::BI__builtin_HEXAGON_V6_valignb, "v60,v62,v65,v66" }, 2136 { Hexagon::BI__builtin_HEXAGON_V6_valignb_128B, "v60,v62,v65,v66" }, 2137 { Hexagon::BI__builtin_HEXAGON_V6_valignbi, "v60,v62,v65,v66" }, 2138 { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, "v60,v62,v65,v66" }, 2139 { Hexagon::BI__builtin_HEXAGON_V6_vand, "v60,v62,v65,v66" }, 2140 { Hexagon::BI__builtin_HEXAGON_V6_vand_128B, "v60,v62,v65,v66" }, 2141 { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt, "v62,v65,v66" }, 2142 { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt_128B, "v62,v65,v66" }, 2143 { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt_acc, "v62,v65,v66" }, 2144 { Hexagon::BI__builtin_HEXAGON_V6_vandnqrt_acc_128B, "v62,v65,v66" }, 2145 { Hexagon::BI__builtin_HEXAGON_V6_vandqrt, "v60,v62,v65,v66" }, 2146 { Hexagon::BI__builtin_HEXAGON_V6_vandqrt_128B, "v60,v62,v65,v66" }, 2147 { Hexagon::BI__builtin_HEXAGON_V6_vandqrt_acc, "v60,v62,v65,v66" }, 2148 { Hexagon::BI__builtin_HEXAGON_V6_vandqrt_acc_128B, "v60,v62,v65,v66" }, 2149 { Hexagon::BI__builtin_HEXAGON_V6_vandvnqv, "v62,v65,v66" }, 2150 { Hexagon::BI__builtin_HEXAGON_V6_vandvnqv_128B, "v62,v65,v66" }, 2151 { Hexagon::BI__builtin_HEXAGON_V6_vandvqv, "v62,v65,v66" }, 2152 { Hexagon::BI__builtin_HEXAGON_V6_vandvqv_128B, "v62,v65,v66" }, 2153 { Hexagon::BI__builtin_HEXAGON_V6_vandvrt, "v60,v62,v65,v66" }, 2154 { Hexagon::BI__builtin_HEXAGON_V6_vandvrt_128B, "v60,v62,v65,v66" }, 2155 { Hexagon::BI__builtin_HEXAGON_V6_vandvrt_acc, "v60,v62,v65,v66" }, 2156 { Hexagon::BI__builtin_HEXAGON_V6_vandvrt_acc_128B, "v60,v62,v65,v66" }, 2157 { Hexagon::BI__builtin_HEXAGON_V6_vaslh, "v60,v62,v65,v66" }, 2158 { Hexagon::BI__builtin_HEXAGON_V6_vaslh_128B, "v60,v62,v65,v66" }, 2159 { Hexagon::BI__builtin_HEXAGON_V6_vaslh_acc, "v65,v66" }, 2160 { Hexagon::BI__builtin_HEXAGON_V6_vaslh_acc_128B, "v65,v66" }, 2161 { Hexagon::BI__builtin_HEXAGON_V6_vaslhv, "v60,v62,v65,v66" }, 2162 { Hexagon::BI__builtin_HEXAGON_V6_vaslhv_128B, "v60,v62,v65,v66" }, 2163 { Hexagon::BI__builtin_HEXAGON_V6_vaslw, "v60,v62,v65,v66" }, 2164 { Hexagon::BI__builtin_HEXAGON_V6_vaslw_128B, "v60,v62,v65,v66" }, 2165 { Hexagon::BI__builtin_HEXAGON_V6_vaslw_acc, "v60,v62,v65,v66" }, 2166 { Hexagon::BI__builtin_HEXAGON_V6_vaslw_acc_128B, "v60,v62,v65,v66" }, 2167 { Hexagon::BI__builtin_HEXAGON_V6_vaslwv, "v60,v62,v65,v66" }, 2168 { Hexagon::BI__builtin_HEXAGON_V6_vaslwv_128B, "v60,v62,v65,v66" }, 2169 { Hexagon::BI__builtin_HEXAGON_V6_vasrh, "v60,v62,v65,v66" }, 2170 { Hexagon::BI__builtin_HEXAGON_V6_vasrh_128B, "v60,v62,v65,v66" }, 2171 { Hexagon::BI__builtin_HEXAGON_V6_vasrh_acc, "v65,v66" }, 2172 { Hexagon::BI__builtin_HEXAGON_V6_vasrh_acc_128B, "v65,v66" }, 2173 { Hexagon::BI__builtin_HEXAGON_V6_vasrhbrndsat, "v60,v62,v65,v66" }, 2174 { Hexagon::BI__builtin_HEXAGON_V6_vasrhbrndsat_128B, "v60,v62,v65,v66" }, 2175 { Hexagon::BI__builtin_HEXAGON_V6_vasrhbsat, 
"v62,v65,v66" }, 2176 { Hexagon::BI__builtin_HEXAGON_V6_vasrhbsat_128B, "v62,v65,v66" }, 2177 { Hexagon::BI__builtin_HEXAGON_V6_vasrhubrndsat, "v60,v62,v65,v66" }, 2178 { Hexagon::BI__builtin_HEXAGON_V6_vasrhubrndsat_128B, "v60,v62,v65,v66" }, 2179 { Hexagon::BI__builtin_HEXAGON_V6_vasrhubsat, "v60,v62,v65,v66" }, 2180 { Hexagon::BI__builtin_HEXAGON_V6_vasrhubsat_128B, "v60,v62,v65,v66" }, 2181 { Hexagon::BI__builtin_HEXAGON_V6_vasrhv, "v60,v62,v65,v66" }, 2182 { Hexagon::BI__builtin_HEXAGON_V6_vasrhv_128B, "v60,v62,v65,v66" }, 2183 { Hexagon::BI__builtin_HEXAGON_V6_vasr_into, "v66" }, 2184 { Hexagon::BI__builtin_HEXAGON_V6_vasr_into_128B, "v66" }, 2185 { Hexagon::BI__builtin_HEXAGON_V6_vasruhubrndsat, "v65,v66" }, 2186 { Hexagon::BI__builtin_HEXAGON_V6_vasruhubrndsat_128B, "v65,v66" }, 2187 { Hexagon::BI__builtin_HEXAGON_V6_vasruhubsat, "v65,v66" }, 2188 { Hexagon::BI__builtin_HEXAGON_V6_vasruhubsat_128B, "v65,v66" }, 2189 { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhrndsat, "v62,v65,v66" }, 2190 { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhrndsat_128B, "v62,v65,v66" }, 2191 { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhsat, "v65,v66" }, 2192 { Hexagon::BI__builtin_HEXAGON_V6_vasruwuhsat_128B, "v65,v66" }, 2193 { Hexagon::BI__builtin_HEXAGON_V6_vasrw, "v60,v62,v65,v66" }, 2194 { Hexagon::BI__builtin_HEXAGON_V6_vasrw_128B, "v60,v62,v65,v66" }, 2195 { Hexagon::BI__builtin_HEXAGON_V6_vasrw_acc, "v60,v62,v65,v66" }, 2196 { Hexagon::BI__builtin_HEXAGON_V6_vasrw_acc_128B, "v60,v62,v65,v66" }, 2197 { Hexagon::BI__builtin_HEXAGON_V6_vasrwh, "v60,v62,v65,v66" }, 2198 { Hexagon::BI__builtin_HEXAGON_V6_vasrwh_128B, "v60,v62,v65,v66" }, 2199 { Hexagon::BI__builtin_HEXAGON_V6_vasrwhrndsat, "v60,v62,v65,v66" }, 2200 { Hexagon::BI__builtin_HEXAGON_V6_vasrwhrndsat_128B, "v60,v62,v65,v66" }, 2201 { Hexagon::BI__builtin_HEXAGON_V6_vasrwhsat, "v60,v62,v65,v66" }, 2202 { Hexagon::BI__builtin_HEXAGON_V6_vasrwhsat_128B, "v60,v62,v65,v66" }, 2203 { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhrndsat, "v62,v65,v66" }, 2204 { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhrndsat_128B, "v62,v65,v66" }, 2205 { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhsat, "v60,v62,v65,v66" }, 2206 { Hexagon::BI__builtin_HEXAGON_V6_vasrwuhsat_128B, "v60,v62,v65,v66" }, 2207 { Hexagon::BI__builtin_HEXAGON_V6_vasrwv, "v60,v62,v65,v66" }, 2208 { Hexagon::BI__builtin_HEXAGON_V6_vasrwv_128B, "v60,v62,v65,v66" }, 2209 { Hexagon::BI__builtin_HEXAGON_V6_vassign, "v60,v62,v65,v66" }, 2210 { Hexagon::BI__builtin_HEXAGON_V6_vassign_128B, "v60,v62,v65,v66" }, 2211 { Hexagon::BI__builtin_HEXAGON_V6_vassignp, "v60,v62,v65,v66" }, 2212 { Hexagon::BI__builtin_HEXAGON_V6_vassignp_128B, "v60,v62,v65,v66" }, 2213 { Hexagon::BI__builtin_HEXAGON_V6_vavgb, "v65,v66" }, 2214 { Hexagon::BI__builtin_HEXAGON_V6_vavgb_128B, "v65,v66" }, 2215 { Hexagon::BI__builtin_HEXAGON_V6_vavgbrnd, "v65,v66" }, 2216 { Hexagon::BI__builtin_HEXAGON_V6_vavgbrnd_128B, "v65,v66" }, 2217 { Hexagon::BI__builtin_HEXAGON_V6_vavgh, "v60,v62,v65,v66" }, 2218 { Hexagon::BI__builtin_HEXAGON_V6_vavgh_128B, "v60,v62,v65,v66" }, 2219 { Hexagon::BI__builtin_HEXAGON_V6_vavghrnd, "v60,v62,v65,v66" }, 2220 { Hexagon::BI__builtin_HEXAGON_V6_vavghrnd_128B, "v60,v62,v65,v66" }, 2221 { Hexagon::BI__builtin_HEXAGON_V6_vavgub, "v60,v62,v65,v66" }, 2222 { Hexagon::BI__builtin_HEXAGON_V6_vavgub_128B, "v60,v62,v65,v66" }, 2223 { Hexagon::BI__builtin_HEXAGON_V6_vavgubrnd, "v60,v62,v65,v66" }, 2224 { Hexagon::BI__builtin_HEXAGON_V6_vavgubrnd_128B, "v60,v62,v65,v66" }, 2225 { Hexagon::BI__builtin_HEXAGON_V6_vavguh, 
"v60,v62,v65,v66" }, 2226 { Hexagon::BI__builtin_HEXAGON_V6_vavguh_128B, "v60,v62,v65,v66" }, 2227 { Hexagon::BI__builtin_HEXAGON_V6_vavguhrnd, "v60,v62,v65,v66" }, 2228 { Hexagon::BI__builtin_HEXAGON_V6_vavguhrnd_128B, "v60,v62,v65,v66" }, 2229 { Hexagon::BI__builtin_HEXAGON_V6_vavguw, "v65,v66" }, 2230 { Hexagon::BI__builtin_HEXAGON_V6_vavguw_128B, "v65,v66" }, 2231 { Hexagon::BI__builtin_HEXAGON_V6_vavguwrnd, "v65,v66" }, 2232 { Hexagon::BI__builtin_HEXAGON_V6_vavguwrnd_128B, "v65,v66" }, 2233 { Hexagon::BI__builtin_HEXAGON_V6_vavgw, "v60,v62,v65,v66" }, 2234 { Hexagon::BI__builtin_HEXAGON_V6_vavgw_128B, "v60,v62,v65,v66" }, 2235 { Hexagon::BI__builtin_HEXAGON_V6_vavgwrnd, "v60,v62,v65,v66" }, 2236 { Hexagon::BI__builtin_HEXAGON_V6_vavgwrnd_128B, "v60,v62,v65,v66" }, 2237 { Hexagon::BI__builtin_HEXAGON_V6_vcl0h, "v60,v62,v65,v66" }, 2238 { Hexagon::BI__builtin_HEXAGON_V6_vcl0h_128B, "v60,v62,v65,v66" }, 2239 { Hexagon::BI__builtin_HEXAGON_V6_vcl0w, "v60,v62,v65,v66" }, 2240 { Hexagon::BI__builtin_HEXAGON_V6_vcl0w_128B, "v60,v62,v65,v66" }, 2241 { Hexagon::BI__builtin_HEXAGON_V6_vcombine, "v60,v62,v65,v66" }, 2242 { Hexagon::BI__builtin_HEXAGON_V6_vcombine_128B, "v60,v62,v65,v66" }, 2243 { Hexagon::BI__builtin_HEXAGON_V6_vd0, "v60,v62,v65,v66" }, 2244 { Hexagon::BI__builtin_HEXAGON_V6_vd0_128B, "v60,v62,v65,v66" }, 2245 { Hexagon::BI__builtin_HEXAGON_V6_vdd0, "v65,v66" }, 2246 { Hexagon::BI__builtin_HEXAGON_V6_vdd0_128B, "v65,v66" }, 2247 { Hexagon::BI__builtin_HEXAGON_V6_vdealb, "v60,v62,v65,v66" }, 2248 { Hexagon::BI__builtin_HEXAGON_V6_vdealb_128B, "v60,v62,v65,v66" }, 2249 { Hexagon::BI__builtin_HEXAGON_V6_vdealb4w, "v60,v62,v65,v66" }, 2250 { Hexagon::BI__builtin_HEXAGON_V6_vdealb4w_128B, "v60,v62,v65,v66" }, 2251 { Hexagon::BI__builtin_HEXAGON_V6_vdealh, "v60,v62,v65,v66" }, 2252 { Hexagon::BI__builtin_HEXAGON_V6_vdealh_128B, "v60,v62,v65,v66" }, 2253 { Hexagon::BI__builtin_HEXAGON_V6_vdealvdd, "v60,v62,v65,v66" }, 2254 { Hexagon::BI__builtin_HEXAGON_V6_vdealvdd_128B, "v60,v62,v65,v66" }, 2255 { Hexagon::BI__builtin_HEXAGON_V6_vdelta, "v60,v62,v65,v66" }, 2256 { Hexagon::BI__builtin_HEXAGON_V6_vdelta_128B, "v60,v62,v65,v66" }, 2257 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus, "v60,v62,v65,v66" }, 2258 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_128B, "v60,v62,v65,v66" }, 2259 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_acc, "v60,v62,v65,v66" }, 2260 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_acc_128B, "v60,v62,v65,v66" }, 2261 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv, "v60,v62,v65,v66" }, 2262 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv_128B, "v60,v62,v65,v66" }, 2263 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv_acc, "v60,v62,v65,v66" }, 2264 { Hexagon::BI__builtin_HEXAGON_V6_vdmpybus_dv_acc_128B, "v60,v62,v65,v66" }, 2265 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb, "v60,v62,v65,v66" }, 2266 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_128B, "v60,v62,v65,v66" }, 2267 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_acc, "v60,v62,v65,v66" }, 2268 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_acc_128B, "v60,v62,v65,v66" }, 2269 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv, "v60,v62,v65,v66" }, 2270 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv_128B, "v60,v62,v65,v66" }, 2271 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv_acc, "v60,v62,v65,v66" }, 2272 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhb_dv_acc_128B, "v60,v62,v65,v66" }, 2273 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat, "v60,v62,v65,v66" }, 2274 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat_128B, "v60,v62,v65,v66" }, 2275 { 
Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat_acc, "v60,v62,v65,v66" }, 2276 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhisat_acc_128B, "v60,v62,v65,v66" }, 2277 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat, "v60,v62,v65,v66" }, 2278 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat_128B, "v60,v62,v65,v66" }, 2279 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat_acc, "v60,v62,v65,v66" }, 2280 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsat_acc_128B, "v60,v62,v65,v66" }, 2281 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat, "v60,v62,v65,v66" }, 2282 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat_128B, "v60,v62,v65,v66" }, 2283 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat_acc, "v60,v62,v65,v66" }, 2284 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsuisat_acc_128B, "v60,v62,v65,v66" }, 2285 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat, "v60,v62,v65,v66" }, 2286 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat_128B, "v60,v62,v65,v66" }, 2287 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat_acc, "v60,v62,v65,v66" }, 2288 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhsusat_acc_128B, "v60,v62,v65,v66" }, 2289 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat, "v60,v62,v65,v66" }, 2290 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat_128B, "v60,v62,v65,v66" }, 2291 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat_acc, "v60,v62,v65,v66" }, 2292 { Hexagon::BI__builtin_HEXAGON_V6_vdmpyhvsat_acc_128B, "v60,v62,v65,v66" }, 2293 { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh, "v60,v62,v65,v66" }, 2294 { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh_128B, "v60,v62,v65,v66" }, 2295 { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh_acc, "v60,v62,v65,v66" }, 2296 { Hexagon::BI__builtin_HEXAGON_V6_vdsaduh_acc_128B, "v60,v62,v65,v66" }, 2297 { Hexagon::BI__builtin_HEXAGON_V6_veqb, "v60,v62,v65,v66" }, 2298 { Hexagon::BI__builtin_HEXAGON_V6_veqb_128B, "v60,v62,v65,v66" }, 2299 { Hexagon::BI__builtin_HEXAGON_V6_veqb_and, "v60,v62,v65,v66" }, 2300 { Hexagon::BI__builtin_HEXAGON_V6_veqb_and_128B, "v60,v62,v65,v66" }, 2301 { Hexagon::BI__builtin_HEXAGON_V6_veqb_or, "v60,v62,v65,v66" }, 2302 { Hexagon::BI__builtin_HEXAGON_V6_veqb_or_128B, "v60,v62,v65,v66" }, 2303 { Hexagon::BI__builtin_HEXAGON_V6_veqb_xor, "v60,v62,v65,v66" }, 2304 { Hexagon::BI__builtin_HEXAGON_V6_veqb_xor_128B, "v60,v62,v65,v66" }, 2305 { Hexagon::BI__builtin_HEXAGON_V6_veqh, "v60,v62,v65,v66" }, 2306 { Hexagon::BI__builtin_HEXAGON_V6_veqh_128B, "v60,v62,v65,v66" }, 2307 { Hexagon::BI__builtin_HEXAGON_V6_veqh_and, "v60,v62,v65,v66" }, 2308 { Hexagon::BI__builtin_HEXAGON_V6_veqh_and_128B, "v60,v62,v65,v66" }, 2309 { Hexagon::BI__builtin_HEXAGON_V6_veqh_or, "v60,v62,v65,v66" }, 2310 { Hexagon::BI__builtin_HEXAGON_V6_veqh_or_128B, "v60,v62,v65,v66" }, 2311 { Hexagon::BI__builtin_HEXAGON_V6_veqh_xor, "v60,v62,v65,v66" }, 2312 { Hexagon::BI__builtin_HEXAGON_V6_veqh_xor_128B, "v60,v62,v65,v66" }, 2313 { Hexagon::BI__builtin_HEXAGON_V6_veqw, "v60,v62,v65,v66" }, 2314 { Hexagon::BI__builtin_HEXAGON_V6_veqw_128B, "v60,v62,v65,v66" }, 2315 { Hexagon::BI__builtin_HEXAGON_V6_veqw_and, "v60,v62,v65,v66" }, 2316 { Hexagon::BI__builtin_HEXAGON_V6_veqw_and_128B, "v60,v62,v65,v66" }, 2317 { Hexagon::BI__builtin_HEXAGON_V6_veqw_or, "v60,v62,v65,v66" }, 2318 { Hexagon::BI__builtin_HEXAGON_V6_veqw_or_128B, "v60,v62,v65,v66" }, 2319 { Hexagon::BI__builtin_HEXAGON_V6_veqw_xor, "v60,v62,v65,v66" }, 2320 { Hexagon::BI__builtin_HEXAGON_V6_veqw_xor_128B, "v60,v62,v65,v66" }, 2321 { Hexagon::BI__builtin_HEXAGON_V6_vgtb, "v60,v62,v65,v66" }, 2322 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_128B, "v60,v62,v65,v66" }, 2323 { 
Hexagon::BI__builtin_HEXAGON_V6_vgtb_and, "v60,v62,v65,v66" }, 2324 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_and_128B, "v60,v62,v65,v66" }, 2325 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_or, "v60,v62,v65,v66" }, 2326 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_or_128B, "v60,v62,v65,v66" }, 2327 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_xor, "v60,v62,v65,v66" }, 2328 { Hexagon::BI__builtin_HEXAGON_V6_vgtb_xor_128B, "v60,v62,v65,v66" }, 2329 { Hexagon::BI__builtin_HEXAGON_V6_vgth, "v60,v62,v65,v66" }, 2330 { Hexagon::BI__builtin_HEXAGON_V6_vgth_128B, "v60,v62,v65,v66" }, 2331 { Hexagon::BI__builtin_HEXAGON_V6_vgth_and, "v60,v62,v65,v66" }, 2332 { Hexagon::BI__builtin_HEXAGON_V6_vgth_and_128B, "v60,v62,v65,v66" }, 2333 { Hexagon::BI__builtin_HEXAGON_V6_vgth_or, "v60,v62,v65,v66" }, 2334 { Hexagon::BI__builtin_HEXAGON_V6_vgth_or_128B, "v60,v62,v65,v66" }, 2335 { Hexagon::BI__builtin_HEXAGON_V6_vgth_xor, "v60,v62,v65,v66" }, 2336 { Hexagon::BI__builtin_HEXAGON_V6_vgth_xor_128B, "v60,v62,v65,v66" }, 2337 { Hexagon::BI__builtin_HEXAGON_V6_vgtub, "v60,v62,v65,v66" }, 2338 { Hexagon::BI__builtin_HEXAGON_V6_vgtub_128B, "v60,v62,v65,v66" }, 2339 { Hexagon::BI__builtin_HEXAGON_V6_vgtub_and, "v60,v62,v65,v66" }, 2340 { Hexagon::BI__builtin_HEXAGON_V6_vgtub_and_128B, "v60,v62,v65,v66" }, 2341 { Hexagon::BI__builtin_HEXAGON_V6_vgtub_or, "v60,v62,v65,v66" }, 2342 { Hexagon::BI__builtin_HEXAGON_V6_vgtub_or_128B, "v60,v62,v65,v66" }, 2343 { Hexagon::BI__builtin_HEXAGON_V6_vgtub_xor, "v60,v62,v65,v66" }, 2344 { Hexagon::BI__builtin_HEXAGON_V6_vgtub_xor_128B, "v60,v62,v65,v66" }, 2345 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh, "v60,v62,v65,v66" }, 2346 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_128B, "v60,v62,v65,v66" }, 2347 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_and, "v60,v62,v65,v66" }, 2348 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_and_128B, "v60,v62,v65,v66" }, 2349 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_or, "v60,v62,v65,v66" }, 2350 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_or_128B, "v60,v62,v65,v66" }, 2351 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_xor, "v60,v62,v65,v66" }, 2352 { Hexagon::BI__builtin_HEXAGON_V6_vgtuh_xor_128B, "v60,v62,v65,v66" }, 2353 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw, "v60,v62,v65,v66" }, 2354 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_128B, "v60,v62,v65,v66" }, 2355 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_and, "v60,v62,v65,v66" }, 2356 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_and_128B, "v60,v62,v65,v66" }, 2357 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_or, "v60,v62,v65,v66" }, 2358 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_or_128B, "v60,v62,v65,v66" }, 2359 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_xor, "v60,v62,v65,v66" }, 2360 { Hexagon::BI__builtin_HEXAGON_V6_vgtuw_xor_128B, "v60,v62,v65,v66" }, 2361 { Hexagon::BI__builtin_HEXAGON_V6_vgtw, "v60,v62,v65,v66" }, 2362 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_128B, "v60,v62,v65,v66" }, 2363 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_and, "v60,v62,v65,v66" }, 2364 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_and_128B, "v60,v62,v65,v66" }, 2365 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_or, "v60,v62,v65,v66" }, 2366 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_or_128B, "v60,v62,v65,v66" }, 2367 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_xor, "v60,v62,v65,v66" }, 2368 { Hexagon::BI__builtin_HEXAGON_V6_vgtw_xor_128B, "v60,v62,v65,v66" }, 2369 { Hexagon::BI__builtin_HEXAGON_V6_vinsertwr, "v60,v62,v65,v66" }, 2370 { Hexagon::BI__builtin_HEXAGON_V6_vinsertwr_128B, "v60,v62,v65,v66" }, 2371 { Hexagon::BI__builtin_HEXAGON_V6_vlalignb, "v60,v62,v65,v66" }, 2372 { 
Hexagon::BI__builtin_HEXAGON_V6_vlalignb_128B, "v60,v62,v65,v66" }, 2373 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, "v60,v62,v65,v66" }, 2374 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, "v60,v62,v65,v66" }, 2375 { Hexagon::BI__builtin_HEXAGON_V6_vlsrb, "v62,v65,v66" }, 2376 { Hexagon::BI__builtin_HEXAGON_V6_vlsrb_128B, "v62,v65,v66" }, 2377 { Hexagon::BI__builtin_HEXAGON_V6_vlsrh, "v60,v62,v65,v66" }, 2378 { Hexagon::BI__builtin_HEXAGON_V6_vlsrh_128B, "v60,v62,v65,v66" }, 2379 { Hexagon::BI__builtin_HEXAGON_V6_vlsrhv, "v60,v62,v65,v66" }, 2380 { Hexagon::BI__builtin_HEXAGON_V6_vlsrhv_128B, "v60,v62,v65,v66" }, 2381 { Hexagon::BI__builtin_HEXAGON_V6_vlsrw, "v60,v62,v65,v66" }, 2382 { Hexagon::BI__builtin_HEXAGON_V6_vlsrw_128B, "v60,v62,v65,v66" }, 2383 { Hexagon::BI__builtin_HEXAGON_V6_vlsrwv, "v60,v62,v65,v66" }, 2384 { Hexagon::BI__builtin_HEXAGON_V6_vlsrwv_128B, "v60,v62,v65,v66" }, 2385 { Hexagon::BI__builtin_HEXAGON_V6_vlut4, "v65,v66" }, 2386 { Hexagon::BI__builtin_HEXAGON_V6_vlut4_128B, "v65,v66" }, 2387 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb, "v60,v62,v65,v66" }, 2388 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_128B, "v60,v62,v65,v66" }, 2389 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi, "v62,v65,v66" }, 2390 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi_128B, "v62,v65,v66" }, 2391 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_nm, "v62,v65,v66" }, 2392 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_nm_128B, "v62,v65,v66" }, 2393 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracc, "v60,v62,v65,v66" }, 2394 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracc_128B, "v60,v62,v65,v66" }, 2395 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci, "v62,v65,v66" }, 2396 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci_128B, "v62,v65,v66" }, 2397 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh, "v60,v62,v65,v66" }, 2398 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_128B, "v60,v62,v65,v66" }, 2399 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi, "v62,v65,v66" }, 2400 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi_128B, "v62,v65,v66" }, 2401 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_nm, "v62,v65,v66" }, 2402 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_nm_128B, "v62,v65,v66" }, 2403 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracc, "v60,v62,v65,v66" }, 2404 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracc_128B, "v60,v62,v65,v66" }, 2405 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci, "v62,v65,v66" }, 2406 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci_128B, "v62,v65,v66" }, 2407 { Hexagon::BI__builtin_HEXAGON_V6_vmaxb, "v62,v65,v66" }, 2408 { Hexagon::BI__builtin_HEXAGON_V6_vmaxb_128B, "v62,v65,v66" }, 2409 { Hexagon::BI__builtin_HEXAGON_V6_vmaxh, "v60,v62,v65,v66" }, 2410 { Hexagon::BI__builtin_HEXAGON_V6_vmaxh_128B, "v60,v62,v65,v66" }, 2411 { Hexagon::BI__builtin_HEXAGON_V6_vmaxub, "v60,v62,v65,v66" }, 2412 { Hexagon::BI__builtin_HEXAGON_V6_vmaxub_128B, "v60,v62,v65,v66" }, 2413 { Hexagon::BI__builtin_HEXAGON_V6_vmaxuh, "v60,v62,v65,v66" }, 2414 { Hexagon::BI__builtin_HEXAGON_V6_vmaxuh_128B, "v60,v62,v65,v66" }, 2415 { Hexagon::BI__builtin_HEXAGON_V6_vmaxw, "v60,v62,v65,v66" }, 2416 { Hexagon::BI__builtin_HEXAGON_V6_vmaxw_128B, "v60,v62,v65,v66" }, 2417 { Hexagon::BI__builtin_HEXAGON_V6_vminb, "v62,v65,v66" }, 2418 { Hexagon::BI__builtin_HEXAGON_V6_vminb_128B, "v62,v65,v66" }, 2419 { Hexagon::BI__builtin_HEXAGON_V6_vminh, "v60,v62,v65,v66" }, 2420 { Hexagon::BI__builtin_HEXAGON_V6_vminh_128B, "v60,v62,v65,v66" }, 2421 { Hexagon::BI__builtin_HEXAGON_V6_vminub, "v60,v62,v65,v66" }, 2422 { 
Hexagon::BI__builtin_HEXAGON_V6_vminub_128B, "v60,v62,v65,v66" }, 2423 { Hexagon::BI__builtin_HEXAGON_V6_vminuh, "v60,v62,v65,v66" }, 2424 { Hexagon::BI__builtin_HEXAGON_V6_vminuh_128B, "v60,v62,v65,v66" }, 2425 { Hexagon::BI__builtin_HEXAGON_V6_vminw, "v60,v62,v65,v66" }, 2426 { Hexagon::BI__builtin_HEXAGON_V6_vminw_128B, "v60,v62,v65,v66" }, 2427 { Hexagon::BI__builtin_HEXAGON_V6_vmpabus, "v60,v62,v65,v66" }, 2428 { Hexagon::BI__builtin_HEXAGON_V6_vmpabus_128B, "v60,v62,v65,v66" }, 2429 { Hexagon::BI__builtin_HEXAGON_V6_vmpabus_acc, "v60,v62,v65,v66" }, 2430 { Hexagon::BI__builtin_HEXAGON_V6_vmpabus_acc_128B, "v60,v62,v65,v66" }, 2431 { Hexagon::BI__builtin_HEXAGON_V6_vmpabusv, "v60,v62,v65,v66" }, 2432 { Hexagon::BI__builtin_HEXAGON_V6_vmpabusv_128B, "v60,v62,v65,v66" }, 2433 { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu, "v65,v66" }, 2434 { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu_128B, "v65,v66" }, 2435 { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu_acc, "v65,v66" }, 2436 { Hexagon::BI__builtin_HEXAGON_V6_vmpabuu_acc_128B, "v65,v66" }, 2437 { Hexagon::BI__builtin_HEXAGON_V6_vmpabuuv, "v60,v62,v65,v66" }, 2438 { Hexagon::BI__builtin_HEXAGON_V6_vmpabuuv_128B, "v60,v62,v65,v66" }, 2439 { Hexagon::BI__builtin_HEXAGON_V6_vmpahb, "v60,v62,v65,v66" }, 2440 { Hexagon::BI__builtin_HEXAGON_V6_vmpahb_128B, "v60,v62,v65,v66" }, 2441 { Hexagon::BI__builtin_HEXAGON_V6_vmpahb_acc, "v60,v62,v65,v66" }, 2442 { Hexagon::BI__builtin_HEXAGON_V6_vmpahb_acc_128B, "v60,v62,v65,v66" }, 2443 { Hexagon::BI__builtin_HEXAGON_V6_vmpahhsat, "v65,v66" }, 2444 { Hexagon::BI__builtin_HEXAGON_V6_vmpahhsat_128B, "v65,v66" }, 2445 { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb, "v62,v65,v66" }, 2446 { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb_128B, "v62,v65,v66" }, 2447 { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb_acc, "v62,v65,v66" }, 2448 { Hexagon::BI__builtin_HEXAGON_V6_vmpauhb_acc_128B, "v62,v65,v66" }, 2449 { Hexagon::BI__builtin_HEXAGON_V6_vmpauhuhsat, "v65,v66" }, 2450 { Hexagon::BI__builtin_HEXAGON_V6_vmpauhuhsat_128B, "v65,v66" }, 2451 { Hexagon::BI__builtin_HEXAGON_V6_vmpsuhuhsat, "v65,v66" }, 2452 { Hexagon::BI__builtin_HEXAGON_V6_vmpsuhuhsat_128B, "v65,v66" }, 2453 { Hexagon::BI__builtin_HEXAGON_V6_vmpybus, "v60,v62,v65,v66" }, 2454 { Hexagon::BI__builtin_HEXAGON_V6_vmpybus_128B, "v60,v62,v65,v66" }, 2455 { Hexagon::BI__builtin_HEXAGON_V6_vmpybus_acc, "v60,v62,v65,v66" }, 2456 { Hexagon::BI__builtin_HEXAGON_V6_vmpybus_acc_128B, "v60,v62,v65,v66" }, 2457 { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv, "v60,v62,v65,v66" }, 2458 { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv_128B, "v60,v62,v65,v66" }, 2459 { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv_acc, "v60,v62,v65,v66" }, 2460 { Hexagon::BI__builtin_HEXAGON_V6_vmpybusv_acc_128B, "v60,v62,v65,v66" }, 2461 { Hexagon::BI__builtin_HEXAGON_V6_vmpybv, "v60,v62,v65,v66" }, 2462 { Hexagon::BI__builtin_HEXAGON_V6_vmpybv_128B, "v60,v62,v65,v66" }, 2463 { Hexagon::BI__builtin_HEXAGON_V6_vmpybv_acc, "v60,v62,v65,v66" }, 2464 { Hexagon::BI__builtin_HEXAGON_V6_vmpybv_acc_128B, "v60,v62,v65,v66" }, 2465 { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh, "v60,v62,v65,v66" }, 2466 { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh_128B, "v60,v62,v65,v66" }, 2467 { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh_64, "v62,v65,v66" }, 2468 { Hexagon::BI__builtin_HEXAGON_V6_vmpyewuh_64_128B, "v62,v65,v66" }, 2469 { Hexagon::BI__builtin_HEXAGON_V6_vmpyh, "v60,v62,v65,v66" }, 2470 { Hexagon::BI__builtin_HEXAGON_V6_vmpyh_128B, "v60,v62,v65,v66" }, 2471 { Hexagon::BI__builtin_HEXAGON_V6_vmpyh_acc, "v65,v66" }, 2472 { 
Hexagon::BI__builtin_HEXAGON_V6_vmpyh_acc_128B, "v65,v66" }, 2473 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsat_acc, "v60,v62,v65,v66" }, 2474 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsat_acc_128B, "v60,v62,v65,v66" }, 2475 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsrs, "v60,v62,v65,v66" }, 2476 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhsrs_128B, "v60,v62,v65,v66" }, 2477 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhss, "v60,v62,v65,v66" }, 2478 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhss_128B, "v60,v62,v65,v66" }, 2479 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus, "v60,v62,v65,v66" }, 2480 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus_128B, "v60,v62,v65,v66" }, 2481 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus_acc, "v60,v62,v65,v66" }, 2482 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhus_acc_128B, "v60,v62,v65,v66" }, 2483 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv, "v60,v62,v65,v66" }, 2484 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv_128B, "v60,v62,v65,v66" }, 2485 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv_acc, "v60,v62,v65,v66" }, 2486 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhv_acc_128B, "v60,v62,v65,v66" }, 2487 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhvsrs, "v60,v62,v65,v66" }, 2488 { Hexagon::BI__builtin_HEXAGON_V6_vmpyhvsrs_128B, "v60,v62,v65,v66" }, 2489 { Hexagon::BI__builtin_HEXAGON_V6_vmpyieoh, "v60,v62,v65,v66" }, 2490 { Hexagon::BI__builtin_HEXAGON_V6_vmpyieoh_128B, "v60,v62,v65,v66" }, 2491 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewh_acc, "v60,v62,v65,v66" }, 2492 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewh_acc_128B, "v60,v62,v65,v66" }, 2493 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh, "v60,v62,v65,v66" }, 2494 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh_128B, "v60,v62,v65,v66" }, 2495 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh_acc, "v60,v62,v65,v66" }, 2496 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiewuh_acc_128B, "v60,v62,v65,v66" }, 2497 { Hexagon::BI__builtin_HEXAGON_V6_vmpyih, "v60,v62,v65,v66" }, 2498 { Hexagon::BI__builtin_HEXAGON_V6_vmpyih_128B, "v60,v62,v65,v66" }, 2499 { Hexagon::BI__builtin_HEXAGON_V6_vmpyih_acc, "v60,v62,v65,v66" }, 2500 { Hexagon::BI__builtin_HEXAGON_V6_vmpyih_acc_128B, "v60,v62,v65,v66" }, 2501 { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb, "v60,v62,v65,v66" }, 2502 { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb_128B, "v60,v62,v65,v66" }, 2503 { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb_acc, "v60,v62,v65,v66" }, 2504 { Hexagon::BI__builtin_HEXAGON_V6_vmpyihb_acc_128B, "v60,v62,v65,v66" }, 2505 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiowh, "v60,v62,v65,v66" }, 2506 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiowh_128B, "v60,v62,v65,v66" }, 2507 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb, "v60,v62,v65,v66" }, 2508 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb_128B, "v60,v62,v65,v66" }, 2509 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb_acc, "v60,v62,v65,v66" }, 2510 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwb_acc_128B, "v60,v62,v65,v66" }, 2511 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh, "v60,v62,v65,v66" }, 2512 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh_128B, "v60,v62,v65,v66" }, 2513 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh_acc, "v60,v62,v65,v66" }, 2514 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwh_acc_128B, "v60,v62,v65,v66" }, 2515 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub, "v62,v65,v66" }, 2516 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub_128B, "v62,v65,v66" }, 2517 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub_acc, "v62,v65,v66" }, 2518 { Hexagon::BI__builtin_HEXAGON_V6_vmpyiwub_acc_128B, "v62,v65,v66" }, 2519 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh, "v60,v62,v65,v66" }, 2520 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_128B, 
"v60,v62,v65,v66" }, 2521 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_64_acc, "v62,v65,v66" }, 2522 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_64_acc_128B, "v62,v65,v66" }, 2523 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd, "v60,v62,v65,v66" }, 2524 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd_128B, "v60,v62,v65,v66" }, 2525 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd_sacc, "v60,v62,v65,v66" }, 2526 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_rnd_sacc_128B, "v60,v62,v65,v66" }, 2527 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_sacc, "v60,v62,v65,v66" }, 2528 { Hexagon::BI__builtin_HEXAGON_V6_vmpyowh_sacc_128B, "v60,v62,v65,v66" }, 2529 { Hexagon::BI__builtin_HEXAGON_V6_vmpyub, "v60,v62,v65,v66" }, 2530 { Hexagon::BI__builtin_HEXAGON_V6_vmpyub_128B, "v60,v62,v65,v66" }, 2531 { Hexagon::BI__builtin_HEXAGON_V6_vmpyub_acc, "v60,v62,v65,v66" }, 2532 { Hexagon::BI__builtin_HEXAGON_V6_vmpyub_acc_128B, "v60,v62,v65,v66" }, 2533 { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv, "v60,v62,v65,v66" }, 2534 { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv_128B, "v60,v62,v65,v66" }, 2535 { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv_acc, "v60,v62,v65,v66" }, 2536 { Hexagon::BI__builtin_HEXAGON_V6_vmpyubv_acc_128B, "v60,v62,v65,v66" }, 2537 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh, "v60,v62,v65,v66" }, 2538 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh_128B, "v60,v62,v65,v66" }, 2539 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh_acc, "v60,v62,v65,v66" }, 2540 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuh_acc_128B, "v60,v62,v65,v66" }, 2541 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe, "v65,v66" }, 2542 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe_128B, "v65,v66" }, 2543 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe_acc, "v65,v66" }, 2544 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhe_acc_128B, "v65,v66" }, 2545 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv, "v60,v62,v65,v66" }, 2546 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv_128B, "v60,v62,v65,v66" }, 2547 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv_acc, "v60,v62,v65,v66" }, 2548 { Hexagon::BI__builtin_HEXAGON_V6_vmpyuhv_acc_128B, "v60,v62,v65,v66" }, 2549 { Hexagon::BI__builtin_HEXAGON_V6_vmux, "v60,v62,v65,v66" }, 2550 { Hexagon::BI__builtin_HEXAGON_V6_vmux_128B, "v60,v62,v65,v66" }, 2551 { Hexagon::BI__builtin_HEXAGON_V6_vnavgb, "v65,v66" }, 2552 { Hexagon::BI__builtin_HEXAGON_V6_vnavgb_128B, "v65,v66" }, 2553 { Hexagon::BI__builtin_HEXAGON_V6_vnavgh, "v60,v62,v65,v66" }, 2554 { Hexagon::BI__builtin_HEXAGON_V6_vnavgh_128B, "v60,v62,v65,v66" }, 2555 { Hexagon::BI__builtin_HEXAGON_V6_vnavgub, "v60,v62,v65,v66" }, 2556 { Hexagon::BI__builtin_HEXAGON_V6_vnavgub_128B, "v60,v62,v65,v66" }, 2557 { Hexagon::BI__builtin_HEXAGON_V6_vnavgw, "v60,v62,v65,v66" }, 2558 { Hexagon::BI__builtin_HEXAGON_V6_vnavgw_128B, "v60,v62,v65,v66" }, 2559 { Hexagon::BI__builtin_HEXAGON_V6_vnormamth, "v60,v62,v65,v66" }, 2560 { Hexagon::BI__builtin_HEXAGON_V6_vnormamth_128B, "v60,v62,v65,v66" }, 2561 { Hexagon::BI__builtin_HEXAGON_V6_vnormamtw, "v60,v62,v65,v66" }, 2562 { Hexagon::BI__builtin_HEXAGON_V6_vnormamtw_128B, "v60,v62,v65,v66" }, 2563 { Hexagon::BI__builtin_HEXAGON_V6_vnot, "v60,v62,v65,v66" }, 2564 { Hexagon::BI__builtin_HEXAGON_V6_vnot_128B, "v60,v62,v65,v66" }, 2565 { Hexagon::BI__builtin_HEXAGON_V6_vor, "v60,v62,v65,v66" }, 2566 { Hexagon::BI__builtin_HEXAGON_V6_vor_128B, "v60,v62,v65,v66" }, 2567 { Hexagon::BI__builtin_HEXAGON_V6_vpackeb, "v60,v62,v65,v66" }, 2568 { Hexagon::BI__builtin_HEXAGON_V6_vpackeb_128B, "v60,v62,v65,v66" }, 2569 { Hexagon::BI__builtin_HEXAGON_V6_vpackeh, "v60,v62,v65,v66" }, 2570 { 
Hexagon::BI__builtin_HEXAGON_V6_vpackeh_128B, "v60,v62,v65,v66" }, 2571 { Hexagon::BI__builtin_HEXAGON_V6_vpackhb_sat, "v60,v62,v65,v66" }, 2572 { Hexagon::BI__builtin_HEXAGON_V6_vpackhb_sat_128B, "v60,v62,v65,v66" }, 2573 { Hexagon::BI__builtin_HEXAGON_V6_vpackhub_sat, "v60,v62,v65,v66" }, 2574 { Hexagon::BI__builtin_HEXAGON_V6_vpackhub_sat_128B, "v60,v62,v65,v66" }, 2575 { Hexagon::BI__builtin_HEXAGON_V6_vpackob, "v60,v62,v65,v66" }, 2576 { Hexagon::BI__builtin_HEXAGON_V6_vpackob_128B, "v60,v62,v65,v66" }, 2577 { Hexagon::BI__builtin_HEXAGON_V6_vpackoh, "v60,v62,v65,v66" }, 2578 { Hexagon::BI__builtin_HEXAGON_V6_vpackoh_128B, "v60,v62,v65,v66" }, 2579 { Hexagon::BI__builtin_HEXAGON_V6_vpackwh_sat, "v60,v62,v65,v66" }, 2580 { Hexagon::BI__builtin_HEXAGON_V6_vpackwh_sat_128B, "v60,v62,v65,v66" }, 2581 { Hexagon::BI__builtin_HEXAGON_V6_vpackwuh_sat, "v60,v62,v65,v66" }, 2582 { Hexagon::BI__builtin_HEXAGON_V6_vpackwuh_sat_128B, "v60,v62,v65,v66" }, 2583 { Hexagon::BI__builtin_HEXAGON_V6_vpopcounth, "v60,v62,v65,v66" }, 2584 { Hexagon::BI__builtin_HEXAGON_V6_vpopcounth_128B, "v60,v62,v65,v66" }, 2585 { Hexagon::BI__builtin_HEXAGON_V6_vprefixqb, "v65,v66" }, 2586 { Hexagon::BI__builtin_HEXAGON_V6_vprefixqb_128B, "v65,v66" }, 2587 { Hexagon::BI__builtin_HEXAGON_V6_vprefixqh, "v65,v66" }, 2588 { Hexagon::BI__builtin_HEXAGON_V6_vprefixqh_128B, "v65,v66" }, 2589 { Hexagon::BI__builtin_HEXAGON_V6_vprefixqw, "v65,v66" }, 2590 { Hexagon::BI__builtin_HEXAGON_V6_vprefixqw_128B, "v65,v66" }, 2591 { Hexagon::BI__builtin_HEXAGON_V6_vrdelta, "v60,v62,v65,v66" }, 2592 { Hexagon::BI__builtin_HEXAGON_V6_vrdelta_128B, "v60,v62,v65,v66" }, 2593 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt, "v65" }, 2594 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt_128B, "v65" }, 2595 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt_acc, "v65" }, 2596 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybub_rtt_acc_128B, "v65" }, 2597 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus, "v60,v62,v65,v66" }, 2598 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus_128B, "v60,v62,v65,v66" }, 2599 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus_acc, "v60,v62,v65,v66" }, 2600 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybus_acc_128B, "v60,v62,v65,v66" }, 2601 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, "v60,v62,v65,v66" }, 2602 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, "v60,v62,v65,v66" }, 2603 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, "v60,v62,v65,v66" }, 2604 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B, "v60,v62,v65,v66" }, 2605 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv, "v60,v62,v65,v66" }, 2606 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv_128B, "v60,v62,v65,v66" }, 2607 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv_acc, "v60,v62,v65,v66" }, 2608 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusv_acc_128B, "v60,v62,v65,v66" }, 2609 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv, "v60,v62,v65,v66" }, 2610 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv_128B, "v60,v62,v65,v66" }, 2611 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv_acc, "v60,v62,v65,v66" }, 2612 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybv_acc_128B, "v60,v62,v65,v66" }, 2613 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub, "v60,v62,v65,v66" }, 2614 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_128B, "v60,v62,v65,v66" }, 2615 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_acc, "v60,v62,v65,v66" }, 2616 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_acc_128B, "v60,v62,v65,v66" }, 2617 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, "v60,v62,v65,v66" }, 2618 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, "v60,v62,v65,v66" }, 2619 { 
Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, "v60,v62,v65,v66" }, 2620 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B, "v60,v62,v65,v66" }, 2621 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt, "v65" }, 2622 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt_128B, "v65" }, 2623 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt_acc, "v65" }, 2624 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyub_rtt_acc_128B, "v65" }, 2625 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv, "v60,v62,v65,v66" }, 2626 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv_128B, "v60,v62,v65,v66" }, 2627 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv_acc, "v60,v62,v65,v66" }, 2628 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubv_acc_128B, "v60,v62,v65,v66" }, 2629 { Hexagon::BI__builtin_HEXAGON_V6_vror, "v60,v62,v65,v66" }, 2630 { Hexagon::BI__builtin_HEXAGON_V6_vror_128B, "v60,v62,v65,v66" }, 2631 { Hexagon::BI__builtin_HEXAGON_V6_vrotr, "v66" }, 2632 { Hexagon::BI__builtin_HEXAGON_V6_vrotr_128B, "v66" }, 2633 { Hexagon::BI__builtin_HEXAGON_V6_vroundhb, "v60,v62,v65,v66" }, 2634 { Hexagon::BI__builtin_HEXAGON_V6_vroundhb_128B, "v60,v62,v65,v66" }, 2635 { Hexagon::BI__builtin_HEXAGON_V6_vroundhub, "v60,v62,v65,v66" }, 2636 { Hexagon::BI__builtin_HEXAGON_V6_vroundhub_128B, "v60,v62,v65,v66" }, 2637 { Hexagon::BI__builtin_HEXAGON_V6_vrounduhub, "v62,v65,v66" }, 2638 { Hexagon::BI__builtin_HEXAGON_V6_vrounduhub_128B, "v62,v65,v66" }, 2639 { Hexagon::BI__builtin_HEXAGON_V6_vrounduwuh, "v62,v65,v66" }, 2640 { Hexagon::BI__builtin_HEXAGON_V6_vrounduwuh_128B, "v62,v65,v66" }, 2641 { Hexagon::BI__builtin_HEXAGON_V6_vroundwh, "v60,v62,v65,v66" }, 2642 { Hexagon::BI__builtin_HEXAGON_V6_vroundwh_128B, "v60,v62,v65,v66" }, 2643 { Hexagon::BI__builtin_HEXAGON_V6_vroundwuh, "v60,v62,v65,v66" }, 2644 { Hexagon::BI__builtin_HEXAGON_V6_vroundwuh_128B, "v60,v62,v65,v66" }, 2645 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, "v60,v62,v65,v66" }, 2646 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, "v60,v62,v65,v66" }, 2647 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, "v60,v62,v65,v66" }, 2648 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B, "v60,v62,v65,v66" }, 2649 { Hexagon::BI__builtin_HEXAGON_V6_vsatdw, "v66" }, 2650 { Hexagon::BI__builtin_HEXAGON_V6_vsatdw_128B, "v66" }, 2651 { Hexagon::BI__builtin_HEXAGON_V6_vsathub, "v60,v62,v65,v66" }, 2652 { Hexagon::BI__builtin_HEXAGON_V6_vsathub_128B, "v60,v62,v65,v66" }, 2653 { Hexagon::BI__builtin_HEXAGON_V6_vsatuwuh, "v62,v65,v66" }, 2654 { Hexagon::BI__builtin_HEXAGON_V6_vsatuwuh_128B, "v62,v65,v66" }, 2655 { Hexagon::BI__builtin_HEXAGON_V6_vsatwh, "v60,v62,v65,v66" }, 2656 { Hexagon::BI__builtin_HEXAGON_V6_vsatwh_128B, "v60,v62,v65,v66" }, 2657 { Hexagon::BI__builtin_HEXAGON_V6_vsb, "v60,v62,v65,v66" }, 2658 { Hexagon::BI__builtin_HEXAGON_V6_vsb_128B, "v60,v62,v65,v66" }, 2659 { Hexagon::BI__builtin_HEXAGON_V6_vsh, "v60,v62,v65,v66" }, 2660 { Hexagon::BI__builtin_HEXAGON_V6_vsh_128B, "v60,v62,v65,v66" }, 2661 { Hexagon::BI__builtin_HEXAGON_V6_vshufeh, "v60,v62,v65,v66" }, 2662 { Hexagon::BI__builtin_HEXAGON_V6_vshufeh_128B, "v60,v62,v65,v66" }, 2663 { Hexagon::BI__builtin_HEXAGON_V6_vshuffb, "v60,v62,v65,v66" }, 2664 { Hexagon::BI__builtin_HEXAGON_V6_vshuffb_128B, "v60,v62,v65,v66" }, 2665 { Hexagon::BI__builtin_HEXAGON_V6_vshuffeb, "v60,v62,v65,v66" }, 2666 { Hexagon::BI__builtin_HEXAGON_V6_vshuffeb_128B, "v60,v62,v65,v66" }, 2667 { Hexagon::BI__builtin_HEXAGON_V6_vshuffh, "v60,v62,v65,v66" }, 2668 { Hexagon::BI__builtin_HEXAGON_V6_vshuffh_128B, "v60,v62,v65,v66" }, 2669 { Hexagon::BI__builtin_HEXAGON_V6_vshuffob, 
"v60,v62,v65,v66" }, 2670 { Hexagon::BI__builtin_HEXAGON_V6_vshuffob_128B, "v60,v62,v65,v66" }, 2671 { Hexagon::BI__builtin_HEXAGON_V6_vshuffvdd, "v60,v62,v65,v66" }, 2672 { Hexagon::BI__builtin_HEXAGON_V6_vshuffvdd_128B, "v60,v62,v65,v66" }, 2673 { Hexagon::BI__builtin_HEXAGON_V6_vshufoeb, "v60,v62,v65,v66" }, 2674 { Hexagon::BI__builtin_HEXAGON_V6_vshufoeb_128B, "v60,v62,v65,v66" }, 2675 { Hexagon::BI__builtin_HEXAGON_V6_vshufoeh, "v60,v62,v65,v66" }, 2676 { Hexagon::BI__builtin_HEXAGON_V6_vshufoeh_128B, "v60,v62,v65,v66" }, 2677 { Hexagon::BI__builtin_HEXAGON_V6_vshufoh, "v60,v62,v65,v66" }, 2678 { Hexagon::BI__builtin_HEXAGON_V6_vshufoh_128B, "v60,v62,v65,v66" }, 2679 { Hexagon::BI__builtin_HEXAGON_V6_vsubb, "v60,v62,v65,v66" }, 2680 { Hexagon::BI__builtin_HEXAGON_V6_vsubb_128B, "v60,v62,v65,v66" }, 2681 { Hexagon::BI__builtin_HEXAGON_V6_vsubb_dv, "v60,v62,v65,v66" }, 2682 { Hexagon::BI__builtin_HEXAGON_V6_vsubb_dv_128B, "v60,v62,v65,v66" }, 2683 { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat, "v62,v65,v66" }, 2684 { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat_128B, "v62,v65,v66" }, 2685 { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat_dv, "v62,v65,v66" }, 2686 { Hexagon::BI__builtin_HEXAGON_V6_vsubbsat_dv_128B, "v62,v65,v66" }, 2687 { Hexagon::BI__builtin_HEXAGON_V6_vsubcarry, "v62,v65,v66" }, 2688 { Hexagon::BI__builtin_HEXAGON_V6_vsubcarry_128B, "v62,v65,v66" }, 2689 { Hexagon::BI__builtin_HEXAGON_V6_vsubh, "v60,v62,v65,v66" }, 2690 { Hexagon::BI__builtin_HEXAGON_V6_vsubh_128B, "v60,v62,v65,v66" }, 2691 { Hexagon::BI__builtin_HEXAGON_V6_vsubh_dv, "v60,v62,v65,v66" }, 2692 { Hexagon::BI__builtin_HEXAGON_V6_vsubh_dv_128B, "v60,v62,v65,v66" }, 2693 { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat, "v60,v62,v65,v66" }, 2694 { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat_128B, "v60,v62,v65,v66" }, 2695 { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat_dv, "v60,v62,v65,v66" }, 2696 { Hexagon::BI__builtin_HEXAGON_V6_vsubhsat_dv_128B, "v60,v62,v65,v66" }, 2697 { Hexagon::BI__builtin_HEXAGON_V6_vsubhw, "v60,v62,v65,v66" }, 2698 { Hexagon::BI__builtin_HEXAGON_V6_vsubhw_128B, "v60,v62,v65,v66" }, 2699 { Hexagon::BI__builtin_HEXAGON_V6_vsububh, "v60,v62,v65,v66" }, 2700 { Hexagon::BI__builtin_HEXAGON_V6_vsububh_128B, "v60,v62,v65,v66" }, 2701 { Hexagon::BI__builtin_HEXAGON_V6_vsububsat, "v60,v62,v65,v66" }, 2702 { Hexagon::BI__builtin_HEXAGON_V6_vsububsat_128B, "v60,v62,v65,v66" }, 2703 { Hexagon::BI__builtin_HEXAGON_V6_vsububsat_dv, "v60,v62,v65,v66" }, 2704 { Hexagon::BI__builtin_HEXAGON_V6_vsububsat_dv_128B, "v60,v62,v65,v66" }, 2705 { Hexagon::BI__builtin_HEXAGON_V6_vsubububb_sat, "v62,v65,v66" }, 2706 { Hexagon::BI__builtin_HEXAGON_V6_vsubububb_sat_128B, "v62,v65,v66" }, 2707 { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat, "v60,v62,v65,v66" }, 2708 { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat_128B, "v60,v62,v65,v66" }, 2709 { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat_dv, "v60,v62,v65,v66" }, 2710 { Hexagon::BI__builtin_HEXAGON_V6_vsubuhsat_dv_128B, "v60,v62,v65,v66" }, 2711 { Hexagon::BI__builtin_HEXAGON_V6_vsubuhw, "v60,v62,v65,v66" }, 2712 { Hexagon::BI__builtin_HEXAGON_V6_vsubuhw_128B, "v60,v62,v65,v66" }, 2713 { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat, "v62,v65,v66" }, 2714 { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat_128B, "v62,v65,v66" }, 2715 { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat_dv, "v62,v65,v66" }, 2716 { Hexagon::BI__builtin_HEXAGON_V6_vsubuwsat_dv_128B, "v62,v65,v66" }, 2717 { Hexagon::BI__builtin_HEXAGON_V6_vsubw, "v60,v62,v65,v66" }, 2718 { Hexagon::BI__builtin_HEXAGON_V6_vsubw_128B, "v60,v62,v65,v66" 
}, 2719 { Hexagon::BI__builtin_HEXAGON_V6_vsubw_dv, "v60,v62,v65,v66" }, 2720 { Hexagon::BI__builtin_HEXAGON_V6_vsubw_dv_128B, "v60,v62,v65,v66" }, 2721 { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat, "v60,v62,v65,v66" }, 2722 { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat_128B, "v60,v62,v65,v66" }, 2723 { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat_dv, "v60,v62,v65,v66" }, 2724 { Hexagon::BI__builtin_HEXAGON_V6_vsubwsat_dv_128B, "v60,v62,v65,v66" }, 2725 { Hexagon::BI__builtin_HEXAGON_V6_vswap, "v60,v62,v65,v66" }, 2726 { Hexagon::BI__builtin_HEXAGON_V6_vswap_128B, "v60,v62,v65,v66" }, 2727 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb, "v60,v62,v65,v66" }, 2728 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb_128B, "v60,v62,v65,v66" }, 2729 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb_acc, "v60,v62,v65,v66" }, 2730 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyb_acc_128B, "v60,v62,v65,v66" }, 2731 { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus, "v60,v62,v65,v66" }, 2732 { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus_128B, "v60,v62,v65,v66" }, 2733 { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus_acc, "v60,v62,v65,v66" }, 2734 { Hexagon::BI__builtin_HEXAGON_V6_vtmpybus_acc_128B, "v60,v62,v65,v66" }, 2735 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb, "v60,v62,v65,v66" }, 2736 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb_128B, "v60,v62,v65,v66" }, 2737 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb_acc, "v60,v62,v65,v66" }, 2738 { Hexagon::BI__builtin_HEXAGON_V6_vtmpyhb_acc_128B, "v60,v62,v65,v66" }, 2739 { Hexagon::BI__builtin_HEXAGON_V6_vunpackb, "v60,v62,v65,v66" }, 2740 { Hexagon::BI__builtin_HEXAGON_V6_vunpackb_128B, "v60,v62,v65,v66" }, 2741 { Hexagon::BI__builtin_HEXAGON_V6_vunpackh, "v60,v62,v65,v66" }, 2742 { Hexagon::BI__builtin_HEXAGON_V6_vunpackh_128B, "v60,v62,v65,v66" }, 2743 { Hexagon::BI__builtin_HEXAGON_V6_vunpackob, "v60,v62,v65,v66" }, 2744 { Hexagon::BI__builtin_HEXAGON_V6_vunpackob_128B, "v60,v62,v65,v66" }, 2745 { Hexagon::BI__builtin_HEXAGON_V6_vunpackoh, "v60,v62,v65,v66" }, 2746 { Hexagon::BI__builtin_HEXAGON_V6_vunpackoh_128B, "v60,v62,v65,v66" }, 2747 { Hexagon::BI__builtin_HEXAGON_V6_vunpackub, "v60,v62,v65,v66" }, 2748 { Hexagon::BI__builtin_HEXAGON_V6_vunpackub_128B, "v60,v62,v65,v66" }, 2749 { Hexagon::BI__builtin_HEXAGON_V6_vunpackuh, "v60,v62,v65,v66" }, 2750 { Hexagon::BI__builtin_HEXAGON_V6_vunpackuh_128B, "v60,v62,v65,v66" }, 2751 { Hexagon::BI__builtin_HEXAGON_V6_vxor, "v60,v62,v65,v66" }, 2752 { Hexagon::BI__builtin_HEXAGON_V6_vxor_128B, "v60,v62,v65,v66" }, 2753 { Hexagon::BI__builtin_HEXAGON_V6_vzb, "v60,v62,v65,v66" }, 2754 { Hexagon::BI__builtin_HEXAGON_V6_vzb_128B, "v60,v62,v65,v66" }, 2755 { Hexagon::BI__builtin_HEXAGON_V6_vzh, "v60,v62,v65,v66" }, 2756 { Hexagon::BI__builtin_HEXAGON_V6_vzh_128B, "v60,v62,v65,v66" }, 2757 }; 2758 2759 // Sort the tables on first execution so we can binary search them. 
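  // (Clarifying note, not from the original source: each table entry pairs a
  // builtin with the comma-separated list of Hexagon architecture versions
  // that provide it. The checks below split that string and compare it
  // against the target CPU name or the target's HVX version features, so a
  // builtin tagged only "v66" above, such as vrotr, is diagnosed on targets
  // that report an earlier version.)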
2760 auto SortCmp = [](const BuiltinAndString &LHS, const BuiltinAndString &RHS) { 2761 return LHS.BuiltinID < RHS.BuiltinID; 2762 }; 2763 static const bool SortOnce = 2764 (llvm::sort(ValidCPU, SortCmp), 2765 llvm::sort(ValidHVX, SortCmp), true); 2766 (void)SortOnce; 2767 auto LowerBoundCmp = [](const BuiltinAndString &BI, unsigned BuiltinID) { 2768 return BI.BuiltinID < BuiltinID; 2769 }; 2770 2771 const TargetInfo &TI = Context.getTargetInfo(); 2772 2773 const BuiltinAndString *FC = 2774 llvm::lower_bound(ValidCPU, BuiltinID, LowerBoundCmp); 2775 if (FC != std::end(ValidCPU) && FC->BuiltinID == BuiltinID) { 2776 const TargetOptions &Opts = TI.getTargetOpts(); 2777 StringRef CPU = Opts.CPU; 2778 if (!CPU.empty()) { 2779 assert(CPU.startswith("hexagon") && "Unexpected CPU name"); 2780 CPU.consume_front("hexagon"); 2781 SmallVector<StringRef, 3> CPUs; 2782 StringRef(FC->Str).split(CPUs, ','); 2783 if (llvm::none_of(CPUs, [CPU](StringRef S) { return S == CPU; })) 2784 return Diag(TheCall->getBeginLoc(), 2785 diag::err_hexagon_builtin_unsupported_cpu); 2786 } 2787 } 2788 2789 const BuiltinAndString *FH = 2790 llvm::lower_bound(ValidHVX, BuiltinID, LowerBoundCmp); 2791 if (FH != std::end(ValidHVX) && FH->BuiltinID == BuiltinID) { 2792 if (!TI.hasFeature("hvx")) 2793 return Diag(TheCall->getBeginLoc(), 2794 diag::err_hexagon_builtin_requires_hvx); 2795 2796 SmallVector<StringRef, 3> HVXs; 2797 StringRef(FH->Str).split(HVXs, ','); 2798 bool IsValid = llvm::any_of(HVXs, 2799 [&TI] (StringRef V) { 2800 std::string F = "hvx" + V.str(); 2801 return TI.hasFeature(F); 2802 }); 2803 if (!IsValid) 2804 return Diag(TheCall->getBeginLoc(), 2805 diag::err_hexagon_builtin_unsupported_hvx); 2806 } 2807 2808 return false; 2809 } 2810 2811 bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) { 2812 struct ArgInfo { 2813 uint8_t OpNum; 2814 bool IsSigned; 2815 uint8_t BitWidth; 2816 uint8_t Align; 2817 }; 2818 struct BuiltinInfo { 2819 unsigned BuiltinID; 2820 ArgInfo Infos[2]; 2821 }; 2822 2823 static BuiltinInfo Infos[] = { 2824 { Hexagon::BI__builtin_circ_ldd, {{ 3, true, 4, 3 }} }, 2825 { Hexagon::BI__builtin_circ_ldw, {{ 3, true, 4, 2 }} }, 2826 { Hexagon::BI__builtin_circ_ldh, {{ 3, true, 4, 1 }} }, 2827 { Hexagon::BI__builtin_circ_lduh, {{ 3, true, 4, 0 }} }, 2828 { Hexagon::BI__builtin_circ_ldb, {{ 3, true, 4, 0 }} }, 2829 { Hexagon::BI__builtin_circ_ldub, {{ 3, true, 4, 0 }} }, 2830 { Hexagon::BI__builtin_circ_std, {{ 3, true, 4, 3 }} }, 2831 { Hexagon::BI__builtin_circ_stw, {{ 3, true, 4, 2 }} }, 2832 { Hexagon::BI__builtin_circ_sth, {{ 3, true, 4, 1 }} }, 2833 { Hexagon::BI__builtin_circ_sthhi, {{ 3, true, 4, 1 }} }, 2834 { Hexagon::BI__builtin_circ_stb, {{ 3, true, 4, 0 }} }, 2835 2836 { Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci, {{ 1, true, 4, 0 }} }, 2837 { Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci, {{ 1, true, 4, 0 }} }, 2838 { Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci, {{ 1, true, 4, 1 }} }, 2839 { Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci, {{ 1, true, 4, 1 }} }, 2840 { Hexagon::BI__builtin_HEXAGON_L2_loadri_pci, {{ 1, true, 4, 2 }} }, 2841 { Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci, {{ 1, true, 4, 3 }} }, 2842 { Hexagon::BI__builtin_HEXAGON_S2_storerb_pci, {{ 1, true, 4, 0 }} }, 2843 { Hexagon::BI__builtin_HEXAGON_S2_storerh_pci, {{ 1, true, 4, 1 }} }, 2844 { Hexagon::BI__builtin_HEXAGON_S2_storerf_pci, {{ 1, true, 4, 1 }} }, 2845 { Hexagon::BI__builtin_HEXAGON_S2_storeri_pci, {{ 1, true, 4, 2 }} }, 2846 { Hexagon::BI__builtin_HEXAGON_S2_storerd_pci, {{ 1, 
true, 4, 3 }} }, 2847 2848 { Hexagon::BI__builtin_HEXAGON_A2_combineii, {{ 1, true, 8, 0 }} }, 2849 { Hexagon::BI__builtin_HEXAGON_A2_tfrih, {{ 1, false, 16, 0 }} }, 2850 { Hexagon::BI__builtin_HEXAGON_A2_tfril, {{ 1, false, 16, 0 }} }, 2851 { Hexagon::BI__builtin_HEXAGON_A2_tfrpi, {{ 0, true, 8, 0 }} }, 2852 { Hexagon::BI__builtin_HEXAGON_A4_bitspliti, {{ 1, false, 5, 0 }} }, 2853 { Hexagon::BI__builtin_HEXAGON_A4_cmpbeqi, {{ 1, false, 8, 0 }} }, 2854 { Hexagon::BI__builtin_HEXAGON_A4_cmpbgti, {{ 1, true, 8, 0 }} }, 2855 { Hexagon::BI__builtin_HEXAGON_A4_cround_ri, {{ 1, false, 5, 0 }} }, 2856 { Hexagon::BI__builtin_HEXAGON_A4_round_ri, {{ 1, false, 5, 0 }} }, 2857 { Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat, {{ 1, false, 5, 0 }} }, 2858 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbeqi, {{ 1, false, 8, 0 }} }, 2859 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgti, {{ 1, true, 8, 0 }} }, 2860 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgtui, {{ 1, false, 7, 0 }} }, 2861 { Hexagon::BI__builtin_HEXAGON_A4_vcmpheqi, {{ 1, true, 8, 0 }} }, 2862 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgti, {{ 1, true, 8, 0 }} }, 2863 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgtui, {{ 1, false, 7, 0 }} }, 2864 { Hexagon::BI__builtin_HEXAGON_A4_vcmpweqi, {{ 1, true, 8, 0 }} }, 2865 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgti, {{ 1, true, 8, 0 }} }, 2866 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgtui, {{ 1, false, 7, 0 }} }, 2867 { Hexagon::BI__builtin_HEXAGON_C2_bitsclri, {{ 1, false, 6, 0 }} }, 2868 { Hexagon::BI__builtin_HEXAGON_C2_muxii, {{ 2, true, 8, 0 }} }, 2869 { Hexagon::BI__builtin_HEXAGON_C4_nbitsclri, {{ 1, false, 6, 0 }} }, 2870 { Hexagon::BI__builtin_HEXAGON_F2_dfclass, {{ 1, false, 5, 0 }} }, 2871 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_n, {{ 0, false, 10, 0 }} }, 2872 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_p, {{ 0, false, 10, 0 }} }, 2873 { Hexagon::BI__builtin_HEXAGON_F2_sfclass, {{ 1, false, 5, 0 }} }, 2874 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_n, {{ 0, false, 10, 0 }} }, 2875 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_p, {{ 0, false, 10, 0 }} }, 2876 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addi, {{ 2, false, 6, 0 }} }, 2877 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addr_u2, {{ 1, false, 6, 2 }} }, 2878 { Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri, {{ 2, false, 3, 0 }} }, 2879 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc, {{ 2, false, 6, 0 }} }, 2880 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and, {{ 2, false, 6, 0 }} }, 2881 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p, {{ 1, false, 6, 0 }} }, 2882 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac, {{ 2, false, 6, 0 }} }, 2883 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or, {{ 2, false, 6, 0 }} }, 2884 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc, {{ 2, false, 6, 0 }} }, 2885 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc, {{ 2, false, 5, 0 }} }, 2886 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and, {{ 2, false, 5, 0 }} }, 2887 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r, {{ 1, false, 5, 0 }} }, 2888 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac, {{ 2, false, 5, 0 }} }, 2889 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or, {{ 2, false, 5, 0 }} }, 2890 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat, {{ 1, false, 5, 0 }} }, 2891 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc, {{ 2, false, 5, 0 }} }, 2892 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh, {{ 1, false, 4, 0 }} }, 2893 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw, {{ 1, false, 5, 0 }} }, 2894 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc, {{ 2, false, 6, 0 }} }, 2895 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and, {{ 2, false, 6, 0 
}} }, 2896 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p, {{ 1, false, 6, 0 }} }, 2897 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac, {{ 2, false, 6, 0 }} }, 2898 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or, {{ 2, false, 6, 0 }} }, 2899 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax, 2900 {{ 1, false, 6, 0 }} }, 2901 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd, {{ 1, false, 6, 0 }} }, 2902 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc, {{ 2, false, 5, 0 }} }, 2903 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and, {{ 2, false, 5, 0 }} }, 2904 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r, {{ 1, false, 5, 0 }} }, 2905 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac, {{ 2, false, 5, 0 }} }, 2906 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or, {{ 2, false, 5, 0 }} }, 2907 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax, 2908 {{ 1, false, 5, 0 }} }, 2909 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd, {{ 1, false, 5, 0 }} }, 2910 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun, {{ 1, false, 5, 0 }} }, 2911 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh, {{ 1, false, 4, 0 }} }, 2912 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw, {{ 1, false, 5, 0 }} }, 2913 { Hexagon::BI__builtin_HEXAGON_S2_clrbit_i, {{ 1, false, 5, 0 }} }, 2914 { Hexagon::BI__builtin_HEXAGON_S2_extractu, {{ 1, false, 5, 0 }, 2915 { 2, false, 5, 0 }} }, 2916 { Hexagon::BI__builtin_HEXAGON_S2_extractup, {{ 1, false, 6, 0 }, 2917 { 2, false, 6, 0 }} }, 2918 { Hexagon::BI__builtin_HEXAGON_S2_insert, {{ 2, false, 5, 0 }, 2919 { 3, false, 5, 0 }} }, 2920 { Hexagon::BI__builtin_HEXAGON_S2_insertp, {{ 2, false, 6, 0 }, 2921 { 3, false, 6, 0 }} }, 2922 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc, {{ 2, false, 6, 0 }} }, 2923 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and, {{ 2, false, 6, 0 }} }, 2924 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p, {{ 1, false, 6, 0 }} }, 2925 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac, {{ 2, false, 6, 0 }} }, 2926 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or, {{ 2, false, 6, 0 }} }, 2927 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc, {{ 2, false, 6, 0 }} }, 2928 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc, {{ 2, false, 5, 0 }} }, 2929 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and, {{ 2, false, 5, 0 }} }, 2930 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r, {{ 1, false, 5, 0 }} }, 2931 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac, {{ 2, false, 5, 0 }} }, 2932 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or, {{ 2, false, 5, 0 }} }, 2933 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc, {{ 2, false, 5, 0 }} }, 2934 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh, {{ 1, false, 4, 0 }} }, 2935 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw, {{ 1, false, 5, 0 }} }, 2936 { Hexagon::BI__builtin_HEXAGON_S2_setbit_i, {{ 1, false, 5, 0 }} }, 2937 { Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax, 2938 {{ 2, false, 4, 0 }, 2939 { 3, false, 5, 0 }} }, 2940 { Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax, 2941 {{ 2, false, 4, 0 }, 2942 { 3, false, 5, 0 }} }, 2943 { Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax, 2944 {{ 2, false, 4, 0 }, 2945 { 3, false, 5, 0 }} }, 2946 { Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax, 2947 {{ 2, false, 4, 0 }, 2948 { 3, false, 5, 0 }} }, 2949 { Hexagon::BI__builtin_HEXAGON_S2_togglebit_i, {{ 1, false, 5, 0 }} }, 2950 { Hexagon::BI__builtin_HEXAGON_S2_tstbit_i, {{ 1, false, 5, 0 }} }, 2951 { Hexagon::BI__builtin_HEXAGON_S2_valignib, {{ 2, false, 3, 0 }} }, 2952 { Hexagon::BI__builtin_HEXAGON_S2_vspliceib, {{ 2, false, 3, 0 }} }, 2953 { 
Hexagon::BI__builtin_HEXAGON_S4_addi_asl_ri, {{ 2, false, 5, 0 }} }, 2954 { Hexagon::BI__builtin_HEXAGON_S4_addi_lsr_ri, {{ 2, false, 5, 0 }} }, 2955 { Hexagon::BI__builtin_HEXAGON_S4_andi_asl_ri, {{ 2, false, 5, 0 }} }, 2956 { Hexagon::BI__builtin_HEXAGON_S4_andi_lsr_ri, {{ 2, false, 5, 0 }} }, 2957 { Hexagon::BI__builtin_HEXAGON_S4_clbaddi, {{ 1, true , 6, 0 }} }, 2958 { Hexagon::BI__builtin_HEXAGON_S4_clbpaddi, {{ 1, true, 6, 0 }} }, 2959 { Hexagon::BI__builtin_HEXAGON_S4_extract, {{ 1, false, 5, 0 }, 2960 { 2, false, 5, 0 }} }, 2961 { Hexagon::BI__builtin_HEXAGON_S4_extractp, {{ 1, false, 6, 0 }, 2962 { 2, false, 6, 0 }} }, 2963 { Hexagon::BI__builtin_HEXAGON_S4_lsli, {{ 0, true, 6, 0 }} }, 2964 { Hexagon::BI__builtin_HEXAGON_S4_ntstbit_i, {{ 1, false, 5, 0 }} }, 2965 { Hexagon::BI__builtin_HEXAGON_S4_ori_asl_ri, {{ 2, false, 5, 0 }} }, 2966 { Hexagon::BI__builtin_HEXAGON_S4_ori_lsr_ri, {{ 2, false, 5, 0 }} }, 2967 { Hexagon::BI__builtin_HEXAGON_S4_subi_asl_ri, {{ 2, false, 5, 0 }} }, 2968 { Hexagon::BI__builtin_HEXAGON_S4_subi_lsr_ri, {{ 2, false, 5, 0 }} }, 2969 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate_acc, {{ 3, false, 2, 0 }} }, 2970 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate, {{ 2, false, 2, 0 }} }, 2971 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax, 2972 {{ 1, false, 4, 0 }} }, 2973 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_sat, {{ 1, false, 4, 0 }} }, 2974 { Hexagon::BI__builtin_HEXAGON_S5_vasrhrnd_goodsyntax, 2975 {{ 1, false, 4, 0 }} }, 2976 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, {{ 1, false, 6, 0 }} }, 2977 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, {{ 2, false, 6, 0 }} }, 2978 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, {{ 2, false, 6, 0 }} }, 2979 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, {{ 2, false, 6, 0 }} }, 2980 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, {{ 2, false, 6, 0 }} }, 2981 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, {{ 2, false, 6, 0 }} }, 2982 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, {{ 1, false, 5, 0 }} }, 2983 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, {{ 2, false, 5, 0 }} }, 2984 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, {{ 2, false, 5, 0 }} }, 2985 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, {{ 2, false, 5, 0 }} }, 2986 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, {{ 2, false, 5, 0 }} }, 2987 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, {{ 2, false, 5, 0 }} }, 2988 { Hexagon::BI__builtin_HEXAGON_V6_valignbi, {{ 2, false, 3, 0 }} }, 2989 { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, {{ 2, false, 3, 0 }} }, 2990 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, {{ 2, false, 3, 0 }} }, 2991 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, {{ 2, false, 3, 0 }} }, 2992 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, {{ 2, false, 1, 0 }} }, 2993 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, {{ 2, false, 1, 0 }} }, 2994 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, {{ 3, false, 1, 0 }} }, 2995 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B, 2996 {{ 3, false, 1, 0 }} }, 2997 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, {{ 2, false, 1, 0 }} }, 2998 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, {{ 2, false, 1, 0 }} }, 2999 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, {{ 3, false, 1, 0 }} }, 3000 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B, 3001 {{ 3, false, 1, 0 }} }, 3002 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, {{ 2, false, 1, 0 }} }, 3003 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, {{ 2, false, 1, 0 }} }, 3004 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, {{ 3, false, 1, 0 
}} }, 3005 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B, 3006 {{ 3, false, 1, 0 }} }, 3007 }; 3008 3009 // Use a dynamically initialized static to sort the table exactly once on 3010 // first run. 3011 static const bool SortOnce = 3012 (llvm::sort(Infos, 3013 [](const BuiltinInfo &LHS, const BuiltinInfo &RHS) { 3014 return LHS.BuiltinID < RHS.BuiltinID; 3015 }), 3016 true); 3017 (void)SortOnce; 3018 3019 const BuiltinInfo *F = llvm::partition_point( 3020 Infos, [=](const BuiltinInfo &BI) { return BI.BuiltinID < BuiltinID; }); 3021 if (F == std::end(Infos) || F->BuiltinID != BuiltinID) 3022 return false; 3023 3024 bool Error = false; 3025 3026 for (const ArgInfo &A : F->Infos) { 3027 // Ignore empty ArgInfo elements. 3028 if (A.BitWidth == 0) 3029 continue; 3030 3031 int32_t Min = A.IsSigned ? -(1 << (A.BitWidth - 1)) : 0; 3032 int32_t Max = (1 << (A.IsSigned ? A.BitWidth - 1 : A.BitWidth)) - 1; 3033 if (!A.Align) { 3034 Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max); 3035 } else { 3036 unsigned M = 1 << A.Align; 3037 Min *= M; 3038 Max *= M; 3039 Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max) | 3040 SemaBuiltinConstantArgMultiple(TheCall, A.OpNum, M); 3041 } 3042 } 3043 return Error; 3044 } 3045 3046 bool Sema::CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, 3047 CallExpr *TheCall) { 3048 return CheckHexagonBuiltinCpu(BuiltinID, TheCall) || 3049 CheckHexagonBuiltinArgument(BuiltinID, TheCall); 3050 } 3051 3052 3053 // CheckMipsBuiltinFunctionCall - Checks the constant value passed to the 3054 // intrinsic is correct. The switch statement is ordered by DSP, MSA. The 3055 // ordering for DSP is unspecified. MSA is ordered by the data format used 3056 // by the underlying instruction i.e., df/m, df/n and then by size. 3057 // 3058 // FIXME: The size tests here should instead be tablegen'd along with the 3059 // definitions from include/clang/Basic/BuiltinsMips.def. 3060 // FIXME: GCC is strict on signedness for some of these intrinsics, we should 3061 // be too. 3062 bool Sema::CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 3063 unsigned i = 0, l = 0, u = 0, m = 0; 3064 switch (BuiltinID) { 3065 default: return false; 3066 case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break; 3067 case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break; 3068 case Mips::BI__builtin_mips_append: i = 2; l = 0; u = 31; break; 3069 case Mips::BI__builtin_mips_balign: i = 2; l = 0; u = 3; break; 3070 case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break; 3071 case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break; 3072 case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break; 3073 // MSA intrinsics. Instructions (which the intrinsics maps to) which use the 3074 // df/m field. 3075 // These intrinsics take an unsigned 3 bit immediate. 3076 case Mips::BI__builtin_msa_bclri_b: 3077 case Mips::BI__builtin_msa_bnegi_b: 3078 case Mips::BI__builtin_msa_bseti_b: 3079 case Mips::BI__builtin_msa_sat_s_b: 3080 case Mips::BI__builtin_msa_sat_u_b: 3081 case Mips::BI__builtin_msa_slli_b: 3082 case Mips::BI__builtin_msa_srai_b: 3083 case Mips::BI__builtin_msa_srari_b: 3084 case Mips::BI__builtin_msa_srli_b: 3085 case Mips::BI__builtin_msa_srlri_b: i = 1; l = 0; u = 7; break; 3086 case Mips::BI__builtin_msa_binsli_b: 3087 case Mips::BI__builtin_msa_binsri_b: i = 2; l = 0; u = 7; break; 3088 // These intrinsics take an unsigned 4 bit immediate. 
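  // (Clarifying note, not from the original source: in this switch, i is the
  // index of the call operand that must be an integer constant expression and
  // [l, u] is the inclusive range handed to SemaBuiltinConstantArgRange. The
  // immediate width follows the MSA element size, so the _b forms above allow
  // 0..7 while the _h forms below allow 0..15; for example,
  // __builtin_msa_slli_h(v, 15) is accepted and a shift amount of 16 is
  // diagnosed.)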
3089 case Mips::BI__builtin_msa_bclri_h: 3090 case Mips::BI__builtin_msa_bnegi_h: 3091 case Mips::BI__builtin_msa_bseti_h: 3092 case Mips::BI__builtin_msa_sat_s_h: 3093 case Mips::BI__builtin_msa_sat_u_h: 3094 case Mips::BI__builtin_msa_slli_h: 3095 case Mips::BI__builtin_msa_srai_h: 3096 case Mips::BI__builtin_msa_srari_h: 3097 case Mips::BI__builtin_msa_srli_h: 3098 case Mips::BI__builtin_msa_srlri_h: i = 1; l = 0; u = 15; break; 3099 case Mips::BI__builtin_msa_binsli_h: 3100 case Mips::BI__builtin_msa_binsri_h: i = 2; l = 0; u = 15; break; 3101 // These intrinsics take an unsigned 5 bit immediate. 3102 // The first block of intrinsics actually have an unsigned 5 bit field, 3103 // not a df/n field. 3104 case Mips::BI__builtin_msa_cfcmsa: 3105 case Mips::BI__builtin_msa_ctcmsa: i = 0; l = 0; u = 31; break; 3106 case Mips::BI__builtin_msa_clei_u_b: 3107 case Mips::BI__builtin_msa_clei_u_h: 3108 case Mips::BI__builtin_msa_clei_u_w: 3109 case Mips::BI__builtin_msa_clei_u_d: 3110 case Mips::BI__builtin_msa_clti_u_b: 3111 case Mips::BI__builtin_msa_clti_u_h: 3112 case Mips::BI__builtin_msa_clti_u_w: 3113 case Mips::BI__builtin_msa_clti_u_d: 3114 case Mips::BI__builtin_msa_maxi_u_b: 3115 case Mips::BI__builtin_msa_maxi_u_h: 3116 case Mips::BI__builtin_msa_maxi_u_w: 3117 case Mips::BI__builtin_msa_maxi_u_d: 3118 case Mips::BI__builtin_msa_mini_u_b: 3119 case Mips::BI__builtin_msa_mini_u_h: 3120 case Mips::BI__builtin_msa_mini_u_w: 3121 case Mips::BI__builtin_msa_mini_u_d: 3122 case Mips::BI__builtin_msa_addvi_b: 3123 case Mips::BI__builtin_msa_addvi_h: 3124 case Mips::BI__builtin_msa_addvi_w: 3125 case Mips::BI__builtin_msa_addvi_d: 3126 case Mips::BI__builtin_msa_bclri_w: 3127 case Mips::BI__builtin_msa_bnegi_w: 3128 case Mips::BI__builtin_msa_bseti_w: 3129 case Mips::BI__builtin_msa_sat_s_w: 3130 case Mips::BI__builtin_msa_sat_u_w: 3131 case Mips::BI__builtin_msa_slli_w: 3132 case Mips::BI__builtin_msa_srai_w: 3133 case Mips::BI__builtin_msa_srari_w: 3134 case Mips::BI__builtin_msa_srli_w: 3135 case Mips::BI__builtin_msa_srlri_w: 3136 case Mips::BI__builtin_msa_subvi_b: 3137 case Mips::BI__builtin_msa_subvi_h: 3138 case Mips::BI__builtin_msa_subvi_w: 3139 case Mips::BI__builtin_msa_subvi_d: i = 1; l = 0; u = 31; break; 3140 case Mips::BI__builtin_msa_binsli_w: 3141 case Mips::BI__builtin_msa_binsri_w: i = 2; l = 0; u = 31; break; 3142 // These intrinsics take an unsigned 6 bit immediate. 3143 case Mips::BI__builtin_msa_bclri_d: 3144 case Mips::BI__builtin_msa_bnegi_d: 3145 case Mips::BI__builtin_msa_bseti_d: 3146 case Mips::BI__builtin_msa_sat_s_d: 3147 case Mips::BI__builtin_msa_sat_u_d: 3148 case Mips::BI__builtin_msa_slli_d: 3149 case Mips::BI__builtin_msa_srai_d: 3150 case Mips::BI__builtin_msa_srari_d: 3151 case Mips::BI__builtin_msa_srli_d: 3152 case Mips::BI__builtin_msa_srlri_d: i = 1; l = 0; u = 63; break; 3153 case Mips::BI__builtin_msa_binsli_d: 3154 case Mips::BI__builtin_msa_binsri_d: i = 2; l = 0; u = 63; break; 3155 // These intrinsics take a signed 5 bit immediate. 
3156 case Mips::BI__builtin_msa_ceqi_b: 3157 case Mips::BI__builtin_msa_ceqi_h: 3158 case Mips::BI__builtin_msa_ceqi_w: 3159 case Mips::BI__builtin_msa_ceqi_d: 3160 case Mips::BI__builtin_msa_clti_s_b: 3161 case Mips::BI__builtin_msa_clti_s_h: 3162 case Mips::BI__builtin_msa_clti_s_w: 3163 case Mips::BI__builtin_msa_clti_s_d: 3164 case Mips::BI__builtin_msa_clei_s_b: 3165 case Mips::BI__builtin_msa_clei_s_h: 3166 case Mips::BI__builtin_msa_clei_s_w: 3167 case Mips::BI__builtin_msa_clei_s_d: 3168 case Mips::BI__builtin_msa_maxi_s_b: 3169 case Mips::BI__builtin_msa_maxi_s_h: 3170 case Mips::BI__builtin_msa_maxi_s_w: 3171 case Mips::BI__builtin_msa_maxi_s_d: 3172 case Mips::BI__builtin_msa_mini_s_b: 3173 case Mips::BI__builtin_msa_mini_s_h: 3174 case Mips::BI__builtin_msa_mini_s_w: 3175 case Mips::BI__builtin_msa_mini_s_d: i = 1; l = -16; u = 15; break; 3176 // These intrinsics take an unsigned 8 bit immediate. 3177 case Mips::BI__builtin_msa_andi_b: 3178 case Mips::BI__builtin_msa_nori_b: 3179 case Mips::BI__builtin_msa_ori_b: 3180 case Mips::BI__builtin_msa_shf_b: 3181 case Mips::BI__builtin_msa_shf_h: 3182 case Mips::BI__builtin_msa_shf_w: 3183 case Mips::BI__builtin_msa_xori_b: i = 1; l = 0; u = 255; break; 3184 case Mips::BI__builtin_msa_bseli_b: 3185 case Mips::BI__builtin_msa_bmnzi_b: 3186 case Mips::BI__builtin_msa_bmzi_b: i = 2; l = 0; u = 255; break; 3187 // df/n format 3188 // These intrinsics take an unsigned 4 bit immediate. 3189 case Mips::BI__builtin_msa_copy_s_b: 3190 case Mips::BI__builtin_msa_copy_u_b: 3191 case Mips::BI__builtin_msa_insve_b: 3192 case Mips::BI__builtin_msa_splati_b: i = 1; l = 0; u = 15; break; 3193 case Mips::BI__builtin_msa_sldi_b: i = 2; l = 0; u = 15; break; 3194 // These intrinsics take an unsigned 3 bit immediate. 3195 case Mips::BI__builtin_msa_copy_s_h: 3196 case Mips::BI__builtin_msa_copy_u_h: 3197 case Mips::BI__builtin_msa_insve_h: 3198 case Mips::BI__builtin_msa_splati_h: i = 1; l = 0; u = 7; break; 3199 case Mips::BI__builtin_msa_sldi_h: i = 2; l = 0; u = 7; break; 3200 // These intrinsics take an unsigned 2 bit immediate. 3201 case Mips::BI__builtin_msa_copy_s_w: 3202 case Mips::BI__builtin_msa_copy_u_w: 3203 case Mips::BI__builtin_msa_insve_w: 3204 case Mips::BI__builtin_msa_splati_w: i = 1; l = 0; u = 3; break; 3205 case Mips::BI__builtin_msa_sldi_w: i = 2; l = 0; u = 3; break; 3206 // These intrinsics take an unsigned 1 bit immediate. 3207 case Mips::BI__builtin_msa_copy_s_d: 3208 case Mips::BI__builtin_msa_copy_u_d: 3209 case Mips::BI__builtin_msa_insve_d: 3210 case Mips::BI__builtin_msa_splati_d: i = 1; l = 0; u = 1; break; 3211 case Mips::BI__builtin_msa_sldi_d: i = 2; l = 0; u = 1; break; 3212 // Memory offsets and immediate loads. 3213 // These intrinsics take a signed 10 bit immediate. 
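  // (Clarifying note, not from the original source: for the ld/st forms
  // below, m is an additional alignment multiple enforced through
  // SemaBuiltinConstantArgMultiple, so e.g. the offset operand of
  // __builtin_msa_ld_w must be a constant in [-2048, 2044] that is also a
  // multiple of 4.)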
3214 case Mips::BI__builtin_msa_ldi_b: i = 0; l = -128; u = 255; break; 3215 case Mips::BI__builtin_msa_ldi_h: 3216 case Mips::BI__builtin_msa_ldi_w: 3217 case Mips::BI__builtin_msa_ldi_d: i = 0; l = -512; u = 511; break; 3218 case Mips::BI__builtin_msa_ld_b: i = 1; l = -512; u = 511; m = 1; break; 3219 case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 2; break; 3220 case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 4; break; 3221 case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 8; break; 3222 case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 1; break; 3223 case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 2; break; 3224 case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 4; break; 3225 case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 8; break; 3226 } 3227 3228 if (!m) 3229 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 3230 3231 return SemaBuiltinConstantArgRange(TheCall, i, l, u) || 3232 SemaBuiltinConstantArgMultiple(TheCall, i, m); 3233 } 3234 3235 bool Sema::CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 3236 unsigned i = 0, l = 0, u = 0; 3237 bool Is64BitBltin = BuiltinID == PPC::BI__builtin_divde || 3238 BuiltinID == PPC::BI__builtin_divdeu || 3239 BuiltinID == PPC::BI__builtin_bpermd; 3240 bool IsTarget64Bit = Context.getTargetInfo() 3241 .getTypeWidth(Context 3242 .getTargetInfo() 3243 .getIntPtrType()) == 64; 3244 bool IsBltinExtDiv = BuiltinID == PPC::BI__builtin_divwe || 3245 BuiltinID == PPC::BI__builtin_divweu || 3246 BuiltinID == PPC::BI__builtin_divde || 3247 BuiltinID == PPC::BI__builtin_divdeu; 3248 3249 if (Is64BitBltin && !IsTarget64Bit) 3250 return Diag(TheCall->getBeginLoc(), diag::err_64_bit_builtin_32_bit_tgt) 3251 << TheCall->getSourceRange(); 3252 3253 if ((IsBltinExtDiv && !Context.getTargetInfo().hasFeature("extdiv")) || 3254 (BuiltinID == PPC::BI__builtin_bpermd && 3255 !Context.getTargetInfo().hasFeature("bpermd"))) 3256 return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_only_on_pwr7) 3257 << TheCall->getSourceRange(); 3258 3259 auto SemaVSXCheck = [&](CallExpr *TheCall) -> bool { 3260 if (!Context.getTargetInfo().hasFeature("vsx")) 3261 return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_only_on_pwr7) 3262 << TheCall->getSourceRange(); 3263 return false; 3264 }; 3265 3266 switch (BuiltinID) { 3267 default: return false; 3268 case PPC::BI__builtin_altivec_crypto_vshasigmaw: 3269 case PPC::BI__builtin_altivec_crypto_vshasigmad: 3270 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 3271 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); 3272 case PPC::BI__builtin_altivec_dss: 3273 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3); 3274 case PPC::BI__builtin_tbegin: 3275 case PPC::BI__builtin_tend: i = 0; l = 0; u = 1; break; 3276 case PPC::BI__builtin_tsr: i = 0; l = 0; u = 7; break; 3277 case PPC::BI__builtin_tabortwc: 3278 case PPC::BI__builtin_tabortdc: i = 0; l = 0; u = 31; break; 3279 case PPC::BI__builtin_tabortwci: 3280 case PPC::BI__builtin_tabortdci: 3281 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) || 3282 SemaBuiltinConstantArgRange(TheCall, 2, 0, 31); 3283 case PPC::BI__builtin_altivec_dst: 3284 case PPC::BI__builtin_altivec_dstt: 3285 case PPC::BI__builtin_altivec_dstst: 3286 case PPC::BI__builtin_altivec_dststt: 3287 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3); 3288 case PPC::BI__builtin_vsx_xxpermdi: 3289 case PPC::BI__builtin_vsx_xxsldwi: 3290 return SemaBuiltinVSX(TheCall); 3291 
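  // (Clarifying note, not from the original source: the two cases below also
  // gate on the "vsx" target feature via the SemaVSXCheck lambda defined
  // above, which reports err_ppc_builtin_only_on_pwr7 when VSX is missing.)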
case PPC::BI__builtin_unpack_vector_int128: 3292 return SemaVSXCheck(TheCall) || 3293 SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 3294 case PPC::BI__builtin_pack_vector_int128: 3295 return SemaVSXCheck(TheCall); 3296 } 3297 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 3298 } 3299 3300 bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, 3301 CallExpr *TheCall) { 3302 if (BuiltinID == SystemZ::BI__builtin_tabort) { 3303 Expr *Arg = TheCall->getArg(0); 3304 llvm::APSInt AbortCode(32); 3305 if (Arg->isIntegerConstantExpr(AbortCode, Context) && 3306 AbortCode.getSExtValue() >= 0 && AbortCode.getSExtValue() < 256) 3307 return Diag(Arg->getBeginLoc(), diag::err_systemz_invalid_tabort_code) 3308 << Arg->getSourceRange(); 3309 } 3310 3311 // For intrinsics which take an immediate value as part of the instruction, 3312 // range check them here. 3313 unsigned i = 0, l = 0, u = 0; 3314 switch (BuiltinID) { 3315 default: return false; 3316 case SystemZ::BI__builtin_s390_lcbb: i = 1; l = 0; u = 15; break; 3317 case SystemZ::BI__builtin_s390_verimb: 3318 case SystemZ::BI__builtin_s390_verimh: 3319 case SystemZ::BI__builtin_s390_verimf: 3320 case SystemZ::BI__builtin_s390_verimg: i = 3; l = 0; u = 255; break; 3321 case SystemZ::BI__builtin_s390_vfaeb: 3322 case SystemZ::BI__builtin_s390_vfaeh: 3323 case SystemZ::BI__builtin_s390_vfaef: 3324 case SystemZ::BI__builtin_s390_vfaebs: 3325 case SystemZ::BI__builtin_s390_vfaehs: 3326 case SystemZ::BI__builtin_s390_vfaefs: 3327 case SystemZ::BI__builtin_s390_vfaezb: 3328 case SystemZ::BI__builtin_s390_vfaezh: 3329 case SystemZ::BI__builtin_s390_vfaezf: 3330 case SystemZ::BI__builtin_s390_vfaezbs: 3331 case SystemZ::BI__builtin_s390_vfaezhs: 3332 case SystemZ::BI__builtin_s390_vfaezfs: i = 2; l = 0; u = 15; break; 3333 case SystemZ::BI__builtin_s390_vfisb: 3334 case SystemZ::BI__builtin_s390_vfidb: 3335 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15) || 3336 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); 3337 case SystemZ::BI__builtin_s390_vftcisb: 3338 case SystemZ::BI__builtin_s390_vftcidb: i = 1; l = 0; u = 4095; break; 3339 case SystemZ::BI__builtin_s390_vlbb: i = 1; l = 0; u = 15; break; 3340 case SystemZ::BI__builtin_s390_vpdi: i = 2; l = 0; u = 15; break; 3341 case SystemZ::BI__builtin_s390_vsldb: i = 2; l = 0; u = 15; break; 3342 case SystemZ::BI__builtin_s390_vstrcb: 3343 case SystemZ::BI__builtin_s390_vstrch: 3344 case SystemZ::BI__builtin_s390_vstrcf: 3345 case SystemZ::BI__builtin_s390_vstrczb: 3346 case SystemZ::BI__builtin_s390_vstrczh: 3347 case SystemZ::BI__builtin_s390_vstrczf: 3348 case SystemZ::BI__builtin_s390_vstrcbs: 3349 case SystemZ::BI__builtin_s390_vstrchs: 3350 case SystemZ::BI__builtin_s390_vstrcfs: 3351 case SystemZ::BI__builtin_s390_vstrczbs: 3352 case SystemZ::BI__builtin_s390_vstrczhs: 3353 case SystemZ::BI__builtin_s390_vstrczfs: i = 3; l = 0; u = 15; break; 3354 case SystemZ::BI__builtin_s390_vmslg: i = 3; l = 0; u = 15; break; 3355 case SystemZ::BI__builtin_s390_vfminsb: 3356 case SystemZ::BI__builtin_s390_vfmaxsb: 3357 case SystemZ::BI__builtin_s390_vfmindb: 3358 case SystemZ::BI__builtin_s390_vfmaxdb: i = 2; l = 0; u = 15; break; 3359 case SystemZ::BI__builtin_s390_vsld: i = 2; l = 0; u = 7; break; 3360 case SystemZ::BI__builtin_s390_vsrd: i = 2; l = 0; u = 7; break; 3361 } 3362 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 3363 } 3364 3365 /// SemaBuiltinCpuSupports - Handle __builtin_cpu_supports(char *). 
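/// For illustration (usage example, not taken from this file), a caller on
/// x86 might guard a code path with the builtin like this:
/// \code
///   if (__builtin_cpu_supports("avx2"))
///     run_avx2_kernel();   // hypothetical helpers, shown only for context
///   else
///     run_scalar_kernel();
/// \endcode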
/// This checks that the target supports __builtin_cpu_supports and
/// that the string argument is constant and valid.
static bool SemaBuiltinCpuSupports(Sema &S, CallExpr *TheCall) {
  Expr *Arg = TheCall->getArg(0);

  // Check if the argument is a string literal.
  if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
    return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
           << Arg->getSourceRange();

  // Check the contents of the string.
  StringRef Feature =
      cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
  if (!S.Context.getTargetInfo().validateCpuSupports(Feature))
    return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_supports)
           << Arg->getSourceRange();
  return false;
}

/// SemaBuiltinCpuIs - Handle __builtin_cpu_is(char *).
/// This checks that the target supports __builtin_cpu_is and
/// that the string argument is constant and valid.
static bool SemaBuiltinCpuIs(Sema &S, CallExpr *TheCall) {
  Expr *Arg = TheCall->getArg(0);

  // Check if the argument is a string literal.
  if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
    return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
           << Arg->getSourceRange();

  // Check the contents of the string.
  StringRef Feature =
      cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
  if (!S.Context.getTargetInfo().validateCpuIs(Feature))
    return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_is)
           << Arg->getSourceRange();
  return false;
}

// Check if the rounding mode is legal.
bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) {
  // Indicates if this instruction has rounding control or just SAE.
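  // (Clarifying sketch, not from the original source: the checked immediate is
  // typically built from Intel's _MM_FROUND_* macros. SAE-only intrinsics may
  // pass _MM_FROUND_CUR_DIRECTION (4), _MM_FROUND_NO_EXC (8), or both (12);
  // intrinsics with full rounding control may pass 4, 8, or a rounding mode in
  // bits 1:0 combined with _MM_FROUND_NO_EXC, i.e. values 8..11. For example,
  // assuming the AVX-512 intrinsic headers:
  //   _mm512_sqrt_round_pd(x, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC) // 11, accepted
  //   _mm512_sqrt_round_pd(x, _MM_FROUND_TO_ZERO)                     // 3, rejected below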
3408 bool HasRC = false; 3409 3410 unsigned ArgNum = 0; 3411 switch (BuiltinID) { 3412 default: 3413 return false; 3414 case X86::BI__builtin_ia32_vcvttsd2si32: 3415 case X86::BI__builtin_ia32_vcvttsd2si64: 3416 case X86::BI__builtin_ia32_vcvttsd2usi32: 3417 case X86::BI__builtin_ia32_vcvttsd2usi64: 3418 case X86::BI__builtin_ia32_vcvttss2si32: 3419 case X86::BI__builtin_ia32_vcvttss2si64: 3420 case X86::BI__builtin_ia32_vcvttss2usi32: 3421 case X86::BI__builtin_ia32_vcvttss2usi64: 3422 ArgNum = 1; 3423 break; 3424 case X86::BI__builtin_ia32_maxpd512: 3425 case X86::BI__builtin_ia32_maxps512: 3426 case X86::BI__builtin_ia32_minpd512: 3427 case X86::BI__builtin_ia32_minps512: 3428 ArgNum = 2; 3429 break; 3430 case X86::BI__builtin_ia32_cvtps2pd512_mask: 3431 case X86::BI__builtin_ia32_cvttpd2dq512_mask: 3432 case X86::BI__builtin_ia32_cvttpd2qq512_mask: 3433 case X86::BI__builtin_ia32_cvttpd2udq512_mask: 3434 case X86::BI__builtin_ia32_cvttpd2uqq512_mask: 3435 case X86::BI__builtin_ia32_cvttps2dq512_mask: 3436 case X86::BI__builtin_ia32_cvttps2qq512_mask: 3437 case X86::BI__builtin_ia32_cvttps2udq512_mask: 3438 case X86::BI__builtin_ia32_cvttps2uqq512_mask: 3439 case X86::BI__builtin_ia32_exp2pd_mask: 3440 case X86::BI__builtin_ia32_exp2ps_mask: 3441 case X86::BI__builtin_ia32_getexppd512_mask: 3442 case X86::BI__builtin_ia32_getexpps512_mask: 3443 case X86::BI__builtin_ia32_rcp28pd_mask: 3444 case X86::BI__builtin_ia32_rcp28ps_mask: 3445 case X86::BI__builtin_ia32_rsqrt28pd_mask: 3446 case X86::BI__builtin_ia32_rsqrt28ps_mask: 3447 case X86::BI__builtin_ia32_vcomisd: 3448 case X86::BI__builtin_ia32_vcomiss: 3449 case X86::BI__builtin_ia32_vcvtph2ps512_mask: 3450 ArgNum = 3; 3451 break; 3452 case X86::BI__builtin_ia32_cmppd512_mask: 3453 case X86::BI__builtin_ia32_cmpps512_mask: 3454 case X86::BI__builtin_ia32_cmpsd_mask: 3455 case X86::BI__builtin_ia32_cmpss_mask: 3456 case X86::BI__builtin_ia32_cvtss2sd_round_mask: 3457 case X86::BI__builtin_ia32_getexpsd128_round_mask: 3458 case X86::BI__builtin_ia32_getexpss128_round_mask: 3459 case X86::BI__builtin_ia32_getmantpd512_mask: 3460 case X86::BI__builtin_ia32_getmantps512_mask: 3461 case X86::BI__builtin_ia32_maxsd_round_mask: 3462 case X86::BI__builtin_ia32_maxss_round_mask: 3463 case X86::BI__builtin_ia32_minsd_round_mask: 3464 case X86::BI__builtin_ia32_minss_round_mask: 3465 case X86::BI__builtin_ia32_rcp28sd_round_mask: 3466 case X86::BI__builtin_ia32_rcp28ss_round_mask: 3467 case X86::BI__builtin_ia32_reducepd512_mask: 3468 case X86::BI__builtin_ia32_reduceps512_mask: 3469 case X86::BI__builtin_ia32_rndscalepd_mask: 3470 case X86::BI__builtin_ia32_rndscaleps_mask: 3471 case X86::BI__builtin_ia32_rsqrt28sd_round_mask: 3472 case X86::BI__builtin_ia32_rsqrt28ss_round_mask: 3473 ArgNum = 4; 3474 break; 3475 case X86::BI__builtin_ia32_fixupimmpd512_mask: 3476 case X86::BI__builtin_ia32_fixupimmpd512_maskz: 3477 case X86::BI__builtin_ia32_fixupimmps512_mask: 3478 case X86::BI__builtin_ia32_fixupimmps512_maskz: 3479 case X86::BI__builtin_ia32_fixupimmsd_mask: 3480 case X86::BI__builtin_ia32_fixupimmsd_maskz: 3481 case X86::BI__builtin_ia32_fixupimmss_mask: 3482 case X86::BI__builtin_ia32_fixupimmss_maskz: 3483 case X86::BI__builtin_ia32_getmantsd_round_mask: 3484 case X86::BI__builtin_ia32_getmantss_round_mask: 3485 case X86::BI__builtin_ia32_rangepd512_mask: 3486 case X86::BI__builtin_ia32_rangeps512_mask: 3487 case X86::BI__builtin_ia32_rangesd128_round_mask: 3488 case X86::BI__builtin_ia32_rangess128_round_mask: 3489 case 
X86::BI__builtin_ia32_reducesd_mask: 3490 case X86::BI__builtin_ia32_reducess_mask: 3491 case X86::BI__builtin_ia32_rndscalesd_round_mask: 3492 case X86::BI__builtin_ia32_rndscaless_round_mask: 3493 ArgNum = 5; 3494 break; 3495 case X86::BI__builtin_ia32_vcvtsd2si64: 3496 case X86::BI__builtin_ia32_vcvtsd2si32: 3497 case X86::BI__builtin_ia32_vcvtsd2usi32: 3498 case X86::BI__builtin_ia32_vcvtsd2usi64: 3499 case X86::BI__builtin_ia32_vcvtss2si32: 3500 case X86::BI__builtin_ia32_vcvtss2si64: 3501 case X86::BI__builtin_ia32_vcvtss2usi32: 3502 case X86::BI__builtin_ia32_vcvtss2usi64: 3503 case X86::BI__builtin_ia32_sqrtpd512: 3504 case X86::BI__builtin_ia32_sqrtps512: 3505 ArgNum = 1; 3506 HasRC = true; 3507 break; 3508 case X86::BI__builtin_ia32_addpd512: 3509 case X86::BI__builtin_ia32_addps512: 3510 case X86::BI__builtin_ia32_divpd512: 3511 case X86::BI__builtin_ia32_divps512: 3512 case X86::BI__builtin_ia32_mulpd512: 3513 case X86::BI__builtin_ia32_mulps512: 3514 case X86::BI__builtin_ia32_subpd512: 3515 case X86::BI__builtin_ia32_subps512: 3516 case X86::BI__builtin_ia32_cvtsi2sd64: 3517 case X86::BI__builtin_ia32_cvtsi2ss32: 3518 case X86::BI__builtin_ia32_cvtsi2ss64: 3519 case X86::BI__builtin_ia32_cvtusi2sd64: 3520 case X86::BI__builtin_ia32_cvtusi2ss32: 3521 case X86::BI__builtin_ia32_cvtusi2ss64: 3522 ArgNum = 2; 3523 HasRC = true; 3524 break; 3525 case X86::BI__builtin_ia32_cvtdq2ps512_mask: 3526 case X86::BI__builtin_ia32_cvtudq2ps512_mask: 3527 case X86::BI__builtin_ia32_cvtpd2ps512_mask: 3528 case X86::BI__builtin_ia32_cvtpd2dq512_mask: 3529 case X86::BI__builtin_ia32_cvtpd2qq512_mask: 3530 case X86::BI__builtin_ia32_cvtpd2udq512_mask: 3531 case X86::BI__builtin_ia32_cvtpd2uqq512_mask: 3532 case X86::BI__builtin_ia32_cvtps2dq512_mask: 3533 case X86::BI__builtin_ia32_cvtps2qq512_mask: 3534 case X86::BI__builtin_ia32_cvtps2udq512_mask: 3535 case X86::BI__builtin_ia32_cvtps2uqq512_mask: 3536 case X86::BI__builtin_ia32_cvtqq2pd512_mask: 3537 case X86::BI__builtin_ia32_cvtqq2ps512_mask: 3538 case X86::BI__builtin_ia32_cvtuqq2pd512_mask: 3539 case X86::BI__builtin_ia32_cvtuqq2ps512_mask: 3540 ArgNum = 3; 3541 HasRC = true; 3542 break; 3543 case X86::BI__builtin_ia32_addss_round_mask: 3544 case X86::BI__builtin_ia32_addsd_round_mask: 3545 case X86::BI__builtin_ia32_divss_round_mask: 3546 case X86::BI__builtin_ia32_divsd_round_mask: 3547 case X86::BI__builtin_ia32_mulss_round_mask: 3548 case X86::BI__builtin_ia32_mulsd_round_mask: 3549 case X86::BI__builtin_ia32_subss_round_mask: 3550 case X86::BI__builtin_ia32_subsd_round_mask: 3551 case X86::BI__builtin_ia32_scalefpd512_mask: 3552 case X86::BI__builtin_ia32_scalefps512_mask: 3553 case X86::BI__builtin_ia32_scalefsd_round_mask: 3554 case X86::BI__builtin_ia32_scalefss_round_mask: 3555 case X86::BI__builtin_ia32_cvtsd2ss_round_mask: 3556 case X86::BI__builtin_ia32_sqrtsd_round_mask: 3557 case X86::BI__builtin_ia32_sqrtss_round_mask: 3558 case X86::BI__builtin_ia32_vfmaddsd3_mask: 3559 case X86::BI__builtin_ia32_vfmaddsd3_maskz: 3560 case X86::BI__builtin_ia32_vfmaddsd3_mask3: 3561 case X86::BI__builtin_ia32_vfmaddss3_mask: 3562 case X86::BI__builtin_ia32_vfmaddss3_maskz: 3563 case X86::BI__builtin_ia32_vfmaddss3_mask3: 3564 case X86::BI__builtin_ia32_vfmaddpd512_mask: 3565 case X86::BI__builtin_ia32_vfmaddpd512_maskz: 3566 case X86::BI__builtin_ia32_vfmaddpd512_mask3: 3567 case X86::BI__builtin_ia32_vfmsubpd512_mask3: 3568 case X86::BI__builtin_ia32_vfmaddps512_mask: 3569 case X86::BI__builtin_ia32_vfmaddps512_maskz: 3570 case 
X86::BI__builtin_ia32_vfmaddps512_mask3: 3571 case X86::BI__builtin_ia32_vfmsubps512_mask3: 3572 case X86::BI__builtin_ia32_vfmaddsubpd512_mask: 3573 case X86::BI__builtin_ia32_vfmaddsubpd512_maskz: 3574 case X86::BI__builtin_ia32_vfmaddsubpd512_mask3: 3575 case X86::BI__builtin_ia32_vfmsubaddpd512_mask3: 3576 case X86::BI__builtin_ia32_vfmaddsubps512_mask: 3577 case X86::BI__builtin_ia32_vfmaddsubps512_maskz: 3578 case X86::BI__builtin_ia32_vfmaddsubps512_mask3: 3579 case X86::BI__builtin_ia32_vfmsubaddps512_mask3: 3580 ArgNum = 4; 3581 HasRC = true; 3582 break; 3583 } 3584 3585 llvm::APSInt Result; 3586 3587 // We can't check the value of a dependent argument. 3588 Expr *Arg = TheCall->getArg(ArgNum); 3589 if (Arg->isTypeDependent() || Arg->isValueDependent()) 3590 return false; 3591 3592 // Check constant-ness first. 3593 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 3594 return true; 3595 3596 // Make sure rounding mode is either ROUND_CUR_DIRECTION or ROUND_NO_EXC bit 3597 // is set. If the intrinsic has rounding control(bits 1:0), make sure its only 3598 // combined with ROUND_NO_EXC. If the intrinsic does not have rounding 3599 // control, allow ROUND_NO_EXC and ROUND_CUR_DIRECTION together. 3600 if (Result == 4/*ROUND_CUR_DIRECTION*/ || 3601 Result == 8/*ROUND_NO_EXC*/ || 3602 (!HasRC && Result == 12/*ROUND_CUR_DIRECTION|ROUND_NO_EXC*/) || 3603 (HasRC && Result.getZExtValue() >= 8 && Result.getZExtValue() <= 11)) 3604 return false; 3605 3606 return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_rounding) 3607 << Arg->getSourceRange(); 3608 } 3609 3610 // Check if the gather/scatter scale is legal. 3611 bool Sema::CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, 3612 CallExpr *TheCall) { 3613 unsigned ArgNum = 0; 3614 switch (BuiltinID) { 3615 default: 3616 return false; 3617 case X86::BI__builtin_ia32_gatherpfdpd: 3618 case X86::BI__builtin_ia32_gatherpfdps: 3619 case X86::BI__builtin_ia32_gatherpfqpd: 3620 case X86::BI__builtin_ia32_gatherpfqps: 3621 case X86::BI__builtin_ia32_scatterpfdpd: 3622 case X86::BI__builtin_ia32_scatterpfdps: 3623 case X86::BI__builtin_ia32_scatterpfqpd: 3624 case X86::BI__builtin_ia32_scatterpfqps: 3625 ArgNum = 3; 3626 break; 3627 case X86::BI__builtin_ia32_gatherd_pd: 3628 case X86::BI__builtin_ia32_gatherd_pd256: 3629 case X86::BI__builtin_ia32_gatherq_pd: 3630 case X86::BI__builtin_ia32_gatherq_pd256: 3631 case X86::BI__builtin_ia32_gatherd_ps: 3632 case X86::BI__builtin_ia32_gatherd_ps256: 3633 case X86::BI__builtin_ia32_gatherq_ps: 3634 case X86::BI__builtin_ia32_gatherq_ps256: 3635 case X86::BI__builtin_ia32_gatherd_q: 3636 case X86::BI__builtin_ia32_gatherd_q256: 3637 case X86::BI__builtin_ia32_gatherq_q: 3638 case X86::BI__builtin_ia32_gatherq_q256: 3639 case X86::BI__builtin_ia32_gatherd_d: 3640 case X86::BI__builtin_ia32_gatherd_d256: 3641 case X86::BI__builtin_ia32_gatherq_d: 3642 case X86::BI__builtin_ia32_gatherq_d256: 3643 case X86::BI__builtin_ia32_gather3div2df: 3644 case X86::BI__builtin_ia32_gather3div2di: 3645 case X86::BI__builtin_ia32_gather3div4df: 3646 case X86::BI__builtin_ia32_gather3div4di: 3647 case X86::BI__builtin_ia32_gather3div4sf: 3648 case X86::BI__builtin_ia32_gather3div4si: 3649 case X86::BI__builtin_ia32_gather3div8sf: 3650 case X86::BI__builtin_ia32_gather3div8si: 3651 case X86::BI__builtin_ia32_gather3siv2df: 3652 case X86::BI__builtin_ia32_gather3siv2di: 3653 case X86::BI__builtin_ia32_gather3siv4df: 3654 case X86::BI__builtin_ia32_gather3siv4di: 3655 case 
X86::BI__builtin_ia32_gather3siv4sf: 3656 case X86::BI__builtin_ia32_gather3siv4si: 3657 case X86::BI__builtin_ia32_gather3siv8sf: 3658 case X86::BI__builtin_ia32_gather3siv8si: 3659 case X86::BI__builtin_ia32_gathersiv8df: 3660 case X86::BI__builtin_ia32_gathersiv16sf: 3661 case X86::BI__builtin_ia32_gatherdiv8df: 3662 case X86::BI__builtin_ia32_gatherdiv16sf: 3663 case X86::BI__builtin_ia32_gathersiv8di: 3664 case X86::BI__builtin_ia32_gathersiv16si: 3665 case X86::BI__builtin_ia32_gatherdiv8di: 3666 case X86::BI__builtin_ia32_gatherdiv16si: 3667 case X86::BI__builtin_ia32_scatterdiv2df: 3668 case X86::BI__builtin_ia32_scatterdiv2di: 3669 case X86::BI__builtin_ia32_scatterdiv4df: 3670 case X86::BI__builtin_ia32_scatterdiv4di: 3671 case X86::BI__builtin_ia32_scatterdiv4sf: 3672 case X86::BI__builtin_ia32_scatterdiv4si: 3673 case X86::BI__builtin_ia32_scatterdiv8sf: 3674 case X86::BI__builtin_ia32_scatterdiv8si: 3675 case X86::BI__builtin_ia32_scattersiv2df: 3676 case X86::BI__builtin_ia32_scattersiv2di: 3677 case X86::BI__builtin_ia32_scattersiv4df: 3678 case X86::BI__builtin_ia32_scattersiv4di: 3679 case X86::BI__builtin_ia32_scattersiv4sf: 3680 case X86::BI__builtin_ia32_scattersiv4si: 3681 case X86::BI__builtin_ia32_scattersiv8sf: 3682 case X86::BI__builtin_ia32_scattersiv8si: 3683 case X86::BI__builtin_ia32_scattersiv8df: 3684 case X86::BI__builtin_ia32_scattersiv16sf: 3685 case X86::BI__builtin_ia32_scatterdiv8df: 3686 case X86::BI__builtin_ia32_scatterdiv16sf: 3687 case X86::BI__builtin_ia32_scattersiv8di: 3688 case X86::BI__builtin_ia32_scattersiv16si: 3689 case X86::BI__builtin_ia32_scatterdiv8di: 3690 case X86::BI__builtin_ia32_scatterdiv16si: 3691 ArgNum = 4; 3692 break; 3693 } 3694 3695 llvm::APSInt Result; 3696 3697 // We can't check the value of a dependent argument. 3698 Expr *Arg = TheCall->getArg(ArgNum); 3699 if (Arg->isTypeDependent() || Arg->isValueDependent()) 3700 return false; 3701 3702 // Check constant-ness first. 3703 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 3704 return true; 3705 3706 if (Result == 1 || Result == 2 || Result == 4 || Result == 8) 3707 return false; 3708 3709 return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_scale) 3710 << Arg->getSourceRange(); 3711 } 3712 3713 static bool isX86_32Builtin(unsigned BuiltinID) { 3714 // These builtins only work on x86-32 targets. 3715 switch (BuiltinID) { 3716 case X86::BI__builtin_ia32_readeflags_u32: 3717 case X86::BI__builtin_ia32_writeeflags_u32: 3718 return true; 3719 } 3720 3721 return false; 3722 } 3723 3724 bool Sema::CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 3725 if (BuiltinID == X86::BI__builtin_cpu_supports) 3726 return SemaBuiltinCpuSupports(*this, TheCall); 3727 3728 if (BuiltinID == X86::BI__builtin_cpu_is) 3729 return SemaBuiltinCpuIs(*this, TheCall); 3730 3731 // Check for 32-bit only builtins on a 64-bit target. 3732 const llvm::Triple &TT = Context.getTargetInfo().getTriple(); 3733 if (TT.getArch() != llvm::Triple::x86 && isX86_32Builtin(BuiltinID)) 3734 return Diag(TheCall->getCallee()->getBeginLoc(), 3735 diag::err_32_bit_builtin_64_bit_tgt); 3736 3737 // If the intrinsic has rounding or SAE make sure its valid. 3738 if (CheckX86BuiltinRoundingOrSAE(BuiltinID, TheCall)) 3739 return true; 3740 3741 // If the intrinsic has a gather/scatter scale immediate make sure its valid. 
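  // (Clarifying note, not from the original source: the scale immediate of the
  // gather/scatter intrinsics is the byte multiplier applied to each index and
  // must be exactly 1, 2, 4, or 8. For example, assuming the AVX-512 headers,
  // _mm512_i32gather_ps(idx, base, 4) passes the check, while a scale of 3 is
  // rejected with err_x86_builtin_invalid_scale.)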
3742 if (CheckX86BuiltinGatherScatterScale(BuiltinID, TheCall)) 3743 return true; 3744 3745 // For intrinsics which take an immediate value as part of the instruction, 3746 // range check them here. 3747 int i = 0, l = 0, u = 0; 3748 switch (BuiltinID) { 3749 default: 3750 return false; 3751 case X86::BI__builtin_ia32_vec_ext_v2si: 3752 case X86::BI__builtin_ia32_vec_ext_v2di: 3753 case X86::BI__builtin_ia32_vextractf128_pd256: 3754 case X86::BI__builtin_ia32_vextractf128_ps256: 3755 case X86::BI__builtin_ia32_vextractf128_si256: 3756 case X86::BI__builtin_ia32_extract128i256: 3757 case X86::BI__builtin_ia32_extractf64x4_mask: 3758 case X86::BI__builtin_ia32_extracti64x4_mask: 3759 case X86::BI__builtin_ia32_extractf32x8_mask: 3760 case X86::BI__builtin_ia32_extracti32x8_mask: 3761 case X86::BI__builtin_ia32_extractf64x2_256_mask: 3762 case X86::BI__builtin_ia32_extracti64x2_256_mask: 3763 case X86::BI__builtin_ia32_extractf32x4_256_mask: 3764 case X86::BI__builtin_ia32_extracti32x4_256_mask: 3765 i = 1; l = 0; u = 1; 3766 break; 3767 case X86::BI__builtin_ia32_vec_set_v2di: 3768 case X86::BI__builtin_ia32_vinsertf128_pd256: 3769 case X86::BI__builtin_ia32_vinsertf128_ps256: 3770 case X86::BI__builtin_ia32_vinsertf128_si256: 3771 case X86::BI__builtin_ia32_insert128i256: 3772 case X86::BI__builtin_ia32_insertf32x8: 3773 case X86::BI__builtin_ia32_inserti32x8: 3774 case X86::BI__builtin_ia32_insertf64x4: 3775 case X86::BI__builtin_ia32_inserti64x4: 3776 case X86::BI__builtin_ia32_insertf64x2_256: 3777 case X86::BI__builtin_ia32_inserti64x2_256: 3778 case X86::BI__builtin_ia32_insertf32x4_256: 3779 case X86::BI__builtin_ia32_inserti32x4_256: 3780 i = 2; l = 0; u = 1; 3781 break; 3782 case X86::BI__builtin_ia32_vpermilpd: 3783 case X86::BI__builtin_ia32_vec_ext_v4hi: 3784 case X86::BI__builtin_ia32_vec_ext_v4si: 3785 case X86::BI__builtin_ia32_vec_ext_v4sf: 3786 case X86::BI__builtin_ia32_vec_ext_v4di: 3787 case X86::BI__builtin_ia32_extractf32x4_mask: 3788 case X86::BI__builtin_ia32_extracti32x4_mask: 3789 case X86::BI__builtin_ia32_extractf64x2_512_mask: 3790 case X86::BI__builtin_ia32_extracti64x2_512_mask: 3791 i = 1; l = 0; u = 3; 3792 break; 3793 case X86::BI_mm_prefetch: 3794 case X86::BI__builtin_ia32_vec_ext_v8hi: 3795 case X86::BI__builtin_ia32_vec_ext_v8si: 3796 i = 1; l = 0; u = 7; 3797 break; 3798 case X86::BI__builtin_ia32_sha1rnds4: 3799 case X86::BI__builtin_ia32_blendpd: 3800 case X86::BI__builtin_ia32_shufpd: 3801 case X86::BI__builtin_ia32_vec_set_v4hi: 3802 case X86::BI__builtin_ia32_vec_set_v4si: 3803 case X86::BI__builtin_ia32_vec_set_v4di: 3804 case X86::BI__builtin_ia32_shuf_f32x4_256: 3805 case X86::BI__builtin_ia32_shuf_f64x2_256: 3806 case X86::BI__builtin_ia32_shuf_i32x4_256: 3807 case X86::BI__builtin_ia32_shuf_i64x2_256: 3808 case X86::BI__builtin_ia32_insertf64x2_512: 3809 case X86::BI__builtin_ia32_inserti64x2_512: 3810 case X86::BI__builtin_ia32_insertf32x4: 3811 case X86::BI__builtin_ia32_inserti32x4: 3812 i = 2; l = 0; u = 3; 3813 break; 3814 case X86::BI__builtin_ia32_vpermil2pd: 3815 case X86::BI__builtin_ia32_vpermil2pd256: 3816 case X86::BI__builtin_ia32_vpermil2ps: 3817 case X86::BI__builtin_ia32_vpermil2ps256: 3818 i = 3; l = 0; u = 3; 3819 break; 3820 case X86::BI__builtin_ia32_cmpb128_mask: 3821 case X86::BI__builtin_ia32_cmpw128_mask: 3822 case X86::BI__builtin_ia32_cmpd128_mask: 3823 case X86::BI__builtin_ia32_cmpq128_mask: 3824 case X86::BI__builtin_ia32_cmpb256_mask: 3825 case X86::BI__builtin_ia32_cmpw256_mask: 3826 case 
X86::BI__builtin_ia32_cmpd256_mask: 3827 case X86::BI__builtin_ia32_cmpq256_mask: 3828 case X86::BI__builtin_ia32_cmpb512_mask: 3829 case X86::BI__builtin_ia32_cmpw512_mask: 3830 case X86::BI__builtin_ia32_cmpd512_mask: 3831 case X86::BI__builtin_ia32_cmpq512_mask: 3832 case X86::BI__builtin_ia32_ucmpb128_mask: 3833 case X86::BI__builtin_ia32_ucmpw128_mask: 3834 case X86::BI__builtin_ia32_ucmpd128_mask: 3835 case X86::BI__builtin_ia32_ucmpq128_mask: 3836 case X86::BI__builtin_ia32_ucmpb256_mask: 3837 case X86::BI__builtin_ia32_ucmpw256_mask: 3838 case X86::BI__builtin_ia32_ucmpd256_mask: 3839 case X86::BI__builtin_ia32_ucmpq256_mask: 3840 case X86::BI__builtin_ia32_ucmpb512_mask: 3841 case X86::BI__builtin_ia32_ucmpw512_mask: 3842 case X86::BI__builtin_ia32_ucmpd512_mask: 3843 case X86::BI__builtin_ia32_ucmpq512_mask: 3844 case X86::BI__builtin_ia32_vpcomub: 3845 case X86::BI__builtin_ia32_vpcomuw: 3846 case X86::BI__builtin_ia32_vpcomud: 3847 case X86::BI__builtin_ia32_vpcomuq: 3848 case X86::BI__builtin_ia32_vpcomb: 3849 case X86::BI__builtin_ia32_vpcomw: 3850 case X86::BI__builtin_ia32_vpcomd: 3851 case X86::BI__builtin_ia32_vpcomq: 3852 case X86::BI__builtin_ia32_vec_set_v8hi: 3853 case X86::BI__builtin_ia32_vec_set_v8si: 3854 i = 2; l = 0; u = 7; 3855 break; 3856 case X86::BI__builtin_ia32_vpermilpd256: 3857 case X86::BI__builtin_ia32_roundps: 3858 case X86::BI__builtin_ia32_roundpd: 3859 case X86::BI__builtin_ia32_roundps256: 3860 case X86::BI__builtin_ia32_roundpd256: 3861 case X86::BI__builtin_ia32_getmantpd128_mask: 3862 case X86::BI__builtin_ia32_getmantpd256_mask: 3863 case X86::BI__builtin_ia32_getmantps128_mask: 3864 case X86::BI__builtin_ia32_getmantps256_mask: 3865 case X86::BI__builtin_ia32_getmantpd512_mask: 3866 case X86::BI__builtin_ia32_getmantps512_mask: 3867 case X86::BI__builtin_ia32_vec_ext_v16qi: 3868 case X86::BI__builtin_ia32_vec_ext_v16hi: 3869 i = 1; l = 0; u = 15; 3870 break; 3871 case X86::BI__builtin_ia32_pblendd128: 3872 case X86::BI__builtin_ia32_blendps: 3873 case X86::BI__builtin_ia32_blendpd256: 3874 case X86::BI__builtin_ia32_shufpd256: 3875 case X86::BI__builtin_ia32_roundss: 3876 case X86::BI__builtin_ia32_roundsd: 3877 case X86::BI__builtin_ia32_rangepd128_mask: 3878 case X86::BI__builtin_ia32_rangepd256_mask: 3879 case X86::BI__builtin_ia32_rangepd512_mask: 3880 case X86::BI__builtin_ia32_rangeps128_mask: 3881 case X86::BI__builtin_ia32_rangeps256_mask: 3882 case X86::BI__builtin_ia32_rangeps512_mask: 3883 case X86::BI__builtin_ia32_getmantsd_round_mask: 3884 case X86::BI__builtin_ia32_getmantss_round_mask: 3885 case X86::BI__builtin_ia32_vec_set_v16qi: 3886 case X86::BI__builtin_ia32_vec_set_v16hi: 3887 i = 2; l = 0; u = 15; 3888 break; 3889 case X86::BI__builtin_ia32_vec_ext_v32qi: 3890 i = 1; l = 0; u = 31; 3891 break; 3892 case X86::BI__builtin_ia32_cmpps: 3893 case X86::BI__builtin_ia32_cmpss: 3894 case X86::BI__builtin_ia32_cmppd: 3895 case X86::BI__builtin_ia32_cmpsd: 3896 case X86::BI__builtin_ia32_cmpps256: 3897 case X86::BI__builtin_ia32_cmppd256: 3898 case X86::BI__builtin_ia32_cmpps128_mask: 3899 case X86::BI__builtin_ia32_cmppd128_mask: 3900 case X86::BI__builtin_ia32_cmpps256_mask: 3901 case X86::BI__builtin_ia32_cmppd256_mask: 3902 case X86::BI__builtin_ia32_cmpps512_mask: 3903 case X86::BI__builtin_ia32_cmppd512_mask: 3904 case X86::BI__builtin_ia32_cmpsd_mask: 3905 case X86::BI__builtin_ia32_cmpss_mask: 3906 case X86::BI__builtin_ia32_vec_set_v32qi: 3907 i = 2; l = 0; u = 31; 3908 break; 3909 case X86::BI__builtin_ia32_permdf256: 
3910 case X86::BI__builtin_ia32_permdi256: 3911 case X86::BI__builtin_ia32_permdf512: 3912 case X86::BI__builtin_ia32_permdi512: 3913 case X86::BI__builtin_ia32_vpermilps: 3914 case X86::BI__builtin_ia32_vpermilps256: 3915 case X86::BI__builtin_ia32_vpermilpd512: 3916 case X86::BI__builtin_ia32_vpermilps512: 3917 case X86::BI__builtin_ia32_pshufd: 3918 case X86::BI__builtin_ia32_pshufd256: 3919 case X86::BI__builtin_ia32_pshufd512: 3920 case X86::BI__builtin_ia32_pshufhw: 3921 case X86::BI__builtin_ia32_pshufhw256: 3922 case X86::BI__builtin_ia32_pshufhw512: 3923 case X86::BI__builtin_ia32_pshuflw: 3924 case X86::BI__builtin_ia32_pshuflw256: 3925 case X86::BI__builtin_ia32_pshuflw512: 3926 case X86::BI__builtin_ia32_vcvtps2ph: 3927 case X86::BI__builtin_ia32_vcvtps2ph_mask: 3928 case X86::BI__builtin_ia32_vcvtps2ph256: 3929 case X86::BI__builtin_ia32_vcvtps2ph256_mask: 3930 case X86::BI__builtin_ia32_vcvtps2ph512_mask: 3931 case X86::BI__builtin_ia32_rndscaleps_128_mask: 3932 case X86::BI__builtin_ia32_rndscalepd_128_mask: 3933 case X86::BI__builtin_ia32_rndscaleps_256_mask: 3934 case X86::BI__builtin_ia32_rndscalepd_256_mask: 3935 case X86::BI__builtin_ia32_rndscaleps_mask: 3936 case X86::BI__builtin_ia32_rndscalepd_mask: 3937 case X86::BI__builtin_ia32_reducepd128_mask: 3938 case X86::BI__builtin_ia32_reducepd256_mask: 3939 case X86::BI__builtin_ia32_reducepd512_mask: 3940 case X86::BI__builtin_ia32_reduceps128_mask: 3941 case X86::BI__builtin_ia32_reduceps256_mask: 3942 case X86::BI__builtin_ia32_reduceps512_mask: 3943 case X86::BI__builtin_ia32_prold512: 3944 case X86::BI__builtin_ia32_prolq512: 3945 case X86::BI__builtin_ia32_prold128: 3946 case X86::BI__builtin_ia32_prold256: 3947 case X86::BI__builtin_ia32_prolq128: 3948 case X86::BI__builtin_ia32_prolq256: 3949 case X86::BI__builtin_ia32_prord512: 3950 case X86::BI__builtin_ia32_prorq512: 3951 case X86::BI__builtin_ia32_prord128: 3952 case X86::BI__builtin_ia32_prord256: 3953 case X86::BI__builtin_ia32_prorq128: 3954 case X86::BI__builtin_ia32_prorq256: 3955 case X86::BI__builtin_ia32_fpclasspd128_mask: 3956 case X86::BI__builtin_ia32_fpclasspd256_mask: 3957 case X86::BI__builtin_ia32_fpclassps128_mask: 3958 case X86::BI__builtin_ia32_fpclassps256_mask: 3959 case X86::BI__builtin_ia32_fpclassps512_mask: 3960 case X86::BI__builtin_ia32_fpclasspd512_mask: 3961 case X86::BI__builtin_ia32_fpclasssd_mask: 3962 case X86::BI__builtin_ia32_fpclassss_mask: 3963 case X86::BI__builtin_ia32_pslldqi128_byteshift: 3964 case X86::BI__builtin_ia32_pslldqi256_byteshift: 3965 case X86::BI__builtin_ia32_pslldqi512_byteshift: 3966 case X86::BI__builtin_ia32_psrldqi128_byteshift: 3967 case X86::BI__builtin_ia32_psrldqi256_byteshift: 3968 case X86::BI__builtin_ia32_psrldqi512_byteshift: 3969 case X86::BI__builtin_ia32_kshiftliqi: 3970 case X86::BI__builtin_ia32_kshiftlihi: 3971 case X86::BI__builtin_ia32_kshiftlisi: 3972 case X86::BI__builtin_ia32_kshiftlidi: 3973 case X86::BI__builtin_ia32_kshiftriqi: 3974 case X86::BI__builtin_ia32_kshiftrihi: 3975 case X86::BI__builtin_ia32_kshiftrisi: 3976 case X86::BI__builtin_ia32_kshiftridi: 3977 i = 1; l = 0; u = 255; 3978 break; 3979 case X86::BI__builtin_ia32_vperm2f128_pd256: 3980 case X86::BI__builtin_ia32_vperm2f128_ps256: 3981 case X86::BI__builtin_ia32_vperm2f128_si256: 3982 case X86::BI__builtin_ia32_permti256: 3983 case X86::BI__builtin_ia32_pblendw128: 3984 case X86::BI__builtin_ia32_pblendw256: 3985 case X86::BI__builtin_ia32_blendps256: 3986 case X86::BI__builtin_ia32_pblendd256: 3987 case 
X86::BI__builtin_ia32_palignr128: 3988 case X86::BI__builtin_ia32_palignr256: 3989 case X86::BI__builtin_ia32_palignr512: 3990 case X86::BI__builtin_ia32_alignq512: 3991 case X86::BI__builtin_ia32_alignd512: 3992 case X86::BI__builtin_ia32_alignd128: 3993 case X86::BI__builtin_ia32_alignd256: 3994 case X86::BI__builtin_ia32_alignq128: 3995 case X86::BI__builtin_ia32_alignq256: 3996 case X86::BI__builtin_ia32_vcomisd: 3997 case X86::BI__builtin_ia32_vcomiss: 3998 case X86::BI__builtin_ia32_shuf_f32x4: 3999 case X86::BI__builtin_ia32_shuf_f64x2: 4000 case X86::BI__builtin_ia32_shuf_i32x4: 4001 case X86::BI__builtin_ia32_shuf_i64x2: 4002 case X86::BI__builtin_ia32_shufpd512: 4003 case X86::BI__builtin_ia32_shufps: 4004 case X86::BI__builtin_ia32_shufps256: 4005 case X86::BI__builtin_ia32_shufps512: 4006 case X86::BI__builtin_ia32_dbpsadbw128: 4007 case X86::BI__builtin_ia32_dbpsadbw256: 4008 case X86::BI__builtin_ia32_dbpsadbw512: 4009 case X86::BI__builtin_ia32_vpshldd128: 4010 case X86::BI__builtin_ia32_vpshldd256: 4011 case X86::BI__builtin_ia32_vpshldd512: 4012 case X86::BI__builtin_ia32_vpshldq128: 4013 case X86::BI__builtin_ia32_vpshldq256: 4014 case X86::BI__builtin_ia32_vpshldq512: 4015 case X86::BI__builtin_ia32_vpshldw128: 4016 case X86::BI__builtin_ia32_vpshldw256: 4017 case X86::BI__builtin_ia32_vpshldw512: 4018 case X86::BI__builtin_ia32_vpshrdd128: 4019 case X86::BI__builtin_ia32_vpshrdd256: 4020 case X86::BI__builtin_ia32_vpshrdd512: 4021 case X86::BI__builtin_ia32_vpshrdq128: 4022 case X86::BI__builtin_ia32_vpshrdq256: 4023 case X86::BI__builtin_ia32_vpshrdq512: 4024 case X86::BI__builtin_ia32_vpshrdw128: 4025 case X86::BI__builtin_ia32_vpshrdw256: 4026 case X86::BI__builtin_ia32_vpshrdw512: 4027 i = 2; l = 0; u = 255; 4028 break; 4029 case X86::BI__builtin_ia32_fixupimmpd512_mask: 4030 case X86::BI__builtin_ia32_fixupimmpd512_maskz: 4031 case X86::BI__builtin_ia32_fixupimmps512_mask: 4032 case X86::BI__builtin_ia32_fixupimmps512_maskz: 4033 case X86::BI__builtin_ia32_fixupimmsd_mask: 4034 case X86::BI__builtin_ia32_fixupimmsd_maskz: 4035 case X86::BI__builtin_ia32_fixupimmss_mask: 4036 case X86::BI__builtin_ia32_fixupimmss_maskz: 4037 case X86::BI__builtin_ia32_fixupimmpd128_mask: 4038 case X86::BI__builtin_ia32_fixupimmpd128_maskz: 4039 case X86::BI__builtin_ia32_fixupimmpd256_mask: 4040 case X86::BI__builtin_ia32_fixupimmpd256_maskz: 4041 case X86::BI__builtin_ia32_fixupimmps128_mask: 4042 case X86::BI__builtin_ia32_fixupimmps128_maskz: 4043 case X86::BI__builtin_ia32_fixupimmps256_mask: 4044 case X86::BI__builtin_ia32_fixupimmps256_maskz: 4045 case X86::BI__builtin_ia32_pternlogd512_mask: 4046 case X86::BI__builtin_ia32_pternlogd512_maskz: 4047 case X86::BI__builtin_ia32_pternlogq512_mask: 4048 case X86::BI__builtin_ia32_pternlogq512_maskz: 4049 case X86::BI__builtin_ia32_pternlogd128_mask: 4050 case X86::BI__builtin_ia32_pternlogd128_maskz: 4051 case X86::BI__builtin_ia32_pternlogd256_mask: 4052 case X86::BI__builtin_ia32_pternlogd256_maskz: 4053 case X86::BI__builtin_ia32_pternlogq128_mask: 4054 case X86::BI__builtin_ia32_pternlogq128_maskz: 4055 case X86::BI__builtin_ia32_pternlogq256_mask: 4056 case X86::BI__builtin_ia32_pternlogq256_maskz: 4057 i = 3; l = 0; u = 255; 4058 break; 4059 case X86::BI__builtin_ia32_gatherpfdpd: 4060 case X86::BI__builtin_ia32_gatherpfdps: 4061 case X86::BI__builtin_ia32_gatherpfqpd: 4062 case X86::BI__builtin_ia32_gatherpfqps: 4063 case X86::BI__builtin_ia32_scatterpfdpd: 4064 case X86::BI__builtin_ia32_scatterpfdps: 4065 case 
X86::BI__builtin_ia32_scatterpfqpd:
  case X86::BI__builtin_ia32_scatterpfqps:
    i = 4; l = 2; u = 3;
    break;
  case X86::BI__builtin_ia32_reducesd_mask:
  case X86::BI__builtin_ia32_reducess_mask:
  case X86::BI__builtin_ia32_rndscalesd_round_mask:
  case X86::BI__builtin_ia32_rndscaless_round_mask:
    i = 4; l = 0; u = 255;
    break;
  }

  // Note that we don't force a hard error on the range check here, allowing
  // template-generated or macro-generated dead code to potentially have out-of-
  // range values. These need to code generate, but don't necessarily need to
  // make any sense. We use a warning that defaults to an error.
  return SemaBuiltinConstantArgRange(TheCall, i, l, u, /*RangeIsError*/ false);
}

/// Given a FunctionDecl's FormatAttr, attempts to populate the FormatStringInfo
/// parameter with the FormatAttr's correct format_idx and firstDataArg.
/// Returns true when the format fits the function and the FormatStringInfo has
/// been populated.
bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
                               FormatStringInfo *FSI) {
  FSI->HasVAListArg = Format->getFirstArg() == 0;
  FSI->FormatIdx = Format->getFormatIdx() - 1;
  FSI->FirstDataArg = FSI->HasVAListArg ? 0 : Format->getFirstArg() - 1;

  // The way the format attribute works in GCC, the implicit this argument
  // of member functions is counted. However, it doesn't appear in our own
  // lists, so decrement format_idx in that case.
  if (IsCXXMember) {
    if(FSI->FormatIdx == 0)
      return false;
    --FSI->FormatIdx;
    if (FSI->FirstDataArg != 0)
      --FSI->FirstDataArg;
  }
  return true;
}

/// Checks if the given expression evaluates to null.
///
/// Returns true if the value evaluates to null.
static bool CheckNonNullExpr(Sema &S, const Expr *Expr) {
  // If the expression has non-null type, it doesn't evaluate to null.
  if (auto nullability
        = Expr->IgnoreImplicit()->getType()->getNullability(S.Context)) {
    if (*nullability == NullabilityKind::NonNull)
      return false;
  }

  // As a special case, transparent unions initialized with zero are
  // considered null for the purposes of the nonnull attribute.
  if (const RecordType *UT = Expr->getType()->getAsUnionType()) {
    if (UT->getDecl()->hasAttr<TransparentUnionAttr>())
      if (const CompoundLiteralExpr *CLE =
            dyn_cast<CompoundLiteralExpr>(Expr))
        if (const InitListExpr *ILE =
              dyn_cast<InitListExpr>(CLE->getInitializer()))
          Expr = ILE->getInit(0);
  }

  bool Result;
  return (!Expr->isValueDependent() &&
          Expr->EvaluateAsBooleanCondition(Result, S.Context) &&
          !Result);
}

static void CheckNonNullArgument(Sema &S,
                                 const Expr *ArgExpr,
                                 SourceLocation CallSiteLoc) {
  if (CheckNonNullExpr(S, ArgExpr))
    S.DiagRuntimeBehavior(CallSiteLoc, ArgExpr,
                          S.PDiag(diag::warn_null_arg)
                            << ArgExpr->getSourceRange());
}

bool Sema::GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx) {
  FormatStringInfo FSI;
  if ((GetFormatStringType(Format) == FST_NSString) &&
      getFormatStringInfo(Format, false, &FSI)) {
    Idx = FSI.FormatIdx;
    return true;
  }
  return false;
}

/// Diagnose use of %s directive in an NSString which is being passed
/// as formatting string to formatting method.
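///
/// Illustrative example (not from the original sources): with a literal
/// NSString format string such as
///
///   NSLog(@"%s", name);
///
/// the C-string %s directive is flagged (warn_objc_cdirective_format_string);
/// %@ is the usual encoding-safe replacement. NSLog is only one example of a
/// callee with an NSString format attribute.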
static void
DiagnoseCStringFormatDirectiveInCFAPI(Sema &S,
                                      const NamedDecl *FDecl,
                                      Expr **Args,
                                      unsigned NumArgs) {
  unsigned Idx = 0;
  bool Format = false;
  ObjCStringFormatFamily SFFamily = FDecl->getObjCFStringFormattingFamily();
  if (SFFamily == ObjCStringFormatFamily::SFF_CFString) {
    Idx = 2;
    Format = true;
  }
  else
    for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
      if (S.GetFormatNSStringIdx(I, Idx)) {
        Format = true;
        break;
      }
    }
  if (!Format || NumArgs <= Idx)
    return;
  const Expr *FormatExpr = Args[Idx];
  if (const CStyleCastExpr *CSCE = dyn_cast<CStyleCastExpr>(FormatExpr))
    FormatExpr = CSCE->getSubExpr();
  const StringLiteral *FormatString;
  if (const ObjCStringLiteral *OSL =
        dyn_cast<ObjCStringLiteral>(FormatExpr->IgnoreParenImpCasts()))
    FormatString = OSL->getString();
  else
    FormatString = dyn_cast<StringLiteral>(FormatExpr->IgnoreParenImpCasts());
  if (!FormatString)
    return;
  if (S.FormatStringHasSArg(FormatString)) {
    S.Diag(FormatExpr->getExprLoc(), diag::warn_objc_cdirective_format_string)
      << "%s" << 1 << 1;
    S.Diag(FDecl->getLocation(), diag::note_entity_declared_at)
      << FDecl->getDeclName();
  }
}

/// Determine whether the given type has a non-null nullability annotation.
static bool isNonNullType(ASTContext &ctx, QualType type) {
  if (auto nullability = type->getNullability(ctx))
    return *nullability == NullabilityKind::NonNull;

  return false;
}

static void CheckNonNullArguments(Sema &S,
                                  const NamedDecl *FDecl,
                                  const FunctionProtoType *Proto,
                                  ArrayRef<const Expr *> Args,
                                  SourceLocation CallSiteLoc) {
  assert((FDecl || Proto) && "Need a function declaration or prototype");

  // Already checked by the constant evaluator.
  if (S.isConstantEvaluated())
    return;
  // Check the attributes attached to the method/function itself.
  llvm::SmallBitVector NonNullArgs;
  if (FDecl) {
    // Handle the nonnull attribute on the function/method declaration itself.
    for (const auto *NonNull : FDecl->specific_attrs<NonNullAttr>()) {
      if (!NonNull->args_size()) {
        // Easy case: all pointer arguments are nonnull.
        for (const auto *Arg : Args)
          if (S.isValidPointerAttrType(Arg->getType()))
            CheckNonNullArgument(S, Arg, CallSiteLoc);
        return;
      }

      for (const ParamIdx &Idx : NonNull->args()) {
        unsigned IdxAST = Idx.getASTIndex();
        if (IdxAST >= Args.size())
          continue;
        if (NonNullArgs.empty())
          NonNullArgs.resize(Args.size());
        NonNullArgs.set(IdxAST);
      }
    }
  }

  if (FDecl && (isa<FunctionDecl>(FDecl) || isa<ObjCMethodDecl>(FDecl))) {
    // Handle the nonnull attribute on the parameters of the
    // function/method.
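    // Illustrative sketch (not from the original sources): a parameter that is
    // marked nonnull directly, or whose type carries _Nonnull, e.g.
    //
    //   void use(const char *p __attribute__((nonnull)));
    //   void use2(int *_Nonnull q);
    //
    // has its index recorded in NonNullArgs below, so passing a null constant
    // for it is reported via warn_null_arg at the call site.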
4241 ArrayRef<ParmVarDecl*> parms; 4242 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(FDecl)) 4243 parms = FD->parameters(); 4244 else 4245 parms = cast<ObjCMethodDecl>(FDecl)->parameters(); 4246 4247 unsigned ParamIndex = 0; 4248 for (ArrayRef<ParmVarDecl*>::iterator I = parms.begin(), E = parms.end(); 4249 I != E; ++I, ++ParamIndex) { 4250 const ParmVarDecl *PVD = *I; 4251 if (PVD->hasAttr<NonNullAttr>() || 4252 isNonNullType(S.Context, PVD->getType())) { 4253 if (NonNullArgs.empty()) 4254 NonNullArgs.resize(Args.size()); 4255 4256 NonNullArgs.set(ParamIndex); 4257 } 4258 } 4259 } else { 4260 // If we have a non-function, non-method declaration but no 4261 // function prototype, try to dig out the function prototype. 4262 if (!Proto) { 4263 if (const ValueDecl *VD = dyn_cast<ValueDecl>(FDecl)) { 4264 QualType type = VD->getType().getNonReferenceType(); 4265 if (auto pointerType = type->getAs<PointerType>()) 4266 type = pointerType->getPointeeType(); 4267 else if (auto blockType = type->getAs<BlockPointerType>()) 4268 type = blockType->getPointeeType(); 4269 // FIXME: data member pointers? 4270 4271 // Dig out the function prototype, if there is one. 4272 Proto = type->getAs<FunctionProtoType>(); 4273 } 4274 } 4275 4276 // Fill in non-null argument information from the nullability 4277 // information on the parameter types (if we have them). 4278 if (Proto) { 4279 unsigned Index = 0; 4280 for (auto paramType : Proto->getParamTypes()) { 4281 if (isNonNullType(S.Context, paramType)) { 4282 if (NonNullArgs.empty()) 4283 NonNullArgs.resize(Args.size()); 4284 4285 NonNullArgs.set(Index); 4286 } 4287 4288 ++Index; 4289 } 4290 } 4291 } 4292 4293 // Check for non-null arguments. 4294 for (unsigned ArgIndex = 0, ArgIndexEnd = NonNullArgs.size(); 4295 ArgIndex != ArgIndexEnd; ++ArgIndex) { 4296 if (NonNullArgs[ArgIndex]) 4297 CheckNonNullArgument(S, Args[ArgIndex], CallSiteLoc); 4298 } 4299 } 4300 4301 /// Handles the checks for format strings, non-POD arguments to vararg 4302 /// functions, NULL arguments passed to non-NULL parameters, and diagnose_if 4303 /// attributes. 4304 void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, 4305 const Expr *ThisArg, ArrayRef<const Expr *> Args, 4306 bool IsMemberFunction, SourceLocation Loc, 4307 SourceRange Range, VariadicCallType CallType) { 4308 // FIXME: We should check as much as we can in the template definition. 4309 if (CurContext->isDependentContext()) 4310 return; 4311 4312 // Printf and scanf checking. 4313 llvm::SmallBitVector CheckedVarArgs; 4314 if (FDecl) { 4315 for (const auto *I : FDecl->specific_attrs<FormatAttr>()) { 4316 // Only create vector if there are format attributes. 4317 CheckedVarArgs.resize(Args.size()); 4318 4319 CheckFormatArguments(I, Args, IsMemberFunction, CallType, Loc, Range, 4320 CheckedVarArgs); 4321 } 4322 } 4323 4324 // Refuse POD arguments that weren't caught by the format string 4325 // checks above. 4326 auto *FD = dyn_cast_or_null<FunctionDecl>(FDecl); 4327 if (CallType != VariadicDoesNotApply && 4328 (!FD || FD->getBuiltinID() != Builtin::BI__noop)) { 4329 unsigned NumParams = Proto ? Proto->getNumParams() 4330 : FDecl && isa<FunctionDecl>(FDecl) 4331 ? cast<FunctionDecl>(FDecl)->getNumParams() 4332 : FDecl && isa<ObjCMethodDecl>(FDecl) 4333 ? cast<ObjCMethodDecl>(FDecl)->param_size() 4334 : 0; 4335 4336 for (unsigned ArgIdx = NumParams; ArgIdx < Args.size(); ++ArgIdx) { 4337 // Args[ArgIdx] can be null in malformed code. 
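      // Illustrative note (not from the original sources): this is the path
      // that flags passing a non-trivial C++ object through the ellipsis of a
      // variadic callee, e.g. printf("%s", std::string("x")), when the
      // argument was not already handled by the format-string checks above.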
4338 if (const Expr *Arg = Args[ArgIdx]) { 4339 if (CheckedVarArgs.empty() || !CheckedVarArgs[ArgIdx]) 4340 checkVariadicArgument(Arg, CallType); 4341 } 4342 } 4343 } 4344 4345 if (FDecl || Proto) { 4346 CheckNonNullArguments(*this, FDecl, Proto, Args, Loc); 4347 4348 // Type safety checking. 4349 if (FDecl) { 4350 for (const auto *I : FDecl->specific_attrs<ArgumentWithTypeTagAttr>()) 4351 CheckArgumentWithTypeTag(I, Args, Loc); 4352 } 4353 } 4354 4355 if (FD) 4356 diagnoseArgDependentDiagnoseIfAttrs(FD, ThisArg, Args, Loc); 4357 } 4358 4359 /// CheckConstructorCall - Check a constructor call for correctness and safety 4360 /// properties not enforced by the C type system. 4361 void Sema::CheckConstructorCall(FunctionDecl *FDecl, 4362 ArrayRef<const Expr *> Args, 4363 const FunctionProtoType *Proto, 4364 SourceLocation Loc) { 4365 VariadicCallType CallType = 4366 Proto->isVariadic() ? VariadicConstructor : VariadicDoesNotApply; 4367 checkCall(FDecl, Proto, /*ThisArg=*/nullptr, Args, /*IsMemberFunction=*/true, 4368 Loc, SourceRange(), CallType); 4369 } 4370 4371 /// CheckFunctionCall - Check a direct function call for various correctness 4372 /// and safety properties not strictly enforced by the C type system. 4373 bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, 4374 const FunctionProtoType *Proto) { 4375 bool IsMemberOperatorCall = isa<CXXOperatorCallExpr>(TheCall) && 4376 isa<CXXMethodDecl>(FDecl); 4377 bool IsMemberFunction = isa<CXXMemberCallExpr>(TheCall) || 4378 IsMemberOperatorCall; 4379 VariadicCallType CallType = getVariadicCallType(FDecl, Proto, 4380 TheCall->getCallee()); 4381 Expr** Args = TheCall->getArgs(); 4382 unsigned NumArgs = TheCall->getNumArgs(); 4383 4384 Expr *ImplicitThis = nullptr; 4385 if (IsMemberOperatorCall) { 4386 // If this is a call to a member operator, hide the first argument 4387 // from checkCall. 4388 // FIXME: Our choice of AST representation here is less than ideal. 4389 ImplicitThis = Args[0]; 4390 ++Args; 4391 --NumArgs; 4392 } else if (IsMemberFunction) 4393 ImplicitThis = 4394 cast<CXXMemberCallExpr>(TheCall)->getImplicitObjectArgument(); 4395 4396 checkCall(FDecl, Proto, ImplicitThis, llvm::makeArrayRef(Args, NumArgs), 4397 IsMemberFunction, TheCall->getRParenLoc(), 4398 TheCall->getCallee()->getSourceRange(), CallType); 4399 4400 IdentifierInfo *FnInfo = FDecl->getIdentifier(); 4401 // None of the checks below are needed for functions that don't have 4402 // simple names (e.g., C++ conversion functions). 4403 if (!FnInfo) 4404 return false; 4405 4406 CheckAbsoluteValueFunction(TheCall, FDecl); 4407 CheckMaxUnsignedZero(TheCall, FDecl); 4408 4409 if (getLangOpts().ObjC) 4410 DiagnoseCStringFormatDirectiveInCFAPI(*this, FDecl, Args, NumArgs); 4411 4412 unsigned CMId = FDecl->getMemoryFunctionKind(); 4413 if (CMId == 0) 4414 return false; 4415 4416 // Handle memory setting and copying functions. 4417 if (CMId == Builtin::BIstrlcpy || CMId == Builtin::BIstrlcat) 4418 CheckStrlcpycatArguments(TheCall, FnInfo); 4419 else if (CMId == Builtin::BIstrncat) 4420 CheckStrncatArguments(TheCall, FnInfo); 4421 else 4422 CheckMemaccessArguments(TheCall, CMId, FnInfo); 4423 4424 return false; 4425 } 4426 4427 bool Sema::CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation lbrac, 4428 ArrayRef<const Expr *> Args) { 4429 VariadicCallType CallType = 4430 Method->isVariadic() ? 
VariadicMethod : VariadicDoesNotApply; 4431 4432 checkCall(Method, nullptr, /*ThisArg=*/nullptr, Args, 4433 /*IsMemberFunction=*/false, lbrac, Method->getSourceRange(), 4434 CallType); 4435 4436 return false; 4437 } 4438 4439 bool Sema::CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, 4440 const FunctionProtoType *Proto) { 4441 QualType Ty; 4442 if (const auto *V = dyn_cast<VarDecl>(NDecl)) 4443 Ty = V->getType().getNonReferenceType(); 4444 else if (const auto *F = dyn_cast<FieldDecl>(NDecl)) 4445 Ty = F->getType().getNonReferenceType(); 4446 else 4447 return false; 4448 4449 if (!Ty->isBlockPointerType() && !Ty->isFunctionPointerType() && 4450 !Ty->isFunctionProtoType()) 4451 return false; 4452 4453 VariadicCallType CallType; 4454 if (!Proto || !Proto->isVariadic()) { 4455 CallType = VariadicDoesNotApply; 4456 } else if (Ty->isBlockPointerType()) { 4457 CallType = VariadicBlock; 4458 } else { // Ty->isFunctionPointerType() 4459 CallType = VariadicFunction; 4460 } 4461 4462 checkCall(NDecl, Proto, /*ThisArg=*/nullptr, 4463 llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()), 4464 /*IsMemberFunction=*/false, TheCall->getRParenLoc(), 4465 TheCall->getCallee()->getSourceRange(), CallType); 4466 4467 return false; 4468 } 4469 4470 /// Checks function calls when a FunctionDecl or a NamedDecl is not available, 4471 /// such as function pointers returned from functions. 4472 bool Sema::CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto) { 4473 VariadicCallType CallType = getVariadicCallType(/*FDecl=*/nullptr, Proto, 4474 TheCall->getCallee()); 4475 checkCall(/*FDecl=*/nullptr, Proto, /*ThisArg=*/nullptr, 4476 llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()), 4477 /*IsMemberFunction=*/false, TheCall->getRParenLoc(), 4478 TheCall->getCallee()->getSourceRange(), CallType); 4479 4480 return false; 4481 } 4482 4483 static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) { 4484 if (!llvm::isValidAtomicOrderingCABI(Ordering)) 4485 return false; 4486 4487 auto OrderingCABI = (llvm::AtomicOrderingCABI)Ordering; 4488 switch (Op) { 4489 case AtomicExpr::AO__c11_atomic_init: 4490 case AtomicExpr::AO__opencl_atomic_init: 4491 llvm_unreachable("There is no ordering argument for an init"); 4492 4493 case AtomicExpr::AO__c11_atomic_load: 4494 case AtomicExpr::AO__opencl_atomic_load: 4495 case AtomicExpr::AO__atomic_load_n: 4496 case AtomicExpr::AO__atomic_load: 4497 return OrderingCABI != llvm::AtomicOrderingCABI::release && 4498 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel; 4499 4500 case AtomicExpr::AO__c11_atomic_store: 4501 case AtomicExpr::AO__opencl_atomic_store: 4502 case AtomicExpr::AO__atomic_store: 4503 case AtomicExpr::AO__atomic_store_n: 4504 return OrderingCABI != llvm::AtomicOrderingCABI::consume && 4505 OrderingCABI != llvm::AtomicOrderingCABI::acquire && 4506 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel; 4507 4508 default: 4509 return true; 4510 } 4511 } 4512 4513 ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult, 4514 AtomicExpr::AtomicOp Op) { 4515 CallExpr *TheCall = cast<CallExpr>(TheCallResult.get()); 4516 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 4517 MultiExprArg Args{TheCall->getArgs(), TheCall->getNumArgs()}; 4518 return BuildAtomicExpr({TheCall->getBeginLoc(), TheCall->getEndLoc()}, 4519 DRE->getSourceRange(), TheCall->getRParenLoc(), Args, 4520 Op); 4521 } 4522 4523 ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange, 4524 SourceLocation 
RParenLoc, MultiExprArg Args, 4525 AtomicExpr::AtomicOp Op, 4526 AtomicArgumentOrder ArgOrder) { 4527 // All the non-OpenCL operations take one of the following forms. 4528 // The OpenCL operations take the __c11 forms with one extra argument for 4529 // synchronization scope. 4530 enum { 4531 // C __c11_atomic_init(A *, C) 4532 Init, 4533 4534 // C __c11_atomic_load(A *, int) 4535 Load, 4536 4537 // void __atomic_load(A *, CP, int) 4538 LoadCopy, 4539 4540 // void __atomic_store(A *, CP, int) 4541 Copy, 4542 4543 // C __c11_atomic_add(A *, M, int) 4544 Arithmetic, 4545 4546 // C __atomic_exchange_n(A *, CP, int) 4547 Xchg, 4548 4549 // void __atomic_exchange(A *, C *, CP, int) 4550 GNUXchg, 4551 4552 // bool __c11_atomic_compare_exchange_strong(A *, C *, CP, int, int) 4553 C11CmpXchg, 4554 4555 // bool __atomic_compare_exchange(A *, C *, CP, bool, int, int) 4556 GNUCmpXchg 4557 } Form = Init; 4558 4559 const unsigned NumForm = GNUCmpXchg + 1; 4560 const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 3, 4, 5, 6 }; 4561 const unsigned NumVals[] = { 1, 0, 1, 1, 1, 1, 2, 2, 3 }; 4562 // where: 4563 // C is an appropriate type, 4564 // A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins, 4565 // CP is C for __c11 builtins and GNU _n builtins and is C * otherwise, 4566 // M is C if C is an integer, and ptrdiff_t if C is a pointer, and 4567 // the int parameters are for orderings. 4568 4569 static_assert(sizeof(NumArgs)/sizeof(NumArgs[0]) == NumForm 4570 && sizeof(NumVals)/sizeof(NumVals[0]) == NumForm, 4571 "need to update code for modified forms"); 4572 static_assert(AtomicExpr::AO__c11_atomic_init == 0 && 4573 AtomicExpr::AO__c11_atomic_fetch_xor + 1 == 4574 AtomicExpr::AO__atomic_load, 4575 "need to update code for modified C11 atomics"); 4576 bool IsOpenCL = Op >= AtomicExpr::AO__opencl_atomic_init && 4577 Op <= AtomicExpr::AO__opencl_atomic_fetch_max; 4578 bool IsC11 = (Op >= AtomicExpr::AO__c11_atomic_init && 4579 Op <= AtomicExpr::AO__c11_atomic_fetch_xor) || 4580 IsOpenCL; 4581 bool IsN = Op == AtomicExpr::AO__atomic_load_n || 4582 Op == AtomicExpr::AO__atomic_store_n || 4583 Op == AtomicExpr::AO__atomic_exchange_n || 4584 Op == AtomicExpr::AO__atomic_compare_exchange_n; 4585 bool IsAddSub = false; 4586 bool IsMinMax = false; 4587 4588 switch (Op) { 4589 case AtomicExpr::AO__c11_atomic_init: 4590 case AtomicExpr::AO__opencl_atomic_init: 4591 Form = Init; 4592 break; 4593 4594 case AtomicExpr::AO__c11_atomic_load: 4595 case AtomicExpr::AO__opencl_atomic_load: 4596 case AtomicExpr::AO__atomic_load_n: 4597 Form = Load; 4598 break; 4599 4600 case AtomicExpr::AO__atomic_load: 4601 Form = LoadCopy; 4602 break; 4603 4604 case AtomicExpr::AO__c11_atomic_store: 4605 case AtomicExpr::AO__opencl_atomic_store: 4606 case AtomicExpr::AO__atomic_store: 4607 case AtomicExpr::AO__atomic_store_n: 4608 Form = Copy; 4609 break; 4610 4611 case AtomicExpr::AO__c11_atomic_fetch_add: 4612 case AtomicExpr::AO__c11_atomic_fetch_sub: 4613 case AtomicExpr::AO__opencl_atomic_fetch_add: 4614 case AtomicExpr::AO__opencl_atomic_fetch_sub: 4615 case AtomicExpr::AO__opencl_atomic_fetch_min: 4616 case AtomicExpr::AO__opencl_atomic_fetch_max: 4617 case AtomicExpr::AO__atomic_fetch_add: 4618 case AtomicExpr::AO__atomic_fetch_sub: 4619 case AtomicExpr::AO__atomic_add_fetch: 4620 case AtomicExpr::AO__atomic_sub_fetch: 4621 IsAddSub = true; 4622 LLVM_FALLTHROUGH; 4623 case AtomicExpr::AO__c11_atomic_fetch_and: 4624 case AtomicExpr::AO__c11_atomic_fetch_or: 4625 case AtomicExpr::AO__c11_atomic_fetch_xor: 4626 case 
AtomicExpr::AO__opencl_atomic_fetch_and: 4627 case AtomicExpr::AO__opencl_atomic_fetch_or: 4628 case AtomicExpr::AO__opencl_atomic_fetch_xor: 4629 case AtomicExpr::AO__atomic_fetch_and: 4630 case AtomicExpr::AO__atomic_fetch_or: 4631 case AtomicExpr::AO__atomic_fetch_xor: 4632 case AtomicExpr::AO__atomic_fetch_nand: 4633 case AtomicExpr::AO__atomic_and_fetch: 4634 case AtomicExpr::AO__atomic_or_fetch: 4635 case AtomicExpr::AO__atomic_xor_fetch: 4636 case AtomicExpr::AO__atomic_nand_fetch: 4637 Form = Arithmetic; 4638 break; 4639 4640 case AtomicExpr::AO__atomic_fetch_min: 4641 case AtomicExpr::AO__atomic_fetch_max: 4642 IsMinMax = true; 4643 Form = Arithmetic; 4644 break; 4645 4646 case AtomicExpr::AO__c11_atomic_exchange: 4647 case AtomicExpr::AO__opencl_atomic_exchange: 4648 case AtomicExpr::AO__atomic_exchange_n: 4649 Form = Xchg; 4650 break; 4651 4652 case AtomicExpr::AO__atomic_exchange: 4653 Form = GNUXchg; 4654 break; 4655 4656 case AtomicExpr::AO__c11_atomic_compare_exchange_strong: 4657 case AtomicExpr::AO__c11_atomic_compare_exchange_weak: 4658 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong: 4659 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak: 4660 Form = C11CmpXchg; 4661 break; 4662 4663 case AtomicExpr::AO__atomic_compare_exchange: 4664 case AtomicExpr::AO__atomic_compare_exchange_n: 4665 Form = GNUCmpXchg; 4666 break; 4667 } 4668 4669 unsigned AdjustedNumArgs = NumArgs[Form]; 4670 if (IsOpenCL && Op != AtomicExpr::AO__opencl_atomic_init) 4671 ++AdjustedNumArgs; 4672 // Check we have the right number of arguments. 4673 if (Args.size() < AdjustedNumArgs) { 4674 Diag(CallRange.getEnd(), diag::err_typecheck_call_too_few_args) 4675 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size()) 4676 << ExprRange; 4677 return ExprError(); 4678 } else if (Args.size() > AdjustedNumArgs) { 4679 Diag(Args[AdjustedNumArgs]->getBeginLoc(), 4680 diag::err_typecheck_call_too_many_args) 4681 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size()) 4682 << ExprRange; 4683 return ExprError(); 4684 } 4685 4686 // Inspect the first argument of the atomic operation. 4687 Expr *Ptr = Args[0]; 4688 ExprResult ConvertedPtr = DefaultFunctionArrayLvalueConversion(Ptr); 4689 if (ConvertedPtr.isInvalid()) 4690 return ExprError(); 4691 4692 Ptr = ConvertedPtr.get(); 4693 const PointerType *pointerType = Ptr->getType()->getAs<PointerType>(); 4694 if (!pointerType) { 4695 Diag(ExprRange.getBegin(), diag::err_atomic_builtin_must_be_pointer) 4696 << Ptr->getType() << Ptr->getSourceRange(); 4697 return ExprError(); 4698 } 4699 4700 // For a __c11 builtin, this should be a pointer to an _Atomic type. 4701 QualType AtomTy = pointerType->getPointeeType(); // 'A' 4702 QualType ValType = AtomTy; // 'C' 4703 if (IsC11) { 4704 if (!AtomTy->isAtomicType()) { 4705 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic) 4706 << Ptr->getType() << Ptr->getSourceRange(); 4707 return ExprError(); 4708 } 4709 if ((Form != Load && Form != LoadCopy && AtomTy.isConstQualified()) || 4710 AtomTy.getAddressSpace() == LangAS::opencl_constant) { 4711 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_atomic) 4712 << (AtomTy.isConstQualified() ? 
0 : 1) << Ptr->getType() 4713 << Ptr->getSourceRange(); 4714 return ExprError(); 4715 } 4716 ValType = AtomTy->castAs<AtomicType>()->getValueType(); 4717 } else if (Form != Load && Form != LoadCopy) { 4718 if (ValType.isConstQualified()) { 4719 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_pointer) 4720 << Ptr->getType() << Ptr->getSourceRange(); 4721 return ExprError(); 4722 } 4723 } 4724 4725 // For an arithmetic operation, the implied arithmetic must be well-formed. 4726 if (Form == Arithmetic) { 4727 // gcc does not enforce these rules for GNU atomics, but we do so for sanity. 4728 if (IsAddSub && !ValType->isIntegerType() 4729 && !ValType->isPointerType()) { 4730 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_or_ptr) 4731 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 4732 return ExprError(); 4733 } 4734 if (IsMinMax) { 4735 const BuiltinType *BT = ValType->getAs<BuiltinType>(); 4736 if (!BT || (BT->getKind() != BuiltinType::Int && 4737 BT->getKind() != BuiltinType::UInt)) { 4738 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_int32_or_ptr); 4739 return ExprError(); 4740 } 4741 } 4742 if (!IsAddSub && !IsMinMax && !ValType->isIntegerType()) { 4743 Diag(ExprRange.getBegin(), diag::err_atomic_op_bitwise_needs_atomic_int) 4744 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 4745 return ExprError(); 4746 } 4747 if (IsC11 && ValType->isPointerType() && 4748 RequireCompleteType(Ptr->getBeginLoc(), ValType->getPointeeType(), 4749 diag::err_incomplete_type)) { 4750 return ExprError(); 4751 } 4752 } else if (IsN && !ValType->isIntegerType() && !ValType->isPointerType()) { 4753 // For __atomic_*_n operations, the value type must be a scalar integral or 4754 // pointer type which is 1, 2, 4, 8 or 16 bytes in length. 4755 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_or_ptr) 4756 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 4757 return ExprError(); 4758 } 4759 4760 if (!IsC11 && !AtomTy.isTriviallyCopyableType(Context) && 4761 !AtomTy->isScalarType()) { 4762 // For GNU atomics, require a trivially-copyable type. This is not part of 4763 // the GNU atomics specification, but we enforce it for sanity. 4764 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_trivial_copy) 4765 << Ptr->getType() << Ptr->getSourceRange(); 4766 return ExprError(); 4767 } 4768 4769 switch (ValType.getObjCLifetime()) { 4770 case Qualifiers::OCL_None: 4771 case Qualifiers::OCL_ExplicitNone: 4772 // okay 4773 break; 4774 4775 case Qualifiers::OCL_Weak: 4776 case Qualifiers::OCL_Strong: 4777 case Qualifiers::OCL_Autoreleasing: 4778 // FIXME: Can this happen? By this point, ValType should be known 4779 // to be trivially copyable. 4780 Diag(ExprRange.getBegin(), diag::err_arc_atomic_ownership) 4781 << ValType << Ptr->getSourceRange(); 4782 return ExprError(); 4783 } 4784 4785 // All atomic operations have an overload which takes a pointer to a volatile 4786 // 'A'. We shouldn't let the volatile-ness of the pointee-type inject itself 4787 // into the result or the other operands. Similarly atomic_load takes a 4788 // pointer to a const 'A'. 4789 ValType.removeLocalVolatile(); 4790 ValType.removeLocalConst(); 4791 QualType ResultType = ValType; 4792 if (Form == Copy || Form == LoadCopy || Form == GNUXchg || 4793 Form == Init) 4794 ResultType = Context.VoidTy; 4795 else if (Form == C11CmpXchg || Form == GNUCmpXchg) 4796 ResultType = Context.BoolTy; 4797 4798 // The type of a parameter passed 'by value'. 
In the GNU atomics, such 4799 // arguments are actually passed as pointers. 4800 QualType ByValType = ValType; // 'CP' 4801 bool IsPassedByAddress = false; 4802 if (!IsC11 && !IsN) { 4803 ByValType = Ptr->getType(); 4804 IsPassedByAddress = true; 4805 } 4806 4807 SmallVector<Expr *, 5> APIOrderedArgs; 4808 if (ArgOrder == Sema::AtomicArgumentOrder::AST) { 4809 APIOrderedArgs.push_back(Args[0]); 4810 switch (Form) { 4811 case Init: 4812 case Load: 4813 APIOrderedArgs.push_back(Args[1]); // Val1/Order 4814 break; 4815 case LoadCopy: 4816 case Copy: 4817 case Arithmetic: 4818 case Xchg: 4819 APIOrderedArgs.push_back(Args[2]); // Val1 4820 APIOrderedArgs.push_back(Args[1]); // Order 4821 break; 4822 case GNUXchg: 4823 APIOrderedArgs.push_back(Args[2]); // Val1 4824 APIOrderedArgs.push_back(Args[3]); // Val2 4825 APIOrderedArgs.push_back(Args[1]); // Order 4826 break; 4827 case C11CmpXchg: 4828 APIOrderedArgs.push_back(Args[2]); // Val1 4829 APIOrderedArgs.push_back(Args[4]); // Val2 4830 APIOrderedArgs.push_back(Args[1]); // Order 4831 APIOrderedArgs.push_back(Args[3]); // OrderFail 4832 break; 4833 case GNUCmpXchg: 4834 APIOrderedArgs.push_back(Args[2]); // Val1 4835 APIOrderedArgs.push_back(Args[4]); // Val2 4836 APIOrderedArgs.push_back(Args[5]); // Weak 4837 APIOrderedArgs.push_back(Args[1]); // Order 4838 APIOrderedArgs.push_back(Args[3]); // OrderFail 4839 break; 4840 } 4841 } else 4842 APIOrderedArgs.append(Args.begin(), Args.end()); 4843 4844 // The first argument's non-CV pointer type is used to deduce the type of 4845 // subsequent arguments, except for: 4846 // - weak flag (always converted to bool) 4847 // - memory order (always converted to int) 4848 // - scope (always converted to int) 4849 for (unsigned i = 0; i != APIOrderedArgs.size(); ++i) { 4850 QualType Ty; 4851 if (i < NumVals[Form] + 1) { 4852 switch (i) { 4853 case 0: 4854 // The first argument is always a pointer. It has a fixed type. 4855 // It is always dereferenced, a nullptr is undefined. 4856 CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin()); 4857 // Nothing else to do: we already know all we want about this pointer. 4858 continue; 4859 case 1: 4860 // The second argument is the non-atomic operand. For arithmetic, this 4861 // is always passed by value, and for a compare_exchange it is always 4862 // passed by address. For the rest, GNU uses by-address and C11 uses 4863 // by-value. 4864 assert(Form != Load); 4865 if (Form == Init || (Form == Arithmetic && ValType->isIntegerType())) 4866 Ty = ValType; 4867 else if (Form == Copy || Form == Xchg) { 4868 if (IsPassedByAddress) { 4869 // The value pointer is always dereferenced, a nullptr is undefined. 4870 CheckNonNullArgument(*this, APIOrderedArgs[i], 4871 ExprRange.getBegin()); 4872 } 4873 Ty = ByValType; 4874 } else if (Form == Arithmetic) 4875 Ty = Context.getPointerDiffType(); 4876 else { 4877 Expr *ValArg = APIOrderedArgs[i]; 4878 // The value pointer is always dereferenced, a nullptr is undefined. 4879 CheckNonNullArgument(*this, ValArg, ExprRange.getBegin()); 4880 LangAS AS = LangAS::Default; 4881 // Keep address space of non-atomic pointer type. 
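        // Illustration (not from the original sources): for
        //   __c11_atomic_compare_exchange_strong(obj, &expected, ...)
        // this branch types the 'expected' operand as a pointer to the value
        // type, and the address space of the pointer that was actually passed
        // is preserved (relevant for OpenCL address-space qualified pointers).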
4882 if (const PointerType *PtrTy = 4883 ValArg->getType()->getAs<PointerType>()) { 4884 AS = PtrTy->getPointeeType().getAddressSpace(); 4885 } 4886 Ty = Context.getPointerType( 4887 Context.getAddrSpaceQualType(ValType.getUnqualifiedType(), AS)); 4888 } 4889 break; 4890 case 2: 4891 // The third argument to compare_exchange / GNU exchange is the desired 4892 // value, either by-value (for the C11 and *_n variant) or as a pointer. 4893 if (IsPassedByAddress) 4894 CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin()); 4895 Ty = ByValType; 4896 break; 4897 case 3: 4898 // The fourth argument to GNU compare_exchange is a 'weak' flag. 4899 Ty = Context.BoolTy; 4900 break; 4901 } 4902 } else { 4903 // The order(s) and scope are always converted to int. 4904 Ty = Context.IntTy; 4905 } 4906 4907 InitializedEntity Entity = 4908 InitializedEntity::InitializeParameter(Context, Ty, false); 4909 ExprResult Arg = APIOrderedArgs[i]; 4910 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 4911 if (Arg.isInvalid()) 4912 return true; 4913 APIOrderedArgs[i] = Arg.get(); 4914 } 4915 4916 // Permute the arguments into a 'consistent' order. 4917 SmallVector<Expr*, 5> SubExprs; 4918 SubExprs.push_back(Ptr); 4919 switch (Form) { 4920 case Init: 4921 // Note, AtomicExpr::getVal1() has a special case for this atomic. 4922 SubExprs.push_back(APIOrderedArgs[1]); // Val1 4923 break; 4924 case Load: 4925 SubExprs.push_back(APIOrderedArgs[1]); // Order 4926 break; 4927 case LoadCopy: 4928 case Copy: 4929 case Arithmetic: 4930 case Xchg: 4931 SubExprs.push_back(APIOrderedArgs[2]); // Order 4932 SubExprs.push_back(APIOrderedArgs[1]); // Val1 4933 break; 4934 case GNUXchg: 4935 // Note, AtomicExpr::getVal2() has a special case for this atomic. 4936 SubExprs.push_back(APIOrderedArgs[3]); // Order 4937 SubExprs.push_back(APIOrderedArgs[1]); // Val1 4938 SubExprs.push_back(APIOrderedArgs[2]); // Val2 4939 break; 4940 case C11CmpXchg: 4941 SubExprs.push_back(APIOrderedArgs[3]); // Order 4942 SubExprs.push_back(APIOrderedArgs[1]); // Val1 4943 SubExprs.push_back(APIOrderedArgs[4]); // OrderFail 4944 SubExprs.push_back(APIOrderedArgs[2]); // Val2 4945 break; 4946 case GNUCmpXchg: 4947 SubExprs.push_back(APIOrderedArgs[4]); // Order 4948 SubExprs.push_back(APIOrderedArgs[1]); // Val1 4949 SubExprs.push_back(APIOrderedArgs[5]); // OrderFail 4950 SubExprs.push_back(APIOrderedArgs[2]); // Val2 4951 SubExprs.push_back(APIOrderedArgs[3]); // Weak 4952 break; 4953 } 4954 4955 if (SubExprs.size() >= 2 && Form != Init) { 4956 llvm::APSInt Result(32); 4957 if (SubExprs[1]->isIntegerConstantExpr(Result, Context) && 4958 !isValidOrderingForOp(Result.getSExtValue(), Op)) 4959 Diag(SubExprs[1]->getBeginLoc(), 4960 diag::warn_atomic_op_has_invalid_memory_order) 4961 << SubExprs[1]->getSourceRange(); 4962 } 4963 4964 if (auto ScopeModel = AtomicExpr::getScopeModel(Op)) { 4965 auto *Scope = Args[Args.size() - 1]; 4966 llvm::APSInt Result(32); 4967 if (Scope->isIntegerConstantExpr(Result, Context) && 4968 !ScopeModel->isValid(Result.getZExtValue())) { 4969 Diag(Scope->getBeginLoc(), diag::err_atomic_op_has_invalid_synch_scope) 4970 << Scope->getSourceRange(); 4971 } 4972 SubExprs.push_back(Scope); 4973 } 4974 4975 AtomicExpr *AE = new (Context) 4976 AtomicExpr(ExprRange.getBegin(), SubExprs, ResultType, Op, RParenLoc); 4977 4978 if ((Op == AtomicExpr::AO__c11_atomic_load || 4979 Op == AtomicExpr::AO__c11_atomic_store || 4980 Op == AtomicExpr::AO__opencl_atomic_load || 4981 Op == AtomicExpr::AO__opencl_atomic_store ) 
&& 4982 Context.AtomicUsesUnsupportedLibcall(AE)) 4983 Diag(AE->getBeginLoc(), diag::err_atomic_load_store_uses_lib) 4984 << ((Op == AtomicExpr::AO__c11_atomic_load || 4985 Op == AtomicExpr::AO__opencl_atomic_load) 4986 ? 0 4987 : 1); 4988 4989 return AE; 4990 } 4991 4992 /// checkBuiltinArgument - Given a call to a builtin function, perform 4993 /// normal type-checking on the given argument, updating the call in 4994 /// place. This is useful when a builtin function requires custom 4995 /// type-checking for some of its arguments but not necessarily all of 4996 /// them. 4997 /// 4998 /// Returns true on error. 4999 static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) { 5000 FunctionDecl *Fn = E->getDirectCallee(); 5001 assert(Fn && "builtin call without direct callee!"); 5002 5003 ParmVarDecl *Param = Fn->getParamDecl(ArgIndex); 5004 InitializedEntity Entity = 5005 InitializedEntity::InitializeParameter(S.Context, Param); 5006 5007 ExprResult Arg = E->getArg(0); 5008 Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg); 5009 if (Arg.isInvalid()) 5010 return true; 5011 5012 E->setArg(ArgIndex, Arg.get()); 5013 return false; 5014 } 5015 5016 /// We have a call to a function like __sync_fetch_and_add, which is an 5017 /// overloaded function based on the pointer type of its first argument. 5018 /// The main BuildCallExpr routines have already promoted the types of 5019 /// arguments because all of these calls are prototyped as void(...). 5020 /// 5021 /// This function goes through and does final semantic checking for these 5022 /// builtins, as well as generating any warnings. 5023 ExprResult 5024 Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) { 5025 CallExpr *TheCall = static_cast<CallExpr *>(TheCallResult.get()); 5026 Expr *Callee = TheCall->getCallee(); 5027 DeclRefExpr *DRE = cast<DeclRefExpr>(Callee->IgnoreParenCasts()); 5028 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 5029 5030 // Ensure that we have at least one argument to do type inference from. 5031 if (TheCall->getNumArgs() < 1) { 5032 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 5033 << 0 << 1 << TheCall->getNumArgs() << Callee->getSourceRange(); 5034 return ExprError(); 5035 } 5036 5037 // Inspect the first argument of the atomic builtin. This should always be 5038 // a pointer type, whose element is an integral scalar or pointer type. 5039 // Because it is a pointer type, we don't have to worry about any implicit 5040 // casts here. 5041 // FIXME: We don't allow floating point scalars as input. 
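  // Illustrative sketch (not from the original sources): the pointee type
  // selects the concrete builtin below, so on a target with a 4-byte int
  //
  //   int v = 0;
  //   __sync_fetch_and_add(&v, 1);    // becomes __sync_fetch_and_add_4
  //   __sync_fetch_and_add(&v, 1.5);  // second argument is converted to int
  //
  // while a first argument that is not a pointer to an integer or pointer
  // type is rejected with err_atomic_builtin_must_be_pointer_intptr.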
5042 Expr *FirstArg = TheCall->getArg(0); 5043 ExprResult FirstArgResult = DefaultFunctionArrayLvalueConversion(FirstArg); 5044 if (FirstArgResult.isInvalid()) 5045 return ExprError(); 5046 FirstArg = FirstArgResult.get(); 5047 TheCall->setArg(0, FirstArg); 5048 5049 const PointerType *pointerType = FirstArg->getType()->getAs<PointerType>(); 5050 if (!pointerType) { 5051 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer) 5052 << FirstArg->getType() << FirstArg->getSourceRange(); 5053 return ExprError(); 5054 } 5055 5056 QualType ValType = pointerType->getPointeeType(); 5057 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 5058 !ValType->isBlockPointerType()) { 5059 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intptr) 5060 << FirstArg->getType() << FirstArg->getSourceRange(); 5061 return ExprError(); 5062 } 5063 5064 if (ValType.isConstQualified()) { 5065 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_cannot_be_const) 5066 << FirstArg->getType() << FirstArg->getSourceRange(); 5067 return ExprError(); 5068 } 5069 5070 switch (ValType.getObjCLifetime()) { 5071 case Qualifiers::OCL_None: 5072 case Qualifiers::OCL_ExplicitNone: 5073 // okay 5074 break; 5075 5076 case Qualifiers::OCL_Weak: 5077 case Qualifiers::OCL_Strong: 5078 case Qualifiers::OCL_Autoreleasing: 5079 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership) 5080 << ValType << FirstArg->getSourceRange(); 5081 return ExprError(); 5082 } 5083 5084 // Strip any qualifiers off ValType. 5085 ValType = ValType.getUnqualifiedType(); 5086 5087 // The majority of builtins return a value, but a few have special return 5088 // types, so allow them to override appropriately below. 5089 QualType ResultType = ValType; 5090 5091 // We need to figure out which concrete builtin this maps onto. For example, 5092 // __sync_fetch_and_add with a 2 byte object turns into 5093 // __sync_fetch_and_add_2. 5094 #define BUILTIN_ROW(x) \ 5095 { Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \ 5096 Builtin::BI##x##_8, Builtin::BI##x##_16 } 5097 5098 static const unsigned BuiltinIndices[][5] = { 5099 BUILTIN_ROW(__sync_fetch_and_add), 5100 BUILTIN_ROW(__sync_fetch_and_sub), 5101 BUILTIN_ROW(__sync_fetch_and_or), 5102 BUILTIN_ROW(__sync_fetch_and_and), 5103 BUILTIN_ROW(__sync_fetch_and_xor), 5104 BUILTIN_ROW(__sync_fetch_and_nand), 5105 5106 BUILTIN_ROW(__sync_add_and_fetch), 5107 BUILTIN_ROW(__sync_sub_and_fetch), 5108 BUILTIN_ROW(__sync_and_and_fetch), 5109 BUILTIN_ROW(__sync_or_and_fetch), 5110 BUILTIN_ROW(__sync_xor_and_fetch), 5111 BUILTIN_ROW(__sync_nand_and_fetch), 5112 5113 BUILTIN_ROW(__sync_val_compare_and_swap), 5114 BUILTIN_ROW(__sync_bool_compare_and_swap), 5115 BUILTIN_ROW(__sync_lock_test_and_set), 5116 BUILTIN_ROW(__sync_lock_release), 5117 BUILTIN_ROW(__sync_swap) 5118 }; 5119 #undef BUILTIN_ROW 5120 5121 // Determine the index of the size. 5122 unsigned SizeIndex; 5123 switch (Context.getTypeSizeInChars(ValType).getQuantity()) { 5124 case 1: SizeIndex = 0; break; 5125 case 2: SizeIndex = 1; break; 5126 case 4: SizeIndex = 2; break; 5127 case 8: SizeIndex = 3; break; 5128 case 16: SizeIndex = 4; break; 5129 default: 5130 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_pointer_size) 5131 << FirstArg->getType() << FirstArg->getSourceRange(); 5132 return ExprError(); 5133 } 5134 5135 // Each of these builtins has one pointer argument, followed by some number of 5136 // values (0, 1 or 2) followed by a potentially empty varags list of stuff 5137 // that we ignore. 
Find out which row of BuiltinIndices to read from as well 5138 // as the number of fixed args. 5139 unsigned BuiltinID = FDecl->getBuiltinID(); 5140 unsigned BuiltinIndex, NumFixed = 1; 5141 bool WarnAboutSemanticsChange = false; 5142 switch (BuiltinID) { 5143 default: llvm_unreachable("Unknown overloaded atomic builtin!"); 5144 case Builtin::BI__sync_fetch_and_add: 5145 case Builtin::BI__sync_fetch_and_add_1: 5146 case Builtin::BI__sync_fetch_and_add_2: 5147 case Builtin::BI__sync_fetch_and_add_4: 5148 case Builtin::BI__sync_fetch_and_add_8: 5149 case Builtin::BI__sync_fetch_and_add_16: 5150 BuiltinIndex = 0; 5151 break; 5152 5153 case Builtin::BI__sync_fetch_and_sub: 5154 case Builtin::BI__sync_fetch_and_sub_1: 5155 case Builtin::BI__sync_fetch_and_sub_2: 5156 case Builtin::BI__sync_fetch_and_sub_4: 5157 case Builtin::BI__sync_fetch_and_sub_8: 5158 case Builtin::BI__sync_fetch_and_sub_16: 5159 BuiltinIndex = 1; 5160 break; 5161 5162 case Builtin::BI__sync_fetch_and_or: 5163 case Builtin::BI__sync_fetch_and_or_1: 5164 case Builtin::BI__sync_fetch_and_or_2: 5165 case Builtin::BI__sync_fetch_and_or_4: 5166 case Builtin::BI__sync_fetch_and_or_8: 5167 case Builtin::BI__sync_fetch_and_or_16: 5168 BuiltinIndex = 2; 5169 break; 5170 5171 case Builtin::BI__sync_fetch_and_and: 5172 case Builtin::BI__sync_fetch_and_and_1: 5173 case Builtin::BI__sync_fetch_and_and_2: 5174 case Builtin::BI__sync_fetch_and_and_4: 5175 case Builtin::BI__sync_fetch_and_and_8: 5176 case Builtin::BI__sync_fetch_and_and_16: 5177 BuiltinIndex = 3; 5178 break; 5179 5180 case Builtin::BI__sync_fetch_and_xor: 5181 case Builtin::BI__sync_fetch_and_xor_1: 5182 case Builtin::BI__sync_fetch_and_xor_2: 5183 case Builtin::BI__sync_fetch_and_xor_4: 5184 case Builtin::BI__sync_fetch_and_xor_8: 5185 case Builtin::BI__sync_fetch_and_xor_16: 5186 BuiltinIndex = 4; 5187 break; 5188 5189 case Builtin::BI__sync_fetch_and_nand: 5190 case Builtin::BI__sync_fetch_and_nand_1: 5191 case Builtin::BI__sync_fetch_and_nand_2: 5192 case Builtin::BI__sync_fetch_and_nand_4: 5193 case Builtin::BI__sync_fetch_and_nand_8: 5194 case Builtin::BI__sync_fetch_and_nand_16: 5195 BuiltinIndex = 5; 5196 WarnAboutSemanticsChange = true; 5197 break; 5198 5199 case Builtin::BI__sync_add_and_fetch: 5200 case Builtin::BI__sync_add_and_fetch_1: 5201 case Builtin::BI__sync_add_and_fetch_2: 5202 case Builtin::BI__sync_add_and_fetch_4: 5203 case Builtin::BI__sync_add_and_fetch_8: 5204 case Builtin::BI__sync_add_and_fetch_16: 5205 BuiltinIndex = 6; 5206 break; 5207 5208 case Builtin::BI__sync_sub_and_fetch: 5209 case Builtin::BI__sync_sub_and_fetch_1: 5210 case Builtin::BI__sync_sub_and_fetch_2: 5211 case Builtin::BI__sync_sub_and_fetch_4: 5212 case Builtin::BI__sync_sub_and_fetch_8: 5213 case Builtin::BI__sync_sub_and_fetch_16: 5214 BuiltinIndex = 7; 5215 break; 5216 5217 case Builtin::BI__sync_and_and_fetch: 5218 case Builtin::BI__sync_and_and_fetch_1: 5219 case Builtin::BI__sync_and_and_fetch_2: 5220 case Builtin::BI__sync_and_and_fetch_4: 5221 case Builtin::BI__sync_and_and_fetch_8: 5222 case Builtin::BI__sync_and_and_fetch_16: 5223 BuiltinIndex = 8; 5224 break; 5225 5226 case Builtin::BI__sync_or_and_fetch: 5227 case Builtin::BI__sync_or_and_fetch_1: 5228 case Builtin::BI__sync_or_and_fetch_2: 5229 case Builtin::BI__sync_or_and_fetch_4: 5230 case Builtin::BI__sync_or_and_fetch_8: 5231 case Builtin::BI__sync_or_and_fetch_16: 5232 BuiltinIndex = 9; 5233 break; 5234 5235 case Builtin::BI__sync_xor_and_fetch: 5236 case Builtin::BI__sync_xor_and_fetch_1: 5237 case 
Builtin::BI__sync_xor_and_fetch_2: 5238 case Builtin::BI__sync_xor_and_fetch_4: 5239 case Builtin::BI__sync_xor_and_fetch_8: 5240 case Builtin::BI__sync_xor_and_fetch_16: 5241 BuiltinIndex = 10; 5242 break; 5243 5244 case Builtin::BI__sync_nand_and_fetch: 5245 case Builtin::BI__sync_nand_and_fetch_1: 5246 case Builtin::BI__sync_nand_and_fetch_2: 5247 case Builtin::BI__sync_nand_and_fetch_4: 5248 case Builtin::BI__sync_nand_and_fetch_8: 5249 case Builtin::BI__sync_nand_and_fetch_16: 5250 BuiltinIndex = 11; 5251 WarnAboutSemanticsChange = true; 5252 break; 5253 5254 case Builtin::BI__sync_val_compare_and_swap: 5255 case Builtin::BI__sync_val_compare_and_swap_1: 5256 case Builtin::BI__sync_val_compare_and_swap_2: 5257 case Builtin::BI__sync_val_compare_and_swap_4: 5258 case Builtin::BI__sync_val_compare_and_swap_8: 5259 case Builtin::BI__sync_val_compare_and_swap_16: 5260 BuiltinIndex = 12; 5261 NumFixed = 2; 5262 break; 5263 5264 case Builtin::BI__sync_bool_compare_and_swap: 5265 case Builtin::BI__sync_bool_compare_and_swap_1: 5266 case Builtin::BI__sync_bool_compare_and_swap_2: 5267 case Builtin::BI__sync_bool_compare_and_swap_4: 5268 case Builtin::BI__sync_bool_compare_and_swap_8: 5269 case Builtin::BI__sync_bool_compare_and_swap_16: 5270 BuiltinIndex = 13; 5271 NumFixed = 2; 5272 ResultType = Context.BoolTy; 5273 break; 5274 5275 case Builtin::BI__sync_lock_test_and_set: 5276 case Builtin::BI__sync_lock_test_and_set_1: 5277 case Builtin::BI__sync_lock_test_and_set_2: 5278 case Builtin::BI__sync_lock_test_and_set_4: 5279 case Builtin::BI__sync_lock_test_and_set_8: 5280 case Builtin::BI__sync_lock_test_and_set_16: 5281 BuiltinIndex = 14; 5282 break; 5283 5284 case Builtin::BI__sync_lock_release: 5285 case Builtin::BI__sync_lock_release_1: 5286 case Builtin::BI__sync_lock_release_2: 5287 case Builtin::BI__sync_lock_release_4: 5288 case Builtin::BI__sync_lock_release_8: 5289 case Builtin::BI__sync_lock_release_16: 5290 BuiltinIndex = 15; 5291 NumFixed = 0; 5292 ResultType = Context.VoidTy; 5293 break; 5294 5295 case Builtin::BI__sync_swap: 5296 case Builtin::BI__sync_swap_1: 5297 case Builtin::BI__sync_swap_2: 5298 case Builtin::BI__sync_swap_4: 5299 case Builtin::BI__sync_swap_8: 5300 case Builtin::BI__sync_swap_16: 5301 BuiltinIndex = 16; 5302 break; 5303 } 5304 5305 // Now that we know how many fixed arguments we expect, first check that we 5306 // have at least that many. 5307 if (TheCall->getNumArgs() < 1+NumFixed) { 5308 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 5309 << 0 << 1 + NumFixed << TheCall->getNumArgs() 5310 << Callee->getSourceRange(); 5311 return ExprError(); 5312 } 5313 5314 Diag(TheCall->getEndLoc(), diag::warn_atomic_implicit_seq_cst) 5315 << Callee->getSourceRange(); 5316 5317 if (WarnAboutSemanticsChange) { 5318 Diag(TheCall->getEndLoc(), diag::warn_sync_fetch_and_nand_semantics_change) 5319 << Callee->getSourceRange(); 5320 } 5321 5322 // Get the decl for the concrete builtin from this, we can tell what the 5323 // concrete integer type we should convert to is. 5324 unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex]; 5325 const char *NewBuiltinName = Context.BuiltinInfo.getName(NewBuiltinID); 5326 FunctionDecl *NewBuiltinDecl; 5327 if (NewBuiltinID == BuiltinID) 5328 NewBuiltinDecl = FDecl; 5329 else { 5330 // Perform builtin lookup to avoid redeclaring it. 
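    // Illustrative sketch (example only, following the size mapping described
    // above BuiltinIndices): for a call such as
    //   short s; __sync_fetch_and_add(&s, 1);
    // ValType is 2 bytes wide, so NewBuiltinName is "__sync_fetch_and_add_2",
    // and the lookup below resolves (or lazily creates) that declaration.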
5331 DeclarationName DN(&Context.Idents.get(NewBuiltinName)); 5332 LookupResult Res(*this, DN, DRE->getBeginLoc(), LookupOrdinaryName); 5333 LookupName(Res, TUScope, /*AllowBuiltinCreation=*/true); 5334 assert(Res.getFoundDecl()); 5335 NewBuiltinDecl = dyn_cast<FunctionDecl>(Res.getFoundDecl()); 5336 if (!NewBuiltinDecl) 5337 return ExprError(); 5338 } 5339 5340 // The first argument --- the pointer --- has a fixed type; we 5341 // deduce the types of the rest of the arguments accordingly. Walk 5342 // the remaining arguments, converting them to the deduced value type. 5343 for (unsigned i = 0; i != NumFixed; ++i) { 5344 ExprResult Arg = TheCall->getArg(i+1); 5345 5346 // GCC does an implicit conversion to the pointer or integer ValType. This 5347 // can fail in some cases (1i -> int**), check for this error case now. 5348 // Initialize the argument. 5349 InitializedEntity Entity = InitializedEntity::InitializeParameter(Context, 5350 ValType, /*consume*/ false); 5351 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 5352 if (Arg.isInvalid()) 5353 return ExprError(); 5354 5355 // Okay, we have something that *can* be converted to the right type. Check 5356 // to see if there is a potentially weird extension going on here. This can 5357 // happen when you do an atomic operation on something like a char* and 5358 // pass in 42. The 42 gets converted to char. This is even more strange 5359 // for things like 45.123 -> char, etc. 5360 // FIXME: Do this check. 5361 TheCall->setArg(i+1, Arg.get()); 5362 } 5363 5364 // Create a new DeclRefExpr to refer to the new decl. 5365 DeclRefExpr *NewDRE = DeclRefExpr::Create( 5366 Context, DRE->getQualifierLoc(), SourceLocation(), NewBuiltinDecl, 5367 /*enclosing*/ false, DRE->getLocation(), Context.BuiltinFnTy, 5368 DRE->getValueKind(), nullptr, nullptr, DRE->isNonOdrUse()); 5369 5370 // Set the callee in the CallExpr. 5371 // FIXME: This loses syntactic information. 5372 QualType CalleePtrTy = Context.getPointerType(NewBuiltinDecl->getType()); 5373 ExprResult PromotedCall = ImpCastExprToType(NewDRE, CalleePtrTy, 5374 CK_BuiltinFnToFnPtr); 5375 TheCall->setCallee(PromotedCall.get()); 5376 5377 // Change the result type of the call to match the original value type. This 5378 // is arbitrary, but the codegen for these builtins is designed to handle it 5379 // gracefully. 5380 TheCall->setType(ResultType); 5381 5382 return TheCallResult; 5383 } 5384 5385 /// SemaBuiltinNontemporalOverloaded - We have a call to 5386 /// __builtin_nontemporal_store or __builtin_nontemporal_load, which is an 5387 /// overloaded function based on the pointer type of its last argument. 5388 /// 5389 /// This function goes through and does final semantic checking for these 5390 /// builtins. 5391 ExprResult Sema::SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult) { 5392 CallExpr *TheCall = (CallExpr *)TheCallResult.get(); 5393 DeclRefExpr *DRE = 5394 cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 5395 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 5396 unsigned BuiltinID = FDecl->getBuiltinID(); 5397 assert((BuiltinID == Builtin::BI__builtin_nontemporal_store || 5398 BuiltinID == Builtin::BI__builtin_nontemporal_load) && 5399 "Unexpected nontemporal load/store builtin!"); 5400 bool isStore = BuiltinID == Builtin::BI__builtin_nontemporal_store; 5401 unsigned numArgs = isStore ? 2 : 1; 5402 5403 // Ensure that we have the proper number of arguments.
5404 if (checkArgCount(*this, TheCall, numArgs)) 5405 return ExprError(); 5406 5407 // Inspect the last argument of the nontemporal builtin. This should always 5408 // be a pointer type, from which we imply the type of the memory access. 5409 // Because it is a pointer type, we don't have to worry about any implicit 5410 // casts here. 5411 Expr *PointerArg = TheCall->getArg(numArgs - 1); 5412 ExprResult PointerArgResult = 5413 DefaultFunctionArrayLvalueConversion(PointerArg); 5414 5415 if (PointerArgResult.isInvalid()) 5416 return ExprError(); 5417 PointerArg = PointerArgResult.get(); 5418 TheCall->setArg(numArgs - 1, PointerArg); 5419 5420 const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>(); 5421 if (!pointerType) { 5422 Diag(DRE->getBeginLoc(), diag::err_nontemporal_builtin_must_be_pointer) 5423 << PointerArg->getType() << PointerArg->getSourceRange(); 5424 return ExprError(); 5425 } 5426 5427 QualType ValType = pointerType->getPointeeType(); 5428 5429 // Strip any qualifiers off ValType. 5430 ValType = ValType.getUnqualifiedType(); 5431 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 5432 !ValType->isBlockPointerType() && !ValType->isFloatingType() && 5433 !ValType->isVectorType()) { 5434 Diag(DRE->getBeginLoc(), 5435 diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector) 5436 << PointerArg->getType() << PointerArg->getSourceRange(); 5437 return ExprError(); 5438 } 5439 5440 if (!isStore) { 5441 TheCall->setType(ValType); 5442 return TheCallResult; 5443 } 5444 5445 ExprResult ValArg = TheCall->getArg(0); 5446 InitializedEntity Entity = InitializedEntity::InitializeParameter( 5447 Context, ValType, /*consume*/ false); 5448 ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg); 5449 if (ValArg.isInvalid()) 5450 return ExprError(); 5451 5452 TheCall->setArg(0, ValArg.get()); 5453 TheCall->setType(Context.VoidTy); 5454 return TheCallResult; 5455 } 5456 5457 /// CheckObjCString - Checks that the argument to the builtin 5458 /// CFString constructor is correct. 5459 /// Note: It might also make sense to do the UTF-16 conversion here (would 5460 /// simplify the backend). 5461 bool Sema::CheckObjCString(Expr *Arg) { 5462 Arg = Arg->IgnoreParenCasts(); 5463 StringLiteral *Literal = dyn_cast<StringLiteral>(Arg); 5464 5465 if (!Literal || !Literal->isAscii()) { 5466 Diag(Arg->getBeginLoc(), diag::err_cfstring_literal_not_string_constant) 5467 << Arg->getSourceRange(); 5468 return true; 5469 } 5470 5471 if (Literal->containsNonAsciiOrNull()) { 5472 StringRef String = Literal->getString(); 5473 unsigned NumBytes = String.size(); 5474 SmallVector<llvm::UTF16, 128> ToBuf(NumBytes); 5475 const llvm::UTF8 *FromPtr = (const llvm::UTF8 *)String.data(); 5476 llvm::UTF16 *ToPtr = &ToBuf[0]; 5477 5478 llvm::ConversionResult Result = 5479 llvm::ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes, &ToPtr, 5480 ToPtr + NumBytes, llvm::strictConversion); 5481 // Check for conversion failure. 5482 if (Result != llvm::conversionOK) 5483 Diag(Arg->getBeginLoc(), diag::warn_cfstring_truncated) 5484 << Arg->getSourceRange(); 5485 } 5486 return false; 5487 } 5488 5489 /// CheckOSLogFormatStringArg - Checks that the format string argument to the os_log() 5490 /// and os_trace() functions is correct, and converts it to const char *.
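/// For example (illustrative call shape only; 'buf' and 'x' are hypothetical):
///   __builtin_os_log_format(buf, "value: %d", x);
/// reaches this routine with the "value: %d" literal as Arg, which is then
/// validated and converted to 'const char *'.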
5491 ExprResult Sema::CheckOSLogFormatStringArg(Expr *Arg) { 5492 Arg = Arg->IgnoreParenCasts(); 5493 auto *Literal = dyn_cast<StringLiteral>(Arg); 5494 if (!Literal) { 5495 if (auto *ObjcLiteral = dyn_cast<ObjCStringLiteral>(Arg)) { 5496 Literal = ObjcLiteral->getString(); 5497 } 5498 } 5499 5500 if (!Literal || (!Literal->isAscii() && !Literal->isUTF8())) { 5501 return ExprError( 5502 Diag(Arg->getBeginLoc(), diag::err_os_log_format_not_string_constant) 5503 << Arg->getSourceRange()); 5504 } 5505 5506 ExprResult Result(Literal); 5507 QualType ResultTy = Context.getPointerType(Context.CharTy.withConst()); 5508 InitializedEntity Entity = 5509 InitializedEntity::InitializeParameter(Context, ResultTy, false); 5510 Result = PerformCopyInitialization(Entity, SourceLocation(), Result); 5511 return Result; 5512 } 5513 5514 /// Check that the user is calling the appropriate va_start builtin for the 5515 /// target and calling convention. 5516 static bool checkVAStartABI(Sema &S, unsigned BuiltinID, Expr *Fn) { 5517 const llvm::Triple &TT = S.Context.getTargetInfo().getTriple(); 5518 bool IsX64 = TT.getArch() == llvm::Triple::x86_64; 5519 bool IsAArch64 = TT.getArch() == llvm::Triple::aarch64; 5520 bool IsWindows = TT.isOSWindows(); 5521 bool IsMSVAStart = BuiltinID == Builtin::BI__builtin_ms_va_start; 5522 if (IsX64 || IsAArch64) { 5523 CallingConv CC = CC_C; 5524 if (const FunctionDecl *FD = S.getCurFunctionDecl()) 5525 CC = FD->getType()->castAs<FunctionType>()->getCallConv(); 5526 if (IsMSVAStart) { 5527 // Don't allow this in System V ABI functions. 5528 if (CC == CC_X86_64SysV || (!IsWindows && CC != CC_Win64)) 5529 return S.Diag(Fn->getBeginLoc(), 5530 diag::err_ms_va_start_used_in_sysv_function); 5531 } else { 5532 // On x86-64/AArch64 Unix, don't allow this in Win64 ABI functions. 5533 // On x64 Windows, don't allow this in System V ABI functions. 5534 // (Yes, that means there's no corresponding way to support variadic 5535 // System V ABI functions on Windows.) 5536 if ((IsWindows && CC == CC_X86_64SysV) || 5537 (!IsWindows && CC == CC_Win64)) 5538 return S.Diag(Fn->getBeginLoc(), 5539 diag::err_va_start_used_in_wrong_abi_function) 5540 << !IsWindows; 5541 } 5542 return false; 5543 } 5544 5545 if (IsMSVAStart) 5546 return S.Diag(Fn->getBeginLoc(), diag::err_builtin_x64_aarch64_only); 5547 return false; 5548 } 5549 5550 static bool checkVAStartIsInVariadicFunction(Sema &S, Expr *Fn, 5551 ParmVarDecl **LastParam = nullptr) { 5552 // Determine whether the current function, block, or obj-c method is variadic 5553 // and get its parameter list. 5554 bool IsVariadic = false; 5555 ArrayRef<ParmVarDecl *> Params; 5556 DeclContext *Caller = S.CurContext; 5557 if (auto *Block = dyn_cast<BlockDecl>(Caller)) { 5558 IsVariadic = Block->isVariadic(); 5559 Params = Block->parameters(); 5560 } else if (auto *FD = dyn_cast<FunctionDecl>(Caller)) { 5561 IsVariadic = FD->isVariadic(); 5562 Params = FD->parameters(); 5563 } else if (auto *MD = dyn_cast<ObjCMethodDecl>(Caller)) { 5564 IsVariadic = MD->isVariadic(); 5565 // FIXME: This isn't correct for methods (results in bogus warning). 5566 Params = MD->parameters(); 5567 } else if (isa<CapturedDecl>(Caller)) { 5568 // We don't support va_start in a CapturedDecl. 5569 S.Diag(Fn->getBeginLoc(), diag::err_va_start_captured_stmt); 5570 return true; 5571 } else { 5572 // This must be some other declcontext that parses exprs. 
5573 S.Diag(Fn->getBeginLoc(), diag::err_va_start_outside_function); 5574 return true; 5575 } 5576 5577 if (!IsVariadic) { 5578 S.Diag(Fn->getBeginLoc(), diag::err_va_start_fixed_function); 5579 return true; 5580 } 5581 5582 if (LastParam) 5583 *LastParam = Params.empty() ? nullptr : Params.back(); 5584 5585 return false; 5586 } 5587 5588 /// Check the arguments to '__builtin_va_start' or '__builtin_ms_va_start' 5589 /// for validity. Emit an error and return true on failure; return false 5590 /// on success. 5591 bool Sema::SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) { 5592 Expr *Fn = TheCall->getCallee(); 5593 5594 if (checkVAStartABI(*this, BuiltinID, Fn)) 5595 return true; 5596 5597 if (TheCall->getNumArgs() > 2) { 5598 Diag(TheCall->getArg(2)->getBeginLoc(), 5599 diag::err_typecheck_call_too_many_args) 5600 << 0 /*function call*/ << 2 << TheCall->getNumArgs() 5601 << Fn->getSourceRange() 5602 << SourceRange(TheCall->getArg(2)->getBeginLoc(), 5603 (*(TheCall->arg_end() - 1))->getEndLoc()); 5604 return true; 5605 } 5606 5607 if (TheCall->getNumArgs() < 2) { 5608 return Diag(TheCall->getEndLoc(), 5609 diag::err_typecheck_call_too_few_args_at_least) 5610 << 0 /*function call*/ << 2 << TheCall->getNumArgs(); 5611 } 5612 5613 // Type-check the first argument normally. 5614 if (checkBuiltinArgument(*this, TheCall, 0)) 5615 return true; 5616 5617 // Check that the current function is variadic, and get its last parameter. 5618 ParmVarDecl *LastParam; 5619 if (checkVAStartIsInVariadicFunction(*this, Fn, &LastParam)) 5620 return true; 5621 5622 // Verify that the second argument to the builtin is the last argument of the 5623 // current function or method. 5624 bool SecondArgIsLastNamedArgument = false; 5625 const Expr *Arg = TheCall->getArg(1)->IgnoreParenCasts(); 5626 5627 // These are valid if SecondArgIsLastNamedArgument is false after the next 5628 // block. 5629 QualType Type; 5630 SourceLocation ParamLoc; 5631 bool IsCRegister = false; 5632 5633 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Arg)) { 5634 if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(DR->getDecl())) { 5635 SecondArgIsLastNamedArgument = PV == LastParam; 5636 5637 Type = PV->getType(); 5638 ParamLoc = PV->getLocation(); 5639 IsCRegister = 5640 PV->getStorageClass() == SC_Register && !getLangOpts().CPlusPlus; 5641 } 5642 } 5643 5644 if (!SecondArgIsLastNamedArgument) 5645 Diag(TheCall->getArg(1)->getBeginLoc(), 5646 diag::warn_second_arg_of_va_start_not_last_named_param); 5647 else if (IsCRegister || Type->isReferenceType() || 5648 Type->isSpecificBuiltinType(BuiltinType::Float) || [=] { 5649 // Promotable integers are UB, but enumerations need a bit of 5650 // extra checking to see what their promotable type actually is. 
5651 if (!Type->isPromotableIntegerType()) 5652 return false; 5653 if (!Type->isEnumeralType()) 5654 return true; 5655 const EnumDecl *ED = Type->castAs<EnumType>()->getDecl(); 5656 return !(ED && 5657 Context.typesAreCompatible(ED->getPromotionType(), Type)); 5658 }()) { 5659 unsigned Reason = 0; 5660 if (Type->isReferenceType()) Reason = 1; 5661 else if (IsCRegister) Reason = 2; 5662 Diag(Arg->getBeginLoc(), diag::warn_va_start_type_is_undefined) << Reason; 5663 Diag(ParamLoc, diag::note_parameter_type) << Type; 5664 } 5665 5666 TheCall->setType(Context.VoidTy); 5667 return false; 5668 } 5669 5670 bool Sema::SemaBuiltinVAStartARMMicrosoft(CallExpr *Call) { 5671 // void __va_start(va_list *ap, const char *named_addr, size_t slot_size, 5672 // const char *named_addr); 5673 5674 Expr *Func = Call->getCallee(); 5675 5676 if (Call->getNumArgs() < 3) 5677 return Diag(Call->getEndLoc(), 5678 diag::err_typecheck_call_too_few_args_at_least) 5679 << 0 /*function call*/ << 3 << Call->getNumArgs(); 5680 5681 // Type-check the first argument normally. 5682 if (checkBuiltinArgument(*this, Call, 0)) 5683 return true; 5684 5685 // Check that the current function is variadic. 5686 if (checkVAStartIsInVariadicFunction(*this, Func)) 5687 return true; 5688 5689 // __va_start on Windows does not validate the parameter qualifiers 5690 5691 const Expr *Arg1 = Call->getArg(1)->IgnoreParens(); 5692 const Type *Arg1Ty = Arg1->getType().getCanonicalType().getTypePtr(); 5693 5694 const Expr *Arg2 = Call->getArg(2)->IgnoreParens(); 5695 const Type *Arg2Ty = Arg2->getType().getCanonicalType().getTypePtr(); 5696 5697 const QualType &ConstCharPtrTy = 5698 Context.getPointerType(Context.CharTy.withConst()); 5699 if (!Arg1Ty->isPointerType() || 5700 Arg1Ty->getPointeeType().withoutLocalFastQualifiers() != Context.CharTy) 5701 Diag(Arg1->getBeginLoc(), diag::err_typecheck_convert_incompatible) 5702 << Arg1->getType() << ConstCharPtrTy << 1 /* different class */ 5703 << 0 /* qualifier difference */ 5704 << 3 /* parameter mismatch */ 5705 << 2 << Arg1->getType() << ConstCharPtrTy; 5706 5707 const QualType SizeTy = Context.getSizeType(); 5708 if (Arg2Ty->getCanonicalTypeInternal().withoutLocalFastQualifiers() != SizeTy) 5709 Diag(Arg2->getBeginLoc(), diag::err_typecheck_convert_incompatible) 5710 << Arg2->getType() << SizeTy << 1 /* different class */ 5711 << 0 /* qualifier difference */ 5712 << 3 /* parameter mismatch */ 5713 << 3 << Arg2->getType() << SizeTy; 5714 5715 return false; 5716 } 5717 5718 /// SemaBuiltinUnorderedCompare - Handle functions like __builtin_isgreater and 5719 /// friends. This is declared to take (...), so we have to check everything. 5720 bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall) { 5721 if (TheCall->getNumArgs() < 2) 5722 return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args) 5723 << 0 << 2 << TheCall->getNumArgs() /*function call*/; 5724 if (TheCall->getNumArgs() > 2) 5725 return Diag(TheCall->getArg(2)->getBeginLoc(), 5726 diag::err_typecheck_call_too_many_args) 5727 << 0 /*function call*/ << 2 << TheCall->getNumArgs() 5728 << SourceRange(TheCall->getArg(2)->getBeginLoc(), 5729 (*(TheCall->arg_end() - 1))->getEndLoc()); 5730 5731 ExprResult OrigArg0 = TheCall->getArg(0); 5732 ExprResult OrigArg1 = TheCall->getArg(1); 5733 5734 // Do standard promotions between the two arguments, returning their common 5735 // type. 
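  // For instance (sketch only): in __builtin_isgreater(1.0f, 2.0) the float
  // operand is promoted, so both operands are compared as 'double' after
  // these conversions.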
5736 QualType Res = UsualArithmeticConversions(OrigArg0, OrigArg1, false); 5737 if (OrigArg0.isInvalid() || OrigArg1.isInvalid()) 5738 return true; 5739 5740 // Make sure any conversions are pushed back into the call; this is 5741 // type safe since unordered compare builtins are declared as "_Bool 5742 // foo(...)". 5743 TheCall->setArg(0, OrigArg0.get()); 5744 TheCall->setArg(1, OrigArg1.get()); 5745 5746 if (OrigArg0.get()->isTypeDependent() || OrigArg1.get()->isTypeDependent()) 5747 return false; 5748 5749 // If the common type isn't a real floating type, then the arguments were 5750 // invalid for this operation. 5751 if (Res.isNull() || !Res->isRealFloatingType()) 5752 return Diag(OrigArg0.get()->getBeginLoc(), 5753 diag::err_typecheck_call_invalid_ordered_compare) 5754 << OrigArg0.get()->getType() << OrigArg1.get()->getType() 5755 << SourceRange(OrigArg0.get()->getBeginLoc(), 5756 OrigArg1.get()->getEndLoc()); 5757 5758 return false; 5759 } 5760 5761 /// SemaBuiltinFPClassification - Handle functions like 5762 /// __builtin_isnan and friends. This is declared to take (...), so we have 5763 /// to check everything. We expect the last argument to be a floating point 5764 /// value. 5765 bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) { 5766 if (TheCall->getNumArgs() < NumArgs) 5767 return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args) 5768 << 0 << NumArgs << TheCall->getNumArgs() /*function call*/; 5769 if (TheCall->getNumArgs() > NumArgs) 5770 return Diag(TheCall->getArg(NumArgs)->getBeginLoc(), 5771 diag::err_typecheck_call_too_many_args) 5772 << 0 /*function call*/ << NumArgs << TheCall->getNumArgs() 5773 << SourceRange(TheCall->getArg(NumArgs)->getBeginLoc(), 5774 (*(TheCall->arg_end() - 1))->getEndLoc()); 5775 5776 Expr *OrigArg = TheCall->getArg(NumArgs-1); 5777 5778 if (OrigArg->isTypeDependent()) 5779 return false; 5780 5781 // This operation requires a non-_Complex floating-point number. 5782 if (!OrigArg->getType()->isRealFloatingType()) 5783 return Diag(OrigArg->getBeginLoc(), 5784 diag::err_typecheck_call_invalid_unary_fp) 5785 << OrigArg->getType() << OrigArg->getSourceRange(); 5786 5787 // If this is an implicit conversion from float -> float, double, or 5788 // long double, remove it. 5789 if (ImplicitCastExpr *Cast = dyn_cast<ImplicitCastExpr>(OrigArg)) { 5790 // Only remove standard FloatCasts, leaving other casts in place. 5791 if (Cast->getCastKind() == CK_FloatingCast) { 5792 Expr *CastArg = Cast->getSubExpr(); 5793 if (CastArg->getType()->isSpecificBuiltinType(BuiltinType::Float)) { 5794 assert( 5795 (Cast->getType()->isSpecificBuiltinType(BuiltinType::Double) || 5796 Cast->getType()->isSpecificBuiltinType(BuiltinType::Float) || 5797 Cast->getType()->isSpecificBuiltinType(BuiltinType::LongDouble)) && 5798 "promotion from float to either float, double, or long double is " 5799 "the only expected cast here"); 5800 Cast->setSubExpr(nullptr); 5801 TheCall->setArg(NumArgs-1, CastArg); 5802 } 5803 } 5804 } 5805 5806 return false; 5807 } 5808 5809 // Customized Sema Checking for VSX builtins that have the following signature: 5810 // vector [...] builtinName(vector [...], vector [...], const int); 5811 // Which takes the same type of vectors (any legal vector type) for the first 5812 // two arguments and takes a compile-time constant for the third argument.
5813 // Example builtins are : 5814 // vector double vec_xxpermdi(vector double, vector double, int); 5815 // vector short vec_xxsldwi(vector short, vector short, int); 5816 bool Sema::SemaBuiltinVSX(CallExpr *TheCall) { 5817 unsigned ExpectedNumArgs = 3; 5818 if (TheCall->getNumArgs() < ExpectedNumArgs) 5819 return Diag(TheCall->getEndLoc(), 5820 diag::err_typecheck_call_too_few_args_at_least) 5821 << 0 /*function call*/ << ExpectedNumArgs << TheCall->getNumArgs() 5822 << TheCall->getSourceRange(); 5823 5824 if (TheCall->getNumArgs() > ExpectedNumArgs) 5825 return Diag(TheCall->getEndLoc(), 5826 diag::err_typecheck_call_too_many_args_at_most) 5827 << 0 /*function call*/ << ExpectedNumArgs << TheCall->getNumArgs() 5828 << TheCall->getSourceRange(); 5829 5830 // Check the third argument is a compile time constant 5831 llvm::APSInt Value; 5832 if(!TheCall->getArg(2)->isIntegerConstantExpr(Value, Context)) 5833 return Diag(TheCall->getBeginLoc(), 5834 diag::err_vsx_builtin_nonconstant_argument) 5835 << 3 /* argument index */ << TheCall->getDirectCallee() 5836 << SourceRange(TheCall->getArg(2)->getBeginLoc(), 5837 TheCall->getArg(2)->getEndLoc()); 5838 5839 QualType Arg1Ty = TheCall->getArg(0)->getType(); 5840 QualType Arg2Ty = TheCall->getArg(1)->getType(); 5841 5842 // Check the type of argument 1 and argument 2 are vectors. 5843 SourceLocation BuiltinLoc = TheCall->getBeginLoc(); 5844 if ((!Arg1Ty->isVectorType() && !Arg1Ty->isDependentType()) || 5845 (!Arg2Ty->isVectorType() && !Arg2Ty->isDependentType())) { 5846 return Diag(BuiltinLoc, diag::err_vec_builtin_non_vector) 5847 << TheCall->getDirectCallee() 5848 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 5849 TheCall->getArg(1)->getEndLoc()); 5850 } 5851 5852 // Check the first two arguments are the same type. 5853 if (!Context.hasSameUnqualifiedType(Arg1Ty, Arg2Ty)) { 5854 return Diag(BuiltinLoc, diag::err_vec_builtin_incompatible_vector) 5855 << TheCall->getDirectCallee() 5856 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 5857 TheCall->getArg(1)->getEndLoc()); 5858 } 5859 5860 // When default clang type checking is turned off and the customized type 5861 // checking is used, the returning type of the function must be explicitly 5862 // set. Otherwise it is _Bool by default. 5863 TheCall->setType(Arg1Ty); 5864 5865 return false; 5866 } 5867 5868 /// SemaBuiltinShuffleVector - Handle __builtin_shufflevector. 5869 // This is declared to take (...), so we have to check everything. 
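// For example (illustrative only; 'a' and 'b' are hypothetical 4-element vectors):
//   __builtin_shufflevector(a, b, 0, 4, 1, -1)
// selects a[0], b[0], a[1], and an undefined element; each index operand must
// be a constant in [0, 2*N) or -1 (lowered to undef), as enforced below.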
5870 ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) { 5871 if (TheCall->getNumArgs() < 2) 5872 return ExprError(Diag(TheCall->getEndLoc(), 5873 diag::err_typecheck_call_too_few_args_at_least) 5874 << 0 /*function call*/ << 2 << TheCall->getNumArgs() 5875 << TheCall->getSourceRange()); 5876 5877 // Determine which of the following types of shufflevector we're checking: 5878 // 1) unary, vector mask: (lhs, mask) 5879 // 2) binary, scalar mask: (lhs, rhs, index, ..., index) 5880 QualType resType = TheCall->getArg(0)->getType(); 5881 unsigned numElements = 0; 5882 5883 if (!TheCall->getArg(0)->isTypeDependent() && 5884 !TheCall->getArg(1)->isTypeDependent()) { 5885 QualType LHSType = TheCall->getArg(0)->getType(); 5886 QualType RHSType = TheCall->getArg(1)->getType(); 5887 5888 if (!LHSType->isVectorType() || !RHSType->isVectorType()) 5889 return ExprError( 5890 Diag(TheCall->getBeginLoc(), diag::err_vec_builtin_non_vector) 5891 << TheCall->getDirectCallee() 5892 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 5893 TheCall->getArg(1)->getEndLoc())); 5894 5895 numElements = LHSType->castAs<VectorType>()->getNumElements(); 5896 unsigned numResElements = TheCall->getNumArgs() - 2; 5897 5898 // Check to see if we have a call with 2 vector arguments, the unary shuffle 5899 // with mask. If so, verify that RHS is an integer vector type with the 5900 // same number of elts as lhs. 5901 if (TheCall->getNumArgs() == 2) { 5902 if (!RHSType->hasIntegerRepresentation() || 5903 RHSType->castAs<VectorType>()->getNumElements() != numElements) 5904 return ExprError(Diag(TheCall->getBeginLoc(), 5905 diag::err_vec_builtin_incompatible_vector) 5906 << TheCall->getDirectCallee() 5907 << SourceRange(TheCall->getArg(1)->getBeginLoc(), 5908 TheCall->getArg(1)->getEndLoc())); 5909 } else if (!Context.hasSameUnqualifiedType(LHSType, RHSType)) { 5910 return ExprError(Diag(TheCall->getBeginLoc(), 5911 diag::err_vec_builtin_incompatible_vector) 5912 << TheCall->getDirectCallee() 5913 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 5914 TheCall->getArg(1)->getEndLoc())); 5915 } else if (numElements != numResElements) { 5916 QualType eltType = LHSType->castAs<VectorType>()->getElementType(); 5917 resType = Context.getVectorType(eltType, numResElements, 5918 VectorType::GenericVector); 5919 } 5920 } 5921 5922 for (unsigned i = 2; i < TheCall->getNumArgs(); i++) { 5923 if (TheCall->getArg(i)->isTypeDependent() || 5924 TheCall->getArg(i)->isValueDependent()) 5925 continue; 5926 5927 llvm::APSInt Result(32); 5928 if (!TheCall->getArg(i)->isIntegerConstantExpr(Result, Context)) 5929 return ExprError(Diag(TheCall->getBeginLoc(), 5930 diag::err_shufflevector_nonconstant_argument) 5931 << TheCall->getArg(i)->getSourceRange()); 5932 5933 // Allow -1 which will be translated to undef in the IR. 
5934 if (Result.isSigned() && Result.isAllOnesValue()) 5935 continue; 5936 5937 if (Result.getActiveBits() > 64 || Result.getZExtValue() >= numElements*2) 5938 return ExprError(Diag(TheCall->getBeginLoc(), 5939 diag::err_shufflevector_argument_too_large) 5940 << TheCall->getArg(i)->getSourceRange()); 5941 } 5942 5943 SmallVector<Expr*, 32> exprs; 5944 5945 for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; i++) { 5946 exprs.push_back(TheCall->getArg(i)); 5947 TheCall->setArg(i, nullptr); 5948 } 5949 5950 return new (Context) ShuffleVectorExpr(Context, exprs, resType, 5951 TheCall->getCallee()->getBeginLoc(), 5952 TheCall->getRParenLoc()); 5953 } 5954 5955 /// SemaConvertVectorExpr - Handle __builtin_convertvector 5956 ExprResult Sema::SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, 5957 SourceLocation BuiltinLoc, 5958 SourceLocation RParenLoc) { 5959 ExprValueKind VK = VK_RValue; 5960 ExprObjectKind OK = OK_Ordinary; 5961 QualType DstTy = TInfo->getType(); 5962 QualType SrcTy = E->getType(); 5963 5964 if (!SrcTy->isVectorType() && !SrcTy->isDependentType()) 5965 return ExprError(Diag(BuiltinLoc, 5966 diag::err_convertvector_non_vector) 5967 << E->getSourceRange()); 5968 if (!DstTy->isVectorType() && !DstTy->isDependentType()) 5969 return ExprError(Diag(BuiltinLoc, 5970 diag::err_convertvector_non_vector_type)); 5971 5972 if (!SrcTy->isDependentType() && !DstTy->isDependentType()) { 5973 unsigned SrcElts = SrcTy->castAs<VectorType>()->getNumElements(); 5974 unsigned DstElts = DstTy->castAs<VectorType>()->getNumElements(); 5975 if (SrcElts != DstElts) 5976 return ExprError(Diag(BuiltinLoc, 5977 diag::err_convertvector_incompatible_vector) 5978 << E->getSourceRange()); 5979 } 5980 5981 return new (Context) 5982 ConvertVectorExpr(E, TInfo, DstTy, VK, OK, BuiltinLoc, RParenLoc); 5983 } 5984 5985 /// SemaBuiltinPrefetch - Handle __builtin_prefetch. 5986 // This is declared to take (const void*, ...) and can take two 5987 // optional constant int args. 5988 bool Sema::SemaBuiltinPrefetch(CallExpr *TheCall) { 5989 unsigned NumArgs = TheCall->getNumArgs(); 5990 5991 if (NumArgs > 3) 5992 return Diag(TheCall->getEndLoc(), 5993 diag::err_typecheck_call_too_many_args_at_most) 5994 << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange(); 5995 5996 // Argument 0 is checked for us and the remaining arguments must be 5997 // constant integers. 5998 for (unsigned i = 1; i != NumArgs; ++i) 5999 if (SemaBuiltinConstantArgRange(TheCall, i, 0, i == 1 ? 1 : 3)) 6000 return true; 6001 6002 return false; 6003 } 6004 6005 /// SemaBuiltinAssume - Handle __assume (MS Extension). 6006 // __assume does not evaluate its arguments, and should warn if its argument 6007 // has side effects. 6008 bool Sema::SemaBuiltinAssume(CallExpr *TheCall) { 6009 Expr *Arg = TheCall->getArg(0); 6010 if (Arg->isInstantiationDependent()) return false; 6011 6012 if (Arg->HasSideEffects(Context)) 6013 Diag(Arg->getBeginLoc(), diag::warn_assume_side_effects) 6014 << Arg->getSourceRange() 6015 << cast<FunctionDecl>(TheCall->getCalleeDecl())->getIdentifier(); 6016 6017 return false; 6018 } 6019 6020 /// Handle __builtin_alloca_with_align. This is declared 6021 /// as (size_t, size_t) where the second size_t must be a power of 2 greater 6022 /// than 8. 6023 bool Sema::SemaBuiltinAllocaWithAlign(CallExpr *TheCall) { 6024 // The alignment must be a constant integer. 6025 Expr *Arg = TheCall->getArg(1); 6026 6027 // We can't check the value of a dependent argument. 
6028 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) { 6029 if (const auto *UE = 6030 dyn_cast<UnaryExprOrTypeTraitExpr>(Arg->IgnoreParenImpCasts())) 6031 if (UE->getKind() == UETT_AlignOf || 6032 UE->getKind() == UETT_PreferredAlignOf) 6033 Diag(TheCall->getBeginLoc(), diag::warn_alloca_align_alignof) 6034 << Arg->getSourceRange(); 6035 6036 llvm::APSInt Result = Arg->EvaluateKnownConstInt(Context); 6037 6038 if (!Result.isPowerOf2()) 6039 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two) 6040 << Arg->getSourceRange(); 6041 6042 if (Result < Context.getCharWidth()) 6043 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_small) 6044 << (unsigned)Context.getCharWidth() << Arg->getSourceRange(); 6045 6046 if (Result > std::numeric_limits<int32_t>::max()) 6047 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_big) 6048 << std::numeric_limits<int32_t>::max() << Arg->getSourceRange(); 6049 } 6050 6051 return false; 6052 } 6053 6054 /// Handle __builtin_assume_aligned. This is declared 6055 /// as (const void*, size_t, ...) and can take one optional constant int arg. 6056 bool Sema::SemaBuiltinAssumeAligned(CallExpr *TheCall) { 6057 unsigned NumArgs = TheCall->getNumArgs(); 6058 6059 if (NumArgs > 3) 6060 return Diag(TheCall->getEndLoc(), 6061 diag::err_typecheck_call_too_many_args_at_most) 6062 << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange(); 6063 6064 // The alignment must be a constant integer. 6065 Expr *Arg = TheCall->getArg(1); 6066 6067 // We can't check the value of a dependent argument. 6068 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) { 6069 llvm::APSInt Result; 6070 if (SemaBuiltinConstantArg(TheCall, 1, Result)) 6071 return true; 6072 6073 if (!Result.isPowerOf2()) 6074 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two) 6075 << Arg->getSourceRange(); 6076 6077 // Alignment calculations can wrap around if it's greater than 2**29. 6078 unsigned MaximumAlignment = 536870912; 6079 if (Result > MaximumAlignment) 6080 Diag(TheCall->getBeginLoc(), diag::warn_assume_aligned_too_great) 6081 << Arg->getSourceRange() << MaximumAlignment; 6082 } 6083 6084 if (NumArgs > 2) { 6085 ExprResult Arg(TheCall->getArg(2)); 6086 InitializedEntity Entity = InitializedEntity::InitializeParameter(Context, 6087 Context.getSizeType(), false); 6088 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 6089 if (Arg.isInvalid()) return true; 6090 TheCall->setArg(2, Arg.get()); 6091 } 6092 6093 return false; 6094 } 6095 6096 bool Sema::SemaBuiltinOSLogFormat(CallExpr *TheCall) { 6097 unsigned BuiltinID = 6098 cast<FunctionDecl>(TheCall->getCalleeDecl())->getBuiltinID(); 6099 bool IsSizeCall = BuiltinID == Builtin::BI__builtin_os_log_format_buffer_size; 6100 6101 unsigned NumArgs = TheCall->getNumArgs(); 6102 unsigned NumRequiredArgs = IsSizeCall ? 1 : 2; 6103 if (NumArgs < NumRequiredArgs) { 6104 return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args) 6105 << 0 /* function call */ << NumRequiredArgs << NumArgs 6106 << TheCall->getSourceRange(); 6107 } 6108 if (NumArgs >= NumRequiredArgs + 0x100) { 6109 return Diag(TheCall->getEndLoc(), 6110 diag::err_typecheck_call_too_many_args_at_most) 6111 << 0 /* function call */ << (NumRequiredArgs + 0xff) << NumArgs 6112 << TheCall->getSourceRange(); 6113 } 6114 unsigned i = 0; 6115 6116 // For formatting call, check buffer arg. 
6117 if (!IsSizeCall) { 6118 ExprResult Arg(TheCall->getArg(i)); 6119 InitializedEntity Entity = InitializedEntity::InitializeParameter( 6120 Context, Context.VoidPtrTy, false); 6121 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 6122 if (Arg.isInvalid()) 6123 return true; 6124 TheCall->setArg(i, Arg.get()); 6125 i++; 6126 } 6127 6128 // Check string literal arg. 6129 unsigned FormatIdx = i; 6130 { 6131 ExprResult Arg = CheckOSLogFormatStringArg(TheCall->getArg(i)); 6132 if (Arg.isInvalid()) 6133 return true; 6134 TheCall->setArg(i, Arg.get()); 6135 i++; 6136 } 6137 6138 // Make sure variadic args are scalar. 6139 unsigned FirstDataArg = i; 6140 while (i < NumArgs) { 6141 ExprResult Arg = DefaultVariadicArgumentPromotion( 6142 TheCall->getArg(i), VariadicFunction, nullptr); 6143 if (Arg.isInvalid()) 6144 return true; 6145 CharUnits ArgSize = Context.getTypeSizeInChars(Arg.get()->getType()); 6146 if (ArgSize.getQuantity() >= 0x100) { 6147 return Diag(Arg.get()->getEndLoc(), diag::err_os_log_argument_too_big) 6148 << i << (int)ArgSize.getQuantity() << 0xff 6149 << TheCall->getSourceRange(); 6150 } 6151 TheCall->setArg(i, Arg.get()); 6152 i++; 6153 } 6154 6155 // Check formatting specifiers. NOTE: We're only doing this for the non-size 6156 // call to avoid duplicate diagnostics. 6157 if (!IsSizeCall) { 6158 llvm::SmallBitVector CheckedVarArgs(NumArgs, false); 6159 ArrayRef<const Expr *> Args(TheCall->getArgs(), TheCall->getNumArgs()); 6160 bool Success = CheckFormatArguments( 6161 Args, /*HasVAListArg*/ false, FormatIdx, FirstDataArg, FST_OSLog, 6162 VariadicFunction, TheCall->getBeginLoc(), SourceRange(), 6163 CheckedVarArgs); 6164 if (!Success) 6165 return true; 6166 } 6167 6168 if (IsSizeCall) { 6169 TheCall->setType(Context.getSizeType()); 6170 } else { 6171 TheCall->setType(Context.VoidPtrTy); 6172 } 6173 return false; 6174 } 6175 6176 /// SemaBuiltinConstantArg - Handle a check if argument ArgNum of CallExpr 6177 /// TheCall is a constant expression. 6178 bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, 6179 llvm::APSInt &Result) { 6180 Expr *Arg = TheCall->getArg(ArgNum); 6181 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 6182 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 6183 6184 if (Arg->isTypeDependent() || Arg->isValueDependent()) return false; 6185 6186 if (!Arg->isIntegerConstantExpr(Result, Context)) 6187 return Diag(TheCall->getBeginLoc(), diag::err_constant_integer_arg_type) 6188 << FDecl->getDeclName() << Arg->getSourceRange(); 6189 6190 return false; 6191 } 6192 6193 /// SemaBuiltinConstantArgRange - Handle a check if argument ArgNum of CallExpr 6194 /// TheCall is a constant expression in the range [Low, High]. 6195 bool Sema::SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, 6196 int Low, int High, bool RangeIsError) { 6197 if (isConstantEvaluated()) 6198 return false; 6199 llvm::APSInt Result; 6200 6201 // We can't check the value of a dependent argument. 6202 Expr *Arg = TheCall->getArg(ArgNum); 6203 if (Arg->isTypeDependent() || Arg->isValueDependent()) 6204 return false; 6205 6206 // Check constant-ness first. 
6207 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 6208 return true; 6209 6210 if (Result.getSExtValue() < Low || Result.getSExtValue() > High) { 6211 if (RangeIsError) 6212 return Diag(TheCall->getBeginLoc(), diag::err_argument_invalid_range) 6213 << Result.toString(10) << Low << High << Arg->getSourceRange(); 6214 else 6215 // Defer the warning until we know if the code will be emitted so that 6216 // dead code can ignore this. 6217 DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall, 6218 PDiag(diag::warn_argument_invalid_range) 6219 << Result.toString(10) << Low << High 6220 << Arg->getSourceRange()); 6221 } 6222 6223 return false; 6224 } 6225 6226 /// SemaBuiltinConstantArgMultiple - Handle a check if argument ArgNum of CallExpr 6227 /// TheCall is a constant expression is a multiple of Num.. 6228 bool Sema::SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, 6229 unsigned Num) { 6230 llvm::APSInt Result; 6231 6232 // We can't check the value of a dependent argument. 6233 Expr *Arg = TheCall->getArg(ArgNum); 6234 if (Arg->isTypeDependent() || Arg->isValueDependent()) 6235 return false; 6236 6237 // Check constant-ness first. 6238 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 6239 return true; 6240 6241 if (Result.getSExtValue() % Num != 0) 6242 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_multiple) 6243 << Num << Arg->getSourceRange(); 6244 6245 return false; 6246 } 6247 6248 /// SemaBuiltinConstantArgPower2 - Check if argument ArgNum of TheCall is a 6249 /// constant expression representing a power of 2. 6250 bool Sema::SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum) { 6251 llvm::APSInt Result; 6252 6253 // We can't check the value of a dependent argument. 6254 Expr *Arg = TheCall->getArg(ArgNum); 6255 if (Arg->isTypeDependent() || Arg->isValueDependent()) 6256 return false; 6257 6258 // Check constant-ness first. 6259 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 6260 return true; 6261 6262 // Bit-twiddling to test for a power of 2: for x > 0, x & (x-1) is zero if 6263 // and only if x is a power of 2. 6264 if (Result.isStrictlyPositive() && (Result & (Result - 1)) == 0) 6265 return false; 6266 6267 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_power_of_2) 6268 << Arg->getSourceRange(); 6269 } 6270 6271 static bool IsShiftedByte(llvm::APSInt Value) { 6272 if (Value.isNegative()) 6273 return false; 6274 6275 // Check if it's a shifted byte, by shifting it down 6276 while (true) { 6277 // If the value fits in the bottom byte, the check passes. 6278 if (Value < 0x100) 6279 return true; 6280 6281 // Otherwise, if the value has _any_ bits in the bottom byte, the check 6282 // fails. 6283 if ((Value & 0xFF) != 0) 6284 return false; 6285 6286 // If the bottom 8 bits are all 0, but something above that is nonzero, 6287 // then shifting the value right by 8 bits won't affect whether it's a 6288 // shifted byte or not. So do that, and go round again. 6289 Value >>= 8; 6290 } 6291 } 6292 6293 /// SemaBuiltinConstantArgShiftedByte - Check if argument ArgNum of TheCall is 6294 /// a constant expression representing an arbitrary byte value shifted left by 6295 /// a multiple of 8 bits. 6296 bool Sema::SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum) { 6297 llvm::APSInt Result; 6298 6299 // We can't check the value of a dependent argument. 6300 Expr *Arg = TheCall->getArg(ArgNum); 6301 if (Arg->isTypeDependent() || Arg->isValueDependent()) 6302 return false; 6303 6304 // Check constant-ness first. 
6305 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 6306 return true; 6307 6308 if (IsShiftedByte(Result)) 6309 return false; 6310 6311 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_shifted_byte) 6312 << Arg->getSourceRange(); 6313 } 6314 6315 /// SemaBuiltinConstantArgShiftedByteOr0xFF - Check if argument ArgNum of 6316 /// TheCall is a constant expression representing either a shifted byte value, 6317 /// or a value of the form 0x??FF (i.e. a member of the arithmetic progression 6318 /// 0x00FF, 0x01FF, ..., 0xFFFF). This strange range check is needed for some 6319 /// Arm MVE intrinsics. 6320 bool Sema::SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, 6321 int ArgNum) { 6322 llvm::APSInt Result; 6323 6324 // We can't check the value of a dependent argument. 6325 Expr *Arg = TheCall->getArg(ArgNum); 6326 if (Arg->isTypeDependent() || Arg->isValueDependent()) 6327 return false; 6328 6329 // Check constant-ness first. 6330 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 6331 return true; 6332 6333 // Check to see if it's in either of the required forms. 6334 if (IsShiftedByte(Result) || 6335 (Result > 0 && Result < 0x10000 && (Result & 0xFF) == 0xFF)) 6336 return false; 6337 6338 return Diag(TheCall->getBeginLoc(), 6339 diag::err_argument_not_shifted_byte_or_xxff) 6340 << Arg->getSourceRange(); 6341 } 6342 6343 /// SemaBuiltinARMMemoryTaggingCall - Handle calls of memory tagging extensions 6344 bool Sema::SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall) { 6345 if (BuiltinID == AArch64::BI__builtin_arm_irg) { 6346 if (checkArgCount(*this, TheCall, 2)) 6347 return true; 6348 Expr *Arg0 = TheCall->getArg(0); 6349 Expr *Arg1 = TheCall->getArg(1); 6350 6351 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 6352 if (FirstArg.isInvalid()) 6353 return true; 6354 QualType FirstArgType = FirstArg.get()->getType(); 6355 if (!FirstArgType->isAnyPointerType()) 6356 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 6357 << "first" << FirstArgType << Arg0->getSourceRange(); 6358 TheCall->setArg(0, FirstArg.get()); 6359 6360 ExprResult SecArg = DefaultLvalueConversion(Arg1); 6361 if (SecArg.isInvalid()) 6362 return true; 6363 QualType SecArgType = SecArg.get()->getType(); 6364 if (!SecArgType->isIntegerType()) 6365 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer) 6366 << "second" << SecArgType << Arg1->getSourceRange(); 6367 6368 // Derive the return type from the pointer argument. 6369 TheCall->setType(FirstArgType); 6370 return false; 6371 } 6372 6373 if (BuiltinID == AArch64::BI__builtin_arm_addg) { 6374 if (checkArgCount(*this, TheCall, 2)) 6375 return true; 6376 6377 Expr *Arg0 = TheCall->getArg(0); 6378 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 6379 if (FirstArg.isInvalid()) 6380 return true; 6381 QualType FirstArgType = FirstArg.get()->getType(); 6382 if (!FirstArgType->isAnyPointerType()) 6383 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 6384 << "first" << FirstArgType << Arg0->getSourceRange(); 6385 TheCall->setArg(0, FirstArg.get()); 6386 6387 // Derive the return type from the pointer argument. 
6388 TheCall->setType(FirstArgType); 6389 6390 // Second arg must be an constant in range [0,15] 6391 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 6392 } 6393 6394 if (BuiltinID == AArch64::BI__builtin_arm_gmi) { 6395 if (checkArgCount(*this, TheCall, 2)) 6396 return true; 6397 Expr *Arg0 = TheCall->getArg(0); 6398 Expr *Arg1 = TheCall->getArg(1); 6399 6400 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 6401 if (FirstArg.isInvalid()) 6402 return true; 6403 QualType FirstArgType = FirstArg.get()->getType(); 6404 if (!FirstArgType->isAnyPointerType()) 6405 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 6406 << "first" << FirstArgType << Arg0->getSourceRange(); 6407 6408 QualType SecArgType = Arg1->getType(); 6409 if (!SecArgType->isIntegerType()) 6410 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer) 6411 << "second" << SecArgType << Arg1->getSourceRange(); 6412 TheCall->setType(Context.IntTy); 6413 return false; 6414 } 6415 6416 if (BuiltinID == AArch64::BI__builtin_arm_ldg || 6417 BuiltinID == AArch64::BI__builtin_arm_stg) { 6418 if (checkArgCount(*this, TheCall, 1)) 6419 return true; 6420 Expr *Arg0 = TheCall->getArg(0); 6421 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 6422 if (FirstArg.isInvalid()) 6423 return true; 6424 6425 QualType FirstArgType = FirstArg.get()->getType(); 6426 if (!FirstArgType->isAnyPointerType()) 6427 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 6428 << "first" << FirstArgType << Arg0->getSourceRange(); 6429 TheCall->setArg(0, FirstArg.get()); 6430 6431 // Derive the return type from the pointer argument. 6432 if (BuiltinID == AArch64::BI__builtin_arm_ldg) 6433 TheCall->setType(FirstArgType); 6434 return false; 6435 } 6436 6437 if (BuiltinID == AArch64::BI__builtin_arm_subp) { 6438 Expr *ArgA = TheCall->getArg(0); 6439 Expr *ArgB = TheCall->getArg(1); 6440 6441 ExprResult ArgExprA = DefaultFunctionArrayLvalueConversion(ArgA); 6442 ExprResult ArgExprB = DefaultFunctionArrayLvalueConversion(ArgB); 6443 6444 if (ArgExprA.isInvalid() || ArgExprB.isInvalid()) 6445 return true; 6446 6447 QualType ArgTypeA = ArgExprA.get()->getType(); 6448 QualType ArgTypeB = ArgExprB.get()->getType(); 6449 6450 auto isNull = [&] (Expr *E) -> bool { 6451 return E->isNullPointerConstant( 6452 Context, Expr::NPC_ValueDependentIsNotNull); }; 6453 6454 // argument should be either a pointer or null 6455 if (!ArgTypeA->isAnyPointerType() && !isNull(ArgA)) 6456 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer) 6457 << "first" << ArgTypeA << ArgA->getSourceRange(); 6458 6459 if (!ArgTypeB->isAnyPointerType() && !isNull(ArgB)) 6460 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer) 6461 << "second" << ArgTypeB << ArgB->getSourceRange(); 6462 6463 // Ensure Pointee types are compatible 6464 if (ArgTypeA->isAnyPointerType() && !isNull(ArgA) && 6465 ArgTypeB->isAnyPointerType() && !isNull(ArgB)) { 6466 QualType pointeeA = ArgTypeA->getPointeeType(); 6467 QualType pointeeB = ArgTypeB->getPointeeType(); 6468 if (!Context.typesAreCompatible( 6469 Context.getCanonicalType(pointeeA).getUnqualifiedType(), 6470 Context.getCanonicalType(pointeeB).getUnqualifiedType())) { 6471 return Diag(TheCall->getBeginLoc(), diag::err_typecheck_sub_ptr_compatible) 6472 << ArgTypeA << ArgTypeB << ArgA->getSourceRange() 6473 << ArgB->getSourceRange(); 6474 } 6475 } 6476 6477 // at least one argument should be pointer type 6478 if 
(!ArgTypeA->isAnyPointerType() && !ArgTypeB->isAnyPointerType()) 6479 return Diag(TheCall->getBeginLoc(), diag::err_memtag_any2arg_pointer) 6480 << ArgTypeA << ArgTypeB << ArgA->getSourceRange(); 6481 6482 if (isNull(ArgA)) // adopt type of the other pointer 6483 ArgExprA = ImpCastExprToType(ArgExprA.get(), ArgTypeB, CK_NullToPointer); 6484 6485 if (isNull(ArgB)) 6486 ArgExprB = ImpCastExprToType(ArgExprB.get(), ArgTypeA, CK_NullToPointer); 6487 6488 TheCall->setArg(0, ArgExprA.get()); 6489 TheCall->setArg(1, ArgExprB.get()); 6490 TheCall->setType(Context.LongLongTy); 6491 return false; 6492 } 6493 assert(false && "Unhandled ARM MTE intrinsic"); 6494 return true; 6495 } 6496 6497 /// SemaBuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr 6498 /// TheCall is an ARM/AArch64 special register string literal. 6499 bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, 6500 int ArgNum, unsigned ExpectedFieldNum, 6501 bool AllowName) { 6502 bool IsARMBuiltin = BuiltinID == ARM::BI__builtin_arm_rsr64 || 6503 BuiltinID == ARM::BI__builtin_arm_wsr64 || 6504 BuiltinID == ARM::BI__builtin_arm_rsr || 6505 BuiltinID == ARM::BI__builtin_arm_rsrp || 6506 BuiltinID == ARM::BI__builtin_arm_wsr || 6507 BuiltinID == ARM::BI__builtin_arm_wsrp; 6508 bool IsAArch64Builtin = BuiltinID == AArch64::BI__builtin_arm_rsr64 || 6509 BuiltinID == AArch64::BI__builtin_arm_wsr64 || 6510 BuiltinID == AArch64::BI__builtin_arm_rsr || 6511 BuiltinID == AArch64::BI__builtin_arm_rsrp || 6512 BuiltinID == AArch64::BI__builtin_arm_wsr || 6513 BuiltinID == AArch64::BI__builtin_arm_wsrp; 6514 assert((IsARMBuiltin || IsAArch64Builtin) && "Unexpected ARM builtin."); 6515 6516 // We can't check the value of a dependent argument. 6517 Expr *Arg = TheCall->getArg(ArgNum); 6518 if (Arg->isTypeDependent() || Arg->isValueDependent()) 6519 return false; 6520 6521 // Check if the argument is a string literal. 6522 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 6523 return Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 6524 << Arg->getSourceRange(); 6525 6526 // Check the type of special register given. 6527 StringRef Reg = cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 6528 SmallVector<StringRef, 6> Fields; 6529 Reg.split(Fields, ":"); 6530 6531 if (Fields.size() != ExpectedFieldNum && !(AllowName && Fields.size() == 1)) 6532 return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg) 6533 << Arg->getSourceRange(); 6534 6535 // If the string is the name of a register then we cannot check that it is 6536 // valid here but if the string is of one the forms described in ACLE then we 6537 // can check that the supplied fields are integers and within the valid 6538 // ranges. 6539 if (Fields.size() > 1) { 6540 bool FiveFields = Fields.size() == 5; 6541 6542 bool ValidString = true; 6543 if (IsARMBuiltin) { 6544 ValidString &= Fields[0].startswith_lower("cp") || 6545 Fields[0].startswith_lower("p"); 6546 if (ValidString) 6547 Fields[0] = 6548 Fields[0].drop_front(Fields[0].startswith_lower("cp") ? 2 : 1); 6549 6550 ValidString &= Fields[2].startswith_lower("c"); 6551 if (ValidString) 6552 Fields[2] = Fields[2].drop_front(1); 6553 6554 if (FiveFields) { 6555 ValidString &= Fields[3].startswith_lower("c"); 6556 if (ValidString) 6557 Fields[3] = Fields[3].drop_front(1); 6558 } 6559 } 6560 6561 SmallVector<int, 5> Ranges; 6562 if (FiveFields) 6563 Ranges.append({IsAArch64Builtin ? 
1 : 15, 7, 15, 15, 7}); 6564 else 6565 Ranges.append({15, 7, 15}); 6566 6567 for (unsigned i=0; i<Fields.size(); ++i) { 6568 int IntField; 6569 ValidString &= !Fields[i].getAsInteger(10, IntField); 6570 ValidString &= (IntField >= 0 && IntField <= Ranges[i]); 6571 } 6572 6573 if (!ValidString) 6574 return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg) 6575 << Arg->getSourceRange(); 6576 } else if (IsAArch64Builtin && Fields.size() == 1) { 6577 // If the register name is one of those that appear in the condition below 6578 // and the special register builtin being used is one of the write builtins, 6579 // then we require that the argument provided for writing to the register 6580 // is an integer constant expression. This is because it will be lowered to 6581 // an MSR (immediate) instruction, so we need to know the immediate at 6582 // compile time. 6583 if (TheCall->getNumArgs() != 2) 6584 return false; 6585 6586 std::string RegLower = Reg.lower(); 6587 if (RegLower != "spsel" && RegLower != "daifset" && RegLower != "daifclr" && 6588 RegLower != "pan" && RegLower != "uao") 6589 return false; 6590 6591 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 6592 } 6593 6594 return false; 6595 } 6596 6597 /// SemaBuiltinLongjmp - Handle __builtin_longjmp(void *env[5], int val). 6598 /// This checks that the target supports __builtin_longjmp and 6599 /// that val is a constant 1. 6600 bool Sema::SemaBuiltinLongjmp(CallExpr *TheCall) { 6601 if (!Context.getTargetInfo().hasSjLjLowering()) 6602 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_unsupported) 6603 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 6604 6605 Expr *Arg = TheCall->getArg(1); 6606 llvm::APSInt Result; 6607 6608 // TODO: This is less than ideal. Overload this to take a value. 6609 if (SemaBuiltinConstantArg(TheCall, 1, Result)) 6610 return true; 6611 6612 if (Result != 1) 6613 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_invalid_val) 6614 << SourceRange(Arg->getBeginLoc(), Arg->getEndLoc()); 6615 6616 return false; 6617 } 6618 6619 /// SemaBuiltinSetjmp - Handle __builtin_setjmp(void *env[5]). 6620 /// This checks that the target supports __builtin_setjmp. 6621 bool Sema::SemaBuiltinSetjmp(CallExpr *TheCall) { 6622 if (!Context.getTargetInfo().hasSjLjLowering()) 6623 return Diag(TheCall->getBeginLoc(), diag::err_builtin_setjmp_unsupported) 6624 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 6625 return false; 6626 } 6627 6628 namespace { 6629 6630 class UncoveredArgHandler { 6631 enum { Unknown = -1, AllCovered = -2 }; 6632 6633 signed FirstUncoveredArg = Unknown; 6634 SmallVector<const Expr *, 4> DiagnosticExprs; 6635 6636 public: 6637 UncoveredArgHandler() = default; 6638 6639 bool hasUncoveredArg() const { 6640 return (FirstUncoveredArg >= 0); 6641 } 6642 6643 unsigned getUncoveredArg() const { 6644 assert(hasUncoveredArg() && "no uncovered argument"); 6645 return FirstUncoveredArg; 6646 } 6647 6648 void setAllCovered() { 6649 // A string has been found with all arguments covered, so clear out 6650 // the diagnostics. 6651 DiagnosticExprs.clear(); 6652 FirstUncoveredArg = AllCovered; 6653 } 6654 6655 void Update(signed NewFirstUncoveredArg, const Expr *StrExpr) { 6656 assert(NewFirstUncoveredArg >= 0 && "Outside range"); 6657 6658 // Don't update if a previous string covers all arguments. 
6659 if (FirstUncoveredArg == AllCovered) 6660 return; 6661 6662 // UncoveredArgHandler tracks the highest uncovered argument index 6663 // and with it all the strings that match this index. 6664 if (NewFirstUncoveredArg == FirstUncoveredArg) 6665 DiagnosticExprs.push_back(StrExpr); 6666 else if (NewFirstUncoveredArg > FirstUncoveredArg) { 6667 DiagnosticExprs.clear(); 6668 DiagnosticExprs.push_back(StrExpr); 6669 FirstUncoveredArg = NewFirstUncoveredArg; 6670 } 6671 } 6672 6673 void Diagnose(Sema &S, bool IsFunctionCall, const Expr *ArgExpr); 6674 }; 6675 6676 enum StringLiteralCheckType { 6677 SLCT_NotALiteral, 6678 SLCT_UncheckedLiteral, 6679 SLCT_CheckedLiteral 6680 }; 6681 6682 } // namespace 6683 6684 static void sumOffsets(llvm::APSInt &Offset, llvm::APSInt Addend, 6685 BinaryOperatorKind BinOpKind, 6686 bool AddendIsRight) { 6687 unsigned BitWidth = Offset.getBitWidth(); 6688 unsigned AddendBitWidth = Addend.getBitWidth(); 6689 // There might be negative interim results. 6690 if (Addend.isUnsigned()) { 6691 Addend = Addend.zext(++AddendBitWidth); 6692 Addend.setIsSigned(true); 6693 } 6694 // Adjust the bit width of the APSInts. 6695 if (AddendBitWidth > BitWidth) { 6696 Offset = Offset.sext(AddendBitWidth); 6697 BitWidth = AddendBitWidth; 6698 } else if (BitWidth > AddendBitWidth) { 6699 Addend = Addend.sext(BitWidth); 6700 } 6701 6702 bool Ov = false; 6703 llvm::APSInt ResOffset = Offset; 6704 if (BinOpKind == BO_Add) 6705 ResOffset = Offset.sadd_ov(Addend, Ov); 6706 else { 6707 assert(AddendIsRight && BinOpKind == BO_Sub && 6708 "operator must be add or sub with addend on the right"); 6709 ResOffset = Offset.ssub_ov(Addend, Ov); 6710 } 6711 6712 // We add an offset to a pointer here so we should support an offset as big as 6713 // possible. 6714 if (Ov) { 6715 assert(BitWidth <= std::numeric_limits<unsigned>::max() / 2 && 6716 "index (intermediate) result too big"); 6717 Offset = Offset.sext(2 * BitWidth); 6718 sumOffsets(Offset, Addend, BinOpKind, AddendIsRight); 6719 return; 6720 } 6721 6722 Offset = ResOffset; 6723 } 6724 6725 namespace { 6726 6727 // This is a wrapper class around StringLiteral to support offsetted string 6728 // literals as format strings. It takes the offset into account when returning 6729 // the string and its length or the source locations to display notes correctly. 
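// For example (sketch only): for a call like printf("xx%d" + 2, value), the
// format checker sees only "%d", and diagnostic locations are computed from
// the byte position within the original literal plus the offset of 2.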
6730 class FormatStringLiteral { 6731 const StringLiteral *FExpr; 6732 int64_t Offset; 6733 6734 public: 6735 FormatStringLiteral(const StringLiteral *fexpr, int64_t Offset = 0) 6736 : FExpr(fexpr), Offset(Offset) {} 6737 6738 StringRef getString() const { 6739 return FExpr->getString().drop_front(Offset); 6740 } 6741 6742 unsigned getByteLength() const { 6743 return FExpr->getByteLength() - getCharByteWidth() * Offset; 6744 } 6745 6746 unsigned getLength() const { return FExpr->getLength() - Offset; } 6747 unsigned getCharByteWidth() const { return FExpr->getCharByteWidth(); } 6748 6749 StringLiteral::StringKind getKind() const { return FExpr->getKind(); } 6750 6751 QualType getType() const { return FExpr->getType(); } 6752 6753 bool isAscii() const { return FExpr->isAscii(); } 6754 bool isWide() const { return FExpr->isWide(); } 6755 bool isUTF8() const { return FExpr->isUTF8(); } 6756 bool isUTF16() const { return FExpr->isUTF16(); } 6757 bool isUTF32() const { return FExpr->isUTF32(); } 6758 bool isPascal() const { return FExpr->isPascal(); } 6759 6760 SourceLocation getLocationOfByte( 6761 unsigned ByteNo, const SourceManager &SM, const LangOptions &Features, 6762 const TargetInfo &Target, unsigned *StartToken = nullptr, 6763 unsigned *StartTokenByteOffset = nullptr) const { 6764 return FExpr->getLocationOfByte(ByteNo + Offset, SM, Features, Target, 6765 StartToken, StartTokenByteOffset); 6766 } 6767 6768 SourceLocation getBeginLoc() const LLVM_READONLY { 6769 return FExpr->getBeginLoc().getLocWithOffset(Offset); 6770 } 6771 6772 SourceLocation getEndLoc() const LLVM_READONLY { return FExpr->getEndLoc(); } 6773 }; 6774 6775 } // namespace 6776 6777 static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr, 6778 const Expr *OrigFormatExpr, 6779 ArrayRef<const Expr *> Args, 6780 bool HasVAListArg, unsigned format_idx, 6781 unsigned firstDataArg, 6782 Sema::FormatStringType Type, 6783 bool inFunctionCall, 6784 Sema::VariadicCallType CallType, 6785 llvm::SmallBitVector &CheckedVarArgs, 6786 UncoveredArgHandler &UncoveredArg, 6787 bool IgnoreStringsWithoutSpecifiers); 6788 6789 // Determine if an expression is a string literal or constant string. 6790 // If this function returns false on the arguments to a function expecting a 6791 // format string, we will usually need to emit a warning. 6792 // True string literals are then checked by CheckFormatString. 6793 static StringLiteralCheckType 6794 checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args, 6795 bool HasVAListArg, unsigned format_idx, 6796 unsigned firstDataArg, Sema::FormatStringType Type, 6797 Sema::VariadicCallType CallType, bool InFunctionCall, 6798 llvm::SmallBitVector &CheckedVarArgs, 6799 UncoveredArgHandler &UncoveredArg, 6800 llvm::APSInt Offset, 6801 bool IgnoreStringsWithoutSpecifiers = false) { 6802 if (S.isConstantEvaluated()) 6803 return SLCT_NotALiteral; 6804 tryAgain: 6805 assert(Offset.isSigned() && "invalid offset"); 6806 6807 if (E->isTypeDependent() || E->isValueDependent()) 6808 return SLCT_NotALiteral; 6809 6810 E = E->IgnoreParenCasts(); 6811 6812 if (E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull)) 6813 // Technically -Wformat-nonliteral does not warn about this case. 6814 // The behavior of printf and friends in this case is implementation 6815 // dependent. Ideally if the format string cannot be null then 6816 // it should have a 'nonnull' attribute in the function prototype. 
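// (For example, printf(NULL) lands here and is simply treated as an
// unchecked literal.)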
6817 return SLCT_UncheckedLiteral; 6818 6819 switch (E->getStmtClass()) { 6820 case Stmt::BinaryConditionalOperatorClass: 6821 case Stmt::ConditionalOperatorClass: { 6822 // The expression is a literal if both sub-expressions were, and it was 6823 // completely checked only if both sub-expressions were checked. 6824 const AbstractConditionalOperator *C = 6825 cast<AbstractConditionalOperator>(E); 6826 6827 // Determine whether it is necessary to check both sub-expressions, for 6828 // example, because the condition expression is a constant that can be 6829 // evaluated at compile time. 6830 bool CheckLeft = true, CheckRight = true; 6831 6832 bool Cond; 6833 if (C->getCond()->EvaluateAsBooleanCondition(Cond, S.getASTContext(), 6834 S.isConstantEvaluated())) { 6835 if (Cond) 6836 CheckRight = false; 6837 else 6838 CheckLeft = false; 6839 } 6840 6841 // We need to maintain the offsets for the right and the left hand side 6842 // separately to check if every possible indexed expression is a valid 6843 // string literal. They might have different offsets for different string 6844 // literals in the end. 6845 StringLiteralCheckType Left; 6846 if (!CheckLeft) 6847 Left = SLCT_UncheckedLiteral; 6848 else { 6849 Left = checkFormatStringExpr(S, C->getTrueExpr(), Args, 6850 HasVAListArg, format_idx, firstDataArg, 6851 Type, CallType, InFunctionCall, 6852 CheckedVarArgs, UncoveredArg, Offset, 6853 IgnoreStringsWithoutSpecifiers); 6854 if (Left == SLCT_NotALiteral || !CheckRight) { 6855 return Left; 6856 } 6857 } 6858 6859 StringLiteralCheckType Right = checkFormatStringExpr( 6860 S, C->getFalseExpr(), Args, HasVAListArg, format_idx, firstDataArg, 6861 Type, CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset, 6862 IgnoreStringsWithoutSpecifiers); 6863 6864 return (CheckLeft && Left < Right) ? Left : Right; 6865 } 6866 6867 case Stmt::ImplicitCastExprClass: 6868 E = cast<ImplicitCastExpr>(E)->getSubExpr(); 6869 goto tryAgain; 6870 6871 case Stmt::OpaqueValueExprClass: 6872 if (const Expr *src = cast<OpaqueValueExpr>(E)->getSourceExpr()) { 6873 E = src; 6874 goto tryAgain; 6875 } 6876 return SLCT_NotALiteral; 6877 6878 case Stmt::PredefinedExprClass: 6879 // While __func__, etc., are technically not string literals, they 6880 // cannot contain format specifiers and thus are not a security 6881 // liability. 6882 return SLCT_UncheckedLiteral; 6883 6884 case Stmt::DeclRefExprClass: { 6885 const DeclRefExpr *DR = cast<DeclRefExpr>(E); 6886 6887 // As an exception, do not flag errors for variables binding to 6888 // const string literals. 6889 if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) { 6890 bool isConstant = false; 6891 QualType T = DR->getType(); 6892 6893 if (const ArrayType *AT = S.Context.getAsArrayType(T)) { 6894 isConstant = AT->getElementType().isConstant(S.Context); 6895 } else if (const PointerType *PT = T->getAs<PointerType>()) { 6896 isConstant = T.isConstant(S.Context) && 6897 PT->getPointeeType().isConstant(S.Context); 6898 } else if (T->isObjCObjectPointerType()) { 6899 // In ObjC, there is usually no "const ObjectPointer" type, 6900 // so don't check if the pointee type is constant. 
6901 isConstant = T.isConstant(S.Context); 6902 } 6903 6904 if (isConstant) { 6905 if (const Expr *Init = VD->getAnyInitializer()) { 6906 // Look through initializers like const char c[] = { "foo" } 6907 if (const InitListExpr *InitList = dyn_cast<InitListExpr>(Init)) { 6908 if (InitList->isStringLiteralInit()) 6909 Init = InitList->getInit(0)->IgnoreParenImpCasts(); 6910 } 6911 return checkFormatStringExpr(S, Init, Args, 6912 HasVAListArg, format_idx, 6913 firstDataArg, Type, CallType, 6914 /*InFunctionCall*/ false, CheckedVarArgs, 6915 UncoveredArg, Offset); 6916 } 6917 } 6918 6919 // For vprintf* functions (i.e., HasVAListArg==true), we add a 6920 // special check to see if the format string is a function parameter 6921 // of the function calling the printf function. If the function 6922 // has an attribute indicating it is a printf-like function, then we 6923 // should suppress warnings concerning non-literals being used in a call 6924 // to a vprintf function. For example: 6925 // 6926 // void 6927 // logmessage(char const *fmt __attribute__ (format (printf, 1, 2)), ...){ 6928 // va_list ap; 6929 // va_start(ap, fmt); 6930 // vprintf(fmt, ap); // Do NOT emit a warning about "fmt". 6931 // ... 6932 // } 6933 if (HasVAListArg) { 6934 if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(VD)) { 6935 if (const NamedDecl *ND = dyn_cast<NamedDecl>(PV->getDeclContext())) { 6936 int PVIndex = PV->getFunctionScopeIndex() + 1; 6937 for (const auto *PVFormat : ND->specific_attrs<FormatAttr>()) { 6938 // adjust for implicit parameter 6939 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(ND)) 6940 if (MD->isInstance()) 6941 ++PVIndex; 6942 // We also check if the formats are compatible. 6943 // We can't pass a 'scanf' string to a 'printf' function. 6944 if (PVIndex == PVFormat->getFormatIdx() && 6945 Type == S.GetFormatStringType(PVFormat)) 6946 return SLCT_UncheckedLiteral; 6947 } 6948 } 6949 } 6950 } 6951 } 6952 6953 return SLCT_NotALiteral; 6954 } 6955 6956 case Stmt::CallExprClass: 6957 case Stmt::CXXMemberCallExprClass: { 6958 const CallExpr *CE = cast<CallExpr>(E); 6959 if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(CE->getCalleeDecl())) { 6960 bool IsFirst = true; 6961 StringLiteralCheckType CommonResult; 6962 for (const auto *FA : ND->specific_attrs<FormatArgAttr>()) { 6963 const Expr *Arg = CE->getArg(FA->getFormatIdx().getASTIndex()); 6964 StringLiteralCheckType Result = checkFormatStringExpr( 6965 S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type, 6966 CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset, 6967 IgnoreStringsWithoutSpecifiers); 6968 if (IsFirst) { 6969 CommonResult = Result; 6970 IsFirst = false; 6971 } 6972 } 6973 if (!IsFirst) 6974 return CommonResult; 6975 6976 if (const auto *FD = dyn_cast<FunctionDecl>(ND)) { 6977 unsigned BuiltinID = FD->getBuiltinID(); 6978 if (BuiltinID == Builtin::BI__builtin___CFStringMakeConstantString || 6979 BuiltinID == Builtin::BI__builtin___NSStringMakeConstantString) { 6980 const Expr *Arg = CE->getArg(0); 6981 return checkFormatStringExpr(S, Arg, Args, 6982 HasVAListArg, format_idx, 6983 firstDataArg, Type, CallType, 6984 InFunctionCall, CheckedVarArgs, 6985 UncoveredArg, Offset, 6986 IgnoreStringsWithoutSpecifiers); 6987 } 6988 } 6989 } 6990 6991 return SLCT_NotALiteral; 6992 } 6993 case Stmt::ObjCMessageExprClass: { 6994 const auto *ME = cast<ObjCMessageExpr>(E); 6995 if (const auto *MD = ME->getMethodDecl()) { 6996 if (const auto *FA = MD->getAttr<FormatArgAttr>()) { 6997 // As a special case heuristic, if 
we're using the method -[NSBundle
6998 // localizedStringForKey:value:table:], ignore any key strings that lack
6999 // format specifiers. The idea is that if the key doesn't have any
7000 // format specifiers then it's probably just a key to map to the
7001 // localized strings. If it does have format specifiers though, then it's
7002 // likely that the text of the key is the format string in the
7003 // programmer's language, and should be checked.
7004 const ObjCInterfaceDecl *IFace;
7005 if (MD->isInstanceMethod() && (IFace = MD->getClassInterface()) &&
7006 IFace->getIdentifier()->isStr("NSBundle") &&
7007 MD->getSelector().isKeywordSelector(
7008 {"localizedStringForKey", "value", "table"})) {
7009 IgnoreStringsWithoutSpecifiers = true;
7010 }
7011
7012 const Expr *Arg = ME->getArg(FA->getFormatIdx().getASTIndex());
7013 return checkFormatStringExpr(
7014 S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type,
7015 CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
7016 IgnoreStringsWithoutSpecifiers);
7017 }
7018 }
7019
7020 return SLCT_NotALiteral;
7021 }
7022 case Stmt::ObjCStringLiteralClass:
7023 case Stmt::StringLiteralClass: {
7024 const StringLiteral *StrE = nullptr;
7025
7026 if (const ObjCStringLiteral *ObjCFExpr = dyn_cast<ObjCStringLiteral>(E))
7027 StrE = ObjCFExpr->getString();
7028 else
7029 StrE = cast<StringLiteral>(E);
7030
7031 if (StrE) {
7032 if (Offset.isNegative() || Offset > StrE->getLength()) {
7033 // TODO: It would be better to have an explicit warning for out of
7034 // bounds literals.
7035 return SLCT_NotALiteral;
7036 }
7037 FormatStringLiteral FStr(StrE, Offset.sextOrTrunc(64).getSExtValue());
7038 CheckFormatString(S, &FStr, E, Args, HasVAListArg, format_idx,
7039 firstDataArg, Type, InFunctionCall, CallType,
7040 CheckedVarArgs, UncoveredArg,
7041 IgnoreStringsWithoutSpecifiers);
7042 return SLCT_CheckedLiteral;
7043 }
7044
7045 return SLCT_NotALiteral;
7046 }
7047 case Stmt::BinaryOperatorClass: {
7048 const BinaryOperator *BinOp = cast<BinaryOperator>(E);
7049
7050 // A string literal + an int offset is still a string literal.
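// For example (illustrative), in printf("%s: %d" + 4, i) the constant 4 is
// folded into Offset by sumOffsets() below and checking restarts on the
// string literal, so only the trailing "%d" is matched against the
// arguments.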
7051 if (BinOp->isAdditiveOp()) { 7052 Expr::EvalResult LResult, RResult; 7053 7054 bool LIsInt = BinOp->getLHS()->EvaluateAsInt( 7055 LResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated()); 7056 bool RIsInt = BinOp->getRHS()->EvaluateAsInt( 7057 RResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated()); 7058 7059 if (LIsInt != RIsInt) { 7060 BinaryOperatorKind BinOpKind = BinOp->getOpcode(); 7061 7062 if (LIsInt) { 7063 if (BinOpKind == BO_Add) { 7064 sumOffsets(Offset, LResult.Val.getInt(), BinOpKind, RIsInt); 7065 E = BinOp->getRHS(); 7066 goto tryAgain; 7067 } 7068 } else { 7069 sumOffsets(Offset, RResult.Val.getInt(), BinOpKind, RIsInt); 7070 E = BinOp->getLHS(); 7071 goto tryAgain; 7072 } 7073 } 7074 } 7075 7076 return SLCT_NotALiteral; 7077 } 7078 case Stmt::UnaryOperatorClass: { 7079 const UnaryOperator *UnaOp = cast<UnaryOperator>(E); 7080 auto ASE = dyn_cast<ArraySubscriptExpr>(UnaOp->getSubExpr()); 7081 if (UnaOp->getOpcode() == UO_AddrOf && ASE) { 7082 Expr::EvalResult IndexResult; 7083 if (ASE->getRHS()->EvaluateAsInt(IndexResult, S.Context, 7084 Expr::SE_NoSideEffects, 7085 S.isConstantEvaluated())) { 7086 sumOffsets(Offset, IndexResult.Val.getInt(), BO_Add, 7087 /*RHS is int*/ true); 7088 E = ASE->getBase(); 7089 goto tryAgain; 7090 } 7091 } 7092 7093 return SLCT_NotALiteral; 7094 } 7095 7096 default: 7097 return SLCT_NotALiteral; 7098 } 7099 } 7100 7101 Sema::FormatStringType Sema::GetFormatStringType(const FormatAttr *Format) { 7102 return llvm::StringSwitch<FormatStringType>(Format->getType()->getName()) 7103 .Case("scanf", FST_Scanf) 7104 .Cases("printf", "printf0", FST_Printf) 7105 .Cases("NSString", "CFString", FST_NSString) 7106 .Case("strftime", FST_Strftime) 7107 .Case("strfmon", FST_Strfmon) 7108 .Cases("kprintf", "cmn_err", "vcmn_err", "zcmn_err", FST_Kprintf) 7109 .Case("freebsd_kprintf", FST_FreeBSDKPrintf) 7110 .Case("os_trace", FST_OSLog) 7111 .Case("os_log", FST_OSLog) 7112 .Default(FST_Unknown); 7113 } 7114 7115 /// CheckFormatArguments - Check calls to printf and scanf (and similar 7116 /// functions) for correct use of format strings. 7117 /// Returns true if a format string has been fully checked. 7118 bool Sema::CheckFormatArguments(const FormatAttr *Format, 7119 ArrayRef<const Expr *> Args, 7120 bool IsCXXMember, 7121 VariadicCallType CallType, 7122 SourceLocation Loc, SourceRange Range, 7123 llvm::SmallBitVector &CheckedVarArgs) { 7124 FormatStringInfo FSI; 7125 if (getFormatStringInfo(Format, IsCXXMember, &FSI)) 7126 return CheckFormatArguments(Args, FSI.HasVAListArg, FSI.FormatIdx, 7127 FSI.FirstDataArg, GetFormatStringType(Format), 7128 CallType, Loc, Range, CheckedVarArgs); 7129 return false; 7130 } 7131 7132 bool Sema::CheckFormatArguments(ArrayRef<const Expr *> Args, 7133 bool HasVAListArg, unsigned format_idx, 7134 unsigned firstDataArg, FormatStringType Type, 7135 VariadicCallType CallType, 7136 SourceLocation Loc, SourceRange Range, 7137 llvm::SmallBitVector &CheckedVarArgs) { 7138 // CHECK: printf/scanf-like function is called with no format string. 7139 if (format_idx >= Args.size()) { 7140 Diag(Loc, diag::warn_missing_format_string) << Range; 7141 return false; 7142 } 7143 7144 const Expr *OrigFormatExpr = Args[format_idx]->IgnoreParenCasts(); 7145 7146 // CHECK: format string is not a string literal. 7147 // 7148 // Dynamically generated format strings are difficult to 7149 // automatically vet at compile time. 
Requiring that format strings 7150 // are string literals: (1) permits the checking of format strings by 7151 // the compiler and thereby (2) can practically remove the source of 7152 // many format string exploits. 7153 7154 // Format string can be either ObjC string (e.g. @"%d") or 7155 // C string (e.g. "%d") 7156 // ObjC string uses the same format specifiers as C string, so we can use 7157 // the same format string checking logic for both ObjC and C strings. 7158 UncoveredArgHandler UncoveredArg; 7159 StringLiteralCheckType CT = 7160 checkFormatStringExpr(*this, OrigFormatExpr, Args, HasVAListArg, 7161 format_idx, firstDataArg, Type, CallType, 7162 /*IsFunctionCall*/ true, CheckedVarArgs, 7163 UncoveredArg, 7164 /*no string offset*/ llvm::APSInt(64, false) = 0); 7165 7166 // Generate a diagnostic where an uncovered argument is detected. 7167 if (UncoveredArg.hasUncoveredArg()) { 7168 unsigned ArgIdx = UncoveredArg.getUncoveredArg() + firstDataArg; 7169 assert(ArgIdx < Args.size() && "ArgIdx outside bounds"); 7170 UncoveredArg.Diagnose(*this, /*IsFunctionCall*/true, Args[ArgIdx]); 7171 } 7172 7173 if (CT != SLCT_NotALiteral) 7174 // Literal format string found, check done! 7175 return CT == SLCT_CheckedLiteral; 7176 7177 // Strftime is particular as it always uses a single 'time' argument, 7178 // so it is safe to pass a non-literal string. 7179 if (Type == FST_Strftime) 7180 return false; 7181 7182 // Do not emit diag when the string param is a macro expansion and the 7183 // format is either NSString or CFString. This is a hack to prevent 7184 // diag when using the NSLocalizedString and CFCopyLocalizedString macros 7185 // which are usually used in place of NS and CF string literals. 7186 SourceLocation FormatLoc = Args[format_idx]->getBeginLoc(); 7187 if (Type == FST_NSString && SourceMgr.isInSystemMacro(FormatLoc)) 7188 return false; 7189 7190 // If there are no arguments specified, warn with -Wformat-security, otherwise 7191 // warn only with -Wformat-nonliteral. 7192 if (Args.size() == firstDataArg) { 7193 Diag(FormatLoc, diag::warn_format_nonliteral_noargs) 7194 << OrigFormatExpr->getSourceRange(); 7195 switch (Type) { 7196 default: 7197 break; 7198 case FST_Kprintf: 7199 case FST_FreeBSDKPrintf: 7200 case FST_Printf: 7201 Diag(FormatLoc, diag::note_format_security_fixit) 7202 << FixItHint::CreateInsertion(FormatLoc, "\"%s\", "); 7203 break; 7204 case FST_NSString: 7205 Diag(FormatLoc, diag::note_format_security_fixit) 7206 << FixItHint::CreateInsertion(FormatLoc, "@\"%@\", "); 7207 break; 7208 } 7209 } else { 7210 Diag(FormatLoc, diag::warn_format_nonliteral) 7211 << OrigFormatExpr->getSourceRange(); 7212 } 7213 return false; 7214 } 7215 7216 namespace { 7217 7218 class CheckFormatHandler : public analyze_format_string::FormatStringHandler { 7219 protected: 7220 Sema &S; 7221 const FormatStringLiteral *FExpr; 7222 const Expr *OrigFormatExpr; 7223 const Sema::FormatStringType FSType; 7224 const unsigned FirstDataArg; 7225 const unsigned NumDataArgs; 7226 const char *Beg; // Start of format string. 
7227 const bool HasVAListArg; 7228 ArrayRef<const Expr *> Args; 7229 unsigned FormatIdx; 7230 llvm::SmallBitVector CoveredArgs; 7231 bool usesPositionalArgs = false; 7232 bool atFirstArg = true; 7233 bool inFunctionCall; 7234 Sema::VariadicCallType CallType; 7235 llvm::SmallBitVector &CheckedVarArgs; 7236 UncoveredArgHandler &UncoveredArg; 7237 7238 public: 7239 CheckFormatHandler(Sema &s, const FormatStringLiteral *fexpr, 7240 const Expr *origFormatExpr, 7241 const Sema::FormatStringType type, unsigned firstDataArg, 7242 unsigned numDataArgs, const char *beg, bool hasVAListArg, 7243 ArrayRef<const Expr *> Args, unsigned formatIdx, 7244 bool inFunctionCall, Sema::VariadicCallType callType, 7245 llvm::SmallBitVector &CheckedVarArgs, 7246 UncoveredArgHandler &UncoveredArg) 7247 : S(s), FExpr(fexpr), OrigFormatExpr(origFormatExpr), FSType(type), 7248 FirstDataArg(firstDataArg), NumDataArgs(numDataArgs), Beg(beg), 7249 HasVAListArg(hasVAListArg), Args(Args), FormatIdx(formatIdx), 7250 inFunctionCall(inFunctionCall), CallType(callType), 7251 CheckedVarArgs(CheckedVarArgs), UncoveredArg(UncoveredArg) { 7252 CoveredArgs.resize(numDataArgs); 7253 CoveredArgs.reset(); 7254 } 7255 7256 void DoneProcessing(); 7257 7258 void HandleIncompleteSpecifier(const char *startSpecifier, 7259 unsigned specifierLen) override; 7260 7261 void HandleInvalidLengthModifier( 7262 const analyze_format_string::FormatSpecifier &FS, 7263 const analyze_format_string::ConversionSpecifier &CS, 7264 const char *startSpecifier, unsigned specifierLen, 7265 unsigned DiagID); 7266 7267 void HandleNonStandardLengthModifier( 7268 const analyze_format_string::FormatSpecifier &FS, 7269 const char *startSpecifier, unsigned specifierLen); 7270 7271 void HandleNonStandardConversionSpecifier( 7272 const analyze_format_string::ConversionSpecifier &CS, 7273 const char *startSpecifier, unsigned specifierLen); 7274 7275 void HandlePosition(const char *startPos, unsigned posLen) override; 7276 7277 void HandleInvalidPosition(const char *startSpecifier, 7278 unsigned specifierLen, 7279 analyze_format_string::PositionContext p) override; 7280 7281 void HandleZeroPosition(const char *startPos, unsigned posLen) override; 7282 7283 void HandleNullChar(const char *nullCharacter) override; 7284 7285 template <typename Range> 7286 static void 7287 EmitFormatDiagnostic(Sema &S, bool inFunctionCall, const Expr *ArgumentExpr, 7288 const PartialDiagnostic &PDiag, SourceLocation StringLoc, 7289 bool IsStringLocation, Range StringRange, 7290 ArrayRef<FixItHint> Fixit = None); 7291 7292 protected: 7293 bool HandleInvalidConversionSpecifier(unsigned argIndex, SourceLocation Loc, 7294 const char *startSpec, 7295 unsigned specifierLen, 7296 const char *csStart, unsigned csLen); 7297 7298 void HandlePositionalNonpositionalArgs(SourceLocation Loc, 7299 const char *startSpec, 7300 unsigned specifierLen); 7301 7302 SourceRange getFormatStringRange(); 7303 CharSourceRange getSpecifierRange(const char *startSpecifier, 7304 unsigned specifierLen); 7305 SourceLocation getLocationOfByte(const char *x); 7306 7307 const Expr *getDataArg(unsigned i) const; 7308 7309 bool CheckNumArgs(const analyze_format_string::FormatSpecifier &FS, 7310 const analyze_format_string::ConversionSpecifier &CS, 7311 const char *startSpecifier, unsigned specifierLen, 7312 unsigned argIndex); 7313 7314 template <typename Range> 7315 void EmitFormatDiagnostic(PartialDiagnostic PDiag, SourceLocation StringLoc, 7316 bool IsStringLocation, Range StringRange, 7317 ArrayRef<FixItHint> Fixit = None); 
7318 }; 7319 7320 } // namespace 7321 7322 SourceRange CheckFormatHandler::getFormatStringRange() { 7323 return OrigFormatExpr->getSourceRange(); 7324 } 7325 7326 CharSourceRange CheckFormatHandler:: 7327 getSpecifierRange(const char *startSpecifier, unsigned specifierLen) { 7328 SourceLocation Start = getLocationOfByte(startSpecifier); 7329 SourceLocation End = getLocationOfByte(startSpecifier + specifierLen - 1); 7330 7331 // Advance the end SourceLocation by one due to half-open ranges. 7332 End = End.getLocWithOffset(1); 7333 7334 return CharSourceRange::getCharRange(Start, End); 7335 } 7336 7337 SourceLocation CheckFormatHandler::getLocationOfByte(const char *x) { 7338 return FExpr->getLocationOfByte(x - Beg, S.getSourceManager(), 7339 S.getLangOpts(), S.Context.getTargetInfo()); 7340 } 7341 7342 void CheckFormatHandler::HandleIncompleteSpecifier(const char *startSpecifier, 7343 unsigned specifierLen){ 7344 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_incomplete_specifier), 7345 getLocationOfByte(startSpecifier), 7346 /*IsStringLocation*/true, 7347 getSpecifierRange(startSpecifier, specifierLen)); 7348 } 7349 7350 void CheckFormatHandler::HandleInvalidLengthModifier( 7351 const analyze_format_string::FormatSpecifier &FS, 7352 const analyze_format_string::ConversionSpecifier &CS, 7353 const char *startSpecifier, unsigned specifierLen, unsigned DiagID) { 7354 using namespace analyze_format_string; 7355 7356 const LengthModifier &LM = FS.getLengthModifier(); 7357 CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength()); 7358 7359 // See if we know how to fix this length modifier. 7360 Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier(); 7361 if (FixedLM) { 7362 EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), 7363 getLocationOfByte(LM.getStart()), 7364 /*IsStringLocation*/true, 7365 getSpecifierRange(startSpecifier, specifierLen)); 7366 7367 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) 7368 << FixedLM->toString() 7369 << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); 7370 7371 } else { 7372 FixItHint Hint; 7373 if (DiagID == diag::warn_format_nonsensical_length) 7374 Hint = FixItHint::CreateRemoval(LMRange); 7375 7376 EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), 7377 getLocationOfByte(LM.getStart()), 7378 /*IsStringLocation*/true, 7379 getSpecifierRange(startSpecifier, specifierLen), 7380 Hint); 7381 } 7382 } 7383 7384 void CheckFormatHandler::HandleNonStandardLengthModifier( 7385 const analyze_format_string::FormatSpecifier &FS, 7386 const char *startSpecifier, unsigned specifierLen) { 7387 using namespace analyze_format_string; 7388 7389 const LengthModifier &LM = FS.getLengthModifier(); 7390 CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength()); 7391 7392 // See if we know how to fix this length modifier. 
7393 Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier(); 7394 if (FixedLM) { 7395 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 7396 << LM.toString() << 0, 7397 getLocationOfByte(LM.getStart()), 7398 /*IsStringLocation*/true, 7399 getSpecifierRange(startSpecifier, specifierLen)); 7400 7401 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) 7402 << FixedLM->toString() 7403 << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); 7404 7405 } else { 7406 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 7407 << LM.toString() << 0, 7408 getLocationOfByte(LM.getStart()), 7409 /*IsStringLocation*/true, 7410 getSpecifierRange(startSpecifier, specifierLen)); 7411 } 7412 } 7413 7414 void CheckFormatHandler::HandleNonStandardConversionSpecifier( 7415 const analyze_format_string::ConversionSpecifier &CS, 7416 const char *startSpecifier, unsigned specifierLen) { 7417 using namespace analyze_format_string; 7418 7419 // See if we know how to fix this conversion specifier. 7420 Optional<ConversionSpecifier> FixedCS = CS.getStandardSpecifier(); 7421 if (FixedCS) { 7422 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 7423 << CS.toString() << /*conversion specifier*/1, 7424 getLocationOfByte(CS.getStart()), 7425 /*IsStringLocation*/true, 7426 getSpecifierRange(startSpecifier, specifierLen)); 7427 7428 CharSourceRange CSRange = getSpecifierRange(CS.getStart(), CS.getLength()); 7429 S.Diag(getLocationOfByte(CS.getStart()), diag::note_format_fix_specifier) 7430 << FixedCS->toString() 7431 << FixItHint::CreateReplacement(CSRange, FixedCS->toString()); 7432 } else { 7433 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 7434 << CS.toString() << /*conversion specifier*/1, 7435 getLocationOfByte(CS.getStart()), 7436 /*IsStringLocation*/true, 7437 getSpecifierRange(startSpecifier, specifierLen)); 7438 } 7439 } 7440 7441 void CheckFormatHandler::HandlePosition(const char *startPos, 7442 unsigned posLen) { 7443 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard_positional_arg), 7444 getLocationOfByte(startPos), 7445 /*IsStringLocation*/true, 7446 getSpecifierRange(startPos, posLen)); 7447 } 7448 7449 void 7450 CheckFormatHandler::HandleInvalidPosition(const char *startPos, unsigned posLen, 7451 analyze_format_string::PositionContext p) { 7452 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_positional_specifier) 7453 << (unsigned) p, 7454 getLocationOfByte(startPos), /*IsStringLocation*/true, 7455 getSpecifierRange(startPos, posLen)); 7456 } 7457 7458 void CheckFormatHandler::HandleZeroPosition(const char *startPos, 7459 unsigned posLen) { 7460 EmitFormatDiagnostic(S.PDiag(diag::warn_format_zero_positional_specifier), 7461 getLocationOfByte(startPos), 7462 /*IsStringLocation*/true, 7463 getSpecifierRange(startPos, posLen)); 7464 } 7465 7466 void CheckFormatHandler::HandleNullChar(const char *nullCharacter) { 7467 if (!isa<ObjCStringLiteral>(OrigFormatExpr)) { 7468 // The presence of a null character is likely an error. 7469 EmitFormatDiagnostic( 7470 S.PDiag(diag::warn_printf_format_string_contains_null_char), 7471 getLocationOfByte(nullCharacter), /*IsStringLocation*/true, 7472 getFormatStringRange()); 7473 } 7474 } 7475 7476 // Note that this may return NULL if there was an error parsing or building 7477 // one of the argument expressions. 
7478 const Expr *CheckFormatHandler::getDataArg(unsigned i) const {
7479 return Args[FirstDataArg + i];
7480 }
7481
7482 void CheckFormatHandler::DoneProcessing() {
7483 // Does the number of data arguments exceed the number of
7484 // format conversions in the format string?
7485 if (!HasVAListArg) {
7486 // Find any arguments that weren't covered.
7487 CoveredArgs.flip();
7488 signed notCoveredArg = CoveredArgs.find_first();
7489 if (notCoveredArg >= 0) {
7490 assert((unsigned)notCoveredArg < NumDataArgs);
7491 UncoveredArg.Update(notCoveredArg, OrigFormatExpr);
7492 } else {
7493 UncoveredArg.setAllCovered();
7494 }
7495 }
7496 }
7497
7498 void UncoveredArgHandler::Diagnose(Sema &S, bool IsFunctionCall,
7499 const Expr *ArgExpr) {
7500 assert(hasUncoveredArg() && DiagnosticExprs.size() > 0 &&
7501 "Invalid state");
7502
7503 if (!ArgExpr)
7504 return;
7505
7506 SourceLocation Loc = ArgExpr->getBeginLoc();
7507
7508 if (S.getSourceManager().isInSystemMacro(Loc))
7509 return;
7510
7511 PartialDiagnostic PDiag = S.PDiag(diag::warn_printf_data_arg_not_used);
7512 for (auto E : DiagnosticExprs)
7513 PDiag << E->getSourceRange();
7514
7515 CheckFormatHandler::EmitFormatDiagnostic(
7516 S, IsFunctionCall, DiagnosticExprs[0],
7517 PDiag, Loc, /*IsStringLocation*/false,
7518 DiagnosticExprs[0]->getSourceRange());
7519 }
7520
7521 bool
7522 CheckFormatHandler::HandleInvalidConversionSpecifier(unsigned argIndex,
7523 SourceLocation Loc,
7524 const char *startSpec,
7525 unsigned specifierLen,
7526 const char *csStart,
7527 unsigned csLen) {
7528 bool keepGoing = true;
7529 if (argIndex < NumDataArgs) {
7530 // Consider the argument covered, even though the specifier doesn't
7531 // make sense.
7532 CoveredArgs.set(argIndex);
7533 }
7534 else {
7535 // If argIndex exceeds the number of data arguments we
7536 // don't issue a warning because that is just a cascade of warnings (and
7537 // they may have intended '%%' anyway). We don't want to continue processing
7538 // the format string after this point, however, as we will likely just get
7539 // gibberish when trying to match arguments.
7540 keepGoing = false;
7541 }
7542
7543 StringRef Specifier(csStart, csLen);
7544
7545 // If the specifier is non-printable, it could be the first byte of a UTF-8
7546 // sequence. In that case, print the UTF-8 code point. If not, print the byte
7547 // hex value.
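// For example (illustrative), a lone control byte such as '\x01' after '%'
// is rendered as "\x01", while a complete UTF-8 sequence such as the three
// bytes of U+20AC is rendered as "\u20ac".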
7548 std::string CodePointStr; 7549 if (!llvm::sys::locale::isPrint(*csStart)) { 7550 llvm::UTF32 CodePoint; 7551 const llvm::UTF8 **B = reinterpret_cast<const llvm::UTF8 **>(&csStart); 7552 const llvm::UTF8 *E = 7553 reinterpret_cast<const llvm::UTF8 *>(csStart + csLen); 7554 llvm::ConversionResult Result = 7555 llvm::convertUTF8Sequence(B, E, &CodePoint, llvm::strictConversion); 7556 7557 if (Result != llvm::conversionOK) { 7558 unsigned char FirstChar = *csStart; 7559 CodePoint = (llvm::UTF32)FirstChar; 7560 } 7561 7562 llvm::raw_string_ostream OS(CodePointStr); 7563 if (CodePoint < 256) 7564 OS << "\\x" << llvm::format("%02x", CodePoint); 7565 else if (CodePoint <= 0xFFFF) 7566 OS << "\\u" << llvm::format("%04x", CodePoint); 7567 else 7568 OS << "\\U" << llvm::format("%08x", CodePoint); 7569 OS.flush(); 7570 Specifier = CodePointStr; 7571 } 7572 7573 EmitFormatDiagnostic( 7574 S.PDiag(diag::warn_format_invalid_conversion) << Specifier, Loc, 7575 /*IsStringLocation*/ true, getSpecifierRange(startSpec, specifierLen)); 7576 7577 return keepGoing; 7578 } 7579 7580 void 7581 CheckFormatHandler::HandlePositionalNonpositionalArgs(SourceLocation Loc, 7582 const char *startSpec, 7583 unsigned specifierLen) { 7584 EmitFormatDiagnostic( 7585 S.PDiag(diag::warn_format_mix_positional_nonpositional_args), 7586 Loc, /*isStringLoc*/true, getSpecifierRange(startSpec, specifierLen)); 7587 } 7588 7589 bool 7590 CheckFormatHandler::CheckNumArgs( 7591 const analyze_format_string::FormatSpecifier &FS, 7592 const analyze_format_string::ConversionSpecifier &CS, 7593 const char *startSpecifier, unsigned specifierLen, unsigned argIndex) { 7594 7595 if (argIndex >= NumDataArgs) { 7596 PartialDiagnostic PDiag = FS.usesPositionalArg() 7597 ? (S.PDiag(diag::warn_printf_positional_arg_exceeds_data_args) 7598 << (argIndex+1) << NumDataArgs) 7599 : S.PDiag(diag::warn_printf_insufficient_data_args); 7600 EmitFormatDiagnostic( 7601 PDiag, getLocationOfByte(CS.getStart()), /*IsStringLocation*/true, 7602 getSpecifierRange(startSpecifier, specifierLen)); 7603 7604 // Since more arguments than conversion tokens are given, by extension 7605 // all arguments are covered, so mark this as so. 7606 UncoveredArg.setAllCovered(); 7607 return false; 7608 } 7609 return true; 7610 } 7611 7612 template<typename Range> 7613 void CheckFormatHandler::EmitFormatDiagnostic(PartialDiagnostic PDiag, 7614 SourceLocation Loc, 7615 bool IsStringLocation, 7616 Range StringRange, 7617 ArrayRef<FixItHint> FixIt) { 7618 EmitFormatDiagnostic(S, inFunctionCall, Args[FormatIdx], PDiag, 7619 Loc, IsStringLocation, StringRange, FixIt); 7620 } 7621 7622 /// If the format string is not within the function call, emit a note 7623 /// so that the function call and string are in diagnostic messages. 7624 /// 7625 /// \param InFunctionCall if true, the format string is within the function 7626 /// call and only one diagnostic message will be produced. Otherwise, an 7627 /// extra note will be emitted pointing to location of the format string. 7628 /// 7629 /// \param ArgumentExpr the expression that is passed as the format string 7630 /// argument in the function call. Used for getting locations when two 7631 /// diagnostics are emitted. 7632 /// 7633 /// \param PDiag the callee should already have provided any strings for the 7634 /// diagnostic message. This function only adds locations and fixits 7635 /// to diagnostics. 7636 /// 7637 /// \param Loc primary location for diagnostic. 
If two diagnostics are 7638 /// required, one will be at Loc and a new SourceLocation will be created for 7639 /// the other one. 7640 /// 7641 /// \param IsStringLocation if true, Loc points to the format string should be 7642 /// used for the note. Otherwise, Loc points to the argument list and will 7643 /// be used with PDiag. 7644 /// 7645 /// \param StringRange some or all of the string to highlight. This is 7646 /// templated so it can accept either a CharSourceRange or a SourceRange. 7647 /// 7648 /// \param FixIt optional fix it hint for the format string. 7649 template <typename Range> 7650 void CheckFormatHandler::EmitFormatDiagnostic( 7651 Sema &S, bool InFunctionCall, const Expr *ArgumentExpr, 7652 const PartialDiagnostic &PDiag, SourceLocation Loc, bool IsStringLocation, 7653 Range StringRange, ArrayRef<FixItHint> FixIt) { 7654 if (InFunctionCall) { 7655 const Sema::SemaDiagnosticBuilder &D = S.Diag(Loc, PDiag); 7656 D << StringRange; 7657 D << FixIt; 7658 } else { 7659 S.Diag(IsStringLocation ? ArgumentExpr->getExprLoc() : Loc, PDiag) 7660 << ArgumentExpr->getSourceRange(); 7661 7662 const Sema::SemaDiagnosticBuilder &Note = 7663 S.Diag(IsStringLocation ? Loc : StringRange.getBegin(), 7664 diag::note_format_string_defined); 7665 7666 Note << StringRange; 7667 Note << FixIt; 7668 } 7669 } 7670 7671 //===--- CHECK: Printf format string checking ------------------------------===// 7672 7673 namespace { 7674 7675 class CheckPrintfHandler : public CheckFormatHandler { 7676 public: 7677 CheckPrintfHandler(Sema &s, const FormatStringLiteral *fexpr, 7678 const Expr *origFormatExpr, 7679 const Sema::FormatStringType type, unsigned firstDataArg, 7680 unsigned numDataArgs, bool isObjC, const char *beg, 7681 bool hasVAListArg, ArrayRef<const Expr *> Args, 7682 unsigned formatIdx, bool inFunctionCall, 7683 Sema::VariadicCallType CallType, 7684 llvm::SmallBitVector &CheckedVarArgs, 7685 UncoveredArgHandler &UncoveredArg) 7686 : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg, 7687 numDataArgs, beg, hasVAListArg, Args, formatIdx, 7688 inFunctionCall, CallType, CheckedVarArgs, 7689 UncoveredArg) {} 7690 7691 bool isObjCContext() const { return FSType == Sema::FST_NSString; } 7692 7693 /// Returns true if '%@' specifiers are allowed in the format string. 
7694 bool allowsObjCArg() const { 7695 return FSType == Sema::FST_NSString || FSType == Sema::FST_OSLog || 7696 FSType == Sema::FST_OSTrace; 7697 } 7698 7699 bool HandleInvalidPrintfConversionSpecifier( 7700 const analyze_printf::PrintfSpecifier &FS, 7701 const char *startSpecifier, 7702 unsigned specifierLen) override; 7703 7704 void handleInvalidMaskType(StringRef MaskType) override; 7705 7706 bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS, 7707 const char *startSpecifier, 7708 unsigned specifierLen) override; 7709 bool checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, 7710 const char *StartSpecifier, 7711 unsigned SpecifierLen, 7712 const Expr *E); 7713 7714 bool HandleAmount(const analyze_format_string::OptionalAmount &Amt, unsigned k, 7715 const char *startSpecifier, unsigned specifierLen); 7716 void HandleInvalidAmount(const analyze_printf::PrintfSpecifier &FS, 7717 const analyze_printf::OptionalAmount &Amt, 7718 unsigned type, 7719 const char *startSpecifier, unsigned specifierLen); 7720 void HandleFlag(const analyze_printf::PrintfSpecifier &FS, 7721 const analyze_printf::OptionalFlag &flag, 7722 const char *startSpecifier, unsigned specifierLen); 7723 void HandleIgnoredFlag(const analyze_printf::PrintfSpecifier &FS, 7724 const analyze_printf::OptionalFlag &ignoredFlag, 7725 const analyze_printf::OptionalFlag &flag, 7726 const char *startSpecifier, unsigned specifierLen); 7727 bool checkForCStrMembers(const analyze_printf::ArgType &AT, 7728 const Expr *E); 7729 7730 void HandleEmptyObjCModifierFlag(const char *startFlag, 7731 unsigned flagLen) override; 7732 7733 void HandleInvalidObjCModifierFlag(const char *startFlag, 7734 unsigned flagLen) override; 7735 7736 void HandleObjCFlagsWithNonObjCConversion(const char *flagsStart, 7737 const char *flagsEnd, 7738 const char *conversionPosition) 7739 override; 7740 }; 7741 7742 } // namespace 7743 7744 bool CheckPrintfHandler::HandleInvalidPrintfConversionSpecifier( 7745 const analyze_printf::PrintfSpecifier &FS, 7746 const char *startSpecifier, 7747 unsigned specifierLen) { 7748 const analyze_printf::PrintfConversionSpecifier &CS = 7749 FS.getConversionSpecifier(); 7750 7751 return HandleInvalidConversionSpecifier(FS.getArgIndex(), 7752 getLocationOfByte(CS.getStart()), 7753 startSpecifier, specifierLen, 7754 CS.getStart(), CS.getLength()); 7755 } 7756 7757 void CheckPrintfHandler::handleInvalidMaskType(StringRef MaskType) { 7758 S.Diag(getLocationOfByte(MaskType.data()), diag::err_invalid_mask_type_size); 7759 } 7760 7761 bool CheckPrintfHandler::HandleAmount( 7762 const analyze_format_string::OptionalAmount &Amt, 7763 unsigned k, const char *startSpecifier, 7764 unsigned specifierLen) { 7765 if (Amt.hasDataArgument()) { 7766 if (!HasVAListArg) { 7767 unsigned argIndex = Amt.getArgIndex(); 7768 if (argIndex >= NumDataArgs) { 7769 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_missing_arg) 7770 << k, 7771 getLocationOfByte(Amt.getStart()), 7772 /*IsStringLocation*/true, 7773 getSpecifierRange(startSpecifier, specifierLen)); 7774 // Don't do any more checking. We will just emit 7775 // spurious errors. 7776 return false; 7777 } 7778 7779 // Type check the data argument. It should be an 'int'. 7780 // Although not in conformance with C99, we also allow the argument to be 7781 // an 'unsigned int' as that is a reasonably safe case. GCC also 7782 // doesn't emit a warning for that case. 
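// For example (illustrative), in printf("%*d", width, n) the '*' consumes
// 'width' as a data argument here; if 'width' were a double, the
// warn_printf_asterisk_wrong_type diagnostic below would fire.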
7783 CoveredArgs.set(argIndex); 7784 const Expr *Arg = getDataArg(argIndex); 7785 if (!Arg) 7786 return false; 7787 7788 QualType T = Arg->getType(); 7789 7790 const analyze_printf::ArgType &AT = Amt.getArgType(S.Context); 7791 assert(AT.isValid()); 7792 7793 if (!AT.matchesType(S.Context, T)) { 7794 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_wrong_type) 7795 << k << AT.getRepresentativeTypeName(S.Context) 7796 << T << Arg->getSourceRange(), 7797 getLocationOfByte(Amt.getStart()), 7798 /*IsStringLocation*/true, 7799 getSpecifierRange(startSpecifier, specifierLen)); 7800 // Don't do any more checking. We will just emit 7801 // spurious errors. 7802 return false; 7803 } 7804 } 7805 } 7806 return true; 7807 } 7808 7809 void CheckPrintfHandler::HandleInvalidAmount( 7810 const analyze_printf::PrintfSpecifier &FS, 7811 const analyze_printf::OptionalAmount &Amt, 7812 unsigned type, 7813 const char *startSpecifier, 7814 unsigned specifierLen) { 7815 const analyze_printf::PrintfConversionSpecifier &CS = 7816 FS.getConversionSpecifier(); 7817 7818 FixItHint fixit = 7819 Amt.getHowSpecified() == analyze_printf::OptionalAmount::Constant 7820 ? FixItHint::CreateRemoval(getSpecifierRange(Amt.getStart(), 7821 Amt.getConstantLength())) 7822 : FixItHint(); 7823 7824 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_optional_amount) 7825 << type << CS.toString(), 7826 getLocationOfByte(Amt.getStart()), 7827 /*IsStringLocation*/true, 7828 getSpecifierRange(startSpecifier, specifierLen), 7829 fixit); 7830 } 7831 7832 void CheckPrintfHandler::HandleFlag(const analyze_printf::PrintfSpecifier &FS, 7833 const analyze_printf::OptionalFlag &flag, 7834 const char *startSpecifier, 7835 unsigned specifierLen) { 7836 // Warn about pointless flag with a fixit removal. 7837 const analyze_printf::PrintfConversionSpecifier &CS = 7838 FS.getConversionSpecifier(); 7839 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_flag) 7840 << flag.toString() << CS.toString(), 7841 getLocationOfByte(flag.getPosition()), 7842 /*IsStringLocation*/true, 7843 getSpecifierRange(startSpecifier, specifierLen), 7844 FixItHint::CreateRemoval( 7845 getSpecifierRange(flag.getPosition(), 1))); 7846 } 7847 7848 void CheckPrintfHandler::HandleIgnoredFlag( 7849 const analyze_printf::PrintfSpecifier &FS, 7850 const analyze_printf::OptionalFlag &ignoredFlag, 7851 const analyze_printf::OptionalFlag &flag, 7852 const char *startSpecifier, 7853 unsigned specifierLen) { 7854 // Warn about ignored flag with a fixit removal. 7855 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_ignored_flag) 7856 << ignoredFlag.toString() << flag.toString(), 7857 getLocationOfByte(ignoredFlag.getPosition()), 7858 /*IsStringLocation*/true, 7859 getSpecifierRange(startSpecifier, specifierLen), 7860 FixItHint::CreateRemoval( 7861 getSpecifierRange(ignoredFlag.getPosition(), 1))); 7862 } 7863 7864 void CheckPrintfHandler::HandleEmptyObjCModifierFlag(const char *startFlag, 7865 unsigned flagLen) { 7866 // Warn about an empty flag. 7867 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_empty_objc_flag), 7868 getLocationOfByte(startFlag), 7869 /*IsStringLocation*/true, 7870 getSpecifierRange(startFlag, flagLen)); 7871 } 7872 7873 void CheckPrintfHandler::HandleInvalidObjCModifierFlag(const char *startFlag, 7874 unsigned flagLen) { 7875 // Warn about an invalid flag. 
7876 auto Range = getSpecifierRange(startFlag, flagLen); 7877 StringRef flag(startFlag, flagLen); 7878 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_invalid_objc_flag) << flag, 7879 getLocationOfByte(startFlag), 7880 /*IsStringLocation*/true, 7881 Range, FixItHint::CreateRemoval(Range)); 7882 } 7883 7884 void CheckPrintfHandler::HandleObjCFlagsWithNonObjCConversion( 7885 const char *flagsStart, const char *flagsEnd, const char *conversionPosition) { 7886 // Warn about using '[...]' without a '@' conversion. 7887 auto Range = getSpecifierRange(flagsStart, flagsEnd - flagsStart + 1); 7888 auto diag = diag::warn_printf_ObjCflags_without_ObjCConversion; 7889 EmitFormatDiagnostic(S.PDiag(diag) << StringRef(conversionPosition, 1), 7890 getLocationOfByte(conversionPosition), 7891 /*IsStringLocation*/true, 7892 Range, FixItHint::CreateRemoval(Range)); 7893 } 7894 7895 // Determines if the specified is a C++ class or struct containing 7896 // a member with the specified name and kind (e.g. a CXXMethodDecl named 7897 // "c_str()"). 7898 template<typename MemberKind> 7899 static llvm::SmallPtrSet<MemberKind*, 1> 7900 CXXRecordMembersNamed(StringRef Name, Sema &S, QualType Ty) { 7901 const RecordType *RT = Ty->getAs<RecordType>(); 7902 llvm::SmallPtrSet<MemberKind*, 1> Results; 7903 7904 if (!RT) 7905 return Results; 7906 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()); 7907 if (!RD || !RD->getDefinition()) 7908 return Results; 7909 7910 LookupResult R(S, &S.Context.Idents.get(Name), SourceLocation(), 7911 Sema::LookupMemberName); 7912 R.suppressDiagnostics(); 7913 7914 // We just need to include all members of the right kind turned up by the 7915 // filter, at this point. 7916 if (S.LookupQualifiedName(R, RT->getDecl())) 7917 for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) { 7918 NamedDecl *decl = (*I)->getUnderlyingDecl(); 7919 if (MemberKind *FK = dyn_cast<MemberKind>(decl)) 7920 Results.insert(FK); 7921 } 7922 return Results; 7923 } 7924 7925 /// Check if we could call '.c_str()' on an object. 7926 /// 7927 /// FIXME: This returns the wrong results in some cases (if cv-qualifiers don't 7928 /// allow the call, or if it would be ambiguous). 7929 bool Sema::hasCStrMethod(const Expr *E) { 7930 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>; 7931 7932 MethodSet Results = 7933 CXXRecordMembersNamed<CXXMethodDecl>("c_str", *this, E->getType()); 7934 for (MethodSet::iterator MI = Results.begin(), ME = Results.end(); 7935 MI != ME; ++MI) 7936 if ((*MI)->getMinRequiredArguments() == 0) 7937 return true; 7938 return false; 7939 } 7940 7941 // Check if a (w)string was passed when a (w)char* was needed, and offer a 7942 // better diagnostic if so. AT is assumed to be valid. 7943 // Returns true when a c_str() conversion method is found. 7944 bool CheckPrintfHandler::checkForCStrMembers( 7945 const analyze_printf::ArgType &AT, const Expr *E) { 7946 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>; 7947 7948 MethodSet Results = 7949 CXXRecordMembersNamed<CXXMethodDecl>("c_str", S, E->getType()); 7950 7951 for (MethodSet::iterator MI = Results.begin(), ME = Results.end(); 7952 MI != ME; ++MI) { 7953 const CXXMethodDecl *Method = *MI; 7954 if (Method->getMinRequiredArguments() == 0 && 7955 AT.matchesType(S.Context, Method->getReturnType())) { 7956 // FIXME: Suggest parens if the expression needs them. 
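// For example (illustrative), with std::string str, printf("%s", str) gets a
// note suggesting ".c_str()"; for an expression like *pstr the plain
// insertion would yield *pstr.c_str(), which is why the FIXME above asks for
// parentheses.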
7957 SourceLocation EndLoc = S.getLocForEndOfToken(E->getEndLoc()); 7958 S.Diag(E->getBeginLoc(), diag::note_printf_c_str) 7959 << "c_str()" << FixItHint::CreateInsertion(EndLoc, ".c_str()"); 7960 return true; 7961 } 7962 } 7963 7964 return false; 7965 } 7966 7967 bool 7968 CheckPrintfHandler::HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier 7969 &FS, 7970 const char *startSpecifier, 7971 unsigned specifierLen) { 7972 using namespace analyze_format_string; 7973 using namespace analyze_printf; 7974 7975 const PrintfConversionSpecifier &CS = FS.getConversionSpecifier(); 7976 7977 if (FS.consumesDataArgument()) { 7978 if (atFirstArg) { 7979 atFirstArg = false; 7980 usesPositionalArgs = FS.usesPositionalArg(); 7981 } 7982 else if (usesPositionalArgs != FS.usesPositionalArg()) { 7983 HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()), 7984 startSpecifier, specifierLen); 7985 return false; 7986 } 7987 } 7988 7989 // First check if the field width, precision, and conversion specifier 7990 // have matching data arguments. 7991 if (!HandleAmount(FS.getFieldWidth(), /* field width */ 0, 7992 startSpecifier, specifierLen)) { 7993 return false; 7994 } 7995 7996 if (!HandleAmount(FS.getPrecision(), /* precision */ 1, 7997 startSpecifier, specifierLen)) { 7998 return false; 7999 } 8000 8001 if (!CS.consumesDataArgument()) { 8002 // FIXME: Technically specifying a precision or field width here 8003 // makes no sense. Worth issuing a warning at some point. 8004 return true; 8005 } 8006 8007 // Consume the argument. 8008 unsigned argIndex = FS.getArgIndex(); 8009 if (argIndex < NumDataArgs) { 8010 // The check to see if the argIndex is valid will come later. 8011 // We set the bit here because we may exit early from this 8012 // function if we encounter some other error. 8013 CoveredArgs.set(argIndex); 8014 } 8015 8016 // FreeBSD kernel extensions. 8017 if (CS.getKind() == ConversionSpecifier::FreeBSDbArg || 8018 CS.getKind() == ConversionSpecifier::FreeBSDDArg) { 8019 // We need at least two arguments. 8020 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex + 1)) 8021 return false; 8022 8023 // Claim the second argument. 8024 CoveredArgs.set(argIndex + 1); 8025 8026 // Type check the first argument (int for %b, pointer for %D) 8027 const Expr *Ex = getDataArg(argIndex); 8028 const analyze_printf::ArgType &AT = 8029 (CS.getKind() == ConversionSpecifier::FreeBSDbArg) ? 8030 ArgType(S.Context.IntTy) : ArgType::CPointerTy; 8031 if (AT.isValid() && !AT.matchesType(S.Context, Ex->getType())) 8032 EmitFormatDiagnostic( 8033 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 8034 << AT.getRepresentativeTypeName(S.Context) << Ex->getType() 8035 << false << Ex->getSourceRange(), 8036 Ex->getBeginLoc(), /*IsStringLocation*/ false, 8037 getSpecifierRange(startSpecifier, specifierLen)); 8038 8039 // Type check the second argument (char * for both %b and %D) 8040 Ex = getDataArg(argIndex + 1); 8041 const analyze_printf::ArgType &AT2 = ArgType::CStrTy; 8042 if (AT2.isValid() && !AT2.matchesType(S.Context, Ex->getType())) 8043 EmitFormatDiagnostic( 8044 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 8045 << AT2.getRepresentativeTypeName(S.Context) << Ex->getType() 8046 << false << Ex->getSourceRange(), 8047 Ex->getBeginLoc(), /*IsStringLocation*/ false, 8048 getSpecifierRange(startSpecifier, specifierLen)); 8049 8050 return true; 8051 } 8052 8053 // Check for using an Objective-C specific conversion specifier 8054 // in a non-ObjC literal. 
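// For example, printf("%@", obj) is rejected here: '%@' is only meaningful
// for NSString-style format strings (and os_log/os_trace); see
// allowsObjCArg() above.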
8055 if (!allowsObjCArg() && CS.isObjCArg()) { 8056 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 8057 specifierLen); 8058 } 8059 8060 // %P can only be used with os_log. 8061 if (FSType != Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::PArg) { 8062 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 8063 specifierLen); 8064 } 8065 8066 // %n is not allowed with os_log. 8067 if (FSType == Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::nArg) { 8068 EmitFormatDiagnostic(S.PDiag(diag::warn_os_log_format_narg), 8069 getLocationOfByte(CS.getStart()), 8070 /*IsStringLocation*/ false, 8071 getSpecifierRange(startSpecifier, specifierLen)); 8072 8073 return true; 8074 } 8075 8076 // Only scalars are allowed for os_trace. 8077 if (FSType == Sema::FST_OSTrace && 8078 (CS.getKind() == ConversionSpecifier::PArg || 8079 CS.getKind() == ConversionSpecifier::sArg || 8080 CS.getKind() == ConversionSpecifier::ObjCObjArg)) { 8081 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 8082 specifierLen); 8083 } 8084 8085 // Check for use of public/private annotation outside of os_log(). 8086 if (FSType != Sema::FST_OSLog) { 8087 if (FS.isPublic().isSet()) { 8088 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation) 8089 << "public", 8090 getLocationOfByte(FS.isPublic().getPosition()), 8091 /*IsStringLocation*/ false, 8092 getSpecifierRange(startSpecifier, specifierLen)); 8093 } 8094 if (FS.isPrivate().isSet()) { 8095 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation) 8096 << "private", 8097 getLocationOfByte(FS.isPrivate().getPosition()), 8098 /*IsStringLocation*/ false, 8099 getSpecifierRange(startSpecifier, specifierLen)); 8100 } 8101 } 8102 8103 // Check for invalid use of field width 8104 if (!FS.hasValidFieldWidth()) { 8105 HandleInvalidAmount(FS, FS.getFieldWidth(), /* field width */ 0, 8106 startSpecifier, specifierLen); 8107 } 8108 8109 // Check for invalid use of precision 8110 if (!FS.hasValidPrecision()) { 8111 HandleInvalidAmount(FS, FS.getPrecision(), /* precision */ 1, 8112 startSpecifier, specifierLen); 8113 } 8114 8115 // Precision is mandatory for %P specifier. 8116 if (CS.getKind() == ConversionSpecifier::PArg && 8117 FS.getPrecision().getHowSpecified() == OptionalAmount::NotSpecified) { 8118 EmitFormatDiagnostic(S.PDiag(diag::warn_format_P_no_precision), 8119 getLocationOfByte(startSpecifier), 8120 /*IsStringLocation*/ false, 8121 getSpecifierRange(startSpecifier, specifierLen)); 8122 } 8123 8124 // Check each flag does not conflict with any other component. 
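// For example (illustrative), in printf("%+s", s) the '+' flag is invalid
// for the 's' conversion, so hasValidPlusPrefix() is false and HandleFlag()
// reports it with a removal fix-it.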
8125 if (!FS.hasValidThousandsGroupingPrefix()) 8126 HandleFlag(FS, FS.hasThousandsGrouping(), startSpecifier, specifierLen); 8127 if (!FS.hasValidLeadingZeros()) 8128 HandleFlag(FS, FS.hasLeadingZeros(), startSpecifier, specifierLen); 8129 if (!FS.hasValidPlusPrefix()) 8130 HandleFlag(FS, FS.hasPlusPrefix(), startSpecifier, specifierLen); 8131 if (!FS.hasValidSpacePrefix()) 8132 HandleFlag(FS, FS.hasSpacePrefix(), startSpecifier, specifierLen); 8133 if (!FS.hasValidAlternativeForm()) 8134 HandleFlag(FS, FS.hasAlternativeForm(), startSpecifier, specifierLen); 8135 if (!FS.hasValidLeftJustified()) 8136 HandleFlag(FS, FS.isLeftJustified(), startSpecifier, specifierLen); 8137 8138 // Check that flags are not ignored by another flag 8139 if (FS.hasSpacePrefix() && FS.hasPlusPrefix()) // ' ' ignored by '+' 8140 HandleIgnoredFlag(FS, FS.hasSpacePrefix(), FS.hasPlusPrefix(), 8141 startSpecifier, specifierLen); 8142 if (FS.hasLeadingZeros() && FS.isLeftJustified()) // '0' ignored by '-' 8143 HandleIgnoredFlag(FS, FS.hasLeadingZeros(), FS.isLeftJustified(), 8144 startSpecifier, specifierLen); 8145 8146 // Check the length modifier is valid with the given conversion specifier. 8147 if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(), 8148 S.getLangOpts())) 8149 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 8150 diag::warn_format_nonsensical_length); 8151 else if (!FS.hasStandardLengthModifier()) 8152 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen); 8153 else if (!FS.hasStandardLengthConversionCombination()) 8154 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 8155 diag::warn_format_non_standard_conversion_spec); 8156 8157 if (!FS.hasStandardConversionSpecifier(S.getLangOpts())) 8158 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen); 8159 8160 // The remaining checks depend on the data arguments. 8161 if (HasVAListArg) 8162 return true; 8163 8164 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex)) 8165 return false; 8166 8167 const Expr *Arg = getDataArg(argIndex); 8168 if (!Arg) 8169 return true; 8170 8171 return checkFormatExpr(FS, startSpecifier, specifierLen, Arg); 8172 } 8173 8174 static bool requiresParensToAddCast(const Expr *E) { 8175 // FIXME: We should have a general way to reason about operator 8176 // precedence and whether parens are actually needed here. 8177 // Take care of a few common cases where they aren't. 
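// For example, a DeclRefExpr such as 'n' can be rewritten as '(long)n' with
// no extra parentheses, whereas 'a + b' falls into the default case and
// would need '(long)(a + b)'.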
8178 const Expr *Inside = E->IgnoreImpCasts(); 8179 if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(Inside)) 8180 Inside = POE->getSyntacticForm()->IgnoreImpCasts(); 8181 8182 switch (Inside->getStmtClass()) { 8183 case Stmt::ArraySubscriptExprClass: 8184 case Stmt::CallExprClass: 8185 case Stmt::CharacterLiteralClass: 8186 case Stmt::CXXBoolLiteralExprClass: 8187 case Stmt::DeclRefExprClass: 8188 case Stmt::FloatingLiteralClass: 8189 case Stmt::IntegerLiteralClass: 8190 case Stmt::MemberExprClass: 8191 case Stmt::ObjCArrayLiteralClass: 8192 case Stmt::ObjCBoolLiteralExprClass: 8193 case Stmt::ObjCBoxedExprClass: 8194 case Stmt::ObjCDictionaryLiteralClass: 8195 case Stmt::ObjCEncodeExprClass: 8196 case Stmt::ObjCIvarRefExprClass: 8197 case Stmt::ObjCMessageExprClass: 8198 case Stmt::ObjCPropertyRefExprClass: 8199 case Stmt::ObjCStringLiteralClass: 8200 case Stmt::ObjCSubscriptRefExprClass: 8201 case Stmt::ParenExprClass: 8202 case Stmt::StringLiteralClass: 8203 case Stmt::UnaryOperatorClass: 8204 return false; 8205 default: 8206 return true; 8207 } 8208 } 8209 8210 static std::pair<QualType, StringRef> 8211 shouldNotPrintDirectly(const ASTContext &Context, 8212 QualType IntendedTy, 8213 const Expr *E) { 8214 // Use a 'while' to peel off layers of typedefs. 8215 QualType TyTy = IntendedTy; 8216 while (const TypedefType *UserTy = TyTy->getAs<TypedefType>()) { 8217 StringRef Name = UserTy->getDecl()->getName(); 8218 QualType CastTy = llvm::StringSwitch<QualType>(Name) 8219 .Case("CFIndex", Context.getNSIntegerType()) 8220 .Case("NSInteger", Context.getNSIntegerType()) 8221 .Case("NSUInteger", Context.getNSUIntegerType()) 8222 .Case("SInt32", Context.IntTy) 8223 .Case("UInt32", Context.UnsignedIntTy) 8224 .Default(QualType()); 8225 8226 if (!CastTy.isNull()) 8227 return std::make_pair(CastTy, Name); 8228 8229 TyTy = UserTy->desugar(); 8230 } 8231 8232 // Strip parens if necessary. 8233 if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) 8234 return shouldNotPrintDirectly(Context, 8235 PE->getSubExpr()->getType(), 8236 PE->getSubExpr()); 8237 8238 // If this is a conditional expression, then its result type is constructed 8239 // via usual arithmetic conversions and thus there might be no necessary 8240 // typedef sugar there. Recurse to operands to check for NSInteger & 8241 // Co. usage condition. 8242 if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) { 8243 QualType TrueTy, FalseTy; 8244 StringRef TrueName, FalseName; 8245 8246 std::tie(TrueTy, TrueName) = 8247 shouldNotPrintDirectly(Context, 8248 CO->getTrueExpr()->getType(), 8249 CO->getTrueExpr()); 8250 std::tie(FalseTy, FalseName) = 8251 shouldNotPrintDirectly(Context, 8252 CO->getFalseExpr()->getType(), 8253 CO->getFalseExpr()); 8254 8255 if (TrueTy == FalseTy) 8256 return std::make_pair(TrueTy, TrueName); 8257 else if (TrueTy.isNull()) 8258 return std::make_pair(FalseTy, FalseName); 8259 else if (FalseTy.isNull()) 8260 return std::make_pair(TrueTy, TrueName); 8261 } 8262 8263 return std::make_pair(QualType(), StringRef()); 8264 } 8265 8266 /// Return true if \p ICE is an implicit argument promotion of an arithmetic 8267 /// type. Bit-field 'promotions' from a higher ranked type to a lower ranked 8268 /// type do not count. 8269 static bool 8270 isArithmeticArgumentPromotion(Sema &S, const ImplicitCastExpr *ICE) { 8271 QualType From = ICE->getSubExpr()->getType(); 8272 QualType To = ICE->getType(); 8273 // It's an integer promotion if the destination type is the promoted 8274 // source type. 
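// For example, a 'short' passed to a variadic printf is widened to 'int' via
// a CK_IntegralCast, and getPromotedIntegerType(short) == int, so the cast
// is treated as a promotion and looked through.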
8275 if (ICE->getCastKind() == CK_IntegralCast && 8276 From->isPromotableIntegerType() && 8277 S.Context.getPromotedIntegerType(From) == To) 8278 return true; 8279 // Look through vector types, since we do default argument promotion for 8280 // those in OpenCL. 8281 if (const auto *VecTy = From->getAs<ExtVectorType>()) 8282 From = VecTy->getElementType(); 8283 if (const auto *VecTy = To->getAs<ExtVectorType>()) 8284 To = VecTy->getElementType(); 8285 // It's a floating promotion if the source type is a lower rank. 8286 return ICE->getCastKind() == CK_FloatingCast && 8287 S.Context.getFloatingTypeOrder(From, To) < 0; 8288 } 8289 8290 bool 8291 CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, 8292 const char *StartSpecifier, 8293 unsigned SpecifierLen, 8294 const Expr *E) { 8295 using namespace analyze_format_string; 8296 using namespace analyze_printf; 8297 8298 // Now type check the data expression that matches the 8299 // format specifier. 8300 const analyze_printf::ArgType &AT = FS.getArgType(S.Context, isObjCContext()); 8301 if (!AT.isValid()) 8302 return true; 8303 8304 QualType ExprTy = E->getType(); 8305 while (const TypeOfExprType *TET = dyn_cast<TypeOfExprType>(ExprTy)) { 8306 ExprTy = TET->getUnderlyingExpr()->getType(); 8307 } 8308 8309 // Diagnose attempts to print a boolean value as a character. Unlike other 8310 // -Wformat diagnostics, this is fine from a type perspective, but it still 8311 // doesn't make sense. 8312 if (FS.getConversionSpecifier().getKind() == ConversionSpecifier::cArg && 8313 E->isKnownToHaveBooleanValue()) { 8314 const CharSourceRange &CSR = 8315 getSpecifierRange(StartSpecifier, SpecifierLen); 8316 SmallString<4> FSString; 8317 llvm::raw_svector_ostream os(FSString); 8318 FS.toString(os); 8319 EmitFormatDiagnostic(S.PDiag(diag::warn_format_bool_as_character) 8320 << FSString, 8321 E->getExprLoc(), false, CSR); 8322 return true; 8323 } 8324 8325 analyze_printf::ArgType::MatchKind Match = AT.matchesType(S.Context, ExprTy); 8326 if (Match == analyze_printf::ArgType::Match) 8327 return true; 8328 8329 // Look through argument promotions for our error message's reported type. 8330 // This includes the integral and floating promotions, but excludes array 8331 // and function pointer decay (seeing that an argument intended to be a 8332 // string has type 'char [6]' is probably more confusing than 'char *') and 8333 // certain bitfield promotions (bitfields can be 'demoted' to a lesser type). 8334 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) { 8335 if (isArithmeticArgumentPromotion(S, ICE)) { 8336 E = ICE->getSubExpr(); 8337 ExprTy = E->getType(); 8338 8339 // Check if we didn't match because of an implicit cast from a 'char' 8340 // or 'short' to an 'int'. This is done because printf is a varargs 8341 // function. 8342 if (ICE->getType() == S.Context.IntTy || 8343 ICE->getType() == S.Context.UnsignedIntTy) { 8344 // All further checking is done on the subexpression 8345 const analyze_printf::ArgType::MatchKind ImplicitMatch = 8346 AT.matchesType(S.Context, ExprTy); 8347 if (ImplicitMatch == analyze_printf::ArgType::Match) 8348 return true; 8349 if (ImplicitMatch == ArgType::NoMatchPedantic || 8350 ImplicitMatch == ArgType::NoMatchTypeConfusion) 8351 Match = ImplicitMatch; 8352 } 8353 } 8354 } else if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E)) { 8355 // Special case for 'a', which has type 'int' in C. 
8356 // Note, however, that we do /not/ want to treat multibyte constants like 8357 // 'MooV' as characters! This form is deprecated but still exists. 8358 if (ExprTy == S.Context.IntTy) 8359 if (llvm::isUIntN(S.Context.getCharWidth(), CL->getValue())) 8360 ExprTy = S.Context.CharTy; 8361 } 8362 8363 // Look through enums to their underlying type. 8364 bool IsEnum = false; 8365 if (auto EnumTy = ExprTy->getAs<EnumType>()) { 8366 ExprTy = EnumTy->getDecl()->getIntegerType(); 8367 IsEnum = true; 8368 } 8369 8370 // %C in an Objective-C context prints a unichar, not a wchar_t. 8371 // If the argument is an integer of some kind, believe the %C and suggest 8372 // a cast instead of changing the conversion specifier. 8373 QualType IntendedTy = ExprTy; 8374 if (isObjCContext() && 8375 FS.getConversionSpecifier().getKind() == ConversionSpecifier::CArg) { 8376 if (ExprTy->isIntegralOrUnscopedEnumerationType() && 8377 !ExprTy->isCharType()) { 8378 // 'unichar' is defined as a typedef of unsigned short, but we should 8379 // prefer using the typedef if it is visible. 8380 IntendedTy = S.Context.UnsignedShortTy; 8381 8382 // While we are here, check if the value is an IntegerLiteral that happens 8383 // to be within the valid range. 8384 if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E)) { 8385 const llvm::APInt &V = IL->getValue(); 8386 if (V.getActiveBits() <= S.Context.getTypeSize(IntendedTy)) 8387 return true; 8388 } 8389 8390 LookupResult Result(S, &S.Context.Idents.get("unichar"), E->getBeginLoc(), 8391 Sema::LookupOrdinaryName); 8392 if (S.LookupName(Result, S.getCurScope())) { 8393 NamedDecl *ND = Result.getFoundDecl(); 8394 if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(ND)) 8395 if (TD->getUnderlyingType() == IntendedTy) 8396 IntendedTy = S.Context.getTypedefType(TD); 8397 } 8398 } 8399 } 8400 8401 // Special-case some of Darwin's platform-independence types by suggesting 8402 // casts to primitive types that are known to be large enough. 8403 bool ShouldNotPrintDirectly = false; StringRef CastTyName; 8404 if (S.Context.getTargetInfo().getTriple().isOSDarwin()) { 8405 QualType CastTy; 8406 std::tie(CastTy, CastTyName) = shouldNotPrintDirectly(S.Context, IntendedTy, E); 8407 if (!CastTy.isNull()) { 8408 // %zi/%zu and %td/%tu are OK to use for NSInteger/NSUInteger of type int 8409 // (long in ASTContext). Only complain to pedants. 8410 if ((CastTyName == "NSInteger" || CastTyName == "NSUInteger") && 8411 (AT.isSizeT() || AT.isPtrdiffT()) && 8412 AT.matchesType(S.Context, CastTy)) 8413 Match = ArgType::NoMatchPedantic; 8414 IntendedTy = CastTy; 8415 ShouldNotPrintDirectly = true; 8416 } 8417 } 8418 8419 // We may be able to offer a FixItHint if it is a supported type. 
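  // (Illustrative example: for printf("%d", someLongValue) the fixit computed
  // below would rewrite the specifier to "%ld".)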
8420 PrintfSpecifier fixedFS = FS; 8421 bool Success = 8422 fixedFS.fixType(IntendedTy, S.getLangOpts(), S.Context, isObjCContext()); 8423 8424 if (Success) { 8425 // Get the fix string from the fixed format specifier 8426 SmallString<16> buf; 8427 llvm::raw_svector_ostream os(buf); 8428 fixedFS.toString(os); 8429 8430 CharSourceRange SpecRange = getSpecifierRange(StartSpecifier, SpecifierLen); 8431 8432 if (IntendedTy == ExprTy && !ShouldNotPrintDirectly) { 8433 unsigned Diag; 8434 switch (Match) { 8435 case ArgType::Match: llvm_unreachable("expected non-matching"); 8436 case ArgType::NoMatchPedantic: 8437 Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic; 8438 break; 8439 case ArgType::NoMatchTypeConfusion: 8440 Diag = diag::warn_format_conversion_argument_type_mismatch_confusion; 8441 break; 8442 case ArgType::NoMatch: 8443 Diag = diag::warn_format_conversion_argument_type_mismatch; 8444 break; 8445 } 8446 8447 // In this case, the specifier is wrong and should be changed to match 8448 // the argument. 8449 EmitFormatDiagnostic(S.PDiag(Diag) 8450 << AT.getRepresentativeTypeName(S.Context) 8451 << IntendedTy << IsEnum << E->getSourceRange(), 8452 E->getBeginLoc(), 8453 /*IsStringLocation*/ false, SpecRange, 8454 FixItHint::CreateReplacement(SpecRange, os.str())); 8455 } else { 8456 // The canonical type for formatting this value is different from the 8457 // actual type of the expression. (This occurs, for example, with Darwin's 8458 // NSInteger on 32-bit platforms, where it is typedef'd as 'int', but 8459 // should be printed as 'long' for 64-bit compatibility.) 8460 // Rather than emitting a normal format/argument mismatch, we want to 8461 // add a cast to the recommended type (and correct the format string 8462 // if necessary). 8463 SmallString<16> CastBuf; 8464 llvm::raw_svector_ostream CastFix(CastBuf); 8465 CastFix << "("; 8466 IntendedTy.print(CastFix, S.Context.getPrintingPolicy()); 8467 CastFix << ")"; 8468 8469 SmallVector<FixItHint,4> Hints; 8470 if (!AT.matchesType(S.Context, IntendedTy) || ShouldNotPrintDirectly) 8471 Hints.push_back(FixItHint::CreateReplacement(SpecRange, os.str())); 8472 8473 if (const CStyleCastExpr *CCast = dyn_cast<CStyleCastExpr>(E)) { 8474 // If there's already a cast present, just replace it. 8475 SourceRange CastRange(CCast->getLParenLoc(), CCast->getRParenLoc()); 8476 Hints.push_back(FixItHint::CreateReplacement(CastRange, CastFix.str())); 8477 8478 } else if (!requiresParensToAddCast(E)) { 8479 // If the expression has high enough precedence, 8480 // just write the C-style cast. 8481 Hints.push_back( 8482 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str())); 8483 } else { 8484 // Otherwise, add parens around the expression as well as the cast. 8485 CastFix << "("; 8486 Hints.push_back( 8487 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str())); 8488 8489 SourceLocation After = S.getLocForEndOfToken(E->getEndLoc()); 8490 Hints.push_back(FixItHint::CreateInsertion(After, ")")); 8491 } 8492 8493 if (ShouldNotPrintDirectly) { 8494 // The expression has a type that should not be printed directly. 8495 // We extract the name from the typedef because we don't want to show 8496 // the underlying type in the diagnostic. 8497 StringRef Name; 8498 if (const TypedefType *TypedefTy = dyn_cast<TypedefType>(ExprTy)) 8499 Name = TypedefTy->getDecl()->getName(); 8500 else 8501 Name = CastTyName; 8502 unsigned Diag = Match == ArgType::NoMatchPedantic 8503 ? 
diag::warn_format_argument_needs_cast_pedantic 8504 : diag::warn_format_argument_needs_cast; 8505 EmitFormatDiagnostic(S.PDiag(Diag) << Name << IntendedTy << IsEnum 8506 << E->getSourceRange(), 8507 E->getBeginLoc(), /*IsStringLocation=*/false, 8508 SpecRange, Hints); 8509 } else { 8510 // In this case, the expression could be printed using a different 8511 // specifier, but we've decided that the specifier is probably correct 8512 // and we should cast instead. Just use the normal warning message. 8513 EmitFormatDiagnostic( 8514 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 8515 << AT.getRepresentativeTypeName(S.Context) << ExprTy << IsEnum 8516 << E->getSourceRange(), 8517 E->getBeginLoc(), /*IsStringLocation*/ false, SpecRange, Hints); 8518 } 8519 } 8520 } else { 8521 const CharSourceRange &CSR = getSpecifierRange(StartSpecifier, 8522 SpecifierLen); 8523 // Since the warning for passing non-POD types to variadic functions 8524 // was deferred until now, we emit a warning for non-POD 8525 // arguments here. 8526 switch (S.isValidVarArgType(ExprTy)) { 8527 case Sema::VAK_Valid: 8528 case Sema::VAK_ValidInCXX11: { 8529 unsigned Diag; 8530 switch (Match) { 8531 case ArgType::Match: llvm_unreachable("expected non-matching"); 8532 case ArgType::NoMatchPedantic: 8533 Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic; 8534 break; 8535 case ArgType::NoMatchTypeConfusion: 8536 Diag = diag::warn_format_conversion_argument_type_mismatch_confusion; 8537 break; 8538 case ArgType::NoMatch: 8539 Diag = diag::warn_format_conversion_argument_type_mismatch; 8540 break; 8541 } 8542 8543 EmitFormatDiagnostic( 8544 S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) << ExprTy 8545 << IsEnum << CSR << E->getSourceRange(), 8546 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 8547 break; 8548 } 8549 case Sema::VAK_Undefined: 8550 case Sema::VAK_MSVCUndefined: 8551 EmitFormatDiagnostic(S.PDiag(diag::warn_non_pod_vararg_with_format_string) 8552 << S.getLangOpts().CPlusPlus11 << ExprTy 8553 << CallType 8554 << AT.getRepresentativeTypeName(S.Context) << CSR 8555 << E->getSourceRange(), 8556 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 8557 checkForCStrMembers(AT, E); 8558 break; 8559 8560 case Sema::VAK_Invalid: 8561 if (ExprTy->isObjCObjectType()) 8562 EmitFormatDiagnostic( 8563 S.PDiag(diag::err_cannot_pass_objc_interface_to_vararg_format) 8564 << S.getLangOpts().CPlusPlus11 << ExprTy << CallType 8565 << AT.getRepresentativeTypeName(S.Context) << CSR 8566 << E->getSourceRange(), 8567 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 8568 else 8569 // FIXME: If this is an initializer list, suggest removing the braces 8570 // or inserting a cast to the target type. 
8571 S.Diag(E->getBeginLoc(), diag::err_cannot_pass_to_vararg_format) 8572 << isa<InitListExpr>(E) << ExprTy << CallType 8573 << AT.getRepresentativeTypeName(S.Context) << E->getSourceRange(); 8574 break; 8575 } 8576 8577 assert(FirstDataArg + FS.getArgIndex() < CheckedVarArgs.size() && 8578 "format string specifier index out of range"); 8579 CheckedVarArgs[FirstDataArg + FS.getArgIndex()] = true; 8580 } 8581 8582 return true; 8583 } 8584 8585 //===--- CHECK: Scanf format string checking ------------------------------===// 8586 8587 namespace { 8588 8589 class CheckScanfHandler : public CheckFormatHandler { 8590 public: 8591 CheckScanfHandler(Sema &s, const FormatStringLiteral *fexpr, 8592 const Expr *origFormatExpr, Sema::FormatStringType type, 8593 unsigned firstDataArg, unsigned numDataArgs, 8594 const char *beg, bool hasVAListArg, 8595 ArrayRef<const Expr *> Args, unsigned formatIdx, 8596 bool inFunctionCall, Sema::VariadicCallType CallType, 8597 llvm::SmallBitVector &CheckedVarArgs, 8598 UncoveredArgHandler &UncoveredArg) 8599 : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg, 8600 numDataArgs, beg, hasVAListArg, Args, formatIdx, 8601 inFunctionCall, CallType, CheckedVarArgs, 8602 UncoveredArg) {} 8603 8604 bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS, 8605 const char *startSpecifier, 8606 unsigned specifierLen) override; 8607 8608 bool HandleInvalidScanfConversionSpecifier( 8609 const analyze_scanf::ScanfSpecifier &FS, 8610 const char *startSpecifier, 8611 unsigned specifierLen) override; 8612 8613 void HandleIncompleteScanList(const char *start, const char *end) override; 8614 }; 8615 8616 } // namespace 8617 8618 void CheckScanfHandler::HandleIncompleteScanList(const char *start, 8619 const char *end) { 8620 EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_scanlist_incomplete), 8621 getLocationOfByte(end), /*IsStringLocation*/true, 8622 getSpecifierRange(start, end - start)); 8623 } 8624 8625 bool CheckScanfHandler::HandleInvalidScanfConversionSpecifier( 8626 const analyze_scanf::ScanfSpecifier &FS, 8627 const char *startSpecifier, 8628 unsigned specifierLen) { 8629 const analyze_scanf::ScanfConversionSpecifier &CS = 8630 FS.getConversionSpecifier(); 8631 8632 return HandleInvalidConversionSpecifier(FS.getArgIndex(), 8633 getLocationOfByte(CS.getStart()), 8634 startSpecifier, specifierLen, 8635 CS.getStart(), CS.getLength()); 8636 } 8637 8638 bool CheckScanfHandler::HandleScanfSpecifier( 8639 const analyze_scanf::ScanfSpecifier &FS, 8640 const char *startSpecifier, 8641 unsigned specifierLen) { 8642 using namespace analyze_scanf; 8643 using namespace analyze_format_string; 8644 8645 const ScanfConversionSpecifier &CS = FS.getConversionSpecifier(); 8646 8647 // Handle case where '%' and '*' don't consume an argument. These shouldn't 8648 // be used to decide if we are using positional arguments consistently. 8649 if (FS.consumesDataArgument()) { 8650 if (atFirstArg) { 8651 atFirstArg = false; 8652 usesPositionalArgs = FS.usesPositionalArg(); 8653 } 8654 else if (usesPositionalArgs != FS.usesPositionalArg()) { 8655 HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()), 8656 startSpecifier, specifierLen); 8657 return false; 8658 } 8659 } 8660 8661 // Check that the field width is non-zero.
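  // (e.g. scanf("%0d", &i): a zero field width is meaningless, so we warn and
  // offer a fixit that removes it.)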
8662 const OptionalAmount &Amt = FS.getFieldWidth(); 8663 if (Amt.getHowSpecified() == OptionalAmount::Constant) { 8664 if (Amt.getConstantAmount() == 0) { 8665 const CharSourceRange &R = getSpecifierRange(Amt.getStart(), 8666 Amt.getConstantLength()); 8667 EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_nonzero_width), 8668 getLocationOfByte(Amt.getStart()), 8669 /*IsStringLocation*/true, R, 8670 FixItHint::CreateRemoval(R)); 8671 } 8672 } 8673 8674 if (!FS.consumesDataArgument()) { 8675 // FIXME: Technically specifying a precision or field width here 8676 // makes no sense. Worth issuing a warning at some point. 8677 return true; 8678 } 8679 8680 // Consume the argument. 8681 unsigned argIndex = FS.getArgIndex(); 8682 if (argIndex < NumDataArgs) { 8683 // The check to see if the argIndex is valid will come later. 8684 // We set the bit here because we may exit early from this 8685 // function if we encounter some other error. 8686 CoveredArgs.set(argIndex); 8687 } 8688 8689 // Check the length modifier is valid with the given conversion specifier. 8690 if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(), 8691 S.getLangOpts())) 8692 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 8693 diag::warn_format_nonsensical_length); 8694 else if (!FS.hasStandardLengthModifier()) 8695 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen); 8696 else if (!FS.hasStandardLengthConversionCombination()) 8697 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 8698 diag::warn_format_non_standard_conversion_spec); 8699 8700 if (!FS.hasStandardConversionSpecifier(S.getLangOpts())) 8701 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen); 8702 8703 // The remaining checks depend on the data arguments. 8704 if (HasVAListArg) 8705 return true; 8706 8707 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex)) 8708 return false; 8709 8710 // Check that the argument type matches the format specifier. 8711 const Expr *Ex = getDataArg(argIndex); 8712 if (!Ex) 8713 return true; 8714 8715 const analyze_format_string::ArgType &AT = FS.getArgType(S.Context); 8716 8717 if (!AT.isValid()) { 8718 return true; 8719 } 8720 8721 analyze_format_string::ArgType::MatchKind Match = 8722 AT.matchesType(S.Context, Ex->getType()); 8723 bool Pedantic = Match == analyze_format_string::ArgType::NoMatchPedantic; 8724 if (Match == analyze_format_string::ArgType::Match) 8725 return true; 8726 8727 ScanfSpecifier fixedFS = FS; 8728 bool Success = fixedFS.fixType(Ex->getType(), Ex->IgnoreImpCasts()->getType(), 8729 S.getLangOpts(), S.Context); 8730 8731 unsigned Diag = 8732 Pedantic ? diag::warn_format_conversion_argument_type_mismatch_pedantic 8733 : diag::warn_format_conversion_argument_type_mismatch; 8734 8735 if (Success) { 8736 // Get the fix string from the fixed format specifier. 
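    // (e.g. a "%d" specifier matched against a 'long *' argument is rewritten
    // to "%ld" here.)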
8737 SmallString<128> buf; 8738 llvm::raw_svector_ostream os(buf); 8739 fixedFS.toString(os); 8740 8741 EmitFormatDiagnostic( 8742 S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) 8743 << Ex->getType() << false << Ex->getSourceRange(), 8744 Ex->getBeginLoc(), 8745 /*IsStringLocation*/ false, 8746 getSpecifierRange(startSpecifier, specifierLen), 8747 FixItHint::CreateReplacement( 8748 getSpecifierRange(startSpecifier, specifierLen), os.str())); 8749 } else { 8750 EmitFormatDiagnostic(S.PDiag(Diag) 8751 << AT.getRepresentativeTypeName(S.Context) 8752 << Ex->getType() << false << Ex->getSourceRange(), 8753 Ex->getBeginLoc(), 8754 /*IsStringLocation*/ false, 8755 getSpecifierRange(startSpecifier, specifierLen)); 8756 } 8757 8758 return true; 8759 } 8760 8761 static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr, 8762 const Expr *OrigFormatExpr, 8763 ArrayRef<const Expr *> Args, 8764 bool HasVAListArg, unsigned format_idx, 8765 unsigned firstDataArg, 8766 Sema::FormatStringType Type, 8767 bool inFunctionCall, 8768 Sema::VariadicCallType CallType, 8769 llvm::SmallBitVector &CheckedVarArgs, 8770 UncoveredArgHandler &UncoveredArg, 8771 bool IgnoreStringsWithoutSpecifiers) { 8772 // CHECK: is the format string a wide literal? 8773 if (!FExpr->isAscii() && !FExpr->isUTF8()) { 8774 CheckFormatHandler::EmitFormatDiagnostic( 8775 S, inFunctionCall, Args[format_idx], 8776 S.PDiag(diag::warn_format_string_is_wide_literal), FExpr->getBeginLoc(), 8777 /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange()); 8778 return; 8779 } 8780 8781 // Str - The format string. NOTE: this is NOT null-terminated! 8782 StringRef StrRef = FExpr->getString(); 8783 const char *Str = StrRef.data(); 8784 // Account for cases where the string literal is truncated in a declaration. 8785 const ConstantArrayType *T = 8786 S.Context.getAsConstantArrayType(FExpr->getType()); 8787 assert(T && "String literal not of constant array type!"); 8788 size_t TypeSize = T->getSize().getZExtValue(); 8789 size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size()); 8790 const unsigned numDataArgs = Args.size() - firstDataArg; 8791 8792 if (IgnoreStringsWithoutSpecifiers && 8793 !analyze_format_string::parseFormatStringHasFormattingSpecifiers( 8794 Str, Str + StrLen, S.getLangOpts(), S.Context.getTargetInfo())) 8795 return; 8796 8797 // Emit a warning if the string literal is truncated and does not contain an 8798 // embedded null character. 8799 if (TypeSize <= StrRef.size() && 8800 StrRef.substr(0, TypeSize).find('\0') == StringRef::npos) { 8801 CheckFormatHandler::EmitFormatDiagnostic( 8802 S, inFunctionCall, Args[format_idx], 8803 S.PDiag(diag::warn_printf_format_string_not_null_terminated), 8804 FExpr->getBeginLoc(), 8805 /*IsStringLocation=*/true, OrigFormatExpr->getSourceRange()); 8806 return; 8807 } 8808 8809 // CHECK: empty format string? 
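  // (e.g. printf("", value): there are data arguments but nothing in the
  // format string that could ever consume them.)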
8810 if (StrLen == 0 && numDataArgs > 0) { 8811 CheckFormatHandler::EmitFormatDiagnostic( 8812 S, inFunctionCall, Args[format_idx], 8813 S.PDiag(diag::warn_empty_format_string), FExpr->getBeginLoc(), 8814 /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange()); 8815 return; 8816 } 8817 8818 if (Type == Sema::FST_Printf || Type == Sema::FST_NSString || 8819 Type == Sema::FST_FreeBSDKPrintf || Type == Sema::FST_OSLog || 8820 Type == Sema::FST_OSTrace) { 8821 CheckPrintfHandler H( 8822 S, FExpr, OrigFormatExpr, Type, firstDataArg, numDataArgs, 8823 (Type == Sema::FST_NSString || Type == Sema::FST_OSTrace), Str, 8824 HasVAListArg, Args, format_idx, inFunctionCall, CallType, 8825 CheckedVarArgs, UncoveredArg); 8826 8827 if (!analyze_format_string::ParsePrintfString(H, Str, Str + StrLen, 8828 S.getLangOpts(), 8829 S.Context.getTargetInfo(), 8830 Type == Sema::FST_FreeBSDKPrintf)) 8831 H.DoneProcessing(); 8832 } else if (Type == Sema::FST_Scanf) { 8833 CheckScanfHandler H(S, FExpr, OrigFormatExpr, Type, firstDataArg, 8834 numDataArgs, Str, HasVAListArg, Args, format_idx, 8835 inFunctionCall, CallType, CheckedVarArgs, UncoveredArg); 8836 8837 if (!analyze_format_string::ParseScanfString(H, Str, Str + StrLen, 8838 S.getLangOpts(), 8839 S.Context.getTargetInfo())) 8840 H.DoneProcessing(); 8841 } // TODO: handle other formats 8842 } 8843 8844 bool Sema::FormatStringHasSArg(const StringLiteral *FExpr) { 8845 // Str - The format string. NOTE: this is NOT null-terminated! 8846 StringRef StrRef = FExpr->getString(); 8847 const char *Str = StrRef.data(); 8848 // Account for cases where the string literal is truncated in a declaration. 8849 const ConstantArrayType *T = Context.getAsConstantArrayType(FExpr->getType()); 8850 assert(T && "String literal not of constant array type!"); 8851 size_t TypeSize = T->getSize().getZExtValue(); 8852 size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size()); 8853 return analyze_format_string::ParseFormatStringHasSArg(Str, Str + StrLen, 8854 getLangOpts(), 8855 Context.getTargetInfo()); 8856 } 8857 8858 //===--- CHECK: Warn on use of wrong absolute value function. -------------===// 8859 8860 // Returns the related absolute value function that is larger, or 0 if one 8861 does not exist.
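// (The chains below are abs -> labs -> llabs, fabsf -> fabs -> fabsl and
// cabsf -> cabs -> cabsl, for both the library and the __builtin_ spellings.)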
8862 static unsigned getLargerAbsoluteValueFunction(unsigned AbsFunction) { 8863 switch (AbsFunction) { 8864 default: 8865 return 0; 8866 8867 case Builtin::BI__builtin_abs: 8868 return Builtin::BI__builtin_labs; 8869 case Builtin::BI__builtin_labs: 8870 return Builtin::BI__builtin_llabs; 8871 case Builtin::BI__builtin_llabs: 8872 return 0; 8873 8874 case Builtin::BI__builtin_fabsf: 8875 return Builtin::BI__builtin_fabs; 8876 case Builtin::BI__builtin_fabs: 8877 return Builtin::BI__builtin_fabsl; 8878 case Builtin::BI__builtin_fabsl: 8879 return 0; 8880 8881 case Builtin::BI__builtin_cabsf: 8882 return Builtin::BI__builtin_cabs; 8883 case Builtin::BI__builtin_cabs: 8884 return Builtin::BI__builtin_cabsl; 8885 case Builtin::BI__builtin_cabsl: 8886 return 0; 8887 8888 case Builtin::BIabs: 8889 return Builtin::BIlabs; 8890 case Builtin::BIlabs: 8891 return Builtin::BIllabs; 8892 case Builtin::BIllabs: 8893 return 0; 8894 8895 case Builtin::BIfabsf: 8896 return Builtin::BIfabs; 8897 case Builtin::BIfabs: 8898 return Builtin::BIfabsl; 8899 case Builtin::BIfabsl: 8900 return 0; 8901 8902 case Builtin::BIcabsf: 8903 return Builtin::BIcabs; 8904 case Builtin::BIcabs: 8905 return Builtin::BIcabsl; 8906 case Builtin::BIcabsl: 8907 return 0; 8908 } 8909 } 8910 8911 // Returns the argument type of the absolute value function. 8912 static QualType getAbsoluteValueArgumentType(ASTContext &Context, 8913 unsigned AbsType) { 8914 if (AbsType == 0) 8915 return QualType(); 8916 8917 ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None; 8918 QualType BuiltinType = Context.GetBuiltinType(AbsType, Error); 8919 if (Error != ASTContext::GE_None) 8920 return QualType(); 8921 8922 const FunctionProtoType *FT = BuiltinType->getAs<FunctionProtoType>(); 8923 if (!FT) 8924 return QualType(); 8925 8926 if (FT->getNumParams() != 1) 8927 return QualType(); 8928 8929 return FT->getParamType(0); 8930 } 8931 8932 // Returns the best absolute value function, or zero, based on type and 8933 // current absolute value function. 8934 static unsigned getBestAbsFunction(ASTContext &Context, QualType ArgType, 8935 unsigned AbsFunctionKind) { 8936 unsigned BestKind = 0; 8937 uint64_t ArgSize = Context.getTypeSize(ArgType); 8938 for (unsigned Kind = AbsFunctionKind; Kind != 0; 8939 Kind = getLargerAbsoluteValueFunction(Kind)) { 8940 QualType ParamType = getAbsoluteValueArgumentType(Context, Kind); 8941 if (Context.getTypeSize(ParamType) >= ArgSize) { 8942 if (BestKind == 0) 8943 BestKind = Kind; 8944 else if (Context.hasSameType(ParamType, ArgType)) { 8945 BestKind = Kind; 8946 break; 8947 } 8948 } 8949 } 8950 return BestKind; 8951 } 8952 8953 enum AbsoluteValueKind { 8954 AVK_Integer, 8955 AVK_Floating, 8956 AVK_Complex 8957 }; 8958 8959 static AbsoluteValueKind getAbsoluteValueKind(QualType T) { 8960 if (T->isIntegralOrEnumerationType()) 8961 return AVK_Integer; 8962 if (T->isRealFloatingType()) 8963 return AVK_Floating; 8964 if (T->isAnyComplexType()) 8965 return AVK_Complex; 8966 8967 llvm_unreachable("Type not integer, floating, or complex"); 8968 } 8969 8970 // Changes the absolute value function to a different type. Preserves whether 8971 // the function is a builtin. 
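// (e.g. with AVK_Floating an integer 'abs' is mapped to 'fabsf'; the caller
// then relies on getBestAbsFunction() to widen that to a variant large enough
// for the argument, such as 'fabs' for a double.)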
8972 static unsigned changeAbsFunction(unsigned AbsKind, 8973 AbsoluteValueKind ValueKind) { 8974 switch (ValueKind) { 8975 case AVK_Integer: 8976 switch (AbsKind) { 8977 default: 8978 return 0; 8979 case Builtin::BI__builtin_fabsf: 8980 case Builtin::BI__builtin_fabs: 8981 case Builtin::BI__builtin_fabsl: 8982 case Builtin::BI__builtin_cabsf: 8983 case Builtin::BI__builtin_cabs: 8984 case Builtin::BI__builtin_cabsl: 8985 return Builtin::BI__builtin_abs; 8986 case Builtin::BIfabsf: 8987 case Builtin::BIfabs: 8988 case Builtin::BIfabsl: 8989 case Builtin::BIcabsf: 8990 case Builtin::BIcabs: 8991 case Builtin::BIcabsl: 8992 return Builtin::BIabs; 8993 } 8994 case AVK_Floating: 8995 switch (AbsKind) { 8996 default: 8997 return 0; 8998 case Builtin::BI__builtin_abs: 8999 case Builtin::BI__builtin_labs: 9000 case Builtin::BI__builtin_llabs: 9001 case Builtin::BI__builtin_cabsf: 9002 case Builtin::BI__builtin_cabs: 9003 case Builtin::BI__builtin_cabsl: 9004 return Builtin::BI__builtin_fabsf; 9005 case Builtin::BIabs: 9006 case Builtin::BIlabs: 9007 case Builtin::BIllabs: 9008 case Builtin::BIcabsf: 9009 case Builtin::BIcabs: 9010 case Builtin::BIcabsl: 9011 return Builtin::BIfabsf; 9012 } 9013 case AVK_Complex: 9014 switch (AbsKind) { 9015 default: 9016 return 0; 9017 case Builtin::BI__builtin_abs: 9018 case Builtin::BI__builtin_labs: 9019 case Builtin::BI__builtin_llabs: 9020 case Builtin::BI__builtin_fabsf: 9021 case Builtin::BI__builtin_fabs: 9022 case Builtin::BI__builtin_fabsl: 9023 return Builtin::BI__builtin_cabsf; 9024 case Builtin::BIabs: 9025 case Builtin::BIlabs: 9026 case Builtin::BIllabs: 9027 case Builtin::BIfabsf: 9028 case Builtin::BIfabs: 9029 case Builtin::BIfabsl: 9030 return Builtin::BIcabsf; 9031 } 9032 } 9033 llvm_unreachable("Unable to convert function"); 9034 } 9035 9036 static unsigned getAbsoluteValueFunctionKind(const FunctionDecl *FDecl) { 9037 const IdentifierInfo *FnInfo = FDecl->getIdentifier(); 9038 if (!FnInfo) 9039 return 0; 9040 9041 switch (FDecl->getBuiltinID()) { 9042 default: 9043 return 0; 9044 case Builtin::BI__builtin_abs: 9045 case Builtin::BI__builtin_fabs: 9046 case Builtin::BI__builtin_fabsf: 9047 case Builtin::BI__builtin_fabsl: 9048 case Builtin::BI__builtin_labs: 9049 case Builtin::BI__builtin_llabs: 9050 case Builtin::BI__builtin_cabs: 9051 case Builtin::BI__builtin_cabsf: 9052 case Builtin::BI__builtin_cabsl: 9053 case Builtin::BIabs: 9054 case Builtin::BIlabs: 9055 case Builtin::BIllabs: 9056 case Builtin::BIfabs: 9057 case Builtin::BIfabsf: 9058 case Builtin::BIfabsl: 9059 case Builtin::BIcabs: 9060 case Builtin::BIcabsf: 9061 case Builtin::BIcabsl: 9062 return FDecl->getBuiltinID(); 9063 } 9064 llvm_unreachable("Unknown Builtin type"); 9065 } 9066 9067 // If the replacement is valid, emit a note with replacement function. 9068 // Additionally, suggest including the proper header if not already included. 
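// (For example, for an integral argument in C++ the replacement suggested
// below is 'std::abs', together with a hint to include <cstdlib> when no
// suitable declaration of it is visible.)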
9069 static void emitReplacement(Sema &S, SourceLocation Loc, SourceRange Range, 9070 unsigned AbsKind, QualType ArgType) { 9071 bool EmitHeaderHint = true; 9072 const char *HeaderName = nullptr; 9073 const char *FunctionName = nullptr; 9074 if (S.getLangOpts().CPlusPlus && !ArgType->isAnyComplexType()) { 9075 FunctionName = "std::abs"; 9076 if (ArgType->isIntegralOrEnumerationType()) { 9077 HeaderName = "cstdlib"; 9078 } else if (ArgType->isRealFloatingType()) { 9079 HeaderName = "cmath"; 9080 } else { 9081 llvm_unreachable("Invalid Type"); 9082 } 9083 9084 // Lookup all std::abs 9085 if (NamespaceDecl *Std = S.getStdNamespace()) { 9086 LookupResult R(S, &S.Context.Idents.get("abs"), Loc, Sema::LookupAnyName); 9087 R.suppressDiagnostics(); 9088 S.LookupQualifiedName(R, Std); 9089 9090 for (const auto *I : R) { 9091 const FunctionDecl *FDecl = nullptr; 9092 if (const UsingShadowDecl *UsingD = dyn_cast<UsingShadowDecl>(I)) { 9093 FDecl = dyn_cast<FunctionDecl>(UsingD->getTargetDecl()); 9094 } else { 9095 FDecl = dyn_cast<FunctionDecl>(I); 9096 } 9097 if (!FDecl) 9098 continue; 9099 9100 // Found std::abs(), check that they are the right ones. 9101 if (FDecl->getNumParams() != 1) 9102 continue; 9103 9104 // Check that the parameter type can handle the argument. 9105 QualType ParamType = FDecl->getParamDecl(0)->getType(); 9106 if (getAbsoluteValueKind(ArgType) == getAbsoluteValueKind(ParamType) && 9107 S.Context.getTypeSize(ArgType) <= 9108 S.Context.getTypeSize(ParamType)) { 9109 // Found a function, don't need the header hint. 9110 EmitHeaderHint = false; 9111 break; 9112 } 9113 } 9114 } 9115 } else { 9116 FunctionName = S.Context.BuiltinInfo.getName(AbsKind); 9117 HeaderName = S.Context.BuiltinInfo.getHeaderName(AbsKind); 9118 9119 if (HeaderName) { 9120 DeclarationName DN(&S.Context.Idents.get(FunctionName)); 9121 LookupResult R(S, DN, Loc, Sema::LookupAnyName); 9122 R.suppressDiagnostics(); 9123 S.LookupName(R, S.getCurScope()); 9124 9125 if (R.isSingleResult()) { 9126 FunctionDecl *FD = dyn_cast<FunctionDecl>(R.getFoundDecl()); 9127 if (FD && FD->getBuiltinID() == AbsKind) { 9128 EmitHeaderHint = false; 9129 } else { 9130 return; 9131 } 9132 } else if (!R.empty()) { 9133 return; 9134 } 9135 } 9136 } 9137 9138 S.Diag(Loc, diag::note_replace_abs_function) 9139 << FunctionName << FixItHint::CreateReplacement(Range, FunctionName); 9140 9141 if (!HeaderName) 9142 return; 9143 9144 if (!EmitHeaderHint) 9145 return; 9146 9147 S.Diag(Loc, diag::note_include_header_or_declare) << HeaderName 9148 << FunctionName; 9149 } 9150 9151 template <std::size_t StrLen> 9152 static bool IsStdFunction(const FunctionDecl *FDecl, 9153 const char (&Str)[StrLen]) { 9154 if (!FDecl) 9155 return false; 9156 if (!FDecl->getIdentifier() || !FDecl->getIdentifier()->isStr(Str)) 9157 return false; 9158 if (!FDecl->isInStdNamespace()) 9159 return false; 9160 9161 return true; 9162 } 9163 9164 // Warn when using the wrong abs() function. 9165 void Sema::CheckAbsoluteValueFunction(const CallExpr *Call, 9166 const FunctionDecl *FDecl) { 9167 if (Call->getNumArgs() != 1) 9168 return; 9169 9170 unsigned AbsKind = getAbsoluteValueFunctionKind(FDecl); 9171 bool IsStdAbs = IsStdFunction(FDecl, "abs"); 9172 if (AbsKind == 0 && !IsStdAbs) 9173 return; 9174 9175 QualType ArgType = Call->getArg(0)->IgnoreParenImpCasts()->getType(); 9176 QualType ParamType = Call->getArg(0)->getType(); 9177 9178 // Unsigned types cannot be negative. Suggest removing the absolute value 9179 // function call. 
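  // (e.g. abs(someUnsignedValue) always returns its operand unchanged, so the
  // note's fixit removes the callee and leaves just the parenthesized
  // argument.)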
9180 if (ArgType->isUnsignedIntegerType()) { 9181 const char *FunctionName = 9182 IsStdAbs ? "std::abs" : Context.BuiltinInfo.getName(AbsKind); 9183 Diag(Call->getExprLoc(), diag::warn_unsigned_abs) << ArgType << ParamType; 9184 Diag(Call->getExprLoc(), diag::note_remove_abs) 9185 << FunctionName 9186 << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange()); 9187 return; 9188 } 9189 9190 // Taking the absolute value of a pointer is very suspicious; they probably 9191 // wanted to index into an array, dereference a pointer, call a function, etc. 9192 if (ArgType->isPointerType() || ArgType->canDecayToPointerType()) { 9193 unsigned DiagType = 0; 9194 if (ArgType->isFunctionType()) 9195 DiagType = 1; 9196 else if (ArgType->isArrayType()) 9197 DiagType = 2; 9198 9199 Diag(Call->getExprLoc(), diag::warn_pointer_abs) << DiagType << ArgType; 9200 return; 9201 } 9202 9203 // std::abs has overloads which prevent most of the absolute value problems 9204 // from occurring. 9205 if (IsStdAbs) 9206 return; 9207 9208 AbsoluteValueKind ArgValueKind = getAbsoluteValueKind(ArgType); 9209 AbsoluteValueKind ParamValueKind = getAbsoluteValueKind(ParamType); 9210 9211 // The argument and parameter are the same kind. Check if they are the right 9212 // size. 9213 if (ArgValueKind == ParamValueKind) { 9214 if (Context.getTypeSize(ArgType) <= Context.getTypeSize(ParamType)) 9215 return; 9216 9217 unsigned NewAbsKind = getBestAbsFunction(Context, ArgType, AbsKind); 9218 Diag(Call->getExprLoc(), diag::warn_abs_too_small) 9219 << FDecl << ArgType << ParamType; 9220 9221 if (NewAbsKind == 0) 9222 return; 9223 9224 emitReplacement(*this, Call->getExprLoc(), 9225 Call->getCallee()->getSourceRange(), NewAbsKind, ArgType); 9226 return; 9227 } 9228 9229 // ArgValueKind != ParamValueKind 9230 // The wrong type of absolute value function was used. Attempt to find the 9231 // proper one. 9232 unsigned NewAbsKind = changeAbsFunction(AbsKind, ArgValueKind); 9233 NewAbsKind = getBestAbsFunction(Context, ArgType, NewAbsKind); 9234 if (NewAbsKind == 0) 9235 return; 9236 9237 Diag(Call->getExprLoc(), diag::warn_wrong_absolute_value_type) 9238 << FDecl << ParamValueKind << ArgValueKind; 9239 9240 emitReplacement(*this, Call->getExprLoc(), 9241 Call->getCallee()->getSourceRange(), NewAbsKind, ArgType); 9242 } 9243 9244 //===--- CHECK: Warn on use of std::max and unsigned zero. ----------------===// 9245 void Sema::CheckMaxUnsignedZero(const CallExpr *Call, 9246 const FunctionDecl *FDecl) { 9247 if (!Call || !FDecl) return; 9248 9249 // Ignore template specializations and macros. 9250 if (inTemplateInstantiation()) return; 9251 if (Call->getExprLoc().isMacroID()) return; 9252 9253 // Only care about the std::max overload with one template argument and two function parameters. 9254 if (Call->getNumArgs() != 2) return; 9255 if (!IsStdFunction(FDecl, "max")) return; 9256 const auto * ArgList = FDecl->getTemplateSpecializationArgs(); 9257 if (!ArgList) return; 9258 if (ArgList->size() != 1) return; 9259 9260 // Check that the template type argument is an unsigned integer. 9261 const auto& TA = ArgList->get(0); 9262 if (TA.getKind() != TemplateArgument::Type) return; 9263 QualType ArgType = TA.getAsType(); 9264 if (!ArgType->isUnsignedIntegerType()) return; 9265 9266 // See if either argument is a literal zero.
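  // (The pattern being diagnosed is, e.g., std::max(0u, n): with an unsigned
  // template argument the result is always the other operand, so the note
  // below suggests removing the call.)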
9267 auto IsLiteralZeroArg = [](const Expr* E) -> bool { 9268 const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E); 9269 if (!MTE) return false; 9270 const auto *Num = dyn_cast<IntegerLiteral>(MTE->GetTemporaryExpr()); 9271 if (!Num) return false; 9272 if (Num->getValue() != 0) return false; 9273 return true; 9274 }; 9275 9276 const Expr *FirstArg = Call->getArg(0); 9277 const Expr *SecondArg = Call->getArg(1); 9278 const bool IsFirstArgZero = IsLiteralZeroArg(FirstArg); 9279 const bool IsSecondArgZero = IsLiteralZeroArg(SecondArg); 9280 9281 // Only warn when exactly one argument is zero. 9282 if (IsFirstArgZero == IsSecondArgZero) return; 9283 9284 SourceRange FirstRange = FirstArg->getSourceRange(); 9285 SourceRange SecondRange = SecondArg->getSourceRange(); 9286 9287 SourceRange ZeroRange = IsFirstArgZero ? FirstRange : SecondRange; 9288 9289 Diag(Call->getExprLoc(), diag::warn_max_unsigned_zero) 9290 << IsFirstArgZero << Call->getCallee()->getSourceRange() << ZeroRange; 9291 9292 // Deduce what parts to remove so that "std::max(0u, foo)" becomes "(foo)". 9293 SourceRange RemovalRange; 9294 if (IsFirstArgZero) { 9295 RemovalRange = SourceRange(FirstRange.getBegin(), 9296 SecondRange.getBegin().getLocWithOffset(-1)); 9297 } else { 9298 RemovalRange = SourceRange(getLocForEndOfToken(FirstRange.getEnd()), 9299 SecondRange.getEnd()); 9300 } 9301 9302 Diag(Call->getExprLoc(), diag::note_remove_max_call) 9303 << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange()) 9304 << FixItHint::CreateRemoval(RemovalRange); 9305 } 9306 9307 //===--- CHECK: Standard memory functions ---------------------------------===// 9308 9309 /// Takes the expression passed to the size_t parameter of functions 9310 /// such as memcmp, strncat, etc and warns if it's a comparison. 9311 /// 9312 /// This is to catch typos like `if (memcmp(&a, &b, sizeof(a) > 0))`. 9313 static bool CheckMemorySizeofForComparison(Sema &S, const Expr *E, 9314 IdentifierInfo *FnName, 9315 SourceLocation FnLoc, 9316 SourceLocation RParenLoc) { 9317 const BinaryOperator *Size = dyn_cast<BinaryOperator>(E); 9318 if (!Size) 9319 return false; 9320 9321 // if E is binop and op is <=>, >, <, >=, <=, ==, &&, ||: 9322 if (!Size->isComparisonOp() && !Size->isLogicalOp()) 9323 return false; 9324 9325 SourceRange SizeRange = Size->getSourceRange(); 9326 S.Diag(Size->getOperatorLoc(), diag::warn_memsize_comparison) 9327 << SizeRange << FnName; 9328 S.Diag(FnLoc, diag::note_memsize_comparison_paren) 9329 << FnName 9330 << FixItHint::CreateInsertion( 9331 S.getLocForEndOfToken(Size->getLHS()->getEndLoc()), ")") 9332 << FixItHint::CreateRemoval(RParenLoc); 9333 S.Diag(SizeRange.getBegin(), diag::note_memsize_comparison_cast_silence) 9334 << FixItHint::CreateInsertion(SizeRange.getBegin(), "(size_t)(") 9335 << FixItHint::CreateInsertion(S.getLocForEndOfToken(SizeRange.getEnd()), 9336 ")"); 9337 9338 return true; 9339 } 9340 9341 /// Determine whether the given type is or contains a dynamic class type 9342 /// (e.g., whether it has a vtable). 9343 static const CXXRecordDecl *getContainedDynamicClass(QualType T, 9344 bool &IsContained) { 9345 // Look through array types while ignoring qualifiers. 9346 const Type *Ty = T->getBaseElementTypeUnsafe(); 9347 IsContained = false; 9348 9349 const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl(); 9350 RD = RD ? RD->getDefinition() : nullptr; 9351 if (!RD || RD->isInvalidDecl()) 9352 return nullptr; 9353 9354 if (RD->isDynamicClass()) 9355 return RD; 9356 9357 // Check all the fields. 
If any bases were dynamic, the class is dynamic. 9358 // It's impossible for a class to transitively contain itself by value, so 9359 // infinite recursion is impossible. 9360 for (auto *FD : RD->fields()) { 9361 bool SubContained; 9362 if (const CXXRecordDecl *ContainedRD = 9363 getContainedDynamicClass(FD->getType(), SubContained)) { 9364 IsContained = true; 9365 return ContainedRD; 9366 } 9367 } 9368 9369 return nullptr; 9370 } 9371 9372 static const UnaryExprOrTypeTraitExpr *getAsSizeOfExpr(const Expr *E) { 9373 if (const auto *Unary = dyn_cast<UnaryExprOrTypeTraitExpr>(E)) 9374 if (Unary->getKind() == UETT_SizeOf) 9375 return Unary; 9376 return nullptr; 9377 } 9378 9379 /// If E is a sizeof expression, returns its argument expression, 9380 /// otherwise returns NULL. 9381 static const Expr *getSizeOfExprArg(const Expr *E) { 9382 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E)) 9383 if (!SizeOf->isArgumentType()) 9384 return SizeOf->getArgumentExpr()->IgnoreParenImpCasts(); 9385 return nullptr; 9386 } 9387 9388 /// If E is a sizeof expression, returns its argument type. 9389 static QualType getSizeOfArgType(const Expr *E) { 9390 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E)) 9391 return SizeOf->getTypeOfArgument(); 9392 return QualType(); 9393 } 9394 9395 namespace { 9396 9397 struct SearchNonTrivialToInitializeField 9398 : DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField> { 9399 using Super = 9400 DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField>; 9401 9402 SearchNonTrivialToInitializeField(const Expr *E, Sema &S) : E(E), S(S) {} 9403 9404 void visitWithKind(QualType::PrimitiveDefaultInitializeKind PDIK, QualType FT, 9405 SourceLocation SL) { 9406 if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) { 9407 asDerived().visitArray(PDIK, AT, SL); 9408 return; 9409 } 9410 9411 Super::visitWithKind(PDIK, FT, SL); 9412 } 9413 9414 void visitARCStrong(QualType FT, SourceLocation SL) { 9415 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1); 9416 } 9417 void visitARCWeak(QualType FT, SourceLocation SL) { 9418 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1); 9419 } 9420 void visitStruct(QualType FT, SourceLocation SL) { 9421 for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields()) 9422 visit(FD->getType(), FD->getLocation()); 9423 } 9424 void visitArray(QualType::PrimitiveDefaultInitializeKind PDIK, 9425 const ArrayType *AT, SourceLocation SL) { 9426 visit(getContext().getBaseElementType(AT), SL); 9427 } 9428 void visitTrivial(QualType FT, SourceLocation SL) {} 9429 9430 static void diag(QualType RT, const Expr *E, Sema &S) { 9431 SearchNonTrivialToInitializeField(E, S).visitStruct(RT, SourceLocation()); 9432 } 9433 9434 ASTContext &getContext() { return S.getASTContext(); } 9435 9436 const Expr *E; 9437 Sema &S; 9438 }; 9439 9440 struct SearchNonTrivialToCopyField 9441 : CopiedTypeVisitor<SearchNonTrivialToCopyField, false> { 9442 using Super = CopiedTypeVisitor<SearchNonTrivialToCopyField, false>; 9443 9444 SearchNonTrivialToCopyField(const Expr *E, Sema &S) : E(E), S(S) {} 9445 9446 void visitWithKind(QualType::PrimitiveCopyKind PCK, QualType FT, 9447 SourceLocation SL) { 9448 if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) { 9449 asDerived().visitArray(PCK, AT, SL); 9450 return; 9451 } 9452 9453 Super::visitWithKind(PCK, FT, SL); 9454 } 9455 9456 void visitARCStrong(QualType FT, SourceLocation SL) { 9457 S.DiagRuntimeBehavior(SL, E, 
S.PDiag(diag::note_nontrivial_field) << 0); 9458 } 9459 void visitARCWeak(QualType FT, SourceLocation SL) { 9460 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0); 9461 } 9462 void visitStruct(QualType FT, SourceLocation SL) { 9463 for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields()) 9464 visit(FD->getType(), FD->getLocation()); 9465 } 9466 void visitArray(QualType::PrimitiveCopyKind PCK, const ArrayType *AT, 9467 SourceLocation SL) { 9468 visit(getContext().getBaseElementType(AT), SL); 9469 } 9470 void preVisit(QualType::PrimitiveCopyKind PCK, QualType FT, 9471 SourceLocation SL) {} 9472 void visitTrivial(QualType FT, SourceLocation SL) {} 9473 void visitVolatileTrivial(QualType FT, SourceLocation SL) {} 9474 9475 static void diag(QualType RT, const Expr *E, Sema &S) { 9476 SearchNonTrivialToCopyField(E, S).visitStruct(RT, SourceLocation()); 9477 } 9478 9479 ASTContext &getContext() { return S.getASTContext(); } 9480 9481 const Expr *E; 9482 Sema &S; 9483 }; 9484 9485 } 9486 9487 /// Detect if \c SizeofExpr is likely to calculate the sizeof an object. 9488 static bool doesExprLikelyComputeSize(const Expr *SizeofExpr) { 9489 SizeofExpr = SizeofExpr->IgnoreParenImpCasts(); 9490 9491 if (const auto *BO = dyn_cast<BinaryOperator>(SizeofExpr)) { 9492 if (BO->getOpcode() != BO_Mul && BO->getOpcode() != BO_Add) 9493 return false; 9494 9495 return doesExprLikelyComputeSize(BO->getLHS()) || 9496 doesExprLikelyComputeSize(BO->getRHS()); 9497 } 9498 9499 return getAsSizeOfExpr(SizeofExpr) != nullptr; 9500 } 9501 9502 /// Check if the ArgLoc originated from a macro passed to the call at CallLoc. 9503 /// 9504 /// \code 9505 /// #define MACRO 0 9506 /// foo(MACRO); 9507 /// foo(0); 9508 /// \endcode 9509 /// 9510 /// This should return true for the first call to foo, but not for the second 9511 /// (regardless of whether foo is a macro or function). 9512 static bool isArgumentExpandedFromMacro(SourceManager &SM, 9513 SourceLocation CallLoc, 9514 SourceLocation ArgLoc) { 9515 if (!CallLoc.isMacroID()) 9516 return SM.getFileID(CallLoc) != SM.getFileID(ArgLoc); 9517 9518 return SM.getFileID(SM.getImmediateMacroCallerLoc(CallLoc)) != 9519 SM.getFileID(SM.getImmediateMacroCallerLoc(ArgLoc)); 9520 } 9521 9522 /// Diagnose cases like 'memset(buf, sizeof(buf), 0)', which should have the 9523 /// last two arguments transposed. 9524 static void CheckMemaccessSize(Sema &S, unsigned BId, const CallExpr *Call) { 9525 if (BId != Builtin::BImemset && BId != Builtin::BIbzero) 9526 return; 9527 9528 const Expr *SizeArg = 9529 Call->getArg(BId == Builtin::BImemset ? 2 : 1)->IgnoreImpCasts(); 9530 9531 auto isLiteralZero = [](const Expr *E) { 9532 return isa<IntegerLiteral>(E) && cast<IntegerLiteral>(E)->getValue() == 0; 9533 }; 9534 9535 // If we're memsetting or bzeroing 0 bytes, then this is likely an error. 9536 SourceLocation CallLoc = Call->getRParenLoc(); 9537 SourceManager &SM = S.getSourceManager(); 9538 if (isLiteralZero(SizeArg) && 9539 !isArgumentExpandedFromMacro(SM, CallLoc, SizeArg->getExprLoc())) { 9540 9541 SourceLocation DiagLoc = SizeArg->getExprLoc(); 9542 9543 // Some platforms #define bzero to __builtin_memset. See if this is the 9544 // case, and if so, emit a better diagnostic. 
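    // (One way such a definition might look, purely for illustration:
    //   #define bzero(b, n) __builtin_memset(b, 0, n)
    // In that case the call was written against bzero's two-argument
    // signature, so the bzero-flavoured diagnostic below reads better.)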
9545 if (BId == Builtin::BIbzero || 9546 (CallLoc.isMacroID() && Lexer::getImmediateMacroName( 9547 CallLoc, SM, S.getLangOpts()) == "bzero")) { 9548 S.Diag(DiagLoc, diag::warn_suspicious_bzero_size); 9549 S.Diag(DiagLoc, diag::note_suspicious_bzero_size_silence); 9550 } else if (!isLiteralZero(Call->getArg(1)->IgnoreImpCasts())) { 9551 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 0; 9552 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 0; 9553 } 9554 return; 9555 } 9556 9557 // If the second argument to a memset is a sizeof expression and the third 9558 // isn't, this is also likely an error. This should catch 9559 // 'memset(buf, sizeof(buf), 0xff)'. 9560 if (BId == Builtin::BImemset && 9561 doesExprLikelyComputeSize(Call->getArg(1)) && 9562 !doesExprLikelyComputeSize(Call->getArg(2))) { 9563 SourceLocation DiagLoc = Call->getArg(1)->getExprLoc(); 9564 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 1; 9565 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 1; 9566 return; 9567 } 9568 } 9569 9570 /// Check for dangerous or invalid arguments to memset(). 9571 /// 9572 /// This issues warnings on known problematic, dangerous or unspecified 9573 /// arguments to the standard 'memset', 'memcpy', 'memmove', and 'memcmp' 9574 /// function calls. 9575 /// 9576 /// \param Call The call expression to diagnose. 9577 void Sema::CheckMemaccessArguments(const CallExpr *Call, 9578 unsigned BId, 9579 IdentifierInfo *FnName) { 9580 assert(BId != 0); 9581 9582 // It is possible to have a non-standard definition of memset. Validate 9583 // we have enough arguments, and if not, abort further checking. 9584 unsigned ExpectedNumArgs = 9585 (BId == Builtin::BIstrndup || BId == Builtin::BIbzero ? 2 : 3); 9586 if (Call->getNumArgs() < ExpectedNumArgs) 9587 return; 9588 9589 unsigned LastArg = (BId == Builtin::BImemset || BId == Builtin::BIbzero || 9590 BId == Builtin::BIstrndup ? 1 : 2); 9591 unsigned LenArg = 9592 (BId == Builtin::BIbzero || BId == Builtin::BIstrndup ? 1 : 2); 9593 const Expr *LenExpr = Call->getArg(LenArg)->IgnoreParenImpCasts(); 9594 9595 if (CheckMemorySizeofForComparison(*this, LenExpr, FnName, 9596 Call->getBeginLoc(), Call->getRParenLoc())) 9597 return; 9598 9599 // Catch cases like 'memset(buf, sizeof(buf), 0)'. 9600 CheckMemaccessSize(*this, BId, Call); 9601 9602 // We have special checking when the length is a sizeof expression. 9603 QualType SizeOfArgTy = getSizeOfArgType(LenExpr); 9604 const Expr *SizeOfArg = getSizeOfExprArg(LenExpr); 9605 llvm::FoldingSetNodeID SizeOfArgID; 9606 9607 // Although widely used, 'bzero' is not a standard function. Be more strict 9608 // with the argument types before allowing diagnostics and only allow the 9609 // form bzero(ptr, sizeof(...)). 9610 QualType FirstArgTy = Call->getArg(0)->IgnoreParenImpCasts()->getType(); 9611 if (BId == Builtin::BIbzero && !FirstArgTy->getAs<PointerType>()) 9612 return; 9613 9614 for (unsigned ArgIdx = 0; ArgIdx != LastArg; ++ArgIdx) { 9615 const Expr *Dest = Call->getArg(ArgIdx)->IgnoreParenImpCasts(); 9616 SourceRange ArgRange = Call->getArg(ArgIdx)->getSourceRange(); 9617 9618 QualType DestTy = Dest->getType(); 9619 QualType PointeeTy; 9620 if (const PointerType *DestPtrTy = DestTy->getAs<PointerType>()) { 9621 PointeeTy = DestPtrTy->getPointeeType(); 9622 9623 // Never warn about void type pointers. This can be used to suppress 9624 // false positives. 
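      // (e.g. memset((void *)&obj, 0, sizeof(obj)) is deliberately left
      // undiagnosed, matching the "(void*)" silencing fixit suggested at the
      // end of this loop.)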
9625 if (PointeeTy->isVoidType()) 9626 continue; 9627 9628 // Catch "memset(p, 0, sizeof(p))" -- needs to be sizeof(*p). Do this by 9629 // actually comparing the expressions for equality. Because computing the 9630 // expression IDs can be expensive, we only do this if the diagnostic is 9631 // enabled. 9632 if (SizeOfArg && 9633 !Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess, 9634 SizeOfArg->getExprLoc())) { 9635 // We only compute IDs for expressions if the warning is enabled, and 9636 // cache the sizeof arg's ID. 9637 if (SizeOfArgID == llvm::FoldingSetNodeID()) 9638 SizeOfArg->Profile(SizeOfArgID, Context, true); 9639 llvm::FoldingSetNodeID DestID; 9640 Dest->Profile(DestID, Context, true); 9641 if (DestID == SizeOfArgID) { 9642 // TODO: For strncpy() and friends, this could suggest sizeof(dst) 9643 // over sizeof(src) as well. 9644 unsigned ActionIdx = 0; // Default is to suggest dereferencing. 9645 StringRef ReadableName = FnName->getName(); 9646 9647 if (const UnaryOperator *UnaryOp = dyn_cast<UnaryOperator>(Dest)) 9648 if (UnaryOp->getOpcode() == UO_AddrOf) 9649 ActionIdx = 1; // If its an address-of operator, just remove it. 9650 if (!PointeeTy->isIncompleteType() && 9651 (Context.getTypeSize(PointeeTy) == Context.getCharWidth())) 9652 ActionIdx = 2; // If the pointee's size is sizeof(char), 9653 // suggest an explicit length. 9654 9655 // If the function is defined as a builtin macro, do not show macro 9656 // expansion. 9657 SourceLocation SL = SizeOfArg->getExprLoc(); 9658 SourceRange DSR = Dest->getSourceRange(); 9659 SourceRange SSR = SizeOfArg->getSourceRange(); 9660 SourceManager &SM = getSourceManager(); 9661 9662 if (SM.isMacroArgExpansion(SL)) { 9663 ReadableName = Lexer::getImmediateMacroName(SL, SM, LangOpts); 9664 SL = SM.getSpellingLoc(SL); 9665 DSR = SourceRange(SM.getSpellingLoc(DSR.getBegin()), 9666 SM.getSpellingLoc(DSR.getEnd())); 9667 SSR = SourceRange(SM.getSpellingLoc(SSR.getBegin()), 9668 SM.getSpellingLoc(SSR.getEnd())); 9669 } 9670 9671 DiagRuntimeBehavior(SL, SizeOfArg, 9672 PDiag(diag::warn_sizeof_pointer_expr_memaccess) 9673 << ReadableName 9674 << PointeeTy 9675 << DestTy 9676 << DSR 9677 << SSR); 9678 DiagRuntimeBehavior(SL, SizeOfArg, 9679 PDiag(diag::warn_sizeof_pointer_expr_memaccess_note) 9680 << ActionIdx 9681 << SSR); 9682 9683 break; 9684 } 9685 } 9686 9687 // Also check for cases where the sizeof argument is the exact same 9688 // type as the memory argument, and where it points to a user-defined 9689 // record type. 9690 if (SizeOfArgTy != QualType()) { 9691 if (PointeeTy->isRecordType() && 9692 Context.typesAreCompatible(SizeOfArgTy, DestTy)) { 9693 DiagRuntimeBehavior(LenExpr->getExprLoc(), Dest, 9694 PDiag(diag::warn_sizeof_pointer_type_memaccess) 9695 << FnName << SizeOfArgTy << ArgIdx 9696 << PointeeTy << Dest->getSourceRange() 9697 << LenExpr->getSourceRange()); 9698 break; 9699 } 9700 } 9701 } else if (DestTy->isArrayType()) { 9702 PointeeTy = DestTy; 9703 } 9704 9705 if (PointeeTy == QualType()) 9706 continue; 9707 9708 // Always complain about dynamic classes. 9709 bool IsContained; 9710 if (const CXXRecordDecl *ContainedRD = 9711 getContainedDynamicClass(PointeeTy, IsContained)) { 9712 9713 unsigned OperationType = 0; 9714 const bool IsCmp = BId == Builtin::BImemcmp || BId == Builtin::BIbcmp; 9715 // "overwritten" if we're warning about the destination for any call 9716 // but memcmp; otherwise a verb appropriate to the call. 
9717 if (ArgIdx != 0 || IsCmp) { 9718 if (BId == Builtin::BImemcpy) 9719 OperationType = 1; 9720 else if(BId == Builtin::BImemmove) 9721 OperationType = 2; 9722 else if (IsCmp) 9723 OperationType = 3; 9724 } 9725 9726 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 9727 PDiag(diag::warn_dyn_class_memaccess) 9728 << (IsCmp ? ArgIdx + 2 : ArgIdx) << FnName 9729 << IsContained << ContainedRD << OperationType 9730 << Call->getCallee()->getSourceRange()); 9731 } else if (PointeeTy.hasNonTrivialObjCLifetime() && 9732 BId != Builtin::BImemset) 9733 DiagRuntimeBehavior( 9734 Dest->getExprLoc(), Dest, 9735 PDiag(diag::warn_arc_object_memaccess) 9736 << ArgIdx << FnName << PointeeTy 9737 << Call->getCallee()->getSourceRange()); 9738 else if (const auto *RT = PointeeTy->getAs<RecordType>()) { 9739 if ((BId == Builtin::BImemset || BId == Builtin::BIbzero) && 9740 RT->getDecl()->isNonTrivialToPrimitiveDefaultInitialize()) { 9741 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 9742 PDiag(diag::warn_cstruct_memaccess) 9743 << ArgIdx << FnName << PointeeTy << 0); 9744 SearchNonTrivialToInitializeField::diag(PointeeTy, Dest, *this); 9745 } else if ((BId == Builtin::BImemcpy || BId == Builtin::BImemmove) && 9746 RT->getDecl()->isNonTrivialToPrimitiveCopy()) { 9747 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 9748 PDiag(diag::warn_cstruct_memaccess) 9749 << ArgIdx << FnName << PointeeTy << 1); 9750 SearchNonTrivialToCopyField::diag(PointeeTy, Dest, *this); 9751 } else { 9752 continue; 9753 } 9754 } else 9755 continue; 9756 9757 DiagRuntimeBehavior( 9758 Dest->getExprLoc(), Dest, 9759 PDiag(diag::note_bad_memaccess_silence) 9760 << FixItHint::CreateInsertion(ArgRange.getBegin(), "(void*)")); 9761 break; 9762 } 9763 } 9764 9765 // A little helper routine: ignore addition and subtraction of integer literals. 9766 // This intentionally does not ignore all integer constant expressions because 9767 // we don't want to remove sizeof(). 9768 static const Expr *ignoreLiteralAdditions(const Expr *Ex, ASTContext &Ctx) { 9769 Ex = Ex->IgnoreParenCasts(); 9770 9771 while (true) { 9772 const BinaryOperator * BO = dyn_cast<BinaryOperator>(Ex); 9773 if (!BO || !BO->isAdditiveOp()) 9774 break; 9775 9776 const Expr *RHS = BO->getRHS()->IgnoreParenCasts(); 9777 const Expr *LHS = BO->getLHS()->IgnoreParenCasts(); 9778 9779 if (isa<IntegerLiteral>(RHS)) 9780 Ex = LHS; 9781 else if (isa<IntegerLiteral>(LHS)) 9782 Ex = RHS; 9783 else 9784 break; 9785 } 9786 9787 return Ex; 9788 } 9789 9790 static bool isConstantSizeArrayWithMoreThanOneElement(QualType Ty, 9791 ASTContext &Context) { 9792 // Only handle constant-sized or VLAs, but not flexible members. 9793 if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(Ty)) { 9794 // Only issue the FIXIT for arrays of size > 1. 9795 if (CAT->getSize().getSExtValue() <= 1) 9796 return false; 9797 } else if (!Ty->isVariableArrayType()) { 9798 return false; 9799 } 9800 return true; 9801 } 9802 9803 // Warn if the user has made the 'size' argument to strlcpy or strlcat 9804 // be the size of the source, instead of the destination. 
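// A representative instance (illustrative only):
//   strlcpy(dst, src, sizeof(src));   // size of the wrong buffer
//   strlcpy(dst, src, strlen(src));   // likewise diagnosed below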
9805 void Sema::CheckStrlcpycatArguments(const CallExpr *Call, 9806 IdentifierInfo *FnName) { 9807 9808 // Don't crash if the user has the wrong number of arguments 9809 unsigned NumArgs = Call->getNumArgs(); 9810 if ((NumArgs != 3) && (NumArgs != 4)) 9811 return; 9812 9813 const Expr *SrcArg = ignoreLiteralAdditions(Call->getArg(1), Context); 9814 const Expr *SizeArg = ignoreLiteralAdditions(Call->getArg(2), Context); 9815 const Expr *CompareWithSrc = nullptr; 9816 9817 if (CheckMemorySizeofForComparison(*this, SizeArg, FnName, 9818 Call->getBeginLoc(), Call->getRParenLoc())) 9819 return; 9820 9821 // Look for 'strlcpy(dst, x, sizeof(x))' 9822 if (const Expr *Ex = getSizeOfExprArg(SizeArg)) 9823 CompareWithSrc = Ex; 9824 else { 9825 // Look for 'strlcpy(dst, x, strlen(x))' 9826 if (const CallExpr *SizeCall = dyn_cast<CallExpr>(SizeArg)) { 9827 if (SizeCall->getBuiltinCallee() == Builtin::BIstrlen && 9828 SizeCall->getNumArgs() == 1) 9829 CompareWithSrc = ignoreLiteralAdditions(SizeCall->getArg(0), Context); 9830 } 9831 } 9832 9833 if (!CompareWithSrc) 9834 return; 9835 9836 // Determine if the argument to sizeof/strlen is equal to the source 9837 // argument. In principle there's all kinds of things you could do 9838 // here, for instance creating an == expression and evaluating it with 9839 // EvaluateAsBooleanCondition, but this uses a more direct technique: 9840 const DeclRefExpr *SrcArgDRE = dyn_cast<DeclRefExpr>(SrcArg); 9841 if (!SrcArgDRE) 9842 return; 9843 9844 const DeclRefExpr *CompareWithSrcDRE = dyn_cast<DeclRefExpr>(CompareWithSrc); 9845 if (!CompareWithSrcDRE || 9846 SrcArgDRE->getDecl() != CompareWithSrcDRE->getDecl()) 9847 return; 9848 9849 const Expr *OriginalSizeArg = Call->getArg(2); 9850 Diag(CompareWithSrcDRE->getBeginLoc(), diag::warn_strlcpycat_wrong_size) 9851 << OriginalSizeArg->getSourceRange() << FnName; 9852 9853 // Output a FIXIT hint if the destination is an array (rather than a 9854 // pointer to an array). This could be enhanced to handle some 9855 // pointers if we know the actual size, like if DstArg is 'array+2' 9856 // we could say 'sizeof(array)-2'. 9857 const Expr *DstArg = Call->getArg(0)->IgnoreParenImpCasts(); 9858 if (!isConstantSizeArrayWithMoreThanOneElement(DstArg->getType(), Context)) 9859 return; 9860 9861 SmallString<128> sizeString; 9862 llvm::raw_svector_ostream OS(sizeString); 9863 OS << "sizeof("; 9864 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 9865 OS << ")"; 9866 9867 Diag(OriginalSizeArg->getBeginLoc(), diag::note_strlcpycat_wrong_size) 9868 << FixItHint::CreateReplacement(OriginalSizeArg->getSourceRange(), 9869 OS.str()); 9870 } 9871 9872 /// Check if two expressions refer to the same declaration. 9873 static bool referToTheSameDecl(const Expr *E1, const Expr *E2) { 9874 if (const DeclRefExpr *D1 = dyn_cast_or_null<DeclRefExpr>(E1)) 9875 if (const DeclRefExpr *D2 = dyn_cast_or_null<DeclRefExpr>(E2)) 9876 return D1->getDecl() == D2->getDecl(); 9877 return false; 9878 } 9879 9880 static const Expr *getStrlenExprArg(const Expr *E) { 9881 if (const CallExpr *CE = dyn_cast<CallExpr>(E)) { 9882 const FunctionDecl *FD = CE->getDirectCallee(); 9883 if (!FD || FD->getMemoryFunctionKind() != Builtin::BIstrlen) 9884 return nullptr; 9885 return CE->getArg(0)->IgnoreParenCasts(); 9886 } 9887 return nullptr; 9888 } 9889 9890 // Warn on anti-patterns as the 'size' argument to strncat. 
9891 // The correct size argument should look like following: 9892 // strncat(dst, src, sizeof(dst) - strlen(dest) - 1); 9893 void Sema::CheckStrncatArguments(const CallExpr *CE, 9894 IdentifierInfo *FnName) { 9895 // Don't crash if the user has the wrong number of arguments. 9896 if (CE->getNumArgs() < 3) 9897 return; 9898 const Expr *DstArg = CE->getArg(0)->IgnoreParenCasts(); 9899 const Expr *SrcArg = CE->getArg(1)->IgnoreParenCasts(); 9900 const Expr *LenArg = CE->getArg(2)->IgnoreParenCasts(); 9901 9902 if (CheckMemorySizeofForComparison(*this, LenArg, FnName, CE->getBeginLoc(), 9903 CE->getRParenLoc())) 9904 return; 9905 9906 // Identify common expressions, which are wrongly used as the size argument 9907 // to strncat and may lead to buffer overflows. 9908 unsigned PatternType = 0; 9909 if (const Expr *SizeOfArg = getSizeOfExprArg(LenArg)) { 9910 // - sizeof(dst) 9911 if (referToTheSameDecl(SizeOfArg, DstArg)) 9912 PatternType = 1; 9913 // - sizeof(src) 9914 else if (referToTheSameDecl(SizeOfArg, SrcArg)) 9915 PatternType = 2; 9916 } else if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(LenArg)) { 9917 if (BE->getOpcode() == BO_Sub) { 9918 const Expr *L = BE->getLHS()->IgnoreParenCasts(); 9919 const Expr *R = BE->getRHS()->IgnoreParenCasts(); 9920 // - sizeof(dst) - strlen(dst) 9921 if (referToTheSameDecl(DstArg, getSizeOfExprArg(L)) && 9922 referToTheSameDecl(DstArg, getStrlenExprArg(R))) 9923 PatternType = 1; 9924 // - sizeof(src) - (anything) 9925 else if (referToTheSameDecl(SrcArg, getSizeOfExprArg(L))) 9926 PatternType = 2; 9927 } 9928 } 9929 9930 if (PatternType == 0) 9931 return; 9932 9933 // Generate the diagnostic. 9934 SourceLocation SL = LenArg->getBeginLoc(); 9935 SourceRange SR = LenArg->getSourceRange(); 9936 SourceManager &SM = getSourceManager(); 9937 9938 // If the function is defined as a builtin macro, do not show macro expansion. 9939 if (SM.isMacroArgExpansion(SL)) { 9940 SL = SM.getSpellingLoc(SL); 9941 SR = SourceRange(SM.getSpellingLoc(SR.getBegin()), 9942 SM.getSpellingLoc(SR.getEnd())); 9943 } 9944 9945 // Check if the destination is an array (rather than a pointer to an array). 9946 QualType DstTy = DstArg->getType(); 9947 bool isKnownSizeArray = isConstantSizeArrayWithMoreThanOneElement(DstTy, 9948 Context); 9949 if (!isKnownSizeArray) { 9950 if (PatternType == 1) 9951 Diag(SL, diag::warn_strncat_wrong_size) << SR; 9952 else 9953 Diag(SL, diag::warn_strncat_src_size) << SR; 9954 return; 9955 } 9956 9957 if (PatternType == 1) 9958 Diag(SL, diag::warn_strncat_large_size) << SR; 9959 else 9960 Diag(SL, diag::warn_strncat_src_size) << SR; 9961 9962 SmallString<128> sizeString; 9963 llvm::raw_svector_ostream OS(sizeString); 9964 OS << "sizeof("; 9965 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 9966 OS << ") - "; 9967 OS << "strlen("; 9968 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 9969 OS << ") - 1"; 9970 9971 Diag(SL, diag::note_strncat_wrong_size) 9972 << FixItHint::CreateReplacement(SR, OS.str()); 9973 } 9974 9975 void 9976 Sema::CheckReturnValExpr(Expr *RetValExp, QualType lhsType, 9977 SourceLocation ReturnLoc, 9978 bool isObjCMethod, 9979 const AttrVec *Attrs, 9980 const FunctionDecl *FD) { 9981 // Check if the return value is null but should not be. 9982 if (((Attrs && hasSpecificAttr<ReturnsNonNullAttr>(*Attrs)) || 9983 (!isObjCMethod && isNonNullType(Context, lhsType))) && 9984 CheckNonNullExpr(*this, RetValExp)) 9985 Diag(ReturnLoc, diag::warn_null_ret) 9986 << (isObjCMethod ? 
1 : 0) << RetValExp->getSourceRange();
9987
9988   // C++11 [basic.stc.dynamic.allocation]p4:
9989   //   If an allocation function declared with a non-throwing
9990   //   exception-specification fails to allocate storage, it shall return
9991   //   a null pointer. Any other allocation function that fails to allocate
9992   //   storage shall indicate failure only by throwing an exception [...]
9993   if (FD) {
9994     OverloadedOperatorKind Op = FD->getOverloadedOperator();
9995     if (Op == OO_New || Op == OO_Array_New) {
9996       const FunctionProtoType *Proto
9997         = FD->getType()->castAs<FunctionProtoType>();
9998       if (!Proto->isNothrow(/*ResultIfDependent*/true) &&
9999           CheckNonNullExpr(*this, RetValExp))
10000         Diag(ReturnLoc, diag::warn_operator_new_returns_null)
10001           << FD << getLangOpts().CPlusPlus11;
10002     }
10003   }
10004 }
10005
10006 //===--- CHECK: Floating-Point comparisons (-Wfloat-equal) ---------------===//
10007
10008 /// Check for comparisons of floating point operands using != and ==.
10009 /// Issue a warning if these are not self-comparisons, as they are not likely
10010 /// to do what the programmer intended.
10011 void Sema::CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr *RHS) {
10012   Expr* LeftExprSansParen = LHS->IgnoreParenImpCasts();
10013   Expr* RightExprSansParen = RHS->IgnoreParenImpCasts();
10014
10015   // Special case: check for x == x (which is OK).
10016   // Do not emit warnings for such cases.
10017   if (DeclRefExpr* DRL = dyn_cast<DeclRefExpr>(LeftExprSansParen))
10018     if (DeclRefExpr* DRR = dyn_cast<DeclRefExpr>(RightExprSansParen))
10019       if (DRL->getDecl() == DRR->getDecl())
10020         return;
10021
10022   // Special case: check for comparisons against literals that can be exactly
10023   // represented by APFloat. In such cases, do not emit a warning. This
10024   // is a heuristic: often comparisons against such literals are used to
10025   // detect if a value in a variable has not changed. This clearly can
10026   // lead to false negatives.
10027   if (FloatingLiteral* FLL = dyn_cast<FloatingLiteral>(LeftExprSansParen)) {
10028     if (FLL->isExact())
10029       return;
10030   } else
10031     if (FloatingLiteral* FLR = dyn_cast<FloatingLiteral>(RightExprSansParen))
10032       if (FLR->isExact())
10033         return;
10034
10035   // Check for comparisons where one side is a call to a builtin function.
10036   if (CallExpr* CL = dyn_cast<CallExpr>(LeftExprSansParen))
10037     if (CL->getBuiltinCallee())
10038       return;
10039
10040   if (CallExpr* CR = dyn_cast<CallExpr>(RightExprSansParen))
10041     if (CR->getBuiltinCallee())
10042       return;
10043
10044   // Emit the diagnostic.
10045   Diag(Loc, diag::warn_floatingpoint_eq)
10046     << LHS->getSourceRange() << RHS->getSourceRange();
10047 }
10048
10049 //===--- CHECK: Integer mixed-sign comparisons (-Wsign-compare) --------===//
10050 //===--- CHECK: Lossy implicit conversions (-Wconversion) --------------===//
10051
10052 namespace {
10053
10054 /// Structure recording the 'active' range of an integer-valued
10055 /// expression.
10056 struct IntRange {
10057   /// The number of bits active in the int.
10058   unsigned Width;
10059
10060   /// True if the int is known not to have negative values.
10061   bool NonNegative;
10062
10063   IntRange(unsigned Width, bool NonNegative)
10064       : Width(Width), NonNegative(NonNegative) {}
10065
10066   /// Returns the range of the bool type.
10067   static IntRange forBoolType() {
10068     return IntRange(1, true);
10069   }
10070
10071   /// Returns the range of an opaque value of the given integral type.
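  /// For example (illustrative): in C++, 'enum Color { Red, Green, Blue }'
  /// yields a 2-bit non-negative range, while plain 'int' yields its full
  /// bit-width as a signed range.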
10072 static IntRange forValueOfType(ASTContext &C, QualType T) { 10073 return forValueOfCanonicalType(C, 10074 T->getCanonicalTypeInternal().getTypePtr()); 10075 } 10076 10077 /// Returns the range of an opaque value of a canonical integral type. 10078 static IntRange forValueOfCanonicalType(ASTContext &C, const Type *T) { 10079 assert(T->isCanonicalUnqualified()); 10080 10081 if (const VectorType *VT = dyn_cast<VectorType>(T)) 10082 T = VT->getElementType().getTypePtr(); 10083 if (const ComplexType *CT = dyn_cast<ComplexType>(T)) 10084 T = CT->getElementType().getTypePtr(); 10085 if (const AtomicType *AT = dyn_cast<AtomicType>(T)) 10086 T = AT->getValueType().getTypePtr(); 10087 10088 if (!C.getLangOpts().CPlusPlus) { 10089 // For enum types in C code, use the underlying datatype. 10090 if (const EnumType *ET = dyn_cast<EnumType>(T)) 10091 T = ET->getDecl()->getIntegerType().getDesugaredType(C).getTypePtr(); 10092 } else if (const EnumType *ET = dyn_cast<EnumType>(T)) { 10093 // For enum types in C++, use the known bit width of the enumerators. 10094 EnumDecl *Enum = ET->getDecl(); 10095 // In C++11, enums can have a fixed underlying type. Use this type to 10096 // compute the range. 10097 if (Enum->isFixed()) { 10098 return IntRange(C.getIntWidth(QualType(T, 0)), 10099 !ET->isSignedIntegerOrEnumerationType()); 10100 } 10101 10102 unsigned NumPositive = Enum->getNumPositiveBits(); 10103 unsigned NumNegative = Enum->getNumNegativeBits(); 10104 10105 if (NumNegative == 0) 10106 return IntRange(NumPositive, true/*NonNegative*/); 10107 else 10108 return IntRange(std::max(NumPositive + 1, NumNegative), 10109 false/*NonNegative*/); 10110 } 10111 10112 const BuiltinType *BT = cast<BuiltinType>(T); 10113 assert(BT->isInteger()); 10114 10115 return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger()); 10116 } 10117 10118 /// Returns the "target" range of a canonical integral type, i.e. 10119 /// the range of values expressible in the type. 10120 /// 10121 /// This matches forValueOfCanonicalType except that enums have the 10122 /// full range of their type, not the range of their enumerators. 10123 static IntRange forTargetOfCanonicalType(ASTContext &C, const Type *T) { 10124 assert(T->isCanonicalUnqualified()); 10125 10126 if (const VectorType *VT = dyn_cast<VectorType>(T)) 10127 T = VT->getElementType().getTypePtr(); 10128 if (const ComplexType *CT = dyn_cast<ComplexType>(T)) 10129 T = CT->getElementType().getTypePtr(); 10130 if (const AtomicType *AT = dyn_cast<AtomicType>(T)) 10131 T = AT->getValueType().getTypePtr(); 10132 if (const EnumType *ET = dyn_cast<EnumType>(T)) 10133 T = C.getCanonicalType(ET->getDecl()->getIntegerType()).getTypePtr(); 10134 10135 const BuiltinType *BT = cast<BuiltinType>(T); 10136 assert(BT->isInteger()); 10137 10138 return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger()); 10139 } 10140 10141 /// Returns the supremum of two ranges: i.e. their conservative merge. 10142 static IntRange join(IntRange L, IntRange R) { 10143 return IntRange(std::max(L.Width, R.Width), 10144 L.NonNegative && R.NonNegative); 10145 } 10146 10147 /// Returns the infinum of two ranges: i.e. their aggressive merge. 
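  /// For example (illustrative): meet({Width=32, signed}, {Width=8, unsigned})
  /// is {Width=8, non-negative}, whereas join of the same pair is
  /// {Width=32, signed}.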
10148 static IntRange meet(IntRange L, IntRange R) { 10149 return IntRange(std::min(L.Width, R.Width), 10150 L.NonNegative || R.NonNegative); 10151 } 10152 }; 10153 10154 } // namespace 10155 10156 static IntRange GetValueRange(ASTContext &C, llvm::APSInt &value, 10157 unsigned MaxWidth) { 10158 if (value.isSigned() && value.isNegative()) 10159 return IntRange(value.getMinSignedBits(), false); 10160 10161 if (value.getBitWidth() > MaxWidth) 10162 value = value.trunc(MaxWidth); 10163 10164 // isNonNegative() just checks the sign bit without considering 10165 // signedness. 10166 return IntRange(value.getActiveBits(), true); 10167 } 10168 10169 static IntRange GetValueRange(ASTContext &C, APValue &result, QualType Ty, 10170 unsigned MaxWidth) { 10171 if (result.isInt()) 10172 return GetValueRange(C, result.getInt(), MaxWidth); 10173 10174 if (result.isVector()) { 10175 IntRange R = GetValueRange(C, result.getVectorElt(0), Ty, MaxWidth); 10176 for (unsigned i = 1, e = result.getVectorLength(); i != e; ++i) { 10177 IntRange El = GetValueRange(C, result.getVectorElt(i), Ty, MaxWidth); 10178 R = IntRange::join(R, El); 10179 } 10180 return R; 10181 } 10182 10183 if (result.isComplexInt()) { 10184 IntRange R = GetValueRange(C, result.getComplexIntReal(), MaxWidth); 10185 IntRange I = GetValueRange(C, result.getComplexIntImag(), MaxWidth); 10186 return IntRange::join(R, I); 10187 } 10188 10189 // This can happen with lossless casts to intptr_t of "based" lvalues. 10190 // Assume it might use arbitrary bits. 10191 // FIXME: The only reason we need to pass the type in here is to get 10192 // the sign right on this one case. It would be nice if APValue 10193 // preserved this. 10194 assert(result.isLValue() || result.isAddrLabelDiff()); 10195 return IntRange(MaxWidth, Ty->isUnsignedIntegerOrEnumerationType()); 10196 } 10197 10198 static QualType GetExprType(const Expr *E) { 10199 QualType Ty = E->getType(); 10200 if (const AtomicType *AtomicRHS = Ty->getAs<AtomicType>()) 10201 Ty = AtomicRHS->getValueType(); 10202 return Ty; 10203 } 10204 10205 /// Pseudo-evaluate the given integer expression, estimating the 10206 /// range of values it might take. 10207 /// 10208 /// \param MaxWidth - the width to which the value will be truncated 10209 static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth, 10210 bool InConstantContext) { 10211 E = E->IgnoreParens(); 10212 10213 // Try a full evaluation first. 10214 Expr::EvalResult result; 10215 if (E->EvaluateAsRValue(result, C, InConstantContext)) 10216 return GetValueRange(C, result.Val, GetExprType(E), MaxWidth); 10217 10218 // I think we only want to look through implicit casts here; if the 10219 // user has an explicit widening cast, we should treat the value as 10220 // being of the new, wider type. 10221 if (const auto *CE = dyn_cast<ImplicitCastExpr>(E)) { 10222 if (CE->getCastKind() == CK_NoOp || CE->getCastKind() == CK_LValueToRValue) 10223 return GetExprRange(C, CE->getSubExpr(), MaxWidth, InConstantContext); 10224 10225 IntRange OutputTypeRange = IntRange::forValueOfType(C, GetExprType(CE)); 10226 10227 bool isIntegerCast = CE->getCastKind() == CK_IntegralCast || 10228 CE->getCastKind() == CK_BooleanToSignedIntegral; 10229 10230 // Assume that non-integer casts can span the full range of the type. 
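    // (e.g. a floating-point-to-integer cast gives no bound tighter than the
    // destination type itself.)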
10231 if (!isIntegerCast) 10232 return OutputTypeRange; 10233 10234 IntRange SubRange = GetExprRange(C, CE->getSubExpr(), 10235 std::min(MaxWidth, OutputTypeRange.Width), 10236 InConstantContext); 10237 10238 // Bail out if the subexpr's range is as wide as the cast type. 10239 if (SubRange.Width >= OutputTypeRange.Width) 10240 return OutputTypeRange; 10241 10242 // Otherwise, we take the smaller width, and we're non-negative if 10243 // either the output type or the subexpr is. 10244 return IntRange(SubRange.Width, 10245 SubRange.NonNegative || OutputTypeRange.NonNegative); 10246 } 10247 10248 if (const auto *CO = dyn_cast<ConditionalOperator>(E)) { 10249 // If we can fold the condition, just take that operand. 10250 bool CondResult; 10251 if (CO->getCond()->EvaluateAsBooleanCondition(CondResult, C)) 10252 return GetExprRange(C, 10253 CondResult ? CO->getTrueExpr() : CO->getFalseExpr(), 10254 MaxWidth, InConstantContext); 10255 10256 // Otherwise, conservatively merge. 10257 IntRange L = 10258 GetExprRange(C, CO->getTrueExpr(), MaxWidth, InConstantContext); 10259 IntRange R = 10260 GetExprRange(C, CO->getFalseExpr(), MaxWidth, InConstantContext); 10261 return IntRange::join(L, R); 10262 } 10263 10264 if (const auto *BO = dyn_cast<BinaryOperator>(E)) { 10265 switch (BO->getOpcode()) { 10266 case BO_Cmp: 10267 llvm_unreachable("builtin <=> should have class type"); 10268 10269 // Boolean-valued operations are single-bit and positive. 10270 case BO_LAnd: 10271 case BO_LOr: 10272 case BO_LT: 10273 case BO_GT: 10274 case BO_LE: 10275 case BO_GE: 10276 case BO_EQ: 10277 case BO_NE: 10278 return IntRange::forBoolType(); 10279 10280 // The type of the assignments is the type of the LHS, so the RHS 10281 // is not necessarily the same type. 10282 case BO_MulAssign: 10283 case BO_DivAssign: 10284 case BO_RemAssign: 10285 case BO_AddAssign: 10286 case BO_SubAssign: 10287 case BO_XorAssign: 10288 case BO_OrAssign: 10289 // TODO: bitfields? 10290 return IntRange::forValueOfType(C, GetExprType(E)); 10291 10292 // Simple assignments just pass through the RHS, which will have 10293 // been coerced to the LHS type. 10294 case BO_Assign: 10295 // TODO: bitfields? 10296 return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext); 10297 10298 // Operations with opaque sources are black-listed. 10299 case BO_PtrMemD: 10300 case BO_PtrMemI: 10301 return IntRange::forValueOfType(C, GetExprType(E)); 10302 10303 // Bitwise-and uses the *infinum* of the two source ranges. 10304 case BO_And: 10305 case BO_AndAssign: 10306 return IntRange::meet( 10307 GetExprRange(C, BO->getLHS(), MaxWidth, InConstantContext), 10308 GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext)); 10309 10310 // Left shift gets black-listed based on a judgement call. 10311 case BO_Shl: 10312 // ...except that we want to treat '1 << (blah)' as logically 10313 // positive. It's an important idiom. 10314 if (IntegerLiteral *I 10315 = dyn_cast<IntegerLiteral>(BO->getLHS()->IgnoreParenCasts())) { 10316 if (I->getValue() == 1) { 10317 IntRange R = IntRange::forValueOfType(C, GetExprType(E)); 10318 return IntRange(R.Width, /*NonNegative*/ true); 10319 } 10320 } 10321 LLVM_FALLTHROUGH; 10322 10323 case BO_ShlAssign: 10324 return IntRange::forValueOfType(C, GetExprType(E)); 10325 10326 // Right shift by a constant can narrow its left argument. 
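    // For example, 'x >> 24' where 'x' is a 32-bit unsigned value is left
    // with at most 8 active bits.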
10327 case BO_Shr: 10328 case BO_ShrAssign: { 10329 IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth, InConstantContext); 10330 10331 // If the shift amount is a positive constant, drop the width by 10332 // that much. 10333 llvm::APSInt shift; 10334 if (BO->getRHS()->isIntegerConstantExpr(shift, C) && 10335 shift.isNonNegative()) { 10336 unsigned zext = shift.getZExtValue(); 10337 if (zext >= L.Width) 10338 L.Width = (L.NonNegative ? 0 : 1); 10339 else 10340 L.Width -= zext; 10341 } 10342 10343 return L; 10344 } 10345 10346 // Comma acts as its right operand. 10347 case BO_Comma: 10348 return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext); 10349 10350 // Black-list pointer subtractions. 10351 case BO_Sub: 10352 if (BO->getLHS()->getType()->isPointerType()) 10353 return IntRange::forValueOfType(C, GetExprType(E)); 10354 break; 10355 10356 // The width of a division result is mostly determined by the size 10357 // of the LHS. 10358 case BO_Div: { 10359 // Don't 'pre-truncate' the operands. 10360 unsigned opWidth = C.getIntWidth(GetExprType(E)); 10361 IntRange L = GetExprRange(C, BO->getLHS(), opWidth, InConstantContext); 10362 10363 // If the divisor is constant, use that. 10364 llvm::APSInt divisor; 10365 if (BO->getRHS()->isIntegerConstantExpr(divisor, C)) { 10366 unsigned log2 = divisor.logBase2(); // floor(log_2(divisor)) 10367 if (log2 >= L.Width) 10368 L.Width = (L.NonNegative ? 0 : 1); 10369 else 10370 L.Width = std::min(L.Width - log2, MaxWidth); 10371 return L; 10372 } 10373 10374 // Otherwise, just use the LHS's width. 10375 IntRange R = GetExprRange(C, BO->getRHS(), opWidth, InConstantContext); 10376 return IntRange(L.Width, L.NonNegative && R.NonNegative); 10377 } 10378 10379 // The result of a remainder can't be larger than the result of 10380 // either side. 10381 case BO_Rem: { 10382 // Don't 'pre-truncate' the operands. 10383 unsigned opWidth = C.getIntWidth(GetExprType(E)); 10384 IntRange L = GetExprRange(C, BO->getLHS(), opWidth, InConstantContext); 10385 IntRange R = GetExprRange(C, BO->getRHS(), opWidth, InConstantContext); 10386 10387 IntRange meet = IntRange::meet(L, R); 10388 meet.Width = std::min(meet.Width, MaxWidth); 10389 return meet; 10390 } 10391 10392 // The default behavior is okay for these. 10393 case BO_Mul: 10394 case BO_Add: 10395 case BO_Xor: 10396 case BO_Or: 10397 break; 10398 } 10399 10400 // The default case is to treat the operation as if it were closed 10401 // on the narrowest type that encompasses both operands. 10402 IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth, InConstantContext); 10403 IntRange R = GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext); 10404 return IntRange::join(L, R); 10405 } 10406 10407 if (const auto *UO = dyn_cast<UnaryOperator>(E)) { 10408 switch (UO->getOpcode()) { 10409 // Boolean-valued operations are white-listed. 10410 case UO_LNot: 10411 return IntRange::forBoolType(); 10412 10413 // Operations with opaque sources are black-listed. 
10414 case UO_Deref: 10415 case UO_AddrOf: // should be impossible 10416 return IntRange::forValueOfType(C, GetExprType(E)); 10417 10418 default: 10419 return GetExprRange(C, UO->getSubExpr(), MaxWidth, InConstantContext); 10420 } 10421 } 10422 10423 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E)) 10424 return GetExprRange(C, OVE->getSourceExpr(), MaxWidth, InConstantContext); 10425 10426 if (const auto *BitField = E->getSourceBitField()) 10427 return IntRange(BitField->getBitWidthValue(C), 10428 BitField->getType()->isUnsignedIntegerOrEnumerationType()); 10429 10430 return IntRange::forValueOfType(C, GetExprType(E)); 10431 } 10432 10433 static IntRange GetExprRange(ASTContext &C, const Expr *E, 10434 bool InConstantContext) { 10435 return GetExprRange(C, E, C.getIntWidth(GetExprType(E)), InConstantContext); 10436 } 10437 10438 /// Checks whether the given value, which currently has the given 10439 /// source semantics, has the same value when coerced through the 10440 /// target semantics. 10441 static bool IsSameFloatAfterCast(const llvm::APFloat &value, 10442 const llvm::fltSemantics &Src, 10443 const llvm::fltSemantics &Tgt) { 10444 llvm::APFloat truncated = value; 10445 10446 bool ignored; 10447 truncated.convert(Src, llvm::APFloat::rmNearestTiesToEven, &ignored); 10448 truncated.convert(Tgt, llvm::APFloat::rmNearestTiesToEven, &ignored); 10449 10450 return truncated.bitwiseIsEqual(value); 10451 } 10452 10453 /// Checks whether the given value, which currently has the given 10454 /// source semantics, has the same value when coerced through the 10455 /// target semantics. 10456 /// 10457 /// The value might be a vector of floats (or a complex number). 10458 static bool IsSameFloatAfterCast(const APValue &value, 10459 const llvm::fltSemantics &Src, 10460 const llvm::fltSemantics &Tgt) { 10461 if (value.isFloat()) 10462 return IsSameFloatAfterCast(value.getFloat(), Src, Tgt); 10463 10464 if (value.isVector()) { 10465 for (unsigned i = 0, e = value.getVectorLength(); i != e; ++i) 10466 if (!IsSameFloatAfterCast(value.getVectorElt(i), Src, Tgt)) 10467 return false; 10468 return true; 10469 } 10470 10471 assert(value.isComplexFloat()); 10472 return (IsSameFloatAfterCast(value.getComplexFloatReal(), Src, Tgt) && 10473 IsSameFloatAfterCast(value.getComplexFloatImag(), Src, Tgt)); 10474 } 10475 10476 static void AnalyzeImplicitConversions(Sema &S, Expr *E, SourceLocation CC, 10477 bool IsListInit = false); 10478 10479 static bool IsEnumConstOrFromMacro(Sema &S, Expr *E) { 10480 // Suppress cases where we are comparing against an enum constant. 10481 if (const DeclRefExpr *DR = 10482 dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) 10483 if (isa<EnumConstantDecl>(DR->getDecl())) 10484 return true; 10485 10486 // Suppress cases where the value is expanded from a macro, unless that macro 10487 // is how a language represents a boolean literal. This is the case in both C 10488 // and Objective-C. 
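  // (Concretely, the check below treats 'true'/'false' and Objective-C's
  // 'YES'/'NO' as boolean literals rather than ordinary macros.)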
10489 SourceLocation BeginLoc = E->getBeginLoc(); 10490 if (BeginLoc.isMacroID()) { 10491 StringRef MacroName = Lexer::getImmediateMacroName( 10492 BeginLoc, S.getSourceManager(), S.getLangOpts()); 10493 return MacroName != "YES" && MacroName != "NO" && 10494 MacroName != "true" && MacroName != "false"; 10495 } 10496 10497 return false; 10498 } 10499 10500 static bool isKnownToHaveUnsignedValue(Expr *E) { 10501 return E->getType()->isIntegerType() && 10502 (!E->getType()->isSignedIntegerType() || 10503 !E->IgnoreParenImpCasts()->getType()->isSignedIntegerType()); 10504 } 10505 10506 namespace { 10507 /// The promoted range of values of a type. In general this has the 10508 /// following structure: 10509 /// 10510 /// |-----------| . . . |-----------| 10511 /// ^ ^ ^ ^ 10512 /// Min HoleMin HoleMax Max 10513 /// 10514 /// ... where there is only a hole if a signed type is promoted to unsigned 10515 /// (in which case Min and Max are the smallest and largest representable 10516 /// values). 10517 struct PromotedRange { 10518 // Min, or HoleMax if there is a hole. 10519 llvm::APSInt PromotedMin; 10520 // Max, or HoleMin if there is a hole. 10521 llvm::APSInt PromotedMax; 10522 10523 PromotedRange(IntRange R, unsigned BitWidth, bool Unsigned) { 10524 if (R.Width == 0) 10525 PromotedMin = PromotedMax = llvm::APSInt(BitWidth, Unsigned); 10526 else if (R.Width >= BitWidth && !Unsigned) { 10527 // Promotion made the type *narrower*. This happens when promoting 10528 // a < 32-bit unsigned / <= 32-bit signed bit-field to 'signed int'. 10529 // Treat all values of 'signed int' as being in range for now. 10530 PromotedMin = llvm::APSInt::getMinValue(BitWidth, Unsigned); 10531 PromotedMax = llvm::APSInt::getMaxValue(BitWidth, Unsigned); 10532 } else { 10533 PromotedMin = llvm::APSInt::getMinValue(R.Width, R.NonNegative) 10534 .extOrTrunc(BitWidth); 10535 PromotedMin.setIsUnsigned(Unsigned); 10536 10537 PromotedMax = llvm::APSInt::getMaxValue(R.Width, R.NonNegative) 10538 .extOrTrunc(BitWidth); 10539 PromotedMax.setIsUnsigned(Unsigned); 10540 } 10541 } 10542 10543 // Determine whether this range is contiguous (has no hole). 10544 bool isContiguous() const { return PromotedMin <= PromotedMax; } 10545 10546 // Where a constant value is within the range. 10547 enum ComparisonResult { 10548 LT = 0x1, 10549 LE = 0x2, 10550 GT = 0x4, 10551 GE = 0x8, 10552 EQ = 0x10, 10553 NE = 0x20, 10554 InRangeFlag = 0x40, 10555 10556 Less = LE | LT | NE, 10557 Min = LE | InRangeFlag, 10558 InRange = InRangeFlag, 10559 Max = GE | InRangeFlag, 10560 Greater = GE | GT | NE, 10561 10562 OnlyValue = LE | GE | EQ | InRangeFlag, 10563 InHole = NE 10564 }; 10565 10566 ComparisonResult compare(const llvm::APSInt &Value) const { 10567 assert(Value.getBitWidth() == PromotedMin.getBitWidth() && 10568 Value.isUnsigned() == PromotedMin.isUnsigned()); 10569 if (!isContiguous()) { 10570 assert(Value.isUnsigned() && "discontiguous range for signed compare"); 10571 if (Value.isMinValue()) return Min; 10572 if (Value.isMaxValue()) return Max; 10573 if (Value >= PromotedMin) return InRange; 10574 if (Value <= PromotedMax) return InRange; 10575 return InHole; 10576 } 10577 10578 switch (llvm::APSInt::compareValues(Value, PromotedMin)) { 10579 case -1: return Less; 10580 case 0: return PromotedMin == PromotedMax ? 
OnlyValue : Min; 10581 case 1: 10582 switch (llvm::APSInt::compareValues(Value, PromotedMax)) { 10583 case -1: return InRange; 10584 case 0: return Max; 10585 case 1: return Greater; 10586 } 10587 } 10588 10589 llvm_unreachable("impossible compare result"); 10590 } 10591 10592 static llvm::Optional<StringRef> 10593 constantValue(BinaryOperatorKind Op, ComparisonResult R, bool ConstantOnRHS) { 10594 if (Op == BO_Cmp) { 10595 ComparisonResult LTFlag = LT, GTFlag = GT; 10596 if (ConstantOnRHS) std::swap(LTFlag, GTFlag); 10597 10598 if (R & EQ) return StringRef("'std::strong_ordering::equal'"); 10599 if (R & LTFlag) return StringRef("'std::strong_ordering::less'"); 10600 if (R & GTFlag) return StringRef("'std::strong_ordering::greater'"); 10601 return llvm::None; 10602 } 10603 10604 ComparisonResult TrueFlag, FalseFlag; 10605 if (Op == BO_EQ) { 10606 TrueFlag = EQ; 10607 FalseFlag = NE; 10608 } else if (Op == BO_NE) { 10609 TrueFlag = NE; 10610 FalseFlag = EQ; 10611 } else { 10612 if ((Op == BO_LT || Op == BO_GE) ^ ConstantOnRHS) { 10613 TrueFlag = LT; 10614 FalseFlag = GE; 10615 } else { 10616 TrueFlag = GT; 10617 FalseFlag = LE; 10618 } 10619 if (Op == BO_GE || Op == BO_LE) 10620 std::swap(TrueFlag, FalseFlag); 10621 } 10622 if (R & TrueFlag) 10623 return StringRef("true"); 10624 if (R & FalseFlag) 10625 return StringRef("false"); 10626 return llvm::None; 10627 } 10628 }; 10629 } 10630 10631 static bool HasEnumType(Expr *E) { 10632 // Strip off implicit integral promotions. 10633 while (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) { 10634 if (ICE->getCastKind() != CK_IntegralCast && 10635 ICE->getCastKind() != CK_NoOp) 10636 break; 10637 E = ICE->getSubExpr(); 10638 } 10639 10640 return E->getType()->isEnumeralType(); 10641 } 10642 10643 static int classifyConstantValue(Expr *Constant) { 10644 // The values of this enumeration are used in the diagnostics 10645 // diag::warn_out_of_range_compare and diag::warn_tautological_bool_compare. 10646 enum ConstantValueKind { 10647 Miscellaneous = 0, 10648 LiteralTrue, 10649 LiteralFalse 10650 }; 10651 if (auto *BL = dyn_cast<CXXBoolLiteralExpr>(Constant)) 10652 return BL->getValue() ? ConstantValueKind::LiteralTrue 10653 : ConstantValueKind::LiteralFalse; 10654 return ConstantValueKind::Miscellaneous; 10655 } 10656 10657 static bool CheckTautologicalComparison(Sema &S, BinaryOperator *E, 10658 Expr *Constant, Expr *Other, 10659 const llvm::APSInt &Value, 10660 bool RhsConstant) { 10661 if (S.inTemplateInstantiation()) 10662 return false; 10663 10664 Expr *OriginalOther = Other; 10665 10666 Constant = Constant->IgnoreParenImpCasts(); 10667 Other = Other->IgnoreParenImpCasts(); 10668 10669 // Suppress warnings on tautological comparisons between values of the same 10670 // enumeration type. There are only two ways we could warn on this: 10671 // - If the constant is outside the range of representable values of 10672 // the enumeration. In such a case, we should warn about the cast 10673 // to enumeration type, not about the comparison. 10674 // - If the constant is the maximum / minimum in-range value. For an 10675 // enumeratin type, such comparisons can be meaningful and useful. 10676 if (Constant->getType()->isEnumeralType() && 10677 S.Context.hasSameUnqualifiedType(Constant->getType(), Other->getType())) 10678 return false; 10679 10680 // TODO: Investigate using GetExprRange() to get tighter bounds 10681 // on the bit ranges. 
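  // Example of what this function diagnoses (illustrative): given
  // 'unsigned u;', the comparison 'u < 0' is always false and 'u >= 0' is
  // always true.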
10682 QualType OtherT = Other->getType(); 10683 if (const auto *AT = OtherT->getAs<AtomicType>()) 10684 OtherT = AT->getValueType(); 10685 IntRange OtherRange = IntRange::forValueOfType(S.Context, OtherT); 10686 10687 // Special case for ObjC BOOL on targets where its a typedef for a signed char 10688 // (Namely, macOS). 10689 bool IsObjCSignedCharBool = S.getLangOpts().ObjC && 10690 S.NSAPIObj->isObjCBOOLType(OtherT) && 10691 OtherT->isSpecificBuiltinType(BuiltinType::SChar); 10692 10693 // Whether we're treating Other as being a bool because of the form of 10694 // expression despite it having another type (typically 'int' in C). 10695 bool OtherIsBooleanDespiteType = 10696 !OtherT->isBooleanType() && Other->isKnownToHaveBooleanValue(); 10697 if (OtherIsBooleanDespiteType || IsObjCSignedCharBool) 10698 OtherRange = IntRange::forBoolType(); 10699 10700 // Determine the promoted range of the other type and see if a comparison of 10701 // the constant against that range is tautological. 10702 PromotedRange OtherPromotedRange(OtherRange, Value.getBitWidth(), 10703 Value.isUnsigned()); 10704 auto Cmp = OtherPromotedRange.compare(Value); 10705 auto Result = PromotedRange::constantValue(E->getOpcode(), Cmp, RhsConstant); 10706 if (!Result) 10707 return false; 10708 10709 // Suppress the diagnostic for an in-range comparison if the constant comes 10710 // from a macro or enumerator. We don't want to diagnose 10711 // 10712 // some_long_value <= INT_MAX 10713 // 10714 // when sizeof(int) == sizeof(long). 10715 bool InRange = Cmp & PromotedRange::InRangeFlag; 10716 if (InRange && IsEnumConstOrFromMacro(S, Constant)) 10717 return false; 10718 10719 // If this is a comparison to an enum constant, include that 10720 // constant in the diagnostic. 10721 const EnumConstantDecl *ED = nullptr; 10722 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Constant)) 10723 ED = dyn_cast<EnumConstantDecl>(DR->getDecl()); 10724 10725 // Should be enough for uint128 (39 decimal digits) 10726 SmallString<64> PrettySourceValue; 10727 llvm::raw_svector_ostream OS(PrettySourceValue); 10728 if (ED) { 10729 OS << '\'' << *ED << "' (" << Value << ")"; 10730 } else if (auto *BL = dyn_cast<ObjCBoolLiteralExpr>( 10731 Constant->IgnoreParenImpCasts())) { 10732 OS << (BL->getValue() ? "YES" : "NO"); 10733 } else { 10734 OS << Value; 10735 } 10736 10737 if (IsObjCSignedCharBool) { 10738 S.DiagRuntimeBehavior(E->getOperatorLoc(), E, 10739 S.PDiag(diag::warn_tautological_compare_objc_bool) 10740 << OS.str() << *Result); 10741 return true; 10742 } 10743 10744 // FIXME: We use a somewhat different formatting for the in-range cases and 10745 // cases involving boolean values for historical reasons. We should pick a 10746 // consistent way of presenting these diagnostics. 10747 if (!InRange || Other->isKnownToHaveBooleanValue()) { 10748 10749 S.DiagRuntimeBehavior( 10750 E->getOperatorLoc(), E, 10751 S.PDiag(!InRange ? diag::warn_out_of_range_compare 10752 : diag::warn_tautological_bool_compare) 10753 << OS.str() << classifyConstantValue(Constant) << OtherT 10754 << OtherIsBooleanDespiteType << *Result 10755 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange()); 10756 } else { 10757 unsigned Diag = (isKnownToHaveUnsignedValue(OriginalOther) && Value == 0) 10758 ? (HasEnumType(OriginalOther) 10759 ? 
diag::warn_unsigned_enum_always_true_comparison 10760 : diag::warn_unsigned_always_true_comparison) 10761 : diag::warn_tautological_constant_compare; 10762 10763 S.Diag(E->getOperatorLoc(), Diag) 10764 << RhsConstant << OtherT << E->getOpcodeStr() << OS.str() << *Result 10765 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange(); 10766 } 10767 10768 return true; 10769 } 10770 10771 /// Analyze the operands of the given comparison. Implements the 10772 /// fallback case from AnalyzeComparison. 10773 static void AnalyzeImpConvsInComparison(Sema &S, BinaryOperator *E) { 10774 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 10775 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 10776 } 10777 10778 /// Implements -Wsign-compare. 10779 /// 10780 /// \param E the binary operator to check for warnings 10781 static void AnalyzeComparison(Sema &S, BinaryOperator *E) { 10782 // The type the comparison is being performed in. 10783 QualType T = E->getLHS()->getType(); 10784 10785 // Only analyze comparison operators where both sides have been converted to 10786 // the same type. 10787 if (!S.Context.hasSameUnqualifiedType(T, E->getRHS()->getType())) 10788 return AnalyzeImpConvsInComparison(S, E); 10789 10790 // Don't analyze value-dependent comparisons directly. 10791 if (E->isValueDependent()) 10792 return AnalyzeImpConvsInComparison(S, E); 10793 10794 Expr *LHS = E->getLHS(); 10795 Expr *RHS = E->getRHS(); 10796 10797 if (T->isIntegralType(S.Context)) { 10798 llvm::APSInt RHSValue; 10799 llvm::APSInt LHSValue; 10800 10801 bool IsRHSIntegralLiteral = RHS->isIntegerConstantExpr(RHSValue, S.Context); 10802 bool IsLHSIntegralLiteral = LHS->isIntegerConstantExpr(LHSValue, S.Context); 10803 10804 // We don't care about expressions whose result is a constant. 10805 if (IsRHSIntegralLiteral && IsLHSIntegralLiteral) 10806 return AnalyzeImpConvsInComparison(S, E); 10807 10808 // We only care about expressions where just one side is literal 10809 if (IsRHSIntegralLiteral ^ IsLHSIntegralLiteral) { 10810 // Is the constant on the RHS or LHS? 10811 const bool RhsConstant = IsRHSIntegralLiteral; 10812 Expr *Const = RhsConstant ? RHS : LHS; 10813 Expr *Other = RhsConstant ? LHS : RHS; 10814 const llvm::APSInt &Value = RhsConstant ? RHSValue : LHSValue; 10815 10816 // Check whether an integer constant comparison results in a value 10817 // of 'true' or 'false'. 10818 if (CheckTautologicalComparison(S, E, Const, Other, Value, RhsConstant)) 10819 return AnalyzeImpConvsInComparison(S, E); 10820 } 10821 } 10822 10823 if (!T->hasUnsignedIntegerRepresentation()) { 10824 // We don't do anything special if this isn't an unsigned integral 10825 // comparison: we're only interested in integral comparisons, and 10826 // signed comparisons only happen in cases we don't care to warn about. 10827 return AnalyzeImpConvsInComparison(S, E); 10828 } 10829 10830 LHS = LHS->IgnoreParenImpCasts(); 10831 RHS = RHS->IgnoreParenImpCasts(); 10832 10833 if (!S.getLangOpts().CPlusPlus) { 10834 // Avoid warning about comparison of integers with different signs when 10835 // RHS/LHS has a `typeof(E)` type whose sign is different from the sign of 10836 // the type of `E`. 
10837     if (const auto *TET = dyn_cast<TypeOfExprType>(LHS->getType()))
10838       LHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts();
10839     if (const auto *TET = dyn_cast<TypeOfExprType>(RHS->getType()))
10840       RHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts();
10841   }
10842
10843   // Check to see if one of the (unmodified) operands is of different
10844   // signedness.
10845   Expr *signedOperand, *unsignedOperand;
10846   if (LHS->getType()->hasSignedIntegerRepresentation()) {
10847     assert(!RHS->getType()->hasSignedIntegerRepresentation() &&
10848            "unsigned comparison between two signed integer expressions?");
10849     signedOperand = LHS;
10850     unsignedOperand = RHS;
10851   } else if (RHS->getType()->hasSignedIntegerRepresentation()) {
10852     signedOperand = RHS;
10853     unsignedOperand = LHS;
10854   } else {
10855     return AnalyzeImpConvsInComparison(S, E);
10856   }
10857
10858   // Otherwise, calculate the effective range of the signed operand.
10859   IntRange signedRange =
10860       GetExprRange(S.Context, signedOperand, S.isConstantEvaluated());
10861
10862   // Go ahead and analyze implicit conversions in the operands. Note
10863   // that we skip the implicit conversions on both sides.
10864   AnalyzeImplicitConversions(S, LHS, E->getOperatorLoc());
10865   AnalyzeImplicitConversions(S, RHS, E->getOperatorLoc());
10866
10867   // If the signed range is non-negative, -Wsign-compare won't fire.
10868   if (signedRange.NonNegative)
10869     return;
10870
10871   // For (in)equality comparisons, if the unsigned operand is a
10872   // constant which cannot collide with an overflowed signed operand,
10873   // then reinterpreting the signed operand as unsigned will not
10874   // change the result of the comparison.
10875   if (E->isEqualityOp()) {
10876     unsigned comparisonWidth = S.Context.getIntWidth(T);
10877     IntRange unsignedRange =
10878         GetExprRange(S.Context, unsignedOperand, S.isConstantEvaluated());
10879
10880     // We should never be unable to prove that the unsigned operand is
10881     // non-negative.
10882     assert(unsignedRange.NonNegative && "unsigned range includes negative?");
10883
10884     if (unsignedRange.Width < comparisonWidth)
10885       return;
10886   }
10887
10888   S.DiagRuntimeBehavior(E->getOperatorLoc(), E,
10889                         S.PDiag(diag::warn_mixed_sign_comparison)
10890                             << LHS->getType() << RHS->getType()
10891                             << LHS->getSourceRange() << RHS->getSourceRange());
10892 }
10893
10894 /// Analyzes an attempt to assign the given value to a bitfield.
10895 ///
10896 /// Returns true if there was something fishy about the attempt.
10897 static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init,
10898                                       SourceLocation InitLoc) {
10899   assert(Bitfield->isBitField());
10900   if (Bitfield->isInvalidDecl())
10901     return false;
10902
10903   // White-list bool bitfields.
10904   QualType BitfieldType = Bitfield->getType();
10905   if (BitfieldType->isBooleanType())
10906     return false;
10907
10908   if (BitfieldType->isEnumeralType()) {
10909     EnumDecl *BitfieldEnumDecl = BitfieldType->castAs<EnumType>()->getDecl();
10910     // If the underlying enum type was not explicitly specified as an unsigned
10911     // type and the enum contains only positive values, MSVC++ will cause an
10912     // inconsistency by storing this as a signed type.
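    // For example (illustrative): given 'enum E { A, B };' with no fixed
    // underlying type, declaring 'E Field : 1;' yields a signed 1-bit
    // bit-field under MSVC, which cannot faithfully store 'B' (the value 1).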
10913 if (S.getLangOpts().CPlusPlus11 && 10914 !BitfieldEnumDecl->getIntegerTypeSourceInfo() && 10915 BitfieldEnumDecl->getNumPositiveBits() > 0 && 10916 BitfieldEnumDecl->getNumNegativeBits() == 0) { 10917 S.Diag(InitLoc, diag::warn_no_underlying_type_specified_for_enum_bitfield) 10918 << BitfieldEnumDecl->getNameAsString(); 10919 } 10920 } 10921 10922 if (Bitfield->getType()->isBooleanType()) 10923 return false; 10924 10925 // Ignore value- or type-dependent expressions. 10926 if (Bitfield->getBitWidth()->isValueDependent() || 10927 Bitfield->getBitWidth()->isTypeDependent() || 10928 Init->isValueDependent() || 10929 Init->isTypeDependent()) 10930 return false; 10931 10932 Expr *OriginalInit = Init->IgnoreParenImpCasts(); 10933 unsigned FieldWidth = Bitfield->getBitWidthValue(S.Context); 10934 10935 Expr::EvalResult Result; 10936 if (!OriginalInit->EvaluateAsInt(Result, S.Context, 10937 Expr::SE_AllowSideEffects)) { 10938 // The RHS is not constant. If the RHS has an enum type, make sure the 10939 // bitfield is wide enough to hold all the values of the enum without 10940 // truncation. 10941 if (const auto *EnumTy = OriginalInit->getType()->getAs<EnumType>()) { 10942 EnumDecl *ED = EnumTy->getDecl(); 10943 bool SignedBitfield = BitfieldType->isSignedIntegerType(); 10944 10945 // Enum types are implicitly signed on Windows, so check if there are any 10946 // negative enumerators to see if the enum was intended to be signed or 10947 // not. 10948 bool SignedEnum = ED->getNumNegativeBits() > 0; 10949 10950 // Check for surprising sign changes when assigning enum values to a 10951 // bitfield of different signedness. If the bitfield is signed and we 10952 // have exactly the right number of bits to store this unsigned enum, 10953 // suggest changing the enum to an unsigned type. This typically happens 10954 // on Windows where unfixed enums always use an underlying type of 'int'. 10955 unsigned DiagID = 0; 10956 if (SignedEnum && !SignedBitfield) { 10957 DiagID = diag::warn_unsigned_bitfield_assigned_signed_enum; 10958 } else if (SignedBitfield && !SignedEnum && 10959 ED->getNumPositiveBits() == FieldWidth) { 10960 DiagID = diag::warn_signed_bitfield_enum_conversion; 10961 } 10962 10963 if (DiagID) { 10964 S.Diag(InitLoc, DiagID) << Bitfield << ED; 10965 TypeSourceInfo *TSI = Bitfield->getTypeSourceInfo(); 10966 SourceRange TypeRange = 10967 TSI ? TSI->getTypeLoc().getSourceRange() : SourceRange(); 10968 S.Diag(Bitfield->getTypeSpecStartLoc(), diag::note_change_bitfield_sign) 10969 << SignedEnum << TypeRange; 10970 } 10971 10972 // Compute the required bitwidth. If the enum has negative values, we need 10973 // one more bit than the normal number of positive bits to represent the 10974 // sign bit. 10975 unsigned BitsNeeded = SignedEnum ? std::max(ED->getNumPositiveBits() + 1, 10976 ED->getNumNegativeBits()) 10977 : ED->getNumPositiveBits(); 10978 10979 // Check the bitwidth. 
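      // For example (illustrative): an enum whose enumerators run from 0 to 4
      // needs 3 bits, so assigning a value of that enum type to a bit-field
      // declared ': 2' is diagnosed below.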
10980 if (BitsNeeded > FieldWidth) { 10981 Expr *WidthExpr = Bitfield->getBitWidth(); 10982 S.Diag(InitLoc, diag::warn_bitfield_too_small_for_enum) 10983 << Bitfield << ED; 10984 S.Diag(WidthExpr->getExprLoc(), diag::note_widen_bitfield) 10985 << BitsNeeded << ED << WidthExpr->getSourceRange(); 10986 } 10987 } 10988 10989 return false; 10990 } 10991 10992 llvm::APSInt Value = Result.Val.getInt(); 10993 10994 unsigned OriginalWidth = Value.getBitWidth(); 10995 10996 if (!Value.isSigned() || Value.isNegative()) 10997 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(OriginalInit)) 10998 if (UO->getOpcode() == UO_Minus || UO->getOpcode() == UO_Not) 10999 OriginalWidth = Value.getMinSignedBits(); 11000 11001 if (OriginalWidth <= FieldWidth) 11002 return false; 11003 11004 // Compute the value which the bitfield will contain. 11005 llvm::APSInt TruncatedValue = Value.trunc(FieldWidth); 11006 TruncatedValue.setIsSigned(BitfieldType->isSignedIntegerType()); 11007 11008 // Check whether the stored value is equal to the original value. 11009 TruncatedValue = TruncatedValue.extend(OriginalWidth); 11010 if (llvm::APSInt::isSameValue(Value, TruncatedValue)) 11011 return false; 11012 11013 // Special-case bitfields of width 1: booleans are naturally 0/1, and 11014 // therefore don't strictly fit into a signed bitfield of width 1. 11015 if (FieldWidth == 1 && Value == 1) 11016 return false; 11017 11018 std::string PrettyValue = Value.toString(10); 11019 std::string PrettyTrunc = TruncatedValue.toString(10); 11020 11021 S.Diag(InitLoc, diag::warn_impcast_bitfield_precision_constant) 11022 << PrettyValue << PrettyTrunc << OriginalInit->getType() 11023 << Init->getSourceRange(); 11024 11025 return true; 11026 } 11027 11028 /// Analyze the given simple or compound assignment for warning-worthy 11029 /// operations. 11030 static void AnalyzeAssignment(Sema &S, BinaryOperator *E) { 11031 // Just recurse on the LHS. 11032 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 11033 11034 // We want to recurse on the RHS as normal unless we're assigning to 11035 // a bitfield. 11036 if (FieldDecl *Bitfield = E->getLHS()->getSourceBitField()) { 11037 if (AnalyzeBitFieldAssignment(S, Bitfield, E->getRHS(), 11038 E->getOperatorLoc())) { 11039 // Recurse, ignoring any implicit conversions on the RHS. 11040 return AnalyzeImplicitConversions(S, E->getRHS()->IgnoreParenImpCasts(), 11041 E->getOperatorLoc()); 11042 } 11043 } 11044 11045 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 11046 11047 // Diagnose implicitly sequentially-consistent atomic assignment. 11048 if (E->getLHS()->getType()->isAtomicType()) 11049 S.Diag(E->getRHS()->getBeginLoc(), diag::warn_atomic_implicit_seq_cst); 11050 } 11051 11052 /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. 11053 static void DiagnoseImpCast(Sema &S, Expr *E, QualType SourceType, QualType T, 11054 SourceLocation CContext, unsigned diag, 11055 bool pruneControlFlow = false) { 11056 if (pruneControlFlow) { 11057 S.DiagRuntimeBehavior(E->getExprLoc(), E, 11058 S.PDiag(diag) 11059 << SourceType << T << E->getSourceRange() 11060 << SourceRange(CContext)); 11061 return; 11062 } 11063 S.Diag(E->getExprLoc(), diag) 11064 << SourceType << T << E->getSourceRange() << SourceRange(CContext); 11065 } 11066 11067 /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. 
11068 static void DiagnoseImpCast(Sema &S, Expr *E, QualType T, 11069 SourceLocation CContext, 11070 unsigned diag, bool pruneControlFlow = false) { 11071 DiagnoseImpCast(S, E, E->getType(), T, CContext, diag, pruneControlFlow); 11072 } 11073 11074 static bool isObjCSignedCharBool(Sema &S, QualType Ty) { 11075 return Ty->isSpecificBuiltinType(BuiltinType::SChar) && 11076 S.getLangOpts().ObjC && S.NSAPIObj->isObjCBOOLType(Ty); 11077 } 11078 11079 static void adornObjCBoolConversionDiagWithTernaryFixit( 11080 Sema &S, Expr *SourceExpr, const Sema::SemaDiagnosticBuilder &Builder) { 11081 Expr *Ignored = SourceExpr->IgnoreImplicit(); 11082 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(Ignored)) 11083 Ignored = OVE->getSourceExpr(); 11084 bool NeedsParens = isa<AbstractConditionalOperator>(Ignored) || 11085 isa<BinaryOperator>(Ignored) || 11086 isa<CXXOperatorCallExpr>(Ignored); 11087 SourceLocation EndLoc = S.getLocForEndOfToken(SourceExpr->getEndLoc()); 11088 if (NeedsParens) 11089 Builder << FixItHint::CreateInsertion(SourceExpr->getBeginLoc(), "(") 11090 << FixItHint::CreateInsertion(EndLoc, ")"); 11091 Builder << FixItHint::CreateInsertion(EndLoc, " ? YES : NO"); 11092 } 11093 11094 /// Diagnose an implicit cast from a floating point value to an integer value. 11095 static void DiagnoseFloatingImpCast(Sema &S, Expr *E, QualType T, 11096 SourceLocation CContext) { 11097 const bool IsBool = T->isSpecificBuiltinType(BuiltinType::Bool); 11098 const bool PruneWarnings = S.inTemplateInstantiation(); 11099 11100 Expr *InnerE = E->IgnoreParenImpCasts(); 11101 // We also want to warn on, e.g., "int i = -1.234" 11102 if (UnaryOperator *UOp = dyn_cast<UnaryOperator>(InnerE)) 11103 if (UOp->getOpcode() == UO_Minus || UOp->getOpcode() == UO_Plus) 11104 InnerE = UOp->getSubExpr()->IgnoreParenImpCasts(); 11105 11106 const bool IsLiteral = 11107 isa<FloatingLiteral>(E) || isa<FloatingLiteral>(InnerE); 11108 11109 llvm::APFloat Value(0.0); 11110 bool IsConstant = 11111 E->EvaluateAsFloat(Value, S.Context, Expr::SE_AllowSideEffects); 11112 if (!IsConstant) { 11113 if (isObjCSignedCharBool(S, T)) { 11114 return adornObjCBoolConversionDiagWithTernaryFixit( 11115 S, E, 11116 S.Diag(CContext, diag::warn_impcast_float_to_objc_signed_char_bool) 11117 << E->getType()); 11118 } 11119 11120 return DiagnoseImpCast(S, E, T, CContext, 11121 diag::warn_impcast_float_integer, PruneWarnings); 11122 } 11123 11124 bool isExact = false; 11125 11126 llvm::APSInt IntegerValue(S.Context.getIntWidth(T), 11127 T->hasUnsignedIntegerRepresentation()); 11128 llvm::APFloat::opStatus Result = Value.convertToInteger( 11129 IntegerValue, llvm::APFloat::rmTowardZero, &isExact); 11130 11131 // FIXME: Force the precision of the source value down so we don't print 11132 // digits which are usually useless (we don't really care here if we 11133 // truncate a digit by accident in edge cases). Ideally, APFloat::toString 11134 // would automatically print the shortest representation, but it's a bit 11135 // tricky to implement. 
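  // (In the computation below, the factor 59/196 approximates log10(2), i.e.
  // it converts a bit count to a decimal digit count, rounding up.)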
11136 SmallString<16> PrettySourceValue; 11137 unsigned precision = llvm::APFloat::semanticsPrecision(Value.getSemantics()); 11138 precision = (precision * 59 + 195) / 196; 11139 Value.toString(PrettySourceValue, precision); 11140 11141 if (isObjCSignedCharBool(S, T) && IntegerValue != 0 && IntegerValue != 1) { 11142 return adornObjCBoolConversionDiagWithTernaryFixit( 11143 S, E, 11144 S.Diag(CContext, diag::warn_impcast_constant_value_to_objc_bool) 11145 << PrettySourceValue); 11146 } 11147 11148 if (Result == llvm::APFloat::opOK && isExact) { 11149 if (IsLiteral) return; 11150 return DiagnoseImpCast(S, E, T, CContext, diag::warn_impcast_float_integer, 11151 PruneWarnings); 11152 } 11153 11154 // Conversion of a floating-point value to a non-bool integer where the 11155 // integral part cannot be represented by the integer type is undefined. 11156 if (!IsBool && Result == llvm::APFloat::opInvalidOp) 11157 return DiagnoseImpCast( 11158 S, E, T, CContext, 11159 IsLiteral ? diag::warn_impcast_literal_float_to_integer_out_of_range 11160 : diag::warn_impcast_float_to_integer_out_of_range, 11161 PruneWarnings); 11162 11163 unsigned DiagID = 0; 11164 if (IsLiteral) { 11165 // Warn on floating point literal to integer. 11166 DiagID = diag::warn_impcast_literal_float_to_integer; 11167 } else if (IntegerValue == 0) { 11168 if (Value.isZero()) { // Skip -0.0 to 0 conversion. 11169 return DiagnoseImpCast(S, E, T, CContext, 11170 diag::warn_impcast_float_integer, PruneWarnings); 11171 } 11172 // Warn on non-zero to zero conversion. 11173 DiagID = diag::warn_impcast_float_to_integer_zero; 11174 } else { 11175 if (IntegerValue.isUnsigned()) { 11176 if (!IntegerValue.isMaxValue()) { 11177 return DiagnoseImpCast(S, E, T, CContext, 11178 diag::warn_impcast_float_integer, PruneWarnings); 11179 } 11180 } else { // IntegerValue.isSigned() 11181 if (!IntegerValue.isMaxSignedValue() && 11182 !IntegerValue.isMinSignedValue()) { 11183 return DiagnoseImpCast(S, E, T, CContext, 11184 diag::warn_impcast_float_integer, PruneWarnings); 11185 } 11186 } 11187 // Warn on evaluatable floating point expression to integer conversion. 11188 DiagID = diag::warn_impcast_float_to_integer; 11189 } 11190 11191 SmallString<16> PrettyTargetValue; 11192 if (IsBool) 11193 PrettyTargetValue = Value.isZero() ? "false" : "true"; 11194 else 11195 IntegerValue.toString(PrettyTargetValue); 11196 11197 if (PruneWarnings) { 11198 S.DiagRuntimeBehavior(E->getExprLoc(), E, 11199 S.PDiag(DiagID) 11200 << E->getType() << T.getUnqualifiedType() 11201 << PrettySourceValue << PrettyTargetValue 11202 << E->getSourceRange() << SourceRange(CContext)); 11203 } else { 11204 S.Diag(E->getExprLoc(), DiagID) 11205 << E->getType() << T.getUnqualifiedType() << PrettySourceValue 11206 << PrettyTargetValue << E->getSourceRange() << SourceRange(CContext); 11207 } 11208 } 11209 11210 /// Analyze the given compound assignment for the possible losing of 11211 /// floating-point precision. 
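/// For example (illustrative): in 'int i = 0; i += 0.5;' the addition is
/// performed in 'double' and the result is converted back to 'int', which is
/// flagged here; 'float f = 0; f += someDouble;' similarly drops precision.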
11212 static void AnalyzeCompoundAssignment(Sema &S, BinaryOperator *E) { 11213 assert(isa<CompoundAssignOperator>(E) && 11214 "Must be compound assignment operation"); 11215 // Recurse on the LHS and RHS in here 11216 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 11217 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 11218 11219 if (E->getLHS()->getType()->isAtomicType()) 11220 S.Diag(E->getOperatorLoc(), diag::warn_atomic_implicit_seq_cst); 11221 11222 // Now check the outermost expression 11223 const auto *ResultBT = E->getLHS()->getType()->getAs<BuiltinType>(); 11224 const auto *RBT = cast<CompoundAssignOperator>(E) 11225 ->getComputationResultType() 11226 ->getAs<BuiltinType>(); 11227 11228 // The below checks assume source is floating point. 11229 if (!ResultBT || !RBT || !RBT->isFloatingPoint()) return; 11230 11231 // If source is floating point but target is an integer. 11232 if (ResultBT->isInteger()) 11233 return DiagnoseImpCast(S, E, E->getRHS()->getType(), E->getLHS()->getType(), 11234 E->getExprLoc(), diag::warn_impcast_float_integer); 11235 11236 if (!ResultBT->isFloatingPoint()) 11237 return; 11238 11239 // If both source and target are floating points, warn about losing precision. 11240 int Order = S.getASTContext().getFloatingTypeSemanticOrder( 11241 QualType(ResultBT, 0), QualType(RBT, 0)); 11242 if (Order < 0 && !S.SourceMgr.isInSystemMacro(E->getOperatorLoc())) 11243 // warn about dropping FP rank. 11244 DiagnoseImpCast(S, E->getRHS(), E->getLHS()->getType(), E->getOperatorLoc(), 11245 diag::warn_impcast_float_result_precision); 11246 } 11247 11248 static std::string PrettyPrintInRange(const llvm::APSInt &Value, 11249 IntRange Range) { 11250 if (!Range.Width) return "0"; 11251 11252 llvm::APSInt ValueInRange = Value; 11253 ValueInRange.setIsSigned(!Range.NonNegative); 11254 ValueInRange = ValueInRange.trunc(Range.Width); 11255 return ValueInRange.toString(10); 11256 } 11257 11258 static bool IsImplicitBoolFloatConversion(Sema &S, Expr *Ex, bool ToBool) { 11259 if (!isa<ImplicitCastExpr>(Ex)) 11260 return false; 11261 11262 Expr *InnerE = Ex->IgnoreParenImpCasts(); 11263 const Type *Target = S.Context.getCanonicalType(Ex->getType()).getTypePtr(); 11264 const Type *Source = 11265 S.Context.getCanonicalType(InnerE->getType()).getTypePtr(); 11266 if (Target->isDependentType()) 11267 return false; 11268 11269 const BuiltinType *FloatCandidateBT = 11270 dyn_cast<BuiltinType>(ToBool ? Source : Target); 11271 const Type *BoolCandidateType = ToBool ? Target : Source; 11272 11273 return (BoolCandidateType->isSpecificBuiltinType(BuiltinType::Bool) && 11274 FloatCandidateBT && (FloatCandidateBT->isFloatingPoint())); 11275 } 11276 11277 static void CheckImplicitArgumentConversions(Sema &S, CallExpr *TheCall, 11278 SourceLocation CC) { 11279 unsigned NumArgs = TheCall->getNumArgs(); 11280 for (unsigned i = 0; i < NumArgs; ++i) { 11281 Expr *CurrA = TheCall->getArg(i); 11282 if (!IsImplicitBoolFloatConversion(S, CurrA, true)) 11283 continue; 11284 11285 bool IsSwapped = ((i > 0) && 11286 IsImplicitBoolFloatConversion(S, TheCall->getArg(i - 1), false)); 11287 IsSwapped |= ((i < (NumArgs - 1)) && 11288 IsImplicitBoolFloatConversion(S, TheCall->getArg(i + 1), false)); 11289 if (IsSwapped) { 11290 // Warn on this floating-point to bool conversion. 
11291 DiagnoseImpCast(S, CurrA->IgnoreParenImpCasts(), 11292 CurrA->getType(), CC, 11293 diag::warn_impcast_floating_point_to_bool); 11294 } 11295 } 11296 } 11297 11298 static void DiagnoseNullConversion(Sema &S, Expr *E, QualType T, 11299 SourceLocation CC) { 11300 if (S.Diags.isIgnored(diag::warn_impcast_null_pointer_to_integer, 11301 E->getExprLoc())) 11302 return; 11303 11304 // Don't warn on functions which have return type nullptr_t. 11305 if (isa<CallExpr>(E)) 11306 return; 11307 11308 // Check for NULL (GNUNull) or nullptr (CXX11_nullptr). 11309 const Expr::NullPointerConstantKind NullKind = 11310 E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull); 11311 if (NullKind != Expr::NPCK_GNUNull && NullKind != Expr::NPCK_CXX11_nullptr) 11312 return; 11313 11314 // Return if target type is a safe conversion. 11315 if (T->isAnyPointerType() || T->isBlockPointerType() || 11316 T->isMemberPointerType() || !T->isScalarType() || T->isNullPtrType()) 11317 return; 11318 11319 SourceLocation Loc = E->getSourceRange().getBegin(); 11320 11321 // Venture through the macro stacks to get to the source of macro arguments. 11322 // The new location is a better location than the complete location that was 11323 // passed in. 11324 Loc = S.SourceMgr.getTopMacroCallerLoc(Loc); 11325 CC = S.SourceMgr.getTopMacroCallerLoc(CC); 11326 11327 // __null is usually wrapped in a macro. Go up a macro if that is the case. 11328 if (NullKind == Expr::NPCK_GNUNull && Loc.isMacroID()) { 11329 StringRef MacroName = Lexer::getImmediateMacroNameForDiagnostics( 11330 Loc, S.SourceMgr, S.getLangOpts()); 11331 if (MacroName == "NULL") 11332 Loc = S.SourceMgr.getImmediateExpansionRange(Loc).getBegin(); 11333 } 11334 11335 // Only warn if the null and context location are in the same macro expansion. 11336 if (S.SourceMgr.getFileID(Loc) != S.SourceMgr.getFileID(CC)) 11337 return; 11338 11339 S.Diag(Loc, diag::warn_impcast_null_pointer_to_integer) 11340 << (NullKind == Expr::NPCK_CXX11_nullptr) << T << SourceRange(CC) 11341 << FixItHint::CreateReplacement(Loc, 11342 S.getFixItZeroLiteralForType(T, Loc)); 11343 } 11344 11345 static void checkObjCArrayLiteral(Sema &S, QualType TargetType, 11346 ObjCArrayLiteral *ArrayLiteral); 11347 11348 static void 11349 checkObjCDictionaryLiteral(Sema &S, QualType TargetType, 11350 ObjCDictionaryLiteral *DictionaryLiteral); 11351 11352 /// Check a single element within a collection literal against the 11353 /// target element type. 11354 static void checkObjCCollectionLiteralElement(Sema &S, 11355 QualType TargetElementType, 11356 Expr *Element, 11357 unsigned ElementKind) { 11358 // Skip a bitcast to 'id' or qualified 'id'. 
11359 if (auto ICE = dyn_cast<ImplicitCastExpr>(Element)) { 11360 if (ICE->getCastKind() == CK_BitCast && 11361 ICE->getSubExpr()->getType()->getAs<ObjCObjectPointerType>()) 11362 Element = ICE->getSubExpr(); 11363 } 11364 11365 QualType ElementType = Element->getType(); 11366 ExprResult ElementResult(Element); 11367 if (ElementType->getAs<ObjCObjectPointerType>() && 11368 S.CheckSingleAssignmentConstraints(TargetElementType, 11369 ElementResult, 11370 false, false) 11371 != Sema::Compatible) { 11372 S.Diag(Element->getBeginLoc(), diag::warn_objc_collection_literal_element) 11373 << ElementType << ElementKind << TargetElementType 11374 << Element->getSourceRange(); 11375 } 11376 11377 if (auto ArrayLiteral = dyn_cast<ObjCArrayLiteral>(Element)) 11378 checkObjCArrayLiteral(S, TargetElementType, ArrayLiteral); 11379 else if (auto DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(Element)) 11380 checkObjCDictionaryLiteral(S, TargetElementType, DictionaryLiteral); 11381 } 11382 11383 /// Check an Objective-C array literal being converted to the given 11384 /// target type. 11385 static void checkObjCArrayLiteral(Sema &S, QualType TargetType, 11386 ObjCArrayLiteral *ArrayLiteral) { 11387 if (!S.NSArrayDecl) 11388 return; 11389 11390 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>(); 11391 if (!TargetObjCPtr) 11392 return; 11393 11394 if (TargetObjCPtr->isUnspecialized() || 11395 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() 11396 != S.NSArrayDecl->getCanonicalDecl()) 11397 return; 11398 11399 auto TypeArgs = TargetObjCPtr->getTypeArgs(); 11400 if (TypeArgs.size() != 1) 11401 return; 11402 11403 QualType TargetElementType = TypeArgs[0]; 11404 for (unsigned I = 0, N = ArrayLiteral->getNumElements(); I != N; ++I) { 11405 checkObjCCollectionLiteralElement(S, TargetElementType, 11406 ArrayLiteral->getElement(I), 11407 0); 11408 } 11409 } 11410 11411 /// Check an Objective-C dictionary literal being converted to the given 11412 /// target type. 11413 static void 11414 checkObjCDictionaryLiteral(Sema &S, QualType TargetType, 11415 ObjCDictionaryLiteral *DictionaryLiteral) { 11416 if (!S.NSDictionaryDecl) 11417 return; 11418 11419 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>(); 11420 if (!TargetObjCPtr) 11421 return; 11422 11423 if (TargetObjCPtr->isUnspecialized() || 11424 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() 11425 != S.NSDictionaryDecl->getCanonicalDecl()) 11426 return; 11427 11428 auto TypeArgs = TargetObjCPtr->getTypeArgs(); 11429 if (TypeArgs.size() != 2) 11430 return; 11431 11432 QualType TargetKeyType = TypeArgs[0]; 11433 QualType TargetObjectType = TypeArgs[1]; 11434 for (unsigned I = 0, N = DictionaryLiteral->getNumElements(); I != N; ++I) { 11435 auto Element = DictionaryLiteral->getKeyValueElement(I); 11436 checkObjCCollectionLiteralElement(S, TargetKeyType, Element.Key, 1); 11437 checkObjCCollectionLiteralElement(S, TargetObjectType, Element.Value, 2); 11438 } 11439 } 11440 11441 // Helper function to filter out cases for constant width constant conversion. 11442 // Don't warn on char array initialization or for non-decimal values. 11443 static bool isSameWidthConstantConversion(Sema &S, Expr *E, QualType T, 11444 SourceLocation CC) { 11445 // If initializing from a constant, and the constant starts with '0', 11446 // then it is a binary, octal, or hexadecimal. Allow these constants 11447 // to fill all the bits, even if there is a sign change. 
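// e.g. (illustrative): 'signed char c = 0xff;' is not given the constant
// conversion warning, while 'signed char c = 255;' still is.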
11448 if (auto *IntLit = dyn_cast<IntegerLiteral>(E->IgnoreParenImpCasts())) { 11449 const char FirstLiteralCharacter = 11450 S.getSourceManager().getCharacterData(IntLit->getBeginLoc())[0]; 11451 if (FirstLiteralCharacter == '0') 11452 return false; 11453 } 11454 11455 // If the CC location points to a '{', and the type is char, then assume 11456 // assume it is an array initialization. 11457 if (CC.isValid() && T->isCharType()) { 11458 const char FirstContextCharacter = 11459 S.getSourceManager().getCharacterData(CC)[0]; 11460 if (FirstContextCharacter == '{') 11461 return false; 11462 } 11463 11464 return true; 11465 } 11466 11467 static const IntegerLiteral *getIntegerLiteral(Expr *E) { 11468 const auto *IL = dyn_cast<IntegerLiteral>(E); 11469 if (!IL) { 11470 if (auto *UO = dyn_cast<UnaryOperator>(E)) { 11471 if (UO->getOpcode() == UO_Minus) 11472 return dyn_cast<IntegerLiteral>(UO->getSubExpr()); 11473 } 11474 } 11475 11476 return IL; 11477 } 11478 11479 static void CheckConditionalWithEnumTypes(Sema &S, SourceLocation Loc, 11480 Expr *LHS, Expr *RHS) { 11481 QualType LHSStrippedType = LHS->IgnoreParenImpCasts()->getType(); 11482 QualType RHSStrippedType = RHS->IgnoreParenImpCasts()->getType(); 11483 11484 const auto *LHSEnumType = LHSStrippedType->getAs<EnumType>(); 11485 if (!LHSEnumType) 11486 return; 11487 const auto *RHSEnumType = RHSStrippedType->getAs<EnumType>(); 11488 if (!RHSEnumType) 11489 return; 11490 11491 // Ignore anonymous enums. 11492 if (!LHSEnumType->getDecl()->hasNameForLinkage()) 11493 return; 11494 if (!RHSEnumType->getDecl()->hasNameForLinkage()) 11495 return; 11496 11497 if (S.Context.hasSameUnqualifiedType(LHSStrippedType, RHSStrippedType)) 11498 return; 11499 11500 S.Diag(Loc, diag::warn_conditional_mixed_enum_types) 11501 << LHSStrippedType << RHSStrippedType << LHS->getSourceRange() 11502 << RHS->getSourceRange(); 11503 } 11504 11505 static void DiagnoseIntInBoolContext(Sema &S, Expr *E) { 11506 E = E->IgnoreParenImpCasts(); 11507 SourceLocation ExprLoc = E->getExprLoc(); 11508 11509 if (const auto *BO = dyn_cast<BinaryOperator>(E)) { 11510 BinaryOperator::Opcode Opc = BO->getOpcode(); 11511 Expr::EvalResult Result; 11512 // Do not diagnose unsigned shifts. 11513 if (Opc == BO_Shl) { 11514 const auto *LHS = getIntegerLiteral(BO->getLHS()); 11515 const auto *RHS = getIntegerLiteral(BO->getRHS()); 11516 if (LHS && LHS->getValue() == 0) 11517 S.Diag(ExprLoc, diag::warn_left_shift_always) << 0; 11518 else if (!E->isValueDependent() && LHS && RHS && 11519 RHS->getValue().isNonNegative() && 11520 E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects)) 11521 S.Diag(ExprLoc, diag::warn_left_shift_always) 11522 << (Result.Val.getInt() != 0); 11523 else if (E->getType()->isSignedIntegerType()) 11524 S.Diag(ExprLoc, diag::warn_left_shift_in_bool_context) << E; 11525 } 11526 } 11527 11528 if (const auto *CO = dyn_cast<ConditionalOperator>(E)) { 11529 const auto *LHS = getIntegerLiteral(CO->getTrueExpr()); 11530 const auto *RHS = getIntegerLiteral(CO->getFalseExpr()); 11531 if (!LHS || !RHS) 11532 return; 11533 if ((LHS->getValue() == 0 || LHS->getValue() == 1) && 11534 (RHS->getValue() == 0 || RHS->getValue() == 1)) 11535 // Do not diagnose common idioms. 
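// e.g. (illustrative): 'cond ? 1 : 0' used where a boolean is expected.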
11536 return;
11537 if (LHS->getValue() != 0 && RHS->getValue() != 0)
11538 S.Diag(ExprLoc, diag::warn_integer_constants_in_conditional_always_true);
11539 }
11540 }
11541
11542 static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
11543 SourceLocation CC,
11544 bool *ICContext = nullptr,
11545 bool IsListInit = false) {
11546 if (E->isTypeDependent() || E->isValueDependent()) return;
11547
11548 const Type *Source = S.Context.getCanonicalType(E->getType()).getTypePtr();
11549 const Type *Target = S.Context.getCanonicalType(T).getTypePtr();
11550 if (Source == Target) return;
11551 if (Target->isDependentType()) return;
11552
11553 // If the conversion context location is invalid, don't complain. We also
11554 // don't want to emit a warning if the issue occurs from the expansion of
11555 // a system macro. The problem is that 'getSpellingLoc()' is slow, so we
11556 // delay this check as long as possible. Once we detect we are in that
11557 // scenario, we just return.
11558 if (CC.isInvalid())
11559 return;
11560
11561 if (Source->isAtomicType())
11562 S.Diag(E->getExprLoc(), diag::warn_atomic_implicit_seq_cst);
11563
11564 // Diagnose implicit casts to bool.
11565 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) {
11566 if (isa<StringLiteral>(E))
11567 // Warn on string literal to bool. Checks for string literals in logical
11568 // and expressions, for instance, assert(0 && "error here"), are
11569 // prevented by a check in AnalyzeImplicitConversions().
11570 return DiagnoseImpCast(S, E, T, CC,
11571 diag::warn_impcast_string_literal_to_bool);
11572 if (isa<ObjCStringLiteral>(E) || isa<ObjCArrayLiteral>(E) ||
11573 isa<ObjCDictionaryLiteral>(E) || isa<ObjCBoxedExpr>(E)) {
11574 // This covers the literal expressions that evaluate to Objective-C
11575 // objects.
11576 return DiagnoseImpCast(S, E, T, CC,
11577 diag::warn_impcast_objective_c_literal_to_bool);
11578 }
11579 if (Source->isPointerType() || Source->canDecayToPointerType()) {
11580 // Warn on pointer to bool conversion that is always true.
11581 S.DiagnoseAlwaysNonNullPointer(E, Expr::NPCK_NotNull, /*IsEqual*/ false,
11582 SourceRange(CC));
11583 }
11584 }
11585
11586 // If we're converting a constant to an ObjC BOOL on a platform where BOOL
11587 // is a typedef for signed char (macOS), then that constant value has to be 1
11588 // or 0.
11589 if (isObjCSignedCharBool(S, T) && Source->isIntegralType(S.Context)) {
11590 Expr::EvalResult Result;
11591 if (E->EvaluateAsInt(Result, S.getASTContext(),
11592 Expr::SE_AllowSideEffects)) {
11593 if (Result.Val.getInt() != 1 && Result.Val.getInt() != 0) {
11594 adornObjCBoolConversionDiagWithTernaryFixit(
11595 S, E,
11596 S.Diag(CC, diag::warn_impcast_constant_value_to_objc_bool)
11597 << Result.Val.getInt().toString(10));
11598 }
11599 return;
11600 }
11601 }
11602
11603 // Check implicit casts from Objective-C collection literals to specialized
11604 // collection types, e.g., NSArray<NSString *> *.
11605 if (auto *ArrayLiteral = dyn_cast<ObjCArrayLiteral>(E))
11606 checkObjCArrayLiteral(S, QualType(Target, 0), ArrayLiteral);
11607 else if (auto *DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(E))
11608 checkObjCDictionaryLiteral(S, QualType(Target, 0), DictionaryLiteral);
11609
11610 // Strip vector types.
11611 if (isa<VectorType>(Source)) {
11612 if (!isa<VectorType>(Target)) {
11613 if (S.SourceMgr.isInSystemMacro(CC))
11614 return;
11615 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_vector_scalar);
11616 }
11617
11618 // If the vector cast is a cast between two vectors of the same size, it is
11619 // a bitcast, not a conversion.
11620 if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target))
11621 return;
11622
11623 Source = cast<VectorType>(Source)->getElementType().getTypePtr();
11624 Target = cast<VectorType>(Target)->getElementType().getTypePtr();
11625 }
11626 if (auto VecTy = dyn_cast<VectorType>(Target))
11627 Target = VecTy->getElementType().getTypePtr();
11628
11629 // Strip complex types.
11630 if (isa<ComplexType>(Source)) {
11631 if (!isa<ComplexType>(Target)) {
11632 if (S.SourceMgr.isInSystemMacro(CC) || Target->isBooleanType())
11633 return;
11634
11635 return DiagnoseImpCast(S, E, T, CC,
11636 S.getLangOpts().CPlusPlus
11637 ? diag::err_impcast_complex_scalar
11638 : diag::warn_impcast_complex_scalar);
11639 }
11640
11641 Source = cast<ComplexType>(Source)->getElementType().getTypePtr();
11642 Target = cast<ComplexType>(Target)->getElementType().getTypePtr();
11643 }
11644
11645 const BuiltinType *SourceBT = dyn_cast<BuiltinType>(Source);
11646 const BuiltinType *TargetBT = dyn_cast<BuiltinType>(Target);
11647
11648 // If the source is floating point...
11649 if (SourceBT && SourceBT->isFloatingPoint()) {
11650 // ...and the target is floating point...
11651 if (TargetBT && TargetBT->isFloatingPoint()) {
11652 // ...then warn if we're dropping FP rank.
11653
11654 int Order = S.getASTContext().getFloatingTypeSemanticOrder(
11655 QualType(SourceBT, 0), QualType(TargetBT, 0));
11656 if (Order > 0) {
11657 // Don't warn about float constants that are precisely
11658 // representable in the target type.
11659 Expr::EvalResult result;
11660 if (E->EvaluateAsRValue(result, S.Context)) {
11661 // Value might be a float, a float vector, or a float complex.
11662 if (IsSameFloatAfterCast(result.Val,
11663 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0)),
11664 S.Context.getFloatTypeSemantics(QualType(SourceBT, 0))))
11665 return;
11666 }
11667
11668 if (S.SourceMgr.isInSystemMacro(CC))
11669 return;
11670
11671 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_float_precision);
11672 }
11673 // ... or possibly if we're increasing rank, too.
11674 else if (Order < 0) {
11675 if (S.SourceMgr.isInSystemMacro(CC))
11676 return;
11677
11678 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_double_promotion);
11679 }
11680 return;
11681 }
11682
11683 // If the target is integral, always warn.
11684 if (TargetBT && TargetBT->isInteger()) {
11685 if (S.SourceMgr.isInSystemMacro(CC))
11686 return;
11687
11688 DiagnoseFloatingImpCast(S, E, T, CC);
11689 }
11690
11691 // Detect the case where a call result is converted from floating-point
11692 // to bool, and the final argument to the call is converted from bool, to
11693 // discover this typo:
11694 //
11695 // bool b = fabs(x < 1.0); // should be "bool b = fabs(x) < 1.0;"
11696 //
11697 // FIXME: This is an incredibly special case; is there some more general
11698 // way to detect this class of misplaced-parentheses bug?
11699 if (Target->isBooleanType() && isa<CallExpr>(E)) {
11700 // Check last argument of function call to see if it is an
11701 // implicit cast from a type matching the type the result
11702 // is being cast to.
11703 CallExpr *CEx = cast<CallExpr>(E); 11704 if (unsigned NumArgs = CEx->getNumArgs()) { 11705 Expr *LastA = CEx->getArg(NumArgs - 1); 11706 Expr *InnerE = LastA->IgnoreParenImpCasts(); 11707 if (isa<ImplicitCastExpr>(LastA) && 11708 InnerE->getType()->isBooleanType()) { 11709 // Warn on this floating-point to bool conversion 11710 DiagnoseImpCast(S, E, T, CC, 11711 diag::warn_impcast_floating_point_to_bool); 11712 } 11713 } 11714 } 11715 return; 11716 } 11717 11718 // Valid casts involving fixed point types should be accounted for here. 11719 if (Source->isFixedPointType()) { 11720 if (Target->isUnsaturatedFixedPointType()) { 11721 Expr::EvalResult Result; 11722 if (E->EvaluateAsFixedPoint(Result, S.Context, Expr::SE_AllowSideEffects, 11723 S.isConstantEvaluated())) { 11724 APFixedPoint Value = Result.Val.getFixedPoint(); 11725 APFixedPoint MaxVal = S.Context.getFixedPointMax(T); 11726 APFixedPoint MinVal = S.Context.getFixedPointMin(T); 11727 if (Value > MaxVal || Value < MinVal) { 11728 S.DiagRuntimeBehavior(E->getExprLoc(), E, 11729 S.PDiag(diag::warn_impcast_fixed_point_range) 11730 << Value.toString() << T 11731 << E->getSourceRange() 11732 << clang::SourceRange(CC)); 11733 return; 11734 } 11735 } 11736 } else if (Target->isIntegerType()) { 11737 Expr::EvalResult Result; 11738 if (!S.isConstantEvaluated() && 11739 E->EvaluateAsFixedPoint(Result, S.Context, 11740 Expr::SE_AllowSideEffects)) { 11741 APFixedPoint FXResult = Result.Val.getFixedPoint(); 11742 11743 bool Overflowed; 11744 llvm::APSInt IntResult = FXResult.convertToInt( 11745 S.Context.getIntWidth(T), 11746 Target->isSignedIntegerOrEnumerationType(), &Overflowed); 11747 11748 if (Overflowed) { 11749 S.DiagRuntimeBehavior(E->getExprLoc(), E, 11750 S.PDiag(diag::warn_impcast_fixed_point_range) 11751 << FXResult.toString() << T 11752 << E->getSourceRange() 11753 << clang::SourceRange(CC)); 11754 return; 11755 } 11756 } 11757 } 11758 } else if (Target->isUnsaturatedFixedPointType()) { 11759 if (Source->isIntegerType()) { 11760 Expr::EvalResult Result; 11761 if (!S.isConstantEvaluated() && 11762 E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects)) { 11763 llvm::APSInt Value = Result.Val.getInt(); 11764 11765 bool Overflowed; 11766 APFixedPoint IntResult = APFixedPoint::getFromIntValue( 11767 Value, S.Context.getFixedPointSemantics(T), &Overflowed); 11768 11769 if (Overflowed) { 11770 S.DiagRuntimeBehavior(E->getExprLoc(), E, 11771 S.PDiag(diag::warn_impcast_fixed_point_range) 11772 << Value.toString(/*Radix=*/10) << T 11773 << E->getSourceRange() 11774 << clang::SourceRange(CC)); 11775 return; 11776 } 11777 } 11778 } 11779 } 11780 11781 // If we are casting an integer type to a floating point type without 11782 // initialization-list syntax, we might lose accuracy if the floating 11783 // point type has a narrower significand than the integer type. 11784 if (SourceBT && TargetBT && SourceBT->isIntegerType() && 11785 TargetBT->isFloatingType() && !IsListInit) { 11786 // Determine the number of precision bits in the source integer type. 11787 IntRange SourceRange = GetExprRange(S.Context, E, S.isConstantEvaluated()); 11788 unsigned int SourcePrecision = SourceRange.Width; 11789 11790 // Determine the number of precision bits in the 11791 // target floating point type. 
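// (For instance, 'float' has a 24-bit significand, so an initialization such
// as 'float f = 16777217;' is diagnosed, while 'float f = 16777216;' is
// exactly representable and is not. Illustrative example.)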
11792 unsigned int TargetPrecision = llvm::APFloatBase::semanticsPrecision( 11793 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0))); 11794 11795 if (SourcePrecision > 0 && TargetPrecision > 0 && 11796 SourcePrecision > TargetPrecision) { 11797 11798 llvm::APSInt SourceInt; 11799 if (E->isIntegerConstantExpr(SourceInt, S.Context)) { 11800 // If the source integer is a constant, convert it to the target 11801 // floating point type. Issue a warning if the value changes 11802 // during the whole conversion. 11803 llvm::APFloat TargetFloatValue( 11804 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0))); 11805 llvm::APFloat::opStatus ConversionStatus = 11806 TargetFloatValue.convertFromAPInt( 11807 SourceInt, SourceBT->isSignedInteger(), 11808 llvm::APFloat::rmNearestTiesToEven); 11809 11810 if (ConversionStatus != llvm::APFloat::opOK) { 11811 std::string PrettySourceValue = SourceInt.toString(10); 11812 SmallString<32> PrettyTargetValue; 11813 TargetFloatValue.toString(PrettyTargetValue, TargetPrecision); 11814 11815 S.DiagRuntimeBehavior( 11816 E->getExprLoc(), E, 11817 S.PDiag(diag::warn_impcast_integer_float_precision_constant) 11818 << PrettySourceValue << PrettyTargetValue << E->getType() << T 11819 << E->getSourceRange() << clang::SourceRange(CC)); 11820 } 11821 } else { 11822 // Otherwise, the implicit conversion may lose precision. 11823 DiagnoseImpCast(S, E, T, CC, 11824 diag::warn_impcast_integer_float_precision); 11825 } 11826 } 11827 } 11828 11829 DiagnoseNullConversion(S, E, T, CC); 11830 11831 S.DiscardMisalignedMemberAddress(Target, E); 11832 11833 if (Target->isBooleanType()) 11834 DiagnoseIntInBoolContext(S, E); 11835 11836 if (!Source->isIntegerType() || !Target->isIntegerType()) 11837 return; 11838 11839 // TODO: remove this early return once the false positives for constant->bool 11840 // in templates, macros, etc, are reduced or removed. 11841 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) 11842 return; 11843 11844 if (isObjCSignedCharBool(S, T) && !Source->isCharType() && 11845 !E->isKnownToHaveBooleanValue()) { 11846 return adornObjCBoolConversionDiagWithTernaryFixit( 11847 S, E, 11848 S.Diag(CC, diag::warn_impcast_int_to_objc_signed_char_bool) 11849 << E->getType()); 11850 } 11851 11852 IntRange SourceRange = GetExprRange(S.Context, E, S.isConstantEvaluated()); 11853 IntRange TargetRange = IntRange::forTargetOfCanonicalType(S.Context, Target); 11854 11855 if (SourceRange.Width > TargetRange.Width) { 11856 // If the source is a constant, use a default-on diagnostic. 11857 // TODO: this should happen for bitfield stores, too. 11858 Expr::EvalResult Result; 11859 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects, 11860 S.isConstantEvaluated())) { 11861 llvm::APSInt Value(32); 11862 Value = Result.Val.getInt(); 11863 11864 if (S.SourceMgr.isInSystemMacro(CC)) 11865 return; 11866 11867 std::string PrettySourceValue = Value.toString(10); 11868 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange); 11869 11870 S.DiagRuntimeBehavior( 11871 E->getExprLoc(), E, 11872 S.PDiag(diag::warn_impcast_integer_precision_constant) 11873 << PrettySourceValue << PrettyTargetValue << E->getType() << T 11874 << E->getSourceRange() << clang::SourceRange(CC)); 11875 return; 11876 } 11877 11878 // People want to build with -Wshorten-64-to-32 and not -Wconversion. 
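// e.g. (illustrative): on an LP64 target, 'int i = n;' with 'long n' is
// reported under -Wshorten-64-to-32 rather than the general integer
// precision warning.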
11879 if (S.SourceMgr.isInSystemMacro(CC))
11880 return;
11881
11882 if (TargetRange.Width == 32 && S.Context.getIntWidth(E->getType()) == 64)
11883 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_64_32,
11884 /* pruneControlFlow */ true);
11885 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_precision);
11886 }
11887
11888 if (TargetRange.Width > SourceRange.Width) {
11889 if (auto *UO = dyn_cast<UnaryOperator>(E))
11890 if (UO->getOpcode() == UO_Minus)
11891 if (Source->isUnsignedIntegerType()) {
11892 if (Target->isUnsignedIntegerType())
11893 return DiagnoseImpCast(S, E, T, CC,
11894 diag::warn_impcast_high_order_zero_bits);
11895 if (Target->isSignedIntegerType())
11896 return DiagnoseImpCast(S, E, T, CC,
11897 diag::warn_impcast_nonnegative_result);
11898 }
11899 }
11900
11901 if (TargetRange.Width == SourceRange.Width && !TargetRange.NonNegative &&
11902 SourceRange.NonNegative && Source->isSignedIntegerType()) {
11903 // For a signed-to-signed conversion, warn if the positive source value
11904 // requires the full width of the target type, since storing it will
11905 // produce a negative value.
11906
11907 Expr::EvalResult Result;
11908 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects) &&
11909 !S.SourceMgr.isInSystemMacro(CC)) {
11910 llvm::APSInt Value = Result.Val.getInt();
11911 if (isSameWidthConstantConversion(S, E, T, CC)) {
11912 std::string PrettySourceValue = Value.toString(10);
11913 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange);
11914
11915 S.DiagRuntimeBehavior(
11916 E->getExprLoc(), E,
11917 S.PDiag(diag::warn_impcast_integer_precision_constant)
11918 << PrettySourceValue << PrettyTargetValue << E->getType() << T
11919 << E->getSourceRange() << clang::SourceRange(CC));
11920 return;
11921 }
11922 }
11923
11924 // Fall through for non-constants to give a sign conversion warning.
11925 }
11926
11927 if ((TargetRange.NonNegative && !SourceRange.NonNegative) ||
11928 (!TargetRange.NonNegative && SourceRange.NonNegative &&
11929 SourceRange.Width == TargetRange.Width)) {
11930 if (S.SourceMgr.isInSystemMacro(CC))
11931 return;
11932
11933 unsigned DiagID = diag::warn_impcast_integer_sign;
11934
11935 // Traditionally, gcc has warned about this under -Wsign-compare.
11936 // We also want to warn about it in -Wconversion.
11937 // So if -Wconversion is off, use a completely identical diagnostic
11938 // in the sign-compare group.
11939 // The conditional-checking code will use this flag to decide whether further checks are needed.
11940 if (ICContext) {
11941 DiagID = diag::warn_impcast_integer_sign_conditional;
11942 *ICContext = true;
11943 }
11944
11945 return DiagnoseImpCast(S, E, T, CC, DiagID);
11946 }
11947
11948 // Diagnose conversions between different enumeration types.
11949 // In C, we pretend that the type of an EnumConstantDecl is its enumeration
11950 // type, to give us better diagnostics.
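// e.g. (illustrative, C): given 'enum Color { Red };' and 'enum Shape { Round };',
// the initialization 'enum Color c = Round;' is diagnosed as an implicit
// conversion between different enumeration types.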
11951 QualType SourceType = E->getType(); 11952 if (!S.getLangOpts().CPlusPlus) { 11953 if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) 11954 if (EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(DRE->getDecl())) { 11955 EnumDecl *Enum = cast<EnumDecl>(ECD->getDeclContext()); 11956 SourceType = S.Context.getTypeDeclType(Enum); 11957 Source = S.Context.getCanonicalType(SourceType).getTypePtr(); 11958 } 11959 } 11960 11961 if (const EnumType *SourceEnum = Source->getAs<EnumType>()) 11962 if (const EnumType *TargetEnum = Target->getAs<EnumType>()) 11963 if (SourceEnum->getDecl()->hasNameForLinkage() && 11964 TargetEnum->getDecl()->hasNameForLinkage() && 11965 SourceEnum != TargetEnum) { 11966 if (S.SourceMgr.isInSystemMacro(CC)) 11967 return; 11968 11969 return DiagnoseImpCast(S, E, SourceType, T, CC, 11970 diag::warn_impcast_different_enum_types); 11971 } 11972 } 11973 11974 static void CheckConditionalOperator(Sema &S, ConditionalOperator *E, 11975 SourceLocation CC, QualType T); 11976 11977 static void CheckConditionalOperand(Sema &S, Expr *E, QualType T, 11978 SourceLocation CC, bool &ICContext) { 11979 E = E->IgnoreParenImpCasts(); 11980 11981 if (isa<ConditionalOperator>(E)) 11982 return CheckConditionalOperator(S, cast<ConditionalOperator>(E), CC, T); 11983 11984 AnalyzeImplicitConversions(S, E, CC); 11985 if (E->getType() != T) 11986 return CheckImplicitConversion(S, E, T, CC, &ICContext); 11987 } 11988 11989 static void CheckConditionalOperator(Sema &S, ConditionalOperator *E, 11990 SourceLocation CC, QualType T) { 11991 AnalyzeImplicitConversions(S, E->getCond(), E->getQuestionLoc()); 11992 11993 bool Suspicious = false; 11994 CheckConditionalOperand(S, E->getTrueExpr(), T, CC, Suspicious); 11995 CheckConditionalOperand(S, E->getFalseExpr(), T, CC, Suspicious); 11996 CheckConditionalWithEnumTypes(S, E->getBeginLoc(), E->getTrueExpr(), 11997 E->getFalseExpr()); 11998 11999 if (T->isBooleanType()) 12000 DiagnoseIntInBoolContext(S, E); 12001 12002 // If -Wconversion would have warned about either of the candidates 12003 // for a signedness conversion to the context type... 12004 if (!Suspicious) return; 12005 12006 // ...but it's currently ignored... 12007 if (!S.Diags.isIgnored(diag::warn_impcast_integer_sign_conditional, CC)) 12008 return; 12009 12010 // ...then check whether it would have warned about either of the 12011 // candidates for a signedness conversion to the condition type. 12012 if (E->getType() == T) return; 12013 12014 Suspicious = false; 12015 CheckImplicitConversion(S, E->getTrueExpr()->IgnoreParenImpCasts(), 12016 E->getType(), CC, &Suspicious); 12017 if (!Suspicious) 12018 CheckImplicitConversion(S, E->getFalseExpr()->IgnoreParenImpCasts(), 12019 E->getType(), CC, &Suspicious); 12020 } 12021 12022 /// Check conversion of given expression to boolean. 12023 /// Input argument E is a logical expression. 12024 static void CheckBoolLikeConversion(Sema &S, Expr *E, SourceLocation CC) { 12025 if (S.getLangOpts().Bool) 12026 return; 12027 if (E->IgnoreParenImpCasts()->getType()->isAtomicType()) 12028 return; 12029 CheckImplicitConversion(S, E->IgnoreParenImpCasts(), S.Context.BoolTy, CC); 12030 } 12031 12032 /// AnalyzeImplicitConversions - Find and report any interesting 12033 /// implicit conversions in the given expression. There are a couple 12034 /// of competing diagnostics here, -Wconversion and -Wsign-compare. 
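/// Illustrative example: for 'unsigned u' and 'short s', the narrowing in
/// 's = u' is reported under -Wconversion, while a comparison like 'u < -1'
/// is reported under -Wsign-compare.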
12035 static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC, 12036 bool IsListInit/*= false*/) { 12037 QualType T = OrigE->getType(); 12038 Expr *E = OrigE->IgnoreParenImpCasts(); 12039 12040 // Propagate whether we are in a C++ list initialization expression. 12041 // If so, we do not issue warnings for implicit int-float conversion 12042 // precision loss, because C++11 narrowing already handles it. 12043 IsListInit = 12044 IsListInit || (isa<InitListExpr>(OrigE) && S.getLangOpts().CPlusPlus); 12045 12046 if (E->isTypeDependent() || E->isValueDependent()) 12047 return; 12048 12049 if (const auto *UO = dyn_cast<UnaryOperator>(E)) 12050 if (UO->getOpcode() == UO_Not && 12051 UO->getSubExpr()->isKnownToHaveBooleanValue()) 12052 S.Diag(UO->getBeginLoc(), diag::warn_bitwise_negation_bool) 12053 << OrigE->getSourceRange() << T->isBooleanType() 12054 << FixItHint::CreateReplacement(UO->getBeginLoc(), "!"); 12055 12056 // For conditional operators, we analyze the arguments as if they 12057 // were being fed directly into the output. 12058 if (isa<ConditionalOperator>(E)) { 12059 ConditionalOperator *CO = cast<ConditionalOperator>(E); 12060 CheckConditionalOperator(S, CO, CC, T); 12061 return; 12062 } 12063 12064 // Check implicit argument conversions for function calls. 12065 if (CallExpr *Call = dyn_cast<CallExpr>(E)) 12066 CheckImplicitArgumentConversions(S, Call, CC); 12067 12068 // Go ahead and check any implicit conversions we might have skipped. 12069 // The non-canonical typecheck is just an optimization; 12070 // CheckImplicitConversion will filter out dead implicit conversions. 12071 if (E->getType() != T) 12072 CheckImplicitConversion(S, E, T, CC, nullptr, IsListInit); 12073 12074 // Now continue drilling into this expression. 12075 12076 if (PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E)) { 12077 // The bound subexpressions in a PseudoObjectExpr are not reachable 12078 // as transitive children. 12079 // FIXME: Use a more uniform representation for this. 12080 for (auto *SE : POE->semantics()) 12081 if (auto *OVE = dyn_cast<OpaqueValueExpr>(SE)) 12082 AnalyzeImplicitConversions(S, OVE->getSourceExpr(), CC, IsListInit); 12083 } 12084 12085 // Skip past explicit casts. 12086 if (auto *CE = dyn_cast<ExplicitCastExpr>(E)) { 12087 E = CE->getSubExpr()->IgnoreParenImpCasts(); 12088 if (!CE->getType()->isVoidType() && E->getType()->isAtomicType()) 12089 S.Diag(E->getBeginLoc(), diag::warn_atomic_implicit_seq_cst); 12090 return AnalyzeImplicitConversions(S, E, CC, IsListInit); 12091 } 12092 12093 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { 12094 // Do a somewhat different check with comparison operators. 12095 if (BO->isComparisonOp()) 12096 return AnalyzeComparison(S, BO); 12097 12098 // And with simple assignments. 12099 if (BO->getOpcode() == BO_Assign) 12100 return AnalyzeAssignment(S, BO); 12101 // And with compound assignments. 12102 if (BO->isAssignmentOp()) 12103 return AnalyzeCompoundAssignment(S, BO); 12104 } 12105 12106 // These break the otherwise-useful invariant below. Fortunately, 12107 // we don't really need to recurse into them, because any internal 12108 // expressions should have been analyzed already when they were 12109 // built into statements. 12110 if (isa<StmtExpr>(E)) return; 12111 12112 // Don't descend into unevaluated contexts. 12113 if (isa<UnaryExprOrTypeTraitExpr>(E)) return; 12114 12115 // Now just recurse over the expression's children. 
12116 CC = E->getExprLoc();
12117 BinaryOperator *BO = dyn_cast<BinaryOperator>(E);
12118 bool IsLogicalAndOperator = BO && BO->getOpcode() == BO_LAnd;
12119 for (Stmt *SubStmt : E->children()) {
12120 Expr *ChildExpr = dyn_cast_or_null<Expr>(SubStmt);
12121 if (!ChildExpr)
12122 continue;
12123
12124 if (IsLogicalAndOperator &&
12125 isa<StringLiteral>(ChildExpr->IgnoreParenImpCasts()))
12126 // Ignore checking string literals that are in logical and operators.
12127 // This is a common pattern for asserts.
12128 continue;
12129 AnalyzeImplicitConversions(S, ChildExpr, CC, IsListInit);
12130 }
12131
12132 if (BO && BO->isLogicalOp()) {
12133 Expr *SubExpr = BO->getLHS()->IgnoreParenImpCasts();
12134 if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr))
12135 ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc());
12136
12137 SubExpr = BO->getRHS()->IgnoreParenImpCasts();
12138 if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr))
12139 ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc());
12140 }
12141
12142 if (const UnaryOperator *U = dyn_cast<UnaryOperator>(E)) {
12143 if (U->getOpcode() == UO_LNot) {
12144 ::CheckBoolLikeConversion(S, U->getSubExpr(), CC);
12145 } else if (U->getOpcode() != UO_AddrOf) {
12146 if (U->getSubExpr()->getType()->isAtomicType())
12147 S.Diag(U->getSubExpr()->getBeginLoc(),
12148 diag::warn_atomic_implicit_seq_cst);
12149 }
12150 }
12151 }
12152
12153 /// Diagnose integer type and any valid implicit conversion to it.
12154 static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E, const QualType &IntT) {
12155 // Taking into account implicit conversions,
12156 // allow any integer.
12157 if (!E->getType()->isIntegerType()) {
12158 S.Diag(E->getBeginLoc(),
12159 diag::err_opencl_enqueue_kernel_invalid_local_size_type);
12160 return true;
12161 }
12162 // Potentially emit standard warnings for implicit conversions if enabled
12163 // using -Wconversion.
12164 CheckImplicitConversion(S, E, IntT, E->getBeginLoc());
12165 return false;
12166 }
12167
12168 // Helper function for Sema::DiagnoseAlwaysNonNullPointer.
12169 // Returns true when emitting a warning about taking the address of a reference.
12170 static bool CheckForReference(Sema &SemaRef, const Expr *E,
12171 const PartialDiagnostic &PD) {
12172 E = E->IgnoreParenImpCasts();
12173
12174 const FunctionDecl *FD = nullptr;
12175
12176 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
12177 if (!DRE->getDecl()->getType()->isReferenceType())
12178 return false;
12179 } else if (const MemberExpr *M = dyn_cast<MemberExpr>(E)) {
12180 if (!M->getMemberDecl()->getType()->isReferenceType())
12181 return false;
12182 } else if (const CallExpr *Call = dyn_cast<CallExpr>(E)) {
12183 if (!Call->getCallReturnType(SemaRef.Context)->isReferenceType())
12184 return false;
12185 FD = Call->getDirectCallee();
12186 } else {
12187 return false;
12188 }
12189
12190 SemaRef.Diag(E->getExprLoc(), PD);
12191
12192 // If possible, point to location of function.
12193 if (FD) {
12194 SemaRef.Diag(FD->getLocation(), diag::note_reference_is_return_value) << FD;
12195 }
12196
12197 return true;
12198 }
12199
12200 // Returns true if the SourceLocation is expanded from any macro body.
12201 // Returns false if the SourceLocation is invalid, is not in a macro
12202 // expansion, or is expanded from a top-level macro argument.
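// For example (illustrative, hypothetical macros): with
// '#define FLAG check(ptr)', a use of FLAG places the location of 'ptr'
// inside the macro body, so this returns true; with
// '#define CHECK(p) check(p)' and a use 'CHECK(ptr)', the location of 'ptr'
// is a top-level macro argument, so this returns false.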
12203 static bool IsInAnyMacroBody(const SourceManager &SM, SourceLocation Loc) { 12204 if (Loc.isInvalid()) 12205 return false; 12206 12207 while (Loc.isMacroID()) { 12208 if (SM.isMacroBodyExpansion(Loc)) 12209 return true; 12210 Loc = SM.getImmediateMacroCallerLoc(Loc); 12211 } 12212 12213 return false; 12214 } 12215 12216 /// Diagnose pointers that are always non-null. 12217 /// \param E the expression containing the pointer 12218 /// \param NullKind NPCK_NotNull if E is a cast to bool, otherwise, E is 12219 /// compared to a null pointer 12220 /// \param IsEqual True when the comparison is equal to a null pointer 12221 /// \param Range Extra SourceRange to highlight in the diagnostic 12222 void Sema::DiagnoseAlwaysNonNullPointer(Expr *E, 12223 Expr::NullPointerConstantKind NullKind, 12224 bool IsEqual, SourceRange Range) { 12225 if (!E) 12226 return; 12227 12228 // Don't warn inside macros. 12229 if (E->getExprLoc().isMacroID()) { 12230 const SourceManager &SM = getSourceManager(); 12231 if (IsInAnyMacroBody(SM, E->getExprLoc()) || 12232 IsInAnyMacroBody(SM, Range.getBegin())) 12233 return; 12234 } 12235 E = E->IgnoreImpCasts(); 12236 12237 const bool IsCompare = NullKind != Expr::NPCK_NotNull; 12238 12239 if (isa<CXXThisExpr>(E)) { 12240 unsigned DiagID = IsCompare ? diag::warn_this_null_compare 12241 : diag::warn_this_bool_conversion; 12242 Diag(E->getExprLoc(), DiagID) << E->getSourceRange() << Range << IsEqual; 12243 return; 12244 } 12245 12246 bool IsAddressOf = false; 12247 12248 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) { 12249 if (UO->getOpcode() != UO_AddrOf) 12250 return; 12251 IsAddressOf = true; 12252 E = UO->getSubExpr(); 12253 } 12254 12255 if (IsAddressOf) { 12256 unsigned DiagID = IsCompare 12257 ? diag::warn_address_of_reference_null_compare 12258 : diag::warn_address_of_reference_bool_conversion; 12259 PartialDiagnostic PD = PDiag(DiagID) << E->getSourceRange() << Range 12260 << IsEqual; 12261 if (CheckForReference(*this, E, PD)) { 12262 return; 12263 } 12264 } 12265 12266 auto ComplainAboutNonnullParamOrCall = [&](const Attr *NonnullAttr) { 12267 bool IsParam = isa<NonNullAttr>(NonnullAttr); 12268 std::string Str; 12269 llvm::raw_string_ostream S(Str); 12270 E->printPretty(S, nullptr, getPrintingPolicy()); 12271 unsigned DiagID = IsCompare ? diag::warn_nonnull_expr_compare 12272 : diag::warn_cast_nonnull_to_bool; 12273 Diag(E->getExprLoc(), DiagID) << IsParam << S.str() 12274 << E->getSourceRange() << Range << IsEqual; 12275 Diag(NonnullAttr->getLocation(), diag::note_declared_nonnull) << IsParam; 12276 }; 12277 12278 // If we have a CallExpr that is tagged with returns_nonnull, we can complain. 12279 if (auto *Call = dyn_cast<CallExpr>(E->IgnoreParenImpCasts())) { 12280 if (auto *Callee = Call->getDirectCallee()) { 12281 if (const Attr *A = Callee->getAttr<ReturnsNonNullAttr>()) { 12282 ComplainAboutNonnullParamOrCall(A); 12283 return; 12284 } 12285 } 12286 } 12287 12288 // Expect to find a single Decl. Skip anything more complicated. 12289 ValueDecl *D = nullptr; 12290 if (DeclRefExpr *R = dyn_cast<DeclRefExpr>(E)) { 12291 D = R->getDecl(); 12292 } else if (MemberExpr *M = dyn_cast<MemberExpr>(E)) { 12293 D = M->getMemberDecl(); 12294 } 12295 12296 // Weak Decls can be null. 
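// e.g. (illustrative): for 'extern int x __attribute__((weak));', a check
// like 'if (&x)' is meaningful, so no warning is emitted.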
12297 if (!D || D->isWeak())
12298 return;
12299
12300 // Check for a parameter decl with a nonnull attribute.
12301 if (const auto* PV = dyn_cast<ParmVarDecl>(D)) {
12302 if (getCurFunction() &&
12303 !getCurFunction()->ModifiedNonNullParams.count(PV)) {
12304 if (const Attr *A = PV->getAttr<NonNullAttr>()) {
12305 ComplainAboutNonnullParamOrCall(A);
12306 return;
12307 }
12308
12309 if (const auto *FD = dyn_cast<FunctionDecl>(PV->getDeclContext())) {
12310 // Skip function templates that are not specialized yet.
12311 if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate)
12312 return;
12313 auto ParamIter = llvm::find(FD->parameters(), PV);
12314 assert(ParamIter != FD->param_end());
12315 unsigned ParamNo = std::distance(FD->param_begin(), ParamIter);
12316
12317 for (const auto *NonNull : FD->specific_attrs<NonNullAttr>()) {
12318 if (!NonNull->args_size()) {
12319 ComplainAboutNonnullParamOrCall(NonNull);
12320 return;
12321 }
12322
12323 for (const ParamIdx &ArgNo : NonNull->args()) {
12324 if (ArgNo.getASTIndex() == ParamNo) {
12325 ComplainAboutNonnullParamOrCall(NonNull);
12326 return;
12327 }
12328 }
12329 }
12330 }
12331 }
12332 }
12333
12334 QualType T = D->getType();
12335 const bool IsArray = T->isArrayType();
12336 const bool IsFunction = T->isFunctionType();
12337
12338 // Taking the address of a function silences the function warning.
12339 if (IsAddressOf && IsFunction) {
12340 return;
12341 }
12342
12343 // Found nothing.
12344 if (!IsAddressOf && !IsFunction && !IsArray)
12345 return;
12346
12347 // Pretty print the expression for the diagnostic.
12348 std::string Str;
12349 llvm::raw_string_ostream S(Str);
12350 E->printPretty(S, nullptr, getPrintingPolicy());
12351
12352 unsigned DiagID = IsCompare ? diag::warn_null_pointer_compare
12353 : diag::warn_impcast_pointer_to_bool;
12354 enum {
12355 AddressOf,
12356 FunctionPointer,
12357 ArrayPointer
12358 } DiagType;
12359 if (IsAddressOf)
12360 DiagType = AddressOf;
12361 else if (IsFunction)
12362 DiagType = FunctionPointer;
12363 else if (IsArray)
12364 DiagType = ArrayPointer;
12365 else
12366 llvm_unreachable("Could not determine diagnostic.");
12367 Diag(E->getExprLoc(), DiagID) << DiagType << S.str() << E->getSourceRange()
12368 << Range << IsEqual;
12369
12370 if (!IsFunction)
12371 return;
12372
12373 // Suggest '&' to silence the function warning.
12374 Diag(E->getExprLoc(), diag::note_function_warning_silence)
12375 << FixItHint::CreateInsertion(E->getBeginLoc(), "&");
12376
12377 // Check to see if '()' fixit should be emitted.
12378 QualType ReturnType;
12379 UnresolvedSet<4> NonTemplateOverloads;
12380 tryExprAsCall(*E, ReturnType, NonTemplateOverloads);
12381 if (ReturnType.isNull())
12382 return;
12383
12384 if (IsCompare) {
12385 // There are two cases here. If the null is 'nullptr' or '__null', only
12386 // suggest the fixit for a pointer return type. If the null is a plain 0,
12387 // suggest it if the return type is a pointer or an integer type.
12388 if (!ReturnType->isPointerType()) {
12389 if (NullKind == Expr::NPCK_ZeroExpression ||
12390 NullKind == Expr::NPCK_ZeroLiteral) {
12391 if (!ReturnType->isIntegerType())
12392 return;
12393 } else {
12394 return;
12395 }
12396 }
12397 } else { // !IsCompare
12398 // For function to bool, only suggest if the function pointer has bool
12399 // return type.
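// e.g. (illustrative): for 'bool ready(); if (ready) ...' we suggest
// appending '()', but not for 'int count(); if (count) ...'.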
12400 if (!ReturnType->isSpecificBuiltinType(BuiltinType::Bool))
12401 return;
12402 }
12403 Diag(E->getExprLoc(), diag::note_function_to_function_call)
12404 << FixItHint::CreateInsertion(getLocForEndOfToken(E->getEndLoc()), "()");
12405 }
12406
12407 /// Diagnoses "dangerous" implicit conversions within the given
12408 /// expression (which is a full expression). Implements -Wconversion
12409 /// and -Wsign-compare.
12410 ///
12411 /// \param CC the "context" location of the implicit conversion, i.e.
12412 /// the location of the syntactic entity requiring the implicit
12413 /// conversion.
12414 void Sema::CheckImplicitConversions(Expr *E, SourceLocation CC) {
12415 // Don't diagnose in unevaluated contexts.
12416 if (isUnevaluatedContext())
12417 return;
12418
12419 // Don't diagnose for value- or type-dependent expressions.
12420 if (E->isTypeDependent() || E->isValueDependent())
12421 return;
12422
12423 // Check for array bounds violations in cases where the check isn't triggered
12424 // elsewhere for other Expr types (like BinaryOperators), e.g. when an
12425 // ArraySubscriptExpr is on the RHS of a variable initialization.
12426 CheckArrayAccess(E);
12427
12428 // This is not the right CC for (e.g.) a variable initialization.
12429 AnalyzeImplicitConversions(*this, E, CC);
12430 }
12431
12432 /// CheckBoolLikeConversion - Check conversion of given expression to boolean.
12433 /// Input argument E is a logical expression.
12434 void Sema::CheckBoolLikeConversion(Expr *E, SourceLocation CC) {
12435 ::CheckBoolLikeConversion(*this, E, CC);
12436 }
12437
12438 /// Diagnose when an expression is an integer constant expression whose
12439 /// evaluation results in integer overflow.
12440 void Sema::CheckForIntOverflow(Expr *E) {
12441 // Use a work list to deal with nested struct initializers.
12442 SmallVector<Expr *, 2> Exprs(1, E);
12443
12444 do {
12445 Expr *OriginalE = Exprs.pop_back_val();
12446 Expr *E = OriginalE->IgnoreParenCasts();
12447
12448 if (isa<BinaryOperator>(E)) {
12449 E->EvaluateForOverflow(Context);
12450 continue;
12451 }
12452
12453 if (auto InitList = dyn_cast<InitListExpr>(OriginalE))
12454 Exprs.append(InitList->inits().begin(), InitList->inits().end());
12455 else if (isa<ObjCBoxedExpr>(OriginalE))
12456 E->EvaluateForOverflow(Context);
12457 else if (auto Call = dyn_cast<CallExpr>(E))
12458 Exprs.append(Call->arg_begin(), Call->arg_end());
12459 else if (auto Message = dyn_cast<ObjCMessageExpr>(E))
12460 Exprs.append(Message->arg_begin(), Message->arg_end());
12461 } while (!Exprs.empty());
12462 }
12463
12464 namespace {
12465
12466 /// Visitor for expressions which looks for unsequenced operations on the
12467 /// same object.
12468 class SequenceChecker : public EvaluatedExprVisitor<SequenceChecker> {
12469 using Base = EvaluatedExprVisitor<SequenceChecker>;
12470
12471 /// A tree of sequenced regions within an expression. Two regions are
12472 /// unsequenced if one is an ancestor or a descendant of the other. When we
12473 /// finish processing an expression with sequencing, such as a comma
12474 /// expression, we fold its tree nodes into its parent, since they are
12475 /// unsequenced with respect to nodes we will visit later.
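/// Illustrative example: in '(i++, i) + i++' the comma expression forms a
/// sequenced region, but once that region is folded into its parent the
/// inner 'i++' is treated as unsequenced with respect to the trailing
/// 'i++', so the expression is still diagnosed.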
12476 class SequenceTree { 12477 struct Value { 12478 explicit Value(unsigned Parent) : Parent(Parent), Merged(false) {} 12479 unsigned Parent : 31; 12480 unsigned Merged : 1; 12481 }; 12482 SmallVector<Value, 8> Values; 12483 12484 public: 12485 /// A region within an expression which may be sequenced with respect 12486 /// to some other region. 12487 class Seq { 12488 friend class SequenceTree; 12489 12490 unsigned Index; 12491 12492 explicit Seq(unsigned N) : Index(N) {} 12493 12494 public: 12495 Seq() : Index(0) {} 12496 }; 12497 12498 SequenceTree() { Values.push_back(Value(0)); } 12499 Seq root() const { return Seq(0); } 12500 12501 /// Create a new sequence of operations, which is an unsequenced 12502 /// subset of \p Parent. This sequence of operations is sequenced with 12503 /// respect to other children of \p Parent. 12504 Seq allocate(Seq Parent) { 12505 Values.push_back(Value(Parent.Index)); 12506 return Seq(Values.size() - 1); 12507 } 12508 12509 /// Merge a sequence of operations into its parent. 12510 void merge(Seq S) { 12511 Values[S.Index].Merged = true; 12512 } 12513 12514 /// Determine whether two operations are unsequenced. This operation 12515 /// is asymmetric: \p Cur should be the more recent sequence, and \p Old 12516 /// should have been merged into its parent as appropriate. 12517 bool isUnsequenced(Seq Cur, Seq Old) { 12518 unsigned C = representative(Cur.Index); 12519 unsigned Target = representative(Old.Index); 12520 while (C >= Target) { 12521 if (C == Target) 12522 return true; 12523 C = Values[C].Parent; 12524 } 12525 return false; 12526 } 12527 12528 private: 12529 /// Pick a representative for a sequence. 12530 unsigned representative(unsigned K) { 12531 if (Values[K].Merged) 12532 // Perform path compression as we go. 12533 return Values[K].Parent = representative(Values[K].Parent); 12534 return K; 12535 } 12536 }; 12537 12538 /// An object for which we can track unsequenced uses. 12539 using Object = NamedDecl *; 12540 12541 /// Different flavors of object usage which we track. We only track the 12542 /// least-sequenced usage of each kind. 12543 enum UsageKind { 12544 /// A read of an object. Multiple unsequenced reads are OK. 12545 UK_Use, 12546 12547 /// A modification of an object which is sequenced before the value 12548 /// computation of the expression, such as ++n in C++. 12549 UK_ModAsValue, 12550 12551 /// A modification of an object which is not sequenced before the value 12552 /// computation of the expression, such as n++. 12553 UK_ModAsSideEffect, 12554 12555 UK_Count = UK_ModAsSideEffect + 1 12556 }; 12557 12558 struct Usage { 12559 Expr *Use; 12560 SequenceTree::Seq Seq; 12561 12562 Usage() : Use(nullptr), Seq() {} 12563 }; 12564 12565 struct UsageInfo { 12566 Usage Uses[UK_Count]; 12567 12568 /// Have we issued a diagnostic for this variable already? 12569 bool Diagnosed; 12570 12571 UsageInfo() : Uses(), Diagnosed(false) {} 12572 }; 12573 using UsageInfoMap = llvm::SmallDenseMap<Object, UsageInfo, 16>; 12574 12575 Sema &SemaRef; 12576 12577 /// Sequenced regions within the expression. 12578 SequenceTree Tree; 12579 12580 /// Declaration modifications and references which we have seen. 12581 UsageInfoMap UsageMap; 12582 12583 /// The region we are currently within. 12584 SequenceTree::Seq Region; 12585 12586 /// Filled in with declarations which were modified as a side-effect 12587 /// (that is, post-increment operations). 
12588 SmallVectorImpl<std::pair<Object, Usage>> *ModAsSideEffect = nullptr; 12589 12590 /// Expressions to check later. We defer checking these to reduce 12591 /// stack usage. 12592 SmallVectorImpl<Expr *> &WorkList; 12593 12594 /// RAII object wrapping the visitation of a sequenced subexpression of an 12595 /// expression. At the end of this process, the side-effects of the evaluation 12596 /// become sequenced with respect to the value computation of the result, so 12597 /// we downgrade any UK_ModAsSideEffect within the evaluation to 12598 /// UK_ModAsValue. 12599 struct SequencedSubexpression { 12600 SequencedSubexpression(SequenceChecker &Self) 12601 : Self(Self), OldModAsSideEffect(Self.ModAsSideEffect) { 12602 Self.ModAsSideEffect = &ModAsSideEffect; 12603 } 12604 12605 ~SequencedSubexpression() { 12606 for (auto &M : llvm::reverse(ModAsSideEffect)) { 12607 UsageInfo &U = Self.UsageMap[M.first]; 12608 auto &SideEffectUsage = U.Uses[UK_ModAsSideEffect]; 12609 Self.addUsage(U, M.first, SideEffectUsage.Use, UK_ModAsValue); 12610 SideEffectUsage = M.second; 12611 } 12612 Self.ModAsSideEffect = OldModAsSideEffect; 12613 } 12614 12615 SequenceChecker &Self; 12616 SmallVector<std::pair<Object, Usage>, 4> ModAsSideEffect; 12617 SmallVectorImpl<std::pair<Object, Usage>> *OldModAsSideEffect; 12618 }; 12619 12620 /// RAII object wrapping the visitation of a subexpression which we might 12621 /// choose to evaluate as a constant. If any subexpression is evaluated and 12622 /// found to be non-constant, this allows us to suppress the evaluation of 12623 /// the outer expression. 12624 class EvaluationTracker { 12625 public: 12626 EvaluationTracker(SequenceChecker &Self) 12627 : Self(Self), Prev(Self.EvalTracker) { 12628 Self.EvalTracker = this; 12629 } 12630 12631 ~EvaluationTracker() { 12632 Self.EvalTracker = Prev; 12633 if (Prev) 12634 Prev->EvalOK &= EvalOK; 12635 } 12636 12637 bool evaluate(const Expr *E, bool &Result) { 12638 if (!EvalOK || E->isValueDependent()) 12639 return false; 12640 EvalOK = E->EvaluateAsBooleanCondition( 12641 Result, Self.SemaRef.Context, Self.SemaRef.isConstantEvaluated()); 12642 return EvalOK; 12643 } 12644 12645 private: 12646 SequenceChecker &Self; 12647 EvaluationTracker *Prev; 12648 bool EvalOK = true; 12649 } *EvalTracker = nullptr; 12650 12651 /// Find the object which is produced by the specified expression, 12652 /// if any. 12653 Object getObject(Expr *E, bool Mod) const { 12654 E = E->IgnoreParenCasts(); 12655 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) { 12656 if (Mod && (UO->getOpcode() == UO_PreInc || UO->getOpcode() == UO_PreDec)) 12657 return getObject(UO->getSubExpr(), Mod); 12658 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { 12659 if (BO->getOpcode() == BO_Comma) 12660 return getObject(BO->getRHS(), Mod); 12661 if (Mod && BO->isAssignmentOp()) 12662 return getObject(BO->getLHS(), Mod); 12663 } else if (MemberExpr *ME = dyn_cast<MemberExpr>(E)) { 12664 // FIXME: Check for more interesting cases, like "x.n = ++x.n". 12665 if (isa<CXXThisExpr>(ME->getBase()->IgnoreParenCasts())) 12666 return ME->getMemberDecl(); 12667 } else if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) 12668 // FIXME: If this is a reference, map through to its value. 12669 return DRE->getDecl(); 12670 return nullptr; 12671 } 12672 12673 /// Note that an object was modified or used by an expression. 
12674 void addUsage(UsageInfo &UI, Object O, Expr *Ref, UsageKind UK) { 12675 Usage &U = UI.Uses[UK]; 12676 if (!U.Use || !Tree.isUnsequenced(Region, U.Seq)) { 12677 if (UK == UK_ModAsSideEffect && ModAsSideEffect) 12678 ModAsSideEffect->push_back(std::make_pair(O, U)); 12679 U.Use = Ref; 12680 U.Seq = Region; 12681 } 12682 } 12683 12684 /// Check whether a modification or use conflicts with a prior usage. 12685 void checkUsage(Object O, UsageInfo &UI, Expr *Ref, UsageKind OtherKind, 12686 bool IsModMod) { 12687 if (UI.Diagnosed) 12688 return; 12689 12690 const Usage &U = UI.Uses[OtherKind]; 12691 if (!U.Use || !Tree.isUnsequenced(Region, U.Seq)) 12692 return; 12693 12694 Expr *Mod = U.Use; 12695 Expr *ModOrUse = Ref; 12696 if (OtherKind == UK_Use) 12697 std::swap(Mod, ModOrUse); 12698 12699 SemaRef.DiagRuntimeBehavior( 12700 Mod->getExprLoc(), {Mod, ModOrUse}, 12701 SemaRef.PDiag(IsModMod ? diag::warn_unsequenced_mod_mod 12702 : diag::warn_unsequenced_mod_use) 12703 << O << SourceRange(ModOrUse->getExprLoc())); 12704 UI.Diagnosed = true; 12705 } 12706 12707 void notePreUse(Object O, Expr *Use) { 12708 UsageInfo &U = UsageMap[O]; 12709 // Uses conflict with other modifications. 12710 checkUsage(O, U, Use, UK_ModAsValue, false); 12711 } 12712 12713 void notePostUse(Object O, Expr *Use) { 12714 UsageInfo &U = UsageMap[O]; 12715 checkUsage(O, U, Use, UK_ModAsSideEffect, false); 12716 addUsage(U, O, Use, UK_Use); 12717 } 12718 12719 void notePreMod(Object O, Expr *Mod) { 12720 UsageInfo &U = UsageMap[O]; 12721 // Modifications conflict with other modifications and with uses. 12722 checkUsage(O, U, Mod, UK_ModAsValue, true); 12723 checkUsage(O, U, Mod, UK_Use, false); 12724 } 12725 12726 void notePostMod(Object O, Expr *Use, UsageKind UK) { 12727 UsageInfo &U = UsageMap[O]; 12728 checkUsage(O, U, Use, UK_ModAsSideEffect, true); 12729 addUsage(U, O, Use, UK); 12730 } 12731 12732 public: 12733 SequenceChecker(Sema &S, Expr *E, SmallVectorImpl<Expr *> &WorkList) 12734 : Base(S.Context), SemaRef(S), Region(Tree.root()), WorkList(WorkList) { 12735 Visit(E); 12736 } 12737 12738 void VisitStmt(Stmt *S) { 12739 // Skip all statements which aren't expressions for now. 12740 } 12741 12742 void VisitExpr(Expr *E) { 12743 // By default, just recurse to evaluated subexpressions. 12744 Base::VisitStmt(E); 12745 } 12746 12747 void VisitCastExpr(CastExpr *E) { 12748 Object O = Object(); 12749 if (E->getCastKind() == CK_LValueToRValue) 12750 O = getObject(E->getSubExpr(), false); 12751 12752 if (O) 12753 notePreUse(O, E); 12754 VisitExpr(E); 12755 if (O) 12756 notePostUse(O, E); 12757 } 12758 12759 void VisitSequencedExpressions(Expr *SequencedBefore, Expr *SequencedAfter) { 12760 SequenceTree::Seq BeforeRegion = Tree.allocate(Region); 12761 SequenceTree::Seq AfterRegion = Tree.allocate(Region); 12762 SequenceTree::Seq OldRegion = Region; 12763 12764 { 12765 SequencedSubexpression SeqBefore(*this); 12766 Region = BeforeRegion; 12767 Visit(SequencedBefore); 12768 } 12769 12770 Region = AfterRegion; 12771 Visit(SequencedAfter); 12772 12773 Region = OldRegion; 12774 12775 Tree.merge(BeforeRegion); 12776 Tree.merge(AfterRegion); 12777 } 12778 12779 void VisitArraySubscriptExpr(ArraySubscriptExpr *ASE) { 12780 // C++17 [expr.sub]p1: 12781 // The expression E1[E2] is identical (by definition) to *((E1)+(E2)). The 12782 // expression E1 is sequenced before the expression E2. 
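// e.g. (illustrative): in C++17, 'buf[i++][i]' is not flagged, because the
// 'i++' in the first subscript is sequenced before the use of 'i' in the
// second one; earlier language modes still diagnose it as unsequenced.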
12783 if (SemaRef.getLangOpts().CPlusPlus17) 12784 VisitSequencedExpressions(ASE->getLHS(), ASE->getRHS()); 12785 else 12786 Base::VisitStmt(ASE); 12787 } 12788 12789 void VisitBinComma(BinaryOperator *BO) { 12790 // C++11 [expr.comma]p1: 12791 // Every value computation and side effect associated with the left 12792 // expression is sequenced before every value computation and side 12793 // effect associated with the right expression. 12794 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 12795 } 12796 12797 void VisitBinAssign(BinaryOperator *BO) { 12798 // The modification is sequenced after the value computation of the LHS 12799 // and RHS, so check it before inspecting the operands and update the 12800 // map afterwards. 12801 Object O = getObject(BO->getLHS(), true); 12802 if (!O) 12803 return VisitExpr(BO); 12804 12805 notePreMod(O, BO); 12806 12807 // C++11 [expr.ass]p7: 12808 // E1 op= E2 is equivalent to E1 = E1 op E2, except that E1 is evaluated 12809 // only once. 12810 // 12811 // Therefore, for a compound assignment operator, O is considered used 12812 // everywhere except within the evaluation of E1 itself. 12813 if (isa<CompoundAssignOperator>(BO)) 12814 notePreUse(O, BO); 12815 12816 Visit(BO->getLHS()); 12817 12818 if (isa<CompoundAssignOperator>(BO)) 12819 notePostUse(O, BO); 12820 12821 Visit(BO->getRHS()); 12822 12823 // C++11 [expr.ass]p1: 12824 // the assignment is sequenced [...] before the value computation of the 12825 // assignment expression. 12826 // C11 6.5.16/3 has no such rule. 12827 notePostMod(O, BO, SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue 12828 : UK_ModAsSideEffect); 12829 } 12830 12831 void VisitCompoundAssignOperator(CompoundAssignOperator *CAO) { 12832 VisitBinAssign(CAO); 12833 } 12834 12835 void VisitUnaryPreInc(UnaryOperator *UO) { VisitUnaryPreIncDec(UO); } 12836 void VisitUnaryPreDec(UnaryOperator *UO) { VisitUnaryPreIncDec(UO); } 12837 void VisitUnaryPreIncDec(UnaryOperator *UO) { 12838 Object O = getObject(UO->getSubExpr(), true); 12839 if (!O) 12840 return VisitExpr(UO); 12841 12842 notePreMod(O, UO); 12843 Visit(UO->getSubExpr()); 12844 // C++11 [expr.pre.incr]p1: 12845 // the expression ++x is equivalent to x+=1 12846 notePostMod(O, UO, SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue 12847 : UK_ModAsSideEffect); 12848 } 12849 12850 void VisitUnaryPostInc(UnaryOperator *UO) { VisitUnaryPostIncDec(UO); } 12851 void VisitUnaryPostDec(UnaryOperator *UO) { VisitUnaryPostIncDec(UO); } 12852 void VisitUnaryPostIncDec(UnaryOperator *UO) { 12853 Object O = getObject(UO->getSubExpr(), true); 12854 if (!O) 12855 return VisitExpr(UO); 12856 12857 notePreMod(O, UO); 12858 Visit(UO->getSubExpr()); 12859 notePostMod(O, UO, UK_ModAsSideEffect); 12860 } 12861 12862 /// Don't visit the RHS of '&&' or '||' if it might not be evaluated. 12863 void VisitBinLOr(BinaryOperator *BO) { 12864 // The side-effects of the LHS of an '&&' are sequenced before the 12865 // value computation of the RHS, and hence before the value computation 12866 // of the '&&' itself, unless the LHS evaluates to zero. We treat them 12867 // as if they were unconditionally sequenced. 12868 EvaluationTracker Eval(*this); 12869 { 12870 SequencedSubexpression Sequenced(*this); 12871 Visit(BO->getLHS()); 12872 } 12873 12874 bool Result; 12875 if (Eval.evaluate(BO->getLHS(), Result)) { 12876 if (!Result) 12877 Visit(BO->getRHS()); 12878 } else { 12879 // Check for unsequenced operations in the RHS, treating it as an 12880 // entirely separate evaluation. 
12881 // 12882 // FIXME: If there are operations in the RHS which are unsequenced 12883 // with respect to operations outside the RHS, and those operations 12884 // are unconditionally evaluated, diagnose them. 12885 WorkList.push_back(BO->getRHS()); 12886 } 12887 } 12888 void VisitBinLAnd(BinaryOperator *BO) { 12889 EvaluationTracker Eval(*this); 12890 { 12891 SequencedSubexpression Sequenced(*this); 12892 Visit(BO->getLHS()); 12893 } 12894 12895 bool Result; 12896 if (Eval.evaluate(BO->getLHS(), Result)) { 12897 if (Result) 12898 Visit(BO->getRHS()); 12899 } else { 12900 WorkList.push_back(BO->getRHS()); 12901 } 12902 } 12903 12904 // Only visit the condition, unless we can be sure which subexpression will 12905 // be chosen. 12906 void VisitAbstractConditionalOperator(AbstractConditionalOperator *CO) { 12907 EvaluationTracker Eval(*this); 12908 { 12909 SequencedSubexpression Sequenced(*this); 12910 Visit(CO->getCond()); 12911 } 12912 12913 bool Result; 12914 if (Eval.evaluate(CO->getCond(), Result)) 12915 Visit(Result ? CO->getTrueExpr() : CO->getFalseExpr()); 12916 else { 12917 WorkList.push_back(CO->getTrueExpr()); 12918 WorkList.push_back(CO->getFalseExpr()); 12919 } 12920 } 12921 12922 void VisitCallExpr(CallExpr *CE) { 12923 // C++11 [intro.execution]p15: 12924 // When calling a function [...], every value computation and side effect 12925 // associated with any argument expression, or with the postfix expression 12926 // designating the called function, is sequenced before execution of every 12927 // expression or statement in the body of the function [and thus before 12928 // the value computation of its result]. 12929 SequencedSubexpression Sequenced(*this); 12930 Base::VisitCallExpr(CE); 12931 12932 // FIXME: CXXNewExpr and CXXDeleteExpr implicitly call functions. 12933 } 12934 12935 void VisitCXXConstructExpr(CXXConstructExpr *CCE) { 12936 // This is a call, so all subexpressions are sequenced before the result. 12937 SequencedSubexpression Sequenced(*this); 12938 12939 if (!CCE->isListInitialization()) 12940 return VisitExpr(CCE); 12941 12942 // In C++11, list initializations are sequenced. 12943 SmallVector<SequenceTree::Seq, 32> Elts; 12944 SequenceTree::Seq Parent = Region; 12945 for (CXXConstructExpr::arg_iterator I = CCE->arg_begin(), 12946 E = CCE->arg_end(); 12947 I != E; ++I) { 12948 Region = Tree.allocate(Parent); 12949 Elts.push_back(Region); 12950 Visit(*I); 12951 } 12952 12953 // Forget that the initializers are sequenced. 12954 Region = Parent; 12955 for (unsigned I = 0; I < Elts.size(); ++I) 12956 Tree.merge(Elts[I]); 12957 } 12958 12959 void VisitInitListExpr(InitListExpr *ILE) { 12960 if (!SemaRef.getLangOpts().CPlusPlus11) 12961 return VisitExpr(ILE); 12962 12963 // In C++11, list initializations are sequenced. 12964 SmallVector<SequenceTree::Seq, 32> Elts; 12965 SequenceTree::Seq Parent = Region; 12966 for (unsigned I = 0; I < ILE->getNumInits(); ++I) { 12967 Expr *E = ILE->getInit(I); 12968 if (!E) continue; 12969 Region = Tree.allocate(Parent); 12970 Elts.push_back(Region); 12971 Visit(E); 12972 } 12973 12974 // Forget that the initializers are sequenced. 
12975 Region = Parent; 12976 for (unsigned I = 0; I < Elts.size(); ++I) 12977 Tree.merge(Elts[I]); 12978 } 12979 }; 12980 12981 } // namespace 12982 12983 void Sema::CheckUnsequencedOperations(Expr *E) { 12984 SmallVector<Expr *, 8> WorkList; 12985 WorkList.push_back(E); 12986 while (!WorkList.empty()) { 12987 Expr *Item = WorkList.pop_back_val(); 12988 SequenceChecker(*this, Item, WorkList); 12989 } 12990 } 12991 12992 void Sema::CheckCompletedExpr(Expr *E, SourceLocation CheckLoc, 12993 bool IsConstexpr) { 12994 llvm::SaveAndRestore<bool> ConstantContext( 12995 isConstantEvaluatedOverride, IsConstexpr || isa<ConstantExpr>(E)); 12996 CheckImplicitConversions(E, CheckLoc); 12997 if (!E->isInstantiationDependent()) 12998 CheckUnsequencedOperations(E); 12999 if (!IsConstexpr && !E->isValueDependent()) 13000 CheckForIntOverflow(E); 13001 DiagnoseMisalignedMembers(); 13002 } 13003 13004 void Sema::CheckBitFieldInitialization(SourceLocation InitLoc, 13005 FieldDecl *BitField, 13006 Expr *Init) { 13007 (void) AnalyzeBitFieldAssignment(*this, BitField, Init, InitLoc); 13008 } 13009 13010 static void diagnoseArrayStarInParamType(Sema &S, QualType PType, 13011 SourceLocation Loc) { 13012 if (!PType->isVariablyModifiedType()) 13013 return; 13014 if (const auto *PointerTy = dyn_cast<PointerType>(PType)) { 13015 diagnoseArrayStarInParamType(S, PointerTy->getPointeeType(), Loc); 13016 return; 13017 } 13018 if (const auto *ReferenceTy = dyn_cast<ReferenceType>(PType)) { 13019 diagnoseArrayStarInParamType(S, ReferenceTy->getPointeeType(), Loc); 13020 return; 13021 } 13022 if (const auto *ParenTy = dyn_cast<ParenType>(PType)) { 13023 diagnoseArrayStarInParamType(S, ParenTy->getInnerType(), Loc); 13024 return; 13025 } 13026 13027 const ArrayType *AT = S.Context.getAsArrayType(PType); 13028 if (!AT) 13029 return; 13030 13031 if (AT->getSizeModifier() != ArrayType::Star) { 13032 diagnoseArrayStarInParamType(S, AT->getElementType(), Loc); 13033 return; 13034 } 13035 13036 S.Diag(Loc, diag::err_array_star_in_function_definition); 13037 } 13038 13039 /// CheckParmsForFunctionDef - Check that the parameters of the given 13040 /// function are appropriate for the definition of a function. This 13041 /// takes care of any checks that cannot be performed on the 13042 /// declaration itself, e.g., that the types of each of the function 13043 /// parameters are complete. 13044 bool Sema::CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, 13045 bool CheckParameterNames) { 13046 bool HasInvalidParm = false; 13047 for (ParmVarDecl *Param : Parameters) { 13048 // C99 6.7.5.3p4: the parameters in a parameter type list in a 13049 // function declarator that is part of a function definition of 13050 // that function shall not have incomplete type. 13051 // 13052 // This is also C++ [dcl.fct]p6. 13053 if (!Param->isInvalidDecl() && 13054 RequireCompleteType(Param->getLocation(), Param->getType(), 13055 diag::err_typecheck_decl_incomplete_type)) { 13056 Param->setInvalidDecl(); 13057 HasInvalidParm = true; 13058 } 13059 13060 // C99 6.9.1p5: If the declarator includes a parameter type list, the 13061 // declaration of each parameter shall include an identifier. 
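    // For example (illustrative, not from the original source), a C
    // definition such as 'void f(int) { }' is diagnosed here because the
    // parameter has no name.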
13062     if (CheckParameterNames &&
13063         Param->getIdentifier() == nullptr &&
13064         !Param->isImplicit() &&
13065         !getLangOpts().CPlusPlus)
13066       Diag(Param->getLocation(), diag::err_parameter_name_omitted);
13067
13068     // C99 6.7.5.3p12:
13069     //   If the function declarator is not part of a definition of that
13070     //   function, parameters may have incomplete type and may use the [*]
13071     //   notation in their sequences of declarator specifiers to specify
13072     //   variable length array types.
13073     QualType PType = Param->getOriginalType();
13074     // FIXME: This diagnostic should point to the '[*]' if source-location
13075     // information is added for it.
13076     diagnoseArrayStarInParamType(*this, PType, Param->getLocation());
13077
13078     // If the parameter is a C++ class type and it has to be destructed in the
13079     // callee function, declare the destructor so that it can be called by the
13080     // callee function. Do not perform any direct access check on the dtor here.
13081     if (!Param->isInvalidDecl()) {
13082       if (CXXRecordDecl *ClassDecl = Param->getType()->getAsCXXRecordDecl()) {
13083         if (!ClassDecl->isInvalidDecl() &&
13084             !ClassDecl->hasIrrelevantDestructor() &&
13085             !ClassDecl->isDependentContext() &&
13086             ClassDecl->isParamDestroyedInCallee()) {
13087           CXXDestructorDecl *Destructor = LookupDestructor(ClassDecl);
13088           MarkFunctionReferenced(Param->getLocation(), Destructor);
13089           DiagnoseUseOfDecl(Destructor, Param->getLocation());
13090         }
13091       }
13092     }
13093
13094     // Parameters with the pass_object_size attribute only need to be marked
13095     // constant at function definitions. Because we lack information about
13096     // whether we're on a declaration or definition when we're instantiating the
13097     // attribute, we need to check for constness here.
13098     if (const auto *Attr = Param->getAttr<PassObjectSizeAttr>())
13099       if (!Param->getType().isConstQualified())
13100         Diag(Param->getLocation(), diag::err_attribute_pointers_only)
13101             << Attr->getSpelling() << 1;
13102
13103     // Check for parameter names shadowing fields from the class.
13104     if (LangOpts.CPlusPlus && !Param->isInvalidDecl()) {
13105       // The owning context for the parameter should be the function, but we
13106       // want to see if this function's declaration context is a record.
13107       DeclContext *DC = Param->getDeclContext();
13108       if (DC && DC->isFunctionOrMethod()) {
13109         if (auto *RD = dyn_cast<CXXRecordDecl>(DC->getParent()))
13110           CheckShadowInheritedFields(Param->getLocation(), Param->getDeclName(),
13111                                      RD, /*DeclIsField*/ false);
13112       }
13113     }
13114   }
13115
13116   return HasInvalidParm;
13117 }
13118
13119 /// A helper function to get the alignment of a Decl referred to by DeclRefExpr
13120 /// or MemberExpr.
13121 static CharUnits getDeclAlign(Expr *E, CharUnits TypeAlign,
13122                               ASTContext &Context) {
13123   if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
13124     return Context.getDeclAlign(DRE->getDecl());
13125
13126   if (const auto *ME = dyn_cast<MemberExpr>(E))
13127     return Context.getDeclAlign(ME->getMemberDecl());
13128
13129   return TypeAlign;
13130 }
13131
13132 /// CheckCastAlign - Implements -Wcast-align, which warns when a
13133 /// pointer cast increases the alignment requirements.
13134 void Sema::CheckCastAlign(Expr *Op, QualType T, SourceRange TRange) {
13135   // This is actually a lot of work to potentially be doing on every
13136   // cast; don't do it if we're ignoring -Wcast-align (as is the default).
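  // Illustrative example (not from the original source): for
  //   char buf[64];
  //   int *ip = (int *)buf;
  // the cast raises the required alignment from 1 to alignof(int), so
  // -Wcast-align (when enabled) reports it.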
13137 if (getDiagnostics().isIgnored(diag::warn_cast_align, TRange.getBegin())) 13138 return; 13139 13140 // Ignore dependent types. 13141 if (T->isDependentType() || Op->getType()->isDependentType()) 13142 return; 13143 13144 // Require that the destination be a pointer type. 13145 const PointerType *DestPtr = T->getAs<PointerType>(); 13146 if (!DestPtr) return; 13147 13148 // If the destination has alignment 1, we're done. 13149 QualType DestPointee = DestPtr->getPointeeType(); 13150 if (DestPointee->isIncompleteType()) return; 13151 CharUnits DestAlign = Context.getTypeAlignInChars(DestPointee); 13152 if (DestAlign.isOne()) return; 13153 13154 // Require that the source be a pointer type. 13155 const PointerType *SrcPtr = Op->getType()->getAs<PointerType>(); 13156 if (!SrcPtr) return; 13157 QualType SrcPointee = SrcPtr->getPointeeType(); 13158 13159 // Whitelist casts from cv void*. We already implicitly 13160 // whitelisted casts to cv void*, since they have alignment 1. 13161 // Also whitelist casts involving incomplete types, which implicitly 13162 // includes 'void'. 13163 if (SrcPointee->isIncompleteType()) return; 13164 13165 CharUnits SrcAlign = Context.getTypeAlignInChars(SrcPointee); 13166 13167 if (auto *CE = dyn_cast<CastExpr>(Op)) { 13168 if (CE->getCastKind() == CK_ArrayToPointerDecay) 13169 SrcAlign = getDeclAlign(CE->getSubExpr(), SrcAlign, Context); 13170 } else if (auto *UO = dyn_cast<UnaryOperator>(Op)) { 13171 if (UO->getOpcode() == UO_AddrOf) 13172 SrcAlign = getDeclAlign(UO->getSubExpr(), SrcAlign, Context); 13173 } 13174 13175 if (SrcAlign >= DestAlign) return; 13176 13177 Diag(TRange.getBegin(), diag::warn_cast_align) 13178 << Op->getType() << T 13179 << static_cast<unsigned>(SrcAlign.getQuantity()) 13180 << static_cast<unsigned>(DestAlign.getQuantity()) 13181 << TRange << Op->getSourceRange(); 13182 } 13183 13184 /// Check whether this array fits the idiom of a size-one tail padded 13185 /// array member of a struct. 13186 /// 13187 /// We avoid emitting out-of-bounds access warnings for such arrays as they are 13188 /// commonly used to emulate flexible arrays in C89 code. 13189 static bool IsTailPaddedMemberArray(Sema &S, const llvm::APInt &Size, 13190 const NamedDecl *ND) { 13191 if (Size != 1 || !ND) return false; 13192 13193 const FieldDecl *FD = dyn_cast<FieldDecl>(ND); 13194 if (!FD) return false; 13195 13196 // Don't consider sizes resulting from macro expansions or template argument 13197 // substitution to form C89 tail-padded arrays. 13198 13199 TypeSourceInfo *TInfo = FD->getTypeSourceInfo(); 13200 while (TInfo) { 13201 TypeLoc TL = TInfo->getTypeLoc(); 13202 // Look through typedefs. 13203 if (TypedefTypeLoc TTL = TL.getAs<TypedefTypeLoc>()) { 13204 const TypedefNameDecl *TDL = TTL.getTypedefNameDecl(); 13205 TInfo = TDL->getTypeSourceInfo(); 13206 continue; 13207 } 13208 if (ConstantArrayTypeLoc CTL = TL.getAs<ConstantArrayTypeLoc>()) { 13209 const Expr *SizeExpr = dyn_cast<IntegerLiteral>(CTL.getSizeExpr()); 13210 if (!SizeExpr || SizeExpr->getExprLoc().isMacroID()) 13211 return false; 13212 } 13213 break; 13214 } 13215 13216 const RecordDecl *RD = dyn_cast<RecordDecl>(FD->getDeclContext()); 13217 if (!RD) return false; 13218 if (RD->isUnion()) return false; 13219 if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) { 13220 if (!CRD->isStandardLayout()) return false; 13221 } 13222 13223 // See if this is the last field decl in the record. 
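  // The idiom being recognized looks like this (illustrative, not from the
  // original source):
  //   struct S { int count; char data[1]; };
  // where 'data' is a size-one trailing member used as a C89-style flexible
  // array.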
13224 const Decl *D = FD; 13225 while ((D = D->getNextDeclInContext())) 13226 if (isa<FieldDecl>(D)) 13227 return false; 13228 return true; 13229 } 13230 13231 void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, 13232 const ArraySubscriptExpr *ASE, 13233 bool AllowOnePastEnd, bool IndexNegated) { 13234 // Already diagnosed by the constant evaluator. 13235 if (isConstantEvaluated()) 13236 return; 13237 13238 IndexExpr = IndexExpr->IgnoreParenImpCasts(); 13239 if (IndexExpr->isValueDependent()) 13240 return; 13241 13242 const Type *EffectiveType = 13243 BaseExpr->getType()->getPointeeOrArrayElementType(); 13244 BaseExpr = BaseExpr->IgnoreParenCasts(); 13245 const ConstantArrayType *ArrayTy = 13246 Context.getAsConstantArrayType(BaseExpr->getType()); 13247 13248 if (!ArrayTy) 13249 return; 13250 13251 const Type *BaseType = ArrayTy->getElementType().getTypePtr(); 13252 if (EffectiveType->isDependentType() || BaseType->isDependentType()) 13253 return; 13254 13255 Expr::EvalResult Result; 13256 if (!IndexExpr->EvaluateAsInt(Result, Context, Expr::SE_AllowSideEffects)) 13257 return; 13258 13259 llvm::APSInt index = Result.Val.getInt(); 13260 if (IndexNegated) 13261 index = -index; 13262 13263 const NamedDecl *ND = nullptr; 13264 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 13265 ND = DRE->getDecl(); 13266 if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr)) 13267 ND = ME->getMemberDecl(); 13268 13269 if (index.isUnsigned() || !index.isNegative()) { 13270 // It is possible that the type of the base expression after 13271 // IgnoreParenCasts is incomplete, even though the type of the base 13272 // expression before IgnoreParenCasts is complete (see PR39746 for an 13273 // example). In this case we have no information about whether the array 13274 // access exceeds the array bounds. However we can still diagnose an array 13275 // access which precedes the array bounds. 13276 if (BaseType->isIncompleteType()) 13277 return; 13278 13279 llvm::APInt size = ArrayTy->getSize(); 13280 if (!size.isStrictlyPositive()) 13281 return; 13282 13283 if (BaseType != EffectiveType) { 13284 // Make sure we're comparing apples to apples when comparing index to size 13285 uint64_t ptrarith_typesize = Context.getTypeSize(EffectiveType); 13286 uint64_t array_typesize = Context.getTypeSize(BaseType); 13287 // Handle ptrarith_typesize being zero, such as when casting to void* 13288 if (!ptrarith_typesize) ptrarith_typesize = 1; 13289 if (ptrarith_typesize != array_typesize) { 13290 // There's a cast to a different size type involved 13291 uint64_t ratio = array_typesize / ptrarith_typesize; 13292 // TODO: Be smarter about handling cases where array_typesize is not a 13293 // multiple of ptrarith_typesize 13294 if (ptrarith_typesize * ratio == array_typesize) 13295 size *= llvm::APInt(size.getBitWidth(), ratio); 13296 } 13297 } 13298 13299 if (size.getBitWidth() > index.getBitWidth()) 13300 index = index.zext(size.getBitWidth()); 13301 else if (size.getBitWidth() < index.getBitWidth()) 13302 size = size.zext(index.getBitWidth()); 13303 13304 // For array subscripting the index must be less than size, but for pointer 13305 // arithmetic also allow the index (offset) to be equal to size since 13306 // computing the next address after the end of the array is legal and 13307 // commonly done e.g. in C++ iterators and range-based for loops. 13308 if (AllowOnePastEnd ? 
index.ule(size) : index.ult(size)) 13309 return; 13310 13311 // Also don't warn for arrays of size 1 which are members of some 13312 // structure. These are often used to approximate flexible arrays in C89 13313 // code. 13314 if (IsTailPaddedMemberArray(*this, size, ND)) 13315 return; 13316 13317 // Suppress the warning if the subscript expression (as identified by the 13318 // ']' location) and the index expression are both from macro expansions 13319 // within a system header. 13320 if (ASE) { 13321 SourceLocation RBracketLoc = SourceMgr.getSpellingLoc( 13322 ASE->getRBracketLoc()); 13323 if (SourceMgr.isInSystemHeader(RBracketLoc)) { 13324 SourceLocation IndexLoc = 13325 SourceMgr.getSpellingLoc(IndexExpr->getBeginLoc()); 13326 if (SourceMgr.isWrittenInSameFile(RBracketLoc, IndexLoc)) 13327 return; 13328 } 13329 } 13330 13331 unsigned DiagID = diag::warn_ptr_arith_exceeds_bounds; 13332 if (ASE) 13333 DiagID = diag::warn_array_index_exceeds_bounds; 13334 13335 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 13336 PDiag(DiagID) << index.toString(10, true) 13337 << size.toString(10, true) 13338 << (unsigned)size.getLimitedValue(~0U) 13339 << IndexExpr->getSourceRange()); 13340 } else { 13341 unsigned DiagID = diag::warn_array_index_precedes_bounds; 13342 if (!ASE) { 13343 DiagID = diag::warn_ptr_arith_precedes_bounds; 13344 if (index.isNegative()) index = -index; 13345 } 13346 13347 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 13348 PDiag(DiagID) << index.toString(10, true) 13349 << IndexExpr->getSourceRange()); 13350 } 13351 13352 if (!ND) { 13353 // Try harder to find a NamedDecl to point at in the note. 13354 while (const ArraySubscriptExpr *ASE = 13355 dyn_cast<ArraySubscriptExpr>(BaseExpr)) 13356 BaseExpr = ASE->getBase()->IgnoreParenCasts(); 13357 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 13358 ND = DRE->getDecl(); 13359 if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr)) 13360 ND = ME->getMemberDecl(); 13361 } 13362 13363 if (ND) 13364 DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr, 13365 PDiag(diag::note_array_declared_here) 13366 << ND->getDeclName()); 13367 } 13368 13369 void Sema::CheckArrayAccess(const Expr *expr) { 13370 int AllowOnePastEnd = 0; 13371 while (expr) { 13372 expr = expr->IgnoreParenImpCasts(); 13373 switch (expr->getStmtClass()) { 13374 case Stmt::ArraySubscriptExprClass: { 13375 const ArraySubscriptExpr *ASE = cast<ArraySubscriptExpr>(expr); 13376 CheckArrayAccess(ASE->getBase(), ASE->getIdx(), ASE, 13377 AllowOnePastEnd > 0); 13378 expr = ASE->getBase(); 13379 break; 13380 } 13381 case Stmt::MemberExprClass: { 13382 expr = cast<MemberExpr>(expr)->getBase(); 13383 break; 13384 } 13385 case Stmt::OMPArraySectionExprClass: { 13386 const OMPArraySectionExpr *ASE = cast<OMPArraySectionExpr>(expr); 13387 if (ASE->getLowerBound()) 13388 CheckArrayAccess(ASE->getBase(), ASE->getLowerBound(), 13389 /*ASE=*/nullptr, AllowOnePastEnd > 0); 13390 return; 13391 } 13392 case Stmt::UnaryOperatorClass: { 13393 // Only unwrap the * and & unary operators 13394 const UnaryOperator *UO = cast<UnaryOperator>(expr); 13395 expr = UO->getSubExpr(); 13396 switch (UO->getOpcode()) { 13397 case UO_AddrOf: 13398 AllowOnePastEnd++; 13399 break; 13400 case UO_Deref: 13401 AllowOnePastEnd--; 13402 break; 13403 default: 13404 return; 13405 } 13406 break; 13407 } 13408 case Stmt::ConditionalOperatorClass: { 13409 const ConditionalOperator *cond = cast<ConditionalOperator>(expr); 13410 if (const Expr *lhs = cond->getLHS()) 13411 
CheckArrayAccess(lhs); 13412 if (const Expr *rhs = cond->getRHS()) 13413 CheckArrayAccess(rhs); 13414 return; 13415 } 13416 case Stmt::CXXOperatorCallExprClass: { 13417 const auto *OCE = cast<CXXOperatorCallExpr>(expr); 13418 for (const auto *Arg : OCE->arguments()) 13419 CheckArrayAccess(Arg); 13420 return; 13421 } 13422 default: 13423 return; 13424 } 13425 } 13426 } 13427 13428 //===--- CHECK: Objective-C retain cycles ----------------------------------// 13429 13430 namespace { 13431 13432 struct RetainCycleOwner { 13433 VarDecl *Variable = nullptr; 13434 SourceRange Range; 13435 SourceLocation Loc; 13436 bool Indirect = false; 13437 13438 RetainCycleOwner() = default; 13439 13440 void setLocsFrom(Expr *e) { 13441 Loc = e->getExprLoc(); 13442 Range = e->getSourceRange(); 13443 } 13444 }; 13445 13446 } // namespace 13447 13448 /// Consider whether capturing the given variable can possibly lead to 13449 /// a retain cycle. 13450 static bool considerVariable(VarDecl *var, Expr *ref, RetainCycleOwner &owner) { 13451 // In ARC, it's captured strongly iff the variable has __strong 13452 // lifetime. In MRR, it's captured strongly if the variable is 13453 // __block and has an appropriate type. 13454 if (var->getType().getObjCLifetime() != Qualifiers::OCL_Strong) 13455 return false; 13456 13457 owner.Variable = var; 13458 if (ref) 13459 owner.setLocsFrom(ref); 13460 return true; 13461 } 13462 13463 static bool findRetainCycleOwner(Sema &S, Expr *e, RetainCycleOwner &owner) { 13464 while (true) { 13465 e = e->IgnoreParens(); 13466 if (CastExpr *cast = dyn_cast<CastExpr>(e)) { 13467 switch (cast->getCastKind()) { 13468 case CK_BitCast: 13469 case CK_LValueBitCast: 13470 case CK_LValueToRValue: 13471 case CK_ARCReclaimReturnedObject: 13472 e = cast->getSubExpr(); 13473 continue; 13474 13475 default: 13476 return false; 13477 } 13478 } 13479 13480 if (ObjCIvarRefExpr *ref = dyn_cast<ObjCIvarRefExpr>(e)) { 13481 ObjCIvarDecl *ivar = ref->getDecl(); 13482 if (ivar->getType().getObjCLifetime() != Qualifiers::OCL_Strong) 13483 return false; 13484 13485 // Try to find a retain cycle in the base. 13486 if (!findRetainCycleOwner(S, ref->getBase(), owner)) 13487 return false; 13488 13489 if (ref->isFreeIvar()) owner.setLocsFrom(ref); 13490 owner.Indirect = true; 13491 return true; 13492 } 13493 13494 if (DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e)) { 13495 VarDecl *var = dyn_cast<VarDecl>(ref->getDecl()); 13496 if (!var) return false; 13497 return considerVariable(var, ref, owner); 13498 } 13499 13500 if (MemberExpr *member = dyn_cast<MemberExpr>(e)) { 13501 if (member->isArrow()) return false; 13502 13503 // Don't count this as an indirect ownership. 13504 e = member->getBase(); 13505 continue; 13506 } 13507 13508 if (PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) { 13509 // Only pay attention to pseudo-objects on property references. 
13510 ObjCPropertyRefExpr *pre 13511 = dyn_cast<ObjCPropertyRefExpr>(pseudo->getSyntacticForm() 13512 ->IgnoreParens()); 13513 if (!pre) return false; 13514 if (pre->isImplicitProperty()) return false; 13515 ObjCPropertyDecl *property = pre->getExplicitProperty(); 13516 if (!property->isRetaining() && 13517 !(property->getPropertyIvarDecl() && 13518 property->getPropertyIvarDecl()->getType() 13519 .getObjCLifetime() == Qualifiers::OCL_Strong)) 13520 return false; 13521 13522 owner.Indirect = true; 13523 if (pre->isSuperReceiver()) { 13524 owner.Variable = S.getCurMethodDecl()->getSelfDecl(); 13525 if (!owner.Variable) 13526 return false; 13527 owner.Loc = pre->getLocation(); 13528 owner.Range = pre->getSourceRange(); 13529 return true; 13530 } 13531 e = const_cast<Expr*>(cast<OpaqueValueExpr>(pre->getBase()) 13532 ->getSourceExpr()); 13533 continue; 13534 } 13535 13536 // Array ivars? 13537 13538 return false; 13539 } 13540 } 13541 13542 namespace { 13543 13544 struct FindCaptureVisitor : EvaluatedExprVisitor<FindCaptureVisitor> { 13545 ASTContext &Context; 13546 VarDecl *Variable; 13547 Expr *Capturer = nullptr; 13548 bool VarWillBeReased = false; 13549 13550 FindCaptureVisitor(ASTContext &Context, VarDecl *variable) 13551 : EvaluatedExprVisitor<FindCaptureVisitor>(Context), 13552 Context(Context), Variable(variable) {} 13553 13554 void VisitDeclRefExpr(DeclRefExpr *ref) { 13555 if (ref->getDecl() == Variable && !Capturer) 13556 Capturer = ref; 13557 } 13558 13559 void VisitObjCIvarRefExpr(ObjCIvarRefExpr *ref) { 13560 if (Capturer) return; 13561 Visit(ref->getBase()); 13562 if (Capturer && ref->isFreeIvar()) 13563 Capturer = ref; 13564 } 13565 13566 void VisitBlockExpr(BlockExpr *block) { 13567 // Look inside nested blocks 13568 if (block->getBlockDecl()->capturesVariable(Variable)) 13569 Visit(block->getBlockDecl()->getBody()); 13570 } 13571 13572 void VisitOpaqueValueExpr(OpaqueValueExpr *OVE) { 13573 if (Capturer) return; 13574 if (OVE->getSourceExpr()) 13575 Visit(OVE->getSourceExpr()); 13576 } 13577 13578 void VisitBinaryOperator(BinaryOperator *BinOp) { 13579 if (!Variable || VarWillBeReased || BinOp->getOpcode() != BO_Assign) 13580 return; 13581 Expr *LHS = BinOp->getLHS(); 13582 if (const DeclRefExpr *DRE = dyn_cast_or_null<DeclRefExpr>(LHS)) { 13583 if (DRE->getDecl() != Variable) 13584 return; 13585 if (Expr *RHS = BinOp->getRHS()) { 13586 RHS = RHS->IgnoreParenCasts(); 13587 llvm::APSInt Value; 13588 VarWillBeReased = 13589 (RHS && RHS->isIntegerConstantExpr(Value, Context) && Value == 0); 13590 } 13591 } 13592 } 13593 }; 13594 13595 } // namespace 13596 13597 /// Check whether the given argument is a block which captures a 13598 /// variable. 13599 static Expr *findCapturingExpr(Sema &S, Expr *e, RetainCycleOwner &owner) { 13600 assert(owner.Variable && owner.Loc.isValid()); 13601 13602 e = e->IgnoreParenCasts(); 13603 13604 // Look through [^{...} copy] and Block_copy(^{...}). 
13605 if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(e)) { 13606 Selector Cmd = ME->getSelector(); 13607 if (Cmd.isUnarySelector() && Cmd.getNameForSlot(0) == "copy") { 13608 e = ME->getInstanceReceiver(); 13609 if (!e) 13610 return nullptr; 13611 e = e->IgnoreParenCasts(); 13612 } 13613 } else if (CallExpr *CE = dyn_cast<CallExpr>(e)) { 13614 if (CE->getNumArgs() == 1) { 13615 FunctionDecl *Fn = dyn_cast_or_null<FunctionDecl>(CE->getCalleeDecl()); 13616 if (Fn) { 13617 const IdentifierInfo *FnI = Fn->getIdentifier(); 13618 if (FnI && FnI->isStr("_Block_copy")) { 13619 e = CE->getArg(0)->IgnoreParenCasts(); 13620 } 13621 } 13622 } 13623 } 13624 13625 BlockExpr *block = dyn_cast<BlockExpr>(e); 13626 if (!block || !block->getBlockDecl()->capturesVariable(owner.Variable)) 13627 return nullptr; 13628 13629 FindCaptureVisitor visitor(S.Context, owner.Variable); 13630 visitor.Visit(block->getBlockDecl()->getBody()); 13631 return visitor.VarWillBeReased ? nullptr : visitor.Capturer; 13632 } 13633 13634 static void diagnoseRetainCycle(Sema &S, Expr *capturer, 13635 RetainCycleOwner &owner) { 13636 assert(capturer); 13637 assert(owner.Variable && owner.Loc.isValid()); 13638 13639 S.Diag(capturer->getExprLoc(), diag::warn_arc_retain_cycle) 13640 << owner.Variable << capturer->getSourceRange(); 13641 S.Diag(owner.Loc, diag::note_arc_retain_cycle_owner) 13642 << owner.Indirect << owner.Range; 13643 } 13644 13645 /// Check for a keyword selector that starts with the word 'add' or 13646 /// 'set'. 13647 static bool isSetterLikeSelector(Selector sel) { 13648 if (sel.isUnarySelector()) return false; 13649 13650 StringRef str = sel.getNameForSlot(0); 13651 while (!str.empty() && str.front() == '_') str = str.substr(1); 13652 if (str.startswith("set")) 13653 str = str.substr(3); 13654 else if (str.startswith("add")) { 13655 // Specially whitelist 'addOperationWithBlock:'. 
13656 if (sel.getNumArgs() == 1 && str.startswith("addOperationWithBlock")) 13657 return false; 13658 str = str.substr(3); 13659 } 13660 else 13661 return false; 13662 13663 if (str.empty()) return true; 13664 return !isLowercase(str.front()); 13665 } 13666 13667 static Optional<int> GetNSMutableArrayArgumentIndex(Sema &S, 13668 ObjCMessageExpr *Message) { 13669 bool IsMutableArray = S.NSAPIObj->isSubclassOfNSClass( 13670 Message->getReceiverInterface(), 13671 NSAPI::ClassId_NSMutableArray); 13672 if (!IsMutableArray) { 13673 return None; 13674 } 13675 13676 Selector Sel = Message->getSelector(); 13677 13678 Optional<NSAPI::NSArrayMethodKind> MKOpt = 13679 S.NSAPIObj->getNSArrayMethodKind(Sel); 13680 if (!MKOpt) { 13681 return None; 13682 } 13683 13684 NSAPI::NSArrayMethodKind MK = *MKOpt; 13685 13686 switch (MK) { 13687 case NSAPI::NSMutableArr_addObject: 13688 case NSAPI::NSMutableArr_insertObjectAtIndex: 13689 case NSAPI::NSMutableArr_setObjectAtIndexedSubscript: 13690 return 0; 13691 case NSAPI::NSMutableArr_replaceObjectAtIndex: 13692 return 1; 13693 13694 default: 13695 return None; 13696 } 13697 13698 return None; 13699 } 13700 13701 static 13702 Optional<int> GetNSMutableDictionaryArgumentIndex(Sema &S, 13703 ObjCMessageExpr *Message) { 13704 bool IsMutableDictionary = S.NSAPIObj->isSubclassOfNSClass( 13705 Message->getReceiverInterface(), 13706 NSAPI::ClassId_NSMutableDictionary); 13707 if (!IsMutableDictionary) { 13708 return None; 13709 } 13710 13711 Selector Sel = Message->getSelector(); 13712 13713 Optional<NSAPI::NSDictionaryMethodKind> MKOpt = 13714 S.NSAPIObj->getNSDictionaryMethodKind(Sel); 13715 if (!MKOpt) { 13716 return None; 13717 } 13718 13719 NSAPI::NSDictionaryMethodKind MK = *MKOpt; 13720 13721 switch (MK) { 13722 case NSAPI::NSMutableDict_setObjectForKey: 13723 case NSAPI::NSMutableDict_setValueForKey: 13724 case NSAPI::NSMutableDict_setObjectForKeyedSubscript: 13725 return 0; 13726 13727 default: 13728 return None; 13729 } 13730 13731 return None; 13732 } 13733 13734 static Optional<int> GetNSSetArgumentIndex(Sema &S, ObjCMessageExpr *Message) { 13735 bool IsMutableSet = S.NSAPIObj->isSubclassOfNSClass( 13736 Message->getReceiverInterface(), 13737 NSAPI::ClassId_NSMutableSet); 13738 13739 bool IsMutableOrderedSet = S.NSAPIObj->isSubclassOfNSClass( 13740 Message->getReceiverInterface(), 13741 NSAPI::ClassId_NSMutableOrderedSet); 13742 if (!IsMutableSet && !IsMutableOrderedSet) { 13743 return None; 13744 } 13745 13746 Selector Sel = Message->getSelector(); 13747 13748 Optional<NSAPI::NSSetMethodKind> MKOpt = S.NSAPIObj->getNSSetMethodKind(Sel); 13749 if (!MKOpt) { 13750 return None; 13751 } 13752 13753 NSAPI::NSSetMethodKind MK = *MKOpt; 13754 13755 switch (MK) { 13756 case NSAPI::NSMutableSet_addObject: 13757 case NSAPI::NSOrderedSet_setObjectAtIndex: 13758 case NSAPI::NSOrderedSet_setObjectAtIndexedSubscript: 13759 case NSAPI::NSOrderedSet_insertObjectAtIndex: 13760 return 0; 13761 case NSAPI::NSOrderedSet_replaceObjectAtIndexWithObject: 13762 return 1; 13763 } 13764 13765 return None; 13766 } 13767 13768 void Sema::CheckObjCCircularContainer(ObjCMessageExpr *Message) { 13769 if (!Message->isInstanceMessage()) { 13770 return; 13771 } 13772 13773 Optional<int> ArgOpt; 13774 13775 if (!(ArgOpt = GetNSMutableArrayArgumentIndex(*this, Message)) && 13776 !(ArgOpt = GetNSMutableDictionaryArgumentIndex(*this, Message)) && 13777 !(ArgOpt = GetNSSetArgumentIndex(*this, Message))) { 13778 return; 13779 } 13780 13781 int ArgIndex = *ArgOpt; 13782 13783 Expr *Arg = 
Message->getArg(ArgIndex)->IgnoreImpCasts(); 13784 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Arg)) { 13785 Arg = OE->getSourceExpr()->IgnoreImpCasts(); 13786 } 13787 13788 if (Message->getReceiverKind() == ObjCMessageExpr::SuperInstance) { 13789 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) { 13790 if (ArgRE->isObjCSelfExpr()) { 13791 Diag(Message->getSourceRange().getBegin(), 13792 diag::warn_objc_circular_container) 13793 << ArgRE->getDecl() << StringRef("'super'"); 13794 } 13795 } 13796 } else { 13797 Expr *Receiver = Message->getInstanceReceiver()->IgnoreImpCasts(); 13798 13799 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Receiver)) { 13800 Receiver = OE->getSourceExpr()->IgnoreImpCasts(); 13801 } 13802 13803 if (DeclRefExpr *ReceiverRE = dyn_cast<DeclRefExpr>(Receiver)) { 13804 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) { 13805 if (ReceiverRE->getDecl() == ArgRE->getDecl()) { 13806 ValueDecl *Decl = ReceiverRE->getDecl(); 13807 Diag(Message->getSourceRange().getBegin(), 13808 diag::warn_objc_circular_container) 13809 << Decl << Decl; 13810 if (!ArgRE->isObjCSelfExpr()) { 13811 Diag(Decl->getLocation(), 13812 diag::note_objc_circular_container_declared_here) 13813 << Decl; 13814 } 13815 } 13816 } 13817 } else if (ObjCIvarRefExpr *IvarRE = dyn_cast<ObjCIvarRefExpr>(Receiver)) { 13818 if (ObjCIvarRefExpr *IvarArgRE = dyn_cast<ObjCIvarRefExpr>(Arg)) { 13819 if (IvarRE->getDecl() == IvarArgRE->getDecl()) { 13820 ObjCIvarDecl *Decl = IvarRE->getDecl(); 13821 Diag(Message->getSourceRange().getBegin(), 13822 diag::warn_objc_circular_container) 13823 << Decl << Decl; 13824 Diag(Decl->getLocation(), 13825 diag::note_objc_circular_container_declared_here) 13826 << Decl; 13827 } 13828 } 13829 } 13830 } 13831 } 13832 13833 /// Check a message send to see if it's likely to cause a retain cycle. 13834 void Sema::checkRetainCycles(ObjCMessageExpr *msg) { 13835 // Only check instance methods whose selector looks like a setter. 13836 if (!msg->isInstanceMessage() || !isSetterLikeSelector(msg->getSelector())) 13837 return; 13838 13839 // Try to find a variable that the receiver is strongly owned by. 13840 RetainCycleOwner owner; 13841 if (msg->getReceiverKind() == ObjCMessageExpr::Instance) { 13842 if (!findRetainCycleOwner(*this, msg->getInstanceReceiver(), owner)) 13843 return; 13844 } else { 13845 assert(msg->getReceiverKind() == ObjCMessageExpr::SuperInstance); 13846 owner.Variable = getCurMethodDecl()->getSelfDecl(); 13847 owner.Loc = msg->getSuperLoc(); 13848 owner.Range = msg->getSuperLoc(); 13849 } 13850 13851 // Check whether the receiver is captured by any of the arguments. 13852 const ObjCMethodDecl *MD = msg->getMethodDecl(); 13853 for (unsigned i = 0, e = msg->getNumArgs(); i != e; ++i) { 13854 if (Expr *capturer = findCapturingExpr(*this, msg->getArg(i), owner)) { 13855 // noescape blocks should not be retained by the method. 13856 if (MD && MD->parameters()[i]->hasAttr<NoEscapeAttr>()) 13857 continue; 13858 return diagnoseRetainCycle(*this, capturer, owner); 13859 } 13860 } 13861 } 13862 13863 /// Check a property assign to see if it's likely to cause a retain cycle. 
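/// For example (illustrative, not from the original source):
///   self.onCompletion = ^{ [self reset]; };
/// Here the receiver strongly owns the block through the property while the
/// block captures 'self', so the assignment is reported as a likely cycle.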
13864 void Sema::checkRetainCycles(Expr *receiver, Expr *argument) { 13865 RetainCycleOwner owner; 13866 if (!findRetainCycleOwner(*this, receiver, owner)) 13867 return; 13868 13869 if (Expr *capturer = findCapturingExpr(*this, argument, owner)) 13870 diagnoseRetainCycle(*this, capturer, owner); 13871 } 13872 13873 void Sema::checkRetainCycles(VarDecl *Var, Expr *Init) { 13874 RetainCycleOwner Owner; 13875 if (!considerVariable(Var, /*DeclRefExpr=*/nullptr, Owner)) 13876 return; 13877 13878 // Because we don't have an expression for the variable, we have to set the 13879 // location explicitly here. 13880 Owner.Loc = Var->getLocation(); 13881 Owner.Range = Var->getSourceRange(); 13882 13883 if (Expr *Capturer = findCapturingExpr(*this, Init, Owner)) 13884 diagnoseRetainCycle(*this, Capturer, Owner); 13885 } 13886 13887 static bool checkUnsafeAssignLiteral(Sema &S, SourceLocation Loc, 13888 Expr *RHS, bool isProperty) { 13889 // Check if RHS is an Objective-C object literal, which also can get 13890 // immediately zapped in a weak reference. Note that we explicitly 13891 // allow ObjCStringLiterals, since those are designed to never really die. 13892 RHS = RHS->IgnoreParenImpCasts(); 13893 13894 // This enum needs to match with the 'select' in 13895 // warn_objc_arc_literal_assign (off-by-1). 13896 Sema::ObjCLiteralKind Kind = S.CheckLiteralKind(RHS); 13897 if (Kind == Sema::LK_String || Kind == Sema::LK_None) 13898 return false; 13899 13900 S.Diag(Loc, diag::warn_arc_literal_assign) 13901 << (unsigned) Kind 13902 << (isProperty ? 0 : 1) 13903 << RHS->getSourceRange(); 13904 13905 return true; 13906 } 13907 13908 static bool checkUnsafeAssignObject(Sema &S, SourceLocation Loc, 13909 Qualifiers::ObjCLifetime LT, 13910 Expr *RHS, bool isProperty) { 13911 // Strip off any implicit cast added to get to the one ARC-specific. 13912 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { 13913 if (cast->getCastKind() == CK_ARCConsumeObject) { 13914 S.Diag(Loc, diag::warn_arc_retained_assign) 13915 << (LT == Qualifiers::OCL_ExplicitNone) 13916 << (isProperty ? 0 : 1) 13917 << RHS->getSourceRange(); 13918 return true; 13919 } 13920 RHS = cast->getSubExpr(); 13921 } 13922 13923 if (LT == Qualifiers::OCL_Weak && 13924 checkUnsafeAssignLiteral(S, Loc, RHS, isProperty)) 13925 return true; 13926 13927 return false; 13928 } 13929 13930 bool Sema::checkUnsafeAssigns(SourceLocation Loc, 13931 QualType LHS, Expr *RHS) { 13932 Qualifiers::ObjCLifetime LT = LHS.getObjCLifetime(); 13933 13934 if (LT != Qualifiers::OCL_Weak && LT != Qualifiers::OCL_ExplicitNone) 13935 return false; 13936 13937 if (checkUnsafeAssignObject(*this, Loc, LT, RHS, false)) 13938 return true; 13939 13940 return false; 13941 } 13942 13943 void Sema::checkUnsafeExprAssigns(SourceLocation Loc, 13944 Expr *LHS, Expr *RHS) { 13945 QualType LHSType; 13946 // PropertyRef on LHS type need be directly obtained from 13947 // its declaration as it has a PseudoType. 
13948 ObjCPropertyRefExpr *PRE 13949 = dyn_cast<ObjCPropertyRefExpr>(LHS->IgnoreParens()); 13950 if (PRE && !PRE->isImplicitProperty()) { 13951 const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); 13952 if (PD) 13953 LHSType = PD->getType(); 13954 } 13955 13956 if (LHSType.isNull()) 13957 LHSType = LHS->getType(); 13958 13959 Qualifiers::ObjCLifetime LT = LHSType.getObjCLifetime(); 13960 13961 if (LT == Qualifiers::OCL_Weak) { 13962 if (!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, Loc)) 13963 getCurFunction()->markSafeWeakUse(LHS); 13964 } 13965 13966 if (checkUnsafeAssigns(Loc, LHSType, RHS)) 13967 return; 13968 13969 // FIXME. Check for other life times. 13970 if (LT != Qualifiers::OCL_None) 13971 return; 13972 13973 if (PRE) { 13974 if (PRE->isImplicitProperty()) 13975 return; 13976 const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); 13977 if (!PD) 13978 return; 13979 13980 unsigned Attributes = PD->getPropertyAttributes(); 13981 if (Attributes & ObjCPropertyDecl::OBJC_PR_assign) { 13982 // when 'assign' attribute was not explicitly specified 13983 // by user, ignore it and rely on property type itself 13984 // for lifetime info. 13985 unsigned AsWrittenAttr = PD->getPropertyAttributesAsWritten(); 13986 if (!(AsWrittenAttr & ObjCPropertyDecl::OBJC_PR_assign) && 13987 LHSType->isObjCRetainableType()) 13988 return; 13989 13990 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { 13991 if (cast->getCastKind() == CK_ARCConsumeObject) { 13992 Diag(Loc, diag::warn_arc_retained_property_assign) 13993 << RHS->getSourceRange(); 13994 return; 13995 } 13996 RHS = cast->getSubExpr(); 13997 } 13998 } 13999 else if (Attributes & ObjCPropertyDecl::OBJC_PR_weak) { 14000 if (checkUnsafeAssignObject(*this, Loc, Qualifiers::OCL_Weak, RHS, true)) 14001 return; 14002 } 14003 } 14004 } 14005 14006 //===--- CHECK: Empty statement body (-Wempty-body) ---------------------===// 14007 14008 static bool ShouldDiagnoseEmptyStmtBody(const SourceManager &SourceMgr, 14009 SourceLocation StmtLoc, 14010 const NullStmt *Body) { 14011 // Do not warn if the body is a macro that expands to nothing, e.g: 14012 // 14013 // #define CALL(x) 14014 // if (condition) 14015 // CALL(0); 14016 if (Body->hasLeadingEmptyMacro()) 14017 return false; 14018 14019 // Get line numbers of statement and body. 14020 bool StmtLineInvalid; 14021 unsigned StmtLine = SourceMgr.getPresumedLineNumber(StmtLoc, 14022 &StmtLineInvalid); 14023 if (StmtLineInvalid) 14024 return false; 14025 14026 bool BodyLineInvalid; 14027 unsigned BodyLine = SourceMgr.getSpellingLineNumber(Body->getSemiLoc(), 14028 &BodyLineInvalid); 14029 if (BodyLineInvalid) 14030 return false; 14031 14032 // Warn if null statement and body are on the same line. 14033 if (StmtLine != BodyLine) 14034 return false; 14035 14036 return true; 14037 } 14038 14039 void Sema::DiagnoseEmptyStmtBody(SourceLocation StmtLoc, 14040 const Stmt *Body, 14041 unsigned DiagID) { 14042 // Since this is a syntactic check, don't emit diagnostic for template 14043 // instantiations, this just adds noise. 14044 if (CurrentInstantiationScope) 14045 return; 14046 14047 // The body should be a null statement. 14048 const NullStmt *NBody = dyn_cast<NullStmt>(Body); 14049 if (!NBody) 14050 return; 14051 14052 // Do the usual checks. 
14053   if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody))
14054     return;
14055
14056   Diag(NBody->getSemiLoc(), DiagID);
14057   Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line);
14058 }
14059
14060 void Sema::DiagnoseEmptyLoopBody(const Stmt *S,
14061                                  const Stmt *PossibleBody) {
14062   assert(!CurrentInstantiationScope); // Ensured by caller
14063
14064   SourceLocation StmtLoc;
14065   const Stmt *Body;
14066   unsigned DiagID;
14067   if (const ForStmt *FS = dyn_cast<ForStmt>(S)) {
14068     StmtLoc = FS->getRParenLoc();
14069     Body = FS->getBody();
14070     DiagID = diag::warn_empty_for_body;
14071   } else if (const WhileStmt *WS = dyn_cast<WhileStmt>(S)) {
14072     StmtLoc = WS->getCond()->getSourceRange().getEnd();
14073     Body = WS->getBody();
14074     DiagID = diag::warn_empty_while_body;
14075   } else
14076     return; // Neither `for' nor `while'.
14077
14078   // The body should be a null statement.
14079   const NullStmt *NBody = dyn_cast<NullStmt>(Body);
14080   if (!NBody)
14081     return;
14082
14083   // Skip expensive checks if diagnostic is disabled.
14084   if (Diags.isIgnored(DiagID, NBody->getSemiLoc()))
14085     return;
14086
14087   // Do the usual checks.
14088   if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody))
14089     return;
14090
14091   // `for(...);' and `while(...);' are popular idioms, so in order to keep
14092   // noise level low, emit diagnostics only if for/while is followed by a
14093   // CompoundStmt, e.g.:
14094   //   for (int i = 0; i < n; i++);
14095   //   {
14096   //     a(i);
14097   //   }
14098   // or if for/while is followed by a statement with more indentation
14099   // than for/while itself:
14100   //   for (int i = 0; i < n; i++);
14101   //     a(i);
14102   bool ProbableTypo = isa<CompoundStmt>(PossibleBody);
14103   if (!ProbableTypo) {
14104     bool BodyColInvalid;
14105     unsigned BodyCol = SourceMgr.getPresumedColumnNumber(
14106         PossibleBody->getBeginLoc(), &BodyColInvalid);
14107     if (BodyColInvalid)
14108       return;
14109
14110     bool StmtColInvalid;
14111     unsigned StmtCol =
14112         SourceMgr.getPresumedColumnNumber(S->getBeginLoc(), &StmtColInvalid);
14113     if (StmtColInvalid)
14114       return;
14115
14116     if (BodyCol > StmtCol)
14117       ProbableTypo = true;
14118   }
14119
14120   if (ProbableTypo) {
14121     Diag(NBody->getSemiLoc(), DiagID);
14122     Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line);
14123   }
14124 }
14125
14126 //===--- CHECK: Warn on self move with std::move. -------------------------===//
14127
14128 /// DiagnoseSelfMove - Emits a warning if a value is moved to itself.
14129 void Sema::DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
14130                             SourceLocation OpLoc) {
14131   if (Diags.isIgnored(diag::warn_self_move, OpLoc))
14132     return;
14133
14134   if (inTemplateInstantiation())
14135     return;
14136
14137   // Strip parens and casts away.
14138   LHSExpr = LHSExpr->IgnoreParenImpCasts();
14139   RHSExpr = RHSExpr->IgnoreParenImpCasts();
14140
14141   // Check for a call expression.
14142   const CallExpr *CE = dyn_cast<CallExpr>(RHSExpr);
14143   if (!CE || CE->getNumArgs() != 1)
14144     return;
14145
14146   // Check for a call to std::move.
14147   if (!CE->isCallToStdMove())
14148     return;
14149
14150   // Get the argument from std::move.
14151   RHSExpr = CE->getArg(0);
14152
14153   const DeclRefExpr *LHSDeclRef = dyn_cast<DeclRefExpr>(LHSExpr);
14154   const DeclRefExpr *RHSDeclRef = dyn_cast<DeclRefExpr>(RHSExpr);
14155
14156   // Two DeclRefExpr's, check that the decls are the same.
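  // e.g. (illustrative, not from the original source): 'x = std::move(x);'
  // matches this case and triggers the self-move warning.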
14157 if (LHSDeclRef && RHSDeclRef) { 14158 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl()) 14159 return; 14160 if (LHSDeclRef->getDecl()->getCanonicalDecl() != 14161 RHSDeclRef->getDecl()->getCanonicalDecl()) 14162 return; 14163 14164 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType() 14165 << LHSExpr->getSourceRange() 14166 << RHSExpr->getSourceRange(); 14167 return; 14168 } 14169 14170 // Member variables require a different approach to check for self moves. 14171 // MemberExpr's are the same if every nested MemberExpr refers to the same 14172 // Decl and that the base Expr's are DeclRefExpr's with the same Decl or 14173 // the base Expr's are CXXThisExpr's. 14174 const Expr *LHSBase = LHSExpr; 14175 const Expr *RHSBase = RHSExpr; 14176 const MemberExpr *LHSME = dyn_cast<MemberExpr>(LHSExpr); 14177 const MemberExpr *RHSME = dyn_cast<MemberExpr>(RHSExpr); 14178 if (!LHSME || !RHSME) 14179 return; 14180 14181 while (LHSME && RHSME) { 14182 if (LHSME->getMemberDecl()->getCanonicalDecl() != 14183 RHSME->getMemberDecl()->getCanonicalDecl()) 14184 return; 14185 14186 LHSBase = LHSME->getBase(); 14187 RHSBase = RHSME->getBase(); 14188 LHSME = dyn_cast<MemberExpr>(LHSBase); 14189 RHSME = dyn_cast<MemberExpr>(RHSBase); 14190 } 14191 14192 LHSDeclRef = dyn_cast<DeclRefExpr>(LHSBase); 14193 RHSDeclRef = dyn_cast<DeclRefExpr>(RHSBase); 14194 if (LHSDeclRef && RHSDeclRef) { 14195 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl()) 14196 return; 14197 if (LHSDeclRef->getDecl()->getCanonicalDecl() != 14198 RHSDeclRef->getDecl()->getCanonicalDecl()) 14199 return; 14200 14201 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType() 14202 << LHSExpr->getSourceRange() 14203 << RHSExpr->getSourceRange(); 14204 return; 14205 } 14206 14207 if (isa<CXXThisExpr>(LHSBase) && isa<CXXThisExpr>(RHSBase)) 14208 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType() 14209 << LHSExpr->getSourceRange() 14210 << RHSExpr->getSourceRange(); 14211 } 14212 14213 //===--- Layout compatibility ----------------------------------------------// 14214 14215 static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2); 14216 14217 /// Check if two enumeration types are layout-compatible. 14218 static bool isLayoutCompatible(ASTContext &C, EnumDecl *ED1, EnumDecl *ED2) { 14219 // C++11 [dcl.enum] p8: 14220 // Two enumeration types are layout-compatible if they have the same 14221 // underlying type. 14222 return ED1->isComplete() && ED2->isComplete() && 14223 C.hasSameType(ED1->getIntegerType(), ED2->getIntegerType()); 14224 } 14225 14226 /// Check if two fields are layout-compatible. 14227 static bool isLayoutCompatible(ASTContext &C, FieldDecl *Field1, 14228 FieldDecl *Field2) { 14229 if (!isLayoutCompatible(C, Field1->getType(), Field2->getType())) 14230 return false; 14231 14232 if (Field1->isBitField() != Field2->isBitField()) 14233 return false; 14234 14235 if (Field1->isBitField()) { 14236 // Make sure that the bit-fields are the same length. 14237 unsigned Bits1 = Field1->getBitWidthValue(C); 14238 unsigned Bits2 = Field2->getBitWidthValue(C); 14239 14240 if (Bits1 != Bits2) 14241 return false; 14242 } 14243 14244 return true; 14245 } 14246 14247 /// Check if two standard-layout structs are layout-compatible. 14248 /// (C++11 [class.mem] p17) 14249 static bool isLayoutCompatibleStruct(ASTContext &C, RecordDecl *RD1, 14250 RecordDecl *RD2) { 14251 // If both records are C++ classes, check that base classes match. 
14252 if (const CXXRecordDecl *D1CXX = dyn_cast<CXXRecordDecl>(RD1)) { 14253 // If one of records is a CXXRecordDecl we are in C++ mode, 14254 // thus the other one is a CXXRecordDecl, too. 14255 const CXXRecordDecl *D2CXX = cast<CXXRecordDecl>(RD2); 14256 // Check number of base classes. 14257 if (D1CXX->getNumBases() != D2CXX->getNumBases()) 14258 return false; 14259 14260 // Check the base classes. 14261 for (CXXRecordDecl::base_class_const_iterator 14262 Base1 = D1CXX->bases_begin(), 14263 BaseEnd1 = D1CXX->bases_end(), 14264 Base2 = D2CXX->bases_begin(); 14265 Base1 != BaseEnd1; 14266 ++Base1, ++Base2) { 14267 if (!isLayoutCompatible(C, Base1->getType(), Base2->getType())) 14268 return false; 14269 } 14270 } else if (const CXXRecordDecl *D2CXX = dyn_cast<CXXRecordDecl>(RD2)) { 14271 // If only RD2 is a C++ class, it should have zero base classes. 14272 if (D2CXX->getNumBases() > 0) 14273 return false; 14274 } 14275 14276 // Check the fields. 14277 RecordDecl::field_iterator Field2 = RD2->field_begin(), 14278 Field2End = RD2->field_end(), 14279 Field1 = RD1->field_begin(), 14280 Field1End = RD1->field_end(); 14281 for ( ; Field1 != Field1End && Field2 != Field2End; ++Field1, ++Field2) { 14282 if (!isLayoutCompatible(C, *Field1, *Field2)) 14283 return false; 14284 } 14285 if (Field1 != Field1End || Field2 != Field2End) 14286 return false; 14287 14288 return true; 14289 } 14290 14291 /// Check if two standard-layout unions are layout-compatible. 14292 /// (C++11 [class.mem] p18) 14293 static bool isLayoutCompatibleUnion(ASTContext &C, RecordDecl *RD1, 14294 RecordDecl *RD2) { 14295 llvm::SmallPtrSet<FieldDecl *, 8> UnmatchedFields; 14296 for (auto *Field2 : RD2->fields()) 14297 UnmatchedFields.insert(Field2); 14298 14299 for (auto *Field1 : RD1->fields()) { 14300 llvm::SmallPtrSet<FieldDecl *, 8>::iterator 14301 I = UnmatchedFields.begin(), 14302 E = UnmatchedFields.end(); 14303 14304 for ( ; I != E; ++I) { 14305 if (isLayoutCompatible(C, Field1, *I)) { 14306 bool Result = UnmatchedFields.erase(*I); 14307 (void) Result; 14308 assert(Result); 14309 break; 14310 } 14311 } 14312 if (I == E) 14313 return false; 14314 } 14315 14316 return UnmatchedFields.empty(); 14317 } 14318 14319 static bool isLayoutCompatible(ASTContext &C, RecordDecl *RD1, 14320 RecordDecl *RD2) { 14321 if (RD1->isUnion() != RD2->isUnion()) 14322 return false; 14323 14324 if (RD1->isUnion()) 14325 return isLayoutCompatibleUnion(C, RD1, RD2); 14326 else 14327 return isLayoutCompatibleStruct(C, RD1, RD2); 14328 } 14329 14330 /// Check if two types are layout-compatible in C++11 sense. 14331 static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2) { 14332 if (T1.isNull() || T2.isNull()) 14333 return false; 14334 14335 // C++11 [basic.types] p11: 14336 // If two types T1 and T2 are the same type, then T1 and T2 are 14337 // layout-compatible types. 
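  // Illustrative example (not from the original source): 'struct A { int x; };'
  // and 'struct B { int y; };' are layout-compatible standard-layout structs,
  // whereas 'struct C { long x; };' is not layout-compatible with either.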
14338   if (C.hasSameType(T1, T2))
14339     return true;
14340
14341   T1 = T1.getCanonicalType().getUnqualifiedType();
14342   T2 = T2.getCanonicalType().getUnqualifiedType();
14343
14344   const Type::TypeClass TC1 = T1->getTypeClass();
14345   const Type::TypeClass TC2 = T2->getTypeClass();
14346
14347   if (TC1 != TC2)
14348     return false;
14349
14350   if (TC1 == Type::Enum) {
14351     return isLayoutCompatible(C,
14352                               cast<EnumType>(T1)->getDecl(),
14353                               cast<EnumType>(T2)->getDecl());
14354   } else if (TC1 == Type::Record) {
14355     if (!T1->isStandardLayoutType() || !T2->isStandardLayoutType())
14356       return false;
14357
14358     return isLayoutCompatible(C,
14359                               cast<RecordType>(T1)->getDecl(),
14360                               cast<RecordType>(T2)->getDecl());
14361   }
14362
14363   return false;
14364 }
14365
14366 //===--- CHECK: pointer_with_type_tag attribute: datatypes should match ----//
14367
14368 /// Given a type tag expression find the type tag itself.
14369 ///
14370 /// \param TypeExpr Type tag expression, as it appears in user's code.
14371 ///
14372 /// \param VD Declaration of an identifier that appears in a type tag.
14373 ///
14374 /// \param MagicValue Type tag magic value.
14375 ///
14376 /// \param isConstantEvaluated whether the evaluation should be performed in
14377 /// constant context.
14378 ///
14379 static bool FindTypeTagExpr(const Expr *TypeExpr, const ASTContext &Ctx,
14380                             const ValueDecl **VD, uint64_t *MagicValue,
14381                             bool isConstantEvaluated) {
14382   while (true) {
14383     if (!TypeExpr)
14384       return false;
14385
14386     TypeExpr = TypeExpr->IgnoreParenImpCasts()->IgnoreParenCasts();
14387
14388     switch (TypeExpr->getStmtClass()) {
14389     case Stmt::UnaryOperatorClass: {
14390       const UnaryOperator *UO = cast<UnaryOperator>(TypeExpr);
14391       if (UO->getOpcode() == UO_AddrOf || UO->getOpcode() == UO_Deref) {
14392         TypeExpr = UO->getSubExpr();
14393         continue;
14394       }
14395       return false;
14396     }
14397
14398     case Stmt::DeclRefExprClass: {
14399       const DeclRefExpr *DRE = cast<DeclRefExpr>(TypeExpr);
14400       *VD = DRE->getDecl();
14401       return true;
14402     }
14403
14404     case Stmt::IntegerLiteralClass: {
14405       const IntegerLiteral *IL = cast<IntegerLiteral>(TypeExpr);
14406       llvm::APInt MagicValueAPInt = IL->getValue();
14407       if (MagicValueAPInt.getActiveBits() <= 64) {
14408         *MagicValue = MagicValueAPInt.getZExtValue();
14409         return true;
14410       } else
14411         return false;
14412     }
14413
14414     case Stmt::BinaryConditionalOperatorClass:
14415     case Stmt::ConditionalOperatorClass: {
14416       const AbstractConditionalOperator *ACO =
14417           cast<AbstractConditionalOperator>(TypeExpr);
14418       bool Result;
14419       if (ACO->getCond()->EvaluateAsBooleanCondition(Result, Ctx,
14420                                                      isConstantEvaluated)) {
14421         if (Result)
14422           TypeExpr = ACO->getTrueExpr();
14423         else
14424           TypeExpr = ACO->getFalseExpr();
14425         continue;
14426       }
14427       return false;
14428     }
14429
14430     case Stmt::BinaryOperatorClass: {
14431       const BinaryOperator *BO = cast<BinaryOperator>(TypeExpr);
14432       if (BO->getOpcode() == BO_Comma) {
14433         TypeExpr = BO->getRHS();
14434         continue;
14435       }
14436       return false;
14437     }
14438
14439     default:
14440       return false;
14441     }
14442   }
14443 }
14444
14445 /// Retrieve the C type corresponding to type tag TypeExpr.
14446 ///
14447 /// \param TypeExpr Expression that specifies a type tag.
14448 ///
14449 /// \param MagicValues Registered magic values.
14450 ///
14451 /// \param FoundWrongKind Set to true if a type tag was found, but of a wrong
14452 /// kind.
14453 ///
14454 /// \param TypeInfo Information about the corresponding C type.
14455 ///
14456 /// \param isConstantEvaluated whether the evaluation should be performed in
14457 /// constant context.
14458 ///
14459 /// \returns true if the corresponding C type was found.
14460 static bool GetMatchingCType(
14461     const IdentifierInfo *ArgumentKind, const Expr *TypeExpr,
14462     const ASTContext &Ctx,
14463     const llvm::DenseMap<Sema::TypeTagMagicValue, Sema::TypeTagData>
14464         *MagicValues,
14465     bool &FoundWrongKind, Sema::TypeTagData &TypeInfo,
14466     bool isConstantEvaluated) {
14467   FoundWrongKind = false;
14468
14469   // Variable declaration that has type_tag_for_datatype attribute.
14470   const ValueDecl *VD = nullptr;
14471
14472   uint64_t MagicValue;
14473
14474   if (!FindTypeTagExpr(TypeExpr, Ctx, &VD, &MagicValue, isConstantEvaluated))
14475     return false;
14476
14477   if (VD) {
14478     if (TypeTagForDatatypeAttr *I = VD->getAttr<TypeTagForDatatypeAttr>()) {
14479       if (I->getArgumentKind() != ArgumentKind) {
14480         FoundWrongKind = true;
14481         return false;
14482       }
14483       TypeInfo.Type = I->getMatchingCType();
14484       TypeInfo.LayoutCompatible = I->getLayoutCompatible();
14485       TypeInfo.MustBeNull = I->getMustBeNull();
14486       return true;
14487     }
14488     return false;
14489   }
14490
14491   if (!MagicValues)
14492     return false;
14493
14494   llvm::DenseMap<Sema::TypeTagMagicValue,
14495                  Sema::TypeTagData>::const_iterator I =
14496       MagicValues->find(std::make_pair(ArgumentKind, MagicValue));
14497   if (I == MagicValues->end())
14498     return false;
14499
14500   TypeInfo = I->second;
14501   return true;
14502 }
14503
14504 void Sema::RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
14505                                       uint64_t MagicValue, QualType Type,
14506                                       bool LayoutCompatible,
14507                                       bool MustBeNull) {
14508   if (!TypeTagForDatatypeMagicValues)
14509     TypeTagForDatatypeMagicValues.reset(
14510         new llvm::DenseMap<TypeTagMagicValue, TypeTagData>);
14511
14512   TypeTagMagicValue Magic(ArgumentKind, MagicValue);
14513   (*TypeTagForDatatypeMagicValues)[Magic] =
14514       TypeTagData(Type, LayoutCompatible, MustBeNull);
14515 }
14516
14517 static bool IsSameCharType(QualType T1, QualType T2) {
14518   const BuiltinType *BT1 = T1->getAs<BuiltinType>();
14519   if (!BT1)
14520     return false;
14521
14522   const BuiltinType *BT2 = T2->getAs<BuiltinType>();
14523   if (!BT2)
14524     return false;
14525
14526   BuiltinType::Kind T1Kind = BT1->getKind();
14527   BuiltinType::Kind T2Kind = BT2->getKind();
14528
14529   return (T1Kind == BuiltinType::SChar && T2Kind == BuiltinType::Char_S) ||
14530          (T1Kind == BuiltinType::UChar && T2Kind == BuiltinType::Char_U) ||
14531          (T1Kind == BuiltinType::Char_U && T2Kind == BuiltinType::UChar) ||
14532          (T1Kind == BuiltinType::Char_S && T2Kind == BuiltinType::SChar);
14533 }
14534
14535 void Sema::CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
14536                                     const ArrayRef<const Expr *> ExprArgs,
14537                                     SourceLocation CallSiteLoc) {
14538   const IdentifierInfo *ArgumentKind = Attr->getArgumentKind();
14539   bool IsPointerAttr = Attr->getIsPointer();
14540
14541   // Retrieve the argument representing the 'type_tag'.
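  // As background (illustrative sketch based on the attribute's documented
  // usage, not from the original source), an annotated API typically looks
  // like:
  //   extern struct mpi_datatype mpi_datatype_int
  //       __attribute__((type_tag_for_datatype(mpi, int)));
  //   #define MPI_INT ((MPI_Datatype)&mpi_datatype_int)
  //   int MPI_Send(void *buf, int count, MPI_Datatype datatype)
  //       __attribute__((pointer_with_type_tag(mpi, 1, 3)));
  // Here the 'datatype' argument is the type tag that is checked against
  // the type of 'buf'.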
void Sema::CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
                                    const ArrayRef<const Expr *> ExprArgs,
                                    SourceLocation CallSiteLoc) {
  const IdentifierInfo *ArgumentKind = Attr->getArgumentKind();
  bool IsPointerAttr = Attr->getIsPointer();

  // Retrieve the argument representing the 'type_tag'.
  unsigned TypeTagIdxAST = Attr->getTypeTagIdx().getASTIndex();
  if (TypeTagIdxAST >= ExprArgs.size()) {
    Diag(CallSiteLoc, diag::err_tag_index_out_of_range)
        << 0 << Attr->getTypeTagIdx().getSourceIndex();
    return;
  }
  const Expr *TypeTagExpr = ExprArgs[TypeTagIdxAST];
  bool FoundWrongKind;
  TypeTagData TypeInfo;
  if (!GetMatchingCType(ArgumentKind, TypeTagExpr, Context,
                        TypeTagForDatatypeMagicValues.get(), FoundWrongKind,
                        TypeInfo, isConstantEvaluated())) {
    if (FoundWrongKind)
      Diag(TypeTagExpr->getExprLoc(),
           diag::warn_type_tag_for_datatype_wrong_kind)
          << TypeTagExpr->getSourceRange();
    return;
  }

  // Retrieve the argument representing the 'arg_idx'.
  unsigned ArgumentIdxAST = Attr->getArgumentIdx().getASTIndex();
  if (ArgumentIdxAST >= ExprArgs.size()) {
    Diag(CallSiteLoc, diag::err_tag_index_out_of_range)
        << 1 << Attr->getArgumentIdx().getSourceIndex();
    return;
  }
  const Expr *ArgumentExpr = ExprArgs[ArgumentIdxAST];
  if (IsPointerAttr) {
    // Skip implicit cast of pointer to `void *' (as a function argument).
    if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(ArgumentExpr))
      if (ICE->getType()->isVoidPointerType() &&
          ICE->getCastKind() == CK_BitCast)
        ArgumentExpr = ICE->getSubExpr();
  }
  QualType ArgumentType = ArgumentExpr->getType();

  // Passing a `void *' pointer shouldn't trigger a warning.
  if (IsPointerAttr && ArgumentType->isVoidPointerType())
    return;

  if (TypeInfo.MustBeNull) {
    // A type tag with matching void type requires a null pointer.
    if (!ArgumentExpr->isNullPointerConstant(
            Context, Expr::NPC_ValueDependentIsNotNull)) {
      Diag(ArgumentExpr->getExprLoc(),
           diag::warn_type_safety_null_pointer_required)
          << ArgumentKind->getName()
          << ArgumentExpr->getSourceRange()
          << TypeTagExpr->getSourceRange();
    }
    return;
  }

  QualType RequiredType = TypeInfo.Type;
  if (IsPointerAttr)
    RequiredType = Context.getPointerType(RequiredType);

  bool mismatch = false;
  if (!TypeInfo.LayoutCompatible) {
    mismatch = !Context.hasSameType(ArgumentType, RequiredType);

    // C++11 [basic.fundamental] p1:
    // Plain char, signed char, and unsigned char are three distinct types.
    //
    // But we treat plain `char' as equivalent to `signed char' or `unsigned
    // char' depending on the current char signedness mode.
    if (mismatch)
      if ((IsPointerAttr && IsSameCharType(ArgumentType->getPointeeType(),
                                           RequiredType->getPointeeType())) ||
          (!IsPointerAttr && IsSameCharType(ArgumentType, RequiredType)))
        mismatch = false;
  } else if (IsPointerAttr)
    mismatch = !isLayoutCompatible(Context,
                                   ArgumentType->getPointeeType(),
                                   RequiredType->getPointeeType());
  else
    mismatch = !isLayoutCompatible(Context, ArgumentType, RequiredType);

  if (mismatch)
    Diag(ArgumentExpr->getExprLoc(), diag::warn_type_safety_type_mismatch)
        << ArgumentType << ArgumentKind
        << TypeInfo.LayoutCompatible << RequiredType
        << ArgumentExpr->getSourceRange()
        << TypeTagExpr->getSourceRange();
}

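// The functions below implement the 'taking address of packed member'
// diagnostic (-Waddress-of-packed-member). A minimal, illustrative sketch of
// the user code they flag (hypothetical, not part of this file):
//
//   struct __attribute__((packed)) S { char c; int i; } s;
//   int *p = &s.i;   // warn_taking_address_of_packed_member: 's.i' may be
//                    // insufficiently aligned for 'int *'.
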
void Sema::AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
                                         CharUnits Alignment) {
  MisalignedMembers.emplace_back(E, RD, MD, Alignment);
}

void Sema::DiagnoseMisalignedMembers() {
  for (MisalignedMember &m : MisalignedMembers) {
    const NamedDecl *ND = m.RD;
    if (ND->getName().empty()) {
      if (const TypedefNameDecl *TD = m.RD->getTypedefNameForAnonDecl())
        ND = TD;
    }
    Diag(m.E->getBeginLoc(), diag::warn_taking_address_of_packed_member)
        << m.MD << ND << m.E->getSourceRange();
  }
  MisalignedMembers.clear();
}

void Sema::DiscardMisalignedMemberAddress(const Type *T, Expr *E) {
  E = E->IgnoreParens();
  if (!T->isPointerType() && !T->isIntegerType())
    return;
  if (isa<UnaryOperator>(E) &&
      cast<UnaryOperator>(E)->getOpcode() == UO_AddrOf) {
    auto *Op = cast<UnaryOperator>(E)->getSubExpr()->IgnoreParens();
    if (isa<MemberExpr>(Op)) {
      auto MA = llvm::find(MisalignedMembers, MisalignedMember(Op));
      if (MA != MisalignedMembers.end() &&
          (T->isIntegerType() ||
           (T->isPointerType() && (T->getPointeeType()->isIncompleteType() ||
                                   Context.getTypeAlignInChars(
                                       T->getPointeeType()) <= MA->Alignment))))
        MisalignedMembers.erase(MA);
    }
  }
}

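// DiscardMisalignedMemberAddress() above drops a pending misaligned-member
// diagnostic when the address is converted to a pointer whose pointee needs no
// more alignment than the member provides, or to an integer type. Continuing
// the illustrative sketch (hypothetical code, not part of this file):
//
//   char *q = (char *)&s.i;                 // dropped: 'char' needs alignment 1.
//   unsigned long v = (unsigned long)&s.i;  // dropped: integer destination.
//   int *p = &s.i;                          // still diagnosed.
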
void Sema::RefersToMemberWithReducedAlignment(
    Expr *E,
    llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
        Action) {
  const auto *ME = dyn_cast<MemberExpr>(E);
  if (!ME)
    return;

  // No need to check expressions with an __unaligned-qualified type.
  if (E->getType().getQualifiers().hasUnaligned())
    return;

  // For a chain of MemberExpr like "a.b.c.d" this list
  // will keep FieldDecls like [d, c, b].
  SmallVector<FieldDecl *, 4> ReverseMemberChain;
  const MemberExpr *TopME = nullptr;
  bool AnyIsPacked = false;
  do {
    QualType BaseType = ME->getBase()->getType();
    if (ME->isArrow())
      BaseType = BaseType->getPointeeType();
    RecordDecl *RD = BaseType->castAs<RecordType>()->getDecl();
    if (RD->isInvalidDecl())
      return;

    ValueDecl *MD = ME->getMemberDecl();
    auto *FD = dyn_cast<FieldDecl>(MD);
    // We do not care about non-data members.
    if (!FD || FD->isInvalidDecl())
      return;

    AnyIsPacked =
        AnyIsPacked || (RD->hasAttr<PackedAttr>() || MD->hasAttr<PackedAttr>());
    ReverseMemberChain.push_back(FD);

    TopME = ME;
    ME = dyn_cast<MemberExpr>(ME->getBase()->IgnoreParens());
  } while (ME);
  assert(TopME && "We did not compute a topmost MemberExpr!");

  // Not in the scope of this diagnostic.
  if (!AnyIsPacked)
    return;

  const Expr *TopBase = TopME->getBase()->IgnoreParenImpCasts();
  const auto *DRE = dyn_cast<DeclRefExpr>(TopBase);
  // TODO: The innermost base of the member expression may be too complicated.
  // For now, just disregard these cases. This is left for future
  // improvement.
  if (!DRE && !isa<CXXThisExpr>(TopBase))
    return;

  // Alignment expected by the whole expression.
  CharUnits ExpectedAlignment = Context.getTypeAlignInChars(E->getType());

  // No need to do anything else with this case.
  if (ExpectedAlignment.isOne())
    return;

  // Synthesize the offset of the whole access.
  CharUnits Offset;
  for (auto I = ReverseMemberChain.rbegin(); I != ReverseMemberChain.rend();
       I++) {
    Offset += Context.toCharUnitsFromBits(Context.getFieldOffset(*I));
  }

  // Compute the CompleteObjectAlignment as the alignment of the whole chain.
  CharUnits CompleteObjectAlignment = Context.getTypeAlignInChars(
      ReverseMemberChain.back()->getParent()->getTypeForDecl());

  // The base expression of the innermost MemberExpr may give
  // stronger guarantees than the class containing the member.
  if (DRE && !TopME->isArrow()) {
    const ValueDecl *VD = DRE->getDecl();
    if (!VD->getType()->isReferenceType())
      CompleteObjectAlignment =
          std::max(CompleteObjectAlignment, Context.getDeclAlign(VD));
  }

  // Check if the synthesized offset fulfills the alignment.
  if (Offset % ExpectedAlignment != 0 ||
      // The offset may be satisfied, but the effective alignment may still be
      // lower than the expected expression alignment.
      CompleteObjectAlignment < ExpectedAlignment) {
    // If this happens, we want to determine a sensible culprit for it.
    // Intuitively, walking the chain of member expressions from right to
    // left, we start with the alignment required by the field type, but some
    // packed attribute in that chain has reduced the alignment. Another packed
    // structure may increase it again, but if we get here such an increase has
    // not been enough. So pointing at the first FieldDecl that either is
    // packed itself or whose RecordDecl is packed seems reasonable.
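    // For instance (illustrative only), given
    //   struct __attribute__((packed)) Inner { char c; int i; };
    //   struct Outer { char c; Inner in; } o;
    // the chain for '&o.in.i' is [i, in]; 'i' is not packed itself, but its
    // parent 'Inner' is, so 'i' is the field reported below.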
    FieldDecl *FD = nullptr;
    CharUnits Alignment;
    for (FieldDecl *FDI : ReverseMemberChain) {
      if (FDI->hasAttr<PackedAttr>() ||
          FDI->getParent()->hasAttr<PackedAttr>()) {
        FD = FDI;
        Alignment = std::min(
            Context.getTypeAlignInChars(FD->getType()),
            Context.getTypeAlignInChars(FD->getParent()->getTypeForDecl()));
        break;
      }
    }
    assert(FD && "We did not find a packed FieldDecl!");
    Action(E, FD->getParent(), FD, Alignment);
  }
}

void Sema::CheckAddressOfPackedMember(Expr *rhs) {
  using namespace std::placeholders;

  RefersToMemberWithReducedAlignment(
      rhs, std::bind(&Sema::AddPotentialMisalignedMembers, std::ref(*this), _1,
                     _2, _3, _4));
}