1 //===--- Sema.cpp - AST Builder and Semantic Analysis Implementation ------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file implements the actions class which performs semantic analysis and 10 // builds an AST out of a parse stream. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "UsedDeclVisitor.h" 15 #include "clang/AST/ASTContext.h" 16 #include "clang/AST/ASTDiagnostic.h" 17 #include "clang/AST/DeclCXX.h" 18 #include "clang/AST/DeclFriend.h" 19 #include "clang/AST/DeclObjC.h" 20 #include "clang/AST/Expr.h" 21 #include "clang/AST/ExprCXX.h" 22 #include "clang/AST/PrettyDeclStackTrace.h" 23 #include "clang/AST/StmtCXX.h" 24 #include "clang/Basic/DiagnosticOptions.h" 25 #include "clang/Basic/PartialDiagnostic.h" 26 #include "clang/Basic/SourceManager.h" 27 #include "clang/Basic/Stack.h" 28 #include "clang/Basic/TargetInfo.h" 29 #include "clang/Lex/HeaderSearch.h" 30 #include "clang/Lex/Preprocessor.h" 31 #include "clang/Sema/CXXFieldCollector.h" 32 #include "clang/Sema/DelayedDiagnostic.h" 33 #include "clang/Sema/ExternalSemaSource.h" 34 #include "clang/Sema/Initialization.h" 35 #include "clang/Sema/MultiplexExternalSemaSource.h" 36 #include "clang/Sema/ObjCMethodList.h" 37 #include "clang/Sema/Scope.h" 38 #include "clang/Sema/ScopeInfo.h" 39 #include "clang/Sema/SemaConsumer.h" 40 #include "clang/Sema/SemaInternal.h" 41 #include "clang/Sema/TemplateDeduction.h" 42 #include "clang/Sema/TemplateInstCallback.h" 43 #include "clang/Sema/TypoCorrection.h" 44 #include "llvm/ADT/DenseMap.h" 45 #include "llvm/ADT/SmallPtrSet.h" 46 #include "llvm/Support/TimeProfiler.h" 47 48 using namespace clang; 49 using namespace sema; 50 51 SourceLocation Sema::getLocForEndOfToken(SourceLocation Loc, unsigned Offset) { 52 return Lexer::getLocForEndOfToken(Loc, Offset, SourceMgr, LangOpts); 53 } 54 55 ModuleLoader &Sema::getModuleLoader() const { return PP.getModuleLoader(); } 56 57 IdentifierInfo * 58 Sema::InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName, 59 unsigned int Index) { 60 std::string InventedName; 61 llvm::raw_string_ostream OS(InventedName); 62 63 if (!ParamName) 64 OS << "auto:" << Index + 1; 65 else 66 OS << ParamName->getName() << ":auto"; 67 68 OS.flush(); 69 return &Context.Idents.get(OS.str()); 70 } 71 72 PrintingPolicy Sema::getPrintingPolicy(const ASTContext &Context, 73 const Preprocessor &PP) { 74 PrintingPolicy Policy = Context.getPrintingPolicy(); 75 // In diagnostics, we print _Bool as bool if the latter is defined as the 76 // former. 77 Policy.Bool = Context.getLangOpts().Bool; 78 if (!Policy.Bool) { 79 if (const MacroInfo *BoolMacro = PP.getMacroInfo(Context.getBoolName())) { 80 Policy.Bool = BoolMacro->isObjectLike() && 81 BoolMacro->getNumTokens() == 1 && 82 BoolMacro->getReplacementToken(0).is(tok::kw__Bool); 83 } 84 } 85 86 return Policy; 87 } 88 89 void Sema::ActOnTranslationUnitScope(Scope *S) { 90 TUScope = S; 91 PushDeclContext(S, Context.getTranslationUnitDecl()); 92 } 93 94 namespace clang { 95 namespace sema { 96 97 class SemaPPCallbacks : public PPCallbacks { 98 Sema *S = nullptr; 99 llvm::SmallVector<SourceLocation, 8> IncludeStack; 100 101 public: 102 void set(Sema &S) { this->S = &S; } 103 104 void reset() { S = nullptr; } 105 106 virtual void FileChanged(SourceLocation Loc, FileChangeReason Reason, 107 SrcMgr::CharacteristicKind FileType, 108 FileID PrevFID) override { 109 if (!S) 110 return; 111 switch (Reason) { 112 case EnterFile: { 113 SourceManager &SM = S->getSourceManager(); 114 SourceLocation IncludeLoc = SM.getIncludeLoc(SM.getFileID(Loc)); 115 if (IncludeLoc.isValid()) { 116 if (llvm::timeTraceProfilerEnabled()) { 117 const FileEntry *FE = SM.getFileEntryForID(SM.getFileID(Loc)); 118 llvm::timeTraceProfilerBegin( 119 "Source", FE != nullptr ? FE->getName() : StringRef("<unknown>")); 120 } 121 122 IncludeStack.push_back(IncludeLoc); 123 S->DiagnoseNonDefaultPragmaAlignPack( 124 Sema::PragmaAlignPackDiagnoseKind::NonDefaultStateAtInclude, 125 IncludeLoc); 126 } 127 break; 128 } 129 case ExitFile: 130 if (!IncludeStack.empty()) { 131 if (llvm::timeTraceProfilerEnabled()) 132 llvm::timeTraceProfilerEnd(); 133 134 S->DiagnoseNonDefaultPragmaAlignPack( 135 Sema::PragmaAlignPackDiagnoseKind::ChangedStateAtExit, 136 IncludeStack.pop_back_val()); 137 } 138 break; 139 default: 140 break; 141 } 142 } 143 }; 144 145 } // end namespace sema 146 } // end namespace clang 147 148 const unsigned Sema::MaxAlignmentExponent; 149 const unsigned Sema::MaximumAlignment; 150 151 Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer, 152 TranslationUnitKind TUKind, CodeCompleteConsumer *CodeCompleter) 153 : ExternalSource(nullptr), isMultiplexExternalSource(false), 154 CurFPFeatures(pp.getLangOpts()), LangOpts(pp.getLangOpts()), PP(pp), 155 Context(ctxt), Consumer(consumer), Diags(PP.getDiagnostics()), 156 SourceMgr(PP.getSourceManager()), CollectStats(false), 157 CodeCompleter(CodeCompleter), CurContext(nullptr), 158 OriginalLexicalContext(nullptr), MSStructPragmaOn(false), 159 MSPointerToMemberRepresentationMethod( 160 LangOpts.getMSPointerToMemberRepresentationMethod()), 161 VtorDispStack(LangOpts.getVtorDispMode()), 162 AlignPackStack(AlignPackInfo(getLangOpts().XLPragmaPack)), 163 DataSegStack(nullptr), BSSSegStack(nullptr), ConstSegStack(nullptr), 164 CodeSegStack(nullptr), FpPragmaStack(FPOptionsOverride()), 165 CurInitSeg(nullptr), VisContext(nullptr), 166 PragmaAttributeCurrentTargetDecl(nullptr), 167 IsBuildingRecoveryCallExpr(false), Cleanup{}, LateTemplateParser(nullptr), 168 LateTemplateParserCleanup(nullptr), OpaqueParser(nullptr), IdResolver(pp), 169 StdExperimentalNamespaceCache(nullptr), StdInitializerList(nullptr), 170 StdCoroutineTraitsCache(nullptr), CXXTypeInfoDecl(nullptr), 171 MSVCGuidDecl(nullptr), NSNumberDecl(nullptr), NSValueDecl(nullptr), 172 NSStringDecl(nullptr), StringWithUTF8StringMethod(nullptr), 173 ValueWithBytesObjCTypeMethod(nullptr), NSArrayDecl(nullptr), 174 ArrayWithObjectsMethod(nullptr), NSDictionaryDecl(nullptr), 175 DictionaryWithObjectsMethod(nullptr), GlobalNewDeleteDeclared(false), 176 TUKind(TUKind), NumSFINAEErrors(0), 177 FullyCheckedComparisonCategories( 178 static_cast<unsigned>(ComparisonCategoryType::Last) + 1), 179 SatisfactionCache(Context), AccessCheckingSFINAE(false), 180 InNonInstantiationSFINAEContext(false), NonInstantiationEntries(0), 181 ArgumentPackSubstitutionIndex(-1), CurrentInstantiationScope(nullptr), 182 DisableTypoCorrection(false), TyposCorrected(0), AnalysisWarnings(*this), 183 ThreadSafetyDeclCache(nullptr), VarDataSharingAttributesStack(nullptr), 184 CurScope(nullptr), Ident_super(nullptr), Ident___float128(nullptr) { 185 TUScope = nullptr; 186 isConstantEvaluatedOverride = false; 187 188 LoadedExternalKnownNamespaces = false; 189 for (unsigned I = 0; I != NSAPI::NumNSNumberLiteralMethods; ++I) 190 NSNumberLiteralMethods[I] = nullptr; 191 192 if (getLangOpts().ObjC) 193 NSAPIObj.reset(new NSAPI(Context)); 194 195 if (getLangOpts().CPlusPlus) 196 FieldCollector.reset(new CXXFieldCollector()); 197 198 // Tell diagnostics how to render things from the AST library. 199 Diags.SetArgToStringFn(&FormatASTNodeDiagnosticArgument, &Context); 200 201 ExprEvalContexts.emplace_back( 202 ExpressionEvaluationContext::PotentiallyEvaluated, 0, CleanupInfo{}, 203 nullptr, ExpressionEvaluationContextRecord::EK_Other); 204 205 // Initialization of data sharing attributes stack for OpenMP 206 InitDataSharingAttributesStack(); 207 208 std::unique_ptr<sema::SemaPPCallbacks> Callbacks = 209 std::make_unique<sema::SemaPPCallbacks>(); 210 SemaPPCallbackHandler = Callbacks.get(); 211 PP.addPPCallbacks(std::move(Callbacks)); 212 SemaPPCallbackHandler->set(*this); 213 } 214 215 // Anchor Sema's type info to this TU. 216 void Sema::anchor() {} 217 218 void Sema::addImplicitTypedef(StringRef Name, QualType T) { 219 DeclarationName DN = &Context.Idents.get(Name); 220 if (IdResolver.begin(DN) == IdResolver.end()) 221 PushOnScopeChains(Context.buildImplicitTypedef(T, Name), TUScope); 222 } 223 224 void Sema::Initialize() { 225 if (SemaConsumer *SC = dyn_cast<SemaConsumer>(&Consumer)) 226 SC->InitializeSema(*this); 227 228 // Tell the external Sema source about this Sema object. 229 if (ExternalSemaSource *ExternalSema 230 = dyn_cast_or_null<ExternalSemaSource>(Context.getExternalSource())) 231 ExternalSema->InitializeSema(*this); 232 233 // This needs to happen after ExternalSemaSource::InitializeSema(this) or we 234 // will not be able to merge any duplicate __va_list_tag decls correctly. 235 VAListTagName = PP.getIdentifierInfo("__va_list_tag"); 236 237 if (!TUScope) 238 return; 239 240 // Initialize predefined 128-bit integer types, if needed. 241 if (Context.getTargetInfo().hasInt128Type() || 242 (Context.getAuxTargetInfo() && 243 Context.getAuxTargetInfo()->hasInt128Type())) { 244 // If either of the 128-bit integer types are unavailable to name lookup, 245 // define them now. 246 DeclarationName Int128 = &Context.Idents.get("__int128_t"); 247 if (IdResolver.begin(Int128) == IdResolver.end()) 248 PushOnScopeChains(Context.getInt128Decl(), TUScope); 249 250 DeclarationName UInt128 = &Context.Idents.get("__uint128_t"); 251 if (IdResolver.begin(UInt128) == IdResolver.end()) 252 PushOnScopeChains(Context.getUInt128Decl(), TUScope); 253 } 254 255 256 // Initialize predefined Objective-C types: 257 if (getLangOpts().ObjC) { 258 // If 'SEL' does not yet refer to any declarations, make it refer to the 259 // predefined 'SEL'. 260 DeclarationName SEL = &Context.Idents.get("SEL"); 261 if (IdResolver.begin(SEL) == IdResolver.end()) 262 PushOnScopeChains(Context.getObjCSelDecl(), TUScope); 263 264 // If 'id' does not yet refer to any declarations, make it refer to the 265 // predefined 'id'. 266 DeclarationName Id = &Context.Idents.get("id"); 267 if (IdResolver.begin(Id) == IdResolver.end()) 268 PushOnScopeChains(Context.getObjCIdDecl(), TUScope); 269 270 // Create the built-in typedef for 'Class'. 271 DeclarationName Class = &Context.Idents.get("Class"); 272 if (IdResolver.begin(Class) == IdResolver.end()) 273 PushOnScopeChains(Context.getObjCClassDecl(), TUScope); 274 275 // Create the built-in forward declaratino for 'Protocol'. 276 DeclarationName Protocol = &Context.Idents.get("Protocol"); 277 if (IdResolver.begin(Protocol) == IdResolver.end()) 278 PushOnScopeChains(Context.getObjCProtocolDecl(), TUScope); 279 } 280 281 // Create the internal type for the *StringMakeConstantString builtins. 282 DeclarationName ConstantString = &Context.Idents.get("__NSConstantString"); 283 if (IdResolver.begin(ConstantString) == IdResolver.end()) 284 PushOnScopeChains(Context.getCFConstantStringDecl(), TUScope); 285 286 // Initialize Microsoft "predefined C++ types". 287 if (getLangOpts().MSVCCompat) { 288 if (getLangOpts().CPlusPlus && 289 IdResolver.begin(&Context.Idents.get("type_info")) == IdResolver.end()) 290 PushOnScopeChains(Context.buildImplicitRecord("type_info", TTK_Class), 291 TUScope); 292 293 addImplicitTypedef("size_t", Context.getSizeType()); 294 } 295 296 // Initialize predefined OpenCL types and supported extensions and (optional) 297 // core features. 298 if (getLangOpts().OpenCL) { 299 getOpenCLOptions().addSupport( 300 Context.getTargetInfo().getSupportedOpenCLOpts(), getLangOpts()); 301 getOpenCLOptions().enableSupportedCore(getLangOpts()); 302 addImplicitTypedef("sampler_t", Context.OCLSamplerTy); 303 addImplicitTypedef("event_t", Context.OCLEventTy); 304 if (getLangOpts().OpenCLCPlusPlus || getLangOpts().OpenCLVersion >= 200) { 305 addImplicitTypedef("clk_event_t", Context.OCLClkEventTy); 306 addImplicitTypedef("queue_t", Context.OCLQueueTy); 307 addImplicitTypedef("reserve_id_t", Context.OCLReserveIDTy); 308 addImplicitTypedef("atomic_int", Context.getAtomicType(Context.IntTy)); 309 addImplicitTypedef("atomic_uint", 310 Context.getAtomicType(Context.UnsignedIntTy)); 311 auto AtomicLongT = Context.getAtomicType(Context.LongTy); 312 addImplicitTypedef("atomic_long", AtomicLongT); 313 auto AtomicULongT = Context.getAtomicType(Context.UnsignedLongTy); 314 addImplicitTypedef("atomic_ulong", AtomicULongT); 315 addImplicitTypedef("atomic_float", 316 Context.getAtomicType(Context.FloatTy)); 317 auto AtomicDoubleT = Context.getAtomicType(Context.DoubleTy); 318 addImplicitTypedef("atomic_double", AtomicDoubleT); 319 // OpenCLC v2.0, s6.13.11.6 requires that atomic_flag is implemented as 320 // 32-bit integer and OpenCLC v2.0, s6.1.1 int is always 32-bit wide. 321 addImplicitTypedef("atomic_flag", Context.getAtomicType(Context.IntTy)); 322 auto AtomicIntPtrT = Context.getAtomicType(Context.getIntPtrType()); 323 addImplicitTypedef("atomic_intptr_t", AtomicIntPtrT); 324 auto AtomicUIntPtrT = Context.getAtomicType(Context.getUIntPtrType()); 325 addImplicitTypedef("atomic_uintptr_t", AtomicUIntPtrT); 326 auto AtomicSizeT = Context.getAtomicType(Context.getSizeType()); 327 addImplicitTypedef("atomic_size_t", AtomicSizeT); 328 auto AtomicPtrDiffT = Context.getAtomicType(Context.getPointerDiffType()); 329 addImplicitTypedef("atomic_ptrdiff_t", AtomicPtrDiffT); 330 331 // OpenCL v2.0 s6.13.11.6: 332 // - The atomic_long and atomic_ulong types are supported if the 333 // cl_khr_int64_base_atomics and cl_khr_int64_extended_atomics 334 // extensions are supported. 335 // - The atomic_double type is only supported if double precision 336 // is supported and the cl_khr_int64_base_atomics and 337 // cl_khr_int64_extended_atomics extensions are supported. 338 // - If the device address space is 64-bits, the data types 339 // atomic_intptr_t, atomic_uintptr_t, atomic_size_t and 340 // atomic_ptrdiff_t are supported if the cl_khr_int64_base_atomics and 341 // cl_khr_int64_extended_atomics extensions are supported. 342 std::vector<QualType> Atomic64BitTypes; 343 Atomic64BitTypes.push_back(AtomicLongT); 344 Atomic64BitTypes.push_back(AtomicULongT); 345 Atomic64BitTypes.push_back(AtomicDoubleT); 346 if (Context.getTypeSize(AtomicSizeT) == 64) { 347 Atomic64BitTypes.push_back(AtomicSizeT); 348 Atomic64BitTypes.push_back(AtomicIntPtrT); 349 Atomic64BitTypes.push_back(AtomicUIntPtrT); 350 Atomic64BitTypes.push_back(AtomicPtrDiffT); 351 } 352 for (auto &I : Atomic64BitTypes) 353 setOpenCLExtensionForType(I, 354 "cl_khr_int64_base_atomics cl_khr_int64_extended_atomics"); 355 356 setOpenCLExtensionForType(AtomicDoubleT, "cl_khr_fp64"); 357 } 358 359 setOpenCLExtensionForType(Context.DoubleTy, "cl_khr_fp64"); 360 361 #define GENERIC_IMAGE_TYPE_EXT(Type, Id, Ext) \ 362 setOpenCLExtensionForType(Context.Id, Ext); 363 #include "clang/Basic/OpenCLImageTypes.def" 364 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ 365 addImplicitTypedef(#ExtType, Context.Id##Ty); \ 366 setOpenCLExtensionForType(Context.Id##Ty, #Ext); 367 #include "clang/Basic/OpenCLExtensionTypes.def" 368 } 369 370 if (Context.getTargetInfo().hasAArch64SVETypes()) { 371 #define SVE_TYPE(Name, Id, SingletonId) \ 372 addImplicitTypedef(Name, Context.SingletonId); 373 #include "clang/Basic/AArch64SVEACLETypes.def" 374 } 375 376 if (Context.getTargetInfo().getTriple().isPPC64() && 377 Context.getTargetInfo().hasFeature("paired-vector-memops")) { 378 if (Context.getTargetInfo().hasFeature("mma")) { 379 #define PPC_VECTOR_MMA_TYPE(Name, Id, Size) \ 380 addImplicitTypedef(#Name, Context.Id##Ty); 381 #include "clang/Basic/PPCTypes.def" 382 } 383 #define PPC_VECTOR_VSX_TYPE(Name, Id, Size) \ 384 addImplicitTypedef(#Name, Context.Id##Ty); 385 #include "clang/Basic/PPCTypes.def" 386 } 387 388 if (Context.getTargetInfo().hasBuiltinMSVaList()) { 389 DeclarationName MSVaList = &Context.Idents.get("__builtin_ms_va_list"); 390 if (IdResolver.begin(MSVaList) == IdResolver.end()) 391 PushOnScopeChains(Context.getBuiltinMSVaListDecl(), TUScope); 392 } 393 394 DeclarationName BuiltinVaList = &Context.Idents.get("__builtin_va_list"); 395 if (IdResolver.begin(BuiltinVaList) == IdResolver.end()) 396 PushOnScopeChains(Context.getBuiltinVaListDecl(), TUScope); 397 } 398 399 Sema::~Sema() { 400 assert(InstantiatingSpecializations.empty() && 401 "failed to clean up an InstantiatingTemplate?"); 402 403 if (VisContext) FreeVisContext(); 404 405 // Kill all the active scopes. 406 for (sema::FunctionScopeInfo *FSI : FunctionScopes) 407 delete FSI; 408 409 // Tell the SemaConsumer to forget about us; we're going out of scope. 410 if (SemaConsumer *SC = dyn_cast<SemaConsumer>(&Consumer)) 411 SC->ForgetSema(); 412 413 // Detach from the external Sema source. 414 if (ExternalSemaSource *ExternalSema 415 = dyn_cast_or_null<ExternalSemaSource>(Context.getExternalSource())) 416 ExternalSema->ForgetSema(); 417 418 // If Sema's ExternalSource is the multiplexer - we own it. 419 if (isMultiplexExternalSource) 420 delete ExternalSource; 421 422 // Delete cached satisfactions. 423 std::vector<ConstraintSatisfaction *> Satisfactions; 424 Satisfactions.reserve(Satisfactions.size()); 425 for (auto &Node : SatisfactionCache) 426 Satisfactions.push_back(&Node); 427 for (auto *Node : Satisfactions) 428 delete Node; 429 430 threadSafety::threadSafetyCleanup(ThreadSafetyDeclCache); 431 432 // Destroys data sharing attributes stack for OpenMP 433 DestroyDataSharingAttributesStack(); 434 435 // Detach from the PP callback handler which outlives Sema since it's owned 436 // by the preprocessor. 437 SemaPPCallbackHandler->reset(); 438 } 439 440 void Sema::warnStackExhausted(SourceLocation Loc) { 441 // Only warn about this once. 442 if (!WarnedStackExhausted) { 443 Diag(Loc, diag::warn_stack_exhausted); 444 WarnedStackExhausted = true; 445 } 446 } 447 448 void Sema::runWithSufficientStackSpace(SourceLocation Loc, 449 llvm::function_ref<void()> Fn) { 450 clang::runWithSufficientStackSpace([&] { warnStackExhausted(Loc); }, Fn); 451 } 452 453 /// makeUnavailableInSystemHeader - There is an error in the current 454 /// context. If we're still in a system header, and we can plausibly 455 /// make the relevant declaration unavailable instead of erroring, do 456 /// so and return true. 457 bool Sema::makeUnavailableInSystemHeader(SourceLocation loc, 458 UnavailableAttr::ImplicitReason reason) { 459 // If we're not in a function, it's an error. 460 FunctionDecl *fn = dyn_cast<FunctionDecl>(CurContext); 461 if (!fn) return false; 462 463 // If we're in template instantiation, it's an error. 464 if (inTemplateInstantiation()) 465 return false; 466 467 // If that function's not in a system header, it's an error. 468 if (!Context.getSourceManager().isInSystemHeader(loc)) 469 return false; 470 471 // If the function is already unavailable, it's not an error. 472 if (fn->hasAttr<UnavailableAttr>()) return true; 473 474 fn->addAttr(UnavailableAttr::CreateImplicit(Context, "", reason, loc)); 475 return true; 476 } 477 478 ASTMutationListener *Sema::getASTMutationListener() const { 479 return getASTConsumer().GetASTMutationListener(); 480 } 481 482 ///Registers an external source. If an external source already exists, 483 /// creates a multiplex external source and appends to it. 484 /// 485 ///\param[in] E - A non-null external sema source. 486 /// 487 void Sema::addExternalSource(ExternalSemaSource *E) { 488 assert(E && "Cannot use with NULL ptr"); 489 490 if (!ExternalSource) { 491 ExternalSource = E; 492 return; 493 } 494 495 if (isMultiplexExternalSource) 496 static_cast<MultiplexExternalSemaSource*>(ExternalSource)->addSource(*E); 497 else { 498 ExternalSource = new MultiplexExternalSemaSource(*ExternalSource, *E); 499 isMultiplexExternalSource = true; 500 } 501 } 502 503 /// Print out statistics about the semantic analysis. 504 void Sema::PrintStats() const { 505 llvm::errs() << "\n*** Semantic Analysis Stats:\n"; 506 llvm::errs() << NumSFINAEErrors << " SFINAE diagnostics trapped.\n"; 507 508 BumpAlloc.PrintStats(); 509 AnalysisWarnings.PrintStats(); 510 } 511 512 void Sema::diagnoseNullableToNonnullConversion(QualType DstType, 513 QualType SrcType, 514 SourceLocation Loc) { 515 Optional<NullabilityKind> ExprNullability = SrcType->getNullability(Context); 516 if (!ExprNullability || (*ExprNullability != NullabilityKind::Nullable && 517 *ExprNullability != NullabilityKind::NullableResult)) 518 return; 519 520 Optional<NullabilityKind> TypeNullability = DstType->getNullability(Context); 521 if (!TypeNullability || *TypeNullability != NullabilityKind::NonNull) 522 return; 523 524 Diag(Loc, diag::warn_nullability_lost) << SrcType << DstType; 525 } 526 527 void Sema::diagnoseZeroToNullptrConversion(CastKind Kind, const Expr* E) { 528 if (Diags.isIgnored(diag::warn_zero_as_null_pointer_constant, 529 E->getBeginLoc())) 530 return; 531 // nullptr only exists from C++11 on, so don't warn on its absence earlier. 532 if (!getLangOpts().CPlusPlus11) 533 return; 534 535 if (Kind != CK_NullToPointer && Kind != CK_NullToMemberPointer) 536 return; 537 if (E->IgnoreParenImpCasts()->getType()->isNullPtrType()) 538 return; 539 540 // If it is a macro from system header, and if the macro name is not "NULL", 541 // do not warn. 542 SourceLocation MaybeMacroLoc = E->getBeginLoc(); 543 if (Diags.getSuppressSystemWarnings() && 544 SourceMgr.isInSystemMacro(MaybeMacroLoc) && 545 !findMacroSpelling(MaybeMacroLoc, "NULL")) 546 return; 547 548 Diag(E->getBeginLoc(), diag::warn_zero_as_null_pointer_constant) 549 << FixItHint::CreateReplacement(E->getSourceRange(), "nullptr"); 550 } 551 552 /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit cast. 553 /// If there is already an implicit cast, merge into the existing one. 554 /// The result is of the given category. 555 ExprResult Sema::ImpCastExprToType(Expr *E, QualType Ty, 556 CastKind Kind, ExprValueKind VK, 557 const CXXCastPath *BasePath, 558 CheckedConversionKind CCK) { 559 #ifndef NDEBUG 560 if (VK == VK_RValue && !E->isRValue()) { 561 switch (Kind) { 562 default: 563 llvm_unreachable(("can't implicitly cast lvalue to rvalue with this cast " 564 "kind: " + 565 std::string(CastExpr::getCastKindName(Kind))) 566 .c_str()); 567 case CK_Dependent: 568 case CK_LValueToRValue: 569 case CK_ArrayToPointerDecay: 570 case CK_FunctionToPointerDecay: 571 case CK_ToVoid: 572 case CK_NonAtomicToAtomic: 573 break; 574 } 575 } 576 assert((VK == VK_RValue || Kind == CK_Dependent || !E->isRValue()) && 577 "can't cast rvalue to lvalue"); 578 #endif 579 580 diagnoseNullableToNonnullConversion(Ty, E->getType(), E->getBeginLoc()); 581 diagnoseZeroToNullptrConversion(Kind, E); 582 583 QualType ExprTy = Context.getCanonicalType(E->getType()); 584 QualType TypeTy = Context.getCanonicalType(Ty); 585 586 if (ExprTy == TypeTy) 587 return E; 588 589 // C++1z [conv.array]: The temporary materialization conversion is applied. 590 // We also use this to fuel C++ DR1213, which applies to C++11 onwards. 591 if (Kind == CK_ArrayToPointerDecay && getLangOpts().CPlusPlus && 592 E->getValueKind() == VK_RValue) { 593 // The temporary is an lvalue in C++98 and an xvalue otherwise. 594 ExprResult Materialized = CreateMaterializeTemporaryExpr( 595 E->getType(), E, !getLangOpts().CPlusPlus11); 596 if (Materialized.isInvalid()) 597 return ExprError(); 598 E = Materialized.get(); 599 } 600 601 if (ImplicitCastExpr *ImpCast = dyn_cast<ImplicitCastExpr>(E)) { 602 if (ImpCast->getCastKind() == Kind && (!BasePath || BasePath->empty())) { 603 ImpCast->setType(Ty); 604 ImpCast->setValueKind(VK); 605 return E; 606 } 607 } 608 609 return ImplicitCastExpr::Create(Context, Ty, Kind, E, BasePath, VK, 610 CurFPFeatureOverrides()); 611 } 612 613 /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding 614 /// to the conversion from scalar type ScalarTy to the Boolean type. 615 CastKind Sema::ScalarTypeToBooleanCastKind(QualType ScalarTy) { 616 switch (ScalarTy->getScalarTypeKind()) { 617 case Type::STK_Bool: return CK_NoOp; 618 case Type::STK_CPointer: return CK_PointerToBoolean; 619 case Type::STK_BlockPointer: return CK_PointerToBoolean; 620 case Type::STK_ObjCObjectPointer: return CK_PointerToBoolean; 621 case Type::STK_MemberPointer: return CK_MemberPointerToBoolean; 622 case Type::STK_Integral: return CK_IntegralToBoolean; 623 case Type::STK_Floating: return CK_FloatingToBoolean; 624 case Type::STK_IntegralComplex: return CK_IntegralComplexToBoolean; 625 case Type::STK_FloatingComplex: return CK_FloatingComplexToBoolean; 626 case Type::STK_FixedPoint: return CK_FixedPointToBoolean; 627 } 628 llvm_unreachable("unknown scalar type kind"); 629 } 630 631 /// Used to prune the decls of Sema's UnusedFileScopedDecls vector. 632 static bool ShouldRemoveFromUnused(Sema *SemaRef, const DeclaratorDecl *D) { 633 if (D->getMostRecentDecl()->isUsed()) 634 return true; 635 636 if (D->isExternallyVisible()) 637 return true; 638 639 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { 640 // If this is a function template and none of its specializations is used, 641 // we should warn. 642 if (FunctionTemplateDecl *Template = FD->getDescribedFunctionTemplate()) 643 for (const auto *Spec : Template->specializations()) 644 if (ShouldRemoveFromUnused(SemaRef, Spec)) 645 return true; 646 647 // UnusedFileScopedDecls stores the first declaration. 648 // The declaration may have become definition so check again. 649 const FunctionDecl *DeclToCheck; 650 if (FD->hasBody(DeclToCheck)) 651 return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck); 652 653 // Later redecls may add new information resulting in not having to warn, 654 // so check again. 655 DeclToCheck = FD->getMostRecentDecl(); 656 if (DeclToCheck != FD) 657 return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck); 658 } 659 660 if (const VarDecl *VD = dyn_cast<VarDecl>(D)) { 661 // If a variable usable in constant expressions is referenced, 662 // don't warn if it isn't used: if the value of a variable is required 663 // for the computation of a constant expression, it doesn't make sense to 664 // warn even if the variable isn't odr-used. (isReferenced doesn't 665 // precisely reflect that, but it's a decent approximation.) 666 if (VD->isReferenced() && 667 VD->mightBeUsableInConstantExpressions(SemaRef->Context)) 668 return true; 669 670 if (VarTemplateDecl *Template = VD->getDescribedVarTemplate()) 671 // If this is a variable template and none of its specializations is used, 672 // we should warn. 673 for (const auto *Spec : Template->specializations()) 674 if (ShouldRemoveFromUnused(SemaRef, Spec)) 675 return true; 676 677 // UnusedFileScopedDecls stores the first declaration. 678 // The declaration may have become definition so check again. 679 const VarDecl *DeclToCheck = VD->getDefinition(); 680 if (DeclToCheck) 681 return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck); 682 683 // Later redecls may add new information resulting in not having to warn, 684 // so check again. 685 DeclToCheck = VD->getMostRecentDecl(); 686 if (DeclToCheck != VD) 687 return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck); 688 } 689 690 return false; 691 } 692 693 static bool isFunctionOrVarDeclExternC(NamedDecl *ND) { 694 if (auto *FD = dyn_cast<FunctionDecl>(ND)) 695 return FD->isExternC(); 696 return cast<VarDecl>(ND)->isExternC(); 697 } 698 699 /// Determine whether ND is an external-linkage function or variable whose 700 /// type has no linkage. 701 bool Sema::isExternalWithNoLinkageType(ValueDecl *VD) { 702 // Note: it's not quite enough to check whether VD has UniqueExternalLinkage, 703 // because we also want to catch the case where its type has VisibleNoLinkage, 704 // which does not affect the linkage of VD. 705 return getLangOpts().CPlusPlus && VD->hasExternalFormalLinkage() && 706 !isExternalFormalLinkage(VD->getType()->getLinkage()) && 707 !isFunctionOrVarDeclExternC(VD); 708 } 709 710 /// Obtains a sorted list of functions and variables that are undefined but 711 /// ODR-used. 712 void Sema::getUndefinedButUsed( 713 SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined) { 714 for (const auto &UndefinedUse : UndefinedButUsed) { 715 NamedDecl *ND = UndefinedUse.first; 716 717 // Ignore attributes that have become invalid. 718 if (ND->isInvalidDecl()) continue; 719 720 // __attribute__((weakref)) is basically a definition. 721 if (ND->hasAttr<WeakRefAttr>()) continue; 722 723 if (isa<CXXDeductionGuideDecl>(ND)) 724 continue; 725 726 if (ND->hasAttr<DLLImportAttr>() || ND->hasAttr<DLLExportAttr>()) { 727 // An exported function will always be emitted when defined, so even if 728 // the function is inline, it doesn't have to be emitted in this TU. An 729 // imported function implies that it has been exported somewhere else. 730 continue; 731 } 732 733 if (FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) { 734 if (FD->isDefined()) 735 continue; 736 if (FD->isExternallyVisible() && 737 !isExternalWithNoLinkageType(FD) && 738 !FD->getMostRecentDecl()->isInlined() && 739 !FD->hasAttr<ExcludeFromExplicitInstantiationAttr>()) 740 continue; 741 if (FD->getBuiltinID()) 742 continue; 743 } else { 744 auto *VD = cast<VarDecl>(ND); 745 if (VD->hasDefinition() != VarDecl::DeclarationOnly) 746 continue; 747 if (VD->isExternallyVisible() && 748 !isExternalWithNoLinkageType(VD) && 749 !VD->getMostRecentDecl()->isInline() && 750 !VD->hasAttr<ExcludeFromExplicitInstantiationAttr>()) 751 continue; 752 753 // Skip VarDecls that lack formal definitions but which we know are in 754 // fact defined somewhere. 755 if (VD->isKnownToBeDefined()) 756 continue; 757 } 758 759 Undefined.push_back(std::make_pair(ND, UndefinedUse.second)); 760 } 761 } 762 763 /// checkUndefinedButUsed - Check for undefined objects with internal linkage 764 /// or that are inline. 765 static void checkUndefinedButUsed(Sema &S) { 766 if (S.UndefinedButUsed.empty()) return; 767 768 // Collect all the still-undefined entities with internal linkage. 769 SmallVector<std::pair<NamedDecl *, SourceLocation>, 16> Undefined; 770 S.getUndefinedButUsed(Undefined); 771 if (Undefined.empty()) return; 772 773 for (auto Undef : Undefined) { 774 ValueDecl *VD = cast<ValueDecl>(Undef.first); 775 SourceLocation UseLoc = Undef.second; 776 777 if (S.isExternalWithNoLinkageType(VD)) { 778 // C++ [basic.link]p8: 779 // A type without linkage shall not be used as the type of a variable 780 // or function with external linkage unless 781 // -- the entity has C language linkage 782 // -- the entity is not odr-used or is defined in the same TU 783 // 784 // As an extension, accept this in cases where the type is externally 785 // visible, since the function or variable actually can be defined in 786 // another translation unit in that case. 787 S.Diag(VD->getLocation(), isExternallyVisible(VD->getType()->getLinkage()) 788 ? diag::ext_undefined_internal_type 789 : diag::err_undefined_internal_type) 790 << isa<VarDecl>(VD) << VD; 791 } else if (!VD->isExternallyVisible()) { 792 // FIXME: We can promote this to an error. The function or variable can't 793 // be defined anywhere else, so the program must necessarily violate the 794 // one definition rule. 795 S.Diag(VD->getLocation(), diag::warn_undefined_internal) 796 << isa<VarDecl>(VD) << VD; 797 } else if (auto *FD = dyn_cast<FunctionDecl>(VD)) { 798 (void)FD; 799 assert(FD->getMostRecentDecl()->isInlined() && 800 "used object requires definition but isn't inline or internal?"); 801 // FIXME: This is ill-formed; we should reject. 802 S.Diag(VD->getLocation(), diag::warn_undefined_inline) << VD; 803 } else { 804 assert(cast<VarDecl>(VD)->getMostRecentDecl()->isInline() && 805 "used var requires definition but isn't inline or internal?"); 806 S.Diag(VD->getLocation(), diag::err_undefined_inline_var) << VD; 807 } 808 if (UseLoc.isValid()) 809 S.Diag(UseLoc, diag::note_used_here); 810 } 811 812 S.UndefinedButUsed.clear(); 813 } 814 815 void Sema::LoadExternalWeakUndeclaredIdentifiers() { 816 if (!ExternalSource) 817 return; 818 819 SmallVector<std::pair<IdentifierInfo *, WeakInfo>, 4> WeakIDs; 820 ExternalSource->ReadWeakUndeclaredIdentifiers(WeakIDs); 821 for (auto &WeakID : WeakIDs) 822 WeakUndeclaredIdentifiers.insert(WeakID); 823 } 824 825 826 typedef llvm::DenseMap<const CXXRecordDecl*, bool> RecordCompleteMap; 827 828 /// Returns true, if all methods and nested classes of the given 829 /// CXXRecordDecl are defined in this translation unit. 830 /// 831 /// Should only be called from ActOnEndOfTranslationUnit so that all 832 /// definitions are actually read. 833 static bool MethodsAndNestedClassesComplete(const CXXRecordDecl *RD, 834 RecordCompleteMap &MNCComplete) { 835 RecordCompleteMap::iterator Cache = MNCComplete.find(RD); 836 if (Cache != MNCComplete.end()) 837 return Cache->second; 838 if (!RD->isCompleteDefinition()) 839 return false; 840 bool Complete = true; 841 for (DeclContext::decl_iterator I = RD->decls_begin(), 842 E = RD->decls_end(); 843 I != E && Complete; ++I) { 844 if (const CXXMethodDecl *M = dyn_cast<CXXMethodDecl>(*I)) 845 Complete = M->isDefined() || M->isDefaulted() || 846 (M->isPure() && !isa<CXXDestructorDecl>(M)); 847 else if (const FunctionTemplateDecl *F = dyn_cast<FunctionTemplateDecl>(*I)) 848 // If the template function is marked as late template parsed at this 849 // point, it has not been instantiated and therefore we have not 850 // performed semantic analysis on it yet, so we cannot know if the type 851 // can be considered complete. 852 Complete = !F->getTemplatedDecl()->isLateTemplateParsed() && 853 F->getTemplatedDecl()->isDefined(); 854 else if (const CXXRecordDecl *R = dyn_cast<CXXRecordDecl>(*I)) { 855 if (R->isInjectedClassName()) 856 continue; 857 if (R->hasDefinition()) 858 Complete = MethodsAndNestedClassesComplete(R->getDefinition(), 859 MNCComplete); 860 else 861 Complete = false; 862 } 863 } 864 MNCComplete[RD] = Complete; 865 return Complete; 866 } 867 868 /// Returns true, if the given CXXRecordDecl is fully defined in this 869 /// translation unit, i.e. all methods are defined or pure virtual and all 870 /// friends, friend functions and nested classes are fully defined in this 871 /// translation unit. 872 /// 873 /// Should only be called from ActOnEndOfTranslationUnit so that all 874 /// definitions are actually read. 875 static bool IsRecordFullyDefined(const CXXRecordDecl *RD, 876 RecordCompleteMap &RecordsComplete, 877 RecordCompleteMap &MNCComplete) { 878 RecordCompleteMap::iterator Cache = RecordsComplete.find(RD); 879 if (Cache != RecordsComplete.end()) 880 return Cache->second; 881 bool Complete = MethodsAndNestedClassesComplete(RD, MNCComplete); 882 for (CXXRecordDecl::friend_iterator I = RD->friend_begin(), 883 E = RD->friend_end(); 884 I != E && Complete; ++I) { 885 // Check if friend classes and methods are complete. 886 if (TypeSourceInfo *TSI = (*I)->getFriendType()) { 887 // Friend classes are available as the TypeSourceInfo of the FriendDecl. 888 if (CXXRecordDecl *FriendD = TSI->getType()->getAsCXXRecordDecl()) 889 Complete = MethodsAndNestedClassesComplete(FriendD, MNCComplete); 890 else 891 Complete = false; 892 } else { 893 // Friend functions are available through the NamedDecl of FriendDecl. 894 if (const FunctionDecl *FD = 895 dyn_cast<FunctionDecl>((*I)->getFriendDecl())) 896 Complete = FD->isDefined(); 897 else 898 // This is a template friend, give up. 899 Complete = false; 900 } 901 } 902 RecordsComplete[RD] = Complete; 903 return Complete; 904 } 905 906 void Sema::emitAndClearUnusedLocalTypedefWarnings() { 907 if (ExternalSource) 908 ExternalSource->ReadUnusedLocalTypedefNameCandidates( 909 UnusedLocalTypedefNameCandidates); 910 for (const TypedefNameDecl *TD : UnusedLocalTypedefNameCandidates) { 911 if (TD->isReferenced()) 912 continue; 913 Diag(TD->getLocation(), diag::warn_unused_local_typedef) 914 << isa<TypeAliasDecl>(TD) << TD->getDeclName(); 915 } 916 UnusedLocalTypedefNameCandidates.clear(); 917 } 918 919 /// This is called before the very first declaration in the translation unit 920 /// is parsed. Note that the ASTContext may have already injected some 921 /// declarations. 922 void Sema::ActOnStartOfTranslationUnit() { 923 if (getLangOpts().ModulesTS && 924 (getLangOpts().getCompilingModule() == LangOptions::CMK_ModuleInterface || 925 getLangOpts().getCompilingModule() == LangOptions::CMK_None)) { 926 // We start in an implied global module fragment. 927 SourceLocation StartOfTU = 928 SourceMgr.getLocForStartOfFile(SourceMgr.getMainFileID()); 929 ActOnGlobalModuleFragmentDecl(StartOfTU); 930 ModuleScopes.back().ImplicitGlobalModuleFragment = true; 931 } 932 } 933 934 void Sema::ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind) { 935 // No explicit actions are required at the end of the global module fragment. 936 if (Kind == TUFragmentKind::Global) 937 return; 938 939 // Transfer late parsed template instantiations over to the pending template 940 // instantiation list. During normal compilation, the late template parser 941 // will be installed and instantiating these templates will succeed. 942 // 943 // If we are building a TU prefix for serialization, it is also safe to 944 // transfer these over, even though they are not parsed. The end of the TU 945 // should be outside of any eager template instantiation scope, so when this 946 // AST is deserialized, these templates will not be parsed until the end of 947 // the combined TU. 948 PendingInstantiations.insert(PendingInstantiations.end(), 949 LateParsedInstantiations.begin(), 950 LateParsedInstantiations.end()); 951 LateParsedInstantiations.clear(); 952 953 // If DefinedUsedVTables ends up marking any virtual member functions it 954 // might lead to more pending template instantiations, which we then need 955 // to instantiate. 956 DefineUsedVTables(); 957 958 // C++: Perform implicit template instantiations. 959 // 960 // FIXME: When we perform these implicit instantiations, we do not 961 // carefully keep track of the point of instantiation (C++ [temp.point]). 962 // This means that name lookup that occurs within the template 963 // instantiation will always happen at the end of the translation unit, 964 // so it will find some names that are not required to be found. This is 965 // valid, but we could do better by diagnosing if an instantiation uses a 966 // name that was not visible at its first point of instantiation. 967 if (ExternalSource) { 968 // Load pending instantiations from the external source. 969 SmallVector<PendingImplicitInstantiation, 4> Pending; 970 ExternalSource->ReadPendingInstantiations(Pending); 971 for (auto PII : Pending) 972 if (auto Func = dyn_cast<FunctionDecl>(PII.first)) 973 Func->setInstantiationIsPending(true); 974 PendingInstantiations.insert(PendingInstantiations.begin(), 975 Pending.begin(), Pending.end()); 976 } 977 978 { 979 llvm::TimeTraceScope TimeScope("PerformPendingInstantiations"); 980 PerformPendingInstantiations(); 981 } 982 983 emitDeferredDiags(); 984 985 assert(LateParsedInstantiations.empty() && 986 "end of TU template instantiation should not create more " 987 "late-parsed templates"); 988 989 // Report diagnostics for uncorrected delayed typos. Ideally all of them 990 // should have been corrected by that time, but it is very hard to cover all 991 // cases in practice. 992 for (const auto &Typo : DelayedTypos) { 993 // We pass an empty TypoCorrection to indicate no correction was performed. 994 Typo.second.DiagHandler(TypoCorrection()); 995 } 996 DelayedTypos.clear(); 997 } 998 999 /// ActOnEndOfTranslationUnit - This is called at the very end of the 1000 /// translation unit when EOF is reached and all but the top-level scope is 1001 /// popped. 1002 void Sema::ActOnEndOfTranslationUnit() { 1003 assert(DelayedDiagnostics.getCurrentPool() == nullptr 1004 && "reached end of translation unit with a pool attached?"); 1005 1006 // If code completion is enabled, don't perform any end-of-translation-unit 1007 // work. 1008 if (PP.isCodeCompletionEnabled()) 1009 return; 1010 1011 // Complete translation units and modules define vtables and perform implicit 1012 // instantiations. PCH files do not. 1013 if (TUKind != TU_Prefix) { 1014 DiagnoseUseOfUnimplementedSelectors(); 1015 1016 ActOnEndOfTranslationUnitFragment( 1017 !ModuleScopes.empty() && ModuleScopes.back().Module->Kind == 1018 Module::PrivateModuleFragment 1019 ? TUFragmentKind::Private 1020 : TUFragmentKind::Normal); 1021 1022 if (LateTemplateParserCleanup) 1023 LateTemplateParserCleanup(OpaqueParser); 1024 1025 CheckDelayedMemberExceptionSpecs(); 1026 } else { 1027 // If we are building a TU prefix for serialization, it is safe to transfer 1028 // these over, even though they are not parsed. The end of the TU should be 1029 // outside of any eager template instantiation scope, so when this AST is 1030 // deserialized, these templates will not be parsed until the end of the 1031 // combined TU. 1032 PendingInstantiations.insert(PendingInstantiations.end(), 1033 LateParsedInstantiations.begin(), 1034 LateParsedInstantiations.end()); 1035 LateParsedInstantiations.clear(); 1036 1037 if (LangOpts.PCHInstantiateTemplates) { 1038 llvm::TimeTraceScope TimeScope("PerformPendingInstantiations"); 1039 PerformPendingInstantiations(); 1040 } 1041 } 1042 1043 DiagnoseUnterminatedPragmaAlignPack(); 1044 DiagnoseUnterminatedPragmaAttribute(); 1045 1046 // All delayed member exception specs should be checked or we end up accepting 1047 // incompatible declarations. 1048 assert(DelayedOverridingExceptionSpecChecks.empty()); 1049 assert(DelayedEquivalentExceptionSpecChecks.empty()); 1050 1051 // All dllexport classes should have been processed already. 1052 assert(DelayedDllExportClasses.empty()); 1053 assert(DelayedDllExportMemberFunctions.empty()); 1054 1055 // Remove file scoped decls that turned out to be used. 1056 UnusedFileScopedDecls.erase( 1057 std::remove_if(UnusedFileScopedDecls.begin(nullptr, true), 1058 UnusedFileScopedDecls.end(), 1059 [this](const DeclaratorDecl *DD) { 1060 return ShouldRemoveFromUnused(this, DD); 1061 }), 1062 UnusedFileScopedDecls.end()); 1063 1064 if (TUKind == TU_Prefix) { 1065 // Translation unit prefixes don't need any of the checking below. 1066 if (!PP.isIncrementalProcessingEnabled()) 1067 TUScope = nullptr; 1068 return; 1069 } 1070 1071 // Check for #pragma weak identifiers that were never declared 1072 LoadExternalWeakUndeclaredIdentifiers(); 1073 for (auto WeakID : WeakUndeclaredIdentifiers) { 1074 if (WeakID.second.getUsed()) 1075 continue; 1076 1077 Decl *PrevDecl = LookupSingleName(TUScope, WeakID.first, SourceLocation(), 1078 LookupOrdinaryName); 1079 if (PrevDecl != nullptr && 1080 !(isa<FunctionDecl>(PrevDecl) || isa<VarDecl>(PrevDecl))) 1081 Diag(WeakID.second.getLocation(), diag::warn_attribute_wrong_decl_type) 1082 << "'weak'" << ExpectedVariableOrFunction; 1083 else 1084 Diag(WeakID.second.getLocation(), diag::warn_weak_identifier_undeclared) 1085 << WeakID.first; 1086 } 1087 1088 if (LangOpts.CPlusPlus11 && 1089 !Diags.isIgnored(diag::warn_delegating_ctor_cycle, SourceLocation())) 1090 CheckDelegatingCtorCycles(); 1091 1092 if (!Diags.hasErrorOccurred()) { 1093 if (ExternalSource) 1094 ExternalSource->ReadUndefinedButUsed(UndefinedButUsed); 1095 checkUndefinedButUsed(*this); 1096 } 1097 1098 // A global-module-fragment is only permitted within a module unit. 1099 bool DiagnosedMissingModuleDeclaration = false; 1100 if (!ModuleScopes.empty() && 1101 ModuleScopes.back().Module->Kind == Module::GlobalModuleFragment && 1102 !ModuleScopes.back().ImplicitGlobalModuleFragment) { 1103 Diag(ModuleScopes.back().BeginLoc, 1104 diag::err_module_declaration_missing_after_global_module_introducer); 1105 DiagnosedMissingModuleDeclaration = true; 1106 } 1107 1108 if (TUKind == TU_Module) { 1109 // If we are building a module interface unit, we need to have seen the 1110 // module declaration by now. 1111 if (getLangOpts().getCompilingModule() == 1112 LangOptions::CMK_ModuleInterface && 1113 (ModuleScopes.empty() || 1114 !ModuleScopes.back().Module->isModulePurview()) && 1115 !DiagnosedMissingModuleDeclaration) { 1116 // FIXME: Make a better guess as to where to put the module declaration. 1117 Diag(getSourceManager().getLocForStartOfFile( 1118 getSourceManager().getMainFileID()), 1119 diag::err_module_declaration_missing); 1120 } 1121 1122 // If we are building a module, resolve all of the exported declarations 1123 // now. 1124 if (Module *CurrentModule = PP.getCurrentModule()) { 1125 ModuleMap &ModMap = PP.getHeaderSearchInfo().getModuleMap(); 1126 1127 SmallVector<Module *, 2> Stack; 1128 Stack.push_back(CurrentModule); 1129 while (!Stack.empty()) { 1130 Module *Mod = Stack.pop_back_val(); 1131 1132 // Resolve the exported declarations and conflicts. 1133 // FIXME: Actually complain, once we figure out how to teach the 1134 // diagnostic client to deal with complaints in the module map at this 1135 // point. 1136 ModMap.resolveExports(Mod, /*Complain=*/false); 1137 ModMap.resolveUses(Mod, /*Complain=*/false); 1138 ModMap.resolveConflicts(Mod, /*Complain=*/false); 1139 1140 // Queue the submodules, so their exports will also be resolved. 1141 Stack.append(Mod->submodule_begin(), Mod->submodule_end()); 1142 } 1143 } 1144 1145 // Warnings emitted in ActOnEndOfTranslationUnit() should be emitted for 1146 // modules when they are built, not every time they are used. 1147 emitAndClearUnusedLocalTypedefWarnings(); 1148 } 1149 1150 // C99 6.9.2p2: 1151 // A declaration of an identifier for an object that has file 1152 // scope without an initializer, and without a storage-class 1153 // specifier or with the storage-class specifier static, 1154 // constitutes a tentative definition. If a translation unit 1155 // contains one or more tentative definitions for an identifier, 1156 // and the translation unit contains no external definition for 1157 // that identifier, then the behavior is exactly as if the 1158 // translation unit contains a file scope declaration of that 1159 // identifier, with the composite type as of the end of the 1160 // translation unit, with an initializer equal to 0. 1161 llvm::SmallSet<VarDecl *, 32> Seen; 1162 for (TentativeDefinitionsType::iterator 1163 T = TentativeDefinitions.begin(ExternalSource), 1164 TEnd = TentativeDefinitions.end(); 1165 T != TEnd; ++T) { 1166 VarDecl *VD = (*T)->getActingDefinition(); 1167 1168 // If the tentative definition was completed, getActingDefinition() returns 1169 // null. If we've already seen this variable before, insert()'s second 1170 // return value is false. 1171 if (!VD || VD->isInvalidDecl() || !Seen.insert(VD).second) 1172 continue; 1173 1174 if (const IncompleteArrayType *ArrayT 1175 = Context.getAsIncompleteArrayType(VD->getType())) { 1176 // Set the length of the array to 1 (C99 6.9.2p5). 1177 Diag(VD->getLocation(), diag::warn_tentative_incomplete_array); 1178 llvm::APInt One(Context.getTypeSize(Context.getSizeType()), true); 1179 QualType T = Context.getConstantArrayType(ArrayT->getElementType(), One, 1180 nullptr, ArrayType::Normal, 0); 1181 VD->setType(T); 1182 } else if (RequireCompleteType(VD->getLocation(), VD->getType(), 1183 diag::err_tentative_def_incomplete_type)) 1184 VD->setInvalidDecl(); 1185 1186 // No initialization is performed for a tentative definition. 1187 CheckCompleteVariableDeclaration(VD); 1188 1189 // Notify the consumer that we've completed a tentative definition. 1190 if (!VD->isInvalidDecl()) 1191 Consumer.CompleteTentativeDefinition(VD); 1192 } 1193 1194 for (auto D : ExternalDeclarations) { 1195 if (!D || D->isInvalidDecl() || D->getPreviousDecl() || !D->isUsed()) 1196 continue; 1197 1198 Consumer.CompleteExternalDeclaration(D); 1199 } 1200 1201 // If there were errors, disable 'unused' warnings since they will mostly be 1202 // noise. Don't warn for a use from a module: either we should warn on all 1203 // file-scope declarations in modules or not at all, but whether the 1204 // declaration is used is immaterial. 1205 if (!Diags.hasErrorOccurred() && TUKind != TU_Module) { 1206 // Output warning for unused file scoped decls. 1207 for (UnusedFileScopedDeclsType::iterator 1208 I = UnusedFileScopedDecls.begin(ExternalSource), 1209 E = UnusedFileScopedDecls.end(); I != E; ++I) { 1210 if (ShouldRemoveFromUnused(this, *I)) 1211 continue; 1212 1213 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(*I)) { 1214 const FunctionDecl *DiagD; 1215 if (!FD->hasBody(DiagD)) 1216 DiagD = FD; 1217 if (DiagD->isDeleted()) 1218 continue; // Deleted functions are supposed to be unused. 1219 if (DiagD->isReferenced()) { 1220 if (isa<CXXMethodDecl>(DiagD)) 1221 Diag(DiagD->getLocation(), diag::warn_unneeded_member_function) 1222 << DiagD; 1223 else { 1224 if (FD->getStorageClass() == SC_Static && 1225 !FD->isInlineSpecified() && 1226 !SourceMgr.isInMainFile( 1227 SourceMgr.getExpansionLoc(FD->getLocation()))) 1228 Diag(DiagD->getLocation(), 1229 diag::warn_unneeded_static_internal_decl) 1230 << DiagD; 1231 else 1232 Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl) 1233 << /*function*/ 0 << DiagD; 1234 } 1235 } else { 1236 if (FD->getDescribedFunctionTemplate()) 1237 Diag(DiagD->getLocation(), diag::warn_unused_template) 1238 << /*function*/ 0 << DiagD; 1239 else 1240 Diag(DiagD->getLocation(), isa<CXXMethodDecl>(DiagD) 1241 ? diag::warn_unused_member_function 1242 : diag::warn_unused_function) 1243 << DiagD; 1244 } 1245 } else { 1246 const VarDecl *DiagD = cast<VarDecl>(*I)->getDefinition(); 1247 if (!DiagD) 1248 DiagD = cast<VarDecl>(*I); 1249 if (DiagD->isReferenced()) { 1250 Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl) 1251 << /*variable*/ 1 << DiagD; 1252 } else if (DiagD->getType().isConstQualified()) { 1253 const SourceManager &SM = SourceMgr; 1254 if (SM.getMainFileID() != SM.getFileID(DiagD->getLocation()) || 1255 !PP.getLangOpts().IsHeaderFile) 1256 Diag(DiagD->getLocation(), diag::warn_unused_const_variable) 1257 << DiagD; 1258 } else { 1259 if (DiagD->getDescribedVarTemplate()) 1260 Diag(DiagD->getLocation(), diag::warn_unused_template) 1261 << /*variable*/ 1 << DiagD; 1262 else 1263 Diag(DiagD->getLocation(), diag::warn_unused_variable) << DiagD; 1264 } 1265 } 1266 } 1267 1268 emitAndClearUnusedLocalTypedefWarnings(); 1269 } 1270 1271 if (!Diags.isIgnored(diag::warn_unused_private_field, SourceLocation())) { 1272 // FIXME: Load additional unused private field candidates from the external 1273 // source. 1274 RecordCompleteMap RecordsComplete; 1275 RecordCompleteMap MNCComplete; 1276 for (NamedDeclSetType::iterator I = UnusedPrivateFields.begin(), 1277 E = UnusedPrivateFields.end(); I != E; ++I) { 1278 const NamedDecl *D = *I; 1279 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D->getDeclContext()); 1280 if (RD && !RD->isUnion() && 1281 IsRecordFullyDefined(RD, RecordsComplete, MNCComplete)) { 1282 Diag(D->getLocation(), diag::warn_unused_private_field) 1283 << D->getDeclName(); 1284 } 1285 } 1286 } 1287 1288 if (!Diags.isIgnored(diag::warn_mismatched_delete_new, SourceLocation())) { 1289 if (ExternalSource) 1290 ExternalSource->ReadMismatchingDeleteExpressions(DeleteExprs); 1291 for (const auto &DeletedFieldInfo : DeleteExprs) { 1292 for (const auto &DeleteExprLoc : DeletedFieldInfo.second) { 1293 AnalyzeDeleteExprMismatch(DeletedFieldInfo.first, DeleteExprLoc.first, 1294 DeleteExprLoc.second); 1295 } 1296 } 1297 } 1298 1299 // Check we've noticed that we're no longer parsing the initializer for every 1300 // variable. If we miss cases, then at best we have a performance issue and 1301 // at worst a rejects-valid bug. 1302 assert(ParsingInitForAutoVars.empty() && 1303 "Didn't unmark var as having its initializer parsed"); 1304 1305 if (!PP.isIncrementalProcessingEnabled()) 1306 TUScope = nullptr; 1307 } 1308 1309 1310 //===----------------------------------------------------------------------===// 1311 // Helper functions. 1312 //===----------------------------------------------------------------------===// 1313 1314 DeclContext *Sema::getFunctionLevelDeclContext() { 1315 DeclContext *DC = CurContext; 1316 1317 while (true) { 1318 if (isa<BlockDecl>(DC) || isa<EnumDecl>(DC) || isa<CapturedDecl>(DC) || 1319 isa<RequiresExprBodyDecl>(DC)) { 1320 DC = DC->getParent(); 1321 } else if (isa<CXXMethodDecl>(DC) && 1322 cast<CXXMethodDecl>(DC)->getOverloadedOperator() == OO_Call && 1323 cast<CXXRecordDecl>(DC->getParent())->isLambda()) { 1324 DC = DC->getParent()->getParent(); 1325 } 1326 else break; 1327 } 1328 1329 return DC; 1330 } 1331 1332 /// getCurFunctionDecl - If inside of a function body, this returns a pointer 1333 /// to the function decl for the function being parsed. If we're currently 1334 /// in a 'block', this returns the containing context. 1335 FunctionDecl *Sema::getCurFunctionDecl() { 1336 DeclContext *DC = getFunctionLevelDeclContext(); 1337 return dyn_cast<FunctionDecl>(DC); 1338 } 1339 1340 ObjCMethodDecl *Sema::getCurMethodDecl() { 1341 DeclContext *DC = getFunctionLevelDeclContext(); 1342 while (isa<RecordDecl>(DC)) 1343 DC = DC->getParent(); 1344 return dyn_cast<ObjCMethodDecl>(DC); 1345 } 1346 1347 NamedDecl *Sema::getCurFunctionOrMethodDecl() { 1348 DeclContext *DC = getFunctionLevelDeclContext(); 1349 if (isa<ObjCMethodDecl>(DC) || isa<FunctionDecl>(DC)) 1350 return cast<NamedDecl>(DC); 1351 return nullptr; 1352 } 1353 1354 LangAS Sema::getDefaultCXXMethodAddrSpace() const { 1355 if (getLangOpts().OpenCL) 1356 return LangAS::opencl_generic; 1357 return LangAS::Default; 1358 } 1359 1360 void Sema::EmitCurrentDiagnostic(unsigned DiagID) { 1361 // FIXME: It doesn't make sense to me that DiagID is an incoming argument here 1362 // and yet we also use the current diag ID on the DiagnosticsEngine. This has 1363 // been made more painfully obvious by the refactor that introduced this 1364 // function, but it is possible that the incoming argument can be 1365 // eliminated. If it truly cannot be (for example, there is some reentrancy 1366 // issue I am not seeing yet), then there should at least be a clarifying 1367 // comment somewhere. 1368 if (Optional<TemplateDeductionInfo*> Info = isSFINAEContext()) { 1369 switch (DiagnosticIDs::getDiagnosticSFINAEResponse( 1370 Diags.getCurrentDiagID())) { 1371 case DiagnosticIDs::SFINAE_Report: 1372 // We'll report the diagnostic below. 1373 break; 1374 1375 case DiagnosticIDs::SFINAE_SubstitutionFailure: 1376 // Count this failure so that we know that template argument deduction 1377 // has failed. 1378 ++NumSFINAEErrors; 1379 1380 // Make a copy of this suppressed diagnostic and store it with the 1381 // template-deduction information. 1382 if (*Info && !(*Info)->hasSFINAEDiagnostic()) { 1383 Diagnostic DiagInfo(&Diags); 1384 (*Info)->addSFINAEDiagnostic(DiagInfo.getLocation(), 1385 PartialDiagnostic(DiagInfo, Context.getDiagAllocator())); 1386 } 1387 1388 Diags.setLastDiagnosticIgnored(true); 1389 Diags.Clear(); 1390 return; 1391 1392 case DiagnosticIDs::SFINAE_AccessControl: { 1393 // Per C++ Core Issue 1170, access control is part of SFINAE. 1394 // Additionally, the AccessCheckingSFINAE flag can be used to temporarily 1395 // make access control a part of SFINAE for the purposes of checking 1396 // type traits. 1397 if (!AccessCheckingSFINAE && !getLangOpts().CPlusPlus11) 1398 break; 1399 1400 SourceLocation Loc = Diags.getCurrentDiagLoc(); 1401 1402 // Suppress this diagnostic. 1403 ++NumSFINAEErrors; 1404 1405 // Make a copy of this suppressed diagnostic and store it with the 1406 // template-deduction information. 1407 if (*Info && !(*Info)->hasSFINAEDiagnostic()) { 1408 Diagnostic DiagInfo(&Diags); 1409 (*Info)->addSFINAEDiagnostic(DiagInfo.getLocation(), 1410 PartialDiagnostic(DiagInfo, Context.getDiagAllocator())); 1411 } 1412 1413 Diags.setLastDiagnosticIgnored(true); 1414 Diags.Clear(); 1415 1416 // Now the diagnostic state is clear, produce a C++98 compatibility 1417 // warning. 1418 Diag(Loc, diag::warn_cxx98_compat_sfinae_access_control); 1419 1420 // The last diagnostic which Sema produced was ignored. Suppress any 1421 // notes attached to it. 1422 Diags.setLastDiagnosticIgnored(true); 1423 return; 1424 } 1425 1426 case DiagnosticIDs::SFINAE_Suppress: 1427 // Make a copy of this suppressed diagnostic and store it with the 1428 // template-deduction information; 1429 if (*Info) { 1430 Diagnostic DiagInfo(&Diags); 1431 (*Info)->addSuppressedDiagnostic(DiagInfo.getLocation(), 1432 PartialDiagnostic(DiagInfo, Context.getDiagAllocator())); 1433 } 1434 1435 // Suppress this diagnostic. 1436 Diags.setLastDiagnosticIgnored(true); 1437 Diags.Clear(); 1438 return; 1439 } 1440 } 1441 1442 // Copy the diagnostic printing policy over the ASTContext printing policy. 1443 // TODO: Stop doing that. See: https://reviews.llvm.org/D45093#1090292 1444 Context.setPrintingPolicy(getPrintingPolicy()); 1445 1446 // Emit the diagnostic. 1447 if (!Diags.EmitCurrentDiagnostic()) 1448 return; 1449 1450 // If this is not a note, and we're in a template instantiation 1451 // that is different from the last template instantiation where 1452 // we emitted an error, print a template instantiation 1453 // backtrace. 1454 if (!DiagnosticIDs::isBuiltinNote(DiagID)) 1455 PrintContextStack(); 1456 } 1457 1458 Sema::SemaDiagnosticBuilder 1459 Sema::Diag(SourceLocation Loc, const PartialDiagnostic &PD, bool DeferHint) { 1460 return Diag(Loc, PD.getDiagID(), DeferHint) << PD; 1461 } 1462 1463 bool Sema::hasUncompilableErrorOccurred() const { 1464 if (getDiagnostics().hasUncompilableErrorOccurred()) 1465 return true; 1466 auto *FD = dyn_cast<FunctionDecl>(CurContext); 1467 if (!FD) 1468 return false; 1469 auto Loc = DeviceDeferredDiags.find(FD); 1470 if (Loc == DeviceDeferredDiags.end()) 1471 return false; 1472 for (auto PDAt : Loc->second) { 1473 if (DiagnosticIDs::isDefaultMappingAsError(PDAt.second.getDiagID())) 1474 return true; 1475 } 1476 return false; 1477 } 1478 1479 // Print notes showing how we can reach FD starting from an a priori 1480 // known-callable function. 1481 static void emitCallStackNotes(Sema &S, FunctionDecl *FD) { 1482 auto FnIt = S.DeviceKnownEmittedFns.find(FD); 1483 while (FnIt != S.DeviceKnownEmittedFns.end()) { 1484 // Respect error limit. 1485 if (S.Diags.hasFatalErrorOccurred()) 1486 return; 1487 DiagnosticBuilder Builder( 1488 S.Diags.Report(FnIt->second.Loc, diag::note_called_by)); 1489 Builder << FnIt->second.FD; 1490 FnIt = S.DeviceKnownEmittedFns.find(FnIt->second.FD); 1491 } 1492 } 1493 1494 namespace { 1495 1496 /// Helper class that emits deferred diagnostic messages if an entity directly 1497 /// or indirectly using the function that causes the deferred diagnostic 1498 /// messages is known to be emitted. 1499 /// 1500 /// During parsing of AST, certain diagnostic messages are recorded as deferred 1501 /// diagnostics since it is unknown whether the functions containing such 1502 /// diagnostics will be emitted. A list of potentially emitted functions and 1503 /// variables that may potentially trigger emission of functions are also 1504 /// recorded. DeferredDiagnosticsEmitter recursively visits used functions 1505 /// by each function to emit deferred diagnostics. 1506 /// 1507 /// During the visit, certain OpenMP directives or initializer of variables 1508 /// with certain OpenMP attributes will cause subsequent visiting of any 1509 /// functions enter a state which is called OpenMP device context in this 1510 /// implementation. The state is exited when the directive or initializer is 1511 /// exited. This state can change the emission states of subsequent uses 1512 /// of functions. 1513 /// 1514 /// Conceptually the functions or variables to be visited form a use graph 1515 /// where the parent node uses the child node. At any point of the visit, 1516 /// the tree nodes traversed from the tree root to the current node form a use 1517 /// stack. The emission state of the current node depends on two factors: 1518 /// 1. the emission state of the root node 1519 /// 2. whether the current node is in OpenMP device context 1520 /// If the function is decided to be emitted, its contained deferred diagnostics 1521 /// are emitted, together with the information about the use stack. 1522 /// 1523 class DeferredDiagnosticsEmitter 1524 : public UsedDeclVisitor<DeferredDiagnosticsEmitter> { 1525 public: 1526 typedef UsedDeclVisitor<DeferredDiagnosticsEmitter> Inherited; 1527 1528 // Whether the function is already in the current use-path. 1529 llvm::SmallPtrSet<CanonicalDeclPtr<Decl>, 4> InUsePath; 1530 1531 // The current use-path. 1532 llvm::SmallVector<CanonicalDeclPtr<FunctionDecl>, 4> UsePath; 1533 1534 // Whether the visiting of the function has been done. Done[0] is for the 1535 // case not in OpenMP device context. Done[1] is for the case in OpenMP 1536 // device context. We need two sets because diagnostics emission may be 1537 // different depending on whether it is in OpenMP device context. 1538 llvm::SmallPtrSet<CanonicalDeclPtr<Decl>, 4> DoneMap[2]; 1539 1540 // Emission state of the root node of the current use graph. 1541 bool ShouldEmitRootNode; 1542 1543 // Current OpenMP device context level. It is initialized to 0 and each 1544 // entering of device context increases it by 1 and each exit decreases 1545 // it by 1. Non-zero value indicates it is currently in device context. 1546 unsigned InOMPDeviceContext; 1547 1548 DeferredDiagnosticsEmitter(Sema &S) 1549 : Inherited(S), ShouldEmitRootNode(false), InOMPDeviceContext(0) {} 1550 1551 void VisitOMPTargetDirective(OMPTargetDirective *Node) { 1552 ++InOMPDeviceContext; 1553 Inherited::VisitOMPTargetDirective(Node); 1554 --InOMPDeviceContext; 1555 } 1556 1557 void visitUsedDecl(SourceLocation Loc, Decl *D) { 1558 if (isa<VarDecl>(D)) 1559 return; 1560 if (auto *FD = dyn_cast<FunctionDecl>(D)) 1561 checkFunc(Loc, FD); 1562 else 1563 Inherited::visitUsedDecl(Loc, D); 1564 } 1565 1566 void checkVar(VarDecl *VD) { 1567 assert(VD->isFileVarDecl() && 1568 "Should only check file-scope variables"); 1569 if (auto *Init = VD->getInit()) { 1570 auto DevTy = OMPDeclareTargetDeclAttr::getDeviceType(VD); 1571 bool IsDev = DevTy && (*DevTy == OMPDeclareTargetDeclAttr::DT_NoHost || 1572 *DevTy == OMPDeclareTargetDeclAttr::DT_Any); 1573 if (IsDev) 1574 ++InOMPDeviceContext; 1575 this->Visit(Init); 1576 if (IsDev) 1577 --InOMPDeviceContext; 1578 } 1579 } 1580 1581 void checkFunc(SourceLocation Loc, FunctionDecl *FD) { 1582 auto &Done = DoneMap[InOMPDeviceContext > 0 ? 1 : 0]; 1583 FunctionDecl *Caller = UsePath.empty() ? nullptr : UsePath.back(); 1584 if ((!ShouldEmitRootNode && !S.getLangOpts().OpenMP && !Caller) || 1585 S.shouldIgnoreInHostDeviceCheck(FD) || InUsePath.count(FD)) 1586 return; 1587 // Finalize analysis of OpenMP-specific constructs. 1588 if (Caller && S.LangOpts.OpenMP && UsePath.size() == 1 && 1589 (ShouldEmitRootNode || InOMPDeviceContext)) 1590 S.finalizeOpenMPDelayedAnalysis(Caller, FD, Loc); 1591 if (Caller) 1592 S.DeviceKnownEmittedFns[FD] = {Caller, Loc}; 1593 // Always emit deferred diagnostics for the direct users. This does not 1594 // lead to explosion of diagnostics since each user is visited at most 1595 // twice. 1596 if (ShouldEmitRootNode || InOMPDeviceContext) 1597 emitDeferredDiags(FD, Caller); 1598 // Do not revisit a function if the function body has been completely 1599 // visited before. 1600 if (!Done.insert(FD).second) 1601 return; 1602 InUsePath.insert(FD); 1603 UsePath.push_back(FD); 1604 if (auto *S = FD->getBody()) { 1605 this->Visit(S); 1606 } 1607 UsePath.pop_back(); 1608 InUsePath.erase(FD); 1609 } 1610 1611 void checkRecordedDecl(Decl *D) { 1612 if (auto *FD = dyn_cast<FunctionDecl>(D)) { 1613 ShouldEmitRootNode = S.getEmissionStatus(FD, /*Final=*/true) == 1614 Sema::FunctionEmissionStatus::Emitted; 1615 checkFunc(SourceLocation(), FD); 1616 } else 1617 checkVar(cast<VarDecl>(D)); 1618 } 1619 1620 // Emit any deferred diagnostics for FD 1621 void emitDeferredDiags(FunctionDecl *FD, bool ShowCallStack) { 1622 auto It = S.DeviceDeferredDiags.find(FD); 1623 if (It == S.DeviceDeferredDiags.end()) 1624 return; 1625 bool HasWarningOrError = false; 1626 bool FirstDiag = true; 1627 for (PartialDiagnosticAt &PDAt : It->second) { 1628 // Respect error limit. 1629 if (S.Diags.hasFatalErrorOccurred()) 1630 return; 1631 const SourceLocation &Loc = PDAt.first; 1632 const PartialDiagnostic &PD = PDAt.second; 1633 HasWarningOrError |= 1634 S.getDiagnostics().getDiagnosticLevel(PD.getDiagID(), Loc) >= 1635 DiagnosticsEngine::Warning; 1636 { 1637 DiagnosticBuilder Builder(S.Diags.Report(Loc, PD.getDiagID())); 1638 PD.Emit(Builder); 1639 } 1640 // Emit the note on the first diagnostic in case too many diagnostics 1641 // cause the note not emitted. 1642 if (FirstDiag && HasWarningOrError && ShowCallStack) { 1643 emitCallStackNotes(S, FD); 1644 FirstDiag = false; 1645 } 1646 } 1647 } 1648 }; 1649 } // namespace 1650 1651 void Sema::emitDeferredDiags() { 1652 if (ExternalSource) 1653 ExternalSource->ReadDeclsToCheckForDeferredDiags( 1654 DeclsToCheckForDeferredDiags); 1655 1656 if ((DeviceDeferredDiags.empty() && !LangOpts.OpenMP) || 1657 DeclsToCheckForDeferredDiags.empty()) 1658 return; 1659 1660 DeferredDiagnosticsEmitter DDE(*this); 1661 for (auto D : DeclsToCheckForDeferredDiags) 1662 DDE.checkRecordedDecl(D); 1663 } 1664 1665 // In CUDA, there are some constructs which may appear in semantically-valid 1666 // code, but trigger errors if we ever generate code for the function in which 1667 // they appear. Essentially every construct you're not allowed to use on the 1668 // device falls into this category, because you are allowed to use these 1669 // constructs in a __host__ __device__ function, but only if that function is 1670 // never codegen'ed on the device. 1671 // 1672 // To handle semantic checking for these constructs, we keep track of the set of 1673 // functions we know will be emitted, either because we could tell a priori that 1674 // they would be emitted, or because they were transitively called by a 1675 // known-emitted function. 1676 // 1677 // We also keep a partial call graph of which not-known-emitted functions call 1678 // which other not-known-emitted functions. 1679 // 1680 // When we see something which is illegal if the current function is emitted 1681 // (usually by way of CUDADiagIfDeviceCode, CUDADiagIfHostCode, or 1682 // CheckCUDACall), we first check if the current function is known-emitted. If 1683 // so, we immediately output the diagnostic. 1684 // 1685 // Otherwise, we "defer" the diagnostic. It sits in Sema::DeviceDeferredDiags 1686 // until we discover that the function is known-emitted, at which point we take 1687 // it out of this map and emit the diagnostic. 1688 1689 Sema::SemaDiagnosticBuilder::SemaDiagnosticBuilder(Kind K, SourceLocation Loc, 1690 unsigned DiagID, 1691 FunctionDecl *Fn, Sema &S) 1692 : S(S), Loc(Loc), DiagID(DiagID), Fn(Fn), 1693 ShowCallStack(K == K_ImmediateWithCallStack || K == K_Deferred) { 1694 switch (K) { 1695 case K_Nop: 1696 break; 1697 case K_Immediate: 1698 case K_ImmediateWithCallStack: 1699 ImmediateDiag.emplace( 1700 ImmediateDiagBuilder(S.Diags.Report(Loc, DiagID), S, DiagID)); 1701 break; 1702 case K_Deferred: 1703 assert(Fn && "Must have a function to attach the deferred diag to."); 1704 auto &Diags = S.DeviceDeferredDiags[Fn]; 1705 PartialDiagId.emplace(Diags.size()); 1706 Diags.emplace_back(Loc, S.PDiag(DiagID)); 1707 break; 1708 } 1709 } 1710 1711 Sema::SemaDiagnosticBuilder::SemaDiagnosticBuilder(SemaDiagnosticBuilder &&D) 1712 : S(D.S), Loc(D.Loc), DiagID(D.DiagID), Fn(D.Fn), 1713 ShowCallStack(D.ShowCallStack), ImmediateDiag(D.ImmediateDiag), 1714 PartialDiagId(D.PartialDiagId) { 1715 // Clean the previous diagnostics. 1716 D.ShowCallStack = false; 1717 D.ImmediateDiag.reset(); 1718 D.PartialDiagId.reset(); 1719 } 1720 1721 Sema::SemaDiagnosticBuilder::~SemaDiagnosticBuilder() { 1722 if (ImmediateDiag) { 1723 // Emit our diagnostic and, if it was a warning or error, output a callstack 1724 // if Fn isn't a priori known-emitted. 1725 bool IsWarningOrError = S.getDiagnostics().getDiagnosticLevel( 1726 DiagID, Loc) >= DiagnosticsEngine::Warning; 1727 ImmediateDiag.reset(); // Emit the immediate diag. 1728 if (IsWarningOrError && ShowCallStack) 1729 emitCallStackNotes(S, Fn); 1730 } else { 1731 assert((!PartialDiagId || ShowCallStack) && 1732 "Must always show call stack for deferred diags."); 1733 } 1734 } 1735 1736 Sema::SemaDiagnosticBuilder Sema::targetDiag(SourceLocation Loc, 1737 unsigned DiagID) { 1738 if (LangOpts.OpenMP) 1739 return LangOpts.OpenMPIsDevice ? diagIfOpenMPDeviceCode(Loc, DiagID) 1740 : diagIfOpenMPHostCode(Loc, DiagID); 1741 if (getLangOpts().CUDA) 1742 return getLangOpts().CUDAIsDevice ? CUDADiagIfDeviceCode(Loc, DiagID) 1743 : CUDADiagIfHostCode(Loc, DiagID); 1744 1745 if (getLangOpts().SYCLIsDevice) 1746 return SYCLDiagIfDeviceCode(Loc, DiagID); 1747 1748 return SemaDiagnosticBuilder(SemaDiagnosticBuilder::K_Immediate, Loc, DiagID, 1749 getCurFunctionDecl(), *this); 1750 } 1751 1752 Sema::SemaDiagnosticBuilder Sema::Diag(SourceLocation Loc, unsigned DiagID, 1753 bool DeferHint) { 1754 bool IsError = Diags.getDiagnosticIDs()->isDefaultMappingAsError(DiagID); 1755 bool ShouldDefer = getLangOpts().CUDA && LangOpts.GPUDeferDiag && 1756 DiagnosticIDs::isDeferrable(DiagID) && 1757 (DeferHint || !IsError); 1758 auto SetIsLastErrorImmediate = [&](bool Flag) { 1759 if (IsError) 1760 IsLastErrorImmediate = Flag; 1761 }; 1762 if (!ShouldDefer) { 1763 SetIsLastErrorImmediate(true); 1764 return SemaDiagnosticBuilder(SemaDiagnosticBuilder::K_Immediate, Loc, 1765 DiagID, getCurFunctionDecl(), *this); 1766 } 1767 1768 SemaDiagnosticBuilder DB = 1769 getLangOpts().CUDAIsDevice 1770 ? CUDADiagIfDeviceCode(Loc, DiagID) 1771 : CUDADiagIfHostCode(Loc, DiagID); 1772 SetIsLastErrorImmediate(DB.isImmediate()); 1773 return DB; 1774 } 1775 1776 void Sema::checkDeviceDecl(const ValueDecl *D, SourceLocation Loc) { 1777 if (isUnevaluatedContext()) 1778 return; 1779 1780 Decl *C = cast<Decl>(getCurLexicalContext()); 1781 1782 // Memcpy operations for structs containing a member with unsupported type 1783 // are ok, though. 1784 if (const auto *MD = dyn_cast<CXXMethodDecl>(C)) { 1785 if ((MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) && 1786 MD->isTrivial()) 1787 return; 1788 1789 if (const auto *Ctor = dyn_cast<CXXConstructorDecl>(MD)) 1790 if (Ctor->isCopyOrMoveConstructor() && Ctor->isTrivial()) 1791 return; 1792 } 1793 1794 auto CheckType = [&](QualType Ty) { 1795 if (Ty->isDependentType()) 1796 return; 1797 1798 if (Ty->isExtIntType()) { 1799 if (!Context.getTargetInfo().hasExtIntType()) { 1800 targetDiag(Loc, diag::err_device_unsupported_type) 1801 << D << false /*show bit size*/ << 0 /*bitsize*/ 1802 << Ty << Context.getTargetInfo().getTriple().str(); 1803 } 1804 return; 1805 } 1806 1807 if ((Ty->isFloat16Type() && !Context.getTargetInfo().hasFloat16Type()) || 1808 ((Ty->isFloat128Type() || 1809 (Ty->isRealFloatingType() && Context.getTypeSize(Ty) == 128)) && 1810 !Context.getTargetInfo().hasFloat128Type()) || 1811 (Ty->isIntegerType() && Context.getTypeSize(Ty) == 128 && 1812 !Context.getTargetInfo().hasInt128Type())) { 1813 targetDiag(Loc, diag::err_device_unsupported_type) 1814 << D << true /*show bit size*/ 1815 << static_cast<unsigned>(Context.getTypeSize(Ty)) << Ty 1816 << Context.getTargetInfo().getTriple().str(); 1817 targetDiag(D->getLocation(), diag::note_defined_here) << D; 1818 } 1819 }; 1820 1821 QualType Ty = D->getType(); 1822 CheckType(Ty); 1823 1824 if (const auto *FPTy = dyn_cast<FunctionProtoType>(Ty)) { 1825 for (const auto &ParamTy : FPTy->param_types()) 1826 CheckType(ParamTy); 1827 CheckType(FPTy->getReturnType()); 1828 } 1829 } 1830 1831 /// Looks through the macro-expansion chain for the given 1832 /// location, looking for a macro expansion with the given name. 1833 /// If one is found, returns true and sets the location to that 1834 /// expansion loc. 1835 bool Sema::findMacroSpelling(SourceLocation &locref, StringRef name) { 1836 SourceLocation loc = locref; 1837 if (!loc.isMacroID()) return false; 1838 1839 // There's no good way right now to look at the intermediate 1840 // expansions, so just jump to the expansion location. 1841 loc = getSourceManager().getExpansionLoc(loc); 1842 1843 // If that's written with the name, stop here. 1844 SmallString<16> buffer; 1845 if (getPreprocessor().getSpelling(loc, buffer) == name) { 1846 locref = loc; 1847 return true; 1848 } 1849 return false; 1850 } 1851 1852 /// Determines the active Scope associated with the given declaration 1853 /// context. 1854 /// 1855 /// This routine maps a declaration context to the active Scope object that 1856 /// represents that declaration context in the parser. It is typically used 1857 /// from "scope-less" code (e.g., template instantiation, lazy creation of 1858 /// declarations) that injects a name for name-lookup purposes and, therefore, 1859 /// must update the Scope. 1860 /// 1861 /// \returns The scope corresponding to the given declaraion context, or NULL 1862 /// if no such scope is open. 1863 Scope *Sema::getScopeForContext(DeclContext *Ctx) { 1864 1865 if (!Ctx) 1866 return nullptr; 1867 1868 Ctx = Ctx->getPrimaryContext(); 1869 for (Scope *S = getCurScope(); S; S = S->getParent()) { 1870 // Ignore scopes that cannot have declarations. This is important for 1871 // out-of-line definitions of static class members. 1872 if (S->getFlags() & (Scope::DeclScope | Scope::TemplateParamScope)) 1873 if (DeclContext *Entity = S->getEntity()) 1874 if (Ctx == Entity->getPrimaryContext()) 1875 return S; 1876 } 1877 1878 return nullptr; 1879 } 1880 1881 /// Enter a new function scope 1882 void Sema::PushFunctionScope() { 1883 if (FunctionScopes.empty() && CachedFunctionScope) { 1884 // Use CachedFunctionScope to avoid allocating memory when possible. 1885 CachedFunctionScope->Clear(); 1886 FunctionScopes.push_back(CachedFunctionScope.release()); 1887 } else { 1888 FunctionScopes.push_back(new FunctionScopeInfo(getDiagnostics())); 1889 } 1890 if (LangOpts.OpenMP) 1891 pushOpenMPFunctionRegion(); 1892 } 1893 1894 void Sema::PushBlockScope(Scope *BlockScope, BlockDecl *Block) { 1895 FunctionScopes.push_back(new BlockScopeInfo(getDiagnostics(), 1896 BlockScope, Block)); 1897 } 1898 1899 LambdaScopeInfo *Sema::PushLambdaScope() { 1900 LambdaScopeInfo *const LSI = new LambdaScopeInfo(getDiagnostics()); 1901 FunctionScopes.push_back(LSI); 1902 return LSI; 1903 } 1904 1905 void Sema::RecordParsingTemplateParameterDepth(unsigned Depth) { 1906 if (LambdaScopeInfo *const LSI = getCurLambda()) { 1907 LSI->AutoTemplateParameterDepth = Depth; 1908 return; 1909 } 1910 llvm_unreachable( 1911 "Remove assertion if intentionally called in a non-lambda context."); 1912 } 1913 1914 // Check that the type of the VarDecl has an accessible copy constructor and 1915 // resolve its destructor's exception specification. 1916 static void checkEscapingByref(VarDecl *VD, Sema &S) { 1917 QualType T = VD->getType(); 1918 EnterExpressionEvaluationContext scope( 1919 S, Sema::ExpressionEvaluationContext::PotentiallyEvaluated); 1920 SourceLocation Loc = VD->getLocation(); 1921 Expr *VarRef = 1922 new (S.Context) DeclRefExpr(S.Context, VD, false, T, VK_LValue, Loc); 1923 ExprResult Result = S.PerformMoveOrCopyInitialization( 1924 InitializedEntity::InitializeBlock(Loc, T, false), VD, VD->getType(), 1925 VarRef, /*AllowNRVO=*/true); 1926 if (!Result.isInvalid()) { 1927 Result = S.MaybeCreateExprWithCleanups(Result); 1928 Expr *Init = Result.getAs<Expr>(); 1929 S.Context.setBlockVarCopyInit(VD, Init, S.canThrow(Init)); 1930 } 1931 1932 // The destructor's exception specification is needed when IRGen generates 1933 // block copy/destroy functions. Resolve it here. 1934 if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl()) 1935 if (CXXDestructorDecl *DD = RD->getDestructor()) { 1936 auto *FPT = DD->getType()->getAs<FunctionProtoType>(); 1937 S.ResolveExceptionSpec(Loc, FPT); 1938 } 1939 } 1940 1941 static void markEscapingByrefs(const FunctionScopeInfo &FSI, Sema &S) { 1942 // Set the EscapingByref flag of __block variables captured by 1943 // escaping blocks. 1944 for (const BlockDecl *BD : FSI.Blocks) { 1945 for (const BlockDecl::Capture &BC : BD->captures()) { 1946 VarDecl *VD = BC.getVariable(); 1947 if (VD->hasAttr<BlocksAttr>()) { 1948 // Nothing to do if this is a __block variable captured by a 1949 // non-escaping block. 1950 if (BD->doesNotEscape()) 1951 continue; 1952 VD->setEscapingByref(); 1953 } 1954 // Check whether the captured variable is or contains an object of 1955 // non-trivial C union type. 1956 QualType CapType = BC.getVariable()->getType(); 1957 if (CapType.hasNonTrivialToPrimitiveDestructCUnion() || 1958 CapType.hasNonTrivialToPrimitiveCopyCUnion()) 1959 S.checkNonTrivialCUnion(BC.getVariable()->getType(), 1960 BD->getCaretLocation(), 1961 Sema::NTCUC_BlockCapture, 1962 Sema::NTCUK_Destruct|Sema::NTCUK_Copy); 1963 } 1964 } 1965 1966 for (VarDecl *VD : FSI.ByrefBlockVars) { 1967 // __block variables might require us to capture a copy-initializer. 1968 if (!VD->isEscapingByref()) 1969 continue; 1970 // It's currently invalid to ever have a __block variable with an 1971 // array type; should we diagnose that here? 1972 // Regardless, we don't want to ignore array nesting when 1973 // constructing this copy. 1974 if (VD->getType()->isStructureOrClassType()) 1975 checkEscapingByref(VD, S); 1976 } 1977 } 1978 1979 /// Pop a function (or block or lambda or captured region) scope from the stack. 1980 /// 1981 /// \param WP The warning policy to use for CFG-based warnings, or null if such 1982 /// warnings should not be produced. 1983 /// \param D The declaration corresponding to this function scope, if producing 1984 /// CFG-based warnings. 1985 /// \param BlockType The type of the block expression, if D is a BlockDecl. 1986 Sema::PoppedFunctionScopePtr 1987 Sema::PopFunctionScopeInfo(const AnalysisBasedWarnings::Policy *WP, 1988 const Decl *D, QualType BlockType) { 1989 assert(!FunctionScopes.empty() && "mismatched push/pop!"); 1990 1991 markEscapingByrefs(*FunctionScopes.back(), *this); 1992 1993 PoppedFunctionScopePtr Scope(FunctionScopes.pop_back_val(), 1994 PoppedFunctionScopeDeleter(this)); 1995 1996 if (LangOpts.OpenMP) 1997 popOpenMPFunctionRegion(Scope.get()); 1998 1999 // Issue any analysis-based warnings. 2000 if (WP && D) 2001 AnalysisWarnings.IssueWarnings(*WP, Scope.get(), D, BlockType); 2002 else 2003 for (const auto &PUD : Scope->PossiblyUnreachableDiags) 2004 Diag(PUD.Loc, PUD.PD); 2005 2006 return Scope; 2007 } 2008 2009 void Sema::PoppedFunctionScopeDeleter:: 2010 operator()(sema::FunctionScopeInfo *Scope) const { 2011 // Stash the function scope for later reuse if it's for a normal function. 2012 if (Scope->isPlainFunction() && !Self->CachedFunctionScope) 2013 Self->CachedFunctionScope.reset(Scope); 2014 else 2015 delete Scope; 2016 } 2017 2018 void Sema::PushCompoundScope(bool IsStmtExpr) { 2019 getCurFunction()->CompoundScopes.push_back(CompoundScopeInfo(IsStmtExpr)); 2020 } 2021 2022 void Sema::PopCompoundScope() { 2023 FunctionScopeInfo *CurFunction = getCurFunction(); 2024 assert(!CurFunction->CompoundScopes.empty() && "mismatched push/pop"); 2025 2026 CurFunction->CompoundScopes.pop_back(); 2027 } 2028 2029 /// Determine whether any errors occurred within this function/method/ 2030 /// block. 2031 bool Sema::hasAnyUnrecoverableErrorsInThisFunction() const { 2032 return getCurFunction()->hasUnrecoverableErrorOccurred(); 2033 } 2034 2035 void Sema::setFunctionHasBranchIntoScope() { 2036 if (!FunctionScopes.empty()) 2037 FunctionScopes.back()->setHasBranchIntoScope(); 2038 } 2039 2040 void Sema::setFunctionHasBranchProtectedScope() { 2041 if (!FunctionScopes.empty()) 2042 FunctionScopes.back()->setHasBranchProtectedScope(); 2043 } 2044 2045 void Sema::setFunctionHasIndirectGoto() { 2046 if (!FunctionScopes.empty()) 2047 FunctionScopes.back()->setHasIndirectGoto(); 2048 } 2049 2050 BlockScopeInfo *Sema::getCurBlock() { 2051 if (FunctionScopes.empty()) 2052 return nullptr; 2053 2054 auto CurBSI = dyn_cast<BlockScopeInfo>(FunctionScopes.back()); 2055 if (CurBSI && CurBSI->TheDecl && 2056 !CurBSI->TheDecl->Encloses(CurContext)) { 2057 // We have switched contexts due to template instantiation. 2058 assert(!CodeSynthesisContexts.empty()); 2059 return nullptr; 2060 } 2061 2062 return CurBSI; 2063 } 2064 2065 FunctionScopeInfo *Sema::getEnclosingFunction() const { 2066 if (FunctionScopes.empty()) 2067 return nullptr; 2068 2069 for (int e = FunctionScopes.size() - 1; e >= 0; --e) { 2070 if (isa<sema::BlockScopeInfo>(FunctionScopes[e])) 2071 continue; 2072 return FunctionScopes[e]; 2073 } 2074 return nullptr; 2075 } 2076 2077 LambdaScopeInfo *Sema::getEnclosingLambda() const { 2078 for (auto *Scope : llvm::reverse(FunctionScopes)) { 2079 if (auto *LSI = dyn_cast<sema::LambdaScopeInfo>(Scope)) { 2080 if (LSI->Lambda && !LSI->Lambda->Encloses(CurContext)) { 2081 // We have switched contexts due to template instantiation. 2082 // FIXME: We should swap out the FunctionScopes during code synthesis 2083 // so that we don't need to check for this. 2084 assert(!CodeSynthesisContexts.empty()); 2085 return nullptr; 2086 } 2087 return LSI; 2088 } 2089 } 2090 return nullptr; 2091 } 2092 2093 LambdaScopeInfo *Sema::getCurLambda(bool IgnoreNonLambdaCapturingScope) { 2094 if (FunctionScopes.empty()) 2095 return nullptr; 2096 2097 auto I = FunctionScopes.rbegin(); 2098 if (IgnoreNonLambdaCapturingScope) { 2099 auto E = FunctionScopes.rend(); 2100 while (I != E && isa<CapturingScopeInfo>(*I) && !isa<LambdaScopeInfo>(*I)) 2101 ++I; 2102 if (I == E) 2103 return nullptr; 2104 } 2105 auto *CurLSI = dyn_cast<LambdaScopeInfo>(*I); 2106 if (CurLSI && CurLSI->Lambda && 2107 !CurLSI->Lambda->Encloses(CurContext)) { 2108 // We have switched contexts due to template instantiation. 2109 assert(!CodeSynthesisContexts.empty()); 2110 return nullptr; 2111 } 2112 2113 return CurLSI; 2114 } 2115 2116 // We have a generic lambda if we parsed auto parameters, or we have 2117 // an associated template parameter list. 2118 LambdaScopeInfo *Sema::getCurGenericLambda() { 2119 if (LambdaScopeInfo *LSI = getCurLambda()) { 2120 return (LSI->TemplateParams.size() || 2121 LSI->GLTemplateParameterList) ? LSI : nullptr; 2122 } 2123 return nullptr; 2124 } 2125 2126 2127 void Sema::ActOnComment(SourceRange Comment) { 2128 if (!LangOpts.RetainCommentsFromSystemHeaders && 2129 SourceMgr.isInSystemHeader(Comment.getBegin())) 2130 return; 2131 RawComment RC(SourceMgr, Comment, LangOpts.CommentOpts, false); 2132 if (RC.isAlmostTrailingComment()) { 2133 SourceRange MagicMarkerRange(Comment.getBegin(), 2134 Comment.getBegin().getLocWithOffset(3)); 2135 StringRef MagicMarkerText; 2136 switch (RC.getKind()) { 2137 case RawComment::RCK_OrdinaryBCPL: 2138 MagicMarkerText = "///<"; 2139 break; 2140 case RawComment::RCK_OrdinaryC: 2141 MagicMarkerText = "/**<"; 2142 break; 2143 default: 2144 llvm_unreachable("if this is an almost Doxygen comment, " 2145 "it should be ordinary"); 2146 } 2147 Diag(Comment.getBegin(), diag::warn_not_a_doxygen_trailing_member_comment) << 2148 FixItHint::CreateReplacement(MagicMarkerRange, MagicMarkerText); 2149 } 2150 Context.addComment(RC); 2151 } 2152 2153 // Pin this vtable to this file. 2154 ExternalSemaSource::~ExternalSemaSource() {} 2155 char ExternalSemaSource::ID; 2156 2157 void ExternalSemaSource::ReadMethodPool(Selector Sel) { } 2158 void ExternalSemaSource::updateOutOfDateSelector(Selector Sel) { } 2159 2160 void ExternalSemaSource::ReadKnownNamespaces( 2161 SmallVectorImpl<NamespaceDecl *> &Namespaces) { 2162 } 2163 2164 void ExternalSemaSource::ReadUndefinedButUsed( 2165 llvm::MapVector<NamedDecl *, SourceLocation> &Undefined) {} 2166 2167 void ExternalSemaSource::ReadMismatchingDeleteExpressions(llvm::MapVector< 2168 FieldDecl *, llvm::SmallVector<std::pair<SourceLocation, bool>, 4>> &) {} 2169 2170 /// Figure out if an expression could be turned into a call. 2171 /// 2172 /// Use this when trying to recover from an error where the programmer may have 2173 /// written just the name of a function instead of actually calling it. 2174 /// 2175 /// \param E - The expression to examine. 2176 /// \param ZeroArgCallReturnTy - If the expression can be turned into a call 2177 /// with no arguments, this parameter is set to the type returned by such a 2178 /// call; otherwise, it is set to an empty QualType. 2179 /// \param OverloadSet - If the expression is an overloaded function 2180 /// name, this parameter is populated with the decls of the various overloads. 2181 bool Sema::tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, 2182 UnresolvedSetImpl &OverloadSet) { 2183 ZeroArgCallReturnTy = QualType(); 2184 OverloadSet.clear(); 2185 2186 const OverloadExpr *Overloads = nullptr; 2187 bool IsMemExpr = false; 2188 if (E.getType() == Context.OverloadTy) { 2189 OverloadExpr::FindResult FR = OverloadExpr::find(const_cast<Expr*>(&E)); 2190 2191 // Ignore overloads that are pointer-to-member constants. 2192 if (FR.HasFormOfMemberPointer) 2193 return false; 2194 2195 Overloads = FR.Expression; 2196 } else if (E.getType() == Context.BoundMemberTy) { 2197 Overloads = dyn_cast<UnresolvedMemberExpr>(E.IgnoreParens()); 2198 IsMemExpr = true; 2199 } 2200 2201 bool Ambiguous = false; 2202 bool IsMV = false; 2203 2204 if (Overloads) { 2205 for (OverloadExpr::decls_iterator it = Overloads->decls_begin(), 2206 DeclsEnd = Overloads->decls_end(); it != DeclsEnd; ++it) { 2207 OverloadSet.addDecl(*it); 2208 2209 // Check whether the function is a non-template, non-member which takes no 2210 // arguments. 2211 if (IsMemExpr) 2212 continue; 2213 if (const FunctionDecl *OverloadDecl 2214 = dyn_cast<FunctionDecl>((*it)->getUnderlyingDecl())) { 2215 if (OverloadDecl->getMinRequiredArguments() == 0) { 2216 if (!ZeroArgCallReturnTy.isNull() && !Ambiguous && 2217 (!IsMV || !(OverloadDecl->isCPUDispatchMultiVersion() || 2218 OverloadDecl->isCPUSpecificMultiVersion()))) { 2219 ZeroArgCallReturnTy = QualType(); 2220 Ambiguous = true; 2221 } else { 2222 ZeroArgCallReturnTy = OverloadDecl->getReturnType(); 2223 IsMV = OverloadDecl->isCPUDispatchMultiVersion() || 2224 OverloadDecl->isCPUSpecificMultiVersion(); 2225 } 2226 } 2227 } 2228 } 2229 2230 // If it's not a member, use better machinery to try to resolve the call 2231 if (!IsMemExpr) 2232 return !ZeroArgCallReturnTy.isNull(); 2233 } 2234 2235 // Attempt to call the member with no arguments - this will correctly handle 2236 // member templates with defaults/deduction of template arguments, overloads 2237 // with default arguments, etc. 2238 if (IsMemExpr && !E.isTypeDependent()) { 2239 Sema::TentativeAnalysisScope Trap(*this); 2240 ExprResult R = BuildCallToMemberFunction(nullptr, &E, SourceLocation(), 2241 None, SourceLocation()); 2242 if (R.isUsable()) { 2243 ZeroArgCallReturnTy = R.get()->getType(); 2244 return true; 2245 } 2246 return false; 2247 } 2248 2249 if (const DeclRefExpr *DeclRef = dyn_cast<DeclRefExpr>(E.IgnoreParens())) { 2250 if (const FunctionDecl *Fun = dyn_cast<FunctionDecl>(DeclRef->getDecl())) { 2251 if (Fun->getMinRequiredArguments() == 0) 2252 ZeroArgCallReturnTy = Fun->getReturnType(); 2253 return true; 2254 } 2255 } 2256 2257 // We don't have an expression that's convenient to get a FunctionDecl from, 2258 // but we can at least check if the type is "function of 0 arguments". 2259 QualType ExprTy = E.getType(); 2260 const FunctionType *FunTy = nullptr; 2261 QualType PointeeTy = ExprTy->getPointeeType(); 2262 if (!PointeeTy.isNull()) 2263 FunTy = PointeeTy->getAs<FunctionType>(); 2264 if (!FunTy) 2265 FunTy = ExprTy->getAs<FunctionType>(); 2266 2267 if (const FunctionProtoType *FPT = 2268 dyn_cast_or_null<FunctionProtoType>(FunTy)) { 2269 if (FPT->getNumParams() == 0) 2270 ZeroArgCallReturnTy = FunTy->getReturnType(); 2271 return true; 2272 } 2273 return false; 2274 } 2275 2276 /// Give notes for a set of overloads. 2277 /// 2278 /// A companion to tryExprAsCall. In cases when the name that the programmer 2279 /// wrote was an overloaded function, we may be able to make some guesses about 2280 /// plausible overloads based on their return types; such guesses can be handed 2281 /// off to this method to be emitted as notes. 2282 /// 2283 /// \param Overloads - The overloads to note. 2284 /// \param FinalNoteLoc - If we've suppressed printing some overloads due to 2285 /// -fshow-overloads=best, this is the location to attach to the note about too 2286 /// many candidates. Typically this will be the location of the original 2287 /// ill-formed expression. 2288 static void noteOverloads(Sema &S, const UnresolvedSetImpl &Overloads, 2289 const SourceLocation FinalNoteLoc) { 2290 int ShownOverloads = 0; 2291 int SuppressedOverloads = 0; 2292 for (UnresolvedSetImpl::iterator It = Overloads.begin(), 2293 DeclsEnd = Overloads.end(); It != DeclsEnd; ++It) { 2294 // FIXME: Magic number for max shown overloads stolen from 2295 // OverloadCandidateSet::NoteCandidates. 2296 if (ShownOverloads >= 4 && S.Diags.getShowOverloads() == Ovl_Best) { 2297 ++SuppressedOverloads; 2298 continue; 2299 } 2300 2301 NamedDecl *Fn = (*It)->getUnderlyingDecl(); 2302 // Don't print overloads for non-default multiversioned functions. 2303 if (const auto *FD = Fn->getAsFunction()) { 2304 if (FD->isMultiVersion() && FD->hasAttr<TargetAttr>() && 2305 !FD->getAttr<TargetAttr>()->isDefaultVersion()) 2306 continue; 2307 } 2308 S.Diag(Fn->getLocation(), diag::note_possible_target_of_call); 2309 ++ShownOverloads; 2310 } 2311 2312 if (SuppressedOverloads) 2313 S.Diag(FinalNoteLoc, diag::note_ovl_too_many_candidates) 2314 << SuppressedOverloads; 2315 } 2316 2317 static void notePlausibleOverloads(Sema &S, SourceLocation Loc, 2318 const UnresolvedSetImpl &Overloads, 2319 bool (*IsPlausibleResult)(QualType)) { 2320 if (!IsPlausibleResult) 2321 return noteOverloads(S, Overloads, Loc); 2322 2323 UnresolvedSet<2> PlausibleOverloads; 2324 for (OverloadExpr::decls_iterator It = Overloads.begin(), 2325 DeclsEnd = Overloads.end(); It != DeclsEnd; ++It) { 2326 const FunctionDecl *OverloadDecl = cast<FunctionDecl>(*It); 2327 QualType OverloadResultTy = OverloadDecl->getReturnType(); 2328 if (IsPlausibleResult(OverloadResultTy)) 2329 PlausibleOverloads.addDecl(It.getDecl()); 2330 } 2331 noteOverloads(S, PlausibleOverloads, Loc); 2332 } 2333 2334 /// Determine whether the given expression can be called by just 2335 /// putting parentheses after it. Notably, expressions with unary 2336 /// operators can't be because the unary operator will start parsing 2337 /// outside the call. 2338 static bool IsCallableWithAppend(Expr *E) { 2339 E = E->IgnoreImplicit(); 2340 return (!isa<CStyleCastExpr>(E) && 2341 !isa<UnaryOperator>(E) && 2342 !isa<BinaryOperator>(E) && 2343 !isa<CXXOperatorCallExpr>(E)); 2344 } 2345 2346 static bool IsCPUDispatchCPUSpecificMultiVersion(const Expr *E) { 2347 if (const auto *UO = dyn_cast<UnaryOperator>(E)) 2348 E = UO->getSubExpr(); 2349 2350 if (const auto *ULE = dyn_cast<UnresolvedLookupExpr>(E)) { 2351 if (ULE->getNumDecls() == 0) 2352 return false; 2353 2354 const NamedDecl *ND = *ULE->decls_begin(); 2355 if (const auto *FD = dyn_cast<FunctionDecl>(ND)) 2356 return FD->isCPUDispatchMultiVersion() || FD->isCPUSpecificMultiVersion(); 2357 } 2358 return false; 2359 } 2360 2361 bool Sema::tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, 2362 bool ForceComplain, 2363 bool (*IsPlausibleResult)(QualType)) { 2364 SourceLocation Loc = E.get()->getExprLoc(); 2365 SourceRange Range = E.get()->getSourceRange(); 2366 2367 QualType ZeroArgCallTy; 2368 UnresolvedSet<4> Overloads; 2369 if (tryExprAsCall(*E.get(), ZeroArgCallTy, Overloads) && 2370 !ZeroArgCallTy.isNull() && 2371 (!IsPlausibleResult || IsPlausibleResult(ZeroArgCallTy))) { 2372 // At this point, we know E is potentially callable with 0 2373 // arguments and that it returns something of a reasonable type, 2374 // so we can emit a fixit and carry on pretending that E was 2375 // actually a CallExpr. 2376 SourceLocation ParenInsertionLoc = getLocForEndOfToken(Range.getEnd()); 2377 bool IsMV = IsCPUDispatchCPUSpecificMultiVersion(E.get()); 2378 Diag(Loc, PD) << /*zero-arg*/ 1 << IsMV << Range 2379 << (IsCallableWithAppend(E.get()) 2380 ? FixItHint::CreateInsertion(ParenInsertionLoc, "()") 2381 : FixItHint()); 2382 if (!IsMV) 2383 notePlausibleOverloads(*this, Loc, Overloads, IsPlausibleResult); 2384 2385 // FIXME: Try this before emitting the fixit, and suppress diagnostics 2386 // while doing so. 2387 E = BuildCallExpr(nullptr, E.get(), Range.getEnd(), None, 2388 Range.getEnd().getLocWithOffset(1)); 2389 return true; 2390 } 2391 2392 if (!ForceComplain) return false; 2393 2394 bool IsMV = IsCPUDispatchCPUSpecificMultiVersion(E.get()); 2395 Diag(Loc, PD) << /*not zero-arg*/ 0 << IsMV << Range; 2396 if (!IsMV) 2397 notePlausibleOverloads(*this, Loc, Overloads, IsPlausibleResult); 2398 E = ExprError(); 2399 return true; 2400 } 2401 2402 IdentifierInfo *Sema::getSuperIdentifier() const { 2403 if (!Ident_super) 2404 Ident_super = &Context.Idents.get("super"); 2405 return Ident_super; 2406 } 2407 2408 IdentifierInfo *Sema::getFloat128Identifier() const { 2409 if (!Ident___float128) 2410 Ident___float128 = &Context.Idents.get("__float128"); 2411 return Ident___float128; 2412 } 2413 2414 void Sema::PushCapturedRegionScope(Scope *S, CapturedDecl *CD, RecordDecl *RD, 2415 CapturedRegionKind K, 2416 unsigned OpenMPCaptureLevel) { 2417 auto *CSI = new CapturedRegionScopeInfo( 2418 getDiagnostics(), S, CD, RD, CD->getContextParam(), K, 2419 (getLangOpts().OpenMP && K == CR_OpenMP) ? getOpenMPNestingLevel() : 0, 2420 OpenMPCaptureLevel); 2421 CSI->ReturnType = Context.VoidTy; 2422 FunctionScopes.push_back(CSI); 2423 } 2424 2425 CapturedRegionScopeInfo *Sema::getCurCapturedRegion() { 2426 if (FunctionScopes.empty()) 2427 return nullptr; 2428 2429 return dyn_cast<CapturedRegionScopeInfo>(FunctionScopes.back()); 2430 } 2431 2432 const llvm::MapVector<FieldDecl *, Sema::DeleteLocs> & 2433 Sema::getMismatchingDeleteExpressions() const { 2434 return DeleteExprs; 2435 } 2436 2437 void Sema::setOpenCLExtensionForType(QualType T, llvm::StringRef ExtStr) { 2438 if (ExtStr.empty()) 2439 return; 2440 llvm::SmallVector<StringRef, 1> Exts; 2441 ExtStr.split(Exts, " ", /* limit */ -1, /* keep empty */ false); 2442 auto CanT = T.getCanonicalType().getTypePtr(); 2443 for (auto &I : Exts) 2444 OpenCLTypeExtMap[CanT].insert(I.str()); 2445 } 2446 2447 void Sema::setOpenCLExtensionForDecl(Decl *FD, StringRef ExtStr) { 2448 llvm::SmallVector<StringRef, 1> Exts; 2449 ExtStr.split(Exts, " ", /* limit */ -1, /* keep empty */ false); 2450 if (Exts.empty()) 2451 return; 2452 for (auto &I : Exts) 2453 OpenCLDeclExtMap[FD].insert(I.str()); 2454 } 2455 2456 void Sema::setCurrentOpenCLExtensionForType(QualType T) { 2457 if (CurrOpenCLExtension.empty()) 2458 return; 2459 setOpenCLExtensionForType(T, CurrOpenCLExtension); 2460 } 2461 2462 void Sema::setCurrentOpenCLExtensionForDecl(Decl *D) { 2463 if (CurrOpenCLExtension.empty()) 2464 return; 2465 setOpenCLExtensionForDecl(D, CurrOpenCLExtension); 2466 } 2467 2468 std::string Sema::getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD) { 2469 if (!OpenCLDeclExtMap.empty()) 2470 return getOpenCLExtensionsFromExtMap(FD, OpenCLDeclExtMap); 2471 2472 return ""; 2473 } 2474 2475 std::string Sema::getOpenCLExtensionsFromTypeExtMap(FunctionType *FT) { 2476 if (!OpenCLTypeExtMap.empty()) 2477 return getOpenCLExtensionsFromExtMap(FT, OpenCLTypeExtMap); 2478 2479 return ""; 2480 } 2481 2482 template <typename T, typename MapT> 2483 std::string Sema::getOpenCLExtensionsFromExtMap(T *FDT, MapT &Map) { 2484 auto Loc = Map.find(FDT); 2485 return llvm::join(Loc->second, " "); 2486 } 2487 2488 bool Sema::isOpenCLDisabledDecl(Decl *FD) { 2489 auto Loc = OpenCLDeclExtMap.find(FD); 2490 if (Loc == OpenCLDeclExtMap.end()) 2491 return false; 2492 for (auto &I : Loc->second) { 2493 if (!getOpenCLOptions().isEnabled(I)) 2494 return true; 2495 } 2496 return false; 2497 } 2498 2499 template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT> 2500 bool Sema::checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, 2501 DiagInfoT DiagInfo, MapT &Map, 2502 unsigned Selector, 2503 SourceRange SrcRange) { 2504 auto Loc = Map.find(D); 2505 if (Loc == Map.end()) 2506 return false; 2507 bool Disabled = false; 2508 for (auto &I : Loc->second) { 2509 if (I != CurrOpenCLExtension && !getOpenCLOptions().isEnabled(I)) { 2510 Diag(DiagLoc, diag::err_opencl_requires_extension) << Selector << DiagInfo 2511 << I << SrcRange; 2512 Disabled = true; 2513 } 2514 } 2515 return Disabled; 2516 } 2517 2518 bool Sema::checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType QT) { 2519 // Check extensions for declared types. 2520 Decl *Decl = nullptr; 2521 if (auto TypedefT = dyn_cast<TypedefType>(QT.getTypePtr())) 2522 Decl = TypedefT->getDecl(); 2523 if (auto TagT = dyn_cast<TagType>(QT.getCanonicalType().getTypePtr())) 2524 Decl = TagT->getDecl(); 2525 auto Loc = DS.getTypeSpecTypeLoc(); 2526 2527 // Check extensions for vector types. 2528 // e.g. double4 is not allowed when cl_khr_fp64 is absent. 2529 if (QT->isExtVectorType()) { 2530 auto TypePtr = QT->castAs<ExtVectorType>()->getElementType().getTypePtr(); 2531 return checkOpenCLDisabledTypeOrDecl(TypePtr, Loc, QT, OpenCLTypeExtMap); 2532 } 2533 2534 if (checkOpenCLDisabledTypeOrDecl(Decl, Loc, QT, OpenCLDeclExtMap)) 2535 return true; 2536 2537 // Check extensions for builtin types. 2538 return checkOpenCLDisabledTypeOrDecl(QT.getCanonicalType().getTypePtr(), Loc, 2539 QT, OpenCLTypeExtMap); 2540 } 2541 2542 bool Sema::checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E) { 2543 IdentifierInfo *FnName = D.getIdentifier(); 2544 return checkOpenCLDisabledTypeOrDecl(&D, E.getBeginLoc(), FnName, 2545 OpenCLDeclExtMap, 1, D.getSourceRange()); 2546 } 2547