1 //===--- Sema.cpp - AST Builder and Semantic Analysis Implementation ------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file implements the actions class which performs semantic analysis and 10 // builds an AST out of a parse stream. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "UsedDeclVisitor.h" 15 #include "clang/AST/ASTContext.h" 16 #include "clang/AST/ASTDiagnostic.h" 17 #include "clang/AST/Decl.h" 18 #include "clang/AST/DeclCXX.h" 19 #include "clang/AST/DeclFriend.h" 20 #include "clang/AST/DeclObjC.h" 21 #include "clang/AST/Expr.h" 22 #include "clang/AST/ExprCXX.h" 23 #include "clang/AST/PrettyDeclStackTrace.h" 24 #include "clang/AST/StmtCXX.h" 25 #include "clang/AST/TypeOrdering.h" 26 #include "clang/Basic/DarwinSDKInfo.h" 27 #include "clang/Basic/DiagnosticOptions.h" 28 #include "clang/Basic/PartialDiagnostic.h" 29 #include "clang/Basic/SourceManager.h" 30 #include "clang/Basic/TargetInfo.h" 31 #include "clang/Lex/HeaderSearch.h" 32 #include "clang/Lex/HeaderSearchOptions.h" 33 #include "clang/Lex/Preprocessor.h" 34 #include "clang/Sema/CXXFieldCollector.h" 35 #include "clang/Sema/EnterExpressionEvaluationContext.h" 36 #include "clang/Sema/ExternalSemaSource.h" 37 #include "clang/Sema/Initialization.h" 38 #include "clang/Sema/MultiplexExternalSemaSource.h" 39 #include "clang/Sema/ObjCMethodList.h" 40 #include "clang/Sema/RISCVIntrinsicManager.h" 41 #include "clang/Sema/Scope.h" 42 #include "clang/Sema/ScopeInfo.h" 43 #include "clang/Sema/SemaAMDGPU.h" 44 #include "clang/Sema/SemaARM.h" 45 #include "clang/Sema/SemaAVR.h" 46 #include "clang/Sema/SemaBPF.h" 47 #include "clang/Sema/SemaCUDA.h" 48 #include "clang/Sema/SemaCodeCompletion.h" 49 #include "clang/Sema/SemaConsumer.h" 50 #include "clang/Sema/SemaHLSL.h" 51 #include "clang/Sema/SemaHexagon.h" 52 #include "clang/Sema/SemaLoongArch.h" 53 #include "clang/Sema/SemaM68k.h" 54 #include "clang/Sema/SemaMIPS.h" 55 #include "clang/Sema/SemaMSP430.h" 56 #include "clang/Sema/SemaNVPTX.h" 57 #include "clang/Sema/SemaObjC.h" 58 #include "clang/Sema/SemaOpenACC.h" 59 #include "clang/Sema/SemaOpenCL.h" 60 #include "clang/Sema/SemaOpenMP.h" 61 #include "clang/Sema/SemaPPC.h" 62 #include "clang/Sema/SemaPseudoObject.h" 63 #include "clang/Sema/SemaRISCV.h" 64 #include "clang/Sema/SemaSPIRV.h" 65 #include "clang/Sema/SemaSYCL.h" 66 #include "clang/Sema/SemaSwift.h" 67 #include "clang/Sema/SemaSystemZ.h" 68 #include "clang/Sema/SemaWasm.h" 69 #include "clang/Sema/SemaX86.h" 70 #include "clang/Sema/TemplateDeduction.h" 71 #include "clang/Sema/TemplateInstCallback.h" 72 #include "clang/Sema/TypoCorrection.h" 73 #include "llvm/ADT/DenseMap.h" 74 #include "llvm/ADT/STLExtras.h" 75 #include "llvm/ADT/SmallPtrSet.h" 76 #include "llvm/Support/TimeProfiler.h" 77 #include <optional> 78 79 using namespace clang; 80 using namespace sema; 81 82 SourceLocation Sema::getLocForEndOfToken(SourceLocation Loc, unsigned Offset) { 83 return Lexer::getLocForEndOfToken(Loc, Offset, SourceMgr, LangOpts); 84 } 85 86 ModuleLoader &Sema::getModuleLoader() const { return PP.getModuleLoader(); } 87 88 DarwinSDKInfo * 89 Sema::getDarwinSDKInfoForAvailabilityChecking(SourceLocation Loc, 90 StringRef Platform) { 91 auto *SDKInfo = 
getDarwinSDKInfoForAvailabilityChecking(); 92 if (!SDKInfo && !WarnedDarwinSDKInfoMissing) { 93 Diag(Loc, diag::warn_missing_sdksettings_for_availability_checking) 94 << Platform; 95 WarnedDarwinSDKInfoMissing = true; 96 } 97 return SDKInfo; 98 } 99 100 DarwinSDKInfo *Sema::getDarwinSDKInfoForAvailabilityChecking() { 101 if (CachedDarwinSDKInfo) 102 return CachedDarwinSDKInfo->get(); 103 auto SDKInfo = parseDarwinSDKInfo( 104 PP.getFileManager().getVirtualFileSystem(), 105 PP.getHeaderSearchInfo().getHeaderSearchOpts().Sysroot); 106 if (SDKInfo && *SDKInfo) { 107 CachedDarwinSDKInfo = std::make_unique<DarwinSDKInfo>(std::move(**SDKInfo)); 108 return CachedDarwinSDKInfo->get(); 109 } 110 if (!SDKInfo) 111 llvm::consumeError(SDKInfo.takeError()); 112 CachedDarwinSDKInfo = std::unique_ptr<DarwinSDKInfo>(); 113 return nullptr; 114 } 115 116 IdentifierInfo *Sema::InventAbbreviatedTemplateParameterTypeName( 117 const IdentifierInfo *ParamName, unsigned int Index) { 118 std::string InventedName; 119 llvm::raw_string_ostream OS(InventedName); 120 121 if (!ParamName) 122 OS << "auto:" << Index + 1; 123 else 124 OS << ParamName->getName() << ":auto"; 125 126 return &Context.Idents.get(OS.str()); 127 } 128 129 PrintingPolicy Sema::getPrintingPolicy(const ASTContext &Context, 130 const Preprocessor &PP) { 131 PrintingPolicy Policy = Context.getPrintingPolicy(); 132 // In diagnostics, we print _Bool as bool if the latter is defined as the 133 // former. 134 Policy.Bool = Context.getLangOpts().Bool; 135 if (!Policy.Bool) { 136 if (const MacroInfo *BoolMacro = PP.getMacroInfo(Context.getBoolName())) { 137 Policy.Bool = BoolMacro->isObjectLike() && 138 BoolMacro->getNumTokens() == 1 && 139 BoolMacro->getReplacementToken(0).is(tok::kw__Bool); 140 } 141 } 142 143 // Shorten the data output if needed 144 Policy.EntireContentsOfLargeArray = false; 145 146 return Policy; 147 } 148 149 void Sema::ActOnTranslationUnitScope(Scope *S) { 150 TUScope = S; 151 PushDeclContext(S, Context.getTranslationUnitDecl()); 152 } 153 154 namespace clang { 155 namespace sema { 156 157 class SemaPPCallbacks : public PPCallbacks { 158 Sema *S = nullptr; 159 llvm::SmallVector<SourceLocation, 8> IncludeStack; 160 llvm::SmallVector<llvm::TimeTraceProfilerEntry *, 8> ProfilerStack; 161 162 public: 163 void set(Sema &S) { this->S = &S; } 164 165 void reset() { S = nullptr; } 166 167 void FileChanged(SourceLocation Loc, FileChangeReason Reason, 168 SrcMgr::CharacteristicKind FileType, 169 FileID PrevFID) override { 170 if (!S) 171 return; 172 switch (Reason) { 173 case EnterFile: { 174 SourceManager &SM = S->getSourceManager(); 175 SourceLocation IncludeLoc = SM.getIncludeLoc(SM.getFileID(Loc)); 176 if (IncludeLoc.isValid()) { 177 if (llvm::timeTraceProfilerEnabled()) { 178 OptionalFileEntryRef FE = SM.getFileEntryRefForID(SM.getFileID(Loc)); 179 ProfilerStack.push_back(llvm::timeTraceAsyncProfilerBegin( 180 "Source", FE ? 
FE->getName() : StringRef("<unknown>"))); 181 } 182 183 IncludeStack.push_back(IncludeLoc); 184 S->DiagnoseNonDefaultPragmaAlignPack( 185 Sema::PragmaAlignPackDiagnoseKind::NonDefaultStateAtInclude, 186 IncludeLoc); 187 } 188 break; 189 } 190 case ExitFile: 191 if (!IncludeStack.empty()) { 192 if (llvm::timeTraceProfilerEnabled()) 193 llvm::timeTraceProfilerEnd(ProfilerStack.pop_back_val()); 194 195 S->DiagnoseNonDefaultPragmaAlignPack( 196 Sema::PragmaAlignPackDiagnoseKind::ChangedStateAtExit, 197 IncludeStack.pop_back_val()); 198 } 199 break; 200 default: 201 break; 202 } 203 } 204 }; 205 206 } // end namespace sema 207 } // end namespace clang 208 209 const unsigned Sema::MaxAlignmentExponent; 210 const uint64_t Sema::MaximumAlignment; 211 212 Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer, 213 TranslationUnitKind TUKind, CodeCompleteConsumer *CodeCompleter) 214 : SemaBase(*this), CollectStats(false), TUKind(TUKind), 215 CurFPFeatures(pp.getLangOpts()), LangOpts(pp.getLangOpts()), PP(pp), 216 Context(ctxt), Consumer(consumer), Diags(PP.getDiagnostics()), 217 SourceMgr(PP.getSourceManager()), APINotes(SourceMgr, LangOpts), 218 AnalysisWarnings(*this), ThreadSafetyDeclCache(nullptr), 219 LateTemplateParser(nullptr), LateTemplateParserCleanup(nullptr), 220 OpaqueParser(nullptr), CurContext(nullptr), ExternalSource(nullptr), 221 StackHandler(Diags), CurScope(nullptr), Ident_super(nullptr), 222 AMDGPUPtr(std::make_unique<SemaAMDGPU>(*this)), 223 ARMPtr(std::make_unique<SemaARM>(*this)), 224 AVRPtr(std::make_unique<SemaAVR>(*this)), 225 BPFPtr(std::make_unique<SemaBPF>(*this)), 226 CodeCompletionPtr( 227 std::make_unique<SemaCodeCompletion>(*this, CodeCompleter)), 228 CUDAPtr(std::make_unique<SemaCUDA>(*this)), 229 HLSLPtr(std::make_unique<SemaHLSL>(*this)), 230 HexagonPtr(std::make_unique<SemaHexagon>(*this)), 231 LoongArchPtr(std::make_unique<SemaLoongArch>(*this)), 232 M68kPtr(std::make_unique<SemaM68k>(*this)), 233 MIPSPtr(std::make_unique<SemaMIPS>(*this)), 234 MSP430Ptr(std::make_unique<SemaMSP430>(*this)), 235 NVPTXPtr(std::make_unique<SemaNVPTX>(*this)), 236 ObjCPtr(std::make_unique<SemaObjC>(*this)), 237 OpenACCPtr(std::make_unique<SemaOpenACC>(*this)), 238 OpenCLPtr(std::make_unique<SemaOpenCL>(*this)), 239 OpenMPPtr(std::make_unique<SemaOpenMP>(*this)), 240 PPCPtr(std::make_unique<SemaPPC>(*this)), 241 PseudoObjectPtr(std::make_unique<SemaPseudoObject>(*this)), 242 RISCVPtr(std::make_unique<SemaRISCV>(*this)), 243 SPIRVPtr(std::make_unique<SemaSPIRV>(*this)), 244 SYCLPtr(std::make_unique<SemaSYCL>(*this)), 245 SwiftPtr(std::make_unique<SemaSwift>(*this)), 246 SystemZPtr(std::make_unique<SemaSystemZ>(*this)), 247 WasmPtr(std::make_unique<SemaWasm>(*this)), 248 X86Ptr(std::make_unique<SemaX86>(*this)), 249 MSPointerToMemberRepresentationMethod( 250 LangOpts.getMSPointerToMemberRepresentationMethod()), 251 MSStructPragmaOn(false), VtorDispStack(LangOpts.getVtorDispMode()), 252 AlignPackStack(AlignPackInfo(getLangOpts().XLPragmaPack)), 253 DataSegStack(nullptr), BSSSegStack(nullptr), ConstSegStack(nullptr), 254 CodeSegStack(nullptr), StrictGuardStackCheckStack(false), 255 FpPragmaStack(FPOptionsOverride()), CurInitSeg(nullptr), 256 VisContext(nullptr), PragmaAttributeCurrentTargetDecl(nullptr), 257 StdCoroutineTraitsCache(nullptr), IdResolver(pp), 258 OriginalLexicalContext(nullptr), StdInitializerList(nullptr), 259 FullyCheckedComparisonCategories( 260 static_cast<unsigned>(ComparisonCategoryType::Last) + 1), 261 StdSourceLocationImplDecl(nullptr), 
CXXTypeInfoDecl(nullptr), 262 GlobalNewDeleteDeclared(false), DisableTypoCorrection(false), 263 TyposCorrected(0), IsBuildingRecoveryCallExpr(false), NumSFINAEErrors(0), 264 AccessCheckingSFINAE(false), CurrentInstantiationScope(nullptr), 265 InNonInstantiationSFINAEContext(false), NonInstantiationEntries(0), 266 ArgumentPackSubstitutionIndex(-1), SatisfactionCache(Context) { 267 assert(pp.TUKind == TUKind); 268 TUScope = nullptr; 269 270 LoadedExternalKnownNamespaces = false; 271 for (unsigned I = 0; I != NSAPI::NumNSNumberLiteralMethods; ++I) 272 ObjC().NSNumberLiteralMethods[I] = nullptr; 273 274 if (getLangOpts().ObjC) 275 ObjC().NSAPIObj.reset(new NSAPI(Context)); 276 277 if (getLangOpts().CPlusPlus) 278 FieldCollector.reset(new CXXFieldCollector()); 279 280 // Tell diagnostics how to render things from the AST library. 281 Diags.SetArgToStringFn(&FormatASTNodeDiagnosticArgument, &Context); 282 283 // This evaluation context exists to ensure that there's always at least one 284 // valid evaluation context available. It is never removed from the 285 // evaluation stack. 286 ExprEvalContexts.emplace_back( 287 ExpressionEvaluationContext::PotentiallyEvaluated, 0, CleanupInfo{}, 288 nullptr, ExpressionEvaluationContextRecord::EK_Other); 289 290 // Initialization of data sharing attributes stack for OpenMP 291 OpenMP().InitDataSharingAttributesStack(); 292 293 std::unique_ptr<sema::SemaPPCallbacks> Callbacks = 294 std::make_unique<sema::SemaPPCallbacks>(); 295 SemaPPCallbackHandler = Callbacks.get(); 296 PP.addPPCallbacks(std::move(Callbacks)); 297 SemaPPCallbackHandler->set(*this); 298 299 CurFPFeatures.setFPEvalMethod(PP.getCurrentFPEvalMethod()); 300 } 301 302 // Anchor Sema's type info to this TU. 303 void Sema::anchor() {} 304 305 void Sema::addImplicitTypedef(StringRef Name, QualType T) { 306 DeclarationName DN = &Context.Idents.get(Name); 307 if (IdResolver.begin(DN) == IdResolver.end()) 308 PushOnScopeChains(Context.buildImplicitTypedef(T, Name), TUScope); 309 } 310 311 void Sema::Initialize() { 312 // Create BuiltinVaListDecl *before* ExternalSemaSource::InitializeSema(this) 313 // because during initialization ASTReader can emit globals that require 314 // name mangling. And the name mangling uses BuiltinVaListDecl. 315 if (Context.getTargetInfo().hasBuiltinMSVaList()) 316 (void)Context.getBuiltinMSVaListDecl(); 317 (void)Context.getBuiltinVaListDecl(); 318 319 if (SemaConsumer *SC = dyn_cast<SemaConsumer>(&Consumer)) 320 SC->InitializeSema(*this); 321 322 // Tell the external Sema source about this Sema object. 323 if (ExternalSemaSource *ExternalSema 324 = dyn_cast_or_null<ExternalSemaSource>(Context.getExternalSource())) 325 ExternalSema->InitializeSema(*this); 326 327 // This needs to happen after ExternalSemaSource::InitializeSema(this) or we 328 // will not be able to merge any duplicate __va_list_tag decls correctly. 329 VAListTagName = PP.getIdentifierInfo("__va_list_tag"); 330 331 if (!TUScope) 332 return; 333 334 // Initialize predefined 128-bit integer types, if needed. 335 if (Context.getTargetInfo().hasInt128Type() || 336 (Context.getAuxTargetInfo() && 337 Context.getAuxTargetInfo()->hasInt128Type())) { 338 // If either of the 128-bit integer types are unavailable to name lookup, 339 // define them now. 
340 DeclarationName Int128 = &Context.Idents.get("__int128_t"); 341 if (IdResolver.begin(Int128) == IdResolver.end()) 342 PushOnScopeChains(Context.getInt128Decl(), TUScope); 343 344 DeclarationName UInt128 = &Context.Idents.get("__uint128_t"); 345 if (IdResolver.begin(UInt128) == IdResolver.end()) 346 PushOnScopeChains(Context.getUInt128Decl(), TUScope); 347 } 348 349 350 // Initialize predefined Objective-C types: 351 if (getLangOpts().ObjC) { 352 // If 'SEL' does not yet refer to any declarations, make it refer to the 353 // predefined 'SEL'. 354 DeclarationName SEL = &Context.Idents.get("SEL"); 355 if (IdResolver.begin(SEL) == IdResolver.end()) 356 PushOnScopeChains(Context.getObjCSelDecl(), TUScope); 357 358 // If 'id' does not yet refer to any declarations, make it refer to the 359 // predefined 'id'. 360 DeclarationName Id = &Context.Idents.get("id"); 361 if (IdResolver.begin(Id) == IdResolver.end()) 362 PushOnScopeChains(Context.getObjCIdDecl(), TUScope); 363 364 // Create the built-in typedef for 'Class'. 365 DeclarationName Class = &Context.Idents.get("Class"); 366 if (IdResolver.begin(Class) == IdResolver.end()) 367 PushOnScopeChains(Context.getObjCClassDecl(), TUScope); 368 369 // Create the built-in forward declaration for 'Protocol'. 370 DeclarationName Protocol = &Context.Idents.get("Protocol"); 371 if (IdResolver.begin(Protocol) == IdResolver.end()) 372 PushOnScopeChains(Context.getObjCProtocolDecl(), TUScope); 373 } 374 375 // Create the internal type for the *StringMakeConstantString builtins. 376 DeclarationName ConstantString = &Context.Idents.get("__NSConstantString"); 377 if (IdResolver.begin(ConstantString) == IdResolver.end()) 378 PushOnScopeChains(Context.getCFConstantStringDecl(), TUScope); 379 380 // Initialize Microsoft "predefined C++ types". 381 if (getLangOpts().MSVCCompat) { 382 if (getLangOpts().CPlusPlus && 383 IdResolver.begin(&Context.Idents.get("type_info")) == IdResolver.end()) 384 PushOnScopeChains( 385 Context.buildImplicitRecord("type_info", TagTypeKind::Class), 386 TUScope); 387 388 addImplicitTypedef("size_t", Context.getSizeType()); 389 } 390 391 // Initialize predefined OpenCL types and supported extensions and (optional) 392 // core features. 393 if (getLangOpts().OpenCL) { 394 getOpenCLOptions().addSupport( 395 Context.getTargetInfo().getSupportedOpenCLOpts(), getLangOpts()); 396 addImplicitTypedef("sampler_t", Context.OCLSamplerTy); 397 addImplicitTypedef("event_t", Context.OCLEventTy); 398 auto OCLCompatibleVersion = getLangOpts().getOpenCLCompatibleVersion(); 399 if (OCLCompatibleVersion >= 200) { 400 if (getLangOpts().OpenCLCPlusPlus || getLangOpts().Blocks) { 401 addImplicitTypedef("clk_event_t", Context.OCLClkEventTy); 402 addImplicitTypedef("queue_t", Context.OCLQueueTy); 403 } 404 if (getLangOpts().OpenCLPipes) 405 addImplicitTypedef("reserve_id_t", Context.OCLReserveIDTy); 406 addImplicitTypedef("atomic_int", Context.getAtomicType(Context.IntTy)); 407 addImplicitTypedef("atomic_uint", 408 Context.getAtomicType(Context.UnsignedIntTy)); 409 addImplicitTypedef("atomic_float", 410 Context.getAtomicType(Context.FloatTy)); 411 // OpenCL v2.0, s6.13.11.6 requires that atomic_flag is implemented as 412 // 32-bit integer and OpenCL v2.0, s6.1.1 int is always 32-bit wide.
413 addImplicitTypedef("atomic_flag", Context.getAtomicType(Context.IntTy)); 414 415 416 // OpenCL v2.0 s6.13.11.6: 417 // - The atomic_long and atomic_ulong types are supported if the 418 // cl_khr_int64_base_atomics and cl_khr_int64_extended_atomics 419 // extensions are supported. 420 // - The atomic_double type is only supported if double precision 421 // is supported and the cl_khr_int64_base_atomics and 422 // cl_khr_int64_extended_atomics extensions are supported. 423 // - If the device address space is 64-bits, the data types 424 // atomic_intptr_t, atomic_uintptr_t, atomic_size_t and 425 // atomic_ptrdiff_t are supported if the cl_khr_int64_base_atomics and 426 // cl_khr_int64_extended_atomics extensions are supported. 427 428 auto AddPointerSizeDependentTypes = [&]() { 429 auto AtomicSizeT = Context.getAtomicType(Context.getSizeType()); 430 auto AtomicIntPtrT = Context.getAtomicType(Context.getIntPtrType()); 431 auto AtomicUIntPtrT = Context.getAtomicType(Context.getUIntPtrType()); 432 auto AtomicPtrDiffT = 433 Context.getAtomicType(Context.getPointerDiffType()); 434 addImplicitTypedef("atomic_size_t", AtomicSizeT); 435 addImplicitTypedef("atomic_intptr_t", AtomicIntPtrT); 436 addImplicitTypedef("atomic_uintptr_t", AtomicUIntPtrT); 437 addImplicitTypedef("atomic_ptrdiff_t", AtomicPtrDiffT); 438 }; 439 440 if (Context.getTypeSize(Context.getSizeType()) == 32) { 441 AddPointerSizeDependentTypes(); 442 } 443 444 if (getOpenCLOptions().isSupported("cl_khr_fp16", getLangOpts())) { 445 auto AtomicHalfT = Context.getAtomicType(Context.HalfTy); 446 addImplicitTypedef("atomic_half", AtomicHalfT); 447 } 448 449 std::vector<QualType> Atomic64BitTypes; 450 if (getOpenCLOptions().isSupported("cl_khr_int64_base_atomics", 451 getLangOpts()) && 452 getOpenCLOptions().isSupported("cl_khr_int64_extended_atomics", 453 getLangOpts())) { 454 if (getOpenCLOptions().isSupported("cl_khr_fp64", getLangOpts())) { 455 auto AtomicDoubleT = Context.getAtomicType(Context.DoubleTy); 456 addImplicitTypedef("atomic_double", AtomicDoubleT); 457 Atomic64BitTypes.push_back(AtomicDoubleT); 458 } 459 auto AtomicLongT = Context.getAtomicType(Context.LongTy); 460 auto AtomicULongT = Context.getAtomicType(Context.UnsignedLongTy); 461 addImplicitTypedef("atomic_long", AtomicLongT); 462 addImplicitTypedef("atomic_ulong", AtomicULongT); 463 464 465 if (Context.getTypeSize(Context.getSizeType()) == 64) { 466 AddPointerSizeDependentTypes(); 467 } 468 } 469 } 470 471 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ 472 if (getOpenCLOptions().isSupported(#Ext, getLangOpts())) { \ 473 addImplicitTypedef(#ExtType, Context.Id##Ty); \ 474 } 475 #include "clang/Basic/OpenCLExtensionTypes.def" 476 } 477 478 if (Context.getTargetInfo().hasAArch64SVETypes() || 479 (Context.getAuxTargetInfo() && 480 Context.getAuxTargetInfo()->hasAArch64SVETypes())) { 481 #define SVE_TYPE(Name, Id, SingletonId) \ 482 addImplicitTypedef(Name, Context.SingletonId); 483 #include "clang/Basic/AArch64SVEACLETypes.def" 484 } 485 486 if (Context.getTargetInfo().getTriple().isPPC64()) { 487 #define PPC_VECTOR_MMA_TYPE(Name, Id, Size) \ 488 addImplicitTypedef(#Name, Context.Id##Ty); 489 #include "clang/Basic/PPCTypes.def" 490 #define PPC_VECTOR_VSX_TYPE(Name, Id, Size) \ 491 addImplicitTypedef(#Name, Context.Id##Ty); 492 #include "clang/Basic/PPCTypes.def" 493 } 494 495 if (Context.getTargetInfo().hasRISCVVTypes()) { 496 #define RVV_TYPE(Name, Id, SingletonId) \ 497 addImplicitTypedef(Name, Context.SingletonId); 498 #include "clang/Basic/RISCVVTypes.def" 499 } 500 
501 if (Context.getTargetInfo().getTriple().isWasm() && 502 Context.getTargetInfo().hasFeature("reference-types")) { 503 #define WASM_TYPE(Name, Id, SingletonId) \ 504 addImplicitTypedef(Name, Context.SingletonId); 505 #include "clang/Basic/WebAssemblyReferenceTypes.def" 506 } 507 508 if (Context.getTargetInfo().getTriple().isAMDGPU() || 509 (Context.getAuxTargetInfo() && 510 Context.getAuxTargetInfo()->getTriple().isAMDGPU())) { 511 #define AMDGPU_TYPE(Name, Id, SingletonId, Width, Align) \ 512 addImplicitTypedef(Name, Context.SingletonId); 513 #include "clang/Basic/AMDGPUTypes.def" 514 } 515 516 if (Context.getTargetInfo().hasBuiltinMSVaList()) { 517 DeclarationName MSVaList = &Context.Idents.get("__builtin_ms_va_list"); 518 if (IdResolver.begin(MSVaList) == IdResolver.end()) 519 PushOnScopeChains(Context.getBuiltinMSVaListDecl(), TUScope); 520 } 521 522 DeclarationName BuiltinVaList = &Context.Idents.get("__builtin_va_list"); 523 if (IdResolver.begin(BuiltinVaList) == IdResolver.end()) 524 PushOnScopeChains(Context.getBuiltinVaListDecl(), TUScope); 525 } 526 527 Sema::~Sema() { 528 assert(InstantiatingSpecializations.empty() && 529 "failed to clean up an InstantiatingTemplate?"); 530 531 if (VisContext) FreeVisContext(); 532 533 // Kill all the active scopes. 534 for (sema::FunctionScopeInfo *FSI : FunctionScopes) 535 delete FSI; 536 537 // Tell the SemaConsumer to forget about us; we're going out of scope. 538 if (SemaConsumer *SC = dyn_cast<SemaConsumer>(&Consumer)) 539 SC->ForgetSema(); 540 541 // Detach from the external Sema source. 542 if (ExternalSemaSource *ExternalSema 543 = dyn_cast_or_null<ExternalSemaSource>(Context.getExternalSource())) 544 ExternalSema->ForgetSema(); 545 546 // Delete cached satisfactions. 547 std::vector<ConstraintSatisfaction *> Satisfactions; 548 Satisfactions.reserve(SatisfactionCache.size()); 549 for (auto &Node : SatisfactionCache) 550 Satisfactions.push_back(&Node); 551 for (auto *Node : Satisfactions) 552 delete Node; 553 554 threadSafety::threadSafetyCleanup(ThreadSafetyDeclCache); 555 556 // Destroys data sharing attributes stack for OpenMP 557 OpenMP().DestroyDataSharingAttributesStack(); 558 559 // Detach from the PP callback handler which outlives Sema since it's owned 560 // by the preprocessor. 561 SemaPPCallbackHandler->reset(); 562 } 563 564 void Sema::runWithSufficientStackSpace(SourceLocation Loc, 565 llvm::function_ref<void()> Fn) { 566 StackHandler.runWithSufficientStackSpace(Loc, Fn); 567 } 568 569 bool Sema::makeUnavailableInSystemHeader(SourceLocation loc, 570 UnavailableAttr::ImplicitReason reason) { 571 // If we're not in a function, it's an error. 572 FunctionDecl *fn = dyn_cast<FunctionDecl>(CurContext); 573 if (!fn) return false; 574 575 // If we're in template instantiation, it's an error. 576 if (inTemplateInstantiation()) 577 return false; 578 579 // If that function's not in a system header, it's an error. 580 if (!Context.getSourceManager().isInSystemHeader(loc)) 581 return false; 582 583 // If the function is already unavailable, it's not an error. 
584 if (fn->hasAttr<UnavailableAttr>()) return true; 585 586 fn->addAttr(UnavailableAttr::CreateImplicit(Context, "", reason, loc)); 587 return true; 588 } 589 590 ASTMutationListener *Sema::getASTMutationListener() const { 591 return getASTConsumer().GetASTMutationListener(); 592 } 593 594 void Sema::addExternalSource(ExternalSemaSource *E) { 595 assert(E && "Cannot use with NULL ptr"); 596 597 if (!ExternalSource) { 598 ExternalSource = E; 599 return; 600 } 601 602 if (auto *Ex = dyn_cast<MultiplexExternalSemaSource>(ExternalSource)) 603 Ex->AddSource(E); 604 else 605 ExternalSource = new MultiplexExternalSemaSource(ExternalSource.get(), E); 606 } 607 608 void Sema::PrintStats() const { 609 llvm::errs() << "\n*** Semantic Analysis Stats:\n"; 610 llvm::errs() << NumSFINAEErrors << " SFINAE diagnostics trapped.\n"; 611 612 BumpAlloc.PrintStats(); 613 AnalysisWarnings.PrintStats(); 614 } 615 616 void Sema::diagnoseNullableToNonnullConversion(QualType DstType, 617 QualType SrcType, 618 SourceLocation Loc) { 619 std::optional<NullabilityKind> ExprNullability = SrcType->getNullability(); 620 if (!ExprNullability || (*ExprNullability != NullabilityKind::Nullable && 621 *ExprNullability != NullabilityKind::NullableResult)) 622 return; 623 624 std::optional<NullabilityKind> TypeNullability = DstType->getNullability(); 625 if (!TypeNullability || *TypeNullability != NullabilityKind::NonNull) 626 return; 627 628 Diag(Loc, diag::warn_nullability_lost) << SrcType << DstType; 629 } 630 631 // Generate diagnostics when adding or removing effects in a type conversion. 632 void Sema::diagnoseFunctionEffectConversion(QualType DstType, QualType SrcType, 633 SourceLocation Loc) { 634 const auto SrcFX = FunctionEffectsRef::get(SrcType); 635 const auto DstFX = FunctionEffectsRef::get(DstType); 636 if (SrcFX != DstFX) { 637 for (const auto &Diff : FunctionEffectDiffVector(SrcFX, DstFX)) { 638 if (Diff.shouldDiagnoseConversion(SrcType, SrcFX, DstType, DstFX)) 639 Diag(Loc, diag::warn_invalid_add_func_effects) << Diff.effectName(); 640 } 641 } 642 } 643 644 void Sema::diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E) { 645 // nullptr only exists from C++11 on, so don't warn on its absence earlier. 646 if (!getLangOpts().CPlusPlus11) 647 return; 648 649 if (Kind != CK_NullToPointer && Kind != CK_NullToMemberPointer) 650 return; 651 652 const Expr *EStripped = E->IgnoreParenImpCasts(); 653 if (EStripped->getType()->isNullPtrType()) 654 return; 655 if (isa<GNUNullExpr>(EStripped)) 656 return; 657 658 if (Diags.isIgnored(diag::warn_zero_as_null_pointer_constant, 659 E->getBeginLoc())) 660 return; 661 662 // Don't diagnose the conversion from a 0 literal to a null pointer argument 663 // in a synthesized call to operator<=>. 664 if (!CodeSynthesisContexts.empty() && 665 CodeSynthesisContexts.back().Kind == 666 CodeSynthesisContext::RewritingOperatorAsSpaceship) 667 return; 668 669 // Ignore null pointers in defaulted comparison operators. 670 FunctionDecl *FD = getCurFunctionDecl(); 671 if (FD && FD->isDefaulted()) { 672 return; 673 } 674 675 // If it is a macro from system header, and if the macro name is not "NULL", 676 // do not warn. 677 // Note that uses of "NULL" will be ignored above on systems that define it 678 // as __null. 
679 SourceLocation MaybeMacroLoc = E->getBeginLoc(); 680 if (Diags.getSuppressSystemWarnings() && 681 SourceMgr.isInSystemMacro(MaybeMacroLoc) && 682 !findMacroSpelling(MaybeMacroLoc, "NULL")) 683 return; 684 685 Diag(E->getBeginLoc(), diag::warn_zero_as_null_pointer_constant) 686 << FixItHint::CreateReplacement(E->getSourceRange(), "nullptr"); 687 } 688 689 /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit cast. 690 /// If there is already an implicit cast, merge into the existing one. 691 /// The result is of the given category. 692 ExprResult Sema::ImpCastExprToType(Expr *E, QualType Ty, 693 CastKind Kind, ExprValueKind VK, 694 const CXXCastPath *BasePath, 695 CheckedConversionKind CCK) { 696 #ifndef NDEBUG 697 if (VK == VK_PRValue && !E->isPRValue()) { 698 switch (Kind) { 699 default: 700 llvm_unreachable( 701 ("can't implicitly cast glvalue to prvalue with this cast " 702 "kind: " + 703 std::string(CastExpr::getCastKindName(Kind))) 704 .c_str()); 705 case CK_Dependent: 706 case CK_LValueToRValue: 707 case CK_ArrayToPointerDecay: 708 case CK_FunctionToPointerDecay: 709 case CK_ToVoid: 710 case CK_NonAtomicToAtomic: 711 case CK_HLSLArrayRValue: 712 break; 713 } 714 } 715 assert((VK == VK_PRValue || Kind == CK_Dependent || !E->isPRValue()) && 716 "can't cast prvalue to glvalue"); 717 #endif 718 719 diagnoseNullableToNonnullConversion(Ty, E->getType(), E->getBeginLoc()); 720 diagnoseZeroToNullptrConversion(Kind, E); 721 if (Context.hasAnyFunctionEffects() && !isCast(CCK) && 722 Kind != CK_NullToPointer && Kind != CK_NullToMemberPointer) 723 diagnoseFunctionEffectConversion(Ty, E->getType(), E->getBeginLoc()); 724 725 QualType ExprTy = Context.getCanonicalType(E->getType()); 726 QualType TypeTy = Context.getCanonicalType(Ty); 727 728 // This cast is used in place of a regular LValue to RValue cast for 729 // HLSL Array Parameter Types. It needs to be emitted even if 730 // ExprTy == TypeTy, except if E is an HLSLOutArgExpr 731 // Emitting a cast in that case will prevent HLSLOutArgExpr from 732 // being handled properly in EmitCallArg 733 if (Kind == CK_HLSLArrayRValue && !isa<HLSLOutArgExpr>(E)) 734 return ImplicitCastExpr::Create(Context, Ty, Kind, E, BasePath, VK, 735 CurFPFeatureOverrides()); 736 737 if (ExprTy == TypeTy) 738 return E; 739 740 if (Kind == CK_ArrayToPointerDecay) { 741 // C++1z [conv.array]: The temporary materialization conversion is applied. 742 // We also use this to fuel C++ DR1213, which applies to C++11 onwards. 743 if (getLangOpts().CPlusPlus && E->isPRValue()) { 744 // The temporary is an lvalue in C++98 and an xvalue otherwise. 745 ExprResult Materialized = CreateMaterializeTemporaryExpr( 746 E->getType(), E, !getLangOpts().CPlusPlus11); 747 if (Materialized.isInvalid()) 748 return ExprError(); 749 E = Materialized.get(); 750 } 751 // C17 6.7.1p6 footnote 124: The implementation can treat any register 752 // declaration simply as an auto declaration. However, whether or not 753 // addressable storage is actually used, the address of any part of an 754 // object declared with storage-class specifier register cannot be 755 // computed, either explicitly(by use of the unary & operator as discussed 756 // in 6.5.3.2) or implicitly(by converting an array name to a pointer as 757 // discussed in 6.3.2.1).Thus, the only operator that can be applied to an 758 // array declared with storage-class specifier register is sizeof. 
759 if (VK == VK_PRValue && !getLangOpts().CPlusPlus && !E->isPRValue()) { 760 if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) { 761 if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl())) { 762 if (VD->getStorageClass() == SC_Register) { 763 Diag(E->getExprLoc(), diag::err_typecheck_address_of) 764 << /*register variable*/ 3 << E->getSourceRange(); 765 return ExprError(); 766 } 767 } 768 } 769 } 770 } 771 772 if (ImplicitCastExpr *ImpCast = dyn_cast<ImplicitCastExpr>(E)) { 773 if (ImpCast->getCastKind() == Kind && (!BasePath || BasePath->empty())) { 774 ImpCast->setType(Ty); 775 ImpCast->setValueKind(VK); 776 return E; 777 } 778 } 779 780 return ImplicitCastExpr::Create(Context, Ty, Kind, E, BasePath, VK, 781 CurFPFeatureOverrides()); 782 } 783 784 CastKind Sema::ScalarTypeToBooleanCastKind(QualType ScalarTy) { 785 switch (ScalarTy->getScalarTypeKind()) { 786 case Type::STK_Bool: return CK_NoOp; 787 case Type::STK_CPointer: return CK_PointerToBoolean; 788 case Type::STK_BlockPointer: return CK_PointerToBoolean; 789 case Type::STK_ObjCObjectPointer: return CK_PointerToBoolean; 790 case Type::STK_MemberPointer: return CK_MemberPointerToBoolean; 791 case Type::STK_Integral: return CK_IntegralToBoolean; 792 case Type::STK_Floating: return CK_FloatingToBoolean; 793 case Type::STK_IntegralComplex: return CK_IntegralComplexToBoolean; 794 case Type::STK_FloatingComplex: return CK_FloatingComplexToBoolean; 795 case Type::STK_FixedPoint: return CK_FixedPointToBoolean; 796 } 797 llvm_unreachable("unknown scalar type kind"); 798 } 799 800 /// Used to prune the decls of Sema's UnusedFileScopedDecls vector. 801 static bool ShouldRemoveFromUnused(Sema *SemaRef, const DeclaratorDecl *D) { 802 if (D->getMostRecentDecl()->isUsed()) 803 return true; 804 805 if (D->isExternallyVisible()) 806 return true; 807 808 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { 809 // If this is a function template and none of its specializations is used, 810 // we should warn. 811 if (FunctionTemplateDecl *Template = FD->getDescribedFunctionTemplate()) 812 for (const auto *Spec : Template->specializations()) 813 if (ShouldRemoveFromUnused(SemaRef, Spec)) 814 return true; 815 816 // UnusedFileScopedDecls stores the first declaration. 817 // The declaration may have become definition so check again. 818 const FunctionDecl *DeclToCheck; 819 if (FD->hasBody(DeclToCheck)) 820 return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck); 821 822 // Later redecls may add new information resulting in not having to warn, 823 // so check again. 824 DeclToCheck = FD->getMostRecentDecl(); 825 if (DeclToCheck != FD) 826 return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck); 827 } 828 829 if (const VarDecl *VD = dyn_cast<VarDecl>(D)) { 830 // If a variable usable in constant expressions is referenced, 831 // don't warn if it isn't used: if the value of a variable is required 832 // for the computation of a constant expression, it doesn't make sense to 833 // warn even if the variable isn't odr-used. (isReferenced doesn't 834 // precisely reflect that, but it's a decent approximation.) 835 if (VD->isReferenced() && 836 VD->mightBeUsableInConstantExpressions(SemaRef->Context)) 837 return true; 838 839 if (VarTemplateDecl *Template = VD->getDescribedVarTemplate()) 840 // If this is a variable template and none of its specializations is used, 841 // we should warn. 
842 for (const auto *Spec : Template->specializations()) 843 if (ShouldRemoveFromUnused(SemaRef, Spec)) 844 return true; 845 846 // UnusedFileScopedDecls stores the first declaration. 847 // The declaration may have become definition so check again. 848 const VarDecl *DeclToCheck = VD->getDefinition(); 849 if (DeclToCheck) 850 return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck); 851 852 // Later redecls may add new information resulting in not having to warn, 853 // so check again. 854 DeclToCheck = VD->getMostRecentDecl(); 855 if (DeclToCheck != VD) 856 return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck); 857 } 858 859 return false; 860 } 861 862 static bool isFunctionOrVarDeclExternC(const NamedDecl *ND) { 863 if (const auto *FD = dyn_cast<FunctionDecl>(ND)) 864 return FD->isExternC(); 865 return cast<VarDecl>(ND)->isExternC(); 866 } 867 868 /// Determine whether ND is an external-linkage function or variable whose 869 /// type has no linkage. 870 bool Sema::isExternalWithNoLinkageType(const ValueDecl *VD) const { 871 // Note: it's not quite enough to check whether VD has UniqueExternalLinkage, 872 // because we also want to catch the case where its type has VisibleNoLinkage, 873 // which does not affect the linkage of VD. 874 return getLangOpts().CPlusPlus && VD->hasExternalFormalLinkage() && 875 !isExternalFormalLinkage(VD->getType()->getLinkage()) && 876 !isFunctionOrVarDeclExternC(VD); 877 } 878 879 /// Obtains a sorted list of functions and variables that are undefined but 880 /// ODR-used. 881 void Sema::getUndefinedButUsed( 882 SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined) { 883 for (const auto &UndefinedUse : UndefinedButUsed) { 884 NamedDecl *ND = UndefinedUse.first; 885 886 // Ignore attributes that have become invalid. 887 if (ND->isInvalidDecl()) continue; 888 889 // __attribute__((weakref)) is basically a definition. 890 if (ND->hasAttr<WeakRefAttr>()) continue; 891 892 if (isa<CXXDeductionGuideDecl>(ND)) 893 continue; 894 895 if (ND->hasAttr<DLLImportAttr>() || ND->hasAttr<DLLExportAttr>()) { 896 // An exported function will always be emitted when defined, so even if 897 // the function is inline, it doesn't have to be emitted in this TU. An 898 // imported function implies that it has been exported somewhere else. 899 continue; 900 } 901 902 if (const auto *FD = dyn_cast<FunctionDecl>(ND)) { 903 if (FD->isDefined()) 904 continue; 905 if (FD->isExternallyVisible() && 906 !isExternalWithNoLinkageType(FD) && 907 !FD->getMostRecentDecl()->isInlined() && 908 !FD->hasAttr<ExcludeFromExplicitInstantiationAttr>()) 909 continue; 910 if (FD->getBuiltinID()) 911 continue; 912 } else { 913 const auto *VD = cast<VarDecl>(ND); 914 if (VD->hasDefinition() != VarDecl::DeclarationOnly) 915 continue; 916 if (VD->isExternallyVisible() && 917 !isExternalWithNoLinkageType(VD) && 918 !VD->getMostRecentDecl()->isInline() && 919 !VD->hasAttr<ExcludeFromExplicitInstantiationAttr>()) 920 continue; 921 922 // Skip VarDecls that lack formal definitions but which we know are in 923 // fact defined somewhere. 924 if (VD->isKnownToBeDefined()) 925 continue; 926 } 927 928 Undefined.push_back(std::make_pair(ND, UndefinedUse.second)); 929 } 930 } 931 932 /// checkUndefinedButUsed - Check for undefined objects with internal linkage 933 /// or that are inline. 934 static void checkUndefinedButUsed(Sema &S) { 935 if (S.UndefinedButUsed.empty()) return; 936 937 // Collect all the still-undefined entities with internal linkage. 
938 SmallVector<std::pair<NamedDecl *, SourceLocation>, 16> Undefined; 939 S.getUndefinedButUsed(Undefined); 940 S.UndefinedButUsed.clear(); 941 if (Undefined.empty()) return; 942 943 for (const auto &Undef : Undefined) { 944 ValueDecl *VD = cast<ValueDecl>(Undef.first); 945 SourceLocation UseLoc = Undef.second; 946 947 if (S.isExternalWithNoLinkageType(VD)) { 948 // C++ [basic.link]p8: 949 // A type without linkage shall not be used as the type of a variable 950 // or function with external linkage unless 951 // -- the entity has C language linkage 952 // -- the entity is not odr-used or is defined in the same TU 953 // 954 // As an extension, accept this in cases where the type is externally 955 // visible, since the function or variable actually can be defined in 956 // another translation unit in that case. 957 S.Diag(VD->getLocation(), isExternallyVisible(VD->getType()->getLinkage()) 958 ? diag::ext_undefined_internal_type 959 : diag::err_undefined_internal_type) 960 << isa<VarDecl>(VD) << VD; 961 } else if (!VD->isExternallyVisible()) { 962 // FIXME: We can promote this to an error. The function or variable can't 963 // be defined anywhere else, so the program must necessarily violate the 964 // one definition rule. 965 bool IsImplicitBase = false; 966 if (const auto *BaseD = dyn_cast<FunctionDecl>(VD)) { 967 auto *DVAttr = BaseD->getAttr<OMPDeclareVariantAttr>(); 968 if (DVAttr && !DVAttr->getTraitInfo().isExtensionActive( 969 llvm::omp::TraitProperty:: 970 implementation_extension_disable_implicit_base)) { 971 const auto *Func = cast<FunctionDecl>( 972 cast<DeclRefExpr>(DVAttr->getVariantFuncRef())->getDecl()); 973 IsImplicitBase = BaseD->isImplicit() && 974 Func->getIdentifier()->isMangledOpenMPVariantName(); 975 } 976 } 977 if (!S.getLangOpts().OpenMP || !IsImplicitBase) 978 S.Diag(VD->getLocation(), diag::warn_undefined_internal) 979 << isa<VarDecl>(VD) << VD; 980 } else if (auto *FD = dyn_cast<FunctionDecl>(VD)) { 981 (void)FD; 982 assert(FD->getMostRecentDecl()->isInlined() && 983 "used object requires definition but isn't inline or internal?"); 984 // FIXME: This is ill-formed; we should reject. 985 S.Diag(VD->getLocation(), diag::warn_undefined_inline) << VD; 986 } else { 987 assert(cast<VarDecl>(VD)->getMostRecentDecl()->isInline() && 988 "used var requires definition but isn't inline or internal?"); 989 S.Diag(VD->getLocation(), diag::err_undefined_inline_var) << VD; 990 } 991 if (UseLoc.isValid()) 992 S.Diag(UseLoc, diag::note_used_here); 993 } 994 } 995 996 void Sema::LoadExternalWeakUndeclaredIdentifiers() { 997 if (!ExternalSource) 998 return; 999 1000 SmallVector<std::pair<IdentifierInfo *, WeakInfo>, 4> WeakIDs; 1001 ExternalSource->ReadWeakUndeclaredIdentifiers(WeakIDs); 1002 for (auto &WeakID : WeakIDs) 1003 (void)WeakUndeclaredIdentifiers[WeakID.first].insert(WeakID.second); 1004 } 1005 1006 1007 typedef llvm::DenseMap<const CXXRecordDecl*, bool> RecordCompleteMap; 1008 1009 /// Returns true, if all methods and nested classes of the given 1010 /// CXXRecordDecl are defined in this translation unit. 1011 /// 1012 /// Should only be called from ActOnEndOfTranslationUnit so that all 1013 /// definitions are actually read. 
1014 static bool MethodsAndNestedClassesComplete(const CXXRecordDecl *RD, 1015 RecordCompleteMap &MNCComplete) { 1016 RecordCompleteMap::iterator Cache = MNCComplete.find(RD); 1017 if (Cache != MNCComplete.end()) 1018 return Cache->second; 1019 if (!RD->isCompleteDefinition()) 1020 return false; 1021 bool Complete = true; 1022 for (DeclContext::decl_iterator I = RD->decls_begin(), 1023 E = RD->decls_end(); 1024 I != E && Complete; ++I) { 1025 if (const CXXMethodDecl *M = dyn_cast<CXXMethodDecl>(*I)) 1026 Complete = M->isDefined() || M->isDefaulted() || 1027 (M->isPureVirtual() && !isa<CXXDestructorDecl>(M)); 1028 else if (const FunctionTemplateDecl *F = dyn_cast<FunctionTemplateDecl>(*I)) 1029 // If the template function is marked as late template parsed at this 1030 // point, it has not been instantiated and therefore we have not 1031 // performed semantic analysis on it yet, so we cannot know if the type 1032 // can be considered complete. 1033 Complete = !F->getTemplatedDecl()->isLateTemplateParsed() && 1034 F->getTemplatedDecl()->isDefined(); 1035 else if (const CXXRecordDecl *R = dyn_cast<CXXRecordDecl>(*I)) { 1036 if (R->isInjectedClassName()) 1037 continue; 1038 if (R->hasDefinition()) 1039 Complete = MethodsAndNestedClassesComplete(R->getDefinition(), 1040 MNCComplete); 1041 else 1042 Complete = false; 1043 } 1044 } 1045 MNCComplete[RD] = Complete; 1046 return Complete; 1047 } 1048 1049 /// Returns true, if the given CXXRecordDecl is fully defined in this 1050 /// translation unit, i.e. all methods are defined or pure virtual and all 1051 /// friends, friend functions and nested classes are fully defined in this 1052 /// translation unit. 1053 /// 1054 /// Should only be called from ActOnEndOfTranslationUnit so that all 1055 /// definitions are actually read. 1056 static bool IsRecordFullyDefined(const CXXRecordDecl *RD, 1057 RecordCompleteMap &RecordsComplete, 1058 RecordCompleteMap &MNCComplete) { 1059 RecordCompleteMap::iterator Cache = RecordsComplete.find(RD); 1060 if (Cache != RecordsComplete.end()) 1061 return Cache->second; 1062 bool Complete = MethodsAndNestedClassesComplete(RD, MNCComplete); 1063 for (CXXRecordDecl::friend_iterator I = RD->friend_begin(), 1064 E = RD->friend_end(); 1065 I != E && Complete; ++I) { 1066 // Check if friend classes and methods are complete. 1067 if (TypeSourceInfo *TSI = (*I)->getFriendType()) { 1068 // Friend classes are available as the TypeSourceInfo of the FriendDecl. 1069 if (CXXRecordDecl *FriendD = TSI->getType()->getAsCXXRecordDecl()) 1070 Complete = MethodsAndNestedClassesComplete(FriendD, MNCComplete); 1071 else 1072 Complete = false; 1073 } else { 1074 // Friend functions are available through the NamedDecl of FriendDecl. 1075 if (const FunctionDecl *FD = 1076 dyn_cast<FunctionDecl>((*I)->getFriendDecl())) 1077 Complete = FD->isDefined(); 1078 else 1079 // This is a template friend, give up. 
1080 Complete = false; 1081 } 1082 } 1083 RecordsComplete[RD] = Complete; 1084 return Complete; 1085 } 1086 1087 void Sema::emitAndClearUnusedLocalTypedefWarnings() { 1088 if (ExternalSource) 1089 ExternalSource->ReadUnusedLocalTypedefNameCandidates( 1090 UnusedLocalTypedefNameCandidates); 1091 for (const TypedefNameDecl *TD : UnusedLocalTypedefNameCandidates) { 1092 if (TD->isReferenced()) 1093 continue; 1094 Diag(TD->getLocation(), diag::warn_unused_local_typedef) 1095 << isa<TypeAliasDecl>(TD) << TD->getDeclName(); 1096 } 1097 UnusedLocalTypedefNameCandidates.clear(); 1098 } 1099 1100 void Sema::ActOnStartOfTranslationUnit() { 1101 if (getLangOpts().CPlusPlusModules && 1102 getLangOpts().getCompilingModule() == LangOptions::CMK_HeaderUnit) 1103 HandleStartOfHeaderUnit(); 1104 } 1105 1106 void Sema::ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind) { 1107 // No explicit actions are required at the end of the global module fragment. 1108 if (Kind == TUFragmentKind::Global) 1109 return; 1110 1111 // Transfer late parsed template instantiations over to the pending template 1112 // instantiation list. During normal compilation, the late template parser 1113 // will be installed and instantiating these templates will succeed. 1114 // 1115 // If we are building a TU prefix for serialization, it is also safe to 1116 // transfer these over, even though they are not parsed. The end of the TU 1117 // should be outside of any eager template instantiation scope, so when this 1118 // AST is deserialized, these templates will not be parsed until the end of 1119 // the combined TU. 1120 PendingInstantiations.insert(PendingInstantiations.end(), 1121 LateParsedInstantiations.begin(), 1122 LateParsedInstantiations.end()); 1123 LateParsedInstantiations.clear(); 1124 1125 // If DefineUsedVTables ends up marking any virtual member functions it 1126 // might lead to more pending template instantiations, which we then need 1127 // to instantiate. 1128 DefineUsedVTables(); 1129 1130 // C++: Perform implicit template instantiations. 1131 // 1132 // FIXME: When we perform these implicit instantiations, we do not 1133 // carefully keep track of the point of instantiation (C++ [temp.point]). 1134 // This means that name lookup that occurs within the template 1135 // instantiation will always happen at the end of the translation unit, 1136 // so it will find some names that are not required to be found. This is 1137 // valid, but we could do better by diagnosing if an instantiation uses a 1138 // name that was not visible at its first point of instantiation. 1139 if (ExternalSource) { 1140 // Load pending instantiations from the external source. 1141 SmallVector<PendingImplicitInstantiation, 4> Pending; 1142 ExternalSource->ReadPendingInstantiations(Pending); 1143 for (auto PII : Pending) 1144 if (auto Func = dyn_cast<FunctionDecl>(PII.first)) 1145 Func->setInstantiationIsPending(true); 1146 PendingInstantiations.insert(PendingInstantiations.begin(), 1147 Pending.begin(), Pending.end()); 1148 } 1149 1150 { 1151 llvm::TimeTraceScope TimeScope("PerformPendingInstantiations"); 1152 PerformPendingInstantiations(); 1153 } 1154 1155 emitDeferredDiags(); 1156 1157 assert(LateParsedInstantiations.empty() && 1158 "end of TU template instantiation should not create more " 1159 "late-parsed templates"); 1160 1161 // Report diagnostics for uncorrected delayed typos. Ideally all of them 1162 // should have been corrected by that time, but it is very hard to cover all 1163 // cases in practice.
1164 for (const auto &Typo : DelayedTypos) { 1165 // We pass an empty TypoCorrection to indicate no correction was performed. 1166 Typo.second.DiagHandler(TypoCorrection()); 1167 } 1168 DelayedTypos.clear(); 1169 } 1170 1171 void Sema::ActOnEndOfTranslationUnit() { 1172 assert(DelayedDiagnostics.getCurrentPool() == nullptr 1173 && "reached end of translation unit with a pool attached?"); 1174 1175 // If code completion is enabled, don't perform any end-of-translation-unit 1176 // work. 1177 if (PP.isCodeCompletionEnabled()) 1178 return; 1179 1180 // Complete translation units and modules define vtables and perform implicit 1181 // instantiations. PCH files do not. 1182 if (TUKind != TU_Prefix) { 1183 ObjC().DiagnoseUseOfUnimplementedSelectors(); 1184 1185 ActOnEndOfTranslationUnitFragment( 1186 !ModuleScopes.empty() && ModuleScopes.back().Module->Kind == 1187 Module::PrivateModuleFragment 1188 ? TUFragmentKind::Private 1189 : TUFragmentKind::Normal); 1190 1191 if (LateTemplateParserCleanup) 1192 LateTemplateParserCleanup(OpaqueParser); 1193 1194 CheckDelayedMemberExceptionSpecs(); 1195 } else { 1196 // If we are building a TU prefix for serialization, it is safe to transfer 1197 // these over, even though they are not parsed. The end of the TU should be 1198 // outside of any eager template instantiation scope, so when this AST is 1199 // deserialized, these templates will not be parsed until the end of the 1200 // combined TU. 1201 PendingInstantiations.insert(PendingInstantiations.end(), 1202 LateParsedInstantiations.begin(), 1203 LateParsedInstantiations.end()); 1204 LateParsedInstantiations.clear(); 1205 1206 if (LangOpts.PCHInstantiateTemplates) { 1207 llvm::TimeTraceScope TimeScope("PerformPendingInstantiations"); 1208 PerformPendingInstantiations(); 1209 } 1210 } 1211 1212 DiagnoseUnterminatedPragmaAlignPack(); 1213 DiagnoseUnterminatedPragmaAttribute(); 1214 OpenMP().DiagnoseUnterminatedOpenMPDeclareTarget(); 1215 DiagnosePrecisionLossInComplexDivision(); 1216 1217 // All delayed member exception specs should be checked or we end up accepting 1218 // incompatible declarations. 1219 assert(DelayedOverridingExceptionSpecChecks.empty()); 1220 assert(DelayedEquivalentExceptionSpecChecks.empty()); 1221 1222 // All dllexport classes should have been processed already. 1223 assert(DelayedDllExportClasses.empty()); 1224 assert(DelayedDllExportMemberFunctions.empty()); 1225 1226 // Remove file scoped decls that turned out to be used. 1227 UnusedFileScopedDecls.erase( 1228 std::remove_if(UnusedFileScopedDecls.begin(nullptr, true), 1229 UnusedFileScopedDecls.end(), 1230 [this](const DeclaratorDecl *DD) { 1231 return ShouldRemoveFromUnused(this, DD); 1232 }), 1233 UnusedFileScopedDecls.end()); 1234 1235 if (TUKind == TU_Prefix) { 1236 // Translation unit prefixes don't need any of the checking below. 
1237 if (!PP.isIncrementalProcessingEnabled()) 1238 TUScope = nullptr; 1239 return; 1240 } 1241 1242 // Check for #pragma weak identifiers that were never declared 1243 LoadExternalWeakUndeclaredIdentifiers(); 1244 for (const auto &WeakIDs : WeakUndeclaredIdentifiers) { 1245 if (WeakIDs.second.empty()) 1246 continue; 1247 1248 Decl *PrevDecl = LookupSingleName(TUScope, WeakIDs.first, SourceLocation(), 1249 LookupOrdinaryName); 1250 if (PrevDecl != nullptr && 1251 !(isa<FunctionDecl>(PrevDecl) || isa<VarDecl>(PrevDecl))) 1252 for (const auto &WI : WeakIDs.second) 1253 Diag(WI.getLocation(), diag::warn_attribute_wrong_decl_type) 1254 << "'weak'" << /*isRegularKeyword=*/0 << ExpectedVariableOrFunction; 1255 else 1256 for (const auto &WI : WeakIDs.second) 1257 Diag(WI.getLocation(), diag::warn_weak_identifier_undeclared) 1258 << WeakIDs.first; 1259 } 1260 1261 if (LangOpts.CPlusPlus11 && 1262 !Diags.isIgnored(diag::warn_delegating_ctor_cycle, SourceLocation())) 1263 CheckDelegatingCtorCycles(); 1264 1265 if (!Diags.hasErrorOccurred()) { 1266 if (ExternalSource) 1267 ExternalSource->ReadUndefinedButUsed(UndefinedButUsed); 1268 checkUndefinedButUsed(*this); 1269 } 1270 1271 // A global-module-fragment is only permitted within a module unit. 1272 if (!ModuleScopes.empty() && ModuleScopes.back().Module->Kind == 1273 Module::ExplicitGlobalModuleFragment) { 1274 Diag(ModuleScopes.back().BeginLoc, 1275 diag::err_module_declaration_missing_after_global_module_introducer); 1276 } else if (getLangOpts().getCompilingModule() == 1277 LangOptions::CMK_ModuleInterface && 1278 // We can't use ModuleScopes here since ModuleScopes is always 1279 // empty if we're compiling the BMI. 1280 !getASTContext().getCurrentNamedModule()) { 1281 // If we are building a module interface unit, we should have seen the 1282 // module declaration. 1283 // 1284 // FIXME: Make a better guess as to where to put the module declaration. 1285 Diag(getSourceManager().getLocForStartOfFile( 1286 getSourceManager().getMainFileID()), 1287 diag::err_module_declaration_missing); 1288 } 1289 1290 // Now we can decide whether the modules we're building need an initializer. 1291 if (Module *CurrentModule = getCurrentModule(); 1292 CurrentModule && CurrentModule->isInterfaceOrPartition()) { 1293 auto DoesModNeedInit = [this](Module *M) { 1294 if (!getASTContext().getModuleInitializers(M).empty()) 1295 return true; 1296 for (auto [Exported, _] : M->Exports) 1297 if (Exported->isNamedModuleInterfaceHasInit()) 1298 return true; 1299 for (Module *I : M->Imports) 1300 if (I->isNamedModuleInterfaceHasInit()) 1301 return true; 1302 1303 return false; 1304 }; 1305 1306 CurrentModule->NamedModuleHasInit = 1307 DoesModNeedInit(CurrentModule) || 1308 llvm::any_of(CurrentModule->submodules(), 1309 [&](auto *SubM) { return DoesModNeedInit(SubM); }); 1310 } 1311 1312 if (TUKind == TU_ClangModule) { 1313 // If we are building a module, resolve all of the exported declarations 1314 // now. 1315 if (Module *CurrentModule = PP.getCurrentModule()) { 1316 ModuleMap &ModMap = PP.getHeaderSearchInfo().getModuleMap(); 1317 1318 SmallVector<Module *, 2> Stack; 1319 Stack.push_back(CurrentModule); 1320 while (!Stack.empty()) { 1321 Module *Mod = Stack.pop_back_val(); 1322 1323 // Resolve the exported declarations and conflicts. 1324 // FIXME: Actually complain, once we figure out how to teach the 1325 // diagnostic client to deal with complaints in the module map at this 1326 // point. 
1327 ModMap.resolveExports(Mod, /*Complain=*/false); 1328 ModMap.resolveUses(Mod, /*Complain=*/false); 1329 ModMap.resolveConflicts(Mod, /*Complain=*/false); 1330 1331 // Queue the submodules, so their exports will also be resolved. 1332 auto SubmodulesRange = Mod->submodules(); 1333 Stack.append(SubmodulesRange.begin(), SubmodulesRange.end()); 1334 } 1335 } 1336 1337 // Warnings emitted in ActOnEndOfTranslationUnit() should be emitted for 1338 // modules when they are built, not every time they are used. 1339 emitAndClearUnusedLocalTypedefWarnings(); 1340 } 1341 1342 // C++ standard modules. Diagnose cases where a function is declared inline 1343 // in the module purview but has no definition before the end of the TU or 1344 // the start of a Private Module Fragment (if one is present). 1345 if (!PendingInlineFuncDecls.empty()) { 1346 for (auto *D : PendingInlineFuncDecls) { 1347 if (auto *FD = dyn_cast<FunctionDecl>(D)) { 1348 bool DefInPMF = false; 1349 if (auto *FDD = FD->getDefinition()) { 1350 DefInPMF = FDD->getOwningModule()->isPrivateModule(); 1351 if (!DefInPMF) 1352 continue; 1353 } 1354 Diag(FD->getLocation(), diag::err_export_inline_not_defined) 1355 << DefInPMF; 1356 // If we have a PMF it should be at the end of the ModuleScopes. 1357 if (DefInPMF && 1358 ModuleScopes.back().Module->Kind == Module::PrivateModuleFragment) { 1359 Diag(ModuleScopes.back().BeginLoc, 1360 diag::note_private_module_fragment); 1361 } 1362 } 1363 } 1364 PendingInlineFuncDecls.clear(); 1365 } 1366 1367 // C99 6.9.2p2: 1368 // A declaration of an identifier for an object that has file 1369 // scope without an initializer, and without a storage-class 1370 // specifier or with the storage-class specifier static, 1371 // constitutes a tentative definition. If a translation unit 1372 // contains one or more tentative definitions for an identifier, 1373 // and the translation unit contains no external definition for 1374 // that identifier, then the behavior is exactly as if the 1375 // translation unit contains a file scope declaration of that 1376 // identifier, with the composite type as of the end of the 1377 // translation unit, with an initializer equal to 0. 1378 llvm::SmallSet<VarDecl *, 32> Seen; 1379 for (TentativeDefinitionsType::iterator 1380 T = TentativeDefinitions.begin(ExternalSource.get()), 1381 TEnd = TentativeDefinitions.end(); 1382 T != TEnd; ++T) { 1383 VarDecl *VD = (*T)->getActingDefinition(); 1384 1385 // If the tentative definition was completed, getActingDefinition() returns 1386 // null. If we've already seen this variable before, insert()'s second 1387 // return value is false. 1388 if (!VD || VD->isInvalidDecl() || !Seen.insert(VD).second) 1389 continue; 1390 1391 if (const IncompleteArrayType *ArrayT 1392 = Context.getAsIncompleteArrayType(VD->getType())) { 1393 // Set the length of the array to 1 (C99 6.9.2p5). 1394 Diag(VD->getLocation(), diag::warn_tentative_incomplete_array); 1395 llvm::APInt One(Context.getTypeSize(Context.getSizeType()), true); 1396 QualType T = Context.getConstantArrayType( 1397 ArrayT->getElementType(), One, nullptr, ArraySizeModifier::Normal, 0); 1398 VD->setType(T); 1399 } else if (RequireCompleteType(VD->getLocation(), VD->getType(), 1400 diag::err_tentative_def_incomplete_type)) 1401 VD->setInvalidDecl(); 1402 1403 // No initialization is performed for a tentative definition. 1404 CheckCompleteVariableDeclaration(VD); 1405 1406 // Notify the consumer that we've completed a tentative definition. 
1407 if (!VD->isInvalidDecl()) 1408 Consumer.CompleteTentativeDefinition(VD); 1409 } 1410 1411 for (auto *D : ExternalDeclarations) { 1412 if (!D || D->isInvalidDecl() || D->getPreviousDecl() || !D->isUsed()) 1413 continue; 1414 1415 Consumer.CompleteExternalDeclaration(D); 1416 } 1417 1418 if (LangOpts.HLSL) 1419 HLSL().DiagnoseAvailabilityViolations( 1420 getASTContext().getTranslationUnitDecl()); 1421 1422 // If there were errors, disable 'unused' warnings since they will mostly be 1423 // noise. Don't warn for a use from a module: either we should warn on all 1424 // file-scope declarations in modules or not at all, but whether the 1425 // declaration is used is immaterial. 1426 if (!Diags.hasErrorOccurred() && TUKind != TU_ClangModule) { 1427 // Output warning for unused file scoped decls. 1428 for (UnusedFileScopedDeclsType::iterator 1429 I = UnusedFileScopedDecls.begin(ExternalSource.get()), 1430 E = UnusedFileScopedDecls.end(); 1431 I != E; ++I) { 1432 if (ShouldRemoveFromUnused(this, *I)) 1433 continue; 1434 1435 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(*I)) { 1436 const FunctionDecl *DiagD; 1437 if (!FD->hasBody(DiagD)) 1438 DiagD = FD; 1439 if (DiagD->isDeleted()) 1440 continue; // Deleted functions are supposed to be unused. 1441 SourceRange DiagRange = DiagD->getLocation(); 1442 if (const ASTTemplateArgumentListInfo *ASTTAL = 1443 DiagD->getTemplateSpecializationArgsAsWritten()) 1444 DiagRange.setEnd(ASTTAL->RAngleLoc); 1445 if (DiagD->isReferenced()) { 1446 if (isa<CXXMethodDecl>(DiagD)) 1447 Diag(DiagD->getLocation(), diag::warn_unneeded_member_function) 1448 << DiagD << DiagRange; 1449 else { 1450 if (FD->getStorageClass() == SC_Static && 1451 !FD->isInlineSpecified() && 1452 !SourceMgr.isInMainFile( 1453 SourceMgr.getExpansionLoc(FD->getLocation()))) 1454 Diag(DiagD->getLocation(), 1455 diag::warn_unneeded_static_internal_decl) 1456 << DiagD << DiagRange; 1457 else 1458 Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl) 1459 << /*function=*/0 << DiagD << DiagRange; 1460 } 1461 } else if (!FD->isTargetMultiVersion() || 1462 FD->isTargetMultiVersionDefault()) { 1463 if (FD->getDescribedFunctionTemplate()) 1464 Diag(DiagD->getLocation(), diag::warn_unused_template) 1465 << /*function=*/0 << DiagD << DiagRange; 1466 else 1467 Diag(DiagD->getLocation(), isa<CXXMethodDecl>(DiagD) 1468 ? 
diag::warn_unused_member_function 1469 : diag::warn_unused_function) 1470 << DiagD << DiagRange; 1471 } 1472 } else { 1473 const VarDecl *DiagD = cast<VarDecl>(*I)->getDefinition(); 1474 if (!DiagD) 1475 DiagD = cast<VarDecl>(*I); 1476 SourceRange DiagRange = DiagD->getLocation(); 1477 if (const auto *VTSD = dyn_cast<VarTemplateSpecializationDecl>(DiagD)) { 1478 if (const ASTTemplateArgumentListInfo *ASTTAL = 1479 VTSD->getTemplateArgsAsWritten()) 1480 DiagRange.setEnd(ASTTAL->RAngleLoc); 1481 } 1482 if (DiagD->isReferenced()) { 1483 Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl) 1484 << /*variable=*/1 << DiagD << DiagRange; 1485 } else if (DiagD->getDescribedVarTemplate()) { 1486 Diag(DiagD->getLocation(), diag::warn_unused_template) 1487 << /*variable=*/1 << DiagD << DiagRange; 1488 } else if (DiagD->getType().isConstQualified()) { 1489 const SourceManager &SM = SourceMgr; 1490 if (SM.getMainFileID() != SM.getFileID(DiagD->getLocation()) || 1491 !PP.getLangOpts().IsHeaderFile) 1492 Diag(DiagD->getLocation(), diag::warn_unused_const_variable) 1493 << DiagD << DiagRange; 1494 } else { 1495 Diag(DiagD->getLocation(), diag::warn_unused_variable) 1496 << DiagD << DiagRange; 1497 } 1498 } 1499 } 1500 1501 emitAndClearUnusedLocalTypedefWarnings(); 1502 } 1503 1504 if (!Diags.isIgnored(diag::warn_unused_private_field, SourceLocation())) { 1505 // FIXME: Load additional unused private field candidates from the external 1506 // source. 1507 RecordCompleteMap RecordsComplete; 1508 RecordCompleteMap MNCComplete; 1509 for (const NamedDecl *D : UnusedPrivateFields) { 1510 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D->getDeclContext()); 1511 if (RD && !RD->isUnion() && 1512 IsRecordFullyDefined(RD, RecordsComplete, MNCComplete)) { 1513 Diag(D->getLocation(), diag::warn_unused_private_field) 1514 << D->getDeclName(); 1515 } 1516 } 1517 } 1518 1519 if (!Diags.isIgnored(diag::warn_mismatched_delete_new, SourceLocation())) { 1520 if (ExternalSource) 1521 ExternalSource->ReadMismatchingDeleteExpressions(DeleteExprs); 1522 for (const auto &DeletedFieldInfo : DeleteExprs) { 1523 for (const auto &DeleteExprLoc : DeletedFieldInfo.second) { 1524 AnalyzeDeleteExprMismatch(DeletedFieldInfo.first, DeleteExprLoc.first, 1525 DeleteExprLoc.second); 1526 } 1527 } 1528 } 1529 1530 AnalysisWarnings.IssueWarnings(Context.getTranslationUnitDecl()); 1531 1532 if (Context.hasAnyFunctionEffects()) 1533 performFunctionEffectAnalysis(Context.getTranslationUnitDecl()); 1534 1535 // Check we've noticed that we're no longer parsing the initializer for every 1536 // variable. If we miss cases, then at best we have a performance issue and 1537 // at worst a rejects-valid bug. 1538 assert(ParsingInitForAutoVars.empty() && 1539 "Didn't unmark var as having its initializer parsed"); 1540 1541 if (!PP.isIncrementalProcessingEnabled()) 1542 TUScope = nullptr; 1543 } 1544 1545 1546 //===----------------------------------------------------------------------===// 1547 // Helper functions. 
1548 //===----------------------------------------------------------------------===// 1549 1550 DeclContext *Sema::getFunctionLevelDeclContext(bool AllowLambda) const { 1551 DeclContext *DC = CurContext; 1552 1553 while (true) { 1554 if (isa<BlockDecl>(DC) || isa<EnumDecl>(DC) || isa<CapturedDecl>(DC) || 1555 isa<RequiresExprBodyDecl>(DC)) { 1556 DC = DC->getParent(); 1557 } else if (!AllowLambda && isa<CXXMethodDecl>(DC) && 1558 cast<CXXMethodDecl>(DC)->getOverloadedOperator() == OO_Call && 1559 cast<CXXRecordDecl>(DC->getParent())->isLambda()) { 1560 DC = DC->getParent()->getParent(); 1561 } else break; 1562 } 1563 1564 return DC; 1565 } 1566 1567 /// getCurFunctionDecl - If inside of a function body, this returns a pointer 1568 /// to the function decl for the function being parsed. If we're currently 1569 /// in a 'block', this returns the containing context. 1570 FunctionDecl *Sema::getCurFunctionDecl(bool AllowLambda) const { 1571 DeclContext *DC = getFunctionLevelDeclContext(AllowLambda); 1572 return dyn_cast<FunctionDecl>(DC); 1573 } 1574 1575 ObjCMethodDecl *Sema::getCurMethodDecl() { 1576 DeclContext *DC = getFunctionLevelDeclContext(); 1577 while (isa<RecordDecl>(DC)) 1578 DC = DC->getParent(); 1579 return dyn_cast<ObjCMethodDecl>(DC); 1580 } 1581 1582 NamedDecl *Sema::getCurFunctionOrMethodDecl() const { 1583 DeclContext *DC = getFunctionLevelDeclContext(); 1584 if (isa<ObjCMethodDecl>(DC) || isa<FunctionDecl>(DC)) 1585 return cast<NamedDecl>(DC); 1586 return nullptr; 1587 } 1588 1589 LangAS Sema::getDefaultCXXMethodAddrSpace() const { 1590 if (getLangOpts().OpenCL) 1591 return getASTContext().getDefaultOpenCLPointeeAddrSpace(); 1592 return LangAS::Default; 1593 } 1594 1595 void Sema::EmitDiagnostic(unsigned DiagID, const DiagnosticBuilder &DB) { 1596 // FIXME: It doesn't make sense to me that DiagID is an incoming argument here 1597 // and yet we also use the current diag ID on the DiagnosticsEngine. This has 1598 // been made more painfully obvious by the refactor that introduced this 1599 // function, but it is possible that the incoming argument can be 1600 // eliminated. If it truly cannot be (for example, there is some reentrancy 1601 // issue I am not seeing yet), then there should at least be a clarifying 1602 // comment somewhere. 1603 Diagnostic DiagInfo(&Diags, DB); 1604 if (std::optional<TemplateDeductionInfo *> Info = isSFINAEContext()) { 1605 switch (DiagnosticIDs::getDiagnosticSFINAEResponse(DiagInfo.getID())) { 1606 case DiagnosticIDs::SFINAE_Report: 1607 // We'll report the diagnostic below. 1608 break; 1609 1610 case DiagnosticIDs::SFINAE_SubstitutionFailure: 1611 // Count this failure so that we know that template argument deduction 1612 // has failed. 1613 ++NumSFINAEErrors; 1614 1615 // Make a copy of this suppressed diagnostic and store it with the 1616 // template-deduction information. 1617 if (*Info && !(*Info)->hasSFINAEDiagnostic()) { 1618 (*Info)->addSFINAEDiagnostic(DiagInfo.getLocation(), 1619 PartialDiagnostic(DiagInfo, Context.getDiagAllocator())); 1620 } 1621 1622 Diags.setLastDiagnosticIgnored(true); 1623 return; 1624 1625 case DiagnosticIDs::SFINAE_AccessControl: { 1626 // Per C++ Core Issue 1170, access control is part of SFINAE. 1627 // Additionally, the AccessCheckingSFINAE flag can be used to temporarily 1628 // make access control a part of SFINAE for the purposes of checking 1629 // type traits. 
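// (For example, a type trait such as std::is_constructible must report a
// substitution failure, not a hard error, when the constructor it probes is
// inaccessible.)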
1630 if (!AccessCheckingSFINAE && !getLangOpts().CPlusPlus11) 1631 break; 1632 1633 SourceLocation Loc = DiagInfo.getLocation(); 1634 1635 // Suppress this diagnostic. 1636 ++NumSFINAEErrors; 1637 1638 // Make a copy of this suppressed diagnostic and store it with the 1639 // template-deduction information. 1640 if (*Info && !(*Info)->hasSFINAEDiagnostic()) { 1641 (*Info)->addSFINAEDiagnostic(DiagInfo.getLocation(), 1642 PartialDiagnostic(DiagInfo, Context.getDiagAllocator())); 1643 } 1644 1645 Diags.setLastDiagnosticIgnored(true); 1646 1647 // Now produce a C++98 compatibility warning. 1648 Diag(Loc, diag::warn_cxx98_compat_sfinae_access_control); 1649 1650 // The last diagnostic which Sema produced was ignored. Suppress any 1651 // notes attached to it. 1652 Diags.setLastDiagnosticIgnored(true); 1653 return; 1654 } 1655 1656 case DiagnosticIDs::SFINAE_Suppress: 1657 // Make a copy of this suppressed diagnostic and store it with the 1658 // template-deduction information; 1659 if (*Info) { 1660 (*Info)->addSuppressedDiagnostic(DiagInfo.getLocation(), 1661 PartialDiagnostic(DiagInfo, Context.getDiagAllocator())); 1662 } 1663 1664 // Suppress this diagnostic. 1665 Diags.setLastDiagnosticIgnored(true); 1666 return; 1667 } 1668 } 1669 1670 // Copy the diagnostic printing policy over the ASTContext printing policy. 1671 // TODO: Stop doing that. See: https://reviews.llvm.org/D45093#1090292 1672 Context.setPrintingPolicy(getPrintingPolicy()); 1673 1674 // Emit the diagnostic. 1675 if (!Diags.EmitDiagnostic(DB)) 1676 return; 1677 1678 // If this is not a note, and we're in a template instantiation 1679 // that is different from the last template instantiation where 1680 // we emitted an error, print a template instantiation 1681 // backtrace. 1682 if (!DiagnosticIDs::isBuiltinNote(DiagID)) 1683 PrintContextStack(); 1684 } 1685 1686 bool Sema::hasUncompilableErrorOccurred() const { 1687 if (getDiagnostics().hasUncompilableErrorOccurred()) 1688 return true; 1689 auto *FD = dyn_cast<FunctionDecl>(CurContext); 1690 if (!FD) 1691 return false; 1692 auto Loc = DeviceDeferredDiags.find(FD); 1693 if (Loc == DeviceDeferredDiags.end()) 1694 return false; 1695 for (auto PDAt : Loc->second) { 1696 if (DiagnosticIDs::isDefaultMappingAsError(PDAt.second.getDiagID())) 1697 return true; 1698 } 1699 return false; 1700 } 1701 1702 // Print notes showing how we can reach FD starting from an a priori 1703 // known-callable function. 1704 static void emitCallStackNotes(Sema &S, const FunctionDecl *FD) { 1705 auto FnIt = S.CUDA().DeviceKnownEmittedFns.find(FD); 1706 while (FnIt != S.CUDA().DeviceKnownEmittedFns.end()) { 1707 // Respect error limit. 1708 if (S.Diags.hasFatalErrorOccurred()) 1709 return; 1710 DiagnosticBuilder Builder( 1711 S.Diags.Report(FnIt->second.Loc, diag::note_called_by)); 1712 Builder << FnIt->second.FD; 1713 FnIt = S.CUDA().DeviceKnownEmittedFns.find(FnIt->second.FD); 1714 } 1715 } 1716 1717 namespace { 1718 1719 /// Helper class that emits deferred diagnostic messages if an entity directly 1720 /// or indirectly using the function that causes the deferred diagnostic 1721 /// messages is known to be emitted. 1722 /// 1723 /// During parsing of AST, certain diagnostic messages are recorded as deferred 1724 /// diagnostics since it is unknown whether the functions containing such 1725 /// diagnostics will be emitted. A list of potentially emitted functions and 1726 /// variables that may potentially trigger emission of functions are also 1727 /// recorded. 
DeferredDiagnosticsEmitter recursively visits the functions used
1728 /// by each function to emit deferred diagnostics.
1729 ///
1730 /// During the visit, certain OpenMP directives or the initializers of variables
1731 /// with certain OpenMP attributes cause any subsequently visited
1732 /// functions to enter a state called the OpenMP device context in this
1733 /// implementation. The state is exited when the directive or initializer is
1734 /// exited. This state can change the emission states of subsequent uses
1735 /// of functions.
1736 ///
1737 /// Conceptually, the functions or variables to be visited form a use graph
1738 /// in which a parent node uses its child nodes. At any point of the visit,
1739 /// the nodes traversed from the root to the current node form a use
1740 /// stack. The emission state of the current node depends on two factors:
1741 ///    1. the emission state of the root node
1742 ///    2. whether the current node is in OpenMP device context
1743 /// If a function is determined to be emitted, the deferred diagnostics it
1744 /// contains are emitted, together with information about the use stack.
1745 ///
1746 class DeferredDiagnosticsEmitter
1747     : public UsedDeclVisitor<DeferredDiagnosticsEmitter> {
1748 public:
1749   typedef UsedDeclVisitor<DeferredDiagnosticsEmitter> Inherited;
1750
1751   // Whether the function is already in the current use-path.
1752   llvm::SmallPtrSet<CanonicalDeclPtr<Decl>, 4> InUsePath;
1753
1754   // The current use-path.
1755   llvm::SmallVector<CanonicalDeclPtr<FunctionDecl>, 4> UsePath;
1756
1757   // Whether the function has already been fully visited. Done[0] is for the
1758   // case not in OpenMP device context. Done[1] is for the case in OpenMP
1759   // device context. We need two sets because diagnostics emission may be
1760   // different depending on whether we are in OpenMP device context.
1761   llvm::SmallPtrSet<CanonicalDeclPtr<Decl>, 4> DoneMap[2];
1762
1763   // Emission state of the root node of the current use graph.
1764   bool ShouldEmitRootNode;
1765
1766   // Current OpenMP device context level. It starts at 0; entering a device
1767   // context increments it and exiting decrements it. A non-zero value
1768   // indicates we are currently in a device context.
1769   unsigned InOMPDeviceContext;
1770
1771   DeferredDiagnosticsEmitter(Sema &S)
1772       : Inherited(S), ShouldEmitRootNode(false), InOMPDeviceContext(0) {}
1773
1774   bool shouldVisitDiscardedStmt() const { return false; }
1775
1776   void VisitOMPTargetDirective(OMPTargetDirective *Node) {
1777     ++InOMPDeviceContext;
1778     Inherited::VisitOMPTargetDirective(Node);
1779     --InOMPDeviceContext;
1780   }
1781
1782   void visitUsedDecl(SourceLocation Loc, Decl *D) {
1783     if (isa<VarDecl>(D))
1784       return;
1785     if (auto *FD = dyn_cast<FunctionDecl>(D))
1786       checkFunc(Loc, FD);
1787     else
1788       Inherited::visitUsedDecl(Loc, D);
1789   }
1790
1791   void checkVar(VarDecl *VD) {
1792     assert(VD->isFileVarDecl() &&
1793            "Should only check file-scope variables");
1794     if (auto *Init = VD->getInit()) {
1795       auto DevTy = OMPDeclareTargetDeclAttr::getDeviceType(VD);
1796       bool IsDev = DevTy && (*DevTy == OMPDeclareTargetDeclAttr::DT_NoHost ||
1797                              *DevTy == OMPDeclareTargetDeclAttr::DT_Any);
1798       if (IsDev)
1799         ++InOMPDeviceContext;
1800       this->Visit(Init);
1801       if (IsDev)
1802         --InOMPDeviceContext;
1803     }
1804   }
1805
1806   void checkFunc(SourceLocation Loc, FunctionDecl *FD) {
1807     auto &Done = DoneMap[InOMPDeviceContext > 0 ? 1 : 0];
1808     FunctionDecl *Caller = UsePath.empty() ?
nullptr : UsePath.back(); 1809 if ((!ShouldEmitRootNode && !S.getLangOpts().OpenMP && !Caller) || 1810 S.shouldIgnoreInHostDeviceCheck(FD) || InUsePath.count(FD)) 1811 return; 1812 // Finalize analysis of OpenMP-specific constructs. 1813 if (Caller && S.LangOpts.OpenMP && UsePath.size() == 1 && 1814 (ShouldEmitRootNode || InOMPDeviceContext)) 1815 S.OpenMP().finalizeOpenMPDelayedAnalysis(Caller, FD, Loc); 1816 if (Caller) 1817 S.CUDA().DeviceKnownEmittedFns[FD] = {Caller, Loc}; 1818 // Always emit deferred diagnostics for the direct users. This does not 1819 // lead to explosion of diagnostics since each user is visited at most 1820 // twice. 1821 if (ShouldEmitRootNode || InOMPDeviceContext) 1822 emitDeferredDiags(FD, Caller); 1823 // Do not revisit a function if the function body has been completely 1824 // visited before. 1825 if (!Done.insert(FD).second) 1826 return; 1827 InUsePath.insert(FD); 1828 UsePath.push_back(FD); 1829 if (auto *S = FD->getBody()) { 1830 this->Visit(S); 1831 } 1832 UsePath.pop_back(); 1833 InUsePath.erase(FD); 1834 } 1835 1836 void checkRecordedDecl(Decl *D) { 1837 if (auto *FD = dyn_cast<FunctionDecl>(D)) { 1838 ShouldEmitRootNode = S.getEmissionStatus(FD, /*Final=*/true) == 1839 Sema::FunctionEmissionStatus::Emitted; 1840 checkFunc(SourceLocation(), FD); 1841 } else 1842 checkVar(cast<VarDecl>(D)); 1843 } 1844 1845 // Emit any deferred diagnostics for FD 1846 void emitDeferredDiags(FunctionDecl *FD, bool ShowCallStack) { 1847 auto It = S.DeviceDeferredDiags.find(FD); 1848 if (It == S.DeviceDeferredDiags.end()) 1849 return; 1850 bool HasWarningOrError = false; 1851 bool FirstDiag = true; 1852 for (PartialDiagnosticAt &PDAt : It->second) { 1853 // Respect error limit. 1854 if (S.Diags.hasFatalErrorOccurred()) 1855 return; 1856 const SourceLocation &Loc = PDAt.first; 1857 const PartialDiagnostic &PD = PDAt.second; 1858 HasWarningOrError |= 1859 S.getDiagnostics().getDiagnosticLevel(PD.getDiagID(), Loc) >= 1860 DiagnosticsEngine::Warning; 1861 { 1862 DiagnosticBuilder Builder(S.Diags.Report(Loc, PD.getDiagID())); 1863 PD.Emit(Builder); 1864 } 1865 // Emit the note on the first diagnostic in case too many diagnostics 1866 // cause the note not emitted. 1867 if (FirstDiag && HasWarningOrError && ShowCallStack) { 1868 emitCallStackNotes(S, FD); 1869 FirstDiag = false; 1870 } 1871 } 1872 } 1873 }; 1874 } // namespace 1875 1876 void Sema::emitDeferredDiags() { 1877 if (ExternalSource) 1878 ExternalSource->ReadDeclsToCheckForDeferredDiags( 1879 DeclsToCheckForDeferredDiags); 1880 1881 if ((DeviceDeferredDiags.empty() && !LangOpts.OpenMP) || 1882 DeclsToCheckForDeferredDiags.empty()) 1883 return; 1884 1885 DeferredDiagnosticsEmitter DDE(*this); 1886 for (auto *D : DeclsToCheckForDeferredDiags) 1887 DDE.checkRecordedDecl(D); 1888 } 1889 1890 // In CUDA, there are some constructs which may appear in semantically-valid 1891 // code, but trigger errors if we ever generate code for the function in which 1892 // they appear. Essentially every construct you're not allowed to use on the 1893 // device falls into this category, because you are allowed to use these 1894 // constructs in a __host__ __device__ function, but only if that function is 1895 // never codegen'ed on the device. 1896 // 1897 // To handle semantic checking for these constructs, we keep track of the set of 1898 // functions we know will be emitted, either because we could tell a priori that 1899 // they would be emitted, or because they were transitively called by a 1900 // known-emitted function. 
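// (For example, a __host__ __device__ function that uses exceptions is fine
// as long as it is only ever codegen'ed for the host.)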
1901 // 1902 // We also keep a partial call graph of which not-known-emitted functions call 1903 // which other not-known-emitted functions. 1904 // 1905 // When we see something which is illegal if the current function is emitted 1906 // (usually by way of DiagIfDeviceCode, DiagIfHostCode, or 1907 // CheckCall), we first check if the current function is known-emitted. If 1908 // so, we immediately output the diagnostic. 1909 // 1910 // Otherwise, we "defer" the diagnostic. It sits in Sema::DeviceDeferredDiags 1911 // until we discover that the function is known-emitted, at which point we take 1912 // it out of this map and emit the diagnostic. 1913 1914 Sema::SemaDiagnosticBuilder::SemaDiagnosticBuilder(Kind K, SourceLocation Loc, 1915 unsigned DiagID, 1916 const FunctionDecl *Fn, 1917 Sema &S) 1918 : S(S), Loc(Loc), DiagID(DiagID), Fn(Fn), 1919 ShowCallStack(K == K_ImmediateWithCallStack || K == K_Deferred) { 1920 switch (K) { 1921 case K_Nop: 1922 break; 1923 case K_Immediate: 1924 case K_ImmediateWithCallStack: 1925 ImmediateDiag.emplace( 1926 ImmediateDiagBuilder(S.Diags.Report(Loc, DiagID), S, DiagID)); 1927 break; 1928 case K_Deferred: 1929 assert(Fn && "Must have a function to attach the deferred diag to."); 1930 auto &Diags = S.DeviceDeferredDiags[Fn]; 1931 PartialDiagId.emplace(Diags.size()); 1932 Diags.emplace_back(Loc, S.PDiag(DiagID)); 1933 break; 1934 } 1935 } 1936 1937 Sema::SemaDiagnosticBuilder::SemaDiagnosticBuilder(SemaDiagnosticBuilder &&D) 1938 : S(D.S), Loc(D.Loc), DiagID(D.DiagID), Fn(D.Fn), 1939 ShowCallStack(D.ShowCallStack), ImmediateDiag(D.ImmediateDiag), 1940 PartialDiagId(D.PartialDiagId) { 1941 // Clean the previous diagnostics. 1942 D.ShowCallStack = false; 1943 D.ImmediateDiag.reset(); 1944 D.PartialDiagId.reset(); 1945 } 1946 1947 Sema::SemaDiagnosticBuilder::~SemaDiagnosticBuilder() { 1948 if (ImmediateDiag) { 1949 // Emit our diagnostic and, if it was a warning or error, output a callstack 1950 // if Fn isn't a priori known-emitted. 1951 bool IsWarningOrError = S.getDiagnostics().getDiagnosticLevel( 1952 DiagID, Loc) >= DiagnosticsEngine::Warning; 1953 ImmediateDiag.reset(); // Emit the immediate diag. 1954 if (IsWarningOrError && ShowCallStack) 1955 emitCallStackNotes(S, Fn); 1956 } else { 1957 assert((!PartialDiagId || ShowCallStack) && 1958 "Must always show call stack for deferred diags."); 1959 } 1960 } 1961 1962 Sema::SemaDiagnosticBuilder 1963 Sema::targetDiag(SourceLocation Loc, unsigned DiagID, const FunctionDecl *FD) { 1964 FD = FD ? FD : getCurFunctionDecl(); 1965 if (LangOpts.OpenMP) 1966 return LangOpts.OpenMPIsTargetDevice 1967 ? OpenMP().diagIfOpenMPDeviceCode(Loc, DiagID, FD) 1968 : OpenMP().diagIfOpenMPHostCode(Loc, DiagID, FD); 1969 if (getLangOpts().CUDA) 1970 return getLangOpts().CUDAIsDevice ? CUDA().DiagIfDeviceCode(Loc, DiagID) 1971 : CUDA().DiagIfHostCode(Loc, DiagID); 1972 1973 if (getLangOpts().SYCLIsDevice) 1974 return SYCL().DiagIfDeviceCode(Loc, DiagID); 1975 1976 return SemaDiagnosticBuilder(SemaDiagnosticBuilder::K_Immediate, Loc, DiagID, 1977 FD, *this); 1978 } 1979 1980 void Sema::checkTypeSupport(QualType Ty, SourceLocation Loc, ValueDecl *D) { 1981 if (isUnevaluatedContext() || Ty.isNull()) 1982 return; 1983 1984 // The original idea behind checkTypeSupport function is that unused 1985 // declarations can be replaced with an array of bytes of the same size during 1986 // codegen, such replacement doesn't seem to be possible for types without 1987 // constant byte size like zero length arrays. 
So, do a deep check for SYCL. 1988 if (D && LangOpts.SYCLIsDevice) { 1989 llvm::DenseSet<QualType> Visited; 1990 SYCL().deepTypeCheckForDevice(Loc, Visited, D); 1991 } 1992 1993 Decl *C = cast<Decl>(getCurLexicalContext()); 1994 1995 // Memcpy operations for structs containing a member with unsupported type 1996 // are ok, though. 1997 if (const auto *MD = dyn_cast<CXXMethodDecl>(C)) { 1998 if ((MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) && 1999 MD->isTrivial()) 2000 return; 2001 2002 if (const auto *Ctor = dyn_cast<CXXConstructorDecl>(MD)) 2003 if (Ctor->isCopyOrMoveConstructor() && Ctor->isTrivial()) 2004 return; 2005 } 2006 2007 // Try to associate errors with the lexical context, if that is a function, or 2008 // the value declaration otherwise. 2009 const FunctionDecl *FD = isa<FunctionDecl>(C) 2010 ? cast<FunctionDecl>(C) 2011 : dyn_cast_or_null<FunctionDecl>(D); 2012 2013 auto CheckDeviceType = [&](QualType Ty) { 2014 if (Ty->isDependentType()) 2015 return; 2016 2017 if (Ty->isBitIntType()) { 2018 if (!Context.getTargetInfo().hasBitIntType()) { 2019 PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type); 2020 if (D) 2021 PD << D; 2022 else 2023 PD << "expression"; 2024 targetDiag(Loc, PD, FD) 2025 << false /*show bit size*/ << 0 /*bitsize*/ << false /*return*/ 2026 << Ty << Context.getTargetInfo().getTriple().str(); 2027 } 2028 return; 2029 } 2030 2031 // Check if we are dealing with two 'long double' but with different 2032 // semantics. 2033 bool LongDoubleMismatched = false; 2034 if (Ty->isRealFloatingType() && Context.getTypeSize(Ty) == 128) { 2035 const llvm::fltSemantics &Sem = Context.getFloatTypeSemantics(Ty); 2036 if ((&Sem != &llvm::APFloat::PPCDoubleDouble() && 2037 !Context.getTargetInfo().hasFloat128Type()) || 2038 (&Sem == &llvm::APFloat::PPCDoubleDouble() && 2039 !Context.getTargetInfo().hasIbm128Type())) 2040 LongDoubleMismatched = true; 2041 } 2042 2043 if ((Ty->isFloat16Type() && !Context.getTargetInfo().hasFloat16Type()) || 2044 (Ty->isFloat128Type() && !Context.getTargetInfo().hasFloat128Type()) || 2045 (Ty->isIbm128Type() && !Context.getTargetInfo().hasIbm128Type()) || 2046 (Ty->isIntegerType() && Context.getTypeSize(Ty) == 128 && 2047 !Context.getTargetInfo().hasInt128Type()) || 2048 (Ty->isBFloat16Type() && !Context.getTargetInfo().hasBFloat16Type() && 2049 !LangOpts.CUDAIsDevice) || 2050 LongDoubleMismatched) { 2051 PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type); 2052 if (D) 2053 PD << D; 2054 else 2055 PD << "expression"; 2056 2057 if (targetDiag(Loc, PD, FD) 2058 << true /*show bit size*/ 2059 << static_cast<unsigned>(Context.getTypeSize(Ty)) << Ty 2060 << false /*return*/ << Context.getTargetInfo().getTriple().str()) { 2061 if (D) 2062 D->setInvalidDecl(); 2063 } 2064 if (D) 2065 targetDiag(D->getLocation(), diag::note_defined_here, FD) << D; 2066 } 2067 }; 2068 2069 auto CheckType = [&](QualType Ty, bool IsRetTy = false) { 2070 if (LangOpts.SYCLIsDevice || 2071 (LangOpts.OpenMP && LangOpts.OpenMPIsTargetDevice) || 2072 LangOpts.CUDAIsDevice) 2073 CheckDeviceType(Ty); 2074 2075 QualType UnqualTy = Ty.getCanonicalType().getUnqualifiedType(); 2076 const TargetInfo &TI = Context.getTargetInfo(); 2077 if (!TI.hasLongDoubleType() && UnqualTy == Context.LongDoubleTy) { 2078 PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type); 2079 if (D) 2080 PD << D; 2081 else 2082 PD << "expression"; 2083 2084 if (Diag(Loc, PD, FD) 2085 << false /*show bit size*/ << 0 << Ty << false /*return*/ 2086 << 
TI.getTriple().str()) { 2087 if (D) 2088 D->setInvalidDecl(); 2089 } 2090 if (D) 2091 targetDiag(D->getLocation(), diag::note_defined_here, FD) << D; 2092 } 2093 2094 bool IsDouble = UnqualTy == Context.DoubleTy; 2095 bool IsFloat = UnqualTy == Context.FloatTy; 2096 if (IsRetTy && !TI.hasFPReturn() && (IsDouble || IsFloat)) { 2097 PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type); 2098 if (D) 2099 PD << D; 2100 else 2101 PD << "expression"; 2102 2103 if (Diag(Loc, PD, FD) 2104 << false /*show bit size*/ << 0 << Ty << true /*return*/ 2105 << TI.getTriple().str()) { 2106 if (D) 2107 D->setInvalidDecl(); 2108 } 2109 if (D) 2110 targetDiag(D->getLocation(), diag::note_defined_here, FD) << D; 2111 } 2112 2113 if (TI.hasRISCVVTypes() && Ty->isRVVSizelessBuiltinType() && FD) { 2114 llvm::StringMap<bool> CallerFeatureMap; 2115 Context.getFunctionFeatureMap(CallerFeatureMap, FD); 2116 RISCV().checkRVVTypeSupport(Ty, Loc, D, CallerFeatureMap); 2117 } 2118 2119 // Don't allow SVE types in functions without a SVE target. 2120 if (Ty->isSVESizelessBuiltinType() && FD) { 2121 llvm::StringMap<bool> CallerFeatureMap; 2122 Context.getFunctionFeatureMap(CallerFeatureMap, FD); 2123 if (!Builtin::evaluateRequiredTargetFeatures("sve", CallerFeatureMap)) { 2124 if (!Builtin::evaluateRequiredTargetFeatures("sme", CallerFeatureMap)) 2125 Diag(Loc, diag::err_sve_vector_in_non_sve_target) << Ty; 2126 else if (!IsArmStreamingFunction(FD, 2127 /*IncludeLocallyStreaming=*/true)) { 2128 Diag(Loc, diag::err_sve_vector_in_non_streaming_function) << Ty; 2129 } 2130 } 2131 } 2132 }; 2133 2134 CheckType(Ty); 2135 if (const auto *FPTy = dyn_cast<FunctionProtoType>(Ty)) { 2136 for (const auto &ParamTy : FPTy->param_types()) 2137 CheckType(ParamTy); 2138 CheckType(FPTy->getReturnType(), /*IsRetTy=*/true); 2139 } 2140 if (const auto *FNPTy = dyn_cast<FunctionNoProtoType>(Ty)) 2141 CheckType(FNPTy->getReturnType(), /*IsRetTy=*/true); 2142 } 2143 2144 bool Sema::findMacroSpelling(SourceLocation &locref, StringRef name) { 2145 SourceLocation loc = locref; 2146 if (!loc.isMacroID()) return false; 2147 2148 // There's no good way right now to look at the intermediate 2149 // expansions, so just jump to the expansion location. 2150 loc = getSourceManager().getExpansionLoc(loc); 2151 2152 // If that's written with the name, stop here. 2153 SmallString<16> buffer; 2154 if (getPreprocessor().getSpelling(loc, buffer) == name) { 2155 locref = loc; 2156 return true; 2157 } 2158 return false; 2159 } 2160 2161 Scope *Sema::getScopeForContext(DeclContext *Ctx) { 2162 2163 if (!Ctx) 2164 return nullptr; 2165 2166 Ctx = Ctx->getPrimaryContext(); 2167 for (Scope *S = getCurScope(); S; S = S->getParent()) { 2168 // Ignore scopes that cannot have declarations. This is important for 2169 // out-of-line definitions of static class members. 2170 if (S->getFlags() & (Scope::DeclScope | Scope::TemplateParamScope)) 2171 if (DeclContext *Entity = S->getEntity()) 2172 if (Ctx == Entity->getPrimaryContext()) 2173 return S; 2174 } 2175 2176 return nullptr; 2177 } 2178 2179 /// Enter a new function scope 2180 void Sema::PushFunctionScope() { 2181 if (FunctionScopes.empty() && CachedFunctionScope) { 2182 // Use CachedFunctionScope to avoid allocating memory when possible. 
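// A plain-function scope is returned to this cache by
// PoppedFunctionScopeDeleter when it is popped, so repeated parses can reuse
// a single allocation.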
2183 CachedFunctionScope->Clear(); 2184 FunctionScopes.push_back(CachedFunctionScope.release()); 2185 } else { 2186 FunctionScopes.push_back(new FunctionScopeInfo(getDiagnostics())); 2187 } 2188 if (LangOpts.OpenMP) 2189 OpenMP().pushOpenMPFunctionRegion(); 2190 } 2191 2192 void Sema::PushBlockScope(Scope *BlockScope, BlockDecl *Block) { 2193 FunctionScopes.push_back(new BlockScopeInfo(getDiagnostics(), 2194 BlockScope, Block)); 2195 CapturingFunctionScopes++; 2196 } 2197 2198 LambdaScopeInfo *Sema::PushLambdaScope() { 2199 LambdaScopeInfo *const LSI = new LambdaScopeInfo(getDiagnostics()); 2200 FunctionScopes.push_back(LSI); 2201 CapturingFunctionScopes++; 2202 return LSI; 2203 } 2204 2205 void Sema::RecordParsingTemplateParameterDepth(unsigned Depth) { 2206 if (LambdaScopeInfo *const LSI = getCurLambda()) { 2207 LSI->AutoTemplateParameterDepth = Depth; 2208 return; 2209 } 2210 llvm_unreachable( 2211 "Remove assertion if intentionally called in a non-lambda context."); 2212 } 2213 2214 // Check that the type of the VarDecl has an accessible copy constructor and 2215 // resolve its destructor's exception specification. 2216 // This also performs initialization of block variables when they are moved 2217 // to the heap. It uses the same rules as applicable for implicit moves 2218 // according to the C++ standard in effect ([class.copy.elision]p3). 2219 static void checkEscapingByref(VarDecl *VD, Sema &S) { 2220 QualType T = VD->getType(); 2221 EnterExpressionEvaluationContext scope( 2222 S, Sema::ExpressionEvaluationContext::PotentiallyEvaluated); 2223 SourceLocation Loc = VD->getLocation(); 2224 Expr *VarRef = 2225 new (S.Context) DeclRefExpr(S.Context, VD, false, T, VK_LValue, Loc); 2226 ExprResult Result; 2227 auto IE = InitializedEntity::InitializeBlock(Loc, T); 2228 if (S.getLangOpts().CPlusPlus23) { 2229 auto *E = ImplicitCastExpr::Create(S.Context, T, CK_NoOp, VarRef, nullptr, 2230 VK_XValue, FPOptionsOverride()); 2231 Result = S.PerformCopyInitialization(IE, SourceLocation(), E); 2232 } else { 2233 Result = S.PerformMoveOrCopyInitialization( 2234 IE, Sema::NamedReturnInfo{VD, Sema::NamedReturnInfo::MoveEligible}, 2235 VarRef); 2236 } 2237 2238 if (!Result.isInvalid()) { 2239 Result = S.MaybeCreateExprWithCleanups(Result); 2240 Expr *Init = Result.getAs<Expr>(); 2241 S.Context.setBlockVarCopyInit(VD, Init, S.canThrow(Init)); 2242 } 2243 2244 // The destructor's exception specification is needed when IRGen generates 2245 // block copy/destroy functions. Resolve it here. 2246 if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl()) 2247 if (CXXDestructorDecl *DD = RD->getDestructor()) { 2248 auto *FPT = DD->getType()->castAs<FunctionProtoType>(); 2249 S.ResolveExceptionSpec(Loc, FPT); 2250 } 2251 } 2252 2253 static void markEscapingByrefs(const FunctionScopeInfo &FSI, Sema &S) { 2254 // Set the EscapingByref flag of __block variables captured by 2255 // escaping blocks. 2256 for (const BlockDecl *BD : FSI.Blocks) { 2257 for (const BlockDecl::Capture &BC : BD->captures()) { 2258 VarDecl *VD = BC.getVariable(); 2259 if (VD->hasAttr<BlocksAttr>()) { 2260 // Nothing to do if this is a __block variable captured by a 2261 // non-escaping block. 2262 if (BD->doesNotEscape()) 2263 continue; 2264 VD->setEscapingByref(); 2265 } 2266 // Check whether the captured variable is or contains an object of 2267 // non-trivial C union type. 
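// (Non-trivial C unions arise, for example, from ARC ownership-qualified
// Objective-C pointer members.)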
2268 QualType CapType = BC.getVariable()->getType(); 2269 if (CapType.hasNonTrivialToPrimitiveDestructCUnion() || 2270 CapType.hasNonTrivialToPrimitiveCopyCUnion()) 2271 S.checkNonTrivialCUnion(BC.getVariable()->getType(), 2272 BD->getCaretLocation(), 2273 Sema::NTCUC_BlockCapture, 2274 Sema::NTCUK_Destruct|Sema::NTCUK_Copy); 2275 } 2276 } 2277 2278 for (VarDecl *VD : FSI.ByrefBlockVars) { 2279 // __block variables might require us to capture a copy-initializer. 2280 if (!VD->isEscapingByref()) 2281 continue; 2282 // It's currently invalid to ever have a __block variable with an 2283 // array type; should we diagnose that here? 2284 // Regardless, we don't want to ignore array nesting when 2285 // constructing this copy. 2286 if (VD->getType()->isStructureOrClassType()) 2287 checkEscapingByref(VD, S); 2288 } 2289 } 2290 2291 Sema::PoppedFunctionScopePtr 2292 Sema::PopFunctionScopeInfo(const AnalysisBasedWarnings::Policy *WP, 2293 const Decl *D, QualType BlockType) { 2294 assert(!FunctionScopes.empty() && "mismatched push/pop!"); 2295 2296 markEscapingByrefs(*FunctionScopes.back(), *this); 2297 2298 PoppedFunctionScopePtr Scope(FunctionScopes.pop_back_val(), 2299 PoppedFunctionScopeDeleter(this)); 2300 2301 if (LangOpts.OpenMP) 2302 OpenMP().popOpenMPFunctionRegion(Scope.get()); 2303 2304 // Issue any analysis-based warnings. 2305 if (WP && D) 2306 AnalysisWarnings.IssueWarnings(*WP, Scope.get(), D, BlockType); 2307 else 2308 for (const auto &PUD : Scope->PossiblyUnreachableDiags) 2309 Diag(PUD.Loc, PUD.PD); 2310 2311 return Scope; 2312 } 2313 2314 void Sema::PoppedFunctionScopeDeleter:: 2315 operator()(sema::FunctionScopeInfo *Scope) const { 2316 if (!Scope->isPlainFunction()) 2317 Self->CapturingFunctionScopes--; 2318 // Stash the function scope for later reuse if it's for a normal function. 2319 if (Scope->isPlainFunction() && !Self->CachedFunctionScope) 2320 Self->CachedFunctionScope.reset(Scope); 2321 else 2322 delete Scope; 2323 } 2324 2325 void Sema::PushCompoundScope(bool IsStmtExpr) { 2326 getCurFunction()->CompoundScopes.push_back( 2327 CompoundScopeInfo(IsStmtExpr, getCurFPFeatures())); 2328 } 2329 2330 void Sema::PopCompoundScope() { 2331 FunctionScopeInfo *CurFunction = getCurFunction(); 2332 assert(!CurFunction->CompoundScopes.empty() && "mismatched push/pop"); 2333 2334 CurFunction->CompoundScopes.pop_back(); 2335 } 2336 2337 bool Sema::hasAnyUnrecoverableErrorsInThisFunction() const { 2338 return getCurFunction()->hasUnrecoverableErrorOccurred(); 2339 } 2340 2341 void Sema::setFunctionHasBranchIntoScope() { 2342 if (!FunctionScopes.empty()) 2343 FunctionScopes.back()->setHasBranchIntoScope(); 2344 } 2345 2346 void Sema::setFunctionHasBranchProtectedScope() { 2347 if (!FunctionScopes.empty()) 2348 FunctionScopes.back()->setHasBranchProtectedScope(); 2349 } 2350 2351 void Sema::setFunctionHasIndirectGoto() { 2352 if (!FunctionScopes.empty()) 2353 FunctionScopes.back()->setHasIndirectGoto(); 2354 } 2355 2356 void Sema::setFunctionHasMustTail() { 2357 if (!FunctionScopes.empty()) 2358 FunctionScopes.back()->setHasMustTail(); 2359 } 2360 2361 BlockScopeInfo *Sema::getCurBlock() { 2362 if (FunctionScopes.empty()) 2363 return nullptr; 2364 2365 auto CurBSI = dyn_cast<BlockScopeInfo>(FunctionScopes.back()); 2366 if (CurBSI && CurBSI->TheDecl && 2367 !CurBSI->TheDecl->Encloses(CurContext)) { 2368 // We have switched contexts due to template instantiation. 
2369 assert(!CodeSynthesisContexts.empty()); 2370 return nullptr; 2371 } 2372 2373 return CurBSI; 2374 } 2375 2376 FunctionScopeInfo *Sema::getEnclosingFunction() const { 2377 if (FunctionScopes.empty()) 2378 return nullptr; 2379 2380 for (int e = FunctionScopes.size() - 1; e >= 0; --e) { 2381 if (isa<sema::BlockScopeInfo>(FunctionScopes[e])) 2382 continue; 2383 return FunctionScopes[e]; 2384 } 2385 return nullptr; 2386 } 2387 2388 CapturingScopeInfo *Sema::getEnclosingLambdaOrBlock() const { 2389 for (auto *Scope : llvm::reverse(FunctionScopes)) { 2390 if (auto *CSI = dyn_cast<CapturingScopeInfo>(Scope)) { 2391 auto *LSI = dyn_cast<LambdaScopeInfo>(CSI); 2392 if (LSI && LSI->Lambda && !LSI->Lambda->Encloses(CurContext) && 2393 LSI->AfterParameterList) { 2394 // We have switched contexts due to template instantiation. 2395 // FIXME: We should swap out the FunctionScopes during code synthesis 2396 // so that we don't need to check for this. 2397 assert(!CodeSynthesisContexts.empty()); 2398 return nullptr; 2399 } 2400 return CSI; 2401 } 2402 } 2403 return nullptr; 2404 } 2405 2406 LambdaScopeInfo *Sema::getCurLambda(bool IgnoreNonLambdaCapturingScope) { 2407 if (FunctionScopes.empty()) 2408 return nullptr; 2409 2410 auto I = FunctionScopes.rbegin(); 2411 if (IgnoreNonLambdaCapturingScope) { 2412 auto E = FunctionScopes.rend(); 2413 while (I != E && isa<CapturingScopeInfo>(*I) && !isa<LambdaScopeInfo>(*I)) 2414 ++I; 2415 if (I == E) 2416 return nullptr; 2417 } 2418 auto *CurLSI = dyn_cast<LambdaScopeInfo>(*I); 2419 if (CurLSI && CurLSI->Lambda && CurLSI->CallOperator && 2420 !CurLSI->Lambda->Encloses(CurContext) && CurLSI->AfterParameterList) { 2421 // We have switched contexts due to template instantiation. 2422 assert(!CodeSynthesisContexts.empty()); 2423 return nullptr; 2424 } 2425 2426 return CurLSI; 2427 } 2428 2429 // We have a generic lambda if we parsed auto parameters, or we have 2430 // an associated template parameter list. 2431 LambdaScopeInfo *Sema::getCurGenericLambda() { 2432 if (LambdaScopeInfo *LSI = getCurLambda()) { 2433 return (LSI->TemplateParams.size() || 2434 LSI->GLTemplateParameterList) ? LSI : nullptr; 2435 } 2436 return nullptr; 2437 } 2438 2439 2440 void Sema::ActOnComment(SourceRange Comment) { 2441 if (!LangOpts.RetainCommentsFromSystemHeaders && 2442 SourceMgr.isInSystemHeader(Comment.getBegin())) 2443 return; 2444 RawComment RC(SourceMgr, Comment, LangOpts.CommentOpts, false); 2445 if (RC.isAlmostTrailingComment() || RC.hasUnsupportedSplice(SourceMgr)) { 2446 SourceRange MagicMarkerRange(Comment.getBegin(), 2447 Comment.getBegin().getLocWithOffset(3)); 2448 StringRef MagicMarkerText; 2449 switch (RC.getKind()) { 2450 case RawComment::RCK_OrdinaryBCPL: 2451 MagicMarkerText = "///<"; 2452 break; 2453 case RawComment::RCK_OrdinaryC: 2454 MagicMarkerText = "/**<"; 2455 break; 2456 case RawComment::RCK_Invalid: 2457 // FIXME: are there other scenarios that could produce an invalid 2458 // raw comment here? 2459 Diag(Comment.getBegin(), diag::warn_splice_in_doxygen_comment); 2460 return; 2461 default: 2462 llvm_unreachable("if this is an almost Doxygen comment, " 2463 "it should be ordinary"); 2464 } 2465 Diag(Comment.getBegin(), diag::warn_not_a_doxygen_trailing_member_comment) << 2466 FixItHint::CreateReplacement(MagicMarkerRange, MagicMarkerText); 2467 } 2468 Context.addComment(RC); 2469 } 2470 2471 // Pin this vtable to this file. 
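// Defining the virtual destructor out of line below makes it the class's key
// function, so the vtable is emitted only in this object file.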
2472 ExternalSemaSource::~ExternalSemaSource() {} 2473 char ExternalSemaSource::ID; 2474 2475 void ExternalSemaSource::ReadMethodPool(Selector Sel) { } 2476 void ExternalSemaSource::updateOutOfDateSelector(Selector Sel) { } 2477 2478 void ExternalSemaSource::ReadKnownNamespaces( 2479 SmallVectorImpl<NamespaceDecl *> &Namespaces) { 2480 } 2481 2482 void ExternalSemaSource::ReadUndefinedButUsed( 2483 llvm::MapVector<NamedDecl *, SourceLocation> &Undefined) {} 2484 2485 void ExternalSemaSource::ReadMismatchingDeleteExpressions(llvm::MapVector< 2486 FieldDecl *, llvm::SmallVector<std::pair<SourceLocation, bool>, 4>> &) {} 2487 2488 bool Sema::tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, 2489 UnresolvedSetImpl &OverloadSet) { 2490 ZeroArgCallReturnTy = QualType(); 2491 OverloadSet.clear(); 2492 2493 const OverloadExpr *Overloads = nullptr; 2494 bool IsMemExpr = false; 2495 if (E.getType() == Context.OverloadTy) { 2496 OverloadExpr::FindResult FR = OverloadExpr::find(&E); 2497 2498 // Ignore overloads that are pointer-to-member constants. 2499 if (FR.HasFormOfMemberPointer) 2500 return false; 2501 2502 Overloads = FR.Expression; 2503 } else if (E.getType() == Context.BoundMemberTy) { 2504 Overloads = dyn_cast<UnresolvedMemberExpr>(E.IgnoreParens()); 2505 IsMemExpr = true; 2506 } 2507 2508 bool Ambiguous = false; 2509 bool IsMV = false; 2510 2511 if (Overloads) { 2512 for (OverloadExpr::decls_iterator it = Overloads->decls_begin(), 2513 DeclsEnd = Overloads->decls_end(); it != DeclsEnd; ++it) { 2514 OverloadSet.addDecl(*it); 2515 2516 // Check whether the function is a non-template, non-member which takes no 2517 // arguments. 2518 if (IsMemExpr) 2519 continue; 2520 if (const FunctionDecl *OverloadDecl 2521 = dyn_cast<FunctionDecl>((*it)->getUnderlyingDecl())) { 2522 if (OverloadDecl->getMinRequiredArguments() == 0) { 2523 if (!ZeroArgCallReturnTy.isNull() && !Ambiguous && 2524 (!IsMV || !(OverloadDecl->isCPUDispatchMultiVersion() || 2525 OverloadDecl->isCPUSpecificMultiVersion()))) { 2526 ZeroArgCallReturnTy = QualType(); 2527 Ambiguous = true; 2528 } else { 2529 ZeroArgCallReturnTy = OverloadDecl->getReturnType(); 2530 IsMV = OverloadDecl->isCPUDispatchMultiVersion() || 2531 OverloadDecl->isCPUSpecificMultiVersion(); 2532 } 2533 } 2534 } 2535 } 2536 2537 // If it's not a member, use better machinery to try to resolve the call 2538 if (!IsMemExpr) 2539 return !ZeroArgCallReturnTy.isNull(); 2540 } 2541 2542 // Attempt to call the member with no arguments - this will correctly handle 2543 // member templates with defaults/deduction of template arguments, overloads 2544 // with default arguments, etc. 2545 if (IsMemExpr && !E.isTypeDependent()) { 2546 Sema::TentativeAnalysisScope Trap(*this); 2547 ExprResult R = BuildCallToMemberFunction(nullptr, &E, SourceLocation(), {}, 2548 SourceLocation()); 2549 if (R.isUsable()) { 2550 ZeroArgCallReturnTy = R.get()->getType(); 2551 return true; 2552 } 2553 return false; 2554 } 2555 2556 if (const auto *DeclRef = dyn_cast<DeclRefExpr>(E.IgnoreParens())) { 2557 if (const auto *Fun = dyn_cast<FunctionDecl>(DeclRef->getDecl())) { 2558 if (Fun->getMinRequiredArguments() == 0) 2559 ZeroArgCallReturnTy = Fun->getReturnType(); 2560 return true; 2561 } 2562 } 2563 2564 // We don't have an expression that's convenient to get a FunctionDecl from, 2565 // but we can at least check if the type is "function of 0 arguments". 
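// Look through a pointer (or reference) to the function first, then fall back
// to the expression's own type.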
2566 QualType ExprTy = E.getType(); 2567 const FunctionType *FunTy = nullptr; 2568 QualType PointeeTy = ExprTy->getPointeeType(); 2569 if (!PointeeTy.isNull()) 2570 FunTy = PointeeTy->getAs<FunctionType>(); 2571 if (!FunTy) 2572 FunTy = ExprTy->getAs<FunctionType>(); 2573 2574 if (const auto *FPT = dyn_cast_if_present<FunctionProtoType>(FunTy)) { 2575 if (FPT->getNumParams() == 0) 2576 ZeroArgCallReturnTy = FunTy->getReturnType(); 2577 return true; 2578 } 2579 return false; 2580 } 2581 2582 /// Give notes for a set of overloads. 2583 /// 2584 /// A companion to tryExprAsCall. In cases when the name that the programmer 2585 /// wrote was an overloaded function, we may be able to make some guesses about 2586 /// plausible overloads based on their return types; such guesses can be handed 2587 /// off to this method to be emitted as notes. 2588 /// 2589 /// \param Overloads - The overloads to note. 2590 /// \param FinalNoteLoc - If we've suppressed printing some overloads due to 2591 /// -fshow-overloads=best, this is the location to attach to the note about too 2592 /// many candidates. Typically this will be the location of the original 2593 /// ill-formed expression. 2594 static void noteOverloads(Sema &S, const UnresolvedSetImpl &Overloads, 2595 const SourceLocation FinalNoteLoc) { 2596 unsigned ShownOverloads = 0; 2597 unsigned SuppressedOverloads = 0; 2598 for (UnresolvedSetImpl::iterator It = Overloads.begin(), 2599 DeclsEnd = Overloads.end(); It != DeclsEnd; ++It) { 2600 if (ShownOverloads >= S.Diags.getNumOverloadCandidatesToShow()) { 2601 ++SuppressedOverloads; 2602 continue; 2603 } 2604 2605 const NamedDecl *Fn = (*It)->getUnderlyingDecl(); 2606 // Don't print overloads for non-default multiversioned functions. 2607 if (const auto *FD = Fn->getAsFunction()) { 2608 if (FD->isMultiVersion() && FD->hasAttr<TargetAttr>() && 2609 !FD->getAttr<TargetAttr>()->isDefaultVersion()) 2610 continue; 2611 if (FD->isMultiVersion() && FD->hasAttr<TargetVersionAttr>() && 2612 !FD->getAttr<TargetVersionAttr>()->isDefaultVersion()) 2613 continue; 2614 } 2615 S.Diag(Fn->getLocation(), diag::note_possible_target_of_call); 2616 ++ShownOverloads; 2617 } 2618 2619 S.Diags.overloadCandidatesShown(ShownOverloads); 2620 2621 if (SuppressedOverloads) 2622 S.Diag(FinalNoteLoc, diag::note_ovl_too_many_candidates) 2623 << SuppressedOverloads; 2624 } 2625 2626 static void notePlausibleOverloads(Sema &S, SourceLocation Loc, 2627 const UnresolvedSetImpl &Overloads, 2628 bool (*IsPlausibleResult)(QualType)) { 2629 if (!IsPlausibleResult) 2630 return noteOverloads(S, Overloads, Loc); 2631 2632 UnresolvedSet<2> PlausibleOverloads; 2633 for (OverloadExpr::decls_iterator It = Overloads.begin(), 2634 DeclsEnd = Overloads.end(); It != DeclsEnd; ++It) { 2635 const auto *OverloadDecl = cast<FunctionDecl>(*It); 2636 QualType OverloadResultTy = OverloadDecl->getReturnType(); 2637 if (IsPlausibleResult(OverloadResultTy)) 2638 PlausibleOverloads.addDecl(It.getDecl()); 2639 } 2640 noteOverloads(S, PlausibleOverloads, Loc); 2641 } 2642 2643 /// Determine whether the given expression can be called by just 2644 /// putting parentheses after it. Notably, expressions with unary 2645 /// operators can't be because the unary operator will start parsing 2646 /// outside the call. 
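/// For example, appending "()" to "*fp" yields "*fp()", which parses as
/// "*(fp())" rather than "(*fp)()".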
2647 static bool IsCallableWithAppend(const Expr *E) { 2648 E = E->IgnoreImplicit(); 2649 return (!isa<CStyleCastExpr>(E) && 2650 !isa<UnaryOperator>(E) && 2651 !isa<BinaryOperator>(E) && 2652 !isa<CXXOperatorCallExpr>(E)); 2653 } 2654 2655 static bool IsCPUDispatchCPUSpecificMultiVersion(const Expr *E) { 2656 if (const auto *UO = dyn_cast<UnaryOperator>(E)) 2657 E = UO->getSubExpr(); 2658 2659 if (const auto *ULE = dyn_cast<UnresolvedLookupExpr>(E)) { 2660 if (ULE->getNumDecls() == 0) 2661 return false; 2662 2663 const NamedDecl *ND = *ULE->decls_begin(); 2664 if (const auto *FD = dyn_cast<FunctionDecl>(ND)) 2665 return FD->isCPUDispatchMultiVersion() || FD->isCPUSpecificMultiVersion(); 2666 } 2667 return false; 2668 } 2669 2670 bool Sema::tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, 2671 bool ForceComplain, 2672 bool (*IsPlausibleResult)(QualType)) { 2673 SourceLocation Loc = E.get()->getExprLoc(); 2674 SourceRange Range = E.get()->getSourceRange(); 2675 UnresolvedSet<4> Overloads; 2676 2677 // If this is a SFINAE context, don't try anything that might trigger ADL 2678 // prematurely. 2679 if (!isSFINAEContext()) { 2680 QualType ZeroArgCallTy; 2681 if (tryExprAsCall(*E.get(), ZeroArgCallTy, Overloads) && 2682 !ZeroArgCallTy.isNull() && 2683 (!IsPlausibleResult || IsPlausibleResult(ZeroArgCallTy))) { 2684 // At this point, we know E is potentially callable with 0 2685 // arguments and that it returns something of a reasonable type, 2686 // so we can emit a fixit and carry on pretending that E was 2687 // actually a CallExpr. 2688 SourceLocation ParenInsertionLoc = getLocForEndOfToken(Range.getEnd()); 2689 bool IsMV = IsCPUDispatchCPUSpecificMultiVersion(E.get()); 2690 Diag(Loc, PD) << /*zero-arg*/ 1 << IsMV << Range 2691 << (IsCallableWithAppend(E.get()) 2692 ? FixItHint::CreateInsertion(ParenInsertionLoc, 2693 "()") 2694 : FixItHint()); 2695 if (!IsMV) 2696 notePlausibleOverloads(*this, Loc, Overloads, IsPlausibleResult); 2697 2698 // FIXME: Try this before emitting the fixit, and suppress diagnostics 2699 // while doing so. 2700 E = BuildCallExpr(nullptr, E.get(), Range.getEnd(), {}, 2701 Range.getEnd().getLocWithOffset(1)); 2702 return true; 2703 } 2704 } 2705 if (!ForceComplain) return false; 2706 2707 bool IsMV = IsCPUDispatchCPUSpecificMultiVersion(E.get()); 2708 Diag(Loc, PD) << /*not zero-arg*/ 0 << IsMV << Range; 2709 if (!IsMV) 2710 notePlausibleOverloads(*this, Loc, Overloads, IsPlausibleResult); 2711 E = ExprError(); 2712 return true; 2713 } 2714 2715 IdentifierInfo *Sema::getSuperIdentifier() const { 2716 if (!Ident_super) 2717 Ident_super = &Context.Idents.get("super"); 2718 return Ident_super; 2719 } 2720 2721 void Sema::PushCapturedRegionScope(Scope *S, CapturedDecl *CD, RecordDecl *RD, 2722 CapturedRegionKind K, 2723 unsigned OpenMPCaptureLevel) { 2724 auto *CSI = new CapturedRegionScopeInfo( 2725 getDiagnostics(), S, CD, RD, CD->getContextParam(), K, 2726 (getLangOpts().OpenMP && K == CR_OpenMP) 2727 ? 
OpenMP().getOpenMPNestingLevel() 2728 : 0, 2729 OpenMPCaptureLevel); 2730 CSI->ReturnType = Context.VoidTy; 2731 FunctionScopes.push_back(CSI); 2732 CapturingFunctionScopes++; 2733 } 2734 2735 CapturedRegionScopeInfo *Sema::getCurCapturedRegion() { 2736 if (FunctionScopes.empty()) 2737 return nullptr; 2738 2739 return dyn_cast<CapturedRegionScopeInfo>(FunctionScopes.back()); 2740 } 2741 2742 const llvm::MapVector<FieldDecl *, Sema::DeleteLocs> & 2743 Sema::getMismatchingDeleteExpressions() const { 2744 return DeleteExprs; 2745 } 2746 2747 Sema::FPFeaturesStateRAII::FPFeaturesStateRAII(Sema &S) 2748 : S(S), OldFPFeaturesState(S.CurFPFeatures), 2749 OldOverrides(S.FpPragmaStack.CurrentValue), 2750 OldEvalMethod(S.PP.getCurrentFPEvalMethod()), 2751 OldFPPragmaLocation(S.PP.getLastFPEvalPragmaLocation()) {} 2752 2753 Sema::FPFeaturesStateRAII::~FPFeaturesStateRAII() { 2754 S.CurFPFeatures = OldFPFeaturesState; 2755 S.FpPragmaStack.CurrentValue = OldOverrides; 2756 S.PP.setCurrentFPEvalMethod(OldFPPragmaLocation, OldEvalMethod); 2757 } 2758 2759 bool Sema::isDeclaratorFunctionLike(Declarator &D) { 2760 assert(D.getCXXScopeSpec().isSet() && 2761 "can only be called for qualified names"); 2762 2763 auto LR = LookupResult(*this, D.getIdentifier(), D.getBeginLoc(), 2764 LookupOrdinaryName, forRedeclarationInCurContext()); 2765 DeclContext *DC = computeDeclContext(D.getCXXScopeSpec(), 2766 !D.getDeclSpec().isFriendSpecified()); 2767 if (!DC) 2768 return false; 2769 2770 LookupQualifiedName(LR, DC); 2771 bool Result = llvm::all_of(LR, [](Decl *Dcl) { 2772 if (NamedDecl *ND = dyn_cast<NamedDecl>(Dcl)) { 2773 ND = ND->getUnderlyingDecl(); 2774 return isa<FunctionDecl>(ND) || isa<FunctionTemplateDecl>(ND) || 2775 isa<UsingDecl>(ND); 2776 } 2777 return false; 2778 }); 2779 return Result; 2780 } 2781 2782 Attr *Sema::CreateAnnotationAttr(const AttributeCommonInfo &CI, StringRef Annot, 2783 MutableArrayRef<Expr *> Args) { 2784 2785 auto *A = AnnotateAttr::Create(Context, Annot, Args.data(), Args.size(), CI); 2786 if (!ConstantFoldAttrArgs( 2787 CI, MutableArrayRef<Expr *>(A->args_begin(), A->args_end()))) { 2788 return nullptr; 2789 } 2790 return A; 2791 } 2792 2793 Attr *Sema::CreateAnnotationAttr(const ParsedAttr &AL) { 2794 // Make sure that there is a string literal as the annotation's first 2795 // argument. 2796 StringRef Str; 2797 if (!checkStringLiteralArgumentAttr(AL, 0, Str)) 2798 return nullptr; 2799 2800 llvm::SmallVector<Expr *, 4> Args; 2801 Args.reserve(AL.getNumArgs() - 1); 2802 for (unsigned Idx = 1; Idx < AL.getNumArgs(); Idx++) { 2803 assert(!AL.isArgIdent(Idx)); 2804 Args.push_back(AL.getArgAsExpr(Idx)); 2805 } 2806 2807 return CreateAnnotationAttr(AL, Str, Args); 2808 } 2809