xref: /llvm-project/clang/lib/Sema/Sema.cpp (revision 0865ecc5150b9a55ba1f9e30b6d463a66ac362a6)
1 //===--- Sema.cpp - AST Builder and Semantic Analysis Implementation ------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the actions class which performs semantic analysis and
10 // builds an AST out of a parse stream.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "UsedDeclVisitor.h"
15 #include "clang/AST/ASTContext.h"
16 #include "clang/AST/ASTDiagnostic.h"
17 #include "clang/AST/Decl.h"
18 #include "clang/AST/DeclCXX.h"
19 #include "clang/AST/DeclFriend.h"
20 #include "clang/AST/DeclObjC.h"
21 #include "clang/AST/Expr.h"
22 #include "clang/AST/ExprCXX.h"
23 #include "clang/AST/PrettyDeclStackTrace.h"
24 #include "clang/AST/StmtCXX.h"
25 #include "clang/AST/TypeOrdering.h"
26 #include "clang/Basic/DarwinSDKInfo.h"
27 #include "clang/Basic/DiagnosticOptions.h"
28 #include "clang/Basic/PartialDiagnostic.h"
29 #include "clang/Basic/SourceManager.h"
30 #include "clang/Basic/TargetInfo.h"
31 #include "clang/Lex/HeaderSearch.h"
32 #include "clang/Lex/HeaderSearchOptions.h"
33 #include "clang/Lex/Preprocessor.h"
34 #include "clang/Sema/CXXFieldCollector.h"
35 #include "clang/Sema/EnterExpressionEvaluationContext.h"
36 #include "clang/Sema/ExternalSemaSource.h"
37 #include "clang/Sema/Initialization.h"
38 #include "clang/Sema/MultiplexExternalSemaSource.h"
39 #include "clang/Sema/ObjCMethodList.h"
40 #include "clang/Sema/RISCVIntrinsicManager.h"
41 #include "clang/Sema/Scope.h"
42 #include "clang/Sema/ScopeInfo.h"
43 #include "clang/Sema/SemaAMDGPU.h"
44 #include "clang/Sema/SemaARM.h"
45 #include "clang/Sema/SemaAVR.h"
46 #include "clang/Sema/SemaBPF.h"
47 #include "clang/Sema/SemaCUDA.h"
48 #include "clang/Sema/SemaCodeCompletion.h"
49 #include "clang/Sema/SemaConsumer.h"
50 #include "clang/Sema/SemaHLSL.h"
51 #include "clang/Sema/SemaHexagon.h"
52 #include "clang/Sema/SemaLoongArch.h"
53 #include "clang/Sema/SemaM68k.h"
54 #include "clang/Sema/SemaMIPS.h"
55 #include "clang/Sema/SemaMSP430.h"
56 #include "clang/Sema/SemaNVPTX.h"
57 #include "clang/Sema/SemaObjC.h"
58 #include "clang/Sema/SemaOpenACC.h"
59 #include "clang/Sema/SemaOpenCL.h"
60 #include "clang/Sema/SemaOpenMP.h"
61 #include "clang/Sema/SemaPPC.h"
62 #include "clang/Sema/SemaPseudoObject.h"
63 #include "clang/Sema/SemaRISCV.h"
64 #include "clang/Sema/SemaSPIRV.h"
65 #include "clang/Sema/SemaSYCL.h"
66 #include "clang/Sema/SemaSwift.h"
67 #include "clang/Sema/SemaSystemZ.h"
68 #include "clang/Sema/SemaWasm.h"
69 #include "clang/Sema/SemaX86.h"
70 #include "clang/Sema/TemplateDeduction.h"
71 #include "clang/Sema/TemplateInstCallback.h"
72 #include "clang/Sema/TypoCorrection.h"
73 #include "llvm/ADT/DenseMap.h"
74 #include "llvm/ADT/STLExtras.h"
75 #include "llvm/ADT/SmallPtrSet.h"
76 #include "llvm/Support/TimeProfiler.h"
77 #include <optional>
78 
79 using namespace clang;
80 using namespace sema;
81 
82 SourceLocation Sema::getLocForEndOfToken(SourceLocation Loc, unsigned Offset) {
83   return Lexer::getLocForEndOfToken(Loc, Offset, SourceMgr, LangOpts);
84 }
85 
86 ModuleLoader &Sema::getModuleLoader() const { return PP.getModuleLoader(); }
87 
88 DarwinSDKInfo *
89 Sema::getDarwinSDKInfoForAvailabilityChecking(SourceLocation Loc,
90                                               StringRef Platform) {
91   auto *SDKInfo = getDarwinSDKInfoForAvailabilityChecking();
92   if (!SDKInfo && !WarnedDarwinSDKInfoMissing) {
93     Diag(Loc, diag::warn_missing_sdksettings_for_availability_checking)
94         << Platform;
95     WarnedDarwinSDKInfoMissing = true;
96   }
97   return SDKInfo;
98 }
99 
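// Parse the SDKSettings for the configured sysroot on first use and cache the
// result for subsequent availability queries; returns null if no SDK info is
// available.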
100 DarwinSDKInfo *Sema::getDarwinSDKInfoForAvailabilityChecking() {
101   if (CachedDarwinSDKInfo)
102     return CachedDarwinSDKInfo->get();
103   auto SDKInfo = parseDarwinSDKInfo(
104       PP.getFileManager().getVirtualFileSystem(),
105       PP.getHeaderSearchInfo().getHeaderSearchOpts().Sysroot);
106   if (SDKInfo && *SDKInfo) {
107     CachedDarwinSDKInfo = std::make_unique<DarwinSDKInfo>(std::move(**SDKInfo));
108     return CachedDarwinSDKInfo->get();
109   }
110   if (!SDKInfo)
111     llvm::consumeError(SDKInfo.takeError());
112   CachedDarwinSDKInfo = std::unique_ptr<DarwinSDKInfo>();
113   return nullptr;
114 }
115 
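// Invent a display name for the template parameter corresponding to an 'auto'
// parameter of an abbreviated function template: "auto:N" if the parameter is
// unnamed, "<name>:auto" otherwise.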
116 IdentifierInfo *Sema::InventAbbreviatedTemplateParameterTypeName(
117     const IdentifierInfo *ParamName, unsigned int Index) {
118   std::string InventedName;
119   llvm::raw_string_ostream OS(InventedName);
120 
121   if (!ParamName)
122     OS << "auto:" << Index + 1;
123   else
124     OS << ParamName->getName() << ":auto";
125 
126   return &Context.Idents.get(OS.str());
127 }
128 
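// Compute the printing policy used by diagnostics: print _Bool as 'bool' when
// a matching 'bool' macro is in effect, and elide the contents of large
// arrays.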
129 PrintingPolicy Sema::getPrintingPolicy(const ASTContext &Context,
130                                        const Preprocessor &PP) {
131   PrintingPolicy Policy = Context.getPrintingPolicy();
132   // In diagnostics, we print _Bool as bool if the latter is defined as the
133   // former.
134   Policy.Bool = Context.getLangOpts().Bool;
135   if (!Policy.Bool) {
136     if (const MacroInfo *BoolMacro = PP.getMacroInfo(Context.getBoolName())) {
137       Policy.Bool = BoolMacro->isObjectLike() &&
138                     BoolMacro->getNumTokens() == 1 &&
139                     BoolMacro->getReplacementToken(0).is(tok::kw__Bool);
140     }
141   }
142 
143   // Shorten the data output if needed
144   Policy.EntireContentsOfLargeArray = false;
145 
146   return Policy;
147 }
148 
149 void Sema::ActOnTranslationUnitScope(Scope *S) {
150   TUScope = S;
151   PushDeclContext(S, Context.getTranslationUnitDecl());
152 }
153 
154 namespace clang {
155 namespace sema {
156 
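// Preprocessor callbacks that let Sema react to file enter/exit events: they
// open and close time-trace entries for included sources and diagnose
// non-default #pragma align/pack state at include boundaries.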
157 class SemaPPCallbacks : public PPCallbacks {
158   Sema *S = nullptr;
159   llvm::SmallVector<SourceLocation, 8> IncludeStack;
160   llvm::SmallVector<llvm::TimeTraceProfilerEntry *, 8> ProfilerStack;
161 
162 public:
163   void set(Sema &S) { this->S = &S; }
164 
165   void reset() { S = nullptr; }
166 
167   void FileChanged(SourceLocation Loc, FileChangeReason Reason,
168                    SrcMgr::CharacteristicKind FileType,
169                    FileID PrevFID) override {
170     if (!S)
171       return;
172     switch (Reason) {
173     case EnterFile: {
174       SourceManager &SM = S->getSourceManager();
175       SourceLocation IncludeLoc = SM.getIncludeLoc(SM.getFileID(Loc));
176       if (IncludeLoc.isValid()) {
177         if (llvm::timeTraceProfilerEnabled()) {
178           OptionalFileEntryRef FE = SM.getFileEntryRefForID(SM.getFileID(Loc));
179           ProfilerStack.push_back(llvm::timeTraceAsyncProfilerBegin(
180               "Source", FE ? FE->getName() : StringRef("<unknown>")));
181         }
182 
183         IncludeStack.push_back(IncludeLoc);
184         S->DiagnoseNonDefaultPragmaAlignPack(
185             Sema::PragmaAlignPackDiagnoseKind::NonDefaultStateAtInclude,
186             IncludeLoc);
187       }
188       break;
189     }
190     case ExitFile:
191       if (!IncludeStack.empty()) {
192         if (llvm::timeTraceProfilerEnabled())
193           llvm::timeTraceProfilerEnd(ProfilerStack.pop_back_val());
194 
195         S->DiagnoseNonDefaultPragmaAlignPack(
196             Sema::PragmaAlignPackDiagnoseKind::ChangedStateAtExit,
197             IncludeStack.pop_back_val());
198       }
199       break;
200     default:
201       break;
202     }
203   }
204 };
205 
206 } // end namespace sema
207 } // end namespace clang
208 
209 const unsigned Sema::MaxAlignmentExponent;
210 const uint64_t Sema::MaximumAlignment;
211 
212 Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
213            TranslationUnitKind TUKind, CodeCompleteConsumer *CodeCompleter)
214     : SemaBase(*this), CollectStats(false), TUKind(TUKind),
215       CurFPFeatures(pp.getLangOpts()), LangOpts(pp.getLangOpts()), PP(pp),
216       Context(ctxt), Consumer(consumer), Diags(PP.getDiagnostics()),
217       SourceMgr(PP.getSourceManager()), APINotes(SourceMgr, LangOpts),
218       AnalysisWarnings(*this), ThreadSafetyDeclCache(nullptr),
219       LateTemplateParser(nullptr), LateTemplateParserCleanup(nullptr),
220       OpaqueParser(nullptr), CurContext(nullptr), ExternalSource(nullptr),
221       StackHandler(Diags), CurScope(nullptr), Ident_super(nullptr),
222       AMDGPUPtr(std::make_unique<SemaAMDGPU>(*this)),
223       ARMPtr(std::make_unique<SemaARM>(*this)),
224       AVRPtr(std::make_unique<SemaAVR>(*this)),
225       BPFPtr(std::make_unique<SemaBPF>(*this)),
226       CodeCompletionPtr(
227           std::make_unique<SemaCodeCompletion>(*this, CodeCompleter)),
228       CUDAPtr(std::make_unique<SemaCUDA>(*this)),
229       HLSLPtr(std::make_unique<SemaHLSL>(*this)),
230       HexagonPtr(std::make_unique<SemaHexagon>(*this)),
231       LoongArchPtr(std::make_unique<SemaLoongArch>(*this)),
232       M68kPtr(std::make_unique<SemaM68k>(*this)),
233       MIPSPtr(std::make_unique<SemaMIPS>(*this)),
234       MSP430Ptr(std::make_unique<SemaMSP430>(*this)),
235       NVPTXPtr(std::make_unique<SemaNVPTX>(*this)),
236       ObjCPtr(std::make_unique<SemaObjC>(*this)),
237       OpenACCPtr(std::make_unique<SemaOpenACC>(*this)),
238       OpenCLPtr(std::make_unique<SemaOpenCL>(*this)),
239       OpenMPPtr(std::make_unique<SemaOpenMP>(*this)),
240       PPCPtr(std::make_unique<SemaPPC>(*this)),
241       PseudoObjectPtr(std::make_unique<SemaPseudoObject>(*this)),
242       RISCVPtr(std::make_unique<SemaRISCV>(*this)),
243       SPIRVPtr(std::make_unique<SemaSPIRV>(*this)),
244       SYCLPtr(std::make_unique<SemaSYCL>(*this)),
245       SwiftPtr(std::make_unique<SemaSwift>(*this)),
246       SystemZPtr(std::make_unique<SemaSystemZ>(*this)),
247       WasmPtr(std::make_unique<SemaWasm>(*this)),
248       X86Ptr(std::make_unique<SemaX86>(*this)),
249       MSPointerToMemberRepresentationMethod(
250           LangOpts.getMSPointerToMemberRepresentationMethod()),
251       MSStructPragmaOn(false), VtorDispStack(LangOpts.getVtorDispMode()),
252       AlignPackStack(AlignPackInfo(getLangOpts().XLPragmaPack)),
253       DataSegStack(nullptr), BSSSegStack(nullptr), ConstSegStack(nullptr),
254       CodeSegStack(nullptr), StrictGuardStackCheckStack(false),
255       FpPragmaStack(FPOptionsOverride()), CurInitSeg(nullptr),
256       VisContext(nullptr), PragmaAttributeCurrentTargetDecl(nullptr),
257       StdCoroutineTraitsCache(nullptr), IdResolver(pp),
258       OriginalLexicalContext(nullptr), StdInitializerList(nullptr),
259       FullyCheckedComparisonCategories(
260           static_cast<unsigned>(ComparisonCategoryType::Last) + 1),
261       StdSourceLocationImplDecl(nullptr), CXXTypeInfoDecl(nullptr),
262       GlobalNewDeleteDeclared(false), DisableTypoCorrection(false),
263       TyposCorrected(0), IsBuildingRecoveryCallExpr(false), NumSFINAEErrors(0),
264       AccessCheckingSFINAE(false), CurrentInstantiationScope(nullptr),
265       InNonInstantiationSFINAEContext(false), NonInstantiationEntries(0),
266       ArgumentPackSubstitutionIndex(-1), SatisfactionCache(Context) {
267   assert(pp.TUKind == TUKind);
268   TUScope = nullptr;
269 
270   LoadedExternalKnownNamespaces = false;
271   for (unsigned I = 0; I != NSAPI::NumNSNumberLiteralMethods; ++I)
272     ObjC().NSNumberLiteralMethods[I] = nullptr;
273 
274   if (getLangOpts().ObjC)
275     ObjC().NSAPIObj.reset(new NSAPI(Context));
276 
277   if (getLangOpts().CPlusPlus)
278     FieldCollector.reset(new CXXFieldCollector());
279 
280   // Tell diagnostics how to render things from the AST library.
281   Diags.SetArgToStringFn(&FormatASTNodeDiagnosticArgument, &Context);
282 
283   // This evaluation context exists to ensure that there's always at least one
284   // valid evaluation context available. It is never removed from the
285   // evaluation stack.
286   ExprEvalContexts.emplace_back(
287       ExpressionEvaluationContext::PotentiallyEvaluated, 0, CleanupInfo{},
288       nullptr, ExpressionEvaluationContextRecord::EK_Other);
289 
290   // Initialization of data sharing attributes stack for OpenMP
291   OpenMP().InitDataSharingAttributesStack();
292 
293   std::unique_ptr<sema::SemaPPCallbacks> Callbacks =
294       std::make_unique<sema::SemaPPCallbacks>();
295   SemaPPCallbackHandler = Callbacks.get();
296   PP.addPPCallbacks(std::move(Callbacks));
297   SemaPPCallbackHandler->set(*this);
298 
299   CurFPFeatures.setFPEvalMethod(PP.getCurrentFPEvalMethod());
300 }
301 
302 // Anchor Sema's type info to this TU.
303 void Sema::anchor() {}
304 
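// Add an implicit typedef 'Name' for type T at translation-unit scope, unless
// the identifier is already bound to a declaration.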
305 void Sema::addImplicitTypedef(StringRef Name, QualType T) {
306   DeclarationName DN = &Context.Idents.get(Name);
307   if (IdResolver.begin(DN) == IdResolver.end())
308     PushOnScopeChains(Context.buildImplicitTypedef(T, Name), TUScope);
309 }
310 
311 void Sema::Initialize() {
312   // Create BuiltinVaListDecl *before* ExternalSemaSource::InitializeSema(this),
313   // because during initialization the ASTReader can emit globals that require
314   // name mangling, and name mangling uses BuiltinVaListDecl.
315   if (Context.getTargetInfo().hasBuiltinMSVaList())
316     (void)Context.getBuiltinMSVaListDecl();
317   (void)Context.getBuiltinVaListDecl();
318 
319   if (SemaConsumer *SC = dyn_cast<SemaConsumer>(&Consumer))
320     SC->InitializeSema(*this);
321 
322   // Tell the external Sema source about this Sema object.
323   if (ExternalSemaSource *ExternalSema
324       = dyn_cast_or_null<ExternalSemaSource>(Context.getExternalSource()))
325     ExternalSema->InitializeSema(*this);
326 
327   // This needs to happen after ExternalSemaSource::InitializeSema(this) or we
328   // will not be able to merge any duplicate __va_list_tag decls correctly.
329   VAListTagName = PP.getIdentifierInfo("__va_list_tag");
330 
331   if (!TUScope)
332     return;
333 
334   // Initialize predefined 128-bit integer types, if needed.
335   if (Context.getTargetInfo().hasInt128Type() ||
336       (Context.getAuxTargetInfo() &&
337        Context.getAuxTargetInfo()->hasInt128Type())) {
338     // If either of the 128-bit integer types are unavailable to name lookup,
339     // define them now.
340     DeclarationName Int128 = &Context.Idents.get("__int128_t");
341     if (IdResolver.begin(Int128) == IdResolver.end())
342       PushOnScopeChains(Context.getInt128Decl(), TUScope);
343 
344     DeclarationName UInt128 = &Context.Idents.get("__uint128_t");
345     if (IdResolver.begin(UInt128) == IdResolver.end())
346       PushOnScopeChains(Context.getUInt128Decl(), TUScope);
347   }
348 
349 
350   // Initialize predefined Objective-C types:
351   if (getLangOpts().ObjC) {
352     // If 'SEL' does not yet refer to any declarations, make it refer to the
353     // predefined 'SEL'.
354     DeclarationName SEL = &Context.Idents.get("SEL");
355     if (IdResolver.begin(SEL) == IdResolver.end())
356       PushOnScopeChains(Context.getObjCSelDecl(), TUScope);
357 
358     // If 'id' does not yet refer to any declarations, make it refer to the
359     // predefined 'id'.
360     DeclarationName Id = &Context.Idents.get("id");
361     if (IdResolver.begin(Id) == IdResolver.end())
362       PushOnScopeChains(Context.getObjCIdDecl(), TUScope);
363 
364     // Create the built-in typedef for 'Class'.
365     DeclarationName Class = &Context.Idents.get("Class");
366     if (IdResolver.begin(Class) == IdResolver.end())
367       PushOnScopeChains(Context.getObjCClassDecl(), TUScope);
368 
369     // Create the built-in forward declaration for 'Protocol'.
370     DeclarationName Protocol = &Context.Idents.get("Protocol");
371     if (IdResolver.begin(Protocol) == IdResolver.end())
372       PushOnScopeChains(Context.getObjCProtocolDecl(), TUScope);
373   }
374 
375   // Create the internal type for the *StringMakeConstantString builtins.
376   DeclarationName ConstantString = &Context.Idents.get("__NSConstantString");
377   if (IdResolver.begin(ConstantString) == IdResolver.end())
378     PushOnScopeChains(Context.getCFConstantStringDecl(), TUScope);
379 
380   // Initialize Microsoft "predefined C++ types".
381   if (getLangOpts().MSVCCompat) {
382     if (getLangOpts().CPlusPlus &&
383         IdResolver.begin(&Context.Idents.get("type_info")) == IdResolver.end())
384       PushOnScopeChains(
385           Context.buildImplicitRecord("type_info", TagTypeKind::Class),
386           TUScope);
387 
388     addImplicitTypedef("size_t", Context.getSizeType());
389   }
390 
391   // Initialize predefined OpenCL types and supported extensions and (optional)
392   // core features.
393   if (getLangOpts().OpenCL) {
394     getOpenCLOptions().addSupport(
395         Context.getTargetInfo().getSupportedOpenCLOpts(), getLangOpts());
396     addImplicitTypedef("sampler_t", Context.OCLSamplerTy);
397     addImplicitTypedef("event_t", Context.OCLEventTy);
398     auto OCLCompatibleVersion = getLangOpts().getOpenCLCompatibleVersion();
399     if (OCLCompatibleVersion >= 200) {
400       if (getLangOpts().OpenCLCPlusPlus || getLangOpts().Blocks) {
401         addImplicitTypedef("clk_event_t", Context.OCLClkEventTy);
402         addImplicitTypedef("queue_t", Context.OCLQueueTy);
403       }
404       if (getLangOpts().OpenCLPipes)
405         addImplicitTypedef("reserve_id_t", Context.OCLReserveIDTy);
406       addImplicitTypedef("atomic_int", Context.getAtomicType(Context.IntTy));
407       addImplicitTypedef("atomic_uint",
408                          Context.getAtomicType(Context.UnsignedIntTy));
409       addImplicitTypedef("atomic_float",
410                          Context.getAtomicType(Context.FloatTy));
411       // OpenCL C v2.0 s6.13.11.6 requires that atomic_flag be implemented as a
412       // 32-bit integer, and per OpenCL C v2.0 s6.1.1 int is always 32 bits wide.
413       addImplicitTypedef("atomic_flag", Context.getAtomicType(Context.IntTy));
414 
415 
416       // OpenCL v2.0 s6.13.11.6:
417       // - The atomic_long and atomic_ulong types are supported if the
418       //   cl_khr_int64_base_atomics and cl_khr_int64_extended_atomics
419       //   extensions are supported.
420       // - The atomic_double type is only supported if double precision
421       //   is supported and the cl_khr_int64_base_atomics and
422       //   cl_khr_int64_extended_atomics extensions are supported.
423       // - If the device address space is 64-bits, the data types
424       //   atomic_intptr_t, atomic_uintptr_t, atomic_size_t and
425       //   atomic_ptrdiff_t are supported if the cl_khr_int64_base_atomics and
426       //   cl_khr_int64_extended_atomics extensions are supported.
427 
428       auto AddPointerSizeDependentTypes = [&]() {
429         auto AtomicSizeT = Context.getAtomicType(Context.getSizeType());
430         auto AtomicIntPtrT = Context.getAtomicType(Context.getIntPtrType());
431         auto AtomicUIntPtrT = Context.getAtomicType(Context.getUIntPtrType());
432         auto AtomicPtrDiffT =
433             Context.getAtomicType(Context.getPointerDiffType());
434         addImplicitTypedef("atomic_size_t", AtomicSizeT);
435         addImplicitTypedef("atomic_intptr_t", AtomicIntPtrT);
436         addImplicitTypedef("atomic_uintptr_t", AtomicUIntPtrT);
437         addImplicitTypedef("atomic_ptrdiff_t", AtomicPtrDiffT);
438       };
439 
440       if (Context.getTypeSize(Context.getSizeType()) == 32) {
441         AddPointerSizeDependentTypes();
442       }
443 
444       if (getOpenCLOptions().isSupported("cl_khr_fp16", getLangOpts())) {
445         auto AtomicHalfT = Context.getAtomicType(Context.HalfTy);
446         addImplicitTypedef("atomic_half", AtomicHalfT);
447       }
448 
449       std::vector<QualType> Atomic64BitTypes;
450       if (getOpenCLOptions().isSupported("cl_khr_int64_base_atomics",
451                                          getLangOpts()) &&
452           getOpenCLOptions().isSupported("cl_khr_int64_extended_atomics",
453                                          getLangOpts())) {
454         if (getOpenCLOptions().isSupported("cl_khr_fp64", getLangOpts())) {
455           auto AtomicDoubleT = Context.getAtomicType(Context.DoubleTy);
456           addImplicitTypedef("atomic_double", AtomicDoubleT);
457           Atomic64BitTypes.push_back(AtomicDoubleT);
458         }
459         auto AtomicLongT = Context.getAtomicType(Context.LongTy);
460         auto AtomicULongT = Context.getAtomicType(Context.UnsignedLongTy);
461         addImplicitTypedef("atomic_long", AtomicLongT);
462         addImplicitTypedef("atomic_ulong", AtomicULongT);
463 
464 
465         if (Context.getTypeSize(Context.getSizeType()) == 64) {
466           AddPointerSizeDependentTypes();
467         }
468       }
469     }
470 
471 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext)                                      \
472   if (getOpenCLOptions().isSupported(#Ext, getLangOpts())) {                   \
473     addImplicitTypedef(#ExtType, Context.Id##Ty);                              \
474   }
475 #include "clang/Basic/OpenCLExtensionTypes.def"
476   }
477 
478   if (Context.getTargetInfo().hasAArch64SVETypes() ||
479       (Context.getAuxTargetInfo() &&
480        Context.getAuxTargetInfo()->hasAArch64SVETypes())) {
481 #define SVE_TYPE(Name, Id, SingletonId) \
482     addImplicitTypedef(Name, Context.SingletonId);
483 #include "clang/Basic/AArch64SVEACLETypes.def"
484   }
485 
486   if (Context.getTargetInfo().getTriple().isPPC64()) {
487 #define PPC_VECTOR_MMA_TYPE(Name, Id, Size) \
488       addImplicitTypedef(#Name, Context.Id##Ty);
489 #include "clang/Basic/PPCTypes.def"
490 #define PPC_VECTOR_VSX_TYPE(Name, Id, Size) \
491     addImplicitTypedef(#Name, Context.Id##Ty);
492 #include "clang/Basic/PPCTypes.def"
493   }
494 
495   if (Context.getTargetInfo().hasRISCVVTypes()) {
496 #define RVV_TYPE(Name, Id, SingletonId)                                        \
497   addImplicitTypedef(Name, Context.SingletonId);
498 #include "clang/Basic/RISCVVTypes.def"
499   }
500 
501   if (Context.getTargetInfo().getTriple().isWasm() &&
502       Context.getTargetInfo().hasFeature("reference-types")) {
503 #define WASM_TYPE(Name, Id, SingletonId)                                       \
504   addImplicitTypedef(Name, Context.SingletonId);
505 #include "clang/Basic/WebAssemblyReferenceTypes.def"
506   }
507 
508   if (Context.getTargetInfo().getTriple().isAMDGPU() ||
509       (Context.getAuxTargetInfo() &&
510        Context.getAuxTargetInfo()->getTriple().isAMDGPU())) {
511 #define AMDGPU_TYPE(Name, Id, SingletonId, Width, Align)                       \
512   addImplicitTypedef(Name, Context.SingletonId);
513 #include "clang/Basic/AMDGPUTypes.def"
514   }
515 
516   if (Context.getTargetInfo().hasBuiltinMSVaList()) {
517     DeclarationName MSVaList = &Context.Idents.get("__builtin_ms_va_list");
518     if (IdResolver.begin(MSVaList) == IdResolver.end())
519       PushOnScopeChains(Context.getBuiltinMSVaListDecl(), TUScope);
520   }
521 
522   DeclarationName BuiltinVaList = &Context.Idents.get("__builtin_va_list");
523   if (IdResolver.begin(BuiltinVaList) == IdResolver.end())
524     PushOnScopeChains(Context.getBuiltinVaListDecl(), TUScope);
525 }
526 
527 Sema::~Sema() {
528   assert(InstantiatingSpecializations.empty() &&
529          "failed to clean up an InstantiatingTemplate?");
530 
531   if (VisContext) FreeVisContext();
532 
533   // Kill all the active scopes.
534   for (sema::FunctionScopeInfo *FSI : FunctionScopes)
535     delete FSI;
536 
537   // Tell the SemaConsumer to forget about us; we're going out of scope.
538   if (SemaConsumer *SC = dyn_cast<SemaConsumer>(&Consumer))
539     SC->ForgetSema();
540 
541   // Detach from the external Sema source.
542   if (ExternalSemaSource *ExternalSema
543         = dyn_cast_or_null<ExternalSemaSource>(Context.getExternalSource()))
544     ExternalSema->ForgetSema();
545 
546   // Delete cached satisfactions.
547   std::vector<ConstraintSatisfaction *> Satisfactions;
548   Satisfactions.reserve(SatisfactionCache.size());
549   for (auto &Node : SatisfactionCache)
550     Satisfactions.push_back(&Node);
551   for (auto *Node : Satisfactions)
552     delete Node;
553 
554   threadSafety::threadSafetyCleanup(ThreadSafetyDeclCache);
555 
556   // Destroys data sharing attributes stack for OpenMP
557   OpenMP().DestroyDataSharingAttributesStack();
558 
559   // Detach from the PP callback handler which outlives Sema since it's owned
560   // by the preprocessor.
561   SemaPPCallbackHandler->reset();
562 }
563 
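// Run Fn via the stack-exhaustion handler, which guards against overflowing
// the stack during deeply recursive work.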
564 void Sema::runWithSufficientStackSpace(SourceLocation Loc,
565                                        llvm::function_ref<void()> Fn) {
566   StackHandler.runWithSufficientStackSpace(Loc, Fn);
567 }
568 
569 bool Sema::makeUnavailableInSystemHeader(SourceLocation loc,
570                                       UnavailableAttr::ImplicitReason reason) {
571   // If we're not in a function, it's an error.
572   FunctionDecl *fn = dyn_cast<FunctionDecl>(CurContext);
573   if (!fn) return false;
574 
575   // If we're in template instantiation, it's an error.
576   if (inTemplateInstantiation())
577     return false;
578 
579   // If that function's not in a system header, it's an error.
580   if (!Context.getSourceManager().isInSystemHeader(loc))
581     return false;
582 
583   // If the function is already unavailable, it's not an error.
584   if (fn->hasAttr<UnavailableAttr>()) return true;
585 
586   fn->addAttr(UnavailableAttr::CreateImplicit(Context, "", reason, loc));
587   return true;
588 }
589 
590 ASTMutationListener *Sema::getASTMutationListener() const {
591   return getASTConsumer().GetASTMutationListener();
592 }
593 
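// Register an additional external Sema source; if one is already installed,
// combine the two through a MultiplexExternalSemaSource.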
594 void Sema::addExternalSource(ExternalSemaSource *E) {
595   assert(E && "Cannot use with NULL ptr");
596 
597   if (!ExternalSource) {
598     ExternalSource = E;
599     return;
600   }
601 
602   if (auto *Ex = dyn_cast<MultiplexExternalSemaSource>(ExternalSource))
603     Ex->AddSource(E);
604   else
605     ExternalSource = new MultiplexExternalSemaSource(ExternalSource.get(), E);
606 }
607 
608 void Sema::PrintStats() const {
609   llvm::errs() << "\n*** Semantic Analysis Stats:\n";
610   llvm::errs() << NumSFINAEErrors << " SFINAE diagnostics trapped.\n";
611 
612   BumpAlloc.PrintStats();
613   AnalysisWarnings.PrintStats();
614 }
615 
616 void Sema::diagnoseNullableToNonnullConversion(QualType DstType,
617                                                QualType SrcType,
618                                                SourceLocation Loc) {
619   std::optional<NullabilityKind> ExprNullability = SrcType->getNullability();
620   if (!ExprNullability || (*ExprNullability != NullabilityKind::Nullable &&
621                            *ExprNullability != NullabilityKind::NullableResult))
622     return;
623 
624   std::optional<NullabilityKind> TypeNullability = DstType->getNullability();
625   if (!TypeNullability || *TypeNullability != NullabilityKind::NonNull)
626     return;
627 
628   Diag(Loc, diag::warn_nullability_lost) << SrcType << DstType;
629 }
630 
631 // Generate diagnostics when adding or removing effects in a type conversion.
632 void Sema::diagnoseFunctionEffectConversion(QualType DstType, QualType SrcType,
633                                             SourceLocation Loc) {
634   const auto SrcFX = FunctionEffectsRef::get(SrcType);
635   const auto DstFX = FunctionEffectsRef::get(DstType);
636   if (SrcFX != DstFX) {
637     for (const auto &Diff : FunctionEffectDiffVector(SrcFX, DstFX)) {
638       if (Diff.shouldDiagnoseConversion(SrcType, SrcFX, DstType, DstFX))
639         Diag(Loc, diag::warn_invalid_add_func_effects) << Diff.effectName();
640     }
641   }
642 }
643 
644 void Sema::diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E) {
645   // nullptr only exists from C++11 on, so don't warn on its absence earlier.
646   if (!getLangOpts().CPlusPlus11)
647     return;
648 
649   if (Kind != CK_NullToPointer && Kind != CK_NullToMemberPointer)
650     return;
651 
652   const Expr *EStripped = E->IgnoreParenImpCasts();
653   if (EStripped->getType()->isNullPtrType())
654     return;
655   if (isa<GNUNullExpr>(EStripped))
656     return;
657 
658   if (Diags.isIgnored(diag::warn_zero_as_null_pointer_constant,
659                       E->getBeginLoc()))
660     return;
661 
662   // Don't diagnose the conversion from a 0 literal to a null pointer argument
663   // in a synthesized call to operator<=>.
664   if (!CodeSynthesisContexts.empty() &&
665       CodeSynthesisContexts.back().Kind ==
666           CodeSynthesisContext::RewritingOperatorAsSpaceship)
667     return;
668 
669   // Ignore null pointers in defaulted comparison operators.
670   FunctionDecl *FD = getCurFunctionDecl();
671   if (FD && FD->isDefaulted()) {
672     return;
673   }
674 
675   // If it is a macro from a system header and the macro name is not "NULL",
676   // do not warn.
677   // Note that uses of "NULL" will be ignored above on systems that define it
678   // as __null.
679   SourceLocation MaybeMacroLoc = E->getBeginLoc();
680   if (Diags.getSuppressSystemWarnings() &&
681       SourceMgr.isInSystemMacro(MaybeMacroLoc) &&
682       !findMacroSpelling(MaybeMacroLoc, "NULL"))
683     return;
684 
685   Diag(E->getBeginLoc(), diag::warn_zero_as_null_pointer_constant)
686       << FixItHint::CreateReplacement(E->getSourceRange(), "nullptr");
687 }
688 
689 /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit cast.
690 /// If there is already an implicit cast, merge into the existing one.
691 /// The result is of the given category.
692 ExprResult Sema::ImpCastExprToType(Expr *E, QualType Ty,
693                                    CastKind Kind, ExprValueKind VK,
694                                    const CXXCastPath *BasePath,
695                                    CheckedConversionKind CCK) {
696 #ifndef NDEBUG
697   if (VK == VK_PRValue && !E->isPRValue()) {
698     switch (Kind) {
699     default:
700       llvm_unreachable(
701           ("can't implicitly cast glvalue to prvalue with this cast "
702            "kind: " +
703            std::string(CastExpr::getCastKindName(Kind)))
704               .c_str());
705     case CK_Dependent:
706     case CK_LValueToRValue:
707     case CK_ArrayToPointerDecay:
708     case CK_FunctionToPointerDecay:
709     case CK_ToVoid:
710     case CK_NonAtomicToAtomic:
711     case CK_HLSLArrayRValue:
712       break;
713     }
714   }
715   assert((VK == VK_PRValue || Kind == CK_Dependent || !E->isPRValue()) &&
716          "can't cast prvalue to glvalue");
717 #endif
718 
719   diagnoseNullableToNonnullConversion(Ty, E->getType(), E->getBeginLoc());
720   diagnoseZeroToNullptrConversion(Kind, E);
721   if (Context.hasAnyFunctionEffects() && !isCast(CCK) &&
722       Kind != CK_NullToPointer && Kind != CK_NullToMemberPointer)
723     diagnoseFunctionEffectConversion(Ty, E->getType(), E->getBeginLoc());
724 
725   QualType ExprTy = Context.getCanonicalType(E->getType());
726   QualType TypeTy = Context.getCanonicalType(Ty);
727 
728   // This cast is used in place of a regular LValue to RValue cast for
729   // HLSL Array Parameter Types. It needs to be emitted even if
730   // ExprTy == TypeTy, except if E is an HLSLOutArgExpr: emitting a cast in
731   // that case would prevent HLSLOutArgExpr from being handled properly in
732   // EmitCallArg.
733   if (Kind == CK_HLSLArrayRValue && !isa<HLSLOutArgExpr>(E))
734     return ImplicitCastExpr::Create(Context, Ty, Kind, E, BasePath, VK,
735                                     CurFPFeatureOverrides());
736 
737   if (ExprTy == TypeTy)
738     return E;
739 
740   if (Kind == CK_ArrayToPointerDecay) {
741     // C++1z [conv.array]: The temporary materialization conversion is applied.
742     // We also use this to fuel C++ DR1213, which applies to C++11 onwards.
743     if (getLangOpts().CPlusPlus && E->isPRValue()) {
744       // The temporary is an lvalue in C++98 and an xvalue otherwise.
745       ExprResult Materialized = CreateMaterializeTemporaryExpr(
746           E->getType(), E, !getLangOpts().CPlusPlus11);
747       if (Materialized.isInvalid())
748         return ExprError();
749       E = Materialized.get();
750     }
751     // C17 6.7.1p6 footnote 124: The implementation can treat any register
752     // declaration simply as an auto declaration. However, whether or not
753     // addressable storage is actually used, the address of any part of an
754     // object declared with storage-class specifier register cannot be
755     // computed, either explicitly (by use of the unary & operator as discussed
756     // in 6.5.3.2) or implicitly (by converting an array name to a pointer as
757     // discussed in 6.3.2.1). Thus, the only operator that can be applied to an
758     // array declared with storage-class specifier register is sizeof.
759     if (VK == VK_PRValue && !getLangOpts().CPlusPlus && !E->isPRValue()) {
760       if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
761         if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
762           if (VD->getStorageClass() == SC_Register) {
763             Diag(E->getExprLoc(), diag::err_typecheck_address_of)
764                 << /*register variable*/ 3 << E->getSourceRange();
765             return ExprError();
766           }
767         }
768       }
769     }
770   }
771 
772   if (ImplicitCastExpr *ImpCast = dyn_cast<ImplicitCastExpr>(E)) {
773     if (ImpCast->getCastKind() == Kind && (!BasePath || BasePath->empty())) {
774       ImpCast->setType(Ty);
775       ImpCast->setValueKind(VK);
776       return E;
777     }
778   }
779 
780   return ImplicitCastExpr::Create(Context, Ty, Kind, E, BasePath, VK,
781                                   CurFPFeatureOverrides());
782 }
783 
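// Return the cast kind used to convert a value of the given scalar type to
// bool.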
784 CastKind Sema::ScalarTypeToBooleanCastKind(QualType ScalarTy) {
785   switch (ScalarTy->getScalarTypeKind()) {
786   case Type::STK_Bool: return CK_NoOp;
787   case Type::STK_CPointer: return CK_PointerToBoolean;
788   case Type::STK_BlockPointer: return CK_PointerToBoolean;
789   case Type::STK_ObjCObjectPointer: return CK_PointerToBoolean;
790   case Type::STK_MemberPointer: return CK_MemberPointerToBoolean;
791   case Type::STK_Integral: return CK_IntegralToBoolean;
792   case Type::STK_Floating: return CK_FloatingToBoolean;
793   case Type::STK_IntegralComplex: return CK_IntegralComplexToBoolean;
794   case Type::STK_FloatingComplex: return CK_FloatingComplexToBoolean;
795   case Type::STK_FixedPoint: return CK_FixedPointToBoolean;
796   }
797   llvm_unreachable("unknown scalar type kind");
798 }
799 
800 /// Used to prune the decls of Sema's UnusedFileScopedDecls vector.
801 static bool ShouldRemoveFromUnused(Sema *SemaRef, const DeclaratorDecl *D) {
802   if (D->getMostRecentDecl()->isUsed())
803     return true;
804 
805   if (D->isExternallyVisible())
806     return true;
807 
808   if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
809     // If this is a function template and none of its specializations is used,
810     // we should warn.
811     if (FunctionTemplateDecl *Template = FD->getDescribedFunctionTemplate())
812       for (const auto *Spec : Template->specializations())
813         if (ShouldRemoveFromUnused(SemaRef, Spec))
814           return true;
815 
816     // UnusedFileScopedDecls stores the first declaration.
817     // The declaration may have become a definition, so check again.
818     const FunctionDecl *DeclToCheck;
819     if (FD->hasBody(DeclToCheck))
820       return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);
821 
822     // Later redecls may add new information resulting in not having to warn,
823     // so check again.
824     DeclToCheck = FD->getMostRecentDecl();
825     if (DeclToCheck != FD)
826       return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);
827   }
828 
829   if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
830     // If a variable usable in constant expressions is referenced,
831     // don't warn if it isn't used: if the value of a variable is required
832     // for the computation of a constant expression, it doesn't make sense to
833     // warn even if the variable isn't odr-used.  (isReferenced doesn't
834     // precisely reflect that, but it's a decent approximation.)
835     if (VD->isReferenced() &&
836         VD->mightBeUsableInConstantExpressions(SemaRef->Context))
837       return true;
838 
839     if (VarTemplateDecl *Template = VD->getDescribedVarTemplate())
840       // If this is a variable template and none of its specializations is used,
841       // we should warn.
842       for (const auto *Spec : Template->specializations())
843         if (ShouldRemoveFromUnused(SemaRef, Spec))
844           return true;
845 
846     // UnusedFileScopedDecls stores the first declaration.
847     // The declaration may have become a definition, so check again.
848     const VarDecl *DeclToCheck = VD->getDefinition();
849     if (DeclToCheck)
850       return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);
851 
852     // Later redecls may add new information resulting in not having to warn,
853     // so check again.
854     DeclToCheck = VD->getMostRecentDecl();
855     if (DeclToCheck != VD)
856       return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);
857   }
858 
859   return false;
860 }
861 
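// Returns true if the given function or variable declaration has C language
// linkage.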
862 static bool isFunctionOrVarDeclExternC(const NamedDecl *ND) {
863   if (const auto *FD = dyn_cast<FunctionDecl>(ND))
864     return FD->isExternC();
865   return cast<VarDecl>(ND)->isExternC();
866 }
867 
868 /// Determine whether ND is an external-linkage function or variable whose
869 /// type has no linkage.
870 bool Sema::isExternalWithNoLinkageType(const ValueDecl *VD) const {
871   // Note: it's not quite enough to check whether VD has UniqueExternalLinkage,
872   // because we also want to catch the case where its type has VisibleNoLinkage,
873   // which does not affect the linkage of VD.
874   return getLangOpts().CPlusPlus && VD->hasExternalFormalLinkage() &&
875          !isExternalFormalLinkage(VD->getType()->getLinkage()) &&
876          !isFunctionOrVarDeclExternC(VD);
877 }
878 
879 /// Obtains a sorted list of functions and variables that are undefined but
880 /// ODR-used.
881 void Sema::getUndefinedButUsed(
882     SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined) {
883   for (const auto &UndefinedUse : UndefinedButUsed) {
884     NamedDecl *ND = UndefinedUse.first;
885 
886     // Ignore declarations that have become invalid.
887     if (ND->isInvalidDecl()) continue;
888 
889     // __attribute__((weakref)) is basically a definition.
890     if (ND->hasAttr<WeakRefAttr>()) continue;
891 
892     if (isa<CXXDeductionGuideDecl>(ND))
893       continue;
894 
895     if (ND->hasAttr<DLLImportAttr>() || ND->hasAttr<DLLExportAttr>()) {
896       // An exported function will always be emitted when defined, so even if
897       // the function is inline, it doesn't have to be emitted in this TU. An
898       // imported function implies that it has been exported somewhere else.
899       continue;
900     }
901 
902     if (const auto *FD = dyn_cast<FunctionDecl>(ND)) {
903       if (FD->isDefined())
904         continue;
905       if (FD->isExternallyVisible() &&
906           !isExternalWithNoLinkageType(FD) &&
907           !FD->getMostRecentDecl()->isInlined() &&
908           !FD->hasAttr<ExcludeFromExplicitInstantiationAttr>())
909         continue;
910       if (FD->getBuiltinID())
911         continue;
912     } else {
913       const auto *VD = cast<VarDecl>(ND);
914       if (VD->hasDefinition() != VarDecl::DeclarationOnly)
915         continue;
916       if (VD->isExternallyVisible() &&
917           !isExternalWithNoLinkageType(VD) &&
918           !VD->getMostRecentDecl()->isInline() &&
919           !VD->hasAttr<ExcludeFromExplicitInstantiationAttr>())
920         continue;
921 
922       // Skip VarDecls that lack formal definitions but which we know are in
923       // fact defined somewhere.
924       if (VD->isKnownToBeDefined())
925         continue;
926     }
927 
928     Undefined.push_back(std::make_pair(ND, UndefinedUse.second));
929   }
930 }
931 
932 /// checkUndefinedButUsed - Check for undefined objects with internal linkage
933 /// or that are inline.
934 static void checkUndefinedButUsed(Sema &S) {
935   if (S.UndefinedButUsed.empty()) return;
936 
937   // Collect all the still-undefined entities with internal linkage.
938   SmallVector<std::pair<NamedDecl *, SourceLocation>, 16> Undefined;
939   S.getUndefinedButUsed(Undefined);
940   S.UndefinedButUsed.clear();
941   if (Undefined.empty()) return;
942 
943   for (const auto &Undef : Undefined) {
944     ValueDecl *VD = cast<ValueDecl>(Undef.first);
945     SourceLocation UseLoc = Undef.second;
946 
947     if (S.isExternalWithNoLinkageType(VD)) {
948       // C++ [basic.link]p8:
949       //   A type without linkage shall not be used as the type of a variable
950       //   or function with external linkage unless
951       //    -- the entity has C language linkage
952       //    -- the entity is not odr-used or is defined in the same TU
953       //
954       // As an extension, accept this in cases where the type is externally
955       // visible, since the function or variable actually can be defined in
956       // another translation unit in that case.
957       S.Diag(VD->getLocation(), isExternallyVisible(VD->getType()->getLinkage())
958                                     ? diag::ext_undefined_internal_type
959                                     : diag::err_undefined_internal_type)
960         << isa<VarDecl>(VD) << VD;
961     } else if (!VD->isExternallyVisible()) {
962       // FIXME: We can promote this to an error. The function or variable can't
963       // be defined anywhere else, so the program must necessarily violate the
964       // one definition rule.
965       bool IsImplicitBase = false;
966       if (const auto *BaseD = dyn_cast<FunctionDecl>(VD)) {
967         auto *DVAttr = BaseD->getAttr<OMPDeclareVariantAttr>();
968         if (DVAttr && !DVAttr->getTraitInfo().isExtensionActive(
969                           llvm::omp::TraitProperty::
970                               implementation_extension_disable_implicit_base)) {
971           const auto *Func = cast<FunctionDecl>(
972               cast<DeclRefExpr>(DVAttr->getVariantFuncRef())->getDecl());
973           IsImplicitBase = BaseD->isImplicit() &&
974                            Func->getIdentifier()->isMangledOpenMPVariantName();
975         }
976       }
977       if (!S.getLangOpts().OpenMP || !IsImplicitBase)
978         S.Diag(VD->getLocation(), diag::warn_undefined_internal)
979             << isa<VarDecl>(VD) << VD;
980     } else if (auto *FD = dyn_cast<FunctionDecl>(VD)) {
981       (void)FD;
982       assert(FD->getMostRecentDecl()->isInlined() &&
983              "used object requires definition but isn't inline or internal?");
984       // FIXME: This is ill-formed; we should reject.
985       S.Diag(VD->getLocation(), diag::warn_undefined_inline) << VD;
986     } else {
987       assert(cast<VarDecl>(VD)->getMostRecentDecl()->isInline() &&
988              "used var requires definition but isn't inline or internal?");
989       S.Diag(VD->getLocation(), diag::err_undefined_inline_var) << VD;
990     }
991     if (UseLoc.isValid())
992       S.Diag(UseLoc, diag::note_used_here);
993   }
994 }
995 
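// Merge #pragma weak identifiers recorded by the external source, if any, into
// WeakUndeclaredIdentifiers.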
996 void Sema::LoadExternalWeakUndeclaredIdentifiers() {
997   if (!ExternalSource)
998     return;
999 
1000   SmallVector<std::pair<IdentifierInfo *, WeakInfo>, 4> WeakIDs;
1001   ExternalSource->ReadWeakUndeclaredIdentifiers(WeakIDs);
1002   for (auto &WeakID : WeakIDs)
1003     (void)WeakUndeclaredIdentifiers[WeakID.first].insert(WeakID.second);
1004 }
1005 
1006 
1007 typedef llvm::DenseMap<const CXXRecordDecl*, bool> RecordCompleteMap;
1008 
1009 /// Returns true, if all methods and nested classes of the given
1010 /// CXXRecordDecl are defined in this translation unit.
1011 ///
1012 /// Should only be called from ActOnEndOfTranslationUnit so that all
1013 /// definitions are actually read.
1014 static bool MethodsAndNestedClassesComplete(const CXXRecordDecl *RD,
1015                                             RecordCompleteMap &MNCComplete) {
1016   RecordCompleteMap::iterator Cache = MNCComplete.find(RD);
1017   if (Cache != MNCComplete.end())
1018     return Cache->second;
1019   if (!RD->isCompleteDefinition())
1020     return false;
1021   bool Complete = true;
1022   for (DeclContext::decl_iterator I = RD->decls_begin(),
1023                                   E = RD->decls_end();
1024        I != E && Complete; ++I) {
1025     if (const CXXMethodDecl *M = dyn_cast<CXXMethodDecl>(*I))
1026       Complete = M->isDefined() || M->isDefaulted() ||
1027                  (M->isPureVirtual() && !isa<CXXDestructorDecl>(M));
1028     else if (const FunctionTemplateDecl *F = dyn_cast<FunctionTemplateDecl>(*I))
1029       // If the template function is marked as late template parsed at this
1030       // point, it has not been instantiated and therefore we have not
1031       // performed semantic analysis on it yet, so we cannot know if the type
1032       // can be considered complete.
1033       Complete = !F->getTemplatedDecl()->isLateTemplateParsed() &&
1034                   F->getTemplatedDecl()->isDefined();
1035     else if (const CXXRecordDecl *R = dyn_cast<CXXRecordDecl>(*I)) {
1036       if (R->isInjectedClassName())
1037         continue;
1038       if (R->hasDefinition())
1039         Complete = MethodsAndNestedClassesComplete(R->getDefinition(),
1040                                                    MNCComplete);
1041       else
1042         Complete = false;
1043     }
1044   }
1045   MNCComplete[RD] = Complete;
1046   return Complete;
1047 }
1048 
1049 /// Returns true, if the given CXXRecordDecl is fully defined in this
1050 /// translation unit, i.e. all methods are defined or pure virtual and all
1051 /// friends, friend functions and nested classes are fully defined in this
1052 /// translation unit.
1053 ///
1054 /// Should only be called from ActOnEndOfTranslationUnit so that all
1055 /// definitions are actually read.
1056 static bool IsRecordFullyDefined(const CXXRecordDecl *RD,
1057                                  RecordCompleteMap &RecordsComplete,
1058                                  RecordCompleteMap &MNCComplete) {
1059   RecordCompleteMap::iterator Cache = RecordsComplete.find(RD);
1060   if (Cache != RecordsComplete.end())
1061     return Cache->second;
1062   bool Complete = MethodsAndNestedClassesComplete(RD, MNCComplete);
1063   for (CXXRecordDecl::friend_iterator I = RD->friend_begin(),
1064                                       E = RD->friend_end();
1065        I != E && Complete; ++I) {
1066     // Check if friend classes and methods are complete.
1067     if (TypeSourceInfo *TSI = (*I)->getFriendType()) {
1068       // Friend classes are available as the TypeSourceInfo of the FriendDecl.
1069       if (CXXRecordDecl *FriendD = TSI->getType()->getAsCXXRecordDecl())
1070         Complete = MethodsAndNestedClassesComplete(FriendD, MNCComplete);
1071       else
1072         Complete = false;
1073     } else {
1074       // Friend functions are available through the NamedDecl of FriendDecl.
1075       if (const FunctionDecl *FD =
1076           dyn_cast<FunctionDecl>((*I)->getFriendDecl()))
1077         Complete = FD->isDefined();
1078       else
1079         // This is a template friend, give up.
1080         Complete = false;
1081     }
1082   }
1083   RecordsComplete[RD] = Complete;
1084   return Complete;
1085 }
1086 
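// Warn about local typedefs that were never referenced, including candidates
// read from the external source, then clear the candidate list.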
1087 void Sema::emitAndClearUnusedLocalTypedefWarnings() {
1088   if (ExternalSource)
1089     ExternalSource->ReadUnusedLocalTypedefNameCandidates(
1090         UnusedLocalTypedefNameCandidates);
1091   for (const TypedefNameDecl *TD : UnusedLocalTypedefNameCandidates) {
1092     if (TD->isReferenced())
1093       continue;
1094     Diag(TD->getLocation(), diag::warn_unused_local_typedef)
1095         << isa<TypeAliasDecl>(TD) << TD->getDeclName();
1096   }
1097   UnusedLocalTypedefNameCandidates.clear();
1098 }
1099 
1100 void Sema::ActOnStartOfTranslationUnit() {
1101   if (getLangOpts().CPlusPlusModules &&
1102       getLangOpts().getCompilingModule() == LangOptions::CMK_HeaderUnit)
1103     HandleStartOfHeaderUnit();
1104 }
1105 
1106 void Sema::ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind) {
1107   // No explicit actions are required at the end of the global module fragment.
1108   if (Kind == TUFragmentKind::Global)
1109     return;
1110 
1111   // Transfer late parsed template instantiations over to the pending template
1112   // instantiation list. During normal compilation, the late template parser
1113   // will be installed and instantiating these templates will succeed.
1114   //
1115   // If we are building a TU prefix for serialization, it is also safe to
1116   // transfer these over, even though they are not parsed. The end of the TU
1117   // should be outside of any eager template instantiation scope, so when this
1118   // AST is deserialized, these templates will not be parsed until the end of
1119   // the combined TU.
1120   PendingInstantiations.insert(PendingInstantiations.end(),
1121                                LateParsedInstantiations.begin(),
1122                                LateParsedInstantiations.end());
1123   LateParsedInstantiations.clear();
1124 
1125   // If DefineUsedVTables ends up marking any virtual member functions as
1126   // used, that might lead to more pending template instantiations, which we
1127   // then need to instantiate.
1128   DefineUsedVTables();
1129 
1130   // C++: Perform implicit template instantiations.
1131   //
1132   // FIXME: When we perform these implicit instantiations, we do not
1133   // carefully keep track of the point of instantiation (C++ [temp.point]).
1134   // This means that name lookup that occurs within the template
1135   // instantiation will always happen at the end of the translation unit,
1136   // so it will find some names that are not required to be found. This is
1137   // valid, but we could do better by diagnosing if an instantiation uses a
1138   // name that was not visible at its first point of instantiation.
1139   if (ExternalSource) {
1140     // Load pending instantiations from the external source.
1141     SmallVector<PendingImplicitInstantiation, 4> Pending;
1142     ExternalSource->ReadPendingInstantiations(Pending);
1143     for (auto PII : Pending)
1144       if (auto Func = dyn_cast<FunctionDecl>(PII.first))
1145         Func->setInstantiationIsPending(true);
1146     PendingInstantiations.insert(PendingInstantiations.begin(),
1147                                  Pending.begin(), Pending.end());
1148   }
1149 
1150   {
1151     llvm::TimeTraceScope TimeScope("PerformPendingInstantiations");
1152     PerformPendingInstantiations();
1153   }
1154 
1155   emitDeferredDiags();
1156 
1157   assert(LateParsedInstantiations.empty() &&
1158          "end of TU template instantiation should not create more "
1159          "late-parsed templates");
1160 
1161   // Report diagnostics for uncorrected delayed typos. Ideally all of them
1162   // should have been corrected by that time, but it is very hard to cover all
1163   // cases in practice.
1164   for (const auto &Typo : DelayedTypos) {
1165     // We pass an empty TypoCorrection to indicate no correction was performed.
1166     Typo.second.DiagHandler(TypoCorrection());
1167   }
1168   DelayedTypos.clear();
1169 }
1170 
1171 void Sema::ActOnEndOfTranslationUnit() {
1172   assert(DelayedDiagnostics.getCurrentPool() == nullptr
1173          && "reached end of translation unit with a pool attached?");
1174 
1175   // If code completion is enabled, don't perform any end-of-translation-unit
1176   // work.
1177   if (PP.isCodeCompletionEnabled())
1178     return;
1179 
1180   // Complete translation units and modules define vtables and perform implicit
1181   // instantiations. PCH files do not.
1182   if (TUKind != TU_Prefix) {
1183     ObjC().DiagnoseUseOfUnimplementedSelectors();
1184 
1185     ActOnEndOfTranslationUnitFragment(
1186         !ModuleScopes.empty() && ModuleScopes.back().Module->Kind ==
1187                                      Module::PrivateModuleFragment
1188             ? TUFragmentKind::Private
1189             : TUFragmentKind::Normal);
1190 
1191     if (LateTemplateParserCleanup)
1192       LateTemplateParserCleanup(OpaqueParser);
1193 
1194     CheckDelayedMemberExceptionSpecs();
1195   } else {
1196     // If we are building a TU prefix for serialization, it is safe to transfer
1197     // these over, even though they are not parsed. The end of the TU should be
1198     // outside of any eager template instantiation scope, so when this AST is
1199     // deserialized, these templates will not be parsed until the end of the
1200     // combined TU.
1201     PendingInstantiations.insert(PendingInstantiations.end(),
1202                                  LateParsedInstantiations.begin(),
1203                                  LateParsedInstantiations.end());
1204     LateParsedInstantiations.clear();
1205 
1206     if (LangOpts.PCHInstantiateTemplates) {
1207       llvm::TimeTraceScope TimeScope("PerformPendingInstantiations");
1208       PerformPendingInstantiations();
1209     }
1210   }
1211 
1212   DiagnoseUnterminatedPragmaAlignPack();
1213   DiagnoseUnterminatedPragmaAttribute();
1214   OpenMP().DiagnoseUnterminatedOpenMPDeclareTarget();
1215   DiagnosePrecisionLossInComplexDivision();
1216 
1217   // All delayed member exception specs should be checked or we end up accepting
1218   // incompatible declarations.
1219   assert(DelayedOverridingExceptionSpecChecks.empty());
1220   assert(DelayedEquivalentExceptionSpecChecks.empty());
1221 
1222   // All dllexport classes should have been processed already.
1223   assert(DelayedDllExportClasses.empty());
1224   assert(DelayedDllExportMemberFunctions.empty());
1225 
1226   // Remove file scoped decls that turned out to be used.
1227   UnusedFileScopedDecls.erase(
1228       std::remove_if(UnusedFileScopedDecls.begin(ExternalSource.get(), true),
1229                      UnusedFileScopedDecls.end(),
1230                      [this](const DeclaratorDecl *DD) {
1231                        return ShouldRemoveFromUnused(this, DD);
1232                      }),
1233       UnusedFileScopedDecls.end());
1234 
1235   if (TUKind == TU_Prefix) {
1236     // Translation unit prefixes don't need any of the checking below.
1237     if (!PP.isIncrementalProcessingEnabled())
1238       TUScope = nullptr;
1239     return;
1240   }
1241 
1242   // Check for #pragma weak identifiers that were never declared
1243   LoadExternalWeakUndeclaredIdentifiers();
1244   for (const auto &WeakIDs : WeakUndeclaredIdentifiers) {
1245     if (WeakIDs.second.empty())
1246       continue;
1247 
1248     Decl *PrevDecl = LookupSingleName(TUScope, WeakIDs.first, SourceLocation(),
1249                                       LookupOrdinaryName);
1250     if (PrevDecl != nullptr &&
1251         !(isa<FunctionDecl>(PrevDecl) || isa<VarDecl>(PrevDecl)))
1252       for (const auto &WI : WeakIDs.second)
1253         Diag(WI.getLocation(), diag::warn_attribute_wrong_decl_type)
1254             << "'weak'" << /*isRegularKeyword=*/0 << ExpectedVariableOrFunction;
1255     else
1256       for (const auto &WI : WeakIDs.second)
1257         Diag(WI.getLocation(), diag::warn_weak_identifier_undeclared)
1258             << WeakIDs.first;
1259   }
1260 
1261   if (LangOpts.CPlusPlus11 &&
1262       !Diags.isIgnored(diag::warn_delegating_ctor_cycle, SourceLocation()))
1263     CheckDelegatingCtorCycles();
1264 
1265   if (!Diags.hasErrorOccurred()) {
1266     if (ExternalSource)
1267       ExternalSource->ReadUndefinedButUsed(UndefinedButUsed);
1268     checkUndefinedButUsed(*this);
1269   }
1270 
1271   // A global-module-fragment is only permitted within a module unit.
1272   if (!ModuleScopes.empty() && ModuleScopes.back().Module->Kind ==
1273                                    Module::ExplicitGlobalModuleFragment) {
1274     Diag(ModuleScopes.back().BeginLoc,
1275          diag::err_module_declaration_missing_after_global_module_introducer);
1276   } else if (getLangOpts().getCompilingModule() ==
1277                  LangOptions::CMK_ModuleInterface &&
1278              // We can't use ModuleScopes here since ModuleScopes is always
1279              // empty if we're compiling the BMI.
1280              !getASTContext().getCurrentNamedModule()) {
1281     // If we are building a module interface unit, we should have seen the
1282     // module declaration.
1283     //
1284     // FIXME: Make a better guess as to where to put the module declaration.
1285     Diag(getSourceManager().getLocForStartOfFile(
1286              getSourceManager().getMainFileID()),
1287          diag::err_module_declaration_missing);
1288   }
1289 
1290   // Now we can decide whether the modules we're building need an initializer.
1291   if (Module *CurrentModule = getCurrentModule();
1292       CurrentModule && CurrentModule->isInterfaceOrPartition()) {
1293     auto DoesModNeedInit = [this](Module *M) {
1294       if (!getASTContext().getModuleInitializers(M).empty())
1295         return true;
1296       for (auto [Exported, _] : M->Exports)
1297         if (Exported->isNamedModuleInterfaceHasInit())
1298           return true;
1299       for (Module *I : M->Imports)
1300         if (I->isNamedModuleInterfaceHasInit())
1301           return true;
1302 
1303       return false;
1304     };
1305 
1306     CurrentModule->NamedModuleHasInit =
1307         DoesModNeedInit(CurrentModule) ||
1308         llvm::any_of(CurrentModule->submodules(),
1309                      [&](auto *SubM) { return DoesModNeedInit(SubM); });
1310   }
1311 
1312   if (TUKind == TU_ClangModule) {
1313     // If we are building a module, resolve all of the exported declarations
1314     // now.
1315     if (Module *CurrentModule = PP.getCurrentModule()) {
1316       ModuleMap &ModMap = PP.getHeaderSearchInfo().getModuleMap();
1317 
1318       SmallVector<Module *, 2> Stack;
1319       Stack.push_back(CurrentModule);
1320       while (!Stack.empty()) {
1321         Module *Mod = Stack.pop_back_val();
1322 
1323         // Resolve the exported declarations and conflicts.
1324         // FIXME: Actually complain, once we figure out how to teach the
1325         // diagnostic client to deal with complaints in the module map at this
1326         // point.
1327         ModMap.resolveExports(Mod, /*Complain=*/false);
1328         ModMap.resolveUses(Mod, /*Complain=*/false);
1329         ModMap.resolveConflicts(Mod, /*Complain=*/false);
1330 
1331         // Queue the submodules, so their exports will also be resolved.
1332         auto SubmodulesRange = Mod->submodules();
1333         Stack.append(SubmodulesRange.begin(), SubmodulesRange.end());
1334       }
1335     }
1336 
1337     // Warnings emitted in ActOnEndOfTranslationUnit() should be emitted for
1338     // modules when they are built, not every time they are used.
1339     emitAndClearUnusedLocalTypedefWarnings();
1340   }
1341 
1342   // C++ standard modules. Diagnose cases where a function is declared inline
1343   // in the module purview but has no definition before the end of the TU or
1344   // the start of a Private Module Fragment (if one is present).
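  // For example (illustrative sketch of a module interface unit):
  //   export module M;
  //   export inline void f();   // no definition before the end of the TU
  // 'f' is diagnosed below; a definition that appears only inside the private
  // module fragment is likewise diagnosed, with a note pointing at the PMF.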
1345   if (!PendingInlineFuncDecls.empty()) {
1346     for (auto *D : PendingInlineFuncDecls) {
1347       if (auto *FD = dyn_cast<FunctionDecl>(D)) {
1348         bool DefInPMF = false;
1349         if (auto *FDD = FD->getDefinition()) {
1350           DefInPMF = FDD->getOwningModule()->isPrivateModule();
1351           if (!DefInPMF)
1352             continue;
1353         }
1354         Diag(FD->getLocation(), diag::err_export_inline_not_defined)
1355             << DefInPMF;
1356         // If we have a PMF it should be at the end of the ModuleScopes.
1357         if (DefInPMF &&
1358             ModuleScopes.back().Module->Kind == Module::PrivateModuleFragment) {
1359           Diag(ModuleScopes.back().BeginLoc,
1360                diag::note_private_module_fragment);
1361         }
1362       }
1363     }
1364     PendingInlineFuncDecls.clear();
1365   }
1366 
1367   // C99 6.9.2p2:
1368   //   A declaration of an identifier for an object that has file
1369   //   scope without an initializer, and without a storage-class
1370   //   specifier or with the storage-class specifier static,
1371   //   constitutes a tentative definition. If a translation unit
1372   //   contains one or more tentative definitions for an identifier,
1373   //   and the translation unit contains no external definition for
1374   //   that identifier, then the behavior is exactly as if the
1375   //   translation unit contains a file scope declaration of that
1376   //   identifier, with the composite type as of the end of the
1377   //   translation unit, with an initializer equal to 0.
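  // For example, in C:
  //   int n;     /* tentative definition */
  //   int a[];   /* tentative definition with incomplete array type */
  // With no external definition in the TU, 'n' behaves as if defined with an
  // initializer of 0, and 'a' additionally has its length set to 1 below
  // (C99 6.9.2p5).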
1378   llvm::SmallSet<VarDecl *, 32> Seen;
1379   for (TentativeDefinitionsType::iterator
1380            T = TentativeDefinitions.begin(ExternalSource.get()),
1381            TEnd = TentativeDefinitions.end();
1382        T != TEnd; ++T) {
1383     VarDecl *VD = (*T)->getActingDefinition();
1384 
1385     // If the tentative definition was completed, getActingDefinition() returns
1386     // null. If we've already seen this variable before, insert()'s second
1387     // return value is false.
1388     if (!VD || VD->isInvalidDecl() || !Seen.insert(VD).second)
1389       continue;
1390 
1391     if (const IncompleteArrayType *ArrayT
1392         = Context.getAsIncompleteArrayType(VD->getType())) {
1393       // Set the length of the array to 1 (C99 6.9.2p5).
1394       Diag(VD->getLocation(), diag::warn_tentative_incomplete_array);
1395       llvm::APInt One(Context.getTypeSize(Context.getSizeType()), true);
1396       QualType T = Context.getConstantArrayType(
1397           ArrayT->getElementType(), One, nullptr, ArraySizeModifier::Normal, 0);
1398       VD->setType(T);
1399     } else if (RequireCompleteType(VD->getLocation(), VD->getType(),
1400                                    diag::err_tentative_def_incomplete_type))
1401       VD->setInvalidDecl();
1402 
1403     // No initialization is performed for a tentative definition.
1404     CheckCompleteVariableDeclaration(VD);
1405 
1406     // Notify the consumer that we've completed a tentative definition.
1407     if (!VD->isInvalidDecl())
1408       Consumer.CompleteTentativeDefinition(VD);
1409   }
1410 
1411   for (auto *D : ExternalDeclarations) {
1412     if (!D || D->isInvalidDecl() || D->getPreviousDecl() || !D->isUsed())
1413       continue;
1414 
1415     Consumer.CompleteExternalDeclaration(D);
1416   }
1417 
1418   if (LangOpts.HLSL)
1419     HLSL().DiagnoseAvailabilityViolations(
1420         getASTContext().getTranslationUnitDecl());
1421 
1422   // If there were errors, disable 'unused' warnings since they will mostly be
1423   // noise. Don't warn for a use from a module: either we should warn on all
1424   // file-scope declarations in modules or not at all, but whether the
1425   // declaration is used is immaterial.
1426   if (!Diags.hasErrorOccurred() && TUKind != TU_ClangModule) {
1427     // Output warning for unused file scoped decls.
1428     for (UnusedFileScopedDeclsType::iterator
1429              I = UnusedFileScopedDecls.begin(ExternalSource.get()),
1430              E = UnusedFileScopedDecls.end();
1431          I != E; ++I) {
1432       if (ShouldRemoveFromUnused(this, *I))
1433         continue;
1434 
1435       if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(*I)) {
1436         const FunctionDecl *DiagD;
1437         if (!FD->hasBody(DiagD))
1438           DiagD = FD;
1439         if (DiagD->isDeleted())
1440           continue; // Deleted functions are supposed to be unused.
1441         SourceRange DiagRange = DiagD->getLocation();
1442         if (const ASTTemplateArgumentListInfo *ASTTAL =
1443                 DiagD->getTemplateSpecializationArgsAsWritten())
1444           DiagRange.setEnd(ASTTAL->RAngleLoc);
1445         if (DiagD->isReferenced()) {
1446           if (isa<CXXMethodDecl>(DiagD))
1447             Diag(DiagD->getLocation(), diag::warn_unneeded_member_function)
1448                 << DiagD << DiagRange;
1449           else {
1450             if (FD->getStorageClass() == SC_Static &&
1451                 !FD->isInlineSpecified() &&
1452                 !SourceMgr.isInMainFile(
1453                    SourceMgr.getExpansionLoc(FD->getLocation())))
1454               Diag(DiagD->getLocation(),
1455                    diag::warn_unneeded_static_internal_decl)
1456                   << DiagD << DiagRange;
1457             else
1458               Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl)
1459                   << /*function=*/0 << DiagD << DiagRange;
1460           }
1461         } else if (!FD->isTargetMultiVersion() ||
1462                    FD->isTargetMultiVersionDefault()) {
1463           if (FD->getDescribedFunctionTemplate())
1464             Diag(DiagD->getLocation(), diag::warn_unused_template)
1465                 << /*function=*/0 << DiagD << DiagRange;
1466           else
1467             Diag(DiagD->getLocation(), isa<CXXMethodDecl>(DiagD)
1468                                            ? diag::warn_unused_member_function
1469                                            : diag::warn_unused_function)
1470                 << DiagD << DiagRange;
1471         }
1472       } else {
1473         const VarDecl *DiagD = cast<VarDecl>(*I)->getDefinition();
1474         if (!DiagD)
1475           DiagD = cast<VarDecl>(*I);
1476         SourceRange DiagRange = DiagD->getLocation();
1477         if (const auto *VTSD = dyn_cast<VarTemplateSpecializationDecl>(DiagD)) {
1478           if (const ASTTemplateArgumentListInfo *ASTTAL =
1479                   VTSD->getTemplateArgsAsWritten())
1480             DiagRange.setEnd(ASTTAL->RAngleLoc);
1481         }
1482         if (DiagD->isReferenced()) {
1483           Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl)
1484               << /*variable=*/1 << DiagD << DiagRange;
1485         } else if (DiagD->getDescribedVarTemplate()) {
1486           Diag(DiagD->getLocation(), diag::warn_unused_template)
1487               << /*variable=*/1 << DiagD << DiagRange;
1488         } else if (DiagD->getType().isConstQualified()) {
1489           const SourceManager &SM = SourceMgr;
1490           if (SM.getMainFileID() != SM.getFileID(DiagD->getLocation()) ||
1491               !PP.getLangOpts().IsHeaderFile)
1492             Diag(DiagD->getLocation(), diag::warn_unused_const_variable)
1493                 << DiagD << DiagRange;
1494         } else {
1495           Diag(DiagD->getLocation(), diag::warn_unused_variable)
1496               << DiagD << DiagRange;
1497         }
1498       }
1499     }
1500 
1501     emitAndClearUnusedLocalTypedefWarnings();
1502   }
1503 
1504   if (!Diags.isIgnored(diag::warn_unused_private_field, SourceLocation())) {
1505     // FIXME: Load additional unused private field candidates from the external
1506     // source.
1507     RecordCompleteMap RecordsComplete;
1508     RecordCompleteMap MNCComplete;
1509     for (const NamedDecl *D : UnusedPrivateFields) {
1510       const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D->getDeclContext());
1511       if (RD && !RD->isUnion() &&
1512           IsRecordFullyDefined(RD, RecordsComplete, MNCComplete)) {
1513         Diag(D->getLocation(), diag::warn_unused_private_field)
1514               << D->getDeclName();
1515       }
1516     }
1517   }
1518 
1519   if (!Diags.isIgnored(diag::warn_mismatched_delete_new, SourceLocation())) {
1520     if (ExternalSource)
1521       ExternalSource->ReadMismatchingDeleteExpressions(DeleteExprs);
1522     for (const auto &DeletedFieldInfo : DeleteExprs) {
1523       for (const auto &DeleteExprLoc : DeletedFieldInfo.second) {
1524         AnalyzeDeleteExprMismatch(DeletedFieldInfo.first, DeleteExprLoc.first,
1525                                   DeleteExprLoc.second);
1526       }
1527     }
1528   }
1529 
1530   AnalysisWarnings.IssueWarnings(Context.getTranslationUnitDecl());
1531 
1532   if (Context.hasAnyFunctionEffects())
1533     performFunctionEffectAnalysis(Context.getTranslationUnitDecl());
1534 
1535   // Check we've noticed that we're no longer parsing the initializer for every
1536   // variable. If we miss cases, then at best we have a performance issue and
1537   // at worst a rejects-valid bug.
1538   assert(ParsingInitForAutoVars.empty() &&
1539          "Didn't unmark var as having its initializer parsed");
1540 
1541   if (!PP.isIncrementalProcessingEnabled())
1542     TUScope = nullptr;
1543 }
1544 
1545 
1546 //===----------------------------------------------------------------------===//
1547 // Helper functions.
1548 //===----------------------------------------------------------------------===//
1549 
1550 DeclContext *Sema::getFunctionLevelDeclContext(bool AllowLambda) const {
1551   DeclContext *DC = CurContext;
1552 
1553   while (true) {
1554     if (isa<BlockDecl>(DC) || isa<EnumDecl>(DC) || isa<CapturedDecl>(DC) ||
1555         isa<RequiresExprBodyDecl>(DC)) {
1556       DC = DC->getParent();
1557     } else if (!AllowLambda && isa<CXXMethodDecl>(DC) &&
1558                cast<CXXMethodDecl>(DC)->getOverloadedOperator() == OO_Call &&
1559                cast<CXXRecordDecl>(DC->getParent())->isLambda()) {
1560       DC = DC->getParent()->getParent();
1561     } else break;
1562   }
1563 
1564   return DC;
1565 }
1566 
1567 /// getCurFunctionDecl - If inside of a function body, this returns a pointer
1568 /// to the function decl for the function being parsed.  If we're currently
1569 /// in a 'block', this returns the containing context.
1570 FunctionDecl *Sema::getCurFunctionDecl(bool AllowLambda) const {
1571   DeclContext *DC = getFunctionLevelDeclContext(AllowLambda);
1572   return dyn_cast<FunctionDecl>(DC);
1573 }
1574 
1575 ObjCMethodDecl *Sema::getCurMethodDecl() {
1576   DeclContext *DC = getFunctionLevelDeclContext();
1577   while (isa<RecordDecl>(DC))
1578     DC = DC->getParent();
1579   return dyn_cast<ObjCMethodDecl>(DC);
1580 }
1581 
1582 NamedDecl *Sema::getCurFunctionOrMethodDecl() const {
1583   DeclContext *DC = getFunctionLevelDeclContext();
1584   if (isa<ObjCMethodDecl>(DC) || isa<FunctionDecl>(DC))
1585     return cast<NamedDecl>(DC);
1586   return nullptr;
1587 }
1588 
1589 LangAS Sema::getDefaultCXXMethodAddrSpace() const {
1590   if (getLangOpts().OpenCL)
1591     return getASTContext().getDefaultOpenCLPointeeAddrSpace();
1592   return LangAS::Default;
1593 }
1594 
1595 void Sema::EmitDiagnostic(unsigned DiagID, const DiagnosticBuilder &DB) {
1596   // FIXME: It doesn't make sense to me that DiagID is an incoming argument here
1597   // and yet we also use the current diag ID on the DiagnosticsEngine. This has
1598   // been made more painfully obvious by the refactor that introduced this
1599   // function, but it is possible that the incoming argument can be
1600   // eliminated. If it truly cannot be (for example, there is some reentrancy
1601   // issue I am not seeing yet), then there should at least be a clarifying
1602   // comment somewhere.
1603   Diagnostic DiagInfo(&Diags, DB);
1604   if (std::optional<TemplateDeductionInfo *> Info = isSFINAEContext()) {
1605     switch (DiagnosticIDs::getDiagnosticSFINAEResponse(DiagInfo.getID())) {
1606     case DiagnosticIDs::SFINAE_Report:
1607       // We'll report the diagnostic below.
1608       break;
1609 
1610     case DiagnosticIDs::SFINAE_SubstitutionFailure:
1611       // Count this failure so that we know that template argument deduction
1612       // has failed.
1613       ++NumSFINAEErrors;
1614 
1615       // Make a copy of this suppressed diagnostic and store it with the
1616       // template-deduction information.
1617       if (*Info && !(*Info)->hasSFINAEDiagnostic()) {
1618         (*Info)->addSFINAEDiagnostic(DiagInfo.getLocation(),
1619                        PartialDiagnostic(DiagInfo, Context.getDiagAllocator()));
1620       }
1621 
1622       Diags.setLastDiagnosticIgnored(true);
1623       return;
1624 
1625     case DiagnosticIDs::SFINAE_AccessControl: {
1626       // Per C++ Core Issue 1170, access control is part of SFINAE.
1627       // Additionally, the AccessCheckingSFINAE flag can be used to temporarily
1628       // make access control a part of SFINAE for the purposes of checking
1629       // type traits.
1630       if (!AccessCheckingSFINAE && !getLangOpts().CPlusPlus11)
1631         break;
1632 
1633       SourceLocation Loc = DiagInfo.getLocation();
1634 
1635       // Suppress this diagnostic.
1636       ++NumSFINAEErrors;
1637 
1638       // Make a copy of this suppressed diagnostic and store it with the
1639       // template-deduction information.
1640       if (*Info && !(*Info)->hasSFINAEDiagnostic()) {
1641         (*Info)->addSFINAEDiagnostic(DiagInfo.getLocation(),
1642                        PartialDiagnostic(DiagInfo, Context.getDiagAllocator()));
1643       }
1644 
1645       Diags.setLastDiagnosticIgnored(true);
1646 
1647       // Now produce a C++98 compatibility warning.
1648       Diag(Loc, diag::warn_cxx98_compat_sfinae_access_control);
1649 
1650       // The last diagnostic which Sema produced was ignored. Suppress any
1651       // notes attached to it.
1652       Diags.setLastDiagnosticIgnored(true);
1653       return;
1654     }
1655 
1656     case DiagnosticIDs::SFINAE_Suppress:
1657       // Make a copy of this suppressed diagnostic and store it with the
1658       // template-deduction information.
1659       if (*Info) {
1660         (*Info)->addSuppressedDiagnostic(DiagInfo.getLocation(),
1661                        PartialDiagnostic(DiagInfo, Context.getDiagAllocator()));
1662       }
1663 
1664       // Suppress this diagnostic.
1665       Diags.setLastDiagnosticIgnored(true);
1666       return;
1667     }
1668   }
1669 
1670   // Copy the diagnostic printing policy over the ASTContext printing policy.
1671   // TODO: Stop doing that.  See: https://reviews.llvm.org/D45093#1090292
1672   Context.setPrintingPolicy(getPrintingPolicy());
1673 
1674   // Emit the diagnostic.
1675   if (!Diags.EmitDiagnostic(DB))
1676     return;
1677 
1678   // If this is not a note, and we're in a template instantiation
1679   // that is different from the last template instantiation where
1680   // we emitted an error, print a template instantiation
1681   // backtrace.
1682   if (!Diags.getDiagnosticIDs()->isNote(DiagID))
1683     PrintContextStack();
1684 }
1685 
1686 bool Sema::hasUncompilableErrorOccurred() const {
1687   if (getDiagnostics().hasUncompilableErrorOccurred())
1688     return true;
1689   auto *FD = dyn_cast<FunctionDecl>(CurContext);
1690   if (!FD)
1691     return false;
1692   auto Loc = DeviceDeferredDiags.find(FD);
1693   if (Loc == DeviceDeferredDiags.end())
1694     return false;
1695   for (auto PDAt : Loc->second) {
1696     if (Diags.getDiagnosticIDs()->isDefaultMappingAsError(
1697             PDAt.second.getDiagID()))
1698       return true;
1699   }
1700   return false;
1701 }
1702 
1703 // Print notes showing how we can reach FD starting from an a priori
1704 // known-callable function.
1705 static void emitCallStackNotes(Sema &S, const FunctionDecl *FD) {
1706   auto FnIt = S.CUDA().DeviceKnownEmittedFns.find(FD);
1707   while (FnIt != S.CUDA().DeviceKnownEmittedFns.end()) {
1708     // Respect error limit.
1709     if (S.Diags.hasFatalErrorOccurred())
1710       return;
1711     DiagnosticBuilder Builder(
1712         S.Diags.Report(FnIt->second.Loc, diag::note_called_by));
1713     Builder << FnIt->second.FD;
1714     FnIt = S.CUDA().DeviceKnownEmittedFns.find(FnIt->second.FD);
1715   }
1716 }
1717 
1718 namespace {
1719 
1720 /// Helper class that emits deferred diagnostic messages when an entity that
1721 /// directly or indirectly uses the function containing those diagnostic
1722 /// messages is known to be emitted.
1723 ///
1724 /// During parsing of the AST, certain diagnostic messages are recorded as
1725 /// deferred diagnostics since it is unknown whether the functions containing
1726 /// such diagnostics will be emitted. A list of potentially emitted functions,
1727 /// and of variables that may trigger the emission of functions, is also
1728 /// recorded. DeferredDiagnosticsEmitter recursively visits the functions used
1729 /// by each such function to emit the deferred diagnostics.
1730 ///
1731 /// During the visit, certain OpenMP directives or initializer of variables
1732 /// with certain OpenMP attributes will cause subsequent visiting of any
1733 /// functions enter a state which is called OpenMP device context in this
1734 /// implementation. The state is exited when the directive or initializer is
1735 /// exited. This state can change the emission states of subsequent uses
1736 /// of functions.
1737 ///
1738 /// Conceptually the functions or variables to be visited form a use graph
1739 /// where the parent node uses the child node. At any point of the visit,
1740 /// the tree nodes traversed from the tree root to the current node form a use
1741 /// stack. The emission state of the current node depends on two factors:
1742 ///    1. the emission state of the root node
1743 ///    2. whether the current node is in OpenMP device context
1744 /// If the function is decided to be emitted, its contained deferred diagnostics
1745 /// are emitted, together with the information about the use stack.
1746 ///
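///
/// For example (illustrative scenario): if a function K that is known to be
/// emitted calls f, and f calls g, then while visiting g the use stack is
/// [K, f, g]; any deferred diagnostics recorded for g are emitted because the
/// root K is emitted, and the use stack provides the "called by" notes
/// attached to them.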
1747 class DeferredDiagnosticsEmitter
1748     : public UsedDeclVisitor<DeferredDiagnosticsEmitter> {
1749 public:
1750   typedef UsedDeclVisitor<DeferredDiagnosticsEmitter> Inherited;
1751 
1752   // Whether the function is already in the current use-path.
1753   llvm::SmallPtrSet<CanonicalDeclPtr<Decl>, 4> InUsePath;
1754 
1755   // The current use-path.
1756   llvm::SmallVector<CanonicalDeclPtr<FunctionDecl>, 4> UsePath;
1757 
1758   // Whether the visiting of the function has been done. Done[0] is for the
1759   // case not in OpenMP device context. Done[1] is for the case in OpenMP
1760   // device context. We need two sets because diagnostics emission may be
1761   // different depending on whether it is in OpenMP device context.
1762   llvm::SmallPtrSet<CanonicalDeclPtr<Decl>, 4> DoneMap[2];
1763 
1764   // Emission state of the root node of the current use graph.
1765   bool ShouldEmitRootNode;
1766 
1767   // Current OpenMP device context level. It is initialized to 0 and each
1768   // entering of device context increases it by 1 and each exit decreases
1769   // it by 1. Non-zero value indicates it is currently in device context.
1770   unsigned InOMPDeviceContext;
1771 
1772   DeferredDiagnosticsEmitter(Sema &S)
1773       : Inherited(S), ShouldEmitRootNode(false), InOMPDeviceContext(0) {}
1774 
1775   bool shouldVisitDiscardedStmt() const { return false; }
1776 
1777   void VisitOMPTargetDirective(OMPTargetDirective *Node) {
1778     ++InOMPDeviceContext;
1779     Inherited::VisitOMPTargetDirective(Node);
1780     --InOMPDeviceContext;
1781   }
1782 
1783   void visitUsedDecl(SourceLocation Loc, Decl *D) {
1784     if (isa<VarDecl>(D))
1785       return;
1786     if (auto *FD = dyn_cast<FunctionDecl>(D))
1787       checkFunc(Loc, FD);
1788     else
1789       Inherited::visitUsedDecl(Loc, D);
1790   }
1791 
1792   void checkVar(VarDecl *VD) {
1793     assert(VD->isFileVarDecl() &&
1794            "Should only check file-scope variables");
1795     if (auto *Init = VD->getInit()) {
1796       auto DevTy = OMPDeclareTargetDeclAttr::getDeviceType(VD);
1797       bool IsDev = DevTy && (*DevTy == OMPDeclareTargetDeclAttr::DT_NoHost ||
1798                              *DevTy == OMPDeclareTargetDeclAttr::DT_Any);
1799       if (IsDev)
1800         ++InOMPDeviceContext;
1801       this->Visit(Init);
1802       if (IsDev)
1803         --InOMPDeviceContext;
1804     }
1805   }
1806 
1807   void checkFunc(SourceLocation Loc, FunctionDecl *FD) {
1808     auto &Done = DoneMap[InOMPDeviceContext > 0 ? 1 : 0];
1809     FunctionDecl *Caller = UsePath.empty() ? nullptr : UsePath.back();
1810     if ((!ShouldEmitRootNode && !S.getLangOpts().OpenMP && !Caller) ||
1811         S.shouldIgnoreInHostDeviceCheck(FD) || InUsePath.count(FD))
1812       return;
1813     // Finalize analysis of OpenMP-specific constructs.
1814     if (Caller && S.LangOpts.OpenMP && UsePath.size() == 1 &&
1815         (ShouldEmitRootNode || InOMPDeviceContext))
1816       S.OpenMP().finalizeOpenMPDelayedAnalysis(Caller, FD, Loc);
1817     if (Caller)
1818       S.CUDA().DeviceKnownEmittedFns[FD] = {Caller, Loc};
1819     // Always emit deferred diagnostics for the direct users. This does not
1820     // lead to an explosion of diagnostics since each user is visited at most
1821     // twice.
1822     if (ShouldEmitRootNode || InOMPDeviceContext)
1823       emitDeferredDiags(FD, Caller);
1824     // Do not revisit a function if the function body has been completely
1825     // visited before.
1826     if (!Done.insert(FD).second)
1827       return;
1828     InUsePath.insert(FD);
1829     UsePath.push_back(FD);
1830     if (auto *S = FD->getBody()) {
1831       this->Visit(S);
1832     }
1833     UsePath.pop_back();
1834     InUsePath.erase(FD);
1835   }
1836 
1837   void checkRecordedDecl(Decl *D) {
1838     if (auto *FD = dyn_cast<FunctionDecl>(D)) {
1839       ShouldEmitRootNode = S.getEmissionStatus(FD, /*Final=*/true) ==
1840                            Sema::FunctionEmissionStatus::Emitted;
1841       checkFunc(SourceLocation(), FD);
1842     } else
1843       checkVar(cast<VarDecl>(D));
1844   }
1845 
1846   // Emit any deferred diagnostics for FD
1847   void emitDeferredDiags(FunctionDecl *FD, bool ShowCallStack) {
1848     auto It = S.DeviceDeferredDiags.find(FD);
1849     if (It == S.DeviceDeferredDiags.end())
1850       return;
1851     bool HasWarningOrError = false;
1852     bool FirstDiag = true;
1853     for (PartialDiagnosticAt &PDAt : It->second) {
1854       // Respect error limit.
1855       if (S.Diags.hasFatalErrorOccurred())
1856         return;
1857       const SourceLocation &Loc = PDAt.first;
1858       const PartialDiagnostic &PD = PDAt.second;
1859       HasWarningOrError |=
1860           S.getDiagnostics().getDiagnosticLevel(PD.getDiagID(), Loc) >=
1861           DiagnosticsEngine::Warning;
1862       {
1863         DiagnosticBuilder Builder(S.Diags.Report(Loc, PD.getDiagID()));
1864         PD.Emit(Builder);
1865       }
1866       // Emit the note on the first diagnostic, in case hitting the diagnostic
1867       // limit later prevents the note from being emitted.
1868       if (FirstDiag && HasWarningOrError && ShowCallStack) {
1869         emitCallStackNotes(S, FD);
1870         FirstDiag = false;
1871       }
1872     }
1873   }
1874 };
1875 } // namespace
1876 
1877 void Sema::emitDeferredDiags() {
1878   if (ExternalSource)
1879     ExternalSource->ReadDeclsToCheckForDeferredDiags(
1880         DeclsToCheckForDeferredDiags);
1881 
1882   if ((DeviceDeferredDiags.empty() && !LangOpts.OpenMP) ||
1883       DeclsToCheckForDeferredDiags.empty())
1884     return;
1885 
1886   DeferredDiagnosticsEmitter DDE(*this);
1887   for (auto *D : DeclsToCheckForDeferredDiags)
1888     DDE.checkRecordedDecl(D);
1889 }
1890 
1891 // In CUDA, there are some constructs which may appear in semantically-valid
1892 // code, but trigger errors if we ever generate code for the function in which
1893 // they appear.  Essentially every construct you're not allowed to use on the
1894 // device falls into this category, because you are allowed to use these
1895 // constructs in a __host__ __device__ function, but only if that function is
1896 // never codegen'ed on the device.
1897 //
1898 // To handle semantic checking for these constructs, we keep track of the set of
1899 // functions we know will be emitted, either because we could tell a priori that
1900 // they would be emitted, or because they were transitively called by a
1901 // known-emitted function.
1902 //
1903 // We also keep a partial call graph of which not-known-emitted functions call
1904 // which other not-known-emitted functions.
1905 //
1906 // When we see something which is illegal if the current function is emitted
1907 // (usually by way of DiagIfDeviceCode, DiagIfHostCode, or
1908 // CheckCall), we first check if the current function is known-emitted.  If
1909 // so, we immediately output the diagnostic.
1910 //
1911 // Otherwise, we "defer" the diagnostic.  It sits in Sema::DeviceDeferredDiags
1912 // until we discover that the function is known-emitted, at which point we take
1913 // it out of this map and emit the diagnostic.
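//
// Illustrative CUDA example (not code from this file):
//
//   __host__ __device__ void hd() { throw 0; }  // 'throw' is unsupported in
//                                               // device code
//   __global__ void kernel() { hd(); }
//
// The error inside hd() is deferred when hd() is parsed; once kernel() makes
// hd() known-emitted on the device side, the deferred diagnostic is emitted
// together with a "called by" note naming kernel().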
1914 
1915 Sema::SemaDiagnosticBuilder::SemaDiagnosticBuilder(Kind K, SourceLocation Loc,
1916                                                    unsigned DiagID,
1917                                                    const FunctionDecl *Fn,
1918                                                    Sema &S)
1919     : S(S), Loc(Loc), DiagID(DiagID), Fn(Fn),
1920       ShowCallStack(K == K_ImmediateWithCallStack || K == K_Deferred) {
1921   switch (K) {
1922   case K_Nop:
1923     break;
1924   case K_Immediate:
1925   case K_ImmediateWithCallStack:
1926     ImmediateDiag.emplace(
1927         ImmediateDiagBuilder(S.Diags.Report(Loc, DiagID), S, DiagID));
1928     break;
1929   case K_Deferred:
1930     assert(Fn && "Must have a function to attach the deferred diag to.");
1931     auto &Diags = S.DeviceDeferredDiags[Fn];
1932     PartialDiagId.emplace(Diags.size());
1933     Diags.emplace_back(Loc, S.PDiag(DiagID));
1934     break;
1935   }
1936 }
1937 
1938 Sema::SemaDiagnosticBuilder::SemaDiagnosticBuilder(SemaDiagnosticBuilder &&D)
1939     : S(D.S), Loc(D.Loc), DiagID(D.DiagID), Fn(D.Fn),
1940       ShowCallStack(D.ShowCallStack), ImmediateDiag(D.ImmediateDiag),
1941       PartialDiagId(D.PartialDiagId) {
1942   // Clean the previous diagnostics.
1943   D.ShowCallStack = false;
1944   D.ImmediateDiag.reset();
1945   D.PartialDiagId.reset();
1946 }
1947 
1948 Sema::SemaDiagnosticBuilder::~SemaDiagnosticBuilder() {
1949   if (ImmediateDiag) {
1950     // Emit our diagnostic and, if it was a warning or error, output a callstack
1951     // if Fn isn't a priori known-emitted.
1952     bool IsWarningOrError = S.getDiagnostics().getDiagnosticLevel(
1953                                 DiagID, Loc) >= DiagnosticsEngine::Warning;
1954     ImmediateDiag.reset(); // Emit the immediate diag.
1955     if (IsWarningOrError && ShowCallStack)
1956       emitCallStackNotes(S, Fn);
1957   } else {
1958     assert((!PartialDiagId || ShowCallStack) &&
1959            "Must always show call stack for deferred diags.");
1960   }
1961 }
1962 
1963 Sema::SemaDiagnosticBuilder
1964 Sema::targetDiag(SourceLocation Loc, unsigned DiagID, const FunctionDecl *FD) {
1965   FD = FD ? FD : getCurFunctionDecl();
1966   if (LangOpts.OpenMP)
1967     return LangOpts.OpenMPIsTargetDevice
1968                ? OpenMP().diagIfOpenMPDeviceCode(Loc, DiagID, FD)
1969                : OpenMP().diagIfOpenMPHostCode(Loc, DiagID, FD);
1970   if (getLangOpts().CUDA)
1971     return getLangOpts().CUDAIsDevice ? CUDA().DiagIfDeviceCode(Loc, DiagID)
1972                                       : CUDA().DiagIfHostCode(Loc, DiagID);
1973 
1974   if (getLangOpts().SYCLIsDevice)
1975     return SYCL().DiagIfDeviceCode(Loc, DiagID);
1976 
1977   return SemaDiagnosticBuilder(SemaDiagnosticBuilder::K_Immediate, Loc, DiagID,
1978                                FD, *this);
1979 }
1980 
1981 void Sema::checkTypeSupport(QualType Ty, SourceLocation Loc, ValueDecl *D) {
1982   if (isUnevaluatedContext() || Ty.isNull())
1983     return;
1984 
1985   // The original idea behind the checkTypeSupport function is that unused
1986   // declarations can be replaced with an array of bytes of the same size during
1987   // codegen; such a replacement is not possible for types without a constant
1988   // byte size, like zero-length arrays. So, do a deep check for SYCL.
1989   if (D && LangOpts.SYCLIsDevice) {
1990     llvm::DenseSet<QualType> Visited;
1991     SYCL().deepTypeCheckForDevice(Loc, Visited, D);
1992   }
1993 
1994   Decl *C = cast<Decl>(getCurLexicalContext());
1995 
1996   // Memcpy operations for structs containing a member with an unsupported
1997   // type are OK, though.
1998   if (const auto *MD = dyn_cast<CXXMethodDecl>(C)) {
1999     if ((MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) &&
2000         MD->isTrivial())
2001       return;
2002 
2003     if (const auto *Ctor = dyn_cast<CXXConstructorDecl>(MD))
2004       if (Ctor->isCopyOrMoveConstructor() && Ctor->isTrivial())
2005         return;
2006   }
2007 
2008   // Try to associate errors with the lexical context, if that is a function, or
2009   // the value declaration otherwise.
2010   const FunctionDecl *FD = isa<FunctionDecl>(C)
2011                                ? cast<FunctionDecl>(C)
2012                                : dyn_cast_or_null<FunctionDecl>(D);
2013 
2014   auto CheckDeviceType = [&](QualType Ty) {
2015     if (Ty->isDependentType())
2016       return;
2017 
2018     if (Ty->isBitIntType()) {
2019       if (!Context.getTargetInfo().hasBitIntType()) {
2020         PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type);
2021         if (D)
2022           PD << D;
2023         else
2024           PD << "expression";
2025         targetDiag(Loc, PD, FD)
2026             << false /*show bit size*/ << 0 /*bitsize*/ << false /*return*/
2027             << Ty << Context.getTargetInfo().getTriple().str();
2028       }
2029       return;
2030     }
2031 
2032     // Check whether a 128-bit 'long double' uses floating-point semantics
2033     // (IEEE quad vs. PPC double-double) that the target does not support.
2034     bool LongDoubleMismatched = false;
2035     if (Ty->isRealFloatingType() && Context.getTypeSize(Ty) == 128) {
2036       const llvm::fltSemantics &Sem = Context.getFloatTypeSemantics(Ty);
2037       if ((&Sem != &llvm::APFloat::PPCDoubleDouble() &&
2038            !Context.getTargetInfo().hasFloat128Type()) ||
2039           (&Sem == &llvm::APFloat::PPCDoubleDouble() &&
2040            !Context.getTargetInfo().hasIbm128Type()))
2041         LongDoubleMismatched = true;
2042     }
2043 
2044     if ((Ty->isFloat16Type() && !Context.getTargetInfo().hasFloat16Type()) ||
2045         (Ty->isFloat128Type() && !Context.getTargetInfo().hasFloat128Type()) ||
2046         (Ty->isIbm128Type() && !Context.getTargetInfo().hasIbm128Type()) ||
2047         (Ty->isIntegerType() && Context.getTypeSize(Ty) == 128 &&
2048          !Context.getTargetInfo().hasInt128Type()) ||
2049         (Ty->isBFloat16Type() && !Context.getTargetInfo().hasBFloat16Type() &&
2050          !LangOpts.CUDAIsDevice) ||
2051         LongDoubleMismatched) {
2052       PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type);
2053       if (D)
2054         PD << D;
2055       else
2056         PD << "expression";
2057 
2058       if (targetDiag(Loc, PD, FD)
2059           << true /*show bit size*/
2060           << static_cast<unsigned>(Context.getTypeSize(Ty)) << Ty
2061           << false /*return*/ << Context.getTargetInfo().getTriple().str()) {
2062         if (D)
2063           D->setInvalidDecl();
2064       }
2065       if (D)
2066         targetDiag(D->getLocation(), diag::note_defined_here, FD) << D;
2067     }
2068   };
2069 
2070   auto CheckType = [&](QualType Ty, bool IsRetTy = false) {
2071     if (LangOpts.SYCLIsDevice ||
2072         (LangOpts.OpenMP && LangOpts.OpenMPIsTargetDevice) ||
2073         LangOpts.CUDAIsDevice)
2074       CheckDeviceType(Ty);
2075 
2076     QualType UnqualTy = Ty.getCanonicalType().getUnqualifiedType();
2077     const TargetInfo &TI = Context.getTargetInfo();
2078     if (!TI.hasLongDoubleType() && UnqualTy == Context.LongDoubleTy) {
2079       PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type);
2080       if (D)
2081         PD << D;
2082       else
2083         PD << "expression";
2084 
2085       if (Diag(Loc, PD, FD)
2086           << false /*show bit size*/ << 0 << Ty << false /*return*/
2087           << TI.getTriple().str()) {
2088         if (D)
2089           D->setInvalidDecl();
2090       }
2091       if (D)
2092         targetDiag(D->getLocation(), diag::note_defined_here, FD) << D;
2093     }
2094 
2095     bool IsDouble = UnqualTy == Context.DoubleTy;
2096     bool IsFloat = UnqualTy == Context.FloatTy;
2097     if (IsRetTy && !TI.hasFPReturn() && (IsDouble || IsFloat)) {
2098       PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type);
2099       if (D)
2100         PD << D;
2101       else
2102         PD << "expression";
2103 
2104       if (Diag(Loc, PD, FD)
2105           << false /*show bit size*/ << 0 << Ty << true /*return*/
2106           << TI.getTriple().str()) {
2107         if (D)
2108           D->setInvalidDecl();
2109       }
2110       if (D)
2111         targetDiag(D->getLocation(), diag::note_defined_here, FD) << D;
2112     }
2113 
2114     if (TI.hasRISCVVTypes() && Ty->isRVVSizelessBuiltinType() && FD) {
2115       llvm::StringMap<bool> CallerFeatureMap;
2116       Context.getFunctionFeatureMap(CallerFeatureMap, FD);
2117       RISCV().checkRVVTypeSupport(Ty, Loc, D, CallerFeatureMap);
2118     }
2119 
2120     // Don't allow SVE types in functions without a SVE target.
2121     if (Ty->isSVESizelessBuiltinType() && FD) {
2122       llvm::StringMap<bool> CallerFeatureMap;
2123       Context.getFunctionFeatureMap(CallerFeatureMap, FD);
2124       if (!Builtin::evaluateRequiredTargetFeatures("sve", CallerFeatureMap)) {
2125         if (!Builtin::evaluateRequiredTargetFeatures("sme", CallerFeatureMap))
2126           Diag(Loc, diag::err_sve_vector_in_non_sve_target) << Ty;
2127         else if (!IsArmStreamingFunction(FD,
2128                                          /*IncludeLocallyStreaming=*/true)) {
2129           Diag(Loc, diag::err_sve_vector_in_non_streaming_function) << Ty;
2130         }
2131       }
2132     }
2133   };
2134 
2135   CheckType(Ty);
2136   if (const auto *FPTy = dyn_cast<FunctionProtoType>(Ty)) {
2137     for (const auto &ParamTy : FPTy->param_types())
2138       CheckType(ParamTy);
2139     CheckType(FPTy->getReturnType(), /*IsRetTy=*/true);
2140   }
2141   if (const auto *FNPTy = dyn_cast<FunctionNoProtoType>(Ty))
2142     CheckType(FNPTy->getReturnType(), /*IsRetTy=*/true);
2143 }
2144 
2145 bool Sema::findMacroSpelling(SourceLocation &locref, StringRef name) {
2146   SourceLocation loc = locref;
2147   if (!loc.isMacroID()) return false;
2148 
2149   // There's no good way right now to look at the intermediate
2150   // expansions, so just jump to the expansion location.
2151   loc = getSourceManager().getExpansionLoc(loc);
2152 
2153   // If that's written with the name, stop here.
2154   SmallString<16> buffer;
2155   if (getPreprocessor().getSpelling(loc, buffer) == name) {
2156     locref = loc;
2157     return true;
2158   }
2159   return false;
2160 }
2161 
2162 Scope *Sema::getScopeForContext(DeclContext *Ctx) {
2163 
2164   if (!Ctx)
2165     return nullptr;
2166 
2167   Ctx = Ctx->getPrimaryContext();
2168   for (Scope *S = getCurScope(); S; S = S->getParent()) {
2169     // Ignore scopes that cannot have declarations. This is important for
2170     // out-of-line definitions of static class members.
2171     if (S->getFlags() & (Scope::DeclScope | Scope::TemplateParamScope))
2172       if (DeclContext *Entity = S->getEntity())
2173         if (Ctx == Entity->getPrimaryContext())
2174           return S;
2175   }
2176 
2177   return nullptr;
2178 }
2179 
2180 /// Enter a new function scope
2181 void Sema::PushFunctionScope() {
2182   if (FunctionScopes.empty() && CachedFunctionScope) {
2183     // Use CachedFunctionScope to avoid allocating memory when possible.
2184     CachedFunctionScope->Clear();
2185     FunctionScopes.push_back(CachedFunctionScope.release());
2186   } else {
2187     FunctionScopes.push_back(new FunctionScopeInfo(getDiagnostics()));
2188   }
2189   if (LangOpts.OpenMP)
2190     OpenMP().pushOpenMPFunctionRegion();
2191 }
2192 
2193 void Sema::PushBlockScope(Scope *BlockScope, BlockDecl *Block) {
2194   FunctionScopes.push_back(new BlockScopeInfo(getDiagnostics(),
2195                                               BlockScope, Block));
2196   CapturingFunctionScopes++;
2197 }
2198 
2199 LambdaScopeInfo *Sema::PushLambdaScope() {
2200   LambdaScopeInfo *const LSI = new LambdaScopeInfo(getDiagnostics());
2201   FunctionScopes.push_back(LSI);
2202   CapturingFunctionScopes++;
2203   return LSI;
2204 }
2205 
2206 void Sema::RecordParsingTemplateParameterDepth(unsigned Depth) {
2207   if (LambdaScopeInfo *const LSI = getCurLambda()) {
2208     LSI->AutoTemplateParameterDepth = Depth;
2209     return;
2210   }
2211   llvm_unreachable(
2212       "Remove assertion if intentionally called in a non-lambda context.");
2213 }
2214 
2215 // Check that the type of the VarDecl has an accessible copy constructor and
2216 // resolve its destructor's exception specification.
2217 // This also performs initialization of block variables when they are moved
2218 // to the heap. It uses the same rules as applicable for implicit moves
2219 // according to the C++ standard in effect ([class.copy.elision]p3).
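//
// For example (illustrative, Objective-C++ with blocks):
//   __block std::string S;
//   auto Blk = ^{ (void)S.size(); };   // an escaping block capturing S
// When the block escapes, 'S' is moved to the heap, so the copy/move
// initialization built below must be valid, and std::string's destructor
// exception specification is resolved for the generated copy/destroy helpers.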
2220 static void checkEscapingByref(VarDecl *VD, Sema &S) {
2221   QualType T = VD->getType();
2222   EnterExpressionEvaluationContext scope(
2223       S, Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
2224   SourceLocation Loc = VD->getLocation();
2225   Expr *VarRef =
2226       new (S.Context) DeclRefExpr(S.Context, VD, false, T, VK_LValue, Loc);
2227   ExprResult Result;
2228   auto IE = InitializedEntity::InitializeBlock(Loc, T);
2229   if (S.getLangOpts().CPlusPlus23) {
2230     auto *E = ImplicitCastExpr::Create(S.Context, T, CK_NoOp, VarRef, nullptr,
2231                                        VK_XValue, FPOptionsOverride());
2232     Result = S.PerformCopyInitialization(IE, SourceLocation(), E);
2233   } else {
2234     Result = S.PerformMoveOrCopyInitialization(
2235         IE, Sema::NamedReturnInfo{VD, Sema::NamedReturnInfo::MoveEligible},
2236         VarRef);
2237   }
2238 
2239   if (!Result.isInvalid()) {
2240     Result = S.MaybeCreateExprWithCleanups(Result);
2241     Expr *Init = Result.getAs<Expr>();
2242     S.Context.setBlockVarCopyInit(VD, Init, S.canThrow(Init));
2243   }
2244 
2245   // The destructor's exception specification is needed when IRGen generates
2246   // block copy/destroy functions. Resolve it here.
2247   if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
2248     if (CXXDestructorDecl *DD = RD->getDestructor()) {
2249       auto *FPT = DD->getType()->castAs<FunctionProtoType>();
2250       S.ResolveExceptionSpec(Loc, FPT);
2251     }
2252 }
2253 
2254 static void markEscapingByrefs(const FunctionScopeInfo &FSI, Sema &S) {
2255   // Set the EscapingByref flag of __block variables captured by
2256   // escaping blocks.
2257   for (const BlockDecl *BD : FSI.Blocks) {
2258     for (const BlockDecl::Capture &BC : BD->captures()) {
2259       VarDecl *VD = BC.getVariable();
2260       if (VD->hasAttr<BlocksAttr>()) {
2261         // Nothing to do if this is a __block variable captured by a
2262         // non-escaping block.
2263         if (BD->doesNotEscape())
2264           continue;
2265         VD->setEscapingByref();
2266       }
2267       // Check whether the captured variable is or contains an object of
2268       // non-trivial C union type.
2269       QualType CapType = BC.getVariable()->getType();
2270       if (CapType.hasNonTrivialToPrimitiveDestructCUnion() ||
2271           CapType.hasNonTrivialToPrimitiveCopyCUnion())
2272         S.checkNonTrivialCUnion(BC.getVariable()->getType(),
2273                                 BD->getCaretLocation(),
2274                                 Sema::NTCUC_BlockCapture,
2275                                 Sema::NTCUK_Destruct|Sema::NTCUK_Copy);
2276     }
2277   }
2278 
2279   for (VarDecl *VD : FSI.ByrefBlockVars) {
2280     // __block variables might require us to capture a copy-initializer.
2281     if (!VD->isEscapingByref())
2282       continue;
2283     // It's currently invalid to ever have a __block variable with an
2284     // array type; should we diagnose that here?
2285     // Regardless, we don't want to ignore array nesting when
2286     // constructing this copy.
2287     if (VD->getType()->isStructureOrClassType())
2288       checkEscapingByref(VD, S);
2289   }
2290 }
2291 
2292 Sema::PoppedFunctionScopePtr
2293 Sema::PopFunctionScopeInfo(const AnalysisBasedWarnings::Policy *WP,
2294                            const Decl *D, QualType BlockType) {
2295   assert(!FunctionScopes.empty() && "mismatched push/pop!");
2296 
2297   markEscapingByrefs(*FunctionScopes.back(), *this);
2298 
2299   PoppedFunctionScopePtr Scope(FunctionScopes.pop_back_val(),
2300                                PoppedFunctionScopeDeleter(this));
2301 
2302   if (LangOpts.OpenMP)
2303     OpenMP().popOpenMPFunctionRegion(Scope.get());
2304 
2305   // Issue any analysis-based warnings.
2306   if (WP && D)
2307     AnalysisWarnings.IssueWarnings(*WP, Scope.get(), D, BlockType);
2308   else
2309     for (const auto &PUD : Scope->PossiblyUnreachableDiags)
2310       Diag(PUD.Loc, PUD.PD);
2311 
2312   return Scope;
2313 }
2314 
2315 void Sema::PoppedFunctionScopeDeleter::
2316 operator()(sema::FunctionScopeInfo *Scope) const {
2317   if (!Scope->isPlainFunction())
2318     Self->CapturingFunctionScopes--;
2319   // Stash the function scope for later reuse if it's for a normal function.
2320   if (Scope->isPlainFunction() && !Self->CachedFunctionScope)
2321     Self->CachedFunctionScope.reset(Scope);
2322   else
2323     delete Scope;
2324 }
2325 
2326 void Sema::PushCompoundScope(bool IsStmtExpr) {
2327   getCurFunction()->CompoundScopes.push_back(
2328       CompoundScopeInfo(IsStmtExpr, getCurFPFeatures()));
2329 }
2330 
2331 void Sema::PopCompoundScope() {
2332   FunctionScopeInfo *CurFunction = getCurFunction();
2333   assert(!CurFunction->CompoundScopes.empty() && "mismatched push/pop");
2334 
2335   CurFunction->CompoundScopes.pop_back();
2336 }
2337 
2338 bool Sema::hasAnyUnrecoverableErrorsInThisFunction() const {
2339   return getCurFunction()->hasUnrecoverableErrorOccurred();
2340 }
2341 
2342 void Sema::setFunctionHasBranchIntoScope() {
2343   if (!FunctionScopes.empty())
2344     FunctionScopes.back()->setHasBranchIntoScope();
2345 }
2346 
2347 void Sema::setFunctionHasBranchProtectedScope() {
2348   if (!FunctionScopes.empty())
2349     FunctionScopes.back()->setHasBranchProtectedScope();
2350 }
2351 
2352 void Sema::setFunctionHasIndirectGoto() {
2353   if (!FunctionScopes.empty())
2354     FunctionScopes.back()->setHasIndirectGoto();
2355 }
2356 
2357 void Sema::setFunctionHasMustTail() {
2358   if (!FunctionScopes.empty())
2359     FunctionScopes.back()->setHasMustTail();
2360 }
2361 
2362 BlockScopeInfo *Sema::getCurBlock() {
2363   if (FunctionScopes.empty())
2364     return nullptr;
2365 
2366   auto CurBSI = dyn_cast<BlockScopeInfo>(FunctionScopes.back());
2367   if (CurBSI && CurBSI->TheDecl &&
2368       !CurBSI->TheDecl->Encloses(CurContext)) {
2369     // We have switched contexts due to template instantiation.
2370     assert(!CodeSynthesisContexts.empty());
2371     return nullptr;
2372   }
2373 
2374   return CurBSI;
2375 }
2376 
2377 FunctionScopeInfo *Sema::getEnclosingFunction() const {
2378   if (FunctionScopes.empty())
2379     return nullptr;
2380 
2381   for (int e = FunctionScopes.size() - 1; e >= 0; --e) {
2382     if (isa<sema::BlockScopeInfo>(FunctionScopes[e]))
2383       continue;
2384     return FunctionScopes[e];
2385   }
2386   return nullptr;
2387 }
2388 
2389 CapturingScopeInfo *Sema::getEnclosingLambdaOrBlock() const {
2390   for (auto *Scope : llvm::reverse(FunctionScopes)) {
2391     if (auto *CSI = dyn_cast<CapturingScopeInfo>(Scope)) {
2392       auto *LSI = dyn_cast<LambdaScopeInfo>(CSI);
2393       if (LSI && LSI->Lambda && !LSI->Lambda->Encloses(CurContext) &&
2394           LSI->AfterParameterList) {
2395         // We have switched contexts due to template instantiation.
2396         // FIXME: We should swap out the FunctionScopes during code synthesis
2397         // so that we don't need to check for this.
2398         assert(!CodeSynthesisContexts.empty());
2399         return nullptr;
2400       }
2401       return CSI;
2402     }
2403   }
2404   return nullptr;
2405 }
2406 
2407 LambdaScopeInfo *Sema::getCurLambda(bool IgnoreNonLambdaCapturingScope) {
2408   if (FunctionScopes.empty())
2409     return nullptr;
2410 
2411   auto I = FunctionScopes.rbegin();
2412   if (IgnoreNonLambdaCapturingScope) {
2413     auto E = FunctionScopes.rend();
2414     while (I != E && isa<CapturingScopeInfo>(*I) && !isa<LambdaScopeInfo>(*I))
2415       ++I;
2416     if (I == E)
2417       return nullptr;
2418   }
2419   auto *CurLSI = dyn_cast<LambdaScopeInfo>(*I);
2420   if (CurLSI && CurLSI->Lambda && CurLSI->CallOperator &&
2421       !CurLSI->Lambda->Encloses(CurContext) && CurLSI->AfterParameterList) {
2422     // We have switched contexts due to template instantiation.
2423     assert(!CodeSynthesisContexts.empty());
2424     return nullptr;
2425   }
2426 
2427   return CurLSI;
2428 }
2429 
2430 // We have a generic lambda if we parsed auto parameters, or we have
2431 // an associated template parameter list.
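// For example, "[](auto x) { return x; }" (auto parameter) and
// "[]<class T>(T x) { return x; }" (explicit template parameter list) are
// both generic lambdas.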
2432 LambdaScopeInfo *Sema::getCurGenericLambda() {
2433   if (LambdaScopeInfo *LSI =  getCurLambda()) {
2434     return (LSI->TemplateParams.size() ||
2435                     LSI->GLTemplateParameterList) ? LSI : nullptr;
2436   }
2437   return nullptr;
2438 }
2439 
2440 
2441 void Sema::ActOnComment(SourceRange Comment) {
2442   if (!LangOpts.RetainCommentsFromSystemHeaders &&
2443       SourceMgr.isInSystemHeader(Comment.getBegin()))
2444     return;
2445   RawComment RC(SourceMgr, Comment, LangOpts.CommentOpts, false);
2446   if (RC.isAlmostTrailingComment() || RC.hasUnsupportedSplice(SourceMgr)) {
2447     SourceRange MagicMarkerRange(Comment.getBegin(),
2448                                  Comment.getBegin().getLocWithOffset(3));
2449     StringRef MagicMarkerText;
2450     switch (RC.getKind()) {
2451     case RawComment::RCK_OrdinaryBCPL:
2452       MagicMarkerText = "///<";
2453       break;
2454     case RawComment::RCK_OrdinaryC:
2455       MagicMarkerText = "/**<";
2456       break;
2457     case RawComment::RCK_Invalid:
2458       // FIXME: are there other scenarios that could produce an invalid
2459       // raw comment here?
2460       Diag(Comment.getBegin(), diag::warn_splice_in_doxygen_comment);
2461       return;
2462     default:
2463       llvm_unreachable("if this is an almost Doxygen comment, "
2464                        "it should be ordinary");
2465     }
2466     Diag(Comment.getBegin(), diag::warn_not_a_doxygen_trailing_member_comment) <<
2467       FixItHint::CreateReplacement(MagicMarkerRange, MagicMarkerText);
2468   }
2469   Context.addComment(RC);
2470 }
2471 
2472 // Pin this vtable to this file.
2473 ExternalSemaSource::~ExternalSemaSource() {}
2474 char ExternalSemaSource::ID;
2475 
2476 void ExternalSemaSource::ReadMethodPool(Selector Sel) { }
2477 void ExternalSemaSource::updateOutOfDateSelector(Selector Sel) { }
2478 
2479 void ExternalSemaSource::ReadKnownNamespaces(
2480                            SmallVectorImpl<NamespaceDecl *> &Namespaces) {
2481 }
2482 
2483 void ExternalSemaSource::ReadUndefinedButUsed(
2484     llvm::MapVector<NamedDecl *, SourceLocation> &Undefined) {}
2485 
2486 void ExternalSemaSource::ReadMismatchingDeleteExpressions(llvm::MapVector<
2487     FieldDecl *, llvm::SmallVector<std::pair<SourceLocation, bool>, 4>> &) {}
2488 
2489 bool Sema::tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
2490                          UnresolvedSetImpl &OverloadSet) {
2491   ZeroArgCallReturnTy = QualType();
2492   OverloadSet.clear();
2493 
2494   const OverloadExpr *Overloads = nullptr;
2495   bool IsMemExpr = false;
2496   if (E.getType() == Context.OverloadTy) {
2497     OverloadExpr::FindResult FR = OverloadExpr::find(&E);
2498 
2499     // Ignore overloads that are pointer-to-member constants.
2500     if (FR.HasFormOfMemberPointer)
2501       return false;
2502 
2503     Overloads = FR.Expression;
2504   } else if (E.getType() == Context.BoundMemberTy) {
2505     Overloads = dyn_cast<UnresolvedMemberExpr>(E.IgnoreParens());
2506     IsMemExpr = true;
2507   }
2508 
2509   bool Ambiguous = false;
2510   bool IsMV = false;
2511 
2512   if (Overloads) {
2513     for (OverloadExpr::decls_iterator it = Overloads->decls_begin(),
2514          DeclsEnd = Overloads->decls_end(); it != DeclsEnd; ++it) {
2515       OverloadSet.addDecl(*it);
2516 
2517       // Check whether the function is a non-template, non-member which takes no
2518       // arguments.
2519       if (IsMemExpr)
2520         continue;
2521       if (const FunctionDecl *OverloadDecl
2522             = dyn_cast<FunctionDecl>((*it)->getUnderlyingDecl())) {
2523         if (OverloadDecl->getMinRequiredArguments() == 0) {
2524           if (!ZeroArgCallReturnTy.isNull() && !Ambiguous &&
2525               (!IsMV || !(OverloadDecl->isCPUDispatchMultiVersion() ||
2526                           OverloadDecl->isCPUSpecificMultiVersion()))) {
2527             ZeroArgCallReturnTy = QualType();
2528             Ambiguous = true;
2529           } else {
2530             ZeroArgCallReturnTy = OverloadDecl->getReturnType();
2531             IsMV = OverloadDecl->isCPUDispatchMultiVersion() ||
2532                    OverloadDecl->isCPUSpecificMultiVersion();
2533           }
2534         }
2535       }
2536     }
2537 
2538     // If it's not a member, use better machinery to try to resolve the call
2539     if (!IsMemExpr)
2540       return !ZeroArgCallReturnTy.isNull();
2541   }
2542 
2543   // Attempt to call the member with no arguments - this will correctly handle
2544   // member templates with defaults/deduction of template arguments, overloads
2545   // with default arguments, etc.
2546   if (IsMemExpr && !E.isTypeDependent()) {
2547     Sema::TentativeAnalysisScope Trap(*this);
2548     ExprResult R = BuildCallToMemberFunction(nullptr, &E, SourceLocation(), {},
2549                                              SourceLocation());
2550     if (R.isUsable()) {
2551       ZeroArgCallReturnTy = R.get()->getType();
2552       return true;
2553     }
2554     return false;
2555   }
2556 
2557   if (const auto *DeclRef = dyn_cast<DeclRefExpr>(E.IgnoreParens())) {
2558     if (const auto *Fun = dyn_cast<FunctionDecl>(DeclRef->getDecl())) {
2559       if (Fun->getMinRequiredArguments() == 0)
2560         ZeroArgCallReturnTy = Fun->getReturnType();
2561       return true;
2562     }
2563   }
2564 
2565   // We don't have an expression that's convenient to get a FunctionDecl from,
2566   // but we can at least check if the type is "function of 0 arguments".
2567   QualType ExprTy = E.getType();
2568   const FunctionType *FunTy = nullptr;
2569   QualType PointeeTy = ExprTy->getPointeeType();
2570   if (!PointeeTy.isNull())
2571     FunTy = PointeeTy->getAs<FunctionType>();
2572   if (!FunTy)
2573     FunTy = ExprTy->getAs<FunctionType>();
2574 
2575   if (const auto *FPT = dyn_cast_if_present<FunctionProtoType>(FunTy)) {
2576     if (FPT->getNumParams() == 0)
2577       ZeroArgCallReturnTy = FunTy->getReturnType();
2578     return true;
2579   }
2580   return false;
2581 }
2582 
2583 /// Give notes for a set of overloads.
2584 ///
2585 /// A companion to tryExprAsCall. In cases when the name that the programmer
2586 /// wrote was an overloaded function, we may be able to make some guesses about
2587 /// plausible overloads based on their return types; such guesses can be handed
2588 /// off to this method to be emitted as notes.
2589 ///
2590 /// \param Overloads - The overloads to note.
2591 /// \param FinalNoteLoc - If we've suppressed printing some overloads due to
2592 ///  -fshow-overloads=best, this is the location to attach to the note about too
2593 ///  many candidates. Typically this will be the location of the original
2594 ///  ill-formed expression.
2595 static void noteOverloads(Sema &S, const UnresolvedSetImpl &Overloads,
2596                           const SourceLocation FinalNoteLoc) {
2597   unsigned ShownOverloads = 0;
2598   unsigned SuppressedOverloads = 0;
2599   for (UnresolvedSetImpl::iterator It = Overloads.begin(),
2600        DeclsEnd = Overloads.end(); It != DeclsEnd; ++It) {
2601     if (ShownOverloads >= S.Diags.getNumOverloadCandidatesToShow()) {
2602       ++SuppressedOverloads;
2603       continue;
2604     }
2605 
2606     const NamedDecl *Fn = (*It)->getUnderlyingDecl();
2607     // Don't print overloads for non-default multiversioned functions.
2608     if (const auto *FD = Fn->getAsFunction()) {
2609       if (FD->isMultiVersion() && FD->hasAttr<TargetAttr>() &&
2610           !FD->getAttr<TargetAttr>()->isDefaultVersion())
2611         continue;
2612       if (FD->isMultiVersion() && FD->hasAttr<TargetVersionAttr>() &&
2613           !FD->getAttr<TargetVersionAttr>()->isDefaultVersion())
2614         continue;
2615     }
2616     S.Diag(Fn->getLocation(), diag::note_possible_target_of_call);
2617     ++ShownOverloads;
2618   }
2619 
2620   S.Diags.overloadCandidatesShown(ShownOverloads);
2621 
2622   if (SuppressedOverloads)
2623     S.Diag(FinalNoteLoc, diag::note_ovl_too_many_candidates)
2624       << SuppressedOverloads;
2625 }
2626 
2627 static void notePlausibleOverloads(Sema &S, SourceLocation Loc,
2628                                    const UnresolvedSetImpl &Overloads,
2629                                    bool (*IsPlausibleResult)(QualType)) {
2630   if (!IsPlausibleResult)
2631     return noteOverloads(S, Overloads, Loc);
2632 
2633   UnresolvedSet<2> PlausibleOverloads;
2634   for (OverloadExpr::decls_iterator It = Overloads.begin(),
2635          DeclsEnd = Overloads.end(); It != DeclsEnd; ++It) {
2636     const auto *OverloadDecl = cast<FunctionDecl>(*It);
2637     QualType OverloadResultTy = OverloadDecl->getReturnType();
2638     if (IsPlausibleResult(OverloadResultTy))
2639       PlausibleOverloads.addDecl(It.getDecl());
2640   }
2641   noteOverloads(S, PlausibleOverloads, Loc);
2642 }
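
// As an illustration (the names below are placeholders, not an existing call
// site), a diagnostic path that only wants to suggest overloads whose
// zero-argument call would yield a pointer could pass a predicate such as:
//
//   notePlausibleOverloads(SemaRef, Loc, Overloads,
//                          [](QualType T) { return T->isPointerType(); });
//
// A capture-less lambda converts to the required bool (*)(QualType); passing
// a null predicate falls back to noting every overload.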
2643 
2644 /// Determine whether the given expression can be called by just
2645 /// putting parentheses after it.  Notably, expressions with unary
2646 /// operators can't be, because the appended parentheses would bind to the
2647 /// operand and leave the unary operator outside the resulting call.
2648 static bool IsCallableWithAppend(const Expr *E) {
2649   E = E->IgnoreImplicit();
2650   return (!isa<CStyleCastExpr>(E) &&
2651           !isa<UnaryOperator>(E) &&
2652           !isa<BinaryOperator>(E) &&
2653           !isa<CXXOperatorCallExpr>(E));
2654 }
2655 
2656 static bool IsCPUDispatchCPUSpecificMultiVersion(const Expr *E) {
2657   if (const auto *UO = dyn_cast<UnaryOperator>(E))
2658     E = UO->getSubExpr();
2659 
2660   if (const auto *ULE = dyn_cast<UnresolvedLookupExpr>(E)) {
2661     if (ULE->getNumDecls() == 0)
2662       return false;
2663 
2664     const NamedDecl *ND = *ULE->decls_begin();
2665     if (const auto *FD = dyn_cast<FunctionDecl>(ND))
2666       return FD->isCPUDispatchMultiVersion() || FD->isCPUSpecificMultiVersion();
2667   }
2668   return false;
2669 }
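
// For example, with a multiversioned callee declared (illustratively) as
//
//   __attribute__((cpu_dispatch(ivybridge, atom))) void f(void);
//
// a reference such as `f` or `&f` that is still represented as an
// UnresolvedLookupExpr makes this predicate return true, and the caller
// adjusts the wording of its "did you mean to call it?" diagnostic
// accordingly.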
2670 
2671 bool Sema::tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
2672                                 bool ForceComplain,
2673                                 bool (*IsPlausibleResult)(QualType)) {
2674   SourceLocation Loc = E.get()->getExprLoc();
2675   SourceRange Range = E.get()->getSourceRange();
2676   UnresolvedSet<4> Overloads;
2677 
2678   // If this is a SFINAE context, don't try anything that might trigger ADL
2679   // prematurely.
2680   if (!isSFINAEContext()) {
2681     QualType ZeroArgCallTy;
2682     if (tryExprAsCall(*E.get(), ZeroArgCallTy, Overloads) &&
2683         !ZeroArgCallTy.isNull() &&
2684         (!IsPlausibleResult || IsPlausibleResult(ZeroArgCallTy))) {
2685       // At this point, we know E is potentially callable with 0
2686       // arguments and that it returns something of a reasonable type,
2687       // so we can emit a fixit and carry on pretending that E was
2688       // actually a CallExpr.
2689       SourceLocation ParenInsertionLoc = getLocForEndOfToken(Range.getEnd());
2690       bool IsMV = IsCPUDispatchCPUSpecificMultiVersion(E.get());
2691       Diag(Loc, PD) << /*zero-arg*/ 1 << IsMV << Range
2692                     << (IsCallableWithAppend(E.get())
2693                             ? FixItHint::CreateInsertion(ParenInsertionLoc,
2694                                                          "()")
2695                             : FixItHint());
2696       if (!IsMV)
2697         notePlausibleOverloads(*this, Loc, Overloads, IsPlausibleResult);
2698 
2699       // FIXME: Try this before emitting the fixit, and suppress diagnostics
2700       // while doing so.
2701       E = BuildCallExpr(nullptr, E.get(), Range.getEnd(), {},
2702                         Range.getEnd().getLocWithOffset(1));
2703       return true;
2704     }
2705   }
2706   if (!ForceComplain) return false;
2707 
2708   bool IsMV = IsCPUDispatchCPUSpecificMultiVersion(E.get());
2709   Diag(Loc, PD) << /*not zero-arg*/ 0 << IsMV << Range;
2710   if (!IsMV)
2711     notePlausibleOverloads(*this, Loc, Overloads, IsPlausibleResult);
2712   E = ExprError();
2713   return true;
2714 }
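
// A sketch of a typical call site; the diagnostic ID and the plausibility
// predicate below are placeholders chosen for illustration, not taken from an
// actual caller:
//
//   ExprResult Callee = ...;
//   if (tryToRecoverWithCall(Callee, PDiag(diag::err_placeholder_not_callable),
//                            /*ForceComplain=*/false,
//                            [](QualType T) { return T->isScalarType(); })) {
//     // Diagnosed: Callee now holds the zero-argument CallExpr built for
//     // recovery (or ExprError() when ForceComplain forces a complaint).
//   }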
2715 
2716 IdentifierInfo *Sema::getSuperIdentifier() const {
2717   if (!Ident_super)
2718     Ident_super = &Context.Idents.get("super");
2719   return Ident_super;
2720 }
2721 
2722 void Sema::PushCapturedRegionScope(Scope *S, CapturedDecl *CD, RecordDecl *RD,
2723                                    CapturedRegionKind K,
2724                                    unsigned OpenMPCaptureLevel) {
2725   auto *CSI = new CapturedRegionScopeInfo(
2726       getDiagnostics(), S, CD, RD, CD->getContextParam(), K,
2727       (getLangOpts().OpenMP && K == CR_OpenMP)
2728           ? OpenMP().getOpenMPNestingLevel()
2729           : 0,
2730       OpenMPCaptureLevel);
2731   CSI->ReturnType = Context.VoidTy;
2732   FunctionScopes.push_back(CSI);
2733   CapturingFunctionScopes++;
2734 }
2735 
2736 CapturedRegionScopeInfo *Sema::getCurCapturedRegion() {
2737   if (FunctionScopes.empty())
2738     return nullptr;
2739 
2740   return dyn_cast<CapturedRegionScopeInfo>(FunctionScopes.back());
2741 }
2742 
2743 const llvm::MapVector<FieldDecl *, Sema::DeleteLocs> &
2744 Sema::getMismatchingDeleteExpressions() const {
2745   return DeleteExprs;
2746 }
2747 
2748 Sema::FPFeaturesStateRAII::FPFeaturesStateRAII(Sema &S)
2749     : S(S), OldFPFeaturesState(S.CurFPFeatures),
2750       OldOverrides(S.FpPragmaStack.CurrentValue),
2751       OldEvalMethod(S.PP.getCurrentFPEvalMethod()),
2752       OldFPPragmaLocation(S.PP.getLastFPEvalPragmaLocation()) {}
2753 
2754 Sema::FPFeaturesStateRAII::~FPFeaturesStateRAII() {
2755   S.CurFPFeatures = OldFPFeaturesState;
2756   S.FpPragmaStack.CurrentValue = OldOverrides;
2757   S.PP.setCurrentFPEvalMethod(OldFPPragmaLocation, OldEvalMethod);
2758 }
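
// Usage sketch: the RAII type snapshots the current floating-point state on
// construction and restores it on scope exit, so temporary overrides can be
// applied safely:
//
//   {
//     Sema::FPFeaturesStateRAII SavedFP(SemaRef); // saves CurFPFeatures, the
//                                                 // FP pragma stack value, and
//                                                 // the current eval method
//     // ... work that may change the floating-point pragma state ...
//   } // destructor restores the saved state here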
2759 
2760 bool Sema::isDeclaratorFunctionLike(Declarator &D) {
2761   assert(D.getCXXScopeSpec().isSet() &&
2762          "can only be called for qualified names");
2763 
2764   auto LR = LookupResult(*this, D.getIdentifier(), D.getBeginLoc(),
2765                          LookupOrdinaryName, forRedeclarationInCurContext());
2766   DeclContext *DC = computeDeclContext(D.getCXXScopeSpec(),
2767                                        !D.getDeclSpec().isFriendSpecified());
2768   if (!DC)
2769     return false;
2770 
2771   LookupQualifiedName(LR, DC);
2772   bool Result = llvm::all_of(LR, [](Decl *Dcl) {
2773     if (NamedDecl *ND = dyn_cast<NamedDecl>(Dcl)) {
2774       ND = ND->getUnderlyingDecl();
2775       return isa<FunctionDecl>(ND) || isa<FunctionTemplateDecl>(ND) ||
2776              isa<UsingDecl>(ND);
2777     }
2778     return false;
2779   });
2780   return Result;
2781 }
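
// For example, given `namespace N { void f(int); int x; }`, a qualified
// declarator naming `N::f` finds only function declarations and yields true,
// while one naming `N::x` finds a variable and yields false.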
2782 
2783 Attr *Sema::CreateAnnotationAttr(const AttributeCommonInfo &CI, StringRef Annot,
2784                                  MutableArrayRef<Expr *> Args) {
2785 
2786   auto *A = AnnotateAttr::Create(Context, Annot, Args.data(), Args.size(), CI);
2787   if (!ConstantFoldAttrArgs(
2788           CI, MutableArrayRef<Expr *>(A->args_begin(), A->args_end()))) {
2789     return nullptr;
2790   }
2791   return A;
2792 }
2793 
2794 Attr *Sema::CreateAnnotationAttr(const ParsedAttr &AL) {
2795   // Make sure that there is a string literal as the annotation's first
2796   // argument.
2797   StringRef Str;
2798   if (!checkStringLiteralArgumentAttr(AL, 0, Str))
2799     return nullptr;
2800 
2801   llvm::SmallVector<Expr *, 4> Args;
2802   Args.reserve(AL.getNumArgs() - 1);
2803   for (unsigned Idx = 1; Idx < AL.getNumArgs(); Idx++) {
2804     assert(!AL.isArgIdent(Idx));
2805     Args.push_back(AL.getArgAsExpr(Idx));
2806   }
2807 
2808   return CreateAnnotationAttr(AL, Str, Args);
2809 }
2810 }
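
// For reference, the ParsedAttr form handled above corresponds to source such
// as (illustrative):
//
//   int buf[64] __attribute__((annotate("bounds_checked", 64)));
//
// The leading string literal becomes the annotation text and any remaining
// arguments are constant-folded by the overload above before the
// AnnotateAttr is created.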