1 //===- ASTContext.cpp - Context to hold long-lived AST nodes --------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file implements the ASTContext interface. 10 // 11 //===----------------------------------------------------------------------===// 12 13 #include "clang/AST/ASTContext.h" 14 #include "CXXABI.h" 15 #include "Interp/Context.h" 16 #include "clang/AST/APValue.h" 17 #include "clang/AST/ASTConcept.h" 18 #include "clang/AST/ASTMutationListener.h" 19 #include "clang/AST/ASTTypeTraits.h" 20 #include "clang/AST/Attr.h" 21 #include "clang/AST/AttrIterator.h" 22 #include "clang/AST/CharUnits.h" 23 #include "clang/AST/Comment.h" 24 #include "clang/AST/Decl.h" 25 #include "clang/AST/DeclBase.h" 26 #include "clang/AST/DeclCXX.h" 27 #include "clang/AST/DeclContextInternals.h" 28 #include "clang/AST/DeclObjC.h" 29 #include "clang/AST/DeclOpenMP.h" 30 #include "clang/AST/DeclTemplate.h" 31 #include "clang/AST/DeclarationName.h" 32 #include "clang/AST/DependenceFlags.h" 33 #include "clang/AST/Expr.h" 34 #include "clang/AST/ExprCXX.h" 35 #include "clang/AST/ExprConcepts.h" 36 #include "clang/AST/ExternalASTSource.h" 37 #include "clang/AST/Mangle.h" 38 #include "clang/AST/MangleNumberingContext.h" 39 #include "clang/AST/NestedNameSpecifier.h" 40 #include "clang/AST/ParentMapContext.h" 41 #include "clang/AST/RawCommentList.h" 42 #include "clang/AST/RecordLayout.h" 43 #include "clang/AST/Stmt.h" 44 #include "clang/AST/TemplateBase.h" 45 #include "clang/AST/TemplateName.h" 46 #include "clang/AST/Type.h" 47 #include "clang/AST/TypeLoc.h" 48 #include "clang/AST/UnresolvedSet.h" 49 #include "clang/AST/VTableBuilder.h" 50 #include "clang/Basic/AddressSpaces.h" 51 #include "clang/Basic/Builtins.h" 52 #include "clang/Basic/CommentOptions.h" 53 #include "clang/Basic/ExceptionSpecificationType.h" 54 #include "clang/Basic/IdentifierTable.h" 55 #include "clang/Basic/LLVM.h" 56 #include "clang/Basic/LangOptions.h" 57 #include "clang/Basic/Linkage.h" 58 #include "clang/Basic/Module.h" 59 #include "clang/Basic/NoSanitizeList.h" 60 #include "clang/Basic/ObjCRuntime.h" 61 #include "clang/Basic/ProfileList.h" 62 #include "clang/Basic/SourceLocation.h" 63 #include "clang/Basic/SourceManager.h" 64 #include "clang/Basic/Specifiers.h" 65 #include "clang/Basic/TargetCXXABI.h" 66 #include "clang/Basic/TargetInfo.h" 67 #include "clang/Basic/XRayLists.h" 68 #include "llvm/ADT/APFixedPoint.h" 69 #include "llvm/ADT/APInt.h" 70 #include "llvm/ADT/APSInt.h" 71 #include "llvm/ADT/ArrayRef.h" 72 #include "llvm/ADT/DenseMap.h" 73 #include "llvm/ADT/DenseSet.h" 74 #include "llvm/ADT/FoldingSet.h" 75 #include "llvm/ADT/PointerUnion.h" 76 #include "llvm/ADT/STLExtras.h" 77 #include "llvm/ADT/SmallPtrSet.h" 78 #include "llvm/ADT/SmallVector.h" 79 #include "llvm/ADT/StringExtras.h" 80 #include "llvm/ADT/StringRef.h" 81 #include "llvm/Frontend/OpenMP/OMPIRBuilder.h" 82 #include "llvm/Support/Capacity.h" 83 #include "llvm/Support/Casting.h" 84 #include "llvm/Support/Compiler.h" 85 #include "llvm/Support/ErrorHandling.h" 86 #include "llvm/Support/MD5.h" 87 #include "llvm/Support/MathExtras.h" 88 #include "llvm/Support/raw_ostream.h" 89 #include "llvm/TargetParser/Triple.h" 90 #include <algorithm> 91 #include <cassert> 92 #include <cstddef> 93 #include 
<cstdint> 94 #include <cstdlib> 95 #include <map> 96 #include <memory> 97 #include <optional> 98 #include <string> 99 #include <tuple> 100 #include <utility> 101 102 using namespace clang; 103 104 enum FloatingRank { 105 BFloat16Rank, 106 Float16Rank, 107 HalfRank, 108 FloatRank, 109 DoubleRank, 110 LongDoubleRank, 111 Float128Rank, 112 Ibm128Rank 113 }; 114 115 /// \returns The locations that are relevant when searching for Doc comments 116 /// related to \p D. 117 static SmallVector<SourceLocation, 2> 118 getDeclLocsForCommentSearch(const Decl *D, SourceManager &SourceMgr) { 119 assert(D); 120 121 // Users cannot attach documentation to implicit declarations. 122 if (D->isImplicit()) 123 return {}; 124 125 // Users cannot attach documentation to implicit instantiations. 126 if (const auto *FD = dyn_cast<FunctionDecl>(D)) { 127 if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation) 128 return {}; 129 } 130 131 if (const auto *VD = dyn_cast<VarDecl>(D)) { 132 if (VD->isStaticDataMember() && 133 VD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation) 134 return {}; 135 } 136 137 if (const auto *CRD = dyn_cast<CXXRecordDecl>(D)) { 138 if (CRD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation) 139 return {}; 140 } 141 142 if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(D)) { 143 TemplateSpecializationKind TSK = CTSD->getSpecializationKind(); 144 if (TSK == TSK_ImplicitInstantiation || 145 TSK == TSK_Undeclared) 146 return {}; 147 } 148 149 if (const auto *ED = dyn_cast<EnumDecl>(D)) { 150 if (ED->getTemplateSpecializationKind() == TSK_ImplicitInstantiation) 151 return {}; 152 } 153 if (const auto *TD = dyn_cast<TagDecl>(D)) { 154 // When a tag declaration (but not a definition!) is part of the 155 // decl-specifier-seq of some other declaration, it doesn't get a comment. 156 if (TD->isEmbeddedInDeclarator() && !TD->isCompleteDefinition()) 157 return {}; 158 } 159 // TODO: handle comments for function parameters properly. 160 if (isa<ParmVarDecl>(D)) 161 return {}; 162 163 // TODO: we could look up template parameter documentation in the template 164 // documentation. 165 if (isa<TemplateTypeParmDecl>(D) || 166 isa<NonTypeTemplateParmDecl>(D) || 167 isa<TemplateTemplateParmDecl>(D)) 168 return {}; 169 170 SmallVector<SourceLocation, 2> Locations; 171 // Find declaration location. 172 // For Objective-C declarations we generally don't expect to have multiple 173 // declarators, thus we use the declaration's starting location as the "declaration 174 // location". 175 // For all other declarations, multiple declarators are used quite frequently, 176 // so we use the location of the identifier as the "declaration location". 177 SourceLocation BaseLocation; 178 if (isa<ObjCMethodDecl>(D) || isa<ObjCContainerDecl>(D) || 179 isa<ObjCPropertyDecl>(D) || isa<RedeclarableTemplateDecl>(D) || 180 isa<ClassTemplateSpecializationDecl>(D) || 181 // Allow association with Y across {} in `typedef struct X {} Y`. 
182 isa<TypedefDecl>(D)) 183 BaseLocation = D->getBeginLoc(); 184 else 185 BaseLocation = D->getLocation(); 186 187 if (!D->getLocation().isMacroID()) { 188 Locations.emplace_back(BaseLocation); 189 } else { 190 const auto *DeclCtx = D->getDeclContext(); 191 192 // When encountering definitions generated from a macro (that are not 193 // contained by another declaration in the macro) we need to try and find 194 // the comment at the location of the expansion but if there is no comment 195 // there we should retry to see if there is a comment inside the macro as 196 // well. To this end we return first BaseLocation to first look at the 197 // expansion site, the second value is the spelling location of the 198 // beginning of the declaration defined inside the macro. 199 if (!(DeclCtx && 200 Decl::castFromDeclContext(DeclCtx)->getLocation().isMacroID())) { 201 Locations.emplace_back(SourceMgr.getExpansionLoc(BaseLocation)); 202 } 203 204 // We use Decl::getBeginLoc() and not just BaseLocation here to ensure that 205 // we don't refer to the macro argument location at the expansion site (this 206 // can happen if the name's spelling is provided via macro argument), and 207 // always to the declaration itself. 208 Locations.emplace_back(SourceMgr.getSpellingLoc(D->getBeginLoc())); 209 } 210 211 return Locations; 212 } 213 214 RawComment *ASTContext::getRawCommentForDeclNoCacheImpl( 215 const Decl *D, const SourceLocation RepresentativeLocForDecl, 216 const std::map<unsigned, RawComment *> &CommentsInTheFile) const { 217 // If the declaration doesn't map directly to a location in a file, we 218 // can't find the comment. 219 if (RepresentativeLocForDecl.isInvalid() || 220 !RepresentativeLocForDecl.isFileID()) 221 return nullptr; 222 223 // If there are no comments anywhere, we won't find anything. 224 if (CommentsInTheFile.empty()) 225 return nullptr; 226 227 // Decompose the location for the declaration and find the beginning of the 228 // file buffer. 229 const std::pair<FileID, unsigned> DeclLocDecomp = 230 SourceMgr.getDecomposedLoc(RepresentativeLocForDecl); 231 232 // Slow path. 233 auto OffsetCommentBehindDecl = 234 CommentsInTheFile.lower_bound(DeclLocDecomp.second); 235 236 // First check whether we have a trailing comment. 237 if (OffsetCommentBehindDecl != CommentsInTheFile.end()) { 238 RawComment *CommentBehindDecl = OffsetCommentBehindDecl->second; 239 if ((CommentBehindDecl->isDocumentation() || 240 LangOpts.CommentOpts.ParseAllComments) && 241 CommentBehindDecl->isTrailingComment() && 242 (isa<FieldDecl>(D) || isa<EnumConstantDecl>(D) || isa<VarDecl>(D) || 243 isa<ObjCMethodDecl>(D) || isa<ObjCPropertyDecl>(D))) { 244 245 // Check that Doxygen trailing comment comes after the declaration, starts 246 // on the same line and in the same file as the declaration. 247 if (SourceMgr.getLineNumber(DeclLocDecomp.first, DeclLocDecomp.second) == 248 Comments.getCommentBeginLine(CommentBehindDecl, DeclLocDecomp.first, 249 OffsetCommentBehindDecl->first)) { 250 return CommentBehindDecl; 251 } 252 } 253 } 254 255 // The comment just after the declaration was not a trailing comment. 256 // Let's look at the previous comment. 257 if (OffsetCommentBehindDecl == CommentsInTheFile.begin()) 258 return nullptr; 259 260 auto OffsetCommentBeforeDecl = --OffsetCommentBehindDecl; 261 RawComment *CommentBeforeDecl = OffsetCommentBeforeDecl->second; 262 263 // Check that we actually have a non-member Doxygen comment. 
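// A trailing comment in this position would document the previous declaration, not D, so it is rejected here.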
264 if (!(CommentBeforeDecl->isDocumentation() || 265 LangOpts.CommentOpts.ParseAllComments) || 266 CommentBeforeDecl->isTrailingComment()) 267 return nullptr; 268 269 // Decompose the end of the comment. 270 const unsigned CommentEndOffset = 271 Comments.getCommentEndOffset(CommentBeforeDecl); 272 273 // Get the corresponding buffer. 274 bool Invalid = false; 275 const char *Buffer = SourceMgr.getBufferData(DeclLocDecomp.first, 276 &Invalid).data(); 277 if (Invalid) 278 return nullptr; 279 280 // Extract text between the comment and declaration. 281 StringRef Text(Buffer + CommentEndOffset, 282 DeclLocDecomp.second - CommentEndOffset); 283 284 // There should be no other declarations or preprocessor directives between 285 // comment and declaration. 286 if (Text.find_last_of(";{}#@") != StringRef::npos) 287 return nullptr; 288 289 return CommentBeforeDecl; 290 } 291 292 RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const { 293 const auto DeclLocs = getDeclLocsForCommentSearch(D, SourceMgr); 294 295 for (const auto DeclLoc : DeclLocs) { 296 // If the declaration doesn't map directly to a location in a file, we 297 // can't find the comment. 298 if (DeclLoc.isInvalid() || !DeclLoc.isFileID()) 299 continue; 300 301 if (ExternalSource && !CommentsLoaded) { 302 ExternalSource->ReadComments(); 303 CommentsLoaded = true; 304 } 305 306 if (Comments.empty()) 307 continue; 308 309 const FileID File = SourceMgr.getDecomposedLoc(DeclLoc).first; 310 if (!File.isValid()) 311 continue; 312 313 const auto CommentsInThisFile = Comments.getCommentsInFile(File); 314 if (!CommentsInThisFile || CommentsInThisFile->empty()) 315 continue; 316 317 if (RawComment *Comment = 318 getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile)) 319 return Comment; 320 } 321 322 return nullptr; 323 } 324 325 void ASTContext::addComment(const RawComment &RC) { 326 assert(LangOpts.RetainCommentsFromSystemHeaders || 327 !SourceMgr.isInSystemHeader(RC.getSourceRange().getBegin())); 328 Comments.addComment(RC, LangOpts.CommentOpts, BumpAlloc); 329 } 330 331 /// If we have a 'templated' declaration for a template, adjust 'D' to 332 /// refer to the actual template. 333 /// If we have an implicit instantiation, adjust 'D' to refer to template. 334 static const Decl &adjustDeclToTemplate(const Decl &D) { 335 if (const auto *FD = dyn_cast<FunctionDecl>(&D)) { 336 // Is this function declaration part of a function template? 337 if (const FunctionTemplateDecl *FTD = FD->getDescribedFunctionTemplate()) 338 return *FTD; 339 340 // Nothing to do if function is not an implicit instantiation. 341 if (FD->getTemplateSpecializationKind() != TSK_ImplicitInstantiation) 342 return D; 343 344 // Function is an implicit instantiation of a function template? 345 if (const FunctionTemplateDecl *FTD = FD->getPrimaryTemplate()) 346 return *FTD; 347 348 // Function is instantiated from a member definition of a class template? 349 if (const FunctionDecl *MemberDecl = 350 FD->getInstantiatedFromMemberFunction()) 351 return *MemberDecl; 352 353 return D; 354 } 355 if (const auto *VD = dyn_cast<VarDecl>(&D)) { 356 // Static data member is instantiated from a member definition of a class 357 // template? 358 if (VD->isStaticDataMember()) 359 if (const VarDecl *MemberDecl = VD->getInstantiatedFromStaticDataMember()) 360 return *MemberDecl; 361 362 return D; 363 } 364 if (const auto *CRD = dyn_cast<CXXRecordDecl>(&D)) { 365 // Is this class declaration part of a class template? 
366 if (const ClassTemplateDecl *CTD = CRD->getDescribedClassTemplate()) 367 return *CTD; 368 369 // Class is an implicit instantiation of a class template or partial 370 // specialization? 371 if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(CRD)) { 372 if (CTSD->getSpecializationKind() != TSK_ImplicitInstantiation) 373 return D; 374 llvm::PointerUnion<ClassTemplateDecl *, 375 ClassTemplatePartialSpecializationDecl *> 376 PU = CTSD->getSpecializedTemplateOrPartial(); 377 return PU.is<ClassTemplateDecl *>() 378 ? *static_cast<const Decl *>(PU.get<ClassTemplateDecl *>()) 379 : *static_cast<const Decl *>( 380 PU.get<ClassTemplatePartialSpecializationDecl *>()); 381 } 382 383 // Class is instantiated from a member definition of a class template? 384 if (const MemberSpecializationInfo *Info = 385 CRD->getMemberSpecializationInfo()) 386 return *Info->getInstantiatedFrom(); 387 388 return D; 389 } 390 if (const auto *ED = dyn_cast<EnumDecl>(&D)) { 391 // Enum is instantiated from a member definition of a class template? 392 if (const EnumDecl *MemberDecl = ED->getInstantiatedFromMemberEnum()) 393 return *MemberDecl; 394 395 return D; 396 } 397 // FIXME: Adjust alias templates? 398 return D; 399 } 400 401 const RawComment *ASTContext::getRawCommentForAnyRedecl( 402 const Decl *D, 403 const Decl **OriginalDecl) const { 404 if (!D) { 405 if (OriginalDecl) 406 OriginalDecl = nullptr; 407 return nullptr; 408 } 409 410 D = &adjustDeclToTemplate(*D); 411 412 // Any comment directly attached to D? 413 { 414 auto DeclComment = DeclRawComments.find(D); 415 if (DeclComment != DeclRawComments.end()) { 416 if (OriginalDecl) 417 *OriginalDecl = D; 418 return DeclComment->second; 419 } 420 } 421 422 // Any comment attached to any redeclaration of D? 423 const Decl *CanonicalD = D->getCanonicalDecl(); 424 if (!CanonicalD) 425 return nullptr; 426 427 { 428 auto RedeclComment = RedeclChainComments.find(CanonicalD); 429 if (RedeclComment != RedeclChainComments.end()) { 430 if (OriginalDecl) 431 *OriginalDecl = RedeclComment->second; 432 auto CommentAtRedecl = DeclRawComments.find(RedeclComment->second); 433 assert(CommentAtRedecl != DeclRawComments.end() && 434 "This decl is supposed to have comment attached."); 435 return CommentAtRedecl->second; 436 } 437 } 438 439 // Any redeclarations of D that we haven't checked for comments yet? 440 // We can't use DenseMap::iterator directly since it'd get invalid. 441 auto LastCheckedRedecl = [this, CanonicalD]() -> const Decl * { 442 return CommentlessRedeclChains.lookup(CanonicalD); 443 }(); 444 445 for (const auto Redecl : D->redecls()) { 446 assert(Redecl); 447 // Skip all redeclarations that have been checked previously. 
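// CommentlessRedeclChains records the last redeclaration that was already searched without finding a comment, so the walk below can resume after it instead of re-scanning the whole redeclaration chain.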
448 if (LastCheckedRedecl) { 449 if (LastCheckedRedecl == Redecl) { 450 LastCheckedRedecl = nullptr; 451 } 452 continue; 453 } 454 const RawComment *RedeclComment = getRawCommentForDeclNoCache(Redecl); 455 if (RedeclComment) { 456 cacheRawCommentForDecl(*Redecl, *RedeclComment); 457 if (OriginalDecl) 458 *OriginalDecl = Redecl; 459 return RedeclComment; 460 } 461 CommentlessRedeclChains[CanonicalD] = Redecl; 462 } 463 464 if (OriginalDecl) 465 *OriginalDecl = nullptr; 466 return nullptr; 467 } 468 469 void ASTContext::cacheRawCommentForDecl(const Decl &OriginalD, 470 const RawComment &Comment) const { 471 assert(Comment.isDocumentation() || LangOpts.CommentOpts.ParseAllComments); 472 DeclRawComments.try_emplace(&OriginalD, &Comment); 473 const Decl *const CanonicalDecl = OriginalD.getCanonicalDecl(); 474 RedeclChainComments.try_emplace(CanonicalDecl, &OriginalD); 475 CommentlessRedeclChains.erase(CanonicalDecl); 476 } 477 478 static void addRedeclaredMethods(const ObjCMethodDecl *ObjCMethod, 479 SmallVectorImpl<const NamedDecl *> &Redeclared) { 480 const DeclContext *DC = ObjCMethod->getDeclContext(); 481 if (const auto *IMD = dyn_cast<ObjCImplDecl>(DC)) { 482 const ObjCInterfaceDecl *ID = IMD->getClassInterface(); 483 if (!ID) 484 return; 485 // Add redeclared method here. 486 for (const auto *Ext : ID->known_extensions()) { 487 if (ObjCMethodDecl *RedeclaredMethod = 488 Ext->getMethod(ObjCMethod->getSelector(), 489 ObjCMethod->isInstanceMethod())) 490 Redeclared.push_back(RedeclaredMethod); 491 } 492 } 493 } 494 495 void ASTContext::attachCommentsToJustParsedDecls(ArrayRef<Decl *> Decls, 496 const Preprocessor *PP) { 497 if (Comments.empty() || Decls.empty()) 498 return; 499 500 FileID File; 501 for (const Decl *D : Decls) { 502 if (D->isInvalidDecl()) 503 continue; 504 505 D = &adjustDeclToTemplate(*D); 506 SourceLocation Loc = D->getLocation(); 507 if (Loc.isValid()) { 508 // See if there are any new comments that are not attached to a decl. 509 // The location doesn't have to be precise - we care only about the file. 510 File = SourceMgr.getDecomposedLoc(Loc).first; 511 break; 512 } 513 } 514 515 if (File.isInvalid()) 516 return; 517 518 auto CommentsInThisFile = Comments.getCommentsInFile(File); 519 if (!CommentsInThisFile || CommentsInThisFile->empty() || 520 CommentsInThisFile->rbegin()->second->isAttached()) 521 return; 522 523 // There is at least one comment not attached to a decl. 524 // Maybe it should be attached to one of Decls? 525 // 526 // Note that this way we pick up not only comments that precede the 527 // declaration, but also comments that *follow* the declaration -- thanks to 528 // the lookahead in the lexer: we've consumed the semicolon and looked 529 // ahead through comments. 
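// Walk the just-parsed declarations and try to attach one of the new comments to each declaration that does not already have a comment cached.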
530 for (const Decl *D : Decls) { 531 assert(D); 532 if (D->isInvalidDecl()) 533 continue; 534 535 D = &adjustDeclToTemplate(*D); 536 537 if (DeclRawComments.count(D) > 0) 538 continue; 539 540 const auto DeclLocs = getDeclLocsForCommentSearch(D, SourceMgr); 541 542 for (const auto DeclLoc : DeclLocs) { 543 if (DeclLoc.isInvalid() || !DeclLoc.isFileID()) 544 continue; 545 546 if (RawComment *const DocComment = getRawCommentForDeclNoCacheImpl( 547 D, DeclLoc, *CommentsInThisFile)) { 548 cacheRawCommentForDecl(*D, *DocComment); 549 comments::FullComment *FC = DocComment->parse(*this, PP, D); 550 ParsedComments[D->getCanonicalDecl()] = FC; 551 break; 552 } 553 } 554 } 555 } 556 557 comments::FullComment *ASTContext::cloneFullComment(comments::FullComment *FC, 558 const Decl *D) const { 559 auto *ThisDeclInfo = new (*this) comments::DeclInfo; 560 ThisDeclInfo->CommentDecl = D; 561 ThisDeclInfo->IsFilled = false; 562 ThisDeclInfo->fill(); 563 ThisDeclInfo->CommentDecl = FC->getDecl(); 564 if (!ThisDeclInfo->TemplateParameters) 565 ThisDeclInfo->TemplateParameters = FC->getDeclInfo()->TemplateParameters; 566 comments::FullComment *CFC = 567 new (*this) comments::FullComment(FC->getBlocks(), 568 ThisDeclInfo); 569 return CFC; 570 } 571 572 comments::FullComment *ASTContext::getLocalCommentForDeclUncached(const Decl *D) const { 573 const RawComment *RC = getRawCommentForDeclNoCache(D); 574 return RC ? RC->parse(*this, nullptr, D) : nullptr; 575 } 576 577 comments::FullComment *ASTContext::getCommentForDecl( 578 const Decl *D, 579 const Preprocessor *PP) const { 580 if (!D || D->isInvalidDecl()) 581 return nullptr; 582 D = &adjustDeclToTemplate(*D); 583 584 const Decl *Canonical = D->getCanonicalDecl(); 585 llvm::DenseMap<const Decl *, comments::FullComment *>::iterator Pos = 586 ParsedComments.find(Canonical); 587 588 if (Pos != ParsedComments.end()) { 589 if (Canonical != D) { 590 comments::FullComment *FC = Pos->second; 591 comments::FullComment *CFC = cloneFullComment(FC, D); 592 return CFC; 593 } 594 return Pos->second; 595 } 596 597 const Decl *OriginalDecl = nullptr; 598 599 const RawComment *RC = getRawCommentForAnyRedecl(D, &OriginalDecl); 600 if (!RC) { 601 if (isa<ObjCMethodDecl>(D) || isa<FunctionDecl>(D)) { 602 SmallVector<const NamedDecl*, 8> Overridden; 603 const auto *OMD = dyn_cast<ObjCMethodDecl>(D); 604 if (OMD && OMD->isPropertyAccessor()) 605 if (const ObjCPropertyDecl *PDecl = OMD->findPropertyDecl()) 606 if (comments::FullComment *FC = getCommentForDecl(PDecl, PP)) 607 return cloneFullComment(FC, D); 608 if (OMD) 609 addRedeclaredMethods(OMD, Overridden); 610 getOverriddenMethods(dyn_cast<NamedDecl>(D), Overridden); 611 for (unsigned i = 0, e = Overridden.size(); i < e; i++) 612 if (comments::FullComment *FC = getCommentForDecl(Overridden[i], PP)) 613 return cloneFullComment(FC, D); 614 } 615 else if (const auto *TD = dyn_cast<TypedefNameDecl>(D)) { 616 // Attach any tag type's documentation to its typedef if latter 617 // does not have one of its own. 
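// For example, a doc comment on 'struct Point { ... };' is also used for 'typedef struct Point PointT;' when the typedef itself is undocumented.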
618 QualType QT = TD->getUnderlyingType(); 619 if (const auto *TT = QT->getAs<TagType>()) 620 if (const Decl *TD = TT->getDecl()) 621 if (comments::FullComment *FC = getCommentForDecl(TD, PP)) 622 return cloneFullComment(FC, D); 623 } 624 else if (const auto *IC = dyn_cast<ObjCInterfaceDecl>(D)) { 625 while (IC->getSuperClass()) { 626 IC = IC->getSuperClass(); 627 if (comments::FullComment *FC = getCommentForDecl(IC, PP)) 628 return cloneFullComment(FC, D); 629 } 630 } 631 else if (const auto *CD = dyn_cast<ObjCCategoryDecl>(D)) { 632 if (const ObjCInterfaceDecl *IC = CD->getClassInterface()) 633 if (comments::FullComment *FC = getCommentForDecl(IC, PP)) 634 return cloneFullComment(FC, D); 635 } 636 else if (const auto *RD = dyn_cast<CXXRecordDecl>(D)) { 637 if (!(RD = RD->getDefinition())) 638 return nullptr; 639 // Check non-virtual bases. 640 for (const auto &I : RD->bases()) { 641 if (I.isVirtual() || (I.getAccessSpecifier() != AS_public)) 642 continue; 643 QualType Ty = I.getType(); 644 if (Ty.isNull()) 645 continue; 646 if (const CXXRecordDecl *NonVirtualBase = Ty->getAsCXXRecordDecl()) { 647 if (!(NonVirtualBase= NonVirtualBase->getDefinition())) 648 continue; 649 650 if (comments::FullComment *FC = getCommentForDecl((NonVirtualBase), PP)) 651 return cloneFullComment(FC, D); 652 } 653 } 654 // Check virtual bases. 655 for (const auto &I : RD->vbases()) { 656 if (I.getAccessSpecifier() != AS_public) 657 continue; 658 QualType Ty = I.getType(); 659 if (Ty.isNull()) 660 continue; 661 if (const CXXRecordDecl *VirtualBase = Ty->getAsCXXRecordDecl()) { 662 if (!(VirtualBase= VirtualBase->getDefinition())) 663 continue; 664 if (comments::FullComment *FC = getCommentForDecl((VirtualBase), PP)) 665 return cloneFullComment(FC, D); 666 } 667 } 668 } 669 return nullptr; 670 } 671 672 // If the RawComment was attached to other redeclaration of this Decl, we 673 // should parse the comment in context of that other Decl. This is important 674 // because comments can contain references to parameter names which can be 675 // different across redeclarations. 
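// Parsing against OriginalDecl also caches the result under the shared canonical declaration, so later lookups for other redeclarations are served from ParsedComments.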
676 if (D != OriginalDecl && OriginalDecl) 677 return getCommentForDecl(OriginalDecl, PP); 678 679 comments::FullComment *FC = RC->parse(*this, PP, D); 680 ParsedComments[Canonical] = FC; 681 return FC; 682 } 683 684 void 685 ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID, 686 const ASTContext &C, 687 TemplateTemplateParmDecl *Parm) { 688 ID.AddInteger(Parm->getDepth()); 689 ID.AddInteger(Parm->getPosition()); 690 ID.AddBoolean(Parm->isParameterPack()); 691 692 TemplateParameterList *Params = Parm->getTemplateParameters(); 693 ID.AddInteger(Params->size()); 694 for (TemplateParameterList::const_iterator P = Params->begin(), 695 PEnd = Params->end(); 696 P != PEnd; ++P) { 697 if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) { 698 ID.AddInteger(0); 699 ID.AddBoolean(TTP->isParameterPack()); 700 if (TTP->isExpandedParameterPack()) { 701 ID.AddBoolean(true); 702 ID.AddInteger(TTP->getNumExpansionParameters()); 703 } else 704 ID.AddBoolean(false); 705 continue; 706 } 707 708 if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) { 709 ID.AddInteger(1); 710 ID.AddBoolean(NTTP->isParameterPack()); 711 ID.AddPointer(C.getUnconstrainedType(C.getCanonicalType(NTTP->getType())) 712 .getAsOpaquePtr()); 713 if (NTTP->isExpandedParameterPack()) { 714 ID.AddBoolean(true); 715 ID.AddInteger(NTTP->getNumExpansionTypes()); 716 for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) { 717 QualType T = NTTP->getExpansionType(I); 718 ID.AddPointer(T.getCanonicalType().getAsOpaquePtr()); 719 } 720 } else 721 ID.AddBoolean(false); 722 continue; 723 } 724 725 auto *TTP = cast<TemplateTemplateParmDecl>(*P); 726 ID.AddInteger(2); 727 Profile(ID, C, TTP); 728 } 729 } 730 731 TemplateTemplateParmDecl * 732 ASTContext::getCanonicalTemplateTemplateParmDecl( 733 TemplateTemplateParmDecl *TTP) const { 734 // Check if we already have a canonical template template parameter. 735 llvm::FoldingSetNodeID ID; 736 CanonicalTemplateTemplateParm::Profile(ID, *this, TTP); 737 void *InsertPos = nullptr; 738 CanonicalTemplateTemplateParm *Canonical 739 = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos); 740 if (Canonical) 741 return Canonical->getParam(); 742 743 // Build a canonical template parameter list. 744 TemplateParameterList *Params = TTP->getTemplateParameters(); 745 SmallVector<NamedDecl *, 4> CanonParams; 746 CanonParams.reserve(Params->size()); 747 for (TemplateParameterList::const_iterator P = Params->begin(), 748 PEnd = Params->end(); 749 P != PEnd; ++P) { 750 // Note that, per C++20 [temp.over.link]/6, when determining whether 751 // template-parameters are equivalent, constraints are ignored. 752 if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) { 753 TemplateTypeParmDecl *NewTTP = TemplateTypeParmDecl::Create( 754 *this, getTranslationUnitDecl(), SourceLocation(), SourceLocation(), 755 TTP->getDepth(), TTP->getIndex(), nullptr, false, 756 TTP->isParameterPack(), /*HasTypeConstraint=*/false, 757 TTP->isExpandedParameterPack() 758 ? 
std::optional<unsigned>(TTP->getNumExpansionParameters()) 759 : std::nullopt); 760 CanonParams.push_back(NewTTP); 761 } else if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) { 762 QualType T = getUnconstrainedType(getCanonicalType(NTTP->getType())); 763 TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T); 764 NonTypeTemplateParmDecl *Param; 765 if (NTTP->isExpandedParameterPack()) { 766 SmallVector<QualType, 2> ExpandedTypes; 767 SmallVector<TypeSourceInfo *, 2> ExpandedTInfos; 768 for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) { 769 ExpandedTypes.push_back(getCanonicalType(NTTP->getExpansionType(I))); 770 ExpandedTInfos.push_back( 771 getTrivialTypeSourceInfo(ExpandedTypes.back())); 772 } 773 774 Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(), 775 SourceLocation(), 776 SourceLocation(), 777 NTTP->getDepth(), 778 NTTP->getPosition(), nullptr, 779 T, 780 TInfo, 781 ExpandedTypes, 782 ExpandedTInfos); 783 } else { 784 Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(), 785 SourceLocation(), 786 SourceLocation(), 787 NTTP->getDepth(), 788 NTTP->getPosition(), nullptr, 789 T, 790 NTTP->isParameterPack(), 791 TInfo); 792 } 793 CanonParams.push_back(Param); 794 } else 795 CanonParams.push_back(getCanonicalTemplateTemplateParmDecl( 796 cast<TemplateTemplateParmDecl>(*P))); 797 } 798 799 TemplateTemplateParmDecl *CanonTTP = TemplateTemplateParmDecl::Create( 800 *this, getTranslationUnitDecl(), SourceLocation(), TTP->getDepth(), 801 TTP->getPosition(), TTP->isParameterPack(), nullptr, 802 TemplateParameterList::Create(*this, SourceLocation(), SourceLocation(), 803 CanonParams, SourceLocation(), 804 /*RequiresClause=*/nullptr)); 805 806 // Get the new insert position for the node we care about. 807 Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos); 808 assert(!Canonical && "Shouldn't be in the map!"); 809 (void)Canonical; 810 811 // Create the canonical template template parameter entry. 
812 Canonical = new (*this) CanonicalTemplateTemplateParm(CanonTTP); 813 CanonTemplateTemplateParms.InsertNode(Canonical, InsertPos); 814 return CanonTTP; 815 } 816 817 TargetCXXABI::Kind ASTContext::getCXXABIKind() const { 818 auto Kind = getTargetInfo().getCXXABI().getKind(); 819 return getLangOpts().CXXABI.value_or(Kind); 820 } 821 822 CXXABI *ASTContext::createCXXABI(const TargetInfo &T) { 823 if (!LangOpts.CPlusPlus) return nullptr; 824 825 switch (getCXXABIKind()) { 826 case TargetCXXABI::AppleARM64: 827 case TargetCXXABI::Fuchsia: 828 case TargetCXXABI::GenericARM: // Same as Itanium at this level 829 case TargetCXXABI::iOS: 830 case TargetCXXABI::WatchOS: 831 case TargetCXXABI::GenericAArch64: 832 case TargetCXXABI::GenericMIPS: 833 case TargetCXXABI::GenericItanium: 834 case TargetCXXABI::WebAssembly: 835 case TargetCXXABI::XL: 836 return CreateItaniumCXXABI(*this); 837 case TargetCXXABI::Microsoft: 838 return CreateMicrosoftCXXABI(*this); 839 } 840 llvm_unreachable("Invalid CXXABI type!"); 841 } 842 843 interp::Context &ASTContext::getInterpContext() { 844 if (!InterpContext) { 845 InterpContext.reset(new interp::Context(*this)); 846 } 847 return *InterpContext.get(); 848 } 849 850 ParentMapContext &ASTContext::getParentMapContext() { 851 if (!ParentMapCtx) 852 ParentMapCtx.reset(new ParentMapContext(*this)); 853 return *ParentMapCtx.get(); 854 } 855 856 static bool isAddrSpaceMapManglingEnabled(const TargetInfo &TI, 857 const LangOptions &LangOpts) { 858 switch (LangOpts.getAddressSpaceMapMangling()) { 859 case LangOptions::ASMM_Target: 860 return TI.useAddressSpaceMapMangling(); 861 case LangOptions::ASMM_On: 862 return true; 863 case LangOptions::ASMM_Off: 864 return false; 865 } 866 llvm_unreachable("getAddressSpaceMapMangling() doesn't cover anything."); 867 } 868 869 ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM, 870 IdentifierTable &idents, SelectorTable &sels, 871 Builtin::Context &builtins, TranslationUnitKind TUKind) 872 : ConstantArrayTypes(this_(), ConstantArrayTypesLog2InitSize), 873 DependentSizedArrayTypes(this_()), DependentSizedExtVectorTypes(this_()), 874 DependentAddressSpaceTypes(this_()), DependentVectorTypes(this_()), 875 DependentSizedMatrixTypes(this_()), 876 FunctionProtoTypes(this_(), FunctionProtoTypesLog2InitSize), 877 DependentTypeOfExprTypes(this_()), DependentDecltypeTypes(this_()), 878 TemplateSpecializationTypes(this_()), 879 DependentTemplateSpecializationTypes(this_()), AutoTypes(this_()), 880 DependentBitIntTypes(this_()), SubstTemplateTemplateParmPacks(this_()), 881 CanonTemplateTemplateParms(this_()), SourceMgr(SM), LangOpts(LOpts), 882 NoSanitizeL(new NoSanitizeList(LangOpts.NoSanitizeFiles, SM)), 883 XRayFilter(new XRayFunctionFilter(LangOpts.XRayAlwaysInstrumentFiles, 884 LangOpts.XRayNeverInstrumentFiles, 885 LangOpts.XRayAttrListFiles, SM)), 886 ProfList(new ProfileList(LangOpts.ProfileListFiles, SM)), 887 PrintingPolicy(LOpts), Idents(idents), Selectors(sels), 888 BuiltinInfo(builtins), TUKind(TUKind), DeclarationNames(*this), 889 Comments(SM), CommentCommandTraits(BumpAlloc, LOpts.CommentOpts), 890 CompCategories(this_()), LastSDM(nullptr, 0) { 891 addTranslationUnitDecl(); 892 } 893 894 void ASTContext::cleanup() { 895 // Release the DenseMaps associated with DeclContext objects. 896 // FIXME: Is this the ideal solution? 897 ReleaseDeclContextMaps(); 898 899 // Call all of the deallocation functions on all of their targets. 
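// Deallocations holds the (callback, object) pairs registered through AddDeallocation().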
900 for (auto &Pair : Deallocations) 901 (Pair.first)(Pair.second); 902 Deallocations.clear(); 903 904 // ASTRecordLayout objects in ASTRecordLayouts must always be destroyed 905 // because they can contain DenseMaps. 906 for (llvm::DenseMap<const ObjCContainerDecl*, 907 const ASTRecordLayout*>::iterator 908 I = ObjCLayouts.begin(), E = ObjCLayouts.end(); I != E; ) 909 // Increment in loop to prevent using deallocated memory. 910 if (auto *R = const_cast<ASTRecordLayout *>((I++)->second)) 911 R->Destroy(*this); 912 ObjCLayouts.clear(); 913 914 for (llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator 915 I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) { 916 // Increment in loop to prevent using deallocated memory. 917 if (auto *R = const_cast<ASTRecordLayout *>((I++)->second)) 918 R->Destroy(*this); 919 } 920 ASTRecordLayouts.clear(); 921 922 for (llvm::DenseMap<const Decl*, AttrVec*>::iterator A = DeclAttrs.begin(), 923 AEnd = DeclAttrs.end(); 924 A != AEnd; ++A) 925 A->second->~AttrVec(); 926 DeclAttrs.clear(); 927 928 for (const auto &Value : ModuleInitializers) 929 Value.second->~PerModuleInitializers(); 930 ModuleInitializers.clear(); 931 } 932 933 ASTContext::~ASTContext() { cleanup(); } 934 935 void ASTContext::setTraversalScope(const std::vector<Decl *> &TopLevelDecls) { 936 TraversalScope = TopLevelDecls; 937 getParentMapContext().clear(); 938 } 939 940 void ASTContext::AddDeallocation(void (*Callback)(void *), void *Data) const { 941 Deallocations.push_back({Callback, Data}); 942 } 943 944 void 945 ASTContext::setExternalSource(IntrusiveRefCntPtr<ExternalASTSource> Source) { 946 ExternalSource = std::move(Source); 947 } 948 949 void ASTContext::PrintStats() const { 950 llvm::errs() << "\n*** AST Context Stats:\n"; 951 llvm::errs() << " " << Types.size() << " types total.\n"; 952 953 unsigned counts[] = { 954 #define TYPE(Name, Parent) 0, 955 #define ABSTRACT_TYPE(Name, Parent) 956 #include "clang/AST/TypeNodes.inc" 957 0 // Extra 958 }; 959 960 for (unsigned i = 0, e = Types.size(); i != e; ++i) { 961 Type *T = Types[i]; 962 counts[(unsigned)T->getTypeClass()]++; 963 } 964 965 unsigned Idx = 0; 966 unsigned TotalBytes = 0; 967 #define TYPE(Name, Parent) \ 968 if (counts[Idx]) \ 969 llvm::errs() << " " << counts[Idx] << " " << #Name \ 970 << " types, " << sizeof(Name##Type) << " each " \ 971 << "(" << counts[Idx] * sizeof(Name##Type) \ 972 << " bytes)\n"; \ 973 TotalBytes += counts[Idx] * sizeof(Name##Type); \ 974 ++Idx; 975 #define ABSTRACT_TYPE(Name, Parent) 976 #include "clang/AST/TypeNodes.inc" 977 978 llvm::errs() << "Total bytes = " << TotalBytes << "\n"; 979 980 // Implicit special member functions. 
981 llvm::errs() << NumImplicitDefaultConstructorsDeclared << "/" 982 << NumImplicitDefaultConstructors 983 << " implicit default constructors created\n"; 984 llvm::errs() << NumImplicitCopyConstructorsDeclared << "/" 985 << NumImplicitCopyConstructors 986 << " implicit copy constructors created\n"; 987 if (getLangOpts().CPlusPlus) 988 llvm::errs() << NumImplicitMoveConstructorsDeclared << "/" 989 << NumImplicitMoveConstructors 990 << " implicit move constructors created\n"; 991 llvm::errs() << NumImplicitCopyAssignmentOperatorsDeclared << "/" 992 << NumImplicitCopyAssignmentOperators 993 << " implicit copy assignment operators created\n"; 994 if (getLangOpts().CPlusPlus) 995 llvm::errs() << NumImplicitMoveAssignmentOperatorsDeclared << "/" 996 << NumImplicitMoveAssignmentOperators 997 << " implicit move assignment operators created\n"; 998 llvm::errs() << NumImplicitDestructorsDeclared << "/" 999 << NumImplicitDestructors 1000 << " implicit destructors created\n"; 1001 1002 if (ExternalSource) { 1003 llvm::errs() << "\n"; 1004 ExternalSource->PrintStats(); 1005 } 1006 1007 BumpAlloc.PrintStats(); 1008 } 1009 1010 void ASTContext::mergeDefinitionIntoModule(NamedDecl *ND, Module *M, 1011 bool NotifyListeners) { 1012 if (NotifyListeners) 1013 if (auto *Listener = getASTMutationListener()) 1014 Listener->RedefinedHiddenDefinition(ND, M); 1015 1016 MergedDefModules[cast<NamedDecl>(ND->getCanonicalDecl())].push_back(M); 1017 } 1018 1019 void ASTContext::deduplicateMergedDefinitonsFor(NamedDecl *ND) { 1020 auto It = MergedDefModules.find(cast<NamedDecl>(ND->getCanonicalDecl())); 1021 if (It == MergedDefModules.end()) 1022 return; 1023 1024 auto &Merged = It->second; 1025 llvm::DenseSet<Module*> Found; 1026 for (Module *&M : Merged) 1027 if (!Found.insert(M).second) 1028 M = nullptr; 1029 llvm::erase(Merged, nullptr); 1030 } 1031 1032 ArrayRef<Module *> 1033 ASTContext::getModulesWithMergedDefinition(const NamedDecl *Def) { 1034 auto MergedIt = 1035 MergedDefModules.find(cast<NamedDecl>(Def->getCanonicalDecl())); 1036 if (MergedIt == MergedDefModules.end()) 1037 return std::nullopt; 1038 return MergedIt->second; 1039 } 1040 1041 void ASTContext::PerModuleInitializers::resolve(ASTContext &Ctx) { 1042 if (LazyInitializers.empty()) 1043 return; 1044 1045 auto *Source = Ctx.getExternalSource(); 1046 assert(Source && "lazy initializers but no external source"); 1047 1048 auto LazyInits = std::move(LazyInitializers); 1049 LazyInitializers.clear(); 1050 1051 for (auto ID : LazyInits) 1052 Initializers.push_back(Source->GetExternalDecl(ID)); 1053 1054 assert(LazyInitializers.empty() && 1055 "GetExternalDecl for lazy module initializer added more inits"); 1056 } 1057 1058 void ASTContext::addModuleInitializer(Module *M, Decl *D) { 1059 // One special case: if we add a module initializer that imports another 1060 // module, and that module's only initializer is an ImportDecl, simplify. 1061 if (const auto *ID = dyn_cast<ImportDecl>(D)) { 1062 auto It = ModuleInitializers.find(ID->getImportedModule()); 1063 1064 // Maybe the ImportDecl does nothing at all. (Common case.) 1065 if (It == ModuleInitializers.end()) 1066 return; 1067 1068 // Maybe the ImportDecl only imports another ImportDecl. 
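// If so, record that inner ImportDecl directly as M's initializer below, avoiding a chain through the intermediate module.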
1069 auto &Imported = *It->second; 1070 if (Imported.Initializers.size() + Imported.LazyInitializers.size() == 1) { 1071 Imported.resolve(*this); 1072 auto *OnlyDecl = Imported.Initializers.front(); 1073 if (isa<ImportDecl>(OnlyDecl)) 1074 D = OnlyDecl; 1075 } 1076 } 1077 1078 auto *&Inits = ModuleInitializers[M]; 1079 if (!Inits) 1080 Inits = new (*this) PerModuleInitializers; 1081 Inits->Initializers.push_back(D); 1082 } 1083 1084 void ASTContext::addLazyModuleInitializers(Module *M, ArrayRef<uint32_t> IDs) { 1085 auto *&Inits = ModuleInitializers[M]; 1086 if (!Inits) 1087 Inits = new (*this) PerModuleInitializers; 1088 Inits->LazyInitializers.insert(Inits->LazyInitializers.end(), 1089 IDs.begin(), IDs.end()); 1090 } 1091 1092 ArrayRef<Decl *> ASTContext::getModuleInitializers(Module *M) { 1093 auto It = ModuleInitializers.find(M); 1094 if (It == ModuleInitializers.end()) 1095 return std::nullopt; 1096 1097 auto *Inits = It->second; 1098 Inits->resolve(*this); 1099 return Inits->Initializers; 1100 } 1101 1102 void ASTContext::setCurrentNamedModule(Module *M) { 1103 assert(M->isNamedModule()); 1104 assert(!CurrentCXXNamedModule && 1105 "We should set named module for ASTContext for only once"); 1106 CurrentCXXNamedModule = M; 1107 } 1108 1109 ExternCContextDecl *ASTContext::getExternCContextDecl() const { 1110 if (!ExternCContext) 1111 ExternCContext = ExternCContextDecl::Create(*this, getTranslationUnitDecl()); 1112 1113 return ExternCContext; 1114 } 1115 1116 BuiltinTemplateDecl * 1117 ASTContext::buildBuiltinTemplateDecl(BuiltinTemplateKind BTK, 1118 const IdentifierInfo *II) const { 1119 auto *BuiltinTemplate = 1120 BuiltinTemplateDecl::Create(*this, getTranslationUnitDecl(), II, BTK); 1121 BuiltinTemplate->setImplicit(); 1122 getTranslationUnitDecl()->addDecl(BuiltinTemplate); 1123 1124 return BuiltinTemplate; 1125 } 1126 1127 BuiltinTemplateDecl * 1128 ASTContext::getMakeIntegerSeqDecl() const { 1129 if (!MakeIntegerSeqDecl) 1130 MakeIntegerSeqDecl = buildBuiltinTemplateDecl(BTK__make_integer_seq, 1131 getMakeIntegerSeqName()); 1132 return MakeIntegerSeqDecl; 1133 } 1134 1135 BuiltinTemplateDecl * 1136 ASTContext::getTypePackElementDecl() const { 1137 if (!TypePackElementDecl) 1138 TypePackElementDecl = buildBuiltinTemplateDecl(BTK__type_pack_element, 1139 getTypePackElementName()); 1140 return TypePackElementDecl; 1141 } 1142 1143 RecordDecl *ASTContext::buildImplicitRecord(StringRef Name, 1144 RecordDecl::TagKind TK) const { 1145 SourceLocation Loc; 1146 RecordDecl *NewDecl; 1147 if (getLangOpts().CPlusPlus) 1148 NewDecl = CXXRecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc, 1149 Loc, &Idents.get(Name)); 1150 else 1151 NewDecl = RecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc, Loc, 1152 &Idents.get(Name)); 1153 NewDecl->setImplicit(); 1154 NewDecl->addAttr(TypeVisibilityAttr::CreateImplicit( 1155 const_cast<ASTContext &>(*this), TypeVisibilityAttr::Default)); 1156 return NewDecl; 1157 } 1158 1159 TypedefDecl *ASTContext::buildImplicitTypedef(QualType T, 1160 StringRef Name) const { 1161 TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T); 1162 TypedefDecl *NewDecl = TypedefDecl::Create( 1163 const_cast<ASTContext &>(*this), getTranslationUnitDecl(), 1164 SourceLocation(), SourceLocation(), &Idents.get(Name), TInfo); 1165 NewDecl->setImplicit(); 1166 return NewDecl; 1167 } 1168 1169 TypedefDecl *ASTContext::getInt128Decl() const { 1170 if (!Int128Decl) 1171 Int128Decl = buildImplicitTypedef(Int128Ty, "__int128_t"); 1172 return Int128Decl; 1173 } 1174 1175 
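/// Lazily create the implicit '__uint128_t' typedef for the unsigned __int128 type.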
TypedefDecl *ASTContext::getUInt128Decl() const { 1176 if (!UInt128Decl) 1177 UInt128Decl = buildImplicitTypedef(UnsignedInt128Ty, "__uint128_t"); 1178 return UInt128Decl; 1179 } 1180 1181 void ASTContext::InitBuiltinType(CanQualType &R, BuiltinType::Kind K) { 1182 auto *Ty = new (*this, alignof(BuiltinType)) BuiltinType(K); 1183 R = CanQualType::CreateUnsafe(QualType(Ty, 0)); 1184 Types.push_back(Ty); 1185 } 1186 1187 void ASTContext::InitBuiltinTypes(const TargetInfo &Target, 1188 const TargetInfo *AuxTarget) { 1189 assert((!this->Target || this->Target == &Target) && 1190 "Incorrect target reinitialization"); 1191 assert(VoidTy.isNull() && "Context reinitialized?"); 1192 1193 this->Target = &Target; 1194 this->AuxTarget = AuxTarget; 1195 1196 ABI.reset(createCXXABI(Target)); 1197 AddrSpaceMapMangling = isAddrSpaceMapManglingEnabled(Target, LangOpts); 1198 1199 // C99 6.2.5p19. 1200 InitBuiltinType(VoidTy, BuiltinType::Void); 1201 1202 // C99 6.2.5p2. 1203 InitBuiltinType(BoolTy, BuiltinType::Bool); 1204 // C99 6.2.5p3. 1205 if (LangOpts.CharIsSigned) 1206 InitBuiltinType(CharTy, BuiltinType::Char_S); 1207 else 1208 InitBuiltinType(CharTy, BuiltinType::Char_U); 1209 // C99 6.2.5p4. 1210 InitBuiltinType(SignedCharTy, BuiltinType::SChar); 1211 InitBuiltinType(ShortTy, BuiltinType::Short); 1212 InitBuiltinType(IntTy, BuiltinType::Int); 1213 InitBuiltinType(LongTy, BuiltinType::Long); 1214 InitBuiltinType(LongLongTy, BuiltinType::LongLong); 1215 1216 // C99 6.2.5p6. 1217 InitBuiltinType(UnsignedCharTy, BuiltinType::UChar); 1218 InitBuiltinType(UnsignedShortTy, BuiltinType::UShort); 1219 InitBuiltinType(UnsignedIntTy, BuiltinType::UInt); 1220 InitBuiltinType(UnsignedLongTy, BuiltinType::ULong); 1221 InitBuiltinType(UnsignedLongLongTy, BuiltinType::ULongLong); 1222 1223 // C99 6.2.5p10. 
1224 InitBuiltinType(FloatTy, BuiltinType::Float); 1225 InitBuiltinType(DoubleTy, BuiltinType::Double); 1226 InitBuiltinType(LongDoubleTy, BuiltinType::LongDouble); 1227 1228 // GNU extension, __float128 for IEEE quadruple precision 1229 InitBuiltinType(Float128Ty, BuiltinType::Float128); 1230 1231 // __ibm128 for IBM extended precision 1232 InitBuiltinType(Ibm128Ty, BuiltinType::Ibm128); 1233 1234 // C11 extension ISO/IEC TS 18661-3 1235 InitBuiltinType(Float16Ty, BuiltinType::Float16); 1236 1237 // ISO/IEC JTC1 SC22 WG14 N1169 Extension 1238 InitBuiltinType(ShortAccumTy, BuiltinType::ShortAccum); 1239 InitBuiltinType(AccumTy, BuiltinType::Accum); 1240 InitBuiltinType(LongAccumTy, BuiltinType::LongAccum); 1241 InitBuiltinType(UnsignedShortAccumTy, BuiltinType::UShortAccum); 1242 InitBuiltinType(UnsignedAccumTy, BuiltinType::UAccum); 1243 InitBuiltinType(UnsignedLongAccumTy, BuiltinType::ULongAccum); 1244 InitBuiltinType(ShortFractTy, BuiltinType::ShortFract); 1245 InitBuiltinType(FractTy, BuiltinType::Fract); 1246 InitBuiltinType(LongFractTy, BuiltinType::LongFract); 1247 InitBuiltinType(UnsignedShortFractTy, BuiltinType::UShortFract); 1248 InitBuiltinType(UnsignedFractTy, BuiltinType::UFract); 1249 InitBuiltinType(UnsignedLongFractTy, BuiltinType::ULongFract); 1250 InitBuiltinType(SatShortAccumTy, BuiltinType::SatShortAccum); 1251 InitBuiltinType(SatAccumTy, BuiltinType::SatAccum); 1252 InitBuiltinType(SatLongAccumTy, BuiltinType::SatLongAccum); 1253 InitBuiltinType(SatUnsignedShortAccumTy, BuiltinType::SatUShortAccum); 1254 InitBuiltinType(SatUnsignedAccumTy, BuiltinType::SatUAccum); 1255 InitBuiltinType(SatUnsignedLongAccumTy, BuiltinType::SatULongAccum); 1256 InitBuiltinType(SatShortFractTy, BuiltinType::SatShortFract); 1257 InitBuiltinType(SatFractTy, BuiltinType::SatFract); 1258 InitBuiltinType(SatLongFractTy, BuiltinType::SatLongFract); 1259 InitBuiltinType(SatUnsignedShortFractTy, BuiltinType::SatUShortFract); 1260 InitBuiltinType(SatUnsignedFractTy, BuiltinType::SatUFract); 1261 InitBuiltinType(SatUnsignedLongFractTy, BuiltinType::SatULongFract); 1262 1263 // GNU extension, 128-bit integers. 1264 InitBuiltinType(Int128Ty, BuiltinType::Int128); 1265 InitBuiltinType(UnsignedInt128Ty, BuiltinType::UInt128); 1266 1267 // C++ 3.9.1p5 1268 if (TargetInfo::isTypeSigned(Target.getWCharType())) 1269 InitBuiltinType(WCharTy, BuiltinType::WChar_S); 1270 else // -fshort-wchar makes wchar_t be unsigned. 1271 InitBuiltinType(WCharTy, BuiltinType::WChar_U); 1272 if (LangOpts.CPlusPlus && LangOpts.WChar) 1273 WideCharTy = WCharTy; 1274 else { 1275 // C99 (or C++ using -fno-wchar). 1276 WideCharTy = getFromTargetType(Target.getWCharType()); 1277 } 1278 1279 WIntTy = getFromTargetType(Target.getWIntType()); 1280 1281 // C++20 (proposed) 1282 InitBuiltinType(Char8Ty, BuiltinType::Char8); 1283 1284 if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++ 1285 InitBuiltinType(Char16Ty, BuiltinType::Char16); 1286 else // C99 1287 Char16Ty = getFromTargetType(Target.getChar16Type()); 1288 1289 if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++ 1290 InitBuiltinType(Char32Ty, BuiltinType::Char32); 1291 else // C99 1292 Char32Ty = getFromTargetType(Target.getChar32Type()); 1293 1294 // Placeholder type for type-dependent expressions whose type is 1295 // completely unknown. No code should ever check a type against 1296 // DependentTy and users should never see it; however, it is here to 1297 // help diagnose failures to properly check for type-dependent 1298 // expressions. 
1299 InitBuiltinType(DependentTy, BuiltinType::Dependent); 1300 1301 // Placeholder type for functions. 1302 InitBuiltinType(OverloadTy, BuiltinType::Overload); 1303 1304 // Placeholder type for bound members. 1305 InitBuiltinType(BoundMemberTy, BuiltinType::BoundMember); 1306 1307 // Placeholder type for pseudo-objects. 1308 InitBuiltinType(PseudoObjectTy, BuiltinType::PseudoObject); 1309 1310 // "any" type; useful for debugger-like clients. 1311 InitBuiltinType(UnknownAnyTy, BuiltinType::UnknownAny); 1312 1313 // Placeholder type for unbridged ARC casts. 1314 InitBuiltinType(ARCUnbridgedCastTy, BuiltinType::ARCUnbridgedCast); 1315 1316 // Placeholder type for builtin functions. 1317 InitBuiltinType(BuiltinFnTy, BuiltinType::BuiltinFn); 1318 1319 // Placeholder type for OMP array sections. 1320 if (LangOpts.OpenMP) { 1321 InitBuiltinType(OMPArraySectionTy, BuiltinType::OMPArraySection); 1322 InitBuiltinType(OMPArrayShapingTy, BuiltinType::OMPArrayShaping); 1323 InitBuiltinType(OMPIteratorTy, BuiltinType::OMPIterator); 1324 } 1325 // Placeholder type for OpenACC array sections. 1326 if (LangOpts.OpenACC) { 1327 // FIXME: Once we implement OpenACC array sections in Sema, this will either 1328 // be combined with the OpenMP type, or given its own type. In the meantime, 1329 // just use the OpenMP type so that parsing can work. 1330 InitBuiltinType(OMPArraySectionTy, BuiltinType::OMPArraySection); 1331 } 1332 if (LangOpts.MatrixTypes) 1333 InitBuiltinType(IncompleteMatrixIdxTy, BuiltinType::IncompleteMatrixIdx); 1334 1335 // Builtin types for 'id', 'Class', and 'SEL'. 1336 InitBuiltinType(ObjCBuiltinIdTy, BuiltinType::ObjCId); 1337 InitBuiltinType(ObjCBuiltinClassTy, BuiltinType::ObjCClass); 1338 InitBuiltinType(ObjCBuiltinSelTy, BuiltinType::ObjCSel); 1339 1340 if (LangOpts.OpenCL) { 1341 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ 1342 InitBuiltinType(SingletonId, BuiltinType::Id); 1343 #include "clang/Basic/OpenCLImageTypes.def" 1344 1345 InitBuiltinType(OCLSamplerTy, BuiltinType::OCLSampler); 1346 InitBuiltinType(OCLEventTy, BuiltinType::OCLEvent); 1347 InitBuiltinType(OCLClkEventTy, BuiltinType::OCLClkEvent); 1348 InitBuiltinType(OCLQueueTy, BuiltinType::OCLQueue); 1349 InitBuiltinType(OCLReserveIDTy, BuiltinType::OCLReserveID); 1350 1351 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ 1352 InitBuiltinType(Id##Ty, BuiltinType::Id); 1353 #include "clang/Basic/OpenCLExtensionTypes.def" 1354 } 1355 1356 if (Target.hasAArch64SVETypes()) { 1357 #define SVE_TYPE(Name, Id, SingletonId) \ 1358 InitBuiltinType(SingletonId, BuiltinType::Id); 1359 #include "clang/Basic/AArch64SVEACLETypes.def" 1360 } 1361 1362 if (Target.getTriple().isPPC64()) { 1363 #define PPC_VECTOR_MMA_TYPE(Name, Id, Size) \ 1364 InitBuiltinType(Id##Ty, BuiltinType::Id); 1365 #include "clang/Basic/PPCTypes.def" 1366 #define PPC_VECTOR_VSX_TYPE(Name, Id, Size) \ 1367 InitBuiltinType(Id##Ty, BuiltinType::Id); 1368 #include "clang/Basic/PPCTypes.def" 1369 } 1370 1371 if (Target.hasRISCVVTypes()) { 1372 #define RVV_TYPE(Name, Id, SingletonId) \ 1373 InitBuiltinType(SingletonId, BuiltinType::Id); 1374 #include "clang/Basic/RISCVVTypes.def" 1375 } 1376 1377 if (Target.getTriple().isWasm() && Target.hasFeature("reference-types")) { 1378 #define WASM_TYPE(Name, Id, SingletonId) \ 1379 InitBuiltinType(SingletonId, BuiltinType::Id); 1380 #include "clang/Basic/WebAssemblyReferenceTypes.def" 1381 } 1382 1383 // Builtin type for __objc_yes and __objc_no 1384 ObjCBuiltinBoolTy = (Target.useSignedCharForObjCBool() ? 
1385 SignedCharTy : BoolTy); 1386 1387 ObjCConstantStringType = QualType(); 1388 1389 ObjCSuperType = QualType(); 1390 1391 // void * type 1392 if (LangOpts.OpenCLGenericAddressSpace) { 1393 auto Q = VoidTy.getQualifiers(); 1394 Q.setAddressSpace(LangAS::opencl_generic); 1395 VoidPtrTy = getPointerType(getCanonicalType( 1396 getQualifiedType(VoidTy.getUnqualifiedType(), Q))); 1397 } else { 1398 VoidPtrTy = getPointerType(VoidTy); 1399 } 1400 1401 // nullptr type (C++0x 2.14.7) 1402 InitBuiltinType(NullPtrTy, BuiltinType::NullPtr); 1403 1404 // half type (OpenCL 6.1.1.1) / ARM NEON __fp16 1405 InitBuiltinType(HalfTy, BuiltinType::Half); 1406 1407 InitBuiltinType(BFloat16Ty, BuiltinType::BFloat16); 1408 1409 // Builtin type used to help define __builtin_va_list. 1410 VaListTagDecl = nullptr; 1411 1412 // MSVC predeclares struct _GUID, and we need it to create MSGuidDecls. 1413 if (LangOpts.MicrosoftExt || LangOpts.Borland) { 1414 MSGuidTagDecl = buildImplicitRecord("_GUID"); 1415 getTranslationUnitDecl()->addDecl(MSGuidTagDecl); 1416 } 1417 } 1418 1419 DiagnosticsEngine &ASTContext::getDiagnostics() const { 1420 return SourceMgr.getDiagnostics(); 1421 } 1422 1423 AttrVec& ASTContext::getDeclAttrs(const Decl *D) { 1424 AttrVec *&Result = DeclAttrs[D]; 1425 if (!Result) { 1426 void *Mem = Allocate(sizeof(AttrVec)); 1427 Result = new (Mem) AttrVec; 1428 } 1429 1430 return *Result; 1431 } 1432 1433 /// Erase the attributes corresponding to the given declaration. 1434 void ASTContext::eraseDeclAttrs(const Decl *D) { 1435 llvm::DenseMap<const Decl*, AttrVec*>::iterator Pos = DeclAttrs.find(D); 1436 if (Pos != DeclAttrs.end()) { 1437 Pos->second->~AttrVec(); 1438 DeclAttrs.erase(Pos); 1439 } 1440 } 1441 1442 // FIXME: Remove ? 1443 MemberSpecializationInfo * 1444 ASTContext::getInstantiatedFromStaticDataMember(const VarDecl *Var) { 1445 assert(Var->isStaticDataMember() && "Not a static data member"); 1446 return getTemplateOrSpecializationInfo(Var) 1447 .dyn_cast<MemberSpecializationInfo *>(); 1448 } 1449 1450 ASTContext::TemplateOrSpecializationInfo 1451 ASTContext::getTemplateOrSpecializationInfo(const VarDecl *Var) { 1452 llvm::DenseMap<const VarDecl *, TemplateOrSpecializationInfo>::iterator Pos = 1453 TemplateOrInstantiation.find(Var); 1454 if (Pos == TemplateOrInstantiation.end()) 1455 return {}; 1456 1457 return Pos->second; 1458 } 1459 1460 void 1461 ASTContext::setInstantiatedFromStaticDataMember(VarDecl *Inst, VarDecl *Tmpl, 1462 TemplateSpecializationKind TSK, 1463 SourceLocation PointOfInstantiation) { 1464 assert(Inst->isStaticDataMember() && "Not a static data member"); 1465 assert(Tmpl->isStaticDataMember() && "Not a static data member"); 1466 setTemplateOrSpecializationInfo(Inst, new (*this) MemberSpecializationInfo( 1467 Tmpl, TSK, PointOfInstantiation)); 1468 } 1469 1470 void 1471 ASTContext::setTemplateOrSpecializationInfo(VarDecl *Inst, 1472 TemplateOrSpecializationInfo TSI) { 1473 assert(!TemplateOrInstantiation[Inst] && 1474 "Already noted what the variable was instantiated from"); 1475 TemplateOrInstantiation[Inst] = TSI; 1476 } 1477 1478 NamedDecl * 1479 ASTContext::getInstantiatedFromUsingDecl(NamedDecl *UUD) { 1480 return InstantiatedFromUsingDecl.lookup(UUD); 1481 } 1482 1483 void 1484 ASTContext::setInstantiatedFromUsingDecl(NamedDecl *Inst, NamedDecl *Pattern) { 1485 assert((isa<UsingDecl>(Pattern) || 1486 isa<UnresolvedUsingValueDecl>(Pattern) || 1487 isa<UnresolvedUsingTypenameDecl>(Pattern)) && 1488 "pattern decl is not a using decl"); 1489 
assert((isa<UsingDecl>(Inst) || 1490 isa<UnresolvedUsingValueDecl>(Inst) || 1491 isa<UnresolvedUsingTypenameDecl>(Inst)) && 1492 "instantiation did not produce a using decl"); 1493 assert(!InstantiatedFromUsingDecl[Inst] && "pattern already exists"); 1494 InstantiatedFromUsingDecl[Inst] = Pattern; 1495 } 1496 1497 UsingEnumDecl * 1498 ASTContext::getInstantiatedFromUsingEnumDecl(UsingEnumDecl *UUD) { 1499 return InstantiatedFromUsingEnumDecl.lookup(UUD); 1500 } 1501 1502 void ASTContext::setInstantiatedFromUsingEnumDecl(UsingEnumDecl *Inst, 1503 UsingEnumDecl *Pattern) { 1504 assert(!InstantiatedFromUsingEnumDecl[Inst] && "pattern already exists"); 1505 InstantiatedFromUsingEnumDecl[Inst] = Pattern; 1506 } 1507 1508 UsingShadowDecl * 1509 ASTContext::getInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst) { 1510 return InstantiatedFromUsingShadowDecl.lookup(Inst); 1511 } 1512 1513 void 1514 ASTContext::setInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst, 1515 UsingShadowDecl *Pattern) { 1516 assert(!InstantiatedFromUsingShadowDecl[Inst] && "pattern already exists"); 1517 InstantiatedFromUsingShadowDecl[Inst] = Pattern; 1518 } 1519 1520 FieldDecl *ASTContext::getInstantiatedFromUnnamedFieldDecl(FieldDecl *Field) { 1521 return InstantiatedFromUnnamedFieldDecl.lookup(Field); 1522 } 1523 1524 void ASTContext::setInstantiatedFromUnnamedFieldDecl(FieldDecl *Inst, 1525 FieldDecl *Tmpl) { 1526 assert(!Inst->getDeclName() && "Instantiated field decl is not unnamed"); 1527 assert(!Tmpl->getDeclName() && "Template field decl is not unnamed"); 1528 assert(!InstantiatedFromUnnamedFieldDecl[Inst] && 1529 "Already noted what unnamed field was instantiated from"); 1530 1531 InstantiatedFromUnnamedFieldDecl[Inst] = Tmpl; 1532 } 1533 1534 ASTContext::overridden_cxx_method_iterator 1535 ASTContext::overridden_methods_begin(const CXXMethodDecl *Method) const { 1536 return overridden_methods(Method).begin(); 1537 } 1538 1539 ASTContext::overridden_cxx_method_iterator 1540 ASTContext::overridden_methods_end(const CXXMethodDecl *Method) const { 1541 return overridden_methods(Method).end(); 1542 } 1543 1544 unsigned 1545 ASTContext::overridden_methods_size(const CXXMethodDecl *Method) const { 1546 auto Range = overridden_methods(Method); 1547 return Range.end() - Range.begin(); 1548 } 1549 1550 ASTContext::overridden_method_range 1551 ASTContext::overridden_methods(const CXXMethodDecl *Method) const { 1552 llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos = 1553 OverriddenMethods.find(Method->getCanonicalDecl()); 1554 if (Pos == OverriddenMethods.end()) 1555 return overridden_method_range(nullptr, nullptr); 1556 return overridden_method_range(Pos->second.begin(), Pos->second.end()); 1557 } 1558 1559 void ASTContext::addOverriddenMethod(const CXXMethodDecl *Method, 1560 const CXXMethodDecl *Overridden) { 1561 assert(Method->isCanonicalDecl() && Overridden->isCanonicalDecl()); 1562 OverriddenMethods[Method].push_back(Overridden); 1563 } 1564 1565 void ASTContext::getOverriddenMethods( 1566 const NamedDecl *D, 1567 SmallVectorImpl<const NamedDecl *> &Overridden) const { 1568 assert(D); 1569 1570 if (const auto *CXXMethod = dyn_cast<CXXMethodDecl>(D)) { 1571 Overridden.append(overridden_methods_begin(CXXMethod), 1572 overridden_methods_end(CXXMethod)); 1573 return; 1574 } 1575 1576 const auto *Method = dyn_cast<ObjCMethodDecl>(D); 1577 if (!Method) 1578 return; 1579 1580 SmallVector<const ObjCMethodDecl *, 8> OverDecls; 1581 Method->getOverriddenMethods(OverDecls); 1582 
Overridden.append(OverDecls.begin(), OverDecls.end()); 1583 } 1584 1585 void ASTContext::addedLocalImportDecl(ImportDecl *Import) { 1586 assert(!Import->getNextLocalImport() && 1587 "Import declaration already in the chain"); 1588 assert(!Import->isFromASTFile() && "Non-local import declaration"); 1589 if (!FirstLocalImport) { 1590 FirstLocalImport = Import; 1591 LastLocalImport = Import; 1592 return; 1593 } 1594 1595 LastLocalImport->setNextLocalImport(Import); 1596 LastLocalImport = Import; 1597 } 1598 1599 //===----------------------------------------------------------------------===// 1600 // Type Sizing and Analysis 1601 //===----------------------------------------------------------------------===// 1602 1603 /// getFloatTypeSemantics - Return the APFloat 'semantics' for the specified 1604 /// scalar floating point type. 1605 const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const { 1606 switch (T->castAs<BuiltinType>()->getKind()) { 1607 default: 1608 llvm_unreachable("Not a floating point type!"); 1609 case BuiltinType::BFloat16: 1610 return Target->getBFloat16Format(); 1611 case BuiltinType::Float16: 1612 return Target->getHalfFormat(); 1613 case BuiltinType::Half: 1614 // For HLSL, when the native half type is disabled, half will be treat as 1615 // float. 1616 if (getLangOpts().HLSL) 1617 if (getLangOpts().NativeHalfType) 1618 return Target->getHalfFormat(); 1619 else 1620 return Target->getFloatFormat(); 1621 else 1622 return Target->getHalfFormat(); 1623 case BuiltinType::Float: return Target->getFloatFormat(); 1624 case BuiltinType::Double: return Target->getDoubleFormat(); 1625 case BuiltinType::Ibm128: 1626 return Target->getIbm128Format(); 1627 case BuiltinType::LongDouble: 1628 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice) 1629 return AuxTarget->getLongDoubleFormat(); 1630 return Target->getLongDoubleFormat(); 1631 case BuiltinType::Float128: 1632 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice) 1633 return AuxTarget->getFloat128Format(); 1634 return Target->getFloat128Format(); 1635 } 1636 } 1637 1638 CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const { 1639 unsigned Align = Target->getCharWidth(); 1640 1641 const unsigned AlignFromAttr = D->getMaxAlignment(); 1642 if (AlignFromAttr) 1643 Align = AlignFromAttr; 1644 1645 // __attribute__((aligned)) can increase or decrease alignment 1646 // *except* on a struct or struct member, where it only increases 1647 // alignment unless 'packed' is also specified. 1648 // 1649 // It is an error for alignas to decrease alignment, so we can 1650 // ignore that possibility; Sema should diagnose it. 1651 bool UseAlignAttrOnly; 1652 if (const FieldDecl *FD = dyn_cast<FieldDecl>(D)) 1653 UseAlignAttrOnly = 1654 FD->hasAttr<PackedAttr>() || FD->getParent()->hasAttr<PackedAttr>(); 1655 else 1656 UseAlignAttrOnly = AlignFromAttr != 0; 1657 // If we're using the align attribute only, just ignore everything 1658 // else about the declaration and its type. 
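  // For illustration: for a packed field that also carries
  // __attribute__((aligned(2))), the attribute alone decides, so the type- and
  // layout-based adjustments below are skipped and 2-byte alignment is
  // returned.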
1659   if (UseAlignAttrOnly) {
1660     // do nothing
1661   } else if (const auto *VD = dyn_cast<ValueDecl>(D)) {
1662     QualType T = VD->getType();
1663     if (const auto *RT = T->getAs<ReferenceType>()) {
1664       if (ForAlignof)
1665         T = RT->getPointeeType();
1666       else
1667         T = getPointerType(RT->getPointeeType());
1668     }
1669     QualType BaseT = getBaseElementType(T);
1670     if (T->isFunctionType())
1671       Align = getTypeInfoImpl(T.getTypePtr()).Align;
1672     else if (!BaseT->isIncompleteType()) {
1673       // Adjust alignments of declarations with array type by the
1674       // large-array alignment on the target.
1675       if (const ArrayType *arrayType = getAsArrayType(T)) {
1676         unsigned MinWidth = Target->getLargeArrayMinWidth();
1677         if (!ForAlignof && MinWidth) {
1678           if (isa<VariableArrayType>(arrayType))
1679             Align = std::max(Align, Target->getLargeArrayAlign());
1680           else if (isa<ConstantArrayType>(arrayType) &&
1681                    MinWidth <= getTypeSize(cast<ConstantArrayType>(arrayType)))
1682             Align = std::max(Align, Target->getLargeArrayAlign());
1683         }
1684       }
1685       Align = std::max(Align, getPreferredTypeAlign(T.getTypePtr()));
1686       if (BaseT.getQualifiers().hasUnaligned())
1687         Align = Target->getCharWidth();
1688     }
1689 
1690     // Ensure minimum alignment for global variables.
1691     if (const auto *VD = dyn_cast<VarDecl>(D))
1692       if (VD->hasGlobalStorage() && !ForAlignof) {
1693         uint64_t TypeSize =
1694             !BaseT->isIncompleteType() ? getTypeSize(T.getTypePtr()) : 0;
1695         Align = std::max(Align, getTargetInfo().getMinGlobalAlign(TypeSize));
1696       }
1697 
1698     // Fields can be subject to extra alignment constraints, like if
1699     // the field is packed, the struct is packed, or the struct has a
1700     // max-field-alignment constraint (#pragma pack). So calculate
1701     // the actual alignment of the field within the struct, and then
1702     // (as we're expected to) constrain that by the alignment of the type.
1703     if (const auto *Field = dyn_cast<FieldDecl>(VD)) {
1704       const RecordDecl *Parent = Field->getParent();
1705       // We can only produce a sensible answer if the record is valid.
1706       if (!Parent->isInvalidDecl()) {
1707         const ASTRecordLayout &Layout = getASTRecordLayout(Parent);
1708 
1709         // Start with the record's overall alignment.
1710         unsigned FieldAlign = toBits(Layout.getAlignment());
1711 
1712         // Use the GCD of that and the offset within the record.
1713         uint64_t Offset = Layout.getFieldOffset(Field->getFieldIndex());
1714         if (Offset > 0) {
1715           // Alignment is always a power of 2, so the GCD will be a power of 2,
1716           // which means we get to do this crazy thing instead of Euclid's.
1717           uint64_t LowBitOfOffset = Offset & (~Offset + 1);
1718           if (LowBitOfOffset < FieldAlign)
1719             FieldAlign = static_cast<unsigned>(LowBitOfOffset);
1720         }
1721 
1722         Align = std::min(Align, FieldAlign);
1723       }
1724     }
1725   }
1726 
1727   // Some targets have a hard limitation on the maximum requestable alignment
1728   // in the aligned attribute for static variables.
1729   const unsigned MaxAlignedAttr = getTargetInfo().getMaxAlignedAttribute();
1730   const auto *VD = dyn_cast<VarDecl>(D);
1731   if (MaxAlignedAttr && VD && VD->getStorageClass() == SC_Static)
1732     Align = std::min(Align, MaxAlignedAttr);
1733 
1734   return toCharUnitsFromBits(Align);
1735 }
1736 
1737 CharUnits ASTContext::getExnObjectAlignment() const {
1738   return toCharUnitsFromBits(Target->getExnObjectAlignment());
1739 }
1740 
1741 // getTypeInfoDataSizeInChars - Return the size of a type, in
1742 // chars. If the type is a record, its data size is returned.
This is 1743 // the size of the memcpy that's performed when assigning this type 1744 // using a trivial copy/move assignment operator. 1745 TypeInfoChars ASTContext::getTypeInfoDataSizeInChars(QualType T) const { 1746 TypeInfoChars Info = getTypeInfoInChars(T); 1747 1748 // In C++, objects can sometimes be allocated into the tail padding 1749 // of a base-class subobject. We decide whether that's possible 1750 // during class layout, so here we can just trust the layout results. 1751 if (getLangOpts().CPlusPlus) { 1752 if (const auto *RT = T->getAs<RecordType>()) { 1753 const ASTRecordLayout &layout = getASTRecordLayout(RT->getDecl()); 1754 Info.Width = layout.getDataSize(); 1755 } 1756 } 1757 1758 return Info; 1759 } 1760 1761 /// getConstantArrayInfoInChars - Performing the computation in CharUnits 1762 /// instead of in bits prevents overflowing the uint64_t for some large arrays. 1763 TypeInfoChars 1764 static getConstantArrayInfoInChars(const ASTContext &Context, 1765 const ConstantArrayType *CAT) { 1766 TypeInfoChars EltInfo = Context.getTypeInfoInChars(CAT->getElementType()); 1767 uint64_t Size = CAT->getSize().getZExtValue(); 1768 assert((Size == 0 || static_cast<uint64_t>(EltInfo.Width.getQuantity()) <= 1769 (uint64_t)(-1)/Size) && 1770 "Overflow in array type char size evaluation"); 1771 uint64_t Width = EltInfo.Width.getQuantity() * Size; 1772 unsigned Align = EltInfo.Align.getQuantity(); 1773 if (!Context.getTargetInfo().getCXXABI().isMicrosoft() || 1774 Context.getTargetInfo().getPointerWidth(LangAS::Default) == 64) 1775 Width = llvm::alignTo(Width, Align); 1776 return TypeInfoChars(CharUnits::fromQuantity(Width), 1777 CharUnits::fromQuantity(Align), 1778 EltInfo.AlignRequirement); 1779 } 1780 1781 TypeInfoChars ASTContext::getTypeInfoInChars(const Type *T) const { 1782 if (const auto *CAT = dyn_cast<ConstantArrayType>(T)) 1783 return getConstantArrayInfoInChars(*this, CAT); 1784 TypeInfo Info = getTypeInfo(T); 1785 return TypeInfoChars(toCharUnitsFromBits(Info.Width), 1786 toCharUnitsFromBits(Info.Align), Info.AlignRequirement); 1787 } 1788 1789 TypeInfoChars ASTContext::getTypeInfoInChars(QualType T) const { 1790 return getTypeInfoInChars(T.getTypePtr()); 1791 } 1792 1793 bool ASTContext::isPromotableIntegerType(QualType T) const { 1794 // HLSL doesn't promote all small integer types to int, it 1795 // just uses the rank-based promotion rules for all types. 1796 if (getLangOpts().HLSL) 1797 return false; 1798 1799 if (const auto *BT = T->getAs<BuiltinType>()) 1800 switch (BT->getKind()) { 1801 case BuiltinType::Bool: 1802 case BuiltinType::Char_S: 1803 case BuiltinType::Char_U: 1804 case BuiltinType::SChar: 1805 case BuiltinType::UChar: 1806 case BuiltinType::Short: 1807 case BuiltinType::UShort: 1808 case BuiltinType::WChar_S: 1809 case BuiltinType::WChar_U: 1810 case BuiltinType::Char8: 1811 case BuiltinType::Char16: 1812 case BuiltinType::Char32: 1813 return true; 1814 default: 1815 return false; 1816 } 1817 1818 // Enumerated types are promotable to their compatible integer types 1819 // (C99 6.3.1.1) a.k.a. its underlying type (C++ [conv.prom]p2). 
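  // For illustration: an unscoped enumeration such as `enum E { A };` is
  // promotable to its underlying integer type, whereas a scoped enumeration
  // (`enum class F { B };`), or one whose promotion type is not yet known,
  // is rejected by the checks below.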
1820 if (const auto *ET = T->getAs<EnumType>()) { 1821 if (T->isDependentType() || ET->getDecl()->getPromotionType().isNull() || 1822 ET->getDecl()->isScoped()) 1823 return false; 1824 1825 return true; 1826 } 1827 1828 return false; 1829 } 1830 1831 bool ASTContext::isAlignmentRequired(const Type *T) const { 1832 return getTypeInfo(T).AlignRequirement != AlignRequirementKind::None; 1833 } 1834 1835 bool ASTContext::isAlignmentRequired(QualType T) const { 1836 return isAlignmentRequired(T.getTypePtr()); 1837 } 1838 1839 unsigned ASTContext::getTypeAlignIfKnown(QualType T, 1840 bool NeedsPreferredAlignment) const { 1841 // An alignment on a typedef overrides anything else. 1842 if (const auto *TT = T->getAs<TypedefType>()) 1843 if (unsigned Align = TT->getDecl()->getMaxAlignment()) 1844 return Align; 1845 1846 // If we have an (array of) complete type, we're done. 1847 T = getBaseElementType(T); 1848 if (!T->isIncompleteType()) 1849 return NeedsPreferredAlignment ? getPreferredTypeAlign(T) : getTypeAlign(T); 1850 1851 // If we had an array type, its element type might be a typedef 1852 // type with an alignment attribute. 1853 if (const auto *TT = T->getAs<TypedefType>()) 1854 if (unsigned Align = TT->getDecl()->getMaxAlignment()) 1855 return Align; 1856 1857 // Otherwise, see if the declaration of the type had an attribute. 1858 if (const auto *TT = T->getAs<TagType>()) 1859 return TT->getDecl()->getMaxAlignment(); 1860 1861 return 0; 1862 } 1863 1864 TypeInfo ASTContext::getTypeInfo(const Type *T) const { 1865 TypeInfoMap::iterator I = MemoizedTypeInfo.find(T); 1866 if (I != MemoizedTypeInfo.end()) 1867 return I->second; 1868 1869 // This call can invalidate MemoizedTypeInfo[T], so we need a second lookup. 1870 TypeInfo TI = getTypeInfoImpl(T); 1871 MemoizedTypeInfo[T] = TI; 1872 return TI; 1873 } 1874 1875 /// getTypeInfoImpl - Return the size of the specified type, in bits. This 1876 /// method does not work on incomplete types. 1877 /// 1878 /// FIXME: Pointers into different addr spaces could have different sizes and 1879 /// alignment requirements: getPointerInfo should take an AddrSpace, this 1880 /// should take a QualType, &c. 1881 TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const { 1882 uint64_t Width = 0; 1883 unsigned Align = 8; 1884 AlignRequirementKind AlignRequirement = AlignRequirementKind::None; 1885 LangAS AS = LangAS::Default; 1886 switch (T->getTypeClass()) { 1887 #define TYPE(Class, Base) 1888 #define ABSTRACT_TYPE(Class, Base) 1889 #define NON_CANONICAL_TYPE(Class, Base) 1890 #define DEPENDENT_TYPE(Class, Base) case Type::Class: 1891 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) \ 1892 case Type::Class: \ 1893 assert(!T->isDependentType() && "should not see dependent types here"); \ 1894 return getTypeInfo(cast<Class##Type>(T)->desugar().getTypePtr()); 1895 #include "clang/AST/TypeNodes.inc" 1896 llvm_unreachable("Should not see dependent types"); 1897 1898 case Type::FunctionNoProto: 1899 case Type::FunctionProto: 1900 // GCC extension: alignof(function) = 32 bits 1901 Width = 0; 1902 Align = 32; 1903 break; 1904 1905 case Type::IncompleteArray: 1906 case Type::VariableArray: 1907 case Type::ConstantArray: { 1908 // Model non-constant sized arrays as size zero, but track the alignment. 
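    // For example, a constant array such as `int A[8]` ends up with
    // Width = 8 * (bit-width of int), while an incomplete array or VLA keeps
    // Width = 0 and only contributes the element's alignment.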
1909 uint64_t Size = 0; 1910 if (const auto *CAT = dyn_cast<ConstantArrayType>(T)) 1911 Size = CAT->getSize().getZExtValue(); 1912 1913 TypeInfo EltInfo = getTypeInfo(cast<ArrayType>(T)->getElementType()); 1914 assert((Size == 0 || EltInfo.Width <= (uint64_t)(-1) / Size) && 1915 "Overflow in array type bit size evaluation"); 1916 Width = EltInfo.Width * Size; 1917 Align = EltInfo.Align; 1918 AlignRequirement = EltInfo.AlignRequirement; 1919 if (!getTargetInfo().getCXXABI().isMicrosoft() || 1920 getTargetInfo().getPointerWidth(LangAS::Default) == 64) 1921 Width = llvm::alignTo(Width, Align); 1922 break; 1923 } 1924 1925 case Type::ExtVector: 1926 case Type::Vector: { 1927 const auto *VT = cast<VectorType>(T); 1928 TypeInfo EltInfo = getTypeInfo(VT->getElementType()); 1929 Width = VT->isExtVectorBoolType() ? VT->getNumElements() 1930 : EltInfo.Width * VT->getNumElements(); 1931 // Enforce at least byte size and alignment. 1932 Width = std::max<unsigned>(8, Width); 1933 Align = std::max<unsigned>(8, Width); 1934 1935 // If the alignment is not a power of 2, round up to the next power of 2. 1936 // This happens for non-power-of-2 length vectors. 1937 if (Align & (Align-1)) { 1938 Align = llvm::bit_ceil(Align); 1939 Width = llvm::alignTo(Width, Align); 1940 } 1941 // Adjust the alignment based on the target max. 1942 uint64_t TargetVectorAlign = Target->getMaxVectorAlign(); 1943 if (TargetVectorAlign && TargetVectorAlign < Align) 1944 Align = TargetVectorAlign; 1945 if (VT->getVectorKind() == VectorKind::SveFixedLengthData) 1946 // Adjust the alignment for fixed-length SVE vectors. This is important 1947 // for non-power-of-2 vector lengths. 1948 Align = 128; 1949 else if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) 1950 // Adjust the alignment for fixed-length SVE predicates. 1951 Align = 16; 1952 else if (VT->getVectorKind() == VectorKind::RVVFixedLengthData) 1953 // Adjust the alignment for fixed-length RVV vectors. 1954 Align = std::min<unsigned>(64, Width); 1955 break; 1956 } 1957 1958 case Type::ConstantMatrix: { 1959 const auto *MT = cast<ConstantMatrixType>(T); 1960 TypeInfo ElementInfo = getTypeInfo(MT->getElementType()); 1961 // The internal layout of a matrix value is implementation defined. 1962 // Initially be ABI compatible with arrays with respect to alignment and 1963 // size. 1964 Width = ElementInfo.Width * MT->getNumRows() * MT->getNumColumns(); 1965 Align = ElementInfo.Align; 1966 break; 1967 } 1968 1969 case Type::Builtin: 1970 switch (cast<BuiltinType>(T)->getKind()) { 1971 default: llvm_unreachable("Unknown builtin type!"); 1972 case BuiltinType::Void: 1973 // GCC extension: alignof(void) = 8 bits. 
1974 Width = 0; 1975 Align = 8; 1976 break; 1977 case BuiltinType::Bool: 1978 Width = Target->getBoolWidth(); 1979 Align = Target->getBoolAlign(); 1980 break; 1981 case BuiltinType::Char_S: 1982 case BuiltinType::Char_U: 1983 case BuiltinType::UChar: 1984 case BuiltinType::SChar: 1985 case BuiltinType::Char8: 1986 Width = Target->getCharWidth(); 1987 Align = Target->getCharAlign(); 1988 break; 1989 case BuiltinType::WChar_S: 1990 case BuiltinType::WChar_U: 1991 Width = Target->getWCharWidth(); 1992 Align = Target->getWCharAlign(); 1993 break; 1994 case BuiltinType::Char16: 1995 Width = Target->getChar16Width(); 1996 Align = Target->getChar16Align(); 1997 break; 1998 case BuiltinType::Char32: 1999 Width = Target->getChar32Width(); 2000 Align = Target->getChar32Align(); 2001 break; 2002 case BuiltinType::UShort: 2003 case BuiltinType::Short: 2004 Width = Target->getShortWidth(); 2005 Align = Target->getShortAlign(); 2006 break; 2007 case BuiltinType::UInt: 2008 case BuiltinType::Int: 2009 Width = Target->getIntWidth(); 2010 Align = Target->getIntAlign(); 2011 break; 2012 case BuiltinType::ULong: 2013 case BuiltinType::Long: 2014 Width = Target->getLongWidth(); 2015 Align = Target->getLongAlign(); 2016 break; 2017 case BuiltinType::ULongLong: 2018 case BuiltinType::LongLong: 2019 Width = Target->getLongLongWidth(); 2020 Align = Target->getLongLongAlign(); 2021 break; 2022 case BuiltinType::Int128: 2023 case BuiltinType::UInt128: 2024 Width = 128; 2025 Align = Target->getInt128Align(); 2026 break; 2027 case BuiltinType::ShortAccum: 2028 case BuiltinType::UShortAccum: 2029 case BuiltinType::SatShortAccum: 2030 case BuiltinType::SatUShortAccum: 2031 Width = Target->getShortAccumWidth(); 2032 Align = Target->getShortAccumAlign(); 2033 break; 2034 case BuiltinType::Accum: 2035 case BuiltinType::UAccum: 2036 case BuiltinType::SatAccum: 2037 case BuiltinType::SatUAccum: 2038 Width = Target->getAccumWidth(); 2039 Align = Target->getAccumAlign(); 2040 break; 2041 case BuiltinType::LongAccum: 2042 case BuiltinType::ULongAccum: 2043 case BuiltinType::SatLongAccum: 2044 case BuiltinType::SatULongAccum: 2045 Width = Target->getLongAccumWidth(); 2046 Align = Target->getLongAccumAlign(); 2047 break; 2048 case BuiltinType::ShortFract: 2049 case BuiltinType::UShortFract: 2050 case BuiltinType::SatShortFract: 2051 case BuiltinType::SatUShortFract: 2052 Width = Target->getShortFractWidth(); 2053 Align = Target->getShortFractAlign(); 2054 break; 2055 case BuiltinType::Fract: 2056 case BuiltinType::UFract: 2057 case BuiltinType::SatFract: 2058 case BuiltinType::SatUFract: 2059 Width = Target->getFractWidth(); 2060 Align = Target->getFractAlign(); 2061 break; 2062 case BuiltinType::LongFract: 2063 case BuiltinType::ULongFract: 2064 case BuiltinType::SatLongFract: 2065 case BuiltinType::SatULongFract: 2066 Width = Target->getLongFractWidth(); 2067 Align = Target->getLongFractAlign(); 2068 break; 2069 case BuiltinType::BFloat16: 2070 if (Target->hasBFloat16Type()) { 2071 Width = Target->getBFloat16Width(); 2072 Align = Target->getBFloat16Align(); 2073 } else if ((getLangOpts().SYCLIsDevice || 2074 (getLangOpts().OpenMP && 2075 getLangOpts().OpenMPIsTargetDevice)) && 2076 AuxTarget->hasBFloat16Type()) { 2077 Width = AuxTarget->getBFloat16Width(); 2078 Align = AuxTarget->getBFloat16Align(); 2079 } 2080 break; 2081 case BuiltinType::Float16: 2082 case BuiltinType::Half: 2083 if (Target->hasFloat16Type() || !getLangOpts().OpenMP || 2084 !getLangOpts().OpenMPIsTargetDevice) { 2085 Width = Target->getHalfWidth(); 2086 
Align = Target->getHalfAlign(); 2087 } else { 2088 assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice && 2089 "Expected OpenMP device compilation."); 2090 Width = AuxTarget->getHalfWidth(); 2091 Align = AuxTarget->getHalfAlign(); 2092 } 2093 break; 2094 case BuiltinType::Float: 2095 Width = Target->getFloatWidth(); 2096 Align = Target->getFloatAlign(); 2097 break; 2098 case BuiltinType::Double: 2099 Width = Target->getDoubleWidth(); 2100 Align = Target->getDoubleAlign(); 2101 break; 2102 case BuiltinType::Ibm128: 2103 Width = Target->getIbm128Width(); 2104 Align = Target->getIbm128Align(); 2105 break; 2106 case BuiltinType::LongDouble: 2107 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice && 2108 (Target->getLongDoubleWidth() != AuxTarget->getLongDoubleWidth() || 2109 Target->getLongDoubleAlign() != AuxTarget->getLongDoubleAlign())) { 2110 Width = AuxTarget->getLongDoubleWidth(); 2111 Align = AuxTarget->getLongDoubleAlign(); 2112 } else { 2113 Width = Target->getLongDoubleWidth(); 2114 Align = Target->getLongDoubleAlign(); 2115 } 2116 break; 2117 case BuiltinType::Float128: 2118 if (Target->hasFloat128Type() || !getLangOpts().OpenMP || 2119 !getLangOpts().OpenMPIsTargetDevice) { 2120 Width = Target->getFloat128Width(); 2121 Align = Target->getFloat128Align(); 2122 } else { 2123 assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice && 2124 "Expected OpenMP device compilation."); 2125 Width = AuxTarget->getFloat128Width(); 2126 Align = AuxTarget->getFloat128Align(); 2127 } 2128 break; 2129 case BuiltinType::NullPtr: 2130 // C++ 3.9.1p11: sizeof(nullptr_t) == sizeof(void*) 2131 Width = Target->getPointerWidth(LangAS::Default); 2132 Align = Target->getPointerAlign(LangAS::Default); 2133 break; 2134 case BuiltinType::ObjCId: 2135 case BuiltinType::ObjCClass: 2136 case BuiltinType::ObjCSel: 2137 Width = Target->getPointerWidth(LangAS::Default); 2138 Align = Target->getPointerAlign(LangAS::Default); 2139 break; 2140 case BuiltinType::OCLSampler: 2141 case BuiltinType::OCLEvent: 2142 case BuiltinType::OCLClkEvent: 2143 case BuiltinType::OCLQueue: 2144 case BuiltinType::OCLReserveID: 2145 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ 2146 case BuiltinType::Id: 2147 #include "clang/Basic/OpenCLImageTypes.def" 2148 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ 2149 case BuiltinType::Id: 2150 #include "clang/Basic/OpenCLExtensionTypes.def" 2151 AS = Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T)); 2152 Width = Target->getPointerWidth(AS); 2153 Align = Target->getPointerAlign(AS); 2154 break; 2155 // The SVE types are effectively target-specific. The length of an 2156 // SVE_VECTOR_TYPE is only known at runtime, but it is always a multiple 2157 // of 128 bits. There is one predicate bit for each vector byte, so the 2158 // length of an SVE_PREDICATE_TYPE is always a multiple of 16 bits. 2159 // 2160 // Because the length is only known at runtime, we use a dummy value 2161 // of 0 for the static length. The alignment values are those defined 2162 // by the Procedure Call Standard for the Arm Architecture. 
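    // Illustratively, sizeof of a scalable type such as svint32_t is not a
    // usable compile-time constant (hence the zero widths below), while
    // alignof follows the AAPCS: 16 bytes for data vectors, 2 bytes for
    // predicates.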
2163 #define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits, \ 2164 IsSigned, IsFP, IsBF) \ 2165 case BuiltinType::Id: \ 2166 Width = 0; \ 2167 Align = 128; \ 2168 break; 2169 #define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls) \ 2170 case BuiltinType::Id: \ 2171 Width = 0; \ 2172 Align = 16; \ 2173 break; 2174 #define SVE_OPAQUE_TYPE(Name, MangledName, Id, SingletonId) \ 2175 case BuiltinType::Id: \ 2176 Width = 0; \ 2177 Align = 16; \ 2178 break; 2179 #include "clang/Basic/AArch64SVEACLETypes.def" 2180 #define PPC_VECTOR_TYPE(Name, Id, Size) \ 2181 case BuiltinType::Id: \ 2182 Width = Size; \ 2183 Align = Size; \ 2184 break; 2185 #include "clang/Basic/PPCTypes.def" 2186 #define RVV_VECTOR_TYPE(Name, Id, SingletonId, ElKind, ElBits, NF, IsSigned, \ 2187 IsFP, IsBF) \ 2188 case BuiltinType::Id: \ 2189 Width = 0; \ 2190 Align = ElBits; \ 2191 break; 2192 #define RVV_PREDICATE_TYPE(Name, Id, SingletonId, ElKind) \ 2193 case BuiltinType::Id: \ 2194 Width = 0; \ 2195 Align = 8; \ 2196 break; 2197 #include "clang/Basic/RISCVVTypes.def" 2198 #define WASM_TYPE(Name, Id, SingletonId) \ 2199 case BuiltinType::Id: \ 2200 Width = 0; \ 2201 Align = 8; \ 2202 break; 2203 #include "clang/Basic/WebAssemblyReferenceTypes.def" 2204 } 2205 break; 2206 case Type::ObjCObjectPointer: 2207 Width = Target->getPointerWidth(LangAS::Default); 2208 Align = Target->getPointerAlign(LangAS::Default); 2209 break; 2210 case Type::BlockPointer: 2211 AS = cast<BlockPointerType>(T)->getPointeeType().getAddressSpace(); 2212 Width = Target->getPointerWidth(AS); 2213 Align = Target->getPointerAlign(AS); 2214 break; 2215 case Type::LValueReference: 2216 case Type::RValueReference: 2217 // alignof and sizeof should never enter this code path here, so we go 2218 // the pointer route. 2219 AS = cast<ReferenceType>(T)->getPointeeType().getAddressSpace(); 2220 Width = Target->getPointerWidth(AS); 2221 Align = Target->getPointerAlign(AS); 2222 break; 2223 case Type::Pointer: 2224 AS = cast<PointerType>(T)->getPointeeType().getAddressSpace(); 2225 Width = Target->getPointerWidth(AS); 2226 Align = Target->getPointerAlign(AS); 2227 break; 2228 case Type::MemberPointer: { 2229 const auto *MPT = cast<MemberPointerType>(T); 2230 CXXABI::MemberPointerInfo MPI = ABI->getMemberPointerInfo(MPT); 2231 Width = MPI.Width; 2232 Align = MPI.Align; 2233 break; 2234 } 2235 case Type::Complex: { 2236 // Complex types have the same alignment as their elements, but twice the 2237 // size. 
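    // e.g. on common 64-bit targets _Complex double comes out as 128 bits wide
    // with the 64-bit alignment of its double element.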
2238 TypeInfo EltInfo = getTypeInfo(cast<ComplexType>(T)->getElementType()); 2239 Width = EltInfo.Width * 2; 2240 Align = EltInfo.Align; 2241 break; 2242 } 2243 case Type::ObjCObject: 2244 return getTypeInfo(cast<ObjCObjectType>(T)->getBaseType().getTypePtr()); 2245 case Type::Adjusted: 2246 case Type::Decayed: 2247 return getTypeInfo(cast<AdjustedType>(T)->getAdjustedType().getTypePtr()); 2248 case Type::ObjCInterface: { 2249 const auto *ObjCI = cast<ObjCInterfaceType>(T); 2250 if (ObjCI->getDecl()->isInvalidDecl()) { 2251 Width = 8; 2252 Align = 8; 2253 break; 2254 } 2255 const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl()); 2256 Width = toBits(Layout.getSize()); 2257 Align = toBits(Layout.getAlignment()); 2258 break; 2259 } 2260 case Type::BitInt: { 2261 const auto *EIT = cast<BitIntType>(T); 2262 Align = std::clamp<unsigned>(llvm::PowerOf2Ceil(EIT->getNumBits()), 2263 getCharWidth(), Target->getLongLongAlign()); 2264 Width = llvm::alignTo(EIT->getNumBits(), Align); 2265 break; 2266 } 2267 case Type::Record: 2268 case Type::Enum: { 2269 const auto *TT = cast<TagType>(T); 2270 2271 if (TT->getDecl()->isInvalidDecl()) { 2272 Width = 8; 2273 Align = 8; 2274 break; 2275 } 2276 2277 if (const auto *ET = dyn_cast<EnumType>(TT)) { 2278 const EnumDecl *ED = ET->getDecl(); 2279 TypeInfo Info = 2280 getTypeInfo(ED->getIntegerType()->getUnqualifiedDesugaredType()); 2281 if (unsigned AttrAlign = ED->getMaxAlignment()) { 2282 Info.Align = AttrAlign; 2283 Info.AlignRequirement = AlignRequirementKind::RequiredByEnum; 2284 } 2285 return Info; 2286 } 2287 2288 const auto *RT = cast<RecordType>(TT); 2289 const RecordDecl *RD = RT->getDecl(); 2290 const ASTRecordLayout &Layout = getASTRecordLayout(RD); 2291 Width = toBits(Layout.getSize()); 2292 Align = toBits(Layout.getAlignment()); 2293 AlignRequirement = RD->hasAttr<AlignedAttr>() 2294 ? AlignRequirementKind::RequiredByRecord 2295 : AlignRequirementKind::None; 2296 break; 2297 } 2298 2299 case Type::SubstTemplateTypeParm: 2300 return getTypeInfo(cast<SubstTemplateTypeParmType>(T)-> 2301 getReplacementType().getTypePtr()); 2302 2303 case Type::Auto: 2304 case Type::DeducedTemplateSpecialization: { 2305 const auto *A = cast<DeducedType>(T); 2306 assert(!A->getDeducedType().isNull() && 2307 "cannot request the size of an undeduced or dependent auto type"); 2308 return getTypeInfo(A->getDeducedType().getTypePtr()); 2309 } 2310 2311 case Type::Paren: 2312 return getTypeInfo(cast<ParenType>(T)->getInnerType().getTypePtr()); 2313 2314 case Type::MacroQualified: 2315 return getTypeInfo( 2316 cast<MacroQualifiedType>(T)->getUnderlyingType().getTypePtr()); 2317 2318 case Type::ObjCTypeParam: 2319 return getTypeInfo(cast<ObjCTypeParamType>(T)->desugar().getTypePtr()); 2320 2321 case Type::Using: 2322 return getTypeInfo(cast<UsingType>(T)->desugar().getTypePtr()); 2323 2324 case Type::Typedef: { 2325 const auto *TT = cast<TypedefType>(T); 2326 TypeInfo Info = getTypeInfo(TT->desugar().getTypePtr()); 2327 // If the typedef has an aligned attribute on it, it overrides any computed 2328 // alignment we have. This violates the GCC documentation (which says that 2329 // attribute(aligned) can only round up) but matches its implementation. 
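    // For illustration: given `typedef long long LL __attribute__((aligned(1)));`,
    // the computation below reports alignof(LL) == 1 byte, lower than the
    // natural alignment of long long, mirroring GCC's actual behaviour.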
2330     if (unsigned AttrAlign = TT->getDecl()->getMaxAlignment()) {
2331       Align = AttrAlign;
2332       AlignRequirement = AlignRequirementKind::RequiredByTypedef;
2333     } else {
2334       Align = Info.Align;
2335       AlignRequirement = Info.AlignRequirement;
2336     }
2337     Width = Info.Width;
2338     break;
2339   }
2340 
2341   case Type::Elaborated:
2342     return getTypeInfo(cast<ElaboratedType>(T)->getNamedType().getTypePtr());
2343 
2344   case Type::Attributed:
2345     return getTypeInfo(
2346         cast<AttributedType>(T)->getEquivalentType().getTypePtr());
2347 
2348   case Type::BTFTagAttributed:
2349     return getTypeInfo(
2350         cast<BTFTagAttributedType>(T)->getWrappedType().getTypePtr());
2351 
2352   case Type::Atomic: {
2353     // Start with the base type information.
2354     TypeInfo Info = getTypeInfo(cast<AtomicType>(T)->getValueType());
2355     Width = Info.Width;
2356     Align = Info.Align;
2357 
2358     if (!Width) {
2359       // An otherwise zero-sized type should still generate an
2360       // atomic operation.
2361       Width = Target->getCharWidth();
2362       assert(Align);
2363     } else if (Width <= Target->getMaxAtomicPromoteWidth()) {
2364       // If the size of the type doesn't exceed the platform's max
2365       // atomic promotion width, make the size and alignment more
2366       // favorable to atomic operations:
2367 
2368       // Round the size up to a power of 2.
2369       Width = llvm::bit_ceil(Width);
2370 
2371       // Set the alignment equal to the size.
2372       Align = static_cast<unsigned>(Width);
2373     }
2374   }
2375   break;
2376 
2377   case Type::Pipe:
2378     Width = Target->getPointerWidth(LangAS::opencl_global);
2379     Align = Target->getPointerAlign(LangAS::opencl_global);
2380     break;
2381   }
2382 
2383   assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2");
2384   return TypeInfo(Width, Align, AlignRequirement);
2385 }
2386 
2387 unsigned ASTContext::getTypeUnadjustedAlign(const Type *T) const {
2388   UnadjustedAlignMap::iterator I = MemoizedUnadjustedAlign.find(T);
2389   if (I != MemoizedUnadjustedAlign.end())
2390     return I->second;
2391 
2392   unsigned UnadjustedAlign;
2393   if (const auto *RT = T->getAs<RecordType>()) {
2394     const RecordDecl *RD = RT->getDecl();
2395     const ASTRecordLayout &Layout = getASTRecordLayout(RD);
2396     UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
2397   } else if (const auto *ObjCI = T->getAs<ObjCInterfaceType>()) {
2398     const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
2399     UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
2400   } else {
2401     UnadjustedAlign = getTypeAlign(T->getUnqualifiedDesugaredType());
2402   }
2403 
2404   MemoizedUnadjustedAlign[T] = UnadjustedAlign;
2405   return UnadjustedAlign;
2406 }
2407 
2408 unsigned ASTContext::getOpenMPDefaultSimdAlign(QualType T) const {
2409   unsigned SimdAlign = llvm::OpenMPIRBuilder::getOpenMPDefaultSimdAlign(
2410       getTargetInfo().getTriple(), Target->getTargetOpts().FeatureMap);
2411   return SimdAlign;
2412 }
2413 
2414 /// toCharUnitsFromBits - Convert a size in bits to a size in characters.
2415 CharUnits ASTContext::toCharUnitsFromBits(int64_t BitSize) const {
2416   return CharUnits::fromQuantity(BitSize / getCharWidth());
2417 }
2418 
2419 /// toBits - Convert a size in characters to a size in bits.
2420 int64_t ASTContext::toBits(CharUnits CharSize) const {
2421   return CharSize.getQuantity() * getCharWidth();
2422 }
2423 
2424 /// getTypeSizeInChars - Return the size of the specified type, in characters.
2425 /// This method does not work on incomplete types.
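/// For example, on a typical target where 'int' is 32 bits wide and a
/// character is 8 bits, getTypeSizeInChars(IntTy) yields 4 CharUnits.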
2426 CharUnits ASTContext::getTypeSizeInChars(QualType T) const { 2427 return getTypeInfoInChars(T).Width; 2428 } 2429 CharUnits ASTContext::getTypeSizeInChars(const Type *T) const { 2430 return getTypeInfoInChars(T).Width; 2431 } 2432 2433 /// getTypeAlignInChars - Return the ABI-specified alignment of a type, in 2434 /// characters. This method does not work on incomplete types. 2435 CharUnits ASTContext::getTypeAlignInChars(QualType T) const { 2436 return toCharUnitsFromBits(getTypeAlign(T)); 2437 } 2438 CharUnits ASTContext::getTypeAlignInChars(const Type *T) const { 2439 return toCharUnitsFromBits(getTypeAlign(T)); 2440 } 2441 2442 /// getTypeUnadjustedAlignInChars - Return the ABI-specified alignment of a 2443 /// type, in characters, before alignment adjustments. This method does 2444 /// not work on incomplete types. 2445 CharUnits ASTContext::getTypeUnadjustedAlignInChars(QualType T) const { 2446 return toCharUnitsFromBits(getTypeUnadjustedAlign(T)); 2447 } 2448 CharUnits ASTContext::getTypeUnadjustedAlignInChars(const Type *T) const { 2449 return toCharUnitsFromBits(getTypeUnadjustedAlign(T)); 2450 } 2451 2452 /// getPreferredTypeAlign - Return the "preferred" alignment of the specified 2453 /// type for the current target in bits. This can be different than the ABI 2454 /// alignment in cases where it is beneficial for performance or backwards 2455 /// compatibility preserving to overalign a data type. (Note: despite the name, 2456 /// the preferred alignment is ABI-impacting, and not an optimization.) 2457 unsigned ASTContext::getPreferredTypeAlign(const Type *T) const { 2458 TypeInfo TI = getTypeInfo(T); 2459 unsigned ABIAlign = TI.Align; 2460 2461 T = T->getBaseElementTypeUnsafe(); 2462 2463 // The preferred alignment of member pointers is that of a pointer. 2464 if (T->isMemberPointerType()) 2465 return getPreferredTypeAlign(getPointerDiffType().getTypePtr()); 2466 2467 if (!Target->allowsLargerPreferedTypeAlignment()) 2468 return ABIAlign; 2469 2470 if (const auto *RT = T->getAs<RecordType>()) { 2471 const RecordDecl *RD = RT->getDecl(); 2472 2473 // When used as part of a typedef, or together with a 'packed' attribute, 2474 // the 'aligned' attribute can be used to decrease alignment. Note that the 2475 // 'packed' case is already taken into consideration when computing the 2476 // alignment, we only need to handle the typedef case here. 2477 if (TI.AlignRequirement == AlignRequirementKind::RequiredByTypedef || 2478 RD->isInvalidDecl()) 2479 return ABIAlign; 2480 2481 unsigned PreferredAlign = static_cast<unsigned>( 2482 toBits(getASTRecordLayout(RD).PreferredAlignment)); 2483 assert(PreferredAlign >= ABIAlign && 2484 "PreferredAlign should be at least as large as ABIAlign."); 2485 return PreferredAlign; 2486 } 2487 2488 // Double (and, for targets supporting AIX `power` alignment, long double) and 2489 // long long should be naturally aligned (despite requiring less alignment) if 2490 // possible. 2491 if (const auto *CT = T->getAs<ComplexType>()) 2492 T = CT->getElementType().getTypePtr(); 2493 if (const auto *ET = T->getAs<EnumType>()) 2494 T = ET->getDecl()->getIntegerType().getTypePtr(); 2495 if (T->isSpecificBuiltinType(BuiltinType::Double) || 2496 T->isSpecificBuiltinType(BuiltinType::LongLong) || 2497 T->isSpecificBuiltinType(BuiltinType::ULongLong) || 2498 (T->isSpecificBuiltinType(BuiltinType::LongDouble) && 2499 Target->defaultsToAIXPowerAlignment())) 2500 // Don't increase the alignment if an alignment attribute was specified on a 2501 // typedef declaration. 
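    // e.g. on i386 System V targets the ABI alignment of 'double' is 4 bytes
    // but its preferred alignment is 8, so a plain 'double' gets the larger
    // value here while a typedef carrying an explicit aligned attribute is
    // left alone.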
2502 if (!TI.isAlignRequired()) 2503 return std::max(ABIAlign, (unsigned)getTypeSize(T)); 2504 2505 return ABIAlign; 2506 } 2507 2508 /// getTargetDefaultAlignForAttributeAligned - Return the default alignment 2509 /// for __attribute__((aligned)) on this target, to be used if no alignment 2510 /// value is specified. 2511 unsigned ASTContext::getTargetDefaultAlignForAttributeAligned() const { 2512 return getTargetInfo().getDefaultAlignForAttributeAligned(); 2513 } 2514 2515 /// getAlignOfGlobalVar - Return the alignment in bits that should be given 2516 /// to a global variable of the specified type. 2517 unsigned ASTContext::getAlignOfGlobalVar(QualType T) const { 2518 uint64_t TypeSize = getTypeSize(T.getTypePtr()); 2519 return std::max(getPreferredTypeAlign(T), 2520 getTargetInfo().getMinGlobalAlign(TypeSize)); 2521 } 2522 2523 /// getAlignOfGlobalVarInChars - Return the alignment in characters that 2524 /// should be given to a global variable of the specified type. 2525 CharUnits ASTContext::getAlignOfGlobalVarInChars(QualType T) const { 2526 return toCharUnitsFromBits(getAlignOfGlobalVar(T)); 2527 } 2528 2529 CharUnits ASTContext::getOffsetOfBaseWithVBPtr(const CXXRecordDecl *RD) const { 2530 CharUnits Offset = CharUnits::Zero(); 2531 const ASTRecordLayout *Layout = &getASTRecordLayout(RD); 2532 while (const CXXRecordDecl *Base = Layout->getBaseSharingVBPtr()) { 2533 Offset += Layout->getBaseClassOffset(Base); 2534 Layout = &getASTRecordLayout(Base); 2535 } 2536 return Offset; 2537 } 2538 2539 CharUnits ASTContext::getMemberPointerPathAdjustment(const APValue &MP) const { 2540 const ValueDecl *MPD = MP.getMemberPointerDecl(); 2541 CharUnits ThisAdjustment = CharUnits::Zero(); 2542 ArrayRef<const CXXRecordDecl*> Path = MP.getMemberPointerPath(); 2543 bool DerivedMember = MP.isMemberPointerToDerivedMember(); 2544 const CXXRecordDecl *RD = cast<CXXRecordDecl>(MPD->getDeclContext()); 2545 for (unsigned I = 0, N = Path.size(); I != N; ++I) { 2546 const CXXRecordDecl *Base = RD; 2547 const CXXRecordDecl *Derived = Path[I]; 2548 if (DerivedMember) 2549 std::swap(Base, Derived); 2550 ThisAdjustment += getASTRecordLayout(Derived).getBaseClassOffset(Base); 2551 RD = Path[I]; 2552 } 2553 if (DerivedMember) 2554 ThisAdjustment = -ThisAdjustment; 2555 return ThisAdjustment; 2556 } 2557 2558 /// DeepCollectObjCIvars - 2559 /// This routine first collects all declared, but not synthesized, ivars in 2560 /// super class and then collects all ivars, including those synthesized for 2561 /// current class. This routine is used for implementation of current class 2562 /// when all ivars, declared and synthesized are known. 2563 void ASTContext::DeepCollectObjCIvars(const ObjCInterfaceDecl *OI, 2564 bool leafClass, 2565 SmallVectorImpl<const ObjCIvarDecl*> &Ivars) const { 2566 if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass()) 2567 DeepCollectObjCIvars(SuperClass, false, Ivars); 2568 if (!leafClass) { 2569 llvm::append_range(Ivars, OI->ivars()); 2570 } else { 2571 auto *IDecl = const_cast<ObjCInterfaceDecl *>(OI); 2572 for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv; 2573 Iv= Iv->getNextIvar()) 2574 Ivars.push_back(Iv); 2575 } 2576 } 2577 2578 /// CollectInheritedProtocols - Collect all protocols in current class and 2579 /// those inherited by it. 
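/// For example, given `@protocol P @end`, `@protocol Q <P> @end` and
/// `@interface I <Q> @end`, collecting on I yields both Q and P.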
2580 void ASTContext::CollectInheritedProtocols(const Decl *CDecl, 2581 llvm::SmallPtrSet<ObjCProtocolDecl*, 8> &Protocols) { 2582 if (const auto *OI = dyn_cast<ObjCInterfaceDecl>(CDecl)) { 2583 // We can use protocol_iterator here instead of 2584 // all_referenced_protocol_iterator since we are walking all categories. 2585 for (auto *Proto : OI->all_referenced_protocols()) { 2586 CollectInheritedProtocols(Proto, Protocols); 2587 } 2588 2589 // Categories of this Interface. 2590 for (const auto *Cat : OI->visible_categories()) 2591 CollectInheritedProtocols(Cat, Protocols); 2592 2593 if (ObjCInterfaceDecl *SD = OI->getSuperClass()) 2594 while (SD) { 2595 CollectInheritedProtocols(SD, Protocols); 2596 SD = SD->getSuperClass(); 2597 } 2598 } else if (const auto *OC = dyn_cast<ObjCCategoryDecl>(CDecl)) { 2599 for (auto *Proto : OC->protocols()) { 2600 CollectInheritedProtocols(Proto, Protocols); 2601 } 2602 } else if (const auto *OP = dyn_cast<ObjCProtocolDecl>(CDecl)) { 2603 // Insert the protocol. 2604 if (!Protocols.insert( 2605 const_cast<ObjCProtocolDecl *>(OP->getCanonicalDecl())).second) 2606 return; 2607 2608 for (auto *Proto : OP->protocols()) 2609 CollectInheritedProtocols(Proto, Protocols); 2610 } 2611 } 2612 2613 static bool unionHasUniqueObjectRepresentations(const ASTContext &Context, 2614 const RecordDecl *RD, 2615 bool CheckIfTriviallyCopyable) { 2616 assert(RD->isUnion() && "Must be union type"); 2617 CharUnits UnionSize = Context.getTypeSizeInChars(RD->getTypeForDecl()); 2618 2619 for (const auto *Field : RD->fields()) { 2620 if (!Context.hasUniqueObjectRepresentations(Field->getType(), 2621 CheckIfTriviallyCopyable)) 2622 return false; 2623 CharUnits FieldSize = Context.getTypeSizeInChars(Field->getType()); 2624 if (FieldSize != UnionSize) 2625 return false; 2626 } 2627 return !RD->field_empty(); 2628 } 2629 2630 static int64_t getSubobjectOffset(const FieldDecl *Field, 2631 const ASTContext &Context, 2632 const clang::ASTRecordLayout & /*Layout*/) { 2633 return Context.getFieldOffset(Field); 2634 } 2635 2636 static int64_t getSubobjectOffset(const CXXRecordDecl *RD, 2637 const ASTContext &Context, 2638 const clang::ASTRecordLayout &Layout) { 2639 return Context.toBits(Layout.getBaseClassOffset(RD)); 2640 } 2641 2642 static std::optional<int64_t> 2643 structHasUniqueObjectRepresentations(const ASTContext &Context, 2644 const RecordDecl *RD, 2645 bool CheckIfTriviallyCopyable); 2646 2647 static std::optional<int64_t> 2648 getSubobjectSizeInBits(const FieldDecl *Field, const ASTContext &Context, 2649 bool CheckIfTriviallyCopyable) { 2650 if (Field->getType()->isRecordType()) { 2651 const RecordDecl *RD = Field->getType()->getAsRecordDecl(); 2652 if (!RD->isUnion()) 2653 return structHasUniqueObjectRepresentations(Context, RD, 2654 CheckIfTriviallyCopyable); 2655 } 2656 2657 // A _BitInt type may not be unique if it has padding bits 2658 // but if it is a bitfield the padding bits are not used. 2659 bool IsBitIntType = Field->getType()->isBitIntType(); 2660 if (!Field->getType()->isReferenceType() && !IsBitIntType && 2661 !Context.hasUniqueObjectRepresentations(Field->getType(), 2662 CheckIfTriviallyCopyable)) 2663 return std::nullopt; 2664 2665 int64_t FieldSizeInBits = 2666 Context.toBits(Context.getTypeSizeInChars(Field->getType())); 2667 if (Field->isBitField()) { 2668 // If we have explicit padding bits, they don't contribute bits 2669 // to the actual object representation, so return 0. 
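    // e.g. the unnamed bit-field in `struct S { int a : 4; int : 4; };` is
    // pure padding and is never observable through the object representation.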
2670 if (Field->isUnnamedBitfield()) 2671 return 0; 2672 2673 int64_t BitfieldSize = Field->getBitWidthValue(Context); 2674 if (IsBitIntType) { 2675 if ((unsigned)BitfieldSize > 2676 cast<BitIntType>(Field->getType())->getNumBits()) 2677 return std::nullopt; 2678 } else if (BitfieldSize > FieldSizeInBits) { 2679 return std::nullopt; 2680 } 2681 FieldSizeInBits = BitfieldSize; 2682 } else if (IsBitIntType && !Context.hasUniqueObjectRepresentations( 2683 Field->getType(), CheckIfTriviallyCopyable)) { 2684 return std::nullopt; 2685 } 2686 return FieldSizeInBits; 2687 } 2688 2689 static std::optional<int64_t> 2690 getSubobjectSizeInBits(const CXXRecordDecl *RD, const ASTContext &Context, 2691 bool CheckIfTriviallyCopyable) { 2692 return structHasUniqueObjectRepresentations(Context, RD, 2693 CheckIfTriviallyCopyable); 2694 } 2695 2696 template <typename RangeT> 2697 static std::optional<int64_t> structSubobjectsHaveUniqueObjectRepresentations( 2698 const RangeT &Subobjects, int64_t CurOffsetInBits, 2699 const ASTContext &Context, const clang::ASTRecordLayout &Layout, 2700 bool CheckIfTriviallyCopyable) { 2701 for (const auto *Subobject : Subobjects) { 2702 std::optional<int64_t> SizeInBits = 2703 getSubobjectSizeInBits(Subobject, Context, CheckIfTriviallyCopyable); 2704 if (!SizeInBits) 2705 return std::nullopt; 2706 if (*SizeInBits != 0) { 2707 int64_t Offset = getSubobjectOffset(Subobject, Context, Layout); 2708 if (Offset != CurOffsetInBits) 2709 return std::nullopt; 2710 CurOffsetInBits += *SizeInBits; 2711 } 2712 } 2713 return CurOffsetInBits; 2714 } 2715 2716 static std::optional<int64_t> 2717 structHasUniqueObjectRepresentations(const ASTContext &Context, 2718 const RecordDecl *RD, 2719 bool CheckIfTriviallyCopyable) { 2720 assert(!RD->isUnion() && "Must be struct/class type"); 2721 const auto &Layout = Context.getASTRecordLayout(RD); 2722 2723 int64_t CurOffsetInBits = 0; 2724 if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RD)) { 2725 if (ClassDecl->isDynamicClass()) 2726 return std::nullopt; 2727 2728 SmallVector<CXXRecordDecl *, 4> Bases; 2729 for (const auto &Base : ClassDecl->bases()) { 2730 // Empty types can be inherited from, and non-empty types can potentially 2731 // have tail padding, so just make sure there isn't an error. 
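      // Record the base here; the list is sorted by layout offset below so the
      // running-offset check can walk the bases in memory order.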
2732 Bases.emplace_back(Base.getType()->getAsCXXRecordDecl()); 2733 } 2734 2735 llvm::sort(Bases, [&](const CXXRecordDecl *L, const CXXRecordDecl *R) { 2736 return Layout.getBaseClassOffset(L) < Layout.getBaseClassOffset(R); 2737 }); 2738 2739 std::optional<int64_t> OffsetAfterBases = 2740 structSubobjectsHaveUniqueObjectRepresentations( 2741 Bases, CurOffsetInBits, Context, Layout, CheckIfTriviallyCopyable); 2742 if (!OffsetAfterBases) 2743 return std::nullopt; 2744 CurOffsetInBits = *OffsetAfterBases; 2745 } 2746 2747 std::optional<int64_t> OffsetAfterFields = 2748 structSubobjectsHaveUniqueObjectRepresentations( 2749 RD->fields(), CurOffsetInBits, Context, Layout, 2750 CheckIfTriviallyCopyable); 2751 if (!OffsetAfterFields) 2752 return std::nullopt; 2753 CurOffsetInBits = *OffsetAfterFields; 2754 2755 return CurOffsetInBits; 2756 } 2757 2758 bool ASTContext::hasUniqueObjectRepresentations( 2759 QualType Ty, bool CheckIfTriviallyCopyable) const { 2760 // C++17 [meta.unary.prop]: 2761 // The predicate condition for a template specialization 2762 // has_unique_object_representations<T> shall be satisfied if and only if: 2763 // (9.1) - T is trivially copyable, and 2764 // (9.2) - any two objects of type T with the same value have the same 2765 // object representation, where: 2766 // - two objects of array or non-union class type are considered to have 2767 // the same value if their respective sequences of direct subobjects 2768 // have the same values, and 2769 // - two objects of union type are considered to have the same value if 2770 // they have the same active member and the corresponding members have 2771 // the same value. 2772 // The set of scalar types for which this condition holds is 2773 // implementation-defined. [ Note: If a type has padding bits, the condition 2774 // does not hold; otherwise, the condition holds true for unsigned integral 2775 // types. -- end note ] 2776 assert(!Ty.isNull() && "Null QualType sent to unique object rep check"); 2777 2778 // Arrays are unique only if their element type is unique. 2779 if (Ty->isArrayType()) 2780 return hasUniqueObjectRepresentations(getBaseElementType(Ty), 2781 CheckIfTriviallyCopyable); 2782 2783 // (9.1) - T is trivially copyable... 2784 if (CheckIfTriviallyCopyable && !Ty.isTriviallyCopyableType(*this)) 2785 return false; 2786 2787 // All integrals and enums are unique. 2788 if (Ty->isIntegralOrEnumerationType()) { 2789 // Except _BitInt types that have padding bits. 2790 if (const auto *BIT = Ty->getAs<BitIntType>()) 2791 return getTypeSize(BIT) == BIT->getNumBits(); 2792 2793 return true; 2794 } 2795 2796 // All other pointers are unique. 
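  // e.g. `int *` is treated as having no padding bits in its object
  // representation, so two pointers with equal values compare equal bytewise.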
2797 if (Ty->isPointerType()) 2798 return true; 2799 2800 if (const auto *MPT = Ty->getAs<MemberPointerType>()) 2801 return !ABI->getMemberPointerInfo(MPT).HasPadding; 2802 2803 if (Ty->isRecordType()) { 2804 const RecordDecl *Record = Ty->castAs<RecordType>()->getDecl(); 2805 2806 if (Record->isInvalidDecl()) 2807 return false; 2808 2809 if (Record->isUnion()) 2810 return unionHasUniqueObjectRepresentations(*this, Record, 2811 CheckIfTriviallyCopyable); 2812 2813 std::optional<int64_t> StructSize = structHasUniqueObjectRepresentations( 2814 *this, Record, CheckIfTriviallyCopyable); 2815 2816 return StructSize && *StructSize == static_cast<int64_t>(getTypeSize(Ty)); 2817 } 2818 2819 // FIXME: More cases to handle here (list by rsmith): 2820 // vectors (careful about, eg, vector of 3 foo) 2821 // _Complex int and friends 2822 // _Atomic T 2823 // Obj-C block pointers 2824 // Obj-C object pointers 2825 // and perhaps OpenCL's various builtin types (pipe, sampler_t, event_t, 2826 // clk_event_t, queue_t, reserve_id_t) 2827 // There're also Obj-C class types and the Obj-C selector type, but I think it 2828 // makes sense for those to return false here. 2829 2830 return false; 2831 } 2832 2833 unsigned ASTContext::CountNonClassIvars(const ObjCInterfaceDecl *OI) const { 2834 unsigned count = 0; 2835 // Count ivars declared in class extension. 2836 for (const auto *Ext : OI->known_extensions()) 2837 count += Ext->ivar_size(); 2838 2839 // Count ivar defined in this class's implementation. This 2840 // includes synthesized ivars. 2841 if (ObjCImplementationDecl *ImplDecl = OI->getImplementation()) 2842 count += ImplDecl->ivar_size(); 2843 2844 return count; 2845 } 2846 2847 bool ASTContext::isSentinelNullExpr(const Expr *E) { 2848 if (!E) 2849 return false; 2850 2851 // nullptr_t is always treated as null. 2852 if (E->getType()->isNullPtrType()) return true; 2853 2854 if (E->getType()->isAnyPointerType() && 2855 E->IgnoreParenCasts()->isNullPointerConstant(*this, 2856 Expr::NPC_ValueDependentIsNull)) 2857 return true; 2858 2859 // Unfortunately, __null has type 'int'. 2860 if (isa<GNUNullExpr>(E)) return true; 2861 2862 return false; 2863 } 2864 2865 /// Get the implementation of ObjCInterfaceDecl, or nullptr if none 2866 /// exists. 2867 ObjCImplementationDecl *ASTContext::getObjCImplementation(ObjCInterfaceDecl *D) { 2868 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator 2869 I = ObjCImpls.find(D); 2870 if (I != ObjCImpls.end()) 2871 return cast<ObjCImplementationDecl>(I->second); 2872 return nullptr; 2873 } 2874 2875 /// Get the implementation of ObjCCategoryDecl, or nullptr if none 2876 /// exists. 2877 ObjCCategoryImplDecl *ASTContext::getObjCImplementation(ObjCCategoryDecl *D) { 2878 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator 2879 I = ObjCImpls.find(D); 2880 if (I != ObjCImpls.end()) 2881 return cast<ObjCCategoryImplDecl>(I->second); 2882 return nullptr; 2883 } 2884 2885 /// Set the implementation of ObjCInterfaceDecl. 2886 void ASTContext::setObjCImplementation(ObjCInterfaceDecl *IFaceD, 2887 ObjCImplementationDecl *ImplD) { 2888 assert(IFaceD && ImplD && "Passed null params"); 2889 ObjCImpls[IFaceD] = ImplD; 2890 } 2891 2892 /// Set the implementation of ObjCCategoryDecl. 
2893 void ASTContext::setObjCImplementation(ObjCCategoryDecl *CatD, 2894 ObjCCategoryImplDecl *ImplD) { 2895 assert(CatD && ImplD && "Passed null params"); 2896 ObjCImpls[CatD] = ImplD; 2897 } 2898 2899 const ObjCMethodDecl * 2900 ASTContext::getObjCMethodRedeclaration(const ObjCMethodDecl *MD) const { 2901 return ObjCMethodRedecls.lookup(MD); 2902 } 2903 2904 void ASTContext::setObjCMethodRedeclaration(const ObjCMethodDecl *MD, 2905 const ObjCMethodDecl *Redecl) { 2906 assert(!getObjCMethodRedeclaration(MD) && "MD already has a redeclaration"); 2907 ObjCMethodRedecls[MD] = Redecl; 2908 } 2909 2910 const ObjCInterfaceDecl *ASTContext::getObjContainingInterface( 2911 const NamedDecl *ND) const { 2912 if (const auto *ID = dyn_cast<ObjCInterfaceDecl>(ND->getDeclContext())) 2913 return ID; 2914 if (const auto *CD = dyn_cast<ObjCCategoryDecl>(ND->getDeclContext())) 2915 return CD->getClassInterface(); 2916 if (const auto *IMD = dyn_cast<ObjCImplDecl>(ND->getDeclContext())) 2917 return IMD->getClassInterface(); 2918 2919 return nullptr; 2920 } 2921 2922 /// Get the copy initialization expression of VarDecl, or nullptr if 2923 /// none exists. 2924 BlockVarCopyInit ASTContext::getBlockVarCopyInit(const VarDecl *VD) const { 2925 assert(VD && "Passed null params"); 2926 assert(VD->hasAttr<BlocksAttr>() && 2927 "getBlockVarCopyInits - not __block var"); 2928 auto I = BlockVarCopyInits.find(VD); 2929 if (I != BlockVarCopyInits.end()) 2930 return I->second; 2931 return {nullptr, false}; 2932 } 2933 2934 /// Set the copy initialization expression of a block var decl. 2935 void ASTContext::setBlockVarCopyInit(const VarDecl*VD, Expr *CopyExpr, 2936 bool CanThrow) { 2937 assert(VD && CopyExpr && "Passed null params"); 2938 assert(VD->hasAttr<BlocksAttr>() && 2939 "setBlockVarCopyInits - not __block var"); 2940 BlockVarCopyInits[VD].setExprAndFlag(CopyExpr, CanThrow); 2941 } 2942 2943 TypeSourceInfo *ASTContext::CreateTypeSourceInfo(QualType T, 2944 unsigned DataSize) const { 2945 if (!DataSize) 2946 DataSize = TypeLoc::getFullDataSizeForType(T); 2947 else 2948 assert(DataSize == TypeLoc::getFullDataSizeForType(T) && 2949 "incorrect data size provided to CreateTypeSourceInfo!"); 2950 2951 auto *TInfo = 2952 (TypeSourceInfo*)BumpAlloc.Allocate(sizeof(TypeSourceInfo) + DataSize, 8); 2953 new (TInfo) TypeSourceInfo(T, DataSize); 2954 return TInfo; 2955 } 2956 2957 TypeSourceInfo *ASTContext::getTrivialTypeSourceInfo(QualType T, 2958 SourceLocation L) const { 2959 TypeSourceInfo *DI = CreateTypeSourceInfo(T); 2960 DI->getTypeLoc().initialize(const_cast<ASTContext &>(*this), L); 2961 return DI; 2962 } 2963 2964 const ASTRecordLayout & 2965 ASTContext::getASTObjCInterfaceLayout(const ObjCInterfaceDecl *D) const { 2966 return getObjCLayout(D, nullptr); 2967 } 2968 2969 const ASTRecordLayout & 2970 ASTContext::getASTObjCImplementationLayout( 2971 const ObjCImplementationDecl *D) const { 2972 return getObjCLayout(D->getClassInterface(), D); 2973 } 2974 2975 static auto getCanonicalTemplateArguments(const ASTContext &C, 2976 ArrayRef<TemplateArgument> Args, 2977 bool &AnyNonCanonArgs) { 2978 SmallVector<TemplateArgument, 16> CanonArgs(Args); 2979 for (auto &Arg : CanonArgs) { 2980 TemplateArgument OrigArg = Arg; 2981 Arg = C.getCanonicalTemplateArgument(Arg); 2982 AnyNonCanonArgs |= !Arg.structurallyEquals(OrigArg); 2983 } 2984 return CanonArgs; 2985 } 2986 2987 //===----------------------------------------------------------------------===// 2988 // Type creation/memoization methods 2989 
//===----------------------------------------------------------------------===// 2990 2991 QualType 2992 ASTContext::getExtQualType(const Type *baseType, Qualifiers quals) const { 2993 unsigned fastQuals = quals.getFastQualifiers(); 2994 quals.removeFastQualifiers(); 2995 2996 // Check if we've already instantiated this type. 2997 llvm::FoldingSetNodeID ID; 2998 ExtQuals::Profile(ID, baseType, quals); 2999 void *insertPos = nullptr; 3000 if (ExtQuals *eq = ExtQualNodes.FindNodeOrInsertPos(ID, insertPos)) { 3001 assert(eq->getQualifiers() == quals); 3002 return QualType(eq, fastQuals); 3003 } 3004 3005 // If the base type is not canonical, make the appropriate canonical type. 3006 QualType canon; 3007 if (!baseType->isCanonicalUnqualified()) { 3008 SplitQualType canonSplit = baseType->getCanonicalTypeInternal().split(); 3009 canonSplit.Quals.addConsistentQualifiers(quals); 3010 canon = getExtQualType(canonSplit.Ty, canonSplit.Quals); 3011 3012 // Re-find the insert position. 3013 (void) ExtQualNodes.FindNodeOrInsertPos(ID, insertPos); 3014 } 3015 3016 auto *eq = new (*this, alignof(ExtQuals)) ExtQuals(baseType, canon, quals); 3017 ExtQualNodes.InsertNode(eq, insertPos); 3018 return QualType(eq, fastQuals); 3019 } 3020 3021 QualType ASTContext::getAddrSpaceQualType(QualType T, 3022 LangAS AddressSpace) const { 3023 QualType CanT = getCanonicalType(T); 3024 if (CanT.getAddressSpace() == AddressSpace) 3025 return T; 3026 3027 // If we are composing extended qualifiers together, merge together 3028 // into one ExtQuals node. 3029 QualifierCollector Quals; 3030 const Type *TypeNode = Quals.strip(T); 3031 3032 // If this type already has an address space specified, it cannot get 3033 // another one. 3034 assert(!Quals.hasAddressSpace() && 3035 "Type cannot be in multiple addr spaces!"); 3036 Quals.addAddressSpace(AddressSpace); 3037 3038 return getExtQualType(TypeNode, Quals); 3039 } 3040 3041 QualType ASTContext::removeAddrSpaceQualType(QualType T) const { 3042 // If the type is not qualified with an address space, just return it 3043 // immediately. 3044 if (!T.hasAddressSpace()) 3045 return T; 3046 3047 // If we are composing extended qualifiers together, merge together 3048 // into one ExtQuals node. 3049 QualifierCollector Quals; 3050 const Type *TypeNode; 3051 3052 while (T.hasAddressSpace()) { 3053 TypeNode = Quals.strip(T); 3054 3055 // If the type no longer has an address space after stripping qualifiers, 3056 // jump out. 3057 if (!QualType(TypeNode, 0).hasAddressSpace()) 3058 break; 3059 3060 // There might be sugar in the way. Strip it and try again. 3061 T = T.getSingleStepDesugaredType(*this); 3062 } 3063 3064 Quals.removeAddressSpace(); 3065 3066 // Removal of the address space can mean there are no longer any 3067 // non-fast qualifiers, so creating an ExtQualType isn't possible (asserts) 3068 // or required. 
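  // For illustration: stripping LangAS::opencl_global from `__global const int`
  // leaves only 'const', a fast qualifier, so the plain QualType branch below
  // is taken instead of building a new ExtQuals node.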
3069 if (Quals.hasNonFastQualifiers()) 3070 return getExtQualType(TypeNode, Quals); 3071 else 3072 return QualType(TypeNode, Quals.getFastQualifiers()); 3073 } 3074 3075 QualType ASTContext::getObjCGCQualType(QualType T, 3076 Qualifiers::GC GCAttr) const { 3077 QualType CanT = getCanonicalType(T); 3078 if (CanT.getObjCGCAttr() == GCAttr) 3079 return T; 3080 3081 if (const auto *ptr = T->getAs<PointerType>()) { 3082 QualType Pointee = ptr->getPointeeType(); 3083 if (Pointee->isAnyPointerType()) { 3084 QualType ResultType = getObjCGCQualType(Pointee, GCAttr); 3085 return getPointerType(ResultType); 3086 } 3087 } 3088 3089 // If we are composing extended qualifiers together, merge together 3090 // into one ExtQuals node. 3091 QualifierCollector Quals; 3092 const Type *TypeNode = Quals.strip(T); 3093 3094 // If this type already has an ObjCGC specified, it cannot get 3095 // another one. 3096 assert(!Quals.hasObjCGCAttr() && 3097 "Type cannot have multiple ObjCGCs!"); 3098 Quals.addObjCGCAttr(GCAttr); 3099 3100 return getExtQualType(TypeNode, Quals); 3101 } 3102 3103 QualType ASTContext::removePtrSizeAddrSpace(QualType T) const { 3104 if (const PointerType *Ptr = T->getAs<PointerType>()) { 3105 QualType Pointee = Ptr->getPointeeType(); 3106 if (isPtrSizeAddressSpace(Pointee.getAddressSpace())) { 3107 return getPointerType(removeAddrSpaceQualType(Pointee)); 3108 } 3109 } 3110 return T; 3111 } 3112 3113 const FunctionType *ASTContext::adjustFunctionType(const FunctionType *T, 3114 FunctionType::ExtInfo Info) { 3115 if (T->getExtInfo() == Info) 3116 return T; 3117 3118 QualType Result; 3119 if (const auto *FNPT = dyn_cast<FunctionNoProtoType>(T)) { 3120 Result = getFunctionNoProtoType(FNPT->getReturnType(), Info); 3121 } else { 3122 const auto *FPT = cast<FunctionProtoType>(T); 3123 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); 3124 EPI.ExtInfo = Info; 3125 Result = getFunctionType(FPT->getReturnType(), FPT->getParamTypes(), EPI); 3126 } 3127 3128 return cast<FunctionType>(Result.getTypePtr()); 3129 } 3130 3131 void ASTContext::adjustDeducedFunctionResultType(FunctionDecl *FD, 3132 QualType ResultType) { 3133 FD = FD->getMostRecentDecl(); 3134 while (true) { 3135 const auto *FPT = FD->getType()->castAs<FunctionProtoType>(); 3136 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); 3137 FD->setType(getFunctionType(ResultType, FPT->getParamTypes(), EPI)); 3138 if (FunctionDecl *Next = FD->getPreviousDecl()) 3139 FD = Next; 3140 else 3141 break; 3142 } 3143 if (ASTMutationListener *L = getASTMutationListener()) 3144 L->DeducedReturnType(FD, ResultType); 3145 } 3146 3147 /// Get a function type and produce the equivalent function type with the 3148 /// specified exception specification. Type sugar that can be present on a 3149 /// declaration of a function with an exception specification is permitted 3150 /// and preserved. Other type sugar (for instance, typedefs) is not. 3151 QualType ASTContext::getFunctionTypeWithExceptionSpec( 3152 QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI) const { 3153 // Might have some parens. 3154 if (const auto *PT = dyn_cast<ParenType>(Orig)) 3155 return getParenType( 3156 getFunctionTypeWithExceptionSpec(PT->getInnerType(), ESI)); 3157 3158 // Might be wrapped in a macro qualified type. 
/// Get a function type and produce the equivalent function type with the
/// specified exception specification. Type sugar that can be present on a
/// declaration of a function with an exception specification is permitted
/// and preserved. Other type sugar (for instance, typedefs) is not.
QualType ASTContext::getFunctionTypeWithExceptionSpec(
    QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI) const {
  // Might have some parens.
  if (const auto *PT = dyn_cast<ParenType>(Orig))
    return getParenType(
        getFunctionTypeWithExceptionSpec(PT->getInnerType(), ESI));

  // Might be wrapped in a macro qualified type.
  if (const auto *MQT = dyn_cast<MacroQualifiedType>(Orig))
    return getMacroQualifiedType(
        getFunctionTypeWithExceptionSpec(MQT->getUnderlyingType(), ESI),
        MQT->getMacroIdentifier());

  // Might have a calling-convention attribute.
  if (const auto *AT = dyn_cast<AttributedType>(Orig))
    return getAttributedType(
        AT->getAttrKind(),
        getFunctionTypeWithExceptionSpec(AT->getModifiedType(), ESI),
        getFunctionTypeWithExceptionSpec(AT->getEquivalentType(), ESI));

  // Anything else must be a function type. Rebuild it with the new exception
  // specification.
  const auto *Proto = Orig->castAs<FunctionProtoType>();
  return getFunctionType(
      Proto->getReturnType(), Proto->getParamTypes(),
      Proto->getExtProtoInfo().withExceptionSpec(ESI));
}

bool ASTContext::hasSameFunctionTypeIgnoringExceptionSpec(QualType T,
                                                          QualType U) const {
  return hasSameType(T, U) ||
         (getLangOpts().CPlusPlus17 &&
          hasSameType(getFunctionTypeWithExceptionSpec(T, EST_None),
                      getFunctionTypeWithExceptionSpec(U, EST_None)));
}

QualType ASTContext::getFunctionTypeWithoutPtrSizes(QualType T) {
  if (const auto *Proto = T->getAs<FunctionProtoType>()) {
    QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType());
    SmallVector<QualType, 16> Args(Proto->param_types().size());
    for (unsigned i = 0, n = Args.size(); i != n; ++i)
      Args[i] = removePtrSizeAddrSpace(Proto->param_types()[i]);
    return getFunctionType(RetTy, Args, Proto->getExtProtoInfo());
  }

  if (const FunctionNoProtoType *Proto = T->getAs<FunctionNoProtoType>()) {
    QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType());
    return getFunctionNoProtoType(RetTy, Proto->getExtInfo());
  }

  return T;
}

bool ASTContext::hasSameFunctionTypeIgnoringPtrSizes(QualType T, QualType U) {
  return hasSameType(T, U) ||
         hasSameType(getFunctionTypeWithoutPtrSizes(T),
                     getFunctionTypeWithoutPtrSizes(U));
}

void ASTContext::adjustExceptionSpec(
    FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI,
    bool AsWritten) {
  // Update the type.
  QualType Updated =
      getFunctionTypeWithExceptionSpec(FD->getType(), ESI);
  FD->setType(Updated);

  if (!AsWritten)
    return;

  // Update the type in the type source information too.
  if (TypeSourceInfo *TSInfo = FD->getTypeSourceInfo()) {
    // If the type and the type-as-written differ, we may need to update
    // the type-as-written too.
    if (TSInfo->getType() != FD->getType())
      Updated = getFunctionTypeWithExceptionSpec(TSInfo->getType(), ESI);

    // FIXME: When we get proper type location information for exceptions,
    // we'll also have to rebuild the TypeSourceInfo. For now, we just patch
    // up the TypeSourceInfo.
    assert(TypeLoc::getFullDataSizeForType(Updated) ==
               TypeLoc::getFullDataSizeForType(TSInfo->getType()) &&
           "TypeLoc size mismatch from updating exception specification");
    TSInfo->overrideType(Updated);
  }
}
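// Illustrative sketch, assuming QualTypes T and U denoting 'void () noexcept'
// and 'void ()' in a C++17-or-later translation unit, where the exception
// specification is part of the function type:
//
//   bool Same = Ctx.hasSameFunctionTypeIgnoringExceptionSpec(T, U); // true
//   QualType Plain = Ctx.getFunctionTypeWithExceptionSpec(T, EST_None);
//   assert(Ctx.hasSameType(Plain, U));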
/// getComplexType - Return the uniqued reference to the type for a complex
/// number with the specified element type.
QualType ASTContext::getComplexType(QualType T) const {
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  ComplexType::Profile(ID, T);

  void *InsertPos = nullptr;
  if (ComplexType *CT = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(CT, 0);

  // If the element type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getComplexType(getCanonicalType(T));

    // Get the new insert position for the node we care about.
    ComplexType *NewIP = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New = new (*this, alignof(ComplexType)) ComplexType(T, Canonical);
  Types.push_back(New);
  ComplexTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}

/// getPointerType - Return the uniqued reference to the type for a pointer to
/// the specified type.
QualType ASTContext::getPointerType(QualType T) const {
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  PointerType::Profile(ID, T);

  void *InsertPos = nullptr;
  if (PointerType *PT = PointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(PT, 0);

  // If the pointee type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getPointerType(getCanonicalType(T));

    // Get the new insert position for the node we care about.
    PointerType *NewIP = PointerTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New = new (*this, alignof(PointerType)) PointerType(T, Canonical);
  Types.push_back(New);
  PointerTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}

QualType ASTContext::getAdjustedType(QualType Orig, QualType New) const {
  llvm::FoldingSetNodeID ID;
  AdjustedType::Profile(ID, Orig, New);
  void *InsertPos = nullptr;
  AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (AT)
    return QualType(AT, 0);

  QualType Canonical = getCanonicalType(New);

  // Get the new insert position for the node we care about.
  AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
  assert(!AT && "Shouldn't be in the map!");

  AT = new (*this, alignof(AdjustedType))
      AdjustedType(Type::Adjusted, Orig, New, Canonical);
  Types.push_back(AT);
  AdjustedTypes.InsertNode(AT, InsertPos);
  return QualType(AT, 0);
}
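// Illustrative sketch (assuming an ASTContext &Ctx): an AdjustedType is sugar
// that remembers the type as written next to the type actually used, and it
// canonicalizes to the latter. For an 'int[4]' spelling adjusted to 'int *':
//
//   QualType Written = Ctx.getConstantArrayType(Ctx.IntTy, llvm::APInt(32, 4),
//                                               /*SizeExpr=*/nullptr,
//                                               ArraySizeModifier::Normal, 0);
//   QualType Used = Ctx.getPointerType(Ctx.IntTy);
//   QualType Adj = Ctx.getAdjustedType(Written, Used); // canonically 'int *'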
QualType ASTContext::getDecayedType(QualType Orig, QualType Decayed) const {
  llvm::FoldingSetNodeID ID;
  AdjustedType::Profile(ID, Orig, Decayed);
  void *InsertPos = nullptr;
  AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (AT)
    return QualType(AT, 0);

  QualType Canonical = getCanonicalType(Decayed);

  // Get the new insert position for the node we care about.
  AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
  assert(!AT && "Shouldn't be in the map!");

  AT = new (*this, alignof(DecayedType)) DecayedType(Orig, Decayed, Canonical);
  Types.push_back(AT);
  AdjustedTypes.InsertNode(AT, InsertPos);
  return QualType(AT, 0);
}

QualType ASTContext::getDecayedType(QualType T) const {
  assert((T->isArrayType() || T->isFunctionType()) && "T does not decay");

  QualType Decayed;

  // C99 6.7.5.3p7:
  //   A declaration of a parameter as "array of type" shall be
  //   adjusted to "qualified pointer to type", where the type
  //   qualifiers (if any) are those specified within the [ and ] of
  //   the array type derivation.
  if (T->isArrayType())
    Decayed = getArrayDecayedType(T);

  // C99 6.7.5.3p8:
  //   A declaration of a parameter as "function returning type"
  //   shall be adjusted to "pointer to function returning type", as
  //   in 6.3.2.1.
  if (T->isFunctionType())
    Decayed = getPointerType(T);

  return getDecayedType(T, Decayed);
}

/// getBlockPointerType - Return the uniqued reference to the type for
/// a pointer to the specified block.
QualType ASTContext::getBlockPointerType(QualType T) const {
  assert(T->isFunctionType() && "block of function types only");
  // Unique pointers, to guarantee there is only one block of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  BlockPointerType::Profile(ID, T);

  void *InsertPos = nullptr;
  if (BlockPointerType *PT =
        BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(PT, 0);

  // If the block pointee type isn't canonical, this won't be a canonical
  // type either so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getBlockPointerType(getCanonicalType(T));

    // Get the new insert position for the node we care about.
    BlockPointerType *NewIP =
        BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New =
      new (*this, alignof(BlockPointerType)) BlockPointerType(T, Canonical);
  Types.push_back(New);
  BlockPointerTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}

/// getLValueReferenceType - Return the uniqued reference to the type for an
/// lvalue reference to the specified type.
QualType
ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const {
  assert((!T->isPlaceholderType() ||
          T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) &&
         "Unresolved placeholder type");

  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  ReferenceType::Profile(ID, T, SpelledAsLValue);

  void *InsertPos = nullptr;
  if (LValueReferenceType *RT =
        LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(RT, 0);

  const auto *InnerRef = T->getAs<ReferenceType>();

  // If the referencee type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.
  QualType Canonical;
  if (!SpelledAsLValue || InnerRef || !T.isCanonical()) {
    QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
    Canonical = getLValueReferenceType(getCanonicalType(PointeeType));

    // Get the new insert position for the node we care about.
    LValueReferenceType *NewIP =
        LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  auto *New = new (*this, alignof(LValueReferenceType))
      LValueReferenceType(T, Canonical, SpelledAsLValue);
  Types.push_back(New);
  LValueReferenceTypes.InsertNode(New, InsertPos);

  return QualType(New, 0);
}

/// getRValueReferenceType - Return the uniqued reference to the type for an
/// rvalue reference to the specified type.
QualType ASTContext::getRValueReferenceType(QualType T) const {
  assert((!T->isPlaceholderType() ||
          T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) &&
         "Unresolved placeholder type");

  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  ReferenceType::Profile(ID, T, false);

  void *InsertPos = nullptr;
  if (RValueReferenceType *RT =
        RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(RT, 0);

  const auto *InnerRef = T->getAs<ReferenceType>();

  // If the referencee type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.
  QualType Canonical;
  if (InnerRef || !T.isCanonical()) {
    QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
    Canonical = getRValueReferenceType(getCanonicalType(PointeeType));

    // Get the new insert position for the node we care about.
    RValueReferenceType *NewIP =
        RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  auto *New = new (*this, alignof(RValueReferenceType))
      RValueReferenceType(T, Canonical);
  Types.push_back(New);
  RValueReferenceTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}

/// getMemberPointerType - Return the uniqued reference to the type for a
/// member pointer to the specified type, in the specified class.
QualType ASTContext::getMemberPointerType(QualType T, const Type *Cls) const {
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  MemberPointerType::Profile(ID, T, Cls);

  void *InsertPos = nullptr;
  if (MemberPointerType *PT =
        MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(PT, 0);

  // If the pointee or class type isn't canonical, this won't be a canonical
  // type either, so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical() || !Cls->isCanonicalUnqualified()) {
    Canonical =
        getMemberPointerType(getCanonicalType(T), getCanonicalType(Cls));

    // Get the new insert position for the node we care about.
    MemberPointerType *NewIP =
        MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New = new (*this, alignof(MemberPointerType))
      MemberPointerType(T, Cls, Canonical);
  Types.push_back(New);
  MemberPointerTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}

/// getConstantArrayType - Return the unique reference to the type for an
/// array of the specified element type.
QualType ASTContext::getConstantArrayType(QualType EltTy,
                                          const llvm::APInt &ArySizeIn,
                                          const Expr *SizeExpr,
                                          ArraySizeModifier ASM,
                                          unsigned IndexTypeQuals) const {
  assert((EltTy->isDependentType() ||
          EltTy->isIncompleteType() || EltTy->isConstantSizeType()) &&
         "Constant array of VLAs is illegal!");

  // We only need the size as part of the type if it's instantiation-dependent.
  if (SizeExpr && !SizeExpr->isInstantiationDependent())
    SizeExpr = nullptr;

  // Convert the array size into a canonical width matching the pointer size
  // for the target.
  llvm::APInt ArySize(ArySizeIn);
  ArySize = ArySize.zextOrTrunc(Target->getMaxPointerWidth());

  llvm::FoldingSetNodeID ID;
  ConstantArrayType::Profile(ID, *this, EltTy, ArySize, SizeExpr, ASM,
                             IndexTypeQuals);

  void *InsertPos = nullptr;
  if (ConstantArrayType *ATP =
        ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(ATP, 0);

  // If the element type isn't canonical or has qualifiers, or the array bound
  // is instantiation-dependent, this won't be a canonical type either, so fill
  // in the canonical type field.
  QualType Canon;
  // FIXME: Check below should look for qualifiers behind sugar.
  if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers() || SizeExpr) {
    SplitQualType canonSplit = getCanonicalType(EltTy).split();
    Canon = getConstantArrayType(QualType(canonSplit.Ty, 0), ArySize, nullptr,
                                 ASM, IndexTypeQuals);
    Canon = getQualifiedType(Canon, canonSplit.Quals);

    // Get the new insert position for the node we care about.
    ConstantArrayType *NewIP =
        ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  void *Mem = Allocate(
      ConstantArrayType::totalSizeToAlloc<const Expr *>(SizeExpr ? 1 : 0),
      alignof(ConstantArrayType));
  auto *New = new (Mem)
      ConstantArrayType(EltTy, Canon, ArySize, SizeExpr, ASM, IndexTypeQuals);
  ConstantArrayTypes.InsertNode(New, InsertPos);
  Types.push_back(New);
  return QualType(New, 0);
}

/// getVariableArrayDecayedType - Turns the given type, which may be
/// variably-modified, into the corresponding type with all the known
/// sizes replaced with [*].
QualType ASTContext::getVariableArrayDecayedType(QualType type) const {
  // By far the most common case.
  if (!type->isVariablyModifiedType()) return type;

  QualType result;

  SplitQualType split = type.getSplitDesugaredType();
  const Type *ty = split.Ty;
  switch (ty->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("didn't desugar past all non-canonical types?");

  // These types should never be variably-modified.
  case Type::Builtin:
  case Type::Complex:
  case Type::Vector:
  case Type::DependentVector:
  case Type::ExtVector:
  case Type::DependentSizedExtVector:
  case Type::ConstantMatrix:
  case Type::DependentSizedMatrix:
  case Type::DependentAddressSpace:
  case Type::ObjCObject:
  case Type::ObjCInterface:
  case Type::ObjCObjectPointer:
  case Type::Record:
  case Type::Enum:
  case Type::UnresolvedUsing:
  case Type::TypeOfExpr:
  case Type::TypeOf:
  case Type::Decltype:
  case Type::UnaryTransform:
  case Type::DependentName:
  case Type::InjectedClassName:
  case Type::TemplateSpecialization:
  case Type::DependentTemplateSpecialization:
  case Type::TemplateTypeParm:
  case Type::SubstTemplateTypeParmPack:
  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
  case Type::PackExpansion:
  case Type::BitInt:
  case Type::DependentBitInt:
    llvm_unreachable("type should never be variably-modified");

  // These types can be variably-modified but should never need to
  // further decay.
  case Type::FunctionNoProto:
  case Type::FunctionProto:
  case Type::BlockPointer:
  case Type::MemberPointer:
  case Type::Pipe:
    return type;

  // These types can be variably-modified. All these modifications
  // preserve structure except as noted by comments.
  // TODO: if we ever care about optimizing VLAs, there are no-op
  // optimizations available here.
  case Type::Pointer:
    result = getPointerType(
        getVariableArrayDecayedType(cast<PointerType>(ty)->getPointeeType()));
    break;

  case Type::LValueReference: {
    const auto *lv = cast<LValueReferenceType>(ty);
    result = getLValueReferenceType(
        getVariableArrayDecayedType(lv->getPointeeType()),
        lv->isSpelledAsLValue());
    break;
  }

  case Type::RValueReference: {
    const auto *lv = cast<RValueReferenceType>(ty);
    result = getRValueReferenceType(
        getVariableArrayDecayedType(lv->getPointeeType()));
    break;
  }

  case Type::Atomic: {
    const auto *at = cast<AtomicType>(ty);
    result = getAtomicType(getVariableArrayDecayedType(at->getValueType()));
    break;
  }

  case Type::ConstantArray: {
    const auto *cat = cast<ConstantArrayType>(ty);
    result = getConstantArrayType(
        getVariableArrayDecayedType(cat->getElementType()),
        cat->getSize(),
        cat->getSizeExpr(),
        cat->getSizeModifier(),
        cat->getIndexTypeCVRQualifiers());
    break;
  }

  case Type::DependentSizedArray: {
    const auto *dat = cast<DependentSizedArrayType>(ty);
    result = getDependentSizedArrayType(
        getVariableArrayDecayedType(dat->getElementType()),
        dat->getSizeExpr(),
        dat->getSizeModifier(),
        dat->getIndexTypeCVRQualifiers(),
        dat->getBracketsRange());
    break;
  }

  // Turn incomplete types into [*] types.
  case Type::IncompleteArray: {
    const auto *iat = cast<IncompleteArrayType>(ty);
    result = getVariableArrayType(
        getVariableArrayDecayedType(iat->getElementType()),
        /*size*/ nullptr, ArraySizeModifier::Normal,
        iat->getIndexTypeCVRQualifiers(), SourceRange());
    break;
  }

  // Turn VLA types into [*] types.
  case Type::VariableArray: {
    const auto *vat = cast<VariableArrayType>(ty);
    result = getVariableArrayType(
        getVariableArrayDecayedType(vat->getElementType()),
        /*size*/ nullptr, ArraySizeModifier::Star,
        vat->getIndexTypeCVRQualifiers(), vat->getBracketsRange());
    break;
  }
  }

  // Apply the top-level qualifiers from the original.
  return getQualifiedType(result, split.Quals);
}
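// Illustrative sketch (assuming an ASTContext &Ctx and an Expr *N of integer
// type): decaying a variably modified type preserves its structure and only
// replaces the variable bound with the star size modifier.
//
//   QualType VLA = Ctx.getVariableArrayType(Ctx.IntTy, N,
//                                           ArraySizeModifier::Normal, 0,
//                                           SourceRange());   // 'int [N]'
//   QualType Ptr = Ctx.getPointerType(VLA);                   // 'int (*)[N]'
//   QualType Decayed = Ctx.getVariableArrayDecayedType(Ptr);  // 'int (*)[*]'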
/// getVariableArrayType - Returns a non-unique reference to the type for a
/// variable array of the specified element type.
QualType ASTContext::getVariableArrayType(QualType EltTy, Expr *NumElts,
                                          ArraySizeModifier ASM,
                                          unsigned IndexTypeQuals,
                                          SourceRange Brackets) const {
  // Since we don't unique expressions, it isn't possible to unique VLAs
  // that have an expression provided for their size.
  QualType Canon;

  // Be sure to pull qualifiers off the element type.
  // FIXME: Check below should look for qualifiers behind sugar.
  if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) {
    SplitQualType canonSplit = getCanonicalType(EltTy).split();
    Canon = getVariableArrayType(QualType(canonSplit.Ty, 0), NumElts, ASM,
                                 IndexTypeQuals, Brackets);
    Canon = getQualifiedType(Canon, canonSplit.Quals);
  }

  auto *New = new (*this, alignof(VariableArrayType))
      VariableArrayType(EltTy, Canon, NumElts, ASM, IndexTypeQuals, Brackets);

  VariableArrayTypes.push_back(New);
  Types.push_back(New);
  return QualType(New, 0);
}

/// getDependentSizedArrayType - Returns a non-unique reference to
/// the type for a dependently-sized array of the specified element
/// type.
QualType ASTContext::getDependentSizedArrayType(QualType elementType,
                                                Expr *numElements,
                                                ArraySizeModifier ASM,
                                                unsigned elementTypeQuals,
                                                SourceRange brackets) const {
  assert((!numElements || numElements->isTypeDependent() ||
          numElements->isValueDependent()) &&
         "Size must be type- or value-dependent!");

  // Dependently-sized array types that do not have a specified number
  // of elements will have their sizes deduced from a dependent
  // initializer. We do no canonicalization here at all, which is okay
  // because they can't be used in most locations.
  if (!numElements) {
    auto *newType = new (*this, alignof(DependentSizedArrayType))
        DependentSizedArrayType(elementType, QualType(), numElements, ASM,
                                elementTypeQuals, brackets);
    Types.push_back(newType);
    return QualType(newType, 0);
  }

  // Otherwise, we actually build a new type every time, but we
  // also build a canonical type.

  SplitQualType canonElementType = getCanonicalType(elementType).split();

  void *insertPos = nullptr;
  llvm::FoldingSetNodeID ID;
  DependentSizedArrayType::Profile(ID, *this,
                                   QualType(canonElementType.Ty, 0),
                                   ASM, elementTypeQuals, numElements);

  // Look for an existing type with these properties.
  DependentSizedArrayType *canonTy =
      DependentSizedArrayTypes.FindNodeOrInsertPos(ID, insertPos);

  // If we don't have one, build one.
  if (!canonTy) {
    canonTy = new (*this, alignof(DependentSizedArrayType))
        DependentSizedArrayType(QualType(canonElementType.Ty, 0), QualType(),
                                numElements, ASM, elementTypeQuals, brackets);
    DependentSizedArrayTypes.InsertNode(canonTy, insertPos);
    Types.push_back(canonTy);
  }

  // Apply qualifiers from the element type to the array.
  QualType canon = getQualifiedType(QualType(canonTy, 0),
                                    canonElementType.Quals);

  // If we didn't need extra canonicalization for the element type or the size
  // expression, then just use that as our result.
  if (QualType(canonElementType.Ty, 0) == elementType &&
      canonTy->getSizeExpr() == numElements)
    return canon;

  // Otherwise, we need to build a type which follows the spelling
  // of the element type.
  auto *sugaredType = new (*this, alignof(DependentSizedArrayType))
      DependentSizedArrayType(elementType, canon, numElements, ASM,
                              elementTypeQuals, brackets);
  Types.push_back(sugaredType);
  return QualType(sugaredType, 0);
}

QualType ASTContext::getIncompleteArrayType(QualType elementType,
                                            ArraySizeModifier ASM,
                                            unsigned elementTypeQuals) const {
  llvm::FoldingSetNodeID ID;
  IncompleteArrayType::Profile(ID, elementType, ASM, elementTypeQuals);

  void *insertPos = nullptr;
  if (IncompleteArrayType *iat =
        IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos))
    return QualType(iat, 0);

  // If the element type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field. We also have to pull
  // qualifiers off the element type.
  QualType canon;

  // FIXME: Check below should look for qualifiers behind sugar.
  if (!elementType.isCanonical() || elementType.hasLocalQualifiers()) {
    SplitQualType canonSplit = getCanonicalType(elementType).split();
    canon = getIncompleteArrayType(QualType(canonSplit.Ty, 0),
                                   ASM, elementTypeQuals);
    canon = getQualifiedType(canon, canonSplit.Quals);

    // Get the new insert position for the node we care about.
    IncompleteArrayType *existing =
        IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos);
    assert(!existing && "Shouldn't be in the map!"); (void) existing;
  }

  auto *newType = new (*this, alignof(IncompleteArrayType))
      IncompleteArrayType(elementType, canon, ASM, elementTypeQuals);

  IncompleteArrayTypes.InsertNode(newType, insertPos);
  Types.push_back(newType);
  return QualType(newType, 0);
}
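// Illustrative sketch (assuming an ASTContext &Ctx): like the other array
// forms above, incomplete array types are uniqued, so repeated requests with
// the same arguments return the same node.
//
//   QualType A = Ctx.getIncompleteArrayType(Ctx.IntTy,
//                                           ArraySizeModifier::Normal, 0);
//   QualType B = Ctx.getIncompleteArrayType(Ctx.IntTy,
//                                           ArraySizeModifier::Normal, 0);
//   assert(A == B);  // found again via the FoldingSet lookup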
ASTContext::BuiltinVectorTypeInfo
ASTContext::getBuiltinVectorTypeInfo(const BuiltinType *Ty) const {
#define SVE_INT_ELTTY(BITS, ELTS, SIGNED, NUMVECTORS)                          \
  {getIntTypeForBitwidth(BITS, SIGNED), llvm::ElementCount::getScalable(ELTS), \
   NUMVECTORS};

#define SVE_ELTTY(ELTTY, ELTS, NUMVECTORS)                                     \
  {ELTTY, llvm::ElementCount::getScalable(ELTS), NUMVECTORS};

  switch (Ty->getKind()) {
  default:
    llvm_unreachable("Unsupported builtin vector type");
  case BuiltinType::SveInt8:
    return SVE_INT_ELTTY(8, 16, true, 1);
  case BuiltinType::SveUint8:
    return SVE_INT_ELTTY(8, 16, false, 1);
  case BuiltinType::SveInt8x2:
    return SVE_INT_ELTTY(8, 16, true, 2);
  case BuiltinType::SveUint8x2:
    return SVE_INT_ELTTY(8, 16, false, 2);
  case BuiltinType::SveInt8x3:
    return SVE_INT_ELTTY(8, 16, true, 3);
  case BuiltinType::SveUint8x3:
    return SVE_INT_ELTTY(8, 16, false, 3);
  case BuiltinType::SveInt8x4:
    return SVE_INT_ELTTY(8, 16, true, 4);
  case BuiltinType::SveUint8x4:
    return SVE_INT_ELTTY(8, 16, false, 4);
  case BuiltinType::SveInt16:
    return SVE_INT_ELTTY(16, 8, true, 1);
  case BuiltinType::SveUint16:
    return SVE_INT_ELTTY(16, 8, false, 1);
  case BuiltinType::SveInt16x2:
    return SVE_INT_ELTTY(16, 8, true, 2);
  case BuiltinType::SveUint16x2:
    return SVE_INT_ELTTY(16, 8, false, 2);
  case BuiltinType::SveInt16x3:
    return SVE_INT_ELTTY(16, 8, true, 3);
  case BuiltinType::SveUint16x3:
    return SVE_INT_ELTTY(16, 8, false, 3);
  case BuiltinType::SveInt16x4:
    return SVE_INT_ELTTY(16, 8, true, 4);
  case BuiltinType::SveUint16x4:
    return SVE_INT_ELTTY(16, 8, false, 4);
  case BuiltinType::SveInt32:
    return SVE_INT_ELTTY(32, 4, true, 1);
  case BuiltinType::SveUint32:
    return SVE_INT_ELTTY(32, 4, false, 1);
  case BuiltinType::SveInt32x2:
    return SVE_INT_ELTTY(32, 4, true, 2);
  case BuiltinType::SveUint32x2:
    return SVE_INT_ELTTY(32, 4, false, 2);
  case BuiltinType::SveInt32x3:
    return SVE_INT_ELTTY(32, 4, true, 3);
  case BuiltinType::SveUint32x3:
    return SVE_INT_ELTTY(32, 4, false, 3);
  case BuiltinType::SveInt32x4:
    return SVE_INT_ELTTY(32, 4, true, 4);
  case BuiltinType::SveUint32x4:
    return SVE_INT_ELTTY(32, 4, false, 4);
  case BuiltinType::SveInt64:
    return SVE_INT_ELTTY(64, 2, true, 1);
  case BuiltinType::SveUint64:
    return SVE_INT_ELTTY(64, 2, false, 1);
  case BuiltinType::SveInt64x2:
    return SVE_INT_ELTTY(64, 2, true, 2);
  case BuiltinType::SveUint64x2:
    return SVE_INT_ELTTY(64, 2, false, 2);
  case BuiltinType::SveInt64x3:
    return SVE_INT_ELTTY(64, 2, true, 3);
  case BuiltinType::SveUint64x3:
    return SVE_INT_ELTTY(64, 2, false, 3);
  case BuiltinType::SveInt64x4:
    return SVE_INT_ELTTY(64, 2, true, 4);
  case BuiltinType::SveUint64x4:
    return SVE_INT_ELTTY(64, 2, false, 4);
  case BuiltinType::SveBool:
    return SVE_ELTTY(BoolTy, 16, 1);
  case BuiltinType::SveBoolx2:
    return SVE_ELTTY(BoolTy, 16, 2);
  case BuiltinType::SveBoolx4:
    return SVE_ELTTY(BoolTy, 16, 4);
  case BuiltinType::SveFloat16:
    return SVE_ELTTY(HalfTy, 8, 1);
  case BuiltinType::SveFloat16x2:
    return SVE_ELTTY(HalfTy, 8, 2);
  case BuiltinType::SveFloat16x3:
    return SVE_ELTTY(HalfTy, 8, 3);
  case BuiltinType::SveFloat16x4:
    return SVE_ELTTY(HalfTy, 8, 4);
  case BuiltinType::SveFloat32:
    return SVE_ELTTY(FloatTy, 4, 1);
  case BuiltinType::SveFloat32x2:
    return SVE_ELTTY(FloatTy, 4, 2);
  case BuiltinType::SveFloat32x3:
    return SVE_ELTTY(FloatTy, 4, 3);
  case BuiltinType::SveFloat32x4:
    return SVE_ELTTY(FloatTy, 4, 4);
  case BuiltinType::SveFloat64:
    return SVE_ELTTY(DoubleTy, 2, 1);
  case BuiltinType::SveFloat64x2:
    return SVE_ELTTY(DoubleTy, 2, 2);
  case BuiltinType::SveFloat64x3:
    return SVE_ELTTY(DoubleTy, 2, 3);
  case BuiltinType::SveFloat64x4:
    return SVE_ELTTY(DoubleTy, 2, 4);
  case BuiltinType::SveBFloat16:
    return SVE_ELTTY(BFloat16Ty, 8, 1);
  case BuiltinType::SveBFloat16x2:
    return SVE_ELTTY(BFloat16Ty, 8, 2);
  case BuiltinType::SveBFloat16x3:
    return SVE_ELTTY(BFloat16Ty, 8, 3);
  case BuiltinType::SveBFloat16x4:
    return SVE_ELTTY(BFloat16Ty, 8, 4);
#define RVV_VECTOR_TYPE_INT(Name, Id, SingletonId, NumEls, ElBits, NF,        \
                            IsSigned)                                         \
  case BuiltinType::Id:                                                       \
    return {getIntTypeForBitwidth(ElBits, IsSigned),                          \
            llvm::ElementCount::getScalable(NumEls), NF};
#define RVV_VECTOR_TYPE_FLOAT(Name, Id, SingletonId, NumEls, ElBits, NF)      \
  case BuiltinType::Id:                                                       \
    return {ElBits == 16 ? Float16Ty : (ElBits == 32 ? FloatTy : DoubleTy),   \
            llvm::ElementCount::getScalable(NumEls), NF};
#define RVV_VECTOR_TYPE_BFLOAT(Name, Id, SingletonId, NumEls, ElBits, NF)     \
  case BuiltinType::Id:                                                       \
    return {BFloat16Ty, llvm::ElementCount::getScalable(NumEls), NF};
#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls)                     \
  case BuiltinType::Id:                                                       \
    return {BoolTy, llvm::ElementCount::getScalable(NumEls), 1};
#include "clang/Basic/RISCVVTypes.def"
  }
}

/// getWebAssemblyExternrefType - Return a WebAssembly externref type, which
/// represents an opaque reference to a host value.
QualType ASTContext::getWebAssemblyExternrefType() const {
  if (Target->getTriple().isWasm() && Target->hasFeature("reference-types")) {
#define WASM_REF_TYPE(Name, MangledName, Id, SingletonId, AS)                 \
  if (BuiltinType::Id == BuiltinType::WasmExternRef)                          \
    return SingletonId;
#include "clang/Basic/WebAssemblyReferenceTypes.def"
  }
  llvm_unreachable(
      "shouldn't try to generate type externref outside WebAssembly target");
}
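// Illustrative sketch (assuming an AArch64 target so that Ctx.SveInt32Ty is
// initialized): for the SVE builtin 'svint32_t' the table above yields a
// signed 32-bit element type, a scalable element count of 4, and a single
// vector; the 'x2'/'x3'/'x4' tuple types differ only in NumVectors.
//
//   ASTContext::BuiltinVectorTypeInfo Info =
//       Ctx.getBuiltinVectorTypeInfo(Ctx.SveInt32Ty->castAs<BuiltinType>());
//   // Info.EC == llvm::ElementCount::getScalable(4), Info.NumVectors == 1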
/// getScalableVectorType - Return the unique reference to a scalable vector
/// type of the specified element type and size. VectorType must be a built-in
/// type.
QualType ASTContext::getScalableVectorType(QualType EltTy, unsigned NumElts,
                                           unsigned NumFields) const {
  if (Target->hasAArch64SVETypes()) {
    uint64_t EltTySize = getTypeSize(EltTy);
#define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits,   \
                        IsSigned, IsFP, IsBF)                                 \
  if (!EltTy->isBooleanType() &&                                              \
      ((EltTy->hasIntegerRepresentation() &&                                  \
        EltTy->hasSignedIntegerRepresentation() == IsSigned) ||               \
       (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() &&     \
        IsFP && !IsBF) ||                                                     \
       (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() &&      \
        IsBF && !IsFP)) &&                                                    \
      EltTySize == ElBits && NumElts == NumEls) {                             \
    return SingletonId;                                                       \
  }
#define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls)        \
  if (EltTy->isBooleanType() && NumElts == NumEls)                            \
    return SingletonId;
#define SVE_OPAQUE_TYPE(Name, MangledName, Id, SingleTonId)
#include "clang/Basic/AArch64SVEACLETypes.def"
  } else if (Target->hasRISCVVTypes()) {
    uint64_t EltTySize = getTypeSize(EltTy);
#define RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, IsSigned,  \
                        IsFP, IsBF)                                           \
  if (!EltTy->isBooleanType() &&                                              \
      ((EltTy->hasIntegerRepresentation() &&                                  \
        EltTy->hasSignedIntegerRepresentation() == IsSigned) ||               \
       (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() &&     \
        IsFP && !IsBF) ||                                                     \
       (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() &&      \
        IsBF && !IsFP)) &&                                                    \
      EltTySize == ElBits && NumElts == NumEls && NumFields == NF)            \
    return SingletonId;
#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls)                     \
  if (EltTy->isBooleanType() && NumElts == NumEls)                            \
    return SingletonId;
#include "clang/Basic/RISCVVTypes.def"
  }
  return QualType();
}

/// getVectorType - Return the unique reference to a vector type of
/// the specified element type and size. VectorType must be a built-in type.
QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts,
                                   VectorKind VecKind) const {
  assert(vecType->isBuiltinType() ||
         (vecType->isBitIntType() &&
          // Only support _BitInt elements with byte-sized power of 2 NumBits.
          llvm::isPowerOf2_32(vecType->castAs<BitIntType>()->getNumBits()) &&
          vecType->castAs<BitIntType>()->getNumBits() >= 8));

  // Check if we've already instantiated a vector of this type.
  llvm::FoldingSetNodeID ID;
  VectorType::Profile(ID, vecType, NumElts, Type::Vector, VecKind);

  void *InsertPos = nullptr;
  if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(VTP, 0);

  // If the element type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.
  QualType Canonical;
  if (!vecType.isCanonical()) {
    Canonical = getVectorType(getCanonicalType(vecType), NumElts, VecKind);

    // Get the new insert position for the node we care about.
    VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New = new (*this, alignof(VectorType))
      VectorType(vecType, NumElts, Canonical, VecKind);
  VectorTypes.InsertNode(New, InsertPos);
  Types.push_back(New);
  return QualType(New, 0);
}

QualType ASTContext::getDependentVectorType(QualType VecType, Expr *SizeExpr,
                                            SourceLocation AttrLoc,
                                            VectorKind VecKind) const {
  llvm::FoldingSetNodeID ID;
  DependentVectorType::Profile(ID, *this, getCanonicalType(VecType), SizeExpr,
                               VecKind);
  void *InsertPos = nullptr;
  DependentVectorType *Canon =
      DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
  DependentVectorType *New;

  if (Canon) {
    New = new (*this, alignof(DependentVectorType)) DependentVectorType(
        VecType, QualType(Canon, 0), SizeExpr, AttrLoc, VecKind);
  } else {
    QualType CanonVecTy = getCanonicalType(VecType);
    if (CanonVecTy == VecType) {
      New = new (*this, alignof(DependentVectorType))
          DependentVectorType(VecType, QualType(), SizeExpr, AttrLoc, VecKind);

      DependentVectorType *CanonCheck =
          DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
      assert(!CanonCheck &&
             "Dependent-sized vector_size canonical type broken");
      (void)CanonCheck;
      DependentVectorTypes.InsertNode(New, InsertPos);
    } else {
      QualType CanonTy = getDependentVectorType(CanonVecTy, SizeExpr,
                                                SourceLocation(), VecKind);
      New = new (*this, alignof(DependentVectorType))
          DependentVectorType(VecType, CanonTy, SizeExpr, AttrLoc, VecKind);
    }
  }

  Types.push_back(New);
  return QualType(New, 0);
}

/// getExtVectorType - Return the unique reference to an extended vector type
/// of the specified element type and size. VectorType must be a built-in type.
QualType ASTContext::getExtVectorType(QualType vecType,
                                      unsigned NumElts) const {
  assert(vecType->isBuiltinType() || vecType->isDependentType() ||
         (vecType->isBitIntType() &&
          // Only support _BitInt elements with byte-sized power of 2 NumBits.
          llvm::isPowerOf2_32(vecType->castAs<BitIntType>()->getNumBits()) &&
          vecType->castAs<BitIntType>()->getNumBits() >= 8));

  // Check if we've already instantiated a vector of this type.
  llvm::FoldingSetNodeID ID;
  VectorType::Profile(ID, vecType, NumElts, Type::ExtVector,
                      VectorKind::Generic);
  void *InsertPos = nullptr;
  if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(VTP, 0);

  // If the element type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.
  QualType Canonical;
  if (!vecType.isCanonical()) {
    Canonical = getExtVectorType(getCanonicalType(vecType), NumElts);

    // Get the new insert position for the node we care about.
    VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New = new (*this, alignof(ExtVectorType))
      ExtVectorType(vecType, NumElts, Canonical);
  VectorTypes.InsertNode(New, InsertPos);
  Types.push_back(New);
  return QualType(New, 0);
}

QualType
ASTContext::getDependentSizedExtVectorType(QualType vecType,
                                           Expr *SizeExpr,
                                           SourceLocation AttrLoc) const {
  llvm::FoldingSetNodeID ID;
  DependentSizedExtVectorType::Profile(ID, *this, getCanonicalType(vecType),
                                       SizeExpr);

  void *InsertPos = nullptr;
  DependentSizedExtVectorType *Canon
    = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
  DependentSizedExtVectorType *New;
  if (Canon) {
    // We already have a canonical version of this array type; use it as
    // the canonical type for a newly-built type.
    New = new (*this, alignof(DependentSizedExtVectorType))
        DependentSizedExtVectorType(vecType, QualType(Canon, 0), SizeExpr,
                                    AttrLoc);
  } else {
    QualType CanonVecTy = getCanonicalType(vecType);
    if (CanonVecTy == vecType) {
      New = new (*this, alignof(DependentSizedExtVectorType))
          DependentSizedExtVectorType(vecType, QualType(), SizeExpr, AttrLoc);

      DependentSizedExtVectorType *CanonCheck
        = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
      assert(!CanonCheck && "Dependent-sized ext_vector canonical type broken");
      (void)CanonCheck;
      DependentSizedExtVectorTypes.InsertNode(New, InsertPos);
    } else {
      QualType CanonExtTy = getDependentSizedExtVectorType(CanonVecTy, SizeExpr,
                                                           SourceLocation());
      New = new (*this, alignof(DependentSizedExtVectorType))
          DependentSizedExtVectorType(vecType, CanonExtTy, SizeExpr, AttrLoc);
    }
  }

  Types.push_back(New);
  return QualType(New, 0);
}

QualType ASTContext::getConstantMatrixType(QualType ElementTy, unsigned NumRows,
                                           unsigned NumColumns) const {
  llvm::FoldingSetNodeID ID;
  ConstantMatrixType::Profile(ID, ElementTy, NumRows, NumColumns,
                              Type::ConstantMatrix);

  assert(MatrixType::isValidElementType(ElementTy) &&
         "need a valid element type");
  assert(ConstantMatrixType::isDimensionValid(NumRows) &&
         ConstantMatrixType::isDimensionValid(NumColumns) &&
         "need valid matrix dimensions");
  void *InsertPos = nullptr;
  if (ConstantMatrixType *MTP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(MTP, 0);

  QualType Canonical;
  if (!ElementTy.isCanonical()) {
    Canonical =
        getConstantMatrixType(getCanonicalType(ElementTy), NumRows, NumColumns);

    ConstantMatrixType *NewIP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Matrix type shouldn't already exist in the map");
    (void)NewIP;
  }

  auto *New = new (*this, alignof(ConstantMatrixType))
      ConstantMatrixType(ElementTy, NumRows, NumColumns, Canonical);
  MatrixTypes.InsertNode(New, InsertPos);
  Types.push_back(New);
  return QualType(New, 0);
}
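// Illustrative sketch (assuming an ASTContext &Ctx): constant matrix types,
// as used for the matrix_type attribute, are uniqued on the element type and
// both dimensions.
//
//   QualType M1 = Ctx.getConstantMatrixType(Ctx.FloatTy, 4, 4);
//   QualType M2 = Ctx.getConstantMatrixType(Ctx.FloatTy, 4, 4);
//   assert(M1 == M2);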
QualType ASTContext::getDependentSizedMatrixType(QualType ElementTy,
                                                 Expr *RowExpr,
                                                 Expr *ColumnExpr,
                                                 SourceLocation AttrLoc) const {
  QualType CanonElementTy = getCanonicalType(ElementTy);
  llvm::FoldingSetNodeID ID;
  DependentSizedMatrixType::Profile(ID, *this, CanonElementTy, RowExpr,
                                    ColumnExpr);

  void *InsertPos = nullptr;
  DependentSizedMatrixType *Canon =
      DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);

  if (!Canon) {
    Canon = new (*this, alignof(DependentSizedMatrixType))
        DependentSizedMatrixType(CanonElementTy, QualType(), RowExpr,
                                 ColumnExpr, AttrLoc);
#ifndef NDEBUG
    DependentSizedMatrixType *CanonCheck =
        DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!CanonCheck && "Dependent-sized matrix canonical type broken");
#endif
    DependentSizedMatrixTypes.InsertNode(Canon, InsertPos);
    Types.push_back(Canon);
  }

  // We now have a canonical version of the matrix type.
  //
  // If it exactly matches the requested type, use it directly.
  if (Canon->getElementType() == ElementTy && Canon->getRowExpr() == RowExpr &&
      Canon->getColumnExpr() == ColumnExpr)
    return QualType(Canon, 0);

  // Use Canon as the canonical type for newly-built type.
  DependentSizedMatrixType *New = new (*this, alignof(DependentSizedMatrixType))
      DependentSizedMatrixType(ElementTy, QualType(Canon, 0), RowExpr,
                               ColumnExpr, AttrLoc);
  Types.push_back(New);
  return QualType(New, 0);
}

QualType ASTContext::getDependentAddressSpaceType(QualType PointeeType,
                                                  Expr *AddrSpaceExpr,
                                                  SourceLocation AttrLoc) const {
  assert(AddrSpaceExpr->isInstantiationDependent());

  QualType canonPointeeType = getCanonicalType(PointeeType);

  void *insertPos = nullptr;
  llvm::FoldingSetNodeID ID;
  DependentAddressSpaceType::Profile(ID, *this, canonPointeeType,
                                     AddrSpaceExpr);

  DependentAddressSpaceType *canonTy =
      DependentAddressSpaceTypes.FindNodeOrInsertPos(ID, insertPos);

  if (!canonTy) {
    canonTy = new (*this, alignof(DependentAddressSpaceType))
        DependentAddressSpaceType(canonPointeeType, QualType(), AddrSpaceExpr,
                                  AttrLoc);
    DependentAddressSpaceTypes.InsertNode(canonTy, insertPos);
    Types.push_back(canonTy);
  }

  if (canonPointeeType == PointeeType &&
      canonTy->getAddrSpaceExpr() == AddrSpaceExpr)
    return QualType(canonTy, 0);

  auto *sugaredType = new (*this, alignof(DependentAddressSpaceType))
      DependentAddressSpaceType(PointeeType, QualType(canonTy, 0),
                                AddrSpaceExpr, AttrLoc);
  Types.push_back(sugaredType);
  return QualType(sugaredType, 0);
}

/// Determine whether \p T is canonical as the result type of a function.
static bool isCanonicalResultType(QualType T) {
  return T.isCanonical() &&
         (T.getObjCLifetime() == Qualifiers::OCL_None ||
          T.getObjCLifetime() == Qualifiers::OCL_ExplicitNone);
}

/// getFunctionNoProtoType - Return a K&R style C function type like 'int()'.
QualType
ASTContext::getFunctionNoProtoType(QualType ResultTy,
                                   const FunctionType::ExtInfo &Info) const {
  // FIXME: This assertion cannot be enabled (yet) because the ObjC rewriter
  // functionality creates a function without a prototype regardless of
  // language mode (so it makes them even in C++). Once the rewriter has been
  // fixed, this assertion can be enabled again.
  //assert(!LangOpts.requiresStrictPrototypes() &&
  //       "strict prototypes are disabled");

  // Unique functions, to guarantee there is only one function of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  FunctionNoProtoType::Profile(ID, ResultTy, Info);

  void *InsertPos = nullptr;
  if (FunctionNoProtoType *FT =
        FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(FT, 0);

  QualType Canonical;
  if (!isCanonicalResultType(ResultTy)) {
    Canonical =
        getFunctionNoProtoType(getCanonicalFunctionResultType(ResultTy), Info);

    // Get the new insert position for the node we care about.
    FunctionNoProtoType *NewIP =
        FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  auto *New = new (*this, alignof(FunctionNoProtoType))
      FunctionNoProtoType(ResultTy, Canonical, Info);
  Types.push_back(New);
  FunctionNoProtoTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}

CanQualType
ASTContext::getCanonicalFunctionResultType(QualType ResultType) const {
  CanQualType CanResultType = getCanonicalType(ResultType);

  // Canonical result types do not have ARC lifetime qualifiers.
  if (CanResultType.getQualifiers().hasObjCLifetime()) {
    Qualifiers Qs = CanResultType.getQualifiers();
    Qs.removeObjCLifetime();
    return CanQualType::CreateUnsafe(
        getQualifiedType(CanResultType.getUnqualifiedType(), Qs));
  }

  return CanResultType;
}

static bool isCanonicalExceptionSpecification(
    const FunctionProtoType::ExceptionSpecInfo &ESI, bool NoexceptInType) {
  if (ESI.Type == EST_None)
    return true;
  if (!NoexceptInType)
    return false;

  // C++17 onwards: exception specification is part of the type, as a simple
  // boolean "can this function type throw".
  if (ESI.Type == EST_BasicNoexcept)
    return true;

  // A noexcept(expr) specification is (possibly) canonical if expr is
  // value-dependent.
  if (ESI.Type == EST_DependentNoexcept)
    return true;

  // A dynamic exception specification is canonical if it only contains pack
  // expansions (so we can't tell whether it's non-throwing) and all its
  // contained types are canonical.
  if (ESI.Type == EST_Dynamic) {
    bool AnyPackExpansions = false;
    for (QualType ET : ESI.Exceptions) {
      if (!ET.isCanonical())
        return false;
      if (ET->getAs<PackExpansionType>())
        AnyPackExpansions = true;
    }
    return AnyPackExpansions;
  }

  return false;
}

QualType ASTContext::getFunctionTypeInternal(
    QualType ResultTy, ArrayRef<QualType> ArgArray,
    const FunctionProtoType::ExtProtoInfo &EPI, bool OnlyWantCanonical) const {
  size_t NumArgs = ArgArray.size();

  // Unique functions, to guarantee there is only one function of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  FunctionProtoType::Profile(ID, ResultTy, ArgArray.begin(), NumArgs, EPI,
                             *this, true);

  QualType Canonical;
  bool Unique = false;

  void *InsertPos = nullptr;
  if (FunctionProtoType *FPT =
        FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) {
    QualType Existing = QualType(FPT, 0);

    // If we find a pre-existing equivalent FunctionProtoType, we can just
    // reuse it so long as our exception specification doesn't contain a
    // dependent noexcept expression, or we're just looking for a canonical
    // type. Otherwise, we're going to need to create a type sugar node to
    // hold the concrete expression.
    if (OnlyWantCanonical || !isComputedNoexcept(EPI.ExceptionSpec.Type) ||
        EPI.ExceptionSpec.NoexceptExpr == FPT->getNoexceptExpr())
      return Existing;

    // We need a new type sugar node for this one, to hold the new noexcept
    // expression. We do no canonicalization here, but that's OK since we don't
    // expect to see the same noexcept expression much more than once.
    Canonical = getCanonicalType(Existing);
    Unique = true;
  }

  bool NoexceptInType = getLangOpts().CPlusPlus17;
  bool IsCanonicalExceptionSpec =
      isCanonicalExceptionSpecification(EPI.ExceptionSpec, NoexceptInType);

  // Determine whether the type being created is already canonical or not.
  bool isCanonical = !Unique && IsCanonicalExceptionSpec &&
                     isCanonicalResultType(ResultTy) && !EPI.HasTrailingReturn;
  for (unsigned i = 0; i != NumArgs && isCanonical; ++i)
    if (!ArgArray[i].isCanonicalAsParam())
      isCanonical = false;

  if (OnlyWantCanonical)
    assert(isCanonical &&
           "given non-canonical parameters constructing canonical type");

  // If this type isn't canonical, get the canonical version of it if we don't
  // already have it. The exception spec is only partially part of the
  // canonical type, and only in C++17 onwards.
  if (!isCanonical && Canonical.isNull()) {
    SmallVector<QualType, 16> CanonicalArgs;
    CanonicalArgs.reserve(NumArgs);
    for (unsigned i = 0; i != NumArgs; ++i)
      CanonicalArgs.push_back(getCanonicalParamType(ArgArray[i]));

    llvm::SmallVector<QualType, 8> ExceptionTypeStorage;
    FunctionProtoType::ExtProtoInfo CanonicalEPI = EPI;
    CanonicalEPI.HasTrailingReturn = false;

    if (IsCanonicalExceptionSpec) {
      // Exception spec is already OK.
    } else if (NoexceptInType) {
      switch (EPI.ExceptionSpec.Type) {
      case EST_Unparsed: case EST_Unevaluated: case EST_Uninstantiated:
        // We don't know yet. It shouldn't matter what we pick here; no-one
        // should ever look at this.
        [[fallthrough]];
      case EST_None: case EST_MSAny: case EST_NoexceptFalse:
        CanonicalEPI.ExceptionSpec.Type = EST_None;
        break;

      // A dynamic exception specification is almost always "not noexcept",
      // with the exception that a pack expansion might expand to no types.
      case EST_Dynamic: {
        bool AnyPacks = false;
        for (QualType ET : EPI.ExceptionSpec.Exceptions) {
          if (ET->getAs<PackExpansionType>())
            AnyPacks = true;
          ExceptionTypeStorage.push_back(getCanonicalType(ET));
        }
        if (!AnyPacks)
          CanonicalEPI.ExceptionSpec.Type = EST_None;
        else {
          CanonicalEPI.ExceptionSpec.Type = EST_Dynamic;
          CanonicalEPI.ExceptionSpec.Exceptions = ExceptionTypeStorage;
        }
        break;
      }

      case EST_DynamicNone:
      case EST_BasicNoexcept:
      case EST_NoexceptTrue:
      case EST_NoThrow:
        CanonicalEPI.ExceptionSpec.Type = EST_BasicNoexcept;
        break;

      case EST_DependentNoexcept:
        llvm_unreachable("dependent noexcept is already canonical");
      }
    } else {
      CanonicalEPI.ExceptionSpec = FunctionProtoType::ExceptionSpecInfo();
    }

    // Adjust the canonical function result type.
    CanQualType CanResultTy = getCanonicalFunctionResultType(ResultTy);
    Canonical =
        getFunctionTypeInternal(CanResultTy, CanonicalArgs, CanonicalEPI, true);

    // Get the new insert position for the node we care about.
    FunctionProtoType *NewIP =
        FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  // Compute the needed size to hold this FunctionProtoType and the
  // various trailing objects.
  auto ESH = FunctionProtoType::getExceptionSpecSize(
      EPI.ExceptionSpec.Type, EPI.ExceptionSpec.Exceptions.size());
  size_t Size = FunctionProtoType::totalSizeToAlloc<
      QualType, SourceLocation, FunctionType::FunctionTypeExtraBitfields,
      FunctionType::FunctionTypeArmAttributes, FunctionType::ExceptionType,
      Expr *, FunctionDecl *, FunctionProtoType::ExtParameterInfo, Qualifiers>(
      NumArgs, EPI.Variadic, EPI.requiresFunctionProtoTypeExtraBitfields(),
      EPI.requiresFunctionProtoTypeArmAttributes(), ESH.NumExceptionType,
      ESH.NumExprPtr, ESH.NumFunctionDeclPtr,
      EPI.ExtParameterInfos ? NumArgs : 0,
      EPI.TypeQuals.hasNonFastQualifiers() ? 1 : 0);

  auto *FTP = (FunctionProtoType *)Allocate(Size, alignof(FunctionProtoType));
  FunctionProtoType::ExtProtoInfo newEPI = EPI;
  new (FTP) FunctionProtoType(ResultTy, ArgArray, Canonical, newEPI);
  Types.push_back(FTP);
  if (!Unique)
    FunctionProtoTypes.InsertNode(FTP, InsertPos);
  return QualType(FTP, 0);
}

QualType ASTContext::getPipeType(QualType T, bool ReadOnly) const {
  llvm::FoldingSetNodeID ID;
  PipeType::Profile(ID, T, ReadOnly);

  void *InsertPos = nullptr;
  if (PipeType *PT = PipeTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(PT, 0);

  // If the pipe element type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getPipeType(getCanonicalType(T), ReadOnly);

    // Get the new insert position for the node we care about.
    PipeType *NewIP = PipeTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!");
    (void)NewIP;
  }
  auto *New = new (*this, alignof(PipeType)) PipeType(T, Canonical, ReadOnly);
  Types.push_back(New);
  PipeTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}
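// Illustrative sketch (assuming an ASTContext &Ctx): OpenCL read-only and
// write-only pipes over the same element type are distinct, individually
// uniqued nodes.
//
//   QualType R = Ctx.getPipeType(Ctx.IntTy, /*ReadOnly=*/true);
//   QualType W = Ctx.getPipeType(Ctx.IntTy, /*ReadOnly=*/false);
//   assert(R != W);
//   assert(R == Ctx.getPipeType(Ctx.IntTy, /*ReadOnly=*/true));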
QualType ASTContext::adjustStringLiteralBaseType(QualType Ty) const {
  // OpenCL v1.1 s6.5.3: a string literal is in the constant address space.
  return LangOpts.OpenCL ? getAddrSpaceQualType(Ty, LangAS::opencl_constant)
                         : Ty;
}

QualType ASTContext::getReadPipeType(QualType T) const {
  return getPipeType(T, true);
}

QualType ASTContext::getWritePipeType(QualType T) const {
  return getPipeType(T, false);
}

QualType ASTContext::getBitIntType(bool IsUnsigned, unsigned NumBits) const {
  llvm::FoldingSetNodeID ID;
  BitIntType::Profile(ID, IsUnsigned, NumBits);

  void *InsertPos = nullptr;
  if (BitIntType *EIT = BitIntTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(EIT, 0);

  auto *New = new (*this, alignof(BitIntType)) BitIntType(IsUnsigned, NumBits);
  BitIntTypes.InsertNode(New, InsertPos);
  Types.push_back(New);
  return QualType(New, 0);
}

QualType ASTContext::getDependentBitIntType(bool IsUnsigned,
                                            Expr *NumBitsExpr) const {
  assert(NumBitsExpr->isInstantiationDependent() && "Only good for dependent");
  llvm::FoldingSetNodeID ID;
  DependentBitIntType::Profile(ID, *this, IsUnsigned, NumBitsExpr);

  void *InsertPos = nullptr;
  if (DependentBitIntType *Existing =
          DependentBitIntTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(Existing, 0);

  auto *New = new (*this, alignof(DependentBitIntType))
      DependentBitIntType(IsUnsigned, NumBitsExpr);
  DependentBitIntTypes.InsertNode(New, InsertPos);

  Types.push_back(New);
  return QualType(New, 0);
}

#ifndef NDEBUG
static bool NeedsInjectedClassNameType(const RecordDecl *D) {
  if (!isa<CXXRecordDecl>(D)) return false;
  const auto *RD = cast<CXXRecordDecl>(D);
  if (isa<ClassTemplatePartialSpecializationDecl>(RD))
    return true;
  if (RD->getDescribedClassTemplate() &&
      !isa<ClassTemplateSpecializationDecl>(RD))
    return true;
  return false;
}
#endif

/// getInjectedClassNameType - Return the unique reference to the
/// injected class name type for the specified templated declaration.
QualType ASTContext::getInjectedClassNameType(CXXRecordDecl *Decl,
                                              QualType TST) const {
  assert(NeedsInjectedClassNameType(Decl));
  if (Decl->TypeForDecl) {
    assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
  } else if (CXXRecordDecl *PrevDecl = Decl->getPreviousDecl()) {
    assert(PrevDecl->TypeForDecl && "previous declaration has no type");
    Decl->TypeForDecl = PrevDecl->TypeForDecl;
    assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
  } else {
    Type *newType = new (*this, alignof(InjectedClassNameType))
        InjectedClassNameType(Decl, TST);
    Decl->TypeForDecl = newType;
    Types.push_back(newType);
  }
  return QualType(Decl->TypeForDecl, 0);
}
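// Illustrative sketch: within a class template such as
//
//   template <typename T> struct S {
//     S *next; // 'S' here names the injected class name, i.e. 'S<T>'
//   };
//
// the node built above is the InjectedClassNameType for 'S', carrying the
// corresponding template specialization type as TST, and every redeclaration
// of the template shares the same node through Decl->TypeForDecl.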
4615 QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) const { 4616 assert(Decl && "Passed null for Decl param"); 4617 assert(!Decl->TypeForDecl && "TypeForDecl present in slow case"); 4618 4619 if (const auto *Typedef = dyn_cast<TypedefNameDecl>(Decl)) 4620 return getTypedefType(Typedef); 4621 4622 assert(!isa<TemplateTypeParmDecl>(Decl) && 4623 "Template type parameter types are always available."); 4624 4625 if (const auto *Record = dyn_cast<RecordDecl>(Decl)) { 4626 assert(Record->isFirstDecl() && "struct/union has previous declaration"); 4627 assert(!NeedsInjectedClassNameType(Record)); 4628 return getRecordType(Record); 4629 } else if (const auto *Enum = dyn_cast<EnumDecl>(Decl)) { 4630 assert(Enum->isFirstDecl() && "enum has previous declaration"); 4631 return getEnumType(Enum); 4632 } else if (const auto *Using = dyn_cast<UnresolvedUsingTypenameDecl>(Decl)) { 4633 return getUnresolvedUsingType(Using); 4634 } else 4635 llvm_unreachable("TypeDecl without a type?"); 4636 4637 return QualType(Decl->TypeForDecl, 0); 4638 } 4639 4640 /// getTypedefType - Return the unique reference to the type for the 4641 /// specified typedef name decl. 4642 QualType ASTContext::getTypedefType(const TypedefNameDecl *Decl, 4643 QualType Underlying) const { 4644 if (!Decl->TypeForDecl) { 4645 if (Underlying.isNull()) 4646 Underlying = Decl->getUnderlyingType(); 4647 auto *NewType = new (*this, alignof(TypedefType)) TypedefType( 4648 Type::Typedef, Decl, QualType(), getCanonicalType(Underlying)); 4649 Decl->TypeForDecl = NewType; 4650 Types.push_back(NewType); 4651 return QualType(NewType, 0); 4652 } 4653 if (Underlying.isNull() || Decl->getUnderlyingType() == Underlying) 4654 return QualType(Decl->TypeForDecl, 0); 4655 assert(hasSameType(Decl->getUnderlyingType(), Underlying)); 4656 4657 llvm::FoldingSetNodeID ID; 4658 TypedefType::Profile(ID, Decl, Underlying); 4659 4660 void *InsertPos = nullptr; 4661 if (TypedefType *T = TypedefTypes.FindNodeOrInsertPos(ID, InsertPos)) { 4662 assert(!T->typeMatchesDecl() && 4663 "non-divergent case should be handled with TypeDecl"); 4664 return QualType(T, 0); 4665 } 4666 4667 void *Mem = Allocate(TypedefType::totalSizeToAlloc<QualType>(true), 4668 alignof(TypedefType)); 4669 auto *NewType = new (Mem) TypedefType(Type::Typedef, Decl, Underlying, 4670 getCanonicalType(Underlying)); 4671 TypedefTypes.InsertNode(NewType, InsertPos); 4672 Types.push_back(NewType); 4673 return QualType(NewType, 0); 4674 } 4675 4676 QualType ASTContext::getUsingType(const UsingShadowDecl *Found, 4677 QualType Underlying) const { 4678 llvm::FoldingSetNodeID ID; 4679 UsingType::Profile(ID, Found, Underlying); 4680 4681 void *InsertPos = nullptr; 4682 if (UsingType *T = UsingTypes.FindNodeOrInsertPos(ID, InsertPos)) 4683 return QualType(T, 0); 4684 4685 const Type *TypeForDecl = 4686 cast<TypeDecl>(Found->getTargetDecl())->getTypeForDecl(); 4687 4688 assert(!Underlying.hasLocalQualifiers()); 4689 QualType Canon = Underlying->getCanonicalTypeInternal(); 4690 assert(TypeForDecl->getCanonicalTypeInternal() == Canon); 4691 4692 if (Underlying.getTypePtr() == TypeForDecl) 4693 Underlying = QualType(); 4694 void *Mem = 4695 Allocate(UsingType::totalSizeToAlloc<QualType>(!Underlying.isNull()), 4696 alignof(UsingType)); 4697 UsingType *NewType = new (Mem) UsingType(Found, Underlying, Canon); 4698 Types.push_back(NewType); 4699 UsingTypes.InsertNode(NewType, InsertPos); 4700 return QualType(NewType, 0); 4701 } 4702 4703 QualType ASTContext::getRecordType(const RecordDecl *Decl) const { 
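  // The RecordType node is created lazily and cached on the declaration chain:
  // if any redeclaration already has a TypeForDecl, every other redeclaration
  // reuses that same node.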
4704 if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0); 4705 4706 if (const RecordDecl *PrevDecl = Decl->getPreviousDecl()) 4707 if (PrevDecl->TypeForDecl) 4708 return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0); 4709 4710 auto *newType = new (*this, alignof(RecordType)) RecordType(Decl); 4711 Decl->TypeForDecl = newType; 4712 Types.push_back(newType); 4713 return QualType(newType, 0); 4714 } 4715 4716 QualType ASTContext::getEnumType(const EnumDecl *Decl) const { 4717 if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0); 4718 4719 if (const EnumDecl *PrevDecl = Decl->getPreviousDecl()) 4720 if (PrevDecl->TypeForDecl) 4721 return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0); 4722 4723 auto *newType = new (*this, alignof(EnumType)) EnumType(Decl); 4724 Decl->TypeForDecl = newType; 4725 Types.push_back(newType); 4726 return QualType(newType, 0); 4727 } 4728 4729 QualType ASTContext::getUnresolvedUsingType( 4730 const UnresolvedUsingTypenameDecl *Decl) const { 4731 if (Decl->TypeForDecl) 4732 return QualType(Decl->TypeForDecl, 0); 4733 4734 if (const UnresolvedUsingTypenameDecl *CanonicalDecl = 4735 Decl->getCanonicalDecl()) 4736 if (CanonicalDecl->TypeForDecl) 4737 return QualType(Decl->TypeForDecl = CanonicalDecl->TypeForDecl, 0); 4738 4739 Type *newType = 4740 new (*this, alignof(UnresolvedUsingType)) UnresolvedUsingType(Decl); 4741 Decl->TypeForDecl = newType; 4742 Types.push_back(newType); 4743 return QualType(newType, 0); 4744 } 4745 4746 QualType ASTContext::getAttributedType(attr::Kind attrKind, 4747 QualType modifiedType, 4748 QualType equivalentType) const { 4749 llvm::FoldingSetNodeID id; 4750 AttributedType::Profile(id, attrKind, modifiedType, equivalentType); 4751 4752 void *insertPos = nullptr; 4753 AttributedType *type = AttributedTypes.FindNodeOrInsertPos(id, insertPos); 4754 if (type) return QualType(type, 0); 4755 4756 QualType canon = getCanonicalType(equivalentType); 4757 type = new (*this, alignof(AttributedType)) 4758 AttributedType(canon, attrKind, modifiedType, equivalentType); 4759 4760 Types.push_back(type); 4761 AttributedTypes.InsertNode(type, insertPos); 4762 4763 return QualType(type, 0); 4764 } 4765 4766 QualType ASTContext::getBTFTagAttributedType(const BTFTypeTagAttr *BTFAttr, 4767 QualType Wrapped) { 4768 llvm::FoldingSetNodeID ID; 4769 BTFTagAttributedType::Profile(ID, Wrapped, BTFAttr); 4770 4771 void *InsertPos = nullptr; 4772 BTFTagAttributedType *Ty = 4773 BTFTagAttributedTypes.FindNodeOrInsertPos(ID, InsertPos); 4774 if (Ty) 4775 return QualType(Ty, 0); 4776 4777 QualType Canon = getCanonicalType(Wrapped); 4778 Ty = new (*this, alignof(BTFTagAttributedType)) 4779 BTFTagAttributedType(Canon, Wrapped, BTFAttr); 4780 4781 Types.push_back(Ty); 4782 BTFTagAttributedTypes.InsertNode(Ty, InsertPos); 4783 4784 return QualType(Ty, 0); 4785 } 4786 4787 /// Retrieve a substitution-result type. 
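/// A SubstTemplateTypeParmType is sugar over \p Replacement recording that it
/// was substituted for the template type parameter at position \p Index of the
/// associated declaration (with \p PackIndex identifying the selected element
/// when that parameter was a pack).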
4788 QualType ASTContext::getSubstTemplateTypeParmType( 4789 QualType Replacement, Decl *AssociatedDecl, unsigned Index, 4790 std::optional<unsigned> PackIndex) const { 4791 llvm::FoldingSetNodeID ID; 4792 SubstTemplateTypeParmType::Profile(ID, Replacement, AssociatedDecl, Index, 4793 PackIndex); 4794 void *InsertPos = nullptr; 4795 SubstTemplateTypeParmType *SubstParm = 4796 SubstTemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos); 4797 4798 if (!SubstParm) { 4799 void *Mem = Allocate(SubstTemplateTypeParmType::totalSizeToAlloc<QualType>( 4800 !Replacement.isCanonical()), 4801 alignof(SubstTemplateTypeParmType)); 4802 SubstParm = new (Mem) SubstTemplateTypeParmType(Replacement, AssociatedDecl, 4803 Index, PackIndex); 4804 Types.push_back(SubstParm); 4805 SubstTemplateTypeParmTypes.InsertNode(SubstParm, InsertPos); 4806 } 4807 4808 return QualType(SubstParm, 0); 4809 } 4810 4811 /// Retrieve a substitution-result type for the substitution of an entire 
/// template argument pack for a template type parameter pack. 4812 QualType 4813 ASTContext::getSubstTemplateTypeParmPackType(Decl *AssociatedDecl, 4814 unsigned Index, bool Final, 4815 const TemplateArgument &ArgPack) { 4816 #ifndef NDEBUG 4817 for (const auto &P : ArgPack.pack_elements()) 4818 assert(P.getKind() == TemplateArgument::Type && "Pack contains a non-type"); 4819 #endif 4820 4821 llvm::FoldingSetNodeID ID; 4822 SubstTemplateTypeParmPackType::Profile(ID, AssociatedDecl, Index, Final, 4823 ArgPack); 4824 void *InsertPos = nullptr; 4825 if (SubstTemplateTypeParmPackType *SubstParm = 4826 SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos)) 4827 return QualType(SubstParm, 0); 4828 4829 QualType Canon; 4830 { 4831 TemplateArgument CanonArgPack = getCanonicalTemplateArgument(ArgPack); 4832 if (!AssociatedDecl->isCanonicalDecl() || 4833 !CanonArgPack.structurallyEquals(ArgPack)) { 4834 Canon = getSubstTemplateTypeParmPackType( 4835 AssociatedDecl->getCanonicalDecl(), Index, Final, CanonArgPack); 4836 [[maybe_unused]] const auto *Nothing = 4837 SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos); 4838 assert(!Nothing); 4839 } 4840 } 4841 4842 auto *SubstParm = new (*this, alignof(SubstTemplateTypeParmPackType)) 4843 SubstTemplateTypeParmPackType(Canon, AssociatedDecl, Index, Final, 4844 ArgPack); 4845 Types.push_back(SubstParm); 4846 SubstTemplateTypeParmPackTypes.InsertNode(SubstParm, InsertPos); 4847 return QualType(SubstParm, 0); 4848 } 4849 4850 /// Retrieve the template type parameter type for a template 4851 /// parameter or parameter pack with the given depth, index, and (optionally) 4852 /// name.
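/// For example, in 'template <typename T> struct S', the parameter T is at
/// depth 0, index 0; a parameter of a nested member template would be at
/// depth 1.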
4853 QualType ASTContext::getTemplateTypeParmType(unsigned Depth, unsigned Index, 4854 bool ParameterPack, 4855 TemplateTypeParmDecl *TTPDecl) const { 4856 llvm::FoldingSetNodeID ID; 4857 TemplateTypeParmType::Profile(ID, Depth, Index, ParameterPack, TTPDecl); 4858 void *InsertPos = nullptr; 4859 TemplateTypeParmType *TypeParm 4860 = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos); 4861 4862 if (TypeParm) 4863 return QualType(TypeParm, 0); 4864 4865 if (TTPDecl) { 4866 QualType Canon = getTemplateTypeParmType(Depth, Index, ParameterPack); 4867 TypeParm = new (*this, alignof(TemplateTypeParmType)) 4868 TemplateTypeParmType(TTPDecl, Canon); 4869 4870 TemplateTypeParmType *TypeCheck 4871 = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos); 4872 assert(!TypeCheck && "Template type parameter canonical type broken"); 4873 (void)TypeCheck; 4874 } else 4875 TypeParm = new (*this, alignof(TemplateTypeParmType)) 4876 TemplateTypeParmType(Depth, Index, ParameterPack); 4877 4878 Types.push_back(TypeParm); 4879 TemplateTypeParmTypes.InsertNode(TypeParm, InsertPos); 4880 4881 return QualType(TypeParm, 0); 4882 } 4883 4884 TypeSourceInfo * 4885 ASTContext::getTemplateSpecializationTypeInfo(TemplateName Name, 4886 SourceLocation NameLoc, 4887 const TemplateArgumentListInfo &Args, 4888 QualType Underlying) const { 4889 assert(!Name.getAsDependentTemplateName() && 4890 "No dependent template names here!"); 4891 QualType TST = 4892 getTemplateSpecializationType(Name, Args.arguments(), Underlying); 4893 4894 TypeSourceInfo *DI = CreateTypeSourceInfo(TST); 4895 TemplateSpecializationTypeLoc TL = 4896 DI->getTypeLoc().castAs<TemplateSpecializationTypeLoc>(); 4897 TL.setTemplateKeywordLoc(SourceLocation()); 4898 TL.setTemplateNameLoc(NameLoc); 4899 TL.setLAngleLoc(Args.getLAngleLoc()); 4900 TL.setRAngleLoc(Args.getRAngleLoc()); 4901 for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i) 4902 TL.setArgLocInfo(i, Args[i].getLocInfo()); 4903 return DI; 4904 } 4905 4906 QualType 4907 ASTContext::getTemplateSpecializationType(TemplateName Template, 4908 ArrayRef<TemplateArgumentLoc> Args, 4909 QualType Underlying) const { 4910 assert(!Template.getAsDependentTemplateName() && 4911 "No dependent template names here!"); 4912 4913 SmallVector<TemplateArgument, 4> ArgVec; 4914 ArgVec.reserve(Args.size()); 4915 for (const TemplateArgumentLoc &Arg : Args) 4916 ArgVec.push_back(Arg.getArgument()); 4917 4918 return getTemplateSpecializationType(Template, ArgVec, Underlying); 4919 } 4920 4921 #ifndef NDEBUG 4922 static bool hasAnyPackExpansions(ArrayRef<TemplateArgument> Args) { 4923 for (const TemplateArgument &Arg : Args) 4924 if (Arg.isPackExpansion()) 4925 return true; 4926 4927 return true; 4928 } 4929 #endif 4930 4931 QualType 4932 ASTContext::getTemplateSpecializationType(TemplateName Template, 4933 ArrayRef<TemplateArgument> Args, 4934 QualType Underlying) const { 4935 assert(!Template.getAsDependentTemplateName() && 4936 "No dependent template names here!"); 4937 // Look through qualified template names. 4938 if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName()) 4939 Template = QTN->getUnderlyingTemplate(); 4940 4941 const auto *TD = Template.getAsTemplateDecl(); 4942 bool IsTypeAlias = TD && TD->isTypeAlias(); 4943 QualType CanonType; 4944 if (!Underlying.isNull()) 4945 CanonType = getCanonicalType(Underlying); 4946 else { 4947 // We can get here with an alias template when the specialization contains 4948 // a pack expansion that does not match up with a parameter pack. 
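    // In that case the alias cannot be expanded here, so the specialization is
    // treated like an ordinary template-id and given a canonical
    // TemplateSpecializationType below.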
4949 assert((!IsTypeAlias || hasAnyPackExpansions(Args)) && 4950 "Caller must compute aliased type"); 4951 IsTypeAlias = false; 4952 CanonType = getCanonicalTemplateSpecializationType(Template, Args); 4953 } 4954 4955 // Allocate the (non-canonical) template specialization type, but don't 4956 // try to unique it: these types typically have location information that 4957 // we don't unique and don't want to lose. 4958 void *Mem = Allocate(sizeof(TemplateSpecializationType) + 4959 sizeof(TemplateArgument) * Args.size() + 4960 (IsTypeAlias ? sizeof(QualType) : 0), 4961 alignof(TemplateSpecializationType)); 4962 auto *Spec 4963 = new (Mem) TemplateSpecializationType(Template, Args, CanonType, 4964 IsTypeAlias ? Underlying : QualType()); 4965 4966 Types.push_back(Spec); 4967 return QualType(Spec, 0); 4968 } 4969 4970 QualType ASTContext::getCanonicalTemplateSpecializationType( 4971 TemplateName Template, ArrayRef<TemplateArgument> Args) const { 4972 assert(!Template.getAsDependentTemplateName() && 4973 "No dependent template names here!"); 4974 4975 // Look through qualified template names. 4976 if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName()) 4977 Template = TemplateName(QTN->getUnderlyingTemplate()); 4978 4979 // Build the canonical template specialization type. 4980 TemplateName CanonTemplate = getCanonicalTemplateName(Template); 4981 bool AnyNonCanonArgs = false; 4982 auto CanonArgs = 4983 ::getCanonicalTemplateArguments(*this, Args, AnyNonCanonArgs); 4984 4985 // Determine whether this canonical template specialization type already 4986 // exists. 4987 llvm::FoldingSetNodeID ID; 4988 TemplateSpecializationType::Profile(ID, CanonTemplate, 4989 CanonArgs, *this); 4990 4991 void *InsertPos = nullptr; 4992 TemplateSpecializationType *Spec 4993 = TemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); 4994 4995 if (!Spec) { 4996 // Allocate a new canonical template specialization type. 
4997 void *Mem = Allocate((sizeof(TemplateSpecializationType) + 4998 sizeof(TemplateArgument) * CanonArgs.size()), 4999 alignof(TemplateSpecializationType)); 5000 Spec = new (Mem) TemplateSpecializationType(CanonTemplate, 5001 CanonArgs, 5002 QualType(), QualType()); 5003 Types.push_back(Spec); 5004 TemplateSpecializationTypes.InsertNode(Spec, InsertPos); 5005 } 5006 5007 assert(Spec->isDependentType() && 5008 "Non-dependent template-id type must have a canonical type"); 5009 return QualType(Spec, 0); 5010 } 5011 5012 QualType ASTContext::getElaboratedType(ElaboratedTypeKeyword Keyword, 5013 NestedNameSpecifier *NNS, 5014 QualType NamedType, 5015 TagDecl *OwnedTagDecl) const { 5016 llvm::FoldingSetNodeID ID; 5017 ElaboratedType::Profile(ID, Keyword, NNS, NamedType, OwnedTagDecl); 5018 5019 void *InsertPos = nullptr; 5020 ElaboratedType *T = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos); 5021 if (T) 5022 return QualType(T, 0); 5023 5024 QualType Canon = NamedType; 5025 if (!Canon.isCanonical()) { 5026 Canon = getCanonicalType(NamedType); 5027 ElaboratedType *CheckT = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos); 5028 assert(!CheckT && "Elaborated canonical type broken"); 5029 (void)CheckT; 5030 } 5031 5032 void *Mem = 5033 Allocate(ElaboratedType::totalSizeToAlloc<TagDecl *>(!!OwnedTagDecl), 5034 alignof(ElaboratedType)); 5035 T = new (Mem) ElaboratedType(Keyword, NNS, NamedType, Canon, OwnedTagDecl); 5036 5037 Types.push_back(T); 5038 ElaboratedTypes.InsertNode(T, InsertPos); 5039 return QualType(T, 0); 5040 } 5041 5042 QualType 5043 ASTContext::getParenType(QualType InnerType) const { 5044 llvm::FoldingSetNodeID ID; 5045 ParenType::Profile(ID, InnerType); 5046 5047 void *InsertPos = nullptr; 5048 ParenType *T = ParenTypes.FindNodeOrInsertPos(ID, InsertPos); 5049 if (T) 5050 return QualType(T, 0); 5051 5052 QualType Canon = InnerType; 5053 if (!Canon.isCanonical()) { 5054 Canon = getCanonicalType(InnerType); 5055 ParenType *CheckT = ParenTypes.FindNodeOrInsertPos(ID, InsertPos); 5056 assert(!CheckT && "Paren canonical type broken"); 5057 (void)CheckT; 5058 } 5059 5060 T = new (*this, alignof(ParenType)) ParenType(InnerType, Canon); 5061 Types.push_back(T); 5062 ParenTypes.InsertNode(T, InsertPos); 5063 return QualType(T, 0); 5064 } 5065 5066 QualType 5067 ASTContext::getMacroQualifiedType(QualType UnderlyingTy, 5068 const IdentifierInfo *MacroII) const { 5069 QualType Canon = UnderlyingTy; 5070 if (!Canon.isCanonical()) 5071 Canon = getCanonicalType(UnderlyingTy); 5072 5073 auto *newType = new (*this, alignof(MacroQualifiedType)) 5074 MacroQualifiedType(UnderlyingTy, Canon, MacroII); 5075 Types.push_back(newType); 5076 return QualType(newType, 0); 5077 } 5078 5079 QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword, 5080 NestedNameSpecifier *NNS, 5081 const IdentifierInfo *Name, 5082 QualType Canon) const { 5083 if (Canon.isNull()) { 5084 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 5085 if (CanonNNS != NNS) 5086 Canon = getDependentNameType(Keyword, CanonNNS, Name); 5087 } 5088 5089 llvm::FoldingSetNodeID ID; 5090 DependentNameType::Profile(ID, Keyword, NNS, Name); 5091 5092 void *InsertPos = nullptr; 5093 DependentNameType *T 5094 = DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos); 5095 if (T) 5096 return QualType(T, 0); 5097 5098 T = new (*this, alignof(DependentNameType)) 5099 DependentNameType(Keyword, NNS, Name, Canon); 5100 Types.push_back(T); 5101 DependentNameTypes.InsertNode(T, InsertPos); 5102 return QualType(T, 0); 
5103 } 5104 5105 QualType ASTContext::getDependentTemplateSpecializationType( 5106 ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS, 5107 const IdentifierInfo *Name, ArrayRef<TemplateArgumentLoc> Args) const { 5108 // TODO: avoid this copy 5109 SmallVector<TemplateArgument, 16> ArgCopy; 5110 for (unsigned I = 0, E = Args.size(); I != E; ++I) 5111 ArgCopy.push_back(Args[I].getArgument()); 5112 return getDependentTemplateSpecializationType(Keyword, NNS, Name, ArgCopy); 5113 } 5114 5115 QualType 5116 ASTContext::getDependentTemplateSpecializationType( 5117 ElaboratedTypeKeyword Keyword, 5118 NestedNameSpecifier *NNS, 5119 const IdentifierInfo *Name, 5120 ArrayRef<TemplateArgument> Args) const { 5121 assert((!NNS || NNS->isDependent()) && 5122 "nested-name-specifier must be dependent"); 5123 5124 llvm::FoldingSetNodeID ID; 5125 DependentTemplateSpecializationType::Profile(ID, *this, Keyword, NNS, 5126 Name, Args); 5127 5128 void *InsertPos = nullptr; 5129 DependentTemplateSpecializationType *T 5130 = DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); 5131 if (T) 5132 return QualType(T, 0); 5133 5134 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 5135 5136 ElaboratedTypeKeyword CanonKeyword = Keyword; 5137 if (Keyword == ElaboratedTypeKeyword::None) 5138 CanonKeyword = ElaboratedTypeKeyword::Typename; 5139 5140 bool AnyNonCanonArgs = false; 5141 auto CanonArgs = 5142 ::getCanonicalTemplateArguments(*this, Args, AnyNonCanonArgs); 5143 5144 QualType Canon; 5145 if (AnyNonCanonArgs || CanonNNS != NNS || CanonKeyword != Keyword) { 5146 Canon = getDependentTemplateSpecializationType(CanonKeyword, CanonNNS, 5147 Name, 5148 CanonArgs); 5149 5150 // Find the insert position again. 5151 [[maybe_unused]] auto *Nothing = 5152 DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); 5153 assert(!Nothing && "canonical type broken"); 5154 } 5155 5156 void *Mem = Allocate((sizeof(DependentTemplateSpecializationType) + 5157 sizeof(TemplateArgument) * Args.size()), 5158 alignof(DependentTemplateSpecializationType)); 5159 T = new (Mem) DependentTemplateSpecializationType(Keyword, NNS, 5160 Name, Args, Canon); 5161 Types.push_back(T); 5162 DependentTemplateSpecializationTypes.InsertNode(T, InsertPos); 5163 return QualType(T, 0); 5164 } 5165 5166 TemplateArgument ASTContext::getInjectedTemplateArg(NamedDecl *Param) { 5167 TemplateArgument Arg; 5168 if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Param)) { 5169 QualType ArgType = getTypeDeclType(TTP); 5170 if (TTP->isParameterPack()) 5171 ArgType = getPackExpansionType(ArgType, std::nullopt); 5172 5173 Arg = TemplateArgument(ArgType); 5174 } else if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param)) { 5175 QualType T = 5176 NTTP->getType().getNonPackExpansionType().getNonLValueExprType(*this); 5177 // For class NTTPs, ensure we include the 'const' so the type matches that 5178 // of a real template argument. 5179 // FIXME: It would be more faithful to model this as something like an 5180 // lvalue-to-rvalue conversion applied to a const-qualified lvalue. 
5181 if (T->isRecordType()) 5182 T.addConst(); 5183 Expr *E = new (*this) DeclRefExpr( 5184 *this, NTTP, /*RefersToEnclosingVariableOrCapture*/ false, T, 5185 Expr::getValueKindForType(NTTP->getType()), NTTP->getLocation()); 5186 5187 if (NTTP->isParameterPack()) 5188 E = new (*this) 5189 PackExpansionExpr(DependentTy, E, NTTP->getLocation(), std::nullopt); 5190 Arg = TemplateArgument(E); 5191 } else { 5192 auto *TTP = cast<TemplateTemplateParmDecl>(Param); 5193 if (TTP->isParameterPack()) 5194 Arg = TemplateArgument(TemplateName(TTP), std::optional<unsigned>()); 5195 else 5196 Arg = TemplateArgument(TemplateName(TTP)); 5197 } 5198 5199 if (Param->isTemplateParameterPack()) 5200 Arg = TemplateArgument::CreatePackCopy(*this, Arg); 5201 5202 return Arg; 5203 } 5204 5205 void 5206 ASTContext::getInjectedTemplateArgs(const TemplateParameterList *Params, 5207 SmallVectorImpl<TemplateArgument> &Args) { 5208 Args.reserve(Args.size() + Params->size()); 5209 5210 for (NamedDecl *Param : *Params) 5211 Args.push_back(getInjectedTemplateArg(Param)); 5212 } 5213 5214 QualType ASTContext::getPackExpansionType(QualType Pattern, 5215 std::optional<unsigned> NumExpansions, 5216 bool ExpectPackInType) { 5217 assert((!ExpectPackInType || Pattern->containsUnexpandedParameterPack()) && 5218 "Pack expansions must expand one or more parameter packs"); 5219 5220 llvm::FoldingSetNodeID ID; 5221 PackExpansionType::Profile(ID, Pattern, NumExpansions); 5222 5223 void *InsertPos = nullptr; 5224 PackExpansionType *T = PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos); 5225 if (T) 5226 return QualType(T, 0); 5227 5228 QualType Canon; 5229 if (!Pattern.isCanonical()) { 5230 Canon = getPackExpansionType(getCanonicalType(Pattern), NumExpansions, 5231 /*ExpectPackInType=*/false); 5232 5233 // Find the insert position again, in case we inserted an element into 5234 // PackExpansionTypes and invalidated our insert position. 5235 PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos); 5236 } 5237 5238 T = new (*this, alignof(PackExpansionType)) 5239 PackExpansionType(Pattern, Canon, NumExpansions); 5240 Types.push_back(T); 5241 PackExpansionTypes.InsertNode(T, InsertPos); 5242 return QualType(T, 0); 5243 } 5244 5245 /// CmpProtocolNames - Comparison predicate for sorting protocols 5246 /// alphabetically. 5247 static int CmpProtocolNames(ObjCProtocolDecl *const *LHS, 5248 ObjCProtocolDecl *const *RHS) { 5249 return DeclarationName::compare((*LHS)->getDeclName(), (*RHS)->getDeclName()); 5250 } 5251 5252 static bool areSortedAndUniqued(ArrayRef<ObjCProtocolDecl *> Protocols) { 5253 if (Protocols.empty()) return true; 5254 5255 if (Protocols[0]->getCanonicalDecl() != Protocols[0]) 5256 return false; 5257 5258 for (unsigned i = 1; i != Protocols.size(); ++i) 5259 if (CmpProtocolNames(&Protocols[i - 1], &Protocols[i]) >= 0 || 5260 Protocols[i]->getCanonicalDecl() != Protocols[i]) 5261 return false; 5262 return true; 5263 } 5264 5265 static void 5266 SortAndUniqueProtocols(SmallVectorImpl<ObjCProtocolDecl *> &Protocols) { 5267 // Sort protocols, keyed by name. 5268 llvm::array_pod_sort(Protocols.begin(), Protocols.end(), CmpProtocolNames); 5269 5270 // Canonicalize. 5271 for (ObjCProtocolDecl *&P : Protocols) 5272 P = P->getCanonicalDecl(); 5273 5274 // Remove duplicates. 
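  // (std::unique relies on the sort above having placed equal protocols next
  // to each other.)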
5275 auto ProtocolsEnd = std::unique(Protocols.begin(), Protocols.end()); 5276 Protocols.erase(ProtocolsEnd, Protocols.end()); 5277 } 5278 5279 QualType ASTContext::getObjCObjectType(QualType BaseType, 5280 ObjCProtocolDecl * const *Protocols, 5281 unsigned NumProtocols) const { 5282 return getObjCObjectType(BaseType, {}, 5283 llvm::ArrayRef(Protocols, NumProtocols), 5284 /*isKindOf=*/false); 5285 } 5286 5287 QualType ASTContext::getObjCObjectType( 5288 QualType baseType, 5289 ArrayRef<QualType> typeArgs, 5290 ArrayRef<ObjCProtocolDecl *> protocols, 5291 bool isKindOf) const { 5292 // If the base type is an interface and there aren't any protocols or 5293 // type arguments to add, then the interface type will do just fine. 5294 if (typeArgs.empty() && protocols.empty() && !isKindOf && 5295 isa<ObjCInterfaceType>(baseType)) 5296 return baseType; 5297 5298 // Look in the folding set for an existing type. 5299 llvm::FoldingSetNodeID ID; 5300 ObjCObjectTypeImpl::Profile(ID, baseType, typeArgs, protocols, isKindOf); 5301 void *InsertPos = nullptr; 5302 if (ObjCObjectType *QT = ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos)) 5303 return QualType(QT, 0); 5304 5305 // Determine the type arguments to be used for canonicalization, 5306 // which may be explicitly specified here or written on the base 5307 // type. 5308 ArrayRef<QualType> effectiveTypeArgs = typeArgs; 5309 if (effectiveTypeArgs.empty()) { 5310 if (const auto *baseObject = baseType->getAs<ObjCObjectType>()) 5311 effectiveTypeArgs = baseObject->getTypeArgs(); 5312 } 5313 5314 // Build the canonical type, which has the canonical base type and a 5315 // sorted-and-uniqued list of protocols and the type arguments 5316 // canonicalized. 5317 QualType canonical; 5318 bool typeArgsAreCanonical = llvm::all_of( 5319 effectiveTypeArgs, [&](QualType type) { return type.isCanonical(); }); 5320 bool protocolsSorted = areSortedAndUniqued(protocols); 5321 if (!typeArgsAreCanonical || !protocolsSorted || !baseType.isCanonical()) { 5322 // Determine the canonical type arguments. 5323 ArrayRef<QualType> canonTypeArgs; 5324 SmallVector<QualType, 4> canonTypeArgsVec; 5325 if (!typeArgsAreCanonical) { 5326 canonTypeArgsVec.reserve(effectiveTypeArgs.size()); 5327 for (auto typeArg : effectiveTypeArgs) 5328 canonTypeArgsVec.push_back(getCanonicalType(typeArg)); 5329 canonTypeArgs = canonTypeArgsVec; 5330 } else { 5331 canonTypeArgs = effectiveTypeArgs; 5332 } 5333 5334 ArrayRef<ObjCProtocolDecl *> canonProtocols; 5335 SmallVector<ObjCProtocolDecl*, 8> canonProtocolsVec; 5336 if (!protocolsSorted) { 5337 canonProtocolsVec.append(protocols.begin(), protocols.end()); 5338 SortAndUniqueProtocols(canonProtocolsVec); 5339 canonProtocols = canonProtocolsVec; 5340 } else { 5341 canonProtocols = protocols; 5342 } 5343 5344 canonical = getObjCObjectType(getCanonicalType(baseType), canonTypeArgs, 5345 canonProtocols, isKindOf); 5346 5347 // Regenerate InsertPos. 5348 ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos); 5349 } 5350 5351 unsigned size = sizeof(ObjCObjectTypeImpl); 5352 size += typeArgs.size() * sizeof(QualType); 5353 size += protocols.size() * sizeof(ObjCProtocolDecl *); 5354 void *mem = Allocate(size, alignof(ObjCObjectTypeImpl)); 5355 auto *T = 5356 new (mem) ObjCObjectTypeImpl(canonical, baseType, typeArgs, protocols, 5357 isKindOf); 5358 5359 Types.push_back(T); 5360 ObjCObjectTypes.InsertNode(T, InsertPos); 5361 return QualType(T, 0); 5362 } 5363 5364 /// Apply Objective-C protocol qualifiers to the given type. 
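/// For example, applying <NSCopying> to 'id' yields 'id<NSCopying>', and
/// applying it to an interface object type yields a protocol-qualified object
/// type.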
5365 /// If this is for the canonical type of a type parameter, we can apply 5366 /// protocol qualifiers on the ObjCObjectPointerType. 5367 QualType 5368 ASTContext::applyObjCProtocolQualifiers(QualType type, 5369 ArrayRef<ObjCProtocolDecl *> protocols, bool &hasError, 5370 bool allowOnPointerType) const { 5371 hasError = false; 5372 5373 if (const auto *objT = dyn_cast<ObjCTypeParamType>(type.getTypePtr())) { 5374 return getObjCTypeParamType(objT->getDecl(), protocols); 5375 } 5376 5377 // Apply protocol qualifiers to ObjCObjectPointerType. 5378 if (allowOnPointerType) { 5379 if (const auto *objPtr = 5380 dyn_cast<ObjCObjectPointerType>(type.getTypePtr())) { 5381 const ObjCObjectType *objT = objPtr->getObjectType(); 5382 // Merge protocol lists and construct ObjCObjectType. 5383 SmallVector<ObjCProtocolDecl*, 8> protocolsVec; 5384 protocolsVec.append(objT->qual_begin(), 5385 objT->qual_end()); 5386 protocolsVec.append(protocols.begin(), protocols.end()); 5387 ArrayRef<ObjCProtocolDecl *> protocols = protocolsVec; 5388 type = getObjCObjectType( 5389 objT->getBaseType(), 5390 objT->getTypeArgsAsWritten(), 5391 protocols, 5392 objT->isKindOfTypeAsWritten()); 5393 return getObjCObjectPointerType(type); 5394 } 5395 } 5396 5397 // Apply protocol qualifiers to ObjCObjectType. 5398 if (const auto *objT = dyn_cast<ObjCObjectType>(type.getTypePtr())){ 5399 // FIXME: Check for protocols to which the class type is already 5400 // known to conform. 5401 5402 return getObjCObjectType(objT->getBaseType(), 5403 objT->getTypeArgsAsWritten(), 5404 protocols, 5405 objT->isKindOfTypeAsWritten()); 5406 } 5407 5408 // If the canonical type is ObjCObjectType, ... 5409 if (type->isObjCObjectType()) { 5410 // Silently overwrite any existing protocol qualifiers. 5411 // TODO: determine whether that's the right thing to do. 5412 5413 // FIXME: Check for protocols to which the class type is already 5414 // known to conform. 5415 return getObjCObjectType(type, {}, protocols, false); 5416 } 5417 5418 // id<protocol-list> 5419 if (type->isObjCIdType()) { 5420 const auto *objPtr = type->castAs<ObjCObjectPointerType>(); 5421 type = getObjCObjectType(ObjCBuiltinIdTy, {}, protocols, 5422 objPtr->isKindOfType()); 5423 return getObjCObjectPointerType(type); 5424 } 5425 5426 // Class<protocol-list> 5427 if (type->isObjCClassType()) { 5428 const auto *objPtr = type->castAs<ObjCObjectPointerType>(); 5429 type = getObjCObjectType(ObjCBuiltinClassTy, {}, protocols, 5430 objPtr->isKindOfType()); 5431 return getObjCObjectPointerType(type); 5432 } 5433 5434 hasError = true; 5435 return type; 5436 } 5437 5438 QualType 5439 ASTContext::getObjCTypeParamType(const ObjCTypeParamDecl *Decl, 5440 ArrayRef<ObjCProtocolDecl *> protocols) const { 5441 // Look in the folding set for an existing type. 5442 llvm::FoldingSetNodeID ID; 5443 ObjCTypeParamType::Profile(ID, Decl, Decl->getUnderlyingType(), protocols); 5444 void *InsertPos = nullptr; 5445 if (ObjCTypeParamType *TypeParam = 5446 ObjCTypeParamTypes.FindNodeOrInsertPos(ID, InsertPos)) 5447 return QualType(TypeParam, 0); 5448 5449 // We canonicalize to the underlying type. 5450 QualType Canonical = getCanonicalType(Decl->getUnderlyingType()); 5451 if (!protocols.empty()) { 5452 // Apply the protocol qualifers. 
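    // The canonical form of a qualified type parameter is its underlying
    // (bound) type with the protocol list applied, e.g. 'id<NSObject>' for a
    // parameter bounded by 'id' and qualified with NSObject.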
5453 bool hasError; 5454 Canonical = getCanonicalType(applyObjCProtocolQualifiers( 5455 Canonical, protocols, hasError, true /*allowOnPointerType*/)); 5456 assert(!hasError && "Error when apply protocol qualifier to bound type"); 5457 } 5458 5459 unsigned size = sizeof(ObjCTypeParamType); 5460 size += protocols.size() * sizeof(ObjCProtocolDecl *); 5461 void *mem = Allocate(size, alignof(ObjCTypeParamType)); 5462 auto *newType = new (mem) ObjCTypeParamType(Decl, Canonical, protocols); 5463 5464 Types.push_back(newType); 5465 ObjCTypeParamTypes.InsertNode(newType, InsertPos); 5466 return QualType(newType, 0); 5467 } 5468 5469 void ASTContext::adjustObjCTypeParamBoundType(const ObjCTypeParamDecl *Orig, 5470 ObjCTypeParamDecl *New) const { 5471 New->setTypeSourceInfo(getTrivialTypeSourceInfo(Orig->getUnderlyingType())); 5472 // Update TypeForDecl after updating TypeSourceInfo. 5473 auto NewTypeParamTy = cast<ObjCTypeParamType>(New->getTypeForDecl()); 5474 SmallVector<ObjCProtocolDecl *, 8> protocols; 5475 protocols.append(NewTypeParamTy->qual_begin(), NewTypeParamTy->qual_end()); 5476 QualType UpdatedTy = getObjCTypeParamType(New, protocols); 5477 New->setTypeForDecl(UpdatedTy.getTypePtr()); 5478 } 5479 5480 /// ObjCObjectAdoptsQTypeProtocols - Checks that protocols in IC's 5481 /// protocol list adopt all protocols in QT's qualified-id protocol 5482 /// list. 5483 bool ASTContext::ObjCObjectAdoptsQTypeProtocols(QualType QT, 5484 ObjCInterfaceDecl *IC) { 5485 if (!QT->isObjCQualifiedIdType()) 5486 return false; 5487 5488 if (const auto *OPT = QT->getAs<ObjCObjectPointerType>()) { 5489 // If both the right and left sides have qualifiers. 5490 for (auto *Proto : OPT->quals()) { 5491 if (!IC->ClassImplementsProtocol(Proto, false)) 5492 return false; 5493 } 5494 return true; 5495 } 5496 return false; 5497 } 5498 5499 /// QIdProtocolsAdoptObjCObjectProtocols - Checks that protocols in 5500 /// QT's qualified-id protocol list adopt all protocols in IDecl's list 5501 /// of protocols. 5502 bool ASTContext::QIdProtocolsAdoptObjCObjectProtocols(QualType QT, 5503 ObjCInterfaceDecl *IDecl) { 5504 if (!QT->isObjCQualifiedIdType()) 5505 return false; 5506 const auto *OPT = QT->getAs<ObjCObjectPointerType>(); 5507 if (!OPT) 5508 return false; 5509 if (!IDecl->hasDefinition()) 5510 return false; 5511 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> InheritedProtocols; 5512 CollectInheritedProtocols(IDecl, InheritedProtocols); 5513 if (InheritedProtocols.empty()) 5514 return false; 5515 // Check that if every protocol in list of id<plist> conforms to a protocol 5516 // of IDecl's, then bridge casting is ok. 5517 bool Conforms = false; 5518 for (auto *Proto : OPT->quals()) { 5519 Conforms = false; 5520 for (auto *PI : InheritedProtocols) { 5521 if (ProtocolCompatibleWithProtocol(Proto, PI)) { 5522 Conforms = true; 5523 break; 5524 } 5525 } 5526 if (!Conforms) 5527 break; 5528 } 5529 if (Conforms) 5530 return true; 5531 5532 for (auto *PI : InheritedProtocols) { 5533 // If both the right and left sides have qualifiers. 5534 bool Adopts = false; 5535 for (auto *Proto : OPT->quals()) { 5536 // return 'true' if 'PI' is in the inheritance hierarchy of Proto 5537 if ((Adopts = ProtocolCompatibleWithProtocol(PI, Proto))) 5538 break; 5539 } 5540 if (!Adopts) 5541 return false; 5542 } 5543 return true; 5544 } 5545 5546 /// getObjCObjectPointerType - Return a ObjCObjectPointerType type for 5547 /// the given object type. 
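/// For example, for the object type 'NSString<NSCopying>' this returns the
/// type written in source as 'NSString<NSCopying> *'.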
5548 QualType ASTContext::getObjCObjectPointerType(QualType ObjectT) const { 5549 llvm::FoldingSetNodeID ID; 5550 ObjCObjectPointerType::Profile(ID, ObjectT); 5551 5552 void *InsertPos = nullptr; 5553 if (ObjCObjectPointerType *QT = 5554 ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 5555 return QualType(QT, 0); 5556 5557 // Find the canonical object type. 5558 QualType Canonical; 5559 if (!ObjectT.isCanonical()) { 5560 Canonical = getObjCObjectPointerType(getCanonicalType(ObjectT)); 5561 5562 // Regenerate InsertPos. 5563 ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos); 5564 } 5565 5566 // No match. 5567 void *Mem = 5568 Allocate(sizeof(ObjCObjectPointerType), alignof(ObjCObjectPointerType)); 5569 auto *QType = 5570 new (Mem) ObjCObjectPointerType(Canonical, ObjectT); 5571 5572 Types.push_back(QType); 5573 ObjCObjectPointerTypes.InsertNode(QType, InsertPos); 5574 return QualType(QType, 0); 5575 } 5576 5577 /// getObjCInterfaceType - Return the unique reference to the type for the 5578 /// specified ObjC interface decl. The list of protocols is optional. 5579 QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl, 5580 ObjCInterfaceDecl *PrevDecl) const { 5581 if (Decl->TypeForDecl) 5582 return QualType(Decl->TypeForDecl, 0); 5583 5584 if (PrevDecl) { 5585 assert(PrevDecl->TypeForDecl && "previous decl has no TypeForDecl"); 5586 Decl->TypeForDecl = PrevDecl->TypeForDecl; 5587 return QualType(PrevDecl->TypeForDecl, 0); 5588 } 5589 5590 // Prefer the definition, if there is one. 5591 if (const ObjCInterfaceDecl *Def = Decl->getDefinition()) 5592 Decl = Def; 5593 5594 void *Mem = Allocate(sizeof(ObjCInterfaceType), alignof(ObjCInterfaceType)); 5595 auto *T = new (Mem) ObjCInterfaceType(Decl); 5596 Decl->TypeForDecl = T; 5597 Types.push_back(T); 5598 return QualType(T, 0); 5599 } 5600 5601 /// getTypeOfExprType - Unlike many "get<Type>" functions, we can't unique 5602 /// TypeOfExprType AST's (since expression's are never shared). For example, 5603 /// multiple declarations that refer to "typeof(x)" all contain different 5604 /// DeclRefExpr's. This doesn't effect the type checker, since it operates 5605 /// on canonical type's (which are always unique). 5606 QualType ASTContext::getTypeOfExprType(Expr *tofExpr, TypeOfKind Kind) const { 5607 TypeOfExprType *toe; 5608 if (tofExpr->isTypeDependent()) { 5609 llvm::FoldingSetNodeID ID; 5610 DependentTypeOfExprType::Profile(ID, *this, tofExpr, 5611 Kind == TypeOfKind::Unqualified); 5612 5613 void *InsertPos = nullptr; 5614 DependentTypeOfExprType *Canon = 5615 DependentTypeOfExprTypes.FindNodeOrInsertPos(ID, InsertPos); 5616 if (Canon) { 5617 // We already have a "canonical" version of an identical, dependent 5618 // typeof(expr) type. Use that as our canonical type. 5619 toe = new (*this, alignof(TypeOfExprType)) 5620 TypeOfExprType(tofExpr, Kind, QualType((TypeOfExprType *)Canon, 0)); 5621 } else { 5622 // Build a new, canonical typeof(expr) type. 5623 Canon = new (*this, alignof(DependentTypeOfExprType)) 5624 DependentTypeOfExprType(tofExpr, Kind); 5625 DependentTypeOfExprTypes.InsertNode(Canon, InsertPos); 5626 toe = Canon; 5627 } 5628 } else { 5629 QualType Canonical = getCanonicalType(tofExpr->getType()); 5630 toe = new (*this, alignof(TypeOfExprType)) 5631 TypeOfExprType(tofExpr, Kind, Canonical); 5632 } 5633 Types.push_back(toe); 5634 return QualType(toe, 0); 5635 } 5636 5637 /// getTypeOfType - Unlike many "get<Type>" functions, we don't unique 5638 /// TypeOfType nodes. 
The only motivation to unique these nodes would be 5639 /// memory savings. Since typeof(t) is fairly uncommon, space shouldn't be 5640 /// an issue. This doesn't affect the type checker, since it operates 5641 /// on canonical types (which are always unique). 5642 QualType ASTContext::getTypeOfType(QualType tofType, TypeOfKind Kind) const { 5643 QualType Canonical = getCanonicalType(tofType); 5644 auto *tot = 5645 new (*this, alignof(TypeOfType)) TypeOfType(tofType, Canonical, Kind); 5646 Types.push_back(tot); 5647 return QualType(tot, 0); 5648 } 5649 5650 /// getReferenceQualifiedType - Given an expr, will return the type for 5651 /// that expression, as in [dcl.type.simple]p4 but without taking id-expressions 5652 /// and class member access into account. 5653 QualType ASTContext::getReferenceQualifiedType(const Expr *E) const { 5654 // C++11 [dcl.type.simple]p4: 5655 // [...] 5656 QualType T = E->getType(); 5657 switch (E->getValueKind()) { 5658 // - otherwise, if e is an xvalue, decltype(e) is T&&, where T is the 5659 // type of e; 5660 case VK_XValue: 5661 return getRValueReferenceType(T); 5662 // - otherwise, if e is an lvalue, decltype(e) is T&, where T is the 5663 // type of e; 5664 case VK_LValue: 5665 return getLValueReferenceType(T); 5666 // - otherwise, decltype(e) is the type of e. 5667 case VK_PRValue: 5668 return T; 5669 } 5670 llvm_unreachable("Unknown value kind"); 5671 } 5672 5673 /// Unlike many "get<Type>" functions, we don't unique DecltypeType 5674 /// nodes. This would never be helpful, since each such type has its own 5675 /// expression, and would not give a significant memory saving, since there 5676 /// is an Expr tree under each such type. 5677 QualType ASTContext::getDecltypeType(Expr *e, QualType UnderlyingType) const { 5678 DecltypeType *dt; 5679 5680 // C++11 [temp.type]p2: 5681 // If an expression e involves a template parameter, decltype(e) denotes a 5682 // unique dependent type. Two such decltype-specifiers refer to the same 5683 // type only if their expressions are equivalent (14.5.6.1). 5684 if (e->isInstantiationDependent()) { 5685 llvm::FoldingSetNodeID ID; 5686 DependentDecltypeType::Profile(ID, *this, e); 5687 5688 void *InsertPos = nullptr; 5689 DependentDecltypeType *Canon 5690 = DependentDecltypeTypes.FindNodeOrInsertPos(ID, InsertPos); 5691 if (!Canon) { 5692 // Build a new, canonical decltype(expr) type. 5693 Canon = new (*this, alignof(DependentDecltypeType)) 5694 DependentDecltypeType(e, DependentTy); 5695 DependentDecltypeTypes.InsertNode(Canon, InsertPos); 5696 } 5697 dt = new (*this, alignof(DecltypeType)) 5698 DecltypeType(e, UnderlyingType, QualType((DecltypeType *)Canon, 0)); 5699 } else { 5700 dt = new (*this, alignof(DecltypeType)) 5701 DecltypeType(e, UnderlyingType, getCanonicalType(UnderlyingType)); 5702 } 5703 Types.push_back(dt); 5704 return QualType(dt, 0); 5705 } 5706 5707 /// getUnaryTransformationType - We don't unique these, since the memory 5708 /// savings are minimal and these are rare. 5709 QualType ASTContext::getUnaryTransformType(QualType BaseType, 5710 QualType UnderlyingType, 5711 UnaryTransformType::UTTKind Kind) 5712 const { 5713 UnaryTransformType *ut = nullptr; 5714 5715 if (BaseType->isDependentType()) { 5716 // Look in the folding set for an existing type. 
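    // A dependent transform (e.g. __underlying_type(T) with T still a template
    // parameter) cannot be evaluated yet, so it is uniqued on the canonical
    // base type and transform kind and serves as its own canonical type.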
5717 llvm::FoldingSetNodeID ID; 5718 DependentUnaryTransformType::Profile(ID, getCanonicalType(BaseType), Kind); 5719 5720 void *InsertPos = nullptr; 5721 DependentUnaryTransformType *Canon 5722 = DependentUnaryTransformTypes.FindNodeOrInsertPos(ID, InsertPos); 5723 5724 if (!Canon) { 5725 // Build a new, canonical __underlying_type(type) type. 5726 Canon = new (*this, alignof(DependentUnaryTransformType)) 5727 DependentUnaryTransformType(*this, getCanonicalType(BaseType), Kind); 5728 DependentUnaryTransformTypes.InsertNode(Canon, InsertPos); 5729 } 5730 ut = new (*this, alignof(UnaryTransformType)) 5731 UnaryTransformType(BaseType, QualType(), Kind, QualType(Canon, 0)); 5732 } else { 5733 QualType CanonType = getCanonicalType(UnderlyingType); 5734 ut = new (*this, alignof(UnaryTransformType)) 5735 UnaryTransformType(BaseType, UnderlyingType, Kind, CanonType); 5736 } 5737 Types.push_back(ut); 5738 return QualType(ut, 0); 5739 } 5740 5741 QualType ASTContext::getAutoTypeInternal( 5742 QualType DeducedType, AutoTypeKeyword Keyword, bool IsDependent, 5743 bool IsPack, ConceptDecl *TypeConstraintConcept, 5744 ArrayRef<TemplateArgument> TypeConstraintArgs, bool IsCanon) const { 5745 if (DeducedType.isNull() && Keyword == AutoTypeKeyword::Auto && 5746 !TypeConstraintConcept && !IsDependent) 5747 return getAutoDeductType(); 5748 5749 // Look in the folding set for an existing type. 5750 void *InsertPos = nullptr; 5751 llvm::FoldingSetNodeID ID; 5752 AutoType::Profile(ID, *this, DeducedType, Keyword, IsDependent, 5753 TypeConstraintConcept, TypeConstraintArgs); 5754 if (AutoType *AT = AutoTypes.FindNodeOrInsertPos(ID, InsertPos)) 5755 return QualType(AT, 0); 5756 5757 QualType Canon; 5758 if (!IsCanon) { 5759 if (!DeducedType.isNull()) { 5760 Canon = DeducedType.getCanonicalType(); 5761 } else if (TypeConstraintConcept) { 5762 bool AnyNonCanonArgs = false; 5763 ConceptDecl *CanonicalConcept = TypeConstraintConcept->getCanonicalDecl(); 5764 auto CanonicalConceptArgs = ::getCanonicalTemplateArguments( 5765 *this, TypeConstraintArgs, AnyNonCanonArgs); 5766 if (CanonicalConcept != TypeConstraintConcept || AnyNonCanonArgs) { 5767 Canon = 5768 getAutoTypeInternal(QualType(), Keyword, IsDependent, IsPack, 5769 CanonicalConcept, CanonicalConceptArgs, true); 5770 // Find the insert position again. 5771 [[maybe_unused]] auto *Nothing = 5772 AutoTypes.FindNodeOrInsertPos(ID, InsertPos); 5773 assert(!Nothing && "canonical type broken"); 5774 } 5775 } 5776 } 5777 5778 void *Mem = Allocate(sizeof(AutoType) + 5779 sizeof(TemplateArgument) * TypeConstraintArgs.size(), 5780 alignof(AutoType)); 5781 auto *AT = new (Mem) AutoType( 5782 DeducedType, Keyword, 5783 (IsDependent ? TypeDependence::DependentInstantiation 5784 : TypeDependence::None) | 5785 (IsPack ? TypeDependence::UnexpandedPack : TypeDependence::None), 5786 Canon, TypeConstraintConcept, TypeConstraintArgs); 5787 Types.push_back(AT); 5788 AutoTypes.InsertNode(AT, InsertPos); 5789 return QualType(AT, 0); 5790 } 5791 5792 /// getAutoType - Return the uniqued reference to the 'auto' type which has been 5793 /// deduced to the given type, or to the canonical undeduced 'auto' type, or the 5794 /// canonical deduced-but-dependent 'auto' type. 
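/// For example, after deducing 'auto x = 0;' the placeholder is represented as
/// an AutoType whose deduced type is 'int'; a constrained placeholder also
/// records the named concept and its template arguments.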
5795 QualType 5796 ASTContext::getAutoType(QualType DeducedType, AutoTypeKeyword Keyword, 5797 bool IsDependent, bool IsPack, 5798 ConceptDecl *TypeConstraintConcept, 5799 ArrayRef<TemplateArgument> TypeConstraintArgs) const { 5800 assert((!IsPack || IsDependent) && "only use IsPack for a dependent pack"); 5801 assert((!IsDependent || DeducedType.isNull()) && 5802 "A dependent auto should be undeduced"); 5803 return getAutoTypeInternal(DeducedType, Keyword, IsDependent, IsPack, 5804 TypeConstraintConcept, TypeConstraintArgs); 5805 } 5806 5807 QualType ASTContext::getUnconstrainedType(QualType T) const { 5808 QualType CanonT = T.getCanonicalType(); 5809 5810 // Remove a type-constraint from a top-level auto or decltype(auto). 5811 if (auto *AT = CanonT->getAs<AutoType>()) { 5812 if (!AT->isConstrained()) 5813 return T; 5814 return getQualifiedType(getAutoType(QualType(), AT->getKeyword(), false, 5815 AT->containsUnexpandedParameterPack()), 5816 T.getQualifiers()); 5817 } 5818 5819 // FIXME: We only support constrained auto at the top level in the type of a 5820 // non-type template parameter at the moment. Once we lift that restriction, 5821 // we'll need to recursively build types containing auto here. 5822 assert(!CanonT->getContainedAutoType() || 5823 !CanonT->getContainedAutoType()->isConstrained()); 5824 return T; 5825 } 5826 5827 /// Return the uniqued reference to the deduced template specialization type 5828 /// which has been deduced to the given type, or to the canonical undeduced 5829 /// such type, or the canonical deduced-but-dependent such type. 5830 QualType ASTContext::getDeducedTemplateSpecializationType( 5831 TemplateName Template, QualType DeducedType, bool IsDependent) const { 5832 // Look in the folding set for an existing type. 5833 void *InsertPos = nullptr; 5834 llvm::FoldingSetNodeID ID; 5835 DeducedTemplateSpecializationType::Profile(ID, Template, DeducedType, 5836 IsDependent); 5837 if (DeducedTemplateSpecializationType *DTST = 5838 DeducedTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos)) 5839 return QualType(DTST, 0); 5840 5841 auto *DTST = new (*this, alignof(DeducedTemplateSpecializationType)) 5842 DeducedTemplateSpecializationType(Template, DeducedType, IsDependent); 5843 llvm::FoldingSetNodeID TempID; 5844 DTST->Profile(TempID); 5845 assert(ID == TempID && "ID does not match"); 5846 Types.push_back(DTST); 5847 DeducedTemplateSpecializationTypes.InsertNode(DTST, InsertPos); 5848 return QualType(DTST, 0); 5849 } 5850 5851 /// getAtomicType - Return the uniqued reference to the atomic type for 5852 /// the given value type. 5853 QualType ASTContext::getAtomicType(QualType T) const { 5854 // Unique pointers, to guarantee there is only one pointer of a particular 5855 // structure. 5856 llvm::FoldingSetNodeID ID; 5857 AtomicType::Profile(ID, T); 5858 5859 void *InsertPos = nullptr; 5860 if (AtomicType *AT = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos)) 5861 return QualType(AT, 0); 5862 5863 // If the atomic value type isn't canonical, this won't be a canonical type 5864 // either, so fill in the canonical type field. 5865 QualType Canonical; 5866 if (!T.isCanonical()) { 5867 Canonical = getAtomicType(getCanonicalType(T)); 5868 5869 // Get the new insert position for the node we care about. 
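    // (Creating the canonical atomic type above may have inserted into
    // AtomicTypes and invalidated the previously computed InsertPos.)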
5870 AtomicType *NewIP = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos); 5871 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 5872 } 5873 auto *New = new (*this, alignof(AtomicType)) AtomicType(T, Canonical); 5874 Types.push_back(New); 5875 AtomicTypes.InsertNode(New, InsertPos); 5876 return QualType(New, 0); 5877 } 5878 5879 /// getAutoDeductType - Get type pattern for deducing against 'auto'. 5880 QualType ASTContext::getAutoDeductType() const { 5881 if (AutoDeductTy.isNull()) 5882 AutoDeductTy = QualType(new (*this, alignof(AutoType)) 5883 AutoType(QualType(), AutoTypeKeyword::Auto, 5884 TypeDependence::None, QualType(), 5885 /*concept*/ nullptr, /*args*/ {}), 5886 0); 5887 return AutoDeductTy; 5888 } 5889 5890 /// getAutoRRefDeductType - Get type pattern for deducing against 'auto &&'. 5891 QualType ASTContext::getAutoRRefDeductType() const { 5892 if (AutoRRefDeductTy.isNull()) 5893 AutoRRefDeductTy = getRValueReferenceType(getAutoDeductType()); 5894 assert(!AutoRRefDeductTy.isNull() && "can't build 'auto &&' pattern"); 5895 return AutoRRefDeductTy; 5896 } 5897 5898 /// getTagDeclType - Return the unique reference to the type for the 5899 /// specified TagDecl (struct/union/class/enum) decl. 5900 QualType ASTContext::getTagDeclType(const TagDecl *Decl) const { 5901 assert(Decl); 5902 // FIXME: What is the design on getTagDeclType when it requires casting 5903 // away const? mutable? 5904 return getTypeDeclType(const_cast<TagDecl*>(Decl)); 5905 } 5906 5907 /// getSizeType - Return the unique type for "size_t" (C99 7.17), the result 5908 /// of the sizeof operator (C99 6.5.3.4p4). The value is target dependent and 5909 /// needs to agree with the definition in <stddef.h>. 5910 CanQualType ASTContext::getSizeType() const { 5911 return getFromTargetType(Target->getSizeType()); 5912 } 5913 5914 /// Return the unique signed counterpart of the integer type 5915 /// corresponding to size_t. 5916 CanQualType ASTContext::getSignedSizeType() const { 5917 return getFromTargetType(Target->getSignedSizeType()); 5918 } 5919 5920 /// getIntMaxType - Return the unique type for "intmax_t" (C99 7.18.1.5). 5921 CanQualType ASTContext::getIntMaxType() const { 5922 return getFromTargetType(Target->getIntMaxType()); 5923 } 5924 5925 /// getUIntMaxType - Return the unique type for "uintmax_t" (C99 7.18.1.5). 5926 CanQualType ASTContext::getUIntMaxType() const { 5927 return getFromTargetType(Target->getUIntMaxType()); 5928 } 5929 5930 /// getSignedWCharType - Return the type of "signed wchar_t". 5931 /// Used when in C++, as a GCC extension. 5932 QualType ASTContext::getSignedWCharType() const { 5933 // FIXME: derive from "Target" ? 5934 return WCharTy; 5935 } 5936 5937 /// getUnsignedWCharType - Return the type of "unsigned wchar_t". 5938 /// Used when in C++, as a GCC extension. 5939 QualType ASTContext::getUnsignedWCharType() const { 5940 // FIXME: derive from "Target" ? 5941 return UnsignedIntTy; 5942 } 5943 5944 QualType ASTContext::getIntPtrType() const { 5945 return getFromTargetType(Target->getIntPtrType()); 5946 } 5947 5948 QualType ASTContext::getUIntPtrType() const { 5949 return getCorrespondingUnsignedType(getIntPtrType()); 5950 } 5951 5952 /// getPointerDiffType - Return the unique type for "ptrdiff_t" (C99 7.17) 5953 /// defined in <stddef.h>. Pointer - pointer requires this (C99 6.5.6p9). 
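/// The underlying type is target-dependent; on typical LP64 targets it is
/// 'long', and on 64-bit Windows it is 'long long'.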
5954 QualType ASTContext::getPointerDiffType() const { 5955 return getFromTargetType(Target->getPtrDiffType(LangAS::Default)); 5956 } 5957 5958 /// Return the unique unsigned counterpart of "ptrdiff_t" 5959 /// integer type. The standard (C11 7.21.6.1p7) refers to this type 5960 /// in the definition of %tu format specifier. 5961 QualType ASTContext::getUnsignedPointerDiffType() const { 5962 return getFromTargetType(Target->getUnsignedPtrDiffType(LangAS::Default)); 5963 } 5964 5965 /// Return the unique type for "pid_t" defined in 5966 /// <sys/types.h>. We need this to compute the correct type for vfork(). 5967 QualType ASTContext::getProcessIDType() const { 5968 return getFromTargetType(Target->getProcessIDType()); 5969 } 5970 5971 //===----------------------------------------------------------------------===// 5972 // Type Operators 5973 //===----------------------------------------------------------------------===// 5974 5975 CanQualType ASTContext::getCanonicalParamType(QualType T) const { 5976 // Push qualifiers into arrays, and then discard any remaining 5977 // qualifiers. 5978 T = getCanonicalType(T); 5979 T = getVariableArrayDecayedType(T); 5980 const Type *Ty = T.getTypePtr(); 5981 QualType Result; 5982 if (isa<ArrayType>(Ty)) { 5983 Result = getArrayDecayedType(QualType(Ty,0)); 5984 } else if (isa<FunctionType>(Ty)) { 5985 Result = getPointerType(QualType(Ty, 0)); 5986 } else { 5987 Result = QualType(Ty, 0); 5988 } 5989 5990 return CanQualType::CreateUnsafe(Result); 5991 } 5992 5993 QualType ASTContext::getUnqualifiedArrayType(QualType type, 5994 Qualifiers &quals) { 5995 SplitQualType splitType = type.getSplitUnqualifiedType(); 5996 5997 // FIXME: getSplitUnqualifiedType() actually walks all the way to 5998 // the unqualified desugared type and then drops it on the floor. 5999 // We then have to strip that sugar back off with 6000 // getUnqualifiedDesugaredType(), which is silly. 6001 const auto *AT = 6002 dyn_cast<ArrayType>(splitType.Ty->getUnqualifiedDesugaredType()); 6003 6004 // If we don't have an array, just use the results in splitType. 6005 if (!AT) { 6006 quals = splitType.Quals; 6007 return QualType(splitType.Ty, 0); 6008 } 6009 6010 // Otherwise, recurse on the array's element type. 6011 QualType elementType = AT->getElementType(); 6012 QualType unqualElementType = getUnqualifiedArrayType(elementType, quals); 6013 6014 // If that didn't change the element type, AT has no qualifiers, so we 6015 // can just use the results in splitType. 6016 if (elementType == unqualElementType) { 6017 assert(quals.empty()); // from the recursive call 6018 quals = splitType.Quals; 6019 return QualType(splitType.Ty, 0); 6020 } 6021 6022 // Otherwise, add in the qualifiers from the outermost type, then 6023 // build the type back up. 
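  // The element qualifiers gathered by the recursive call are merged with this
  // level's qualifiers, and the array type is then rebuilt around the
  // now-unqualified element type.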
6024 quals.addConsistentQualifiers(splitType.Quals); 6025 6026 if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) { 6027 return getConstantArrayType(unqualElementType, CAT->getSize(), 6028 CAT->getSizeExpr(), CAT->getSizeModifier(), 0); 6029 } 6030 6031 if (const auto *IAT = dyn_cast<IncompleteArrayType>(AT)) { 6032 return getIncompleteArrayType(unqualElementType, IAT->getSizeModifier(), 0); 6033 } 6034 6035 if (const auto *VAT = dyn_cast<VariableArrayType>(AT)) { 6036 return getVariableArrayType(unqualElementType, 6037 VAT->getSizeExpr(), 6038 VAT->getSizeModifier(), 6039 VAT->getIndexTypeCVRQualifiers(), 6040 VAT->getBracketsRange()); 6041 } 6042 6043 const auto *DSAT = cast<DependentSizedArrayType>(AT); 6044 return getDependentSizedArrayType(unqualElementType, DSAT->getSizeExpr(), 6045 DSAT->getSizeModifier(), 0, 6046 SourceRange()); 6047 } 6048 6049 /// Attempt to unwrap two types that may both be array types with the same bound 6050 /// (or both be array types of unknown bound) for the purpose of comparing the 6051 /// cv-decomposition of two types per C++ [conv.qual]. 6052 /// 6053 /// \param AllowPiMismatch Allow the Pi1 and Pi2 to differ as described in 6054 /// C++20 [conv.qual], if permitted by the current language mode. 6055 void ASTContext::UnwrapSimilarArrayTypes(QualType &T1, QualType &T2, 6056 bool AllowPiMismatch) { 6057 while (true) { 6058 auto *AT1 = getAsArrayType(T1); 6059 if (!AT1) 6060 return; 6061 6062 auto *AT2 = getAsArrayType(T2); 6063 if (!AT2) 6064 return; 6065 6066 // If we don't have two array types with the same constant bound nor two 6067 // incomplete array types, we've unwrapped everything we can. 6068 // C++20 also permits one type to be a constant array type and the other 6069 // to be an incomplete array type. 6070 // FIXME: Consider also unwrapping array of unknown bound and VLA. 6071 if (auto *CAT1 = dyn_cast<ConstantArrayType>(AT1)) { 6072 auto *CAT2 = dyn_cast<ConstantArrayType>(AT2); 6073 if (!((CAT2 && CAT1->getSize() == CAT2->getSize()) || 6074 (AllowPiMismatch && getLangOpts().CPlusPlus20 && 6075 isa<IncompleteArrayType>(AT2)))) 6076 return; 6077 } else if (isa<IncompleteArrayType>(AT1)) { 6078 if (!(isa<IncompleteArrayType>(AT2) || 6079 (AllowPiMismatch && getLangOpts().CPlusPlus20 && 6080 isa<ConstantArrayType>(AT2)))) 6081 return; 6082 } else { 6083 return; 6084 } 6085 6086 T1 = AT1->getElementType(); 6087 T2 = AT2->getElementType(); 6088 } 6089 } 6090 6091 /// Attempt to unwrap two types that may be similar (C++ [conv.qual]). 6092 /// 6093 /// If T1 and T2 are both pointer types of the same kind, or both array types 6094 /// with the same bound, unwraps layers from T1 and T2 until a pointer type is 6095 /// unwrapped. Top-level qualifiers on T1 and T2 are ignored. 6096 /// 6097 /// This function will typically be called in a loop that successively 6098 /// "unwraps" pointer and pointer-to-member types to compare them at each 6099 /// level. 6100 /// 6101 /// \param AllowPiMismatch Allow the Pi1 and Pi2 to differ as described in 6102 /// C++20 [conv.qual], if permitted by the current language mode. 6103 /// 6104 /// \return \c true if a pointer type was unwrapped, \c false if we reached a 6105 /// pair of types that can't be unwrapped further. 
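/// For example, unwrapping 'const int *' and 'int *' rewrites them to
/// 'const int' and 'int' and returns true; a second call then returns false.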
6106 bool ASTContext::UnwrapSimilarTypes(QualType &T1, QualType &T2, 6107 bool AllowPiMismatch) { 6108 UnwrapSimilarArrayTypes(T1, T2, AllowPiMismatch); 6109 6110 const auto *T1PtrType = T1->getAs<PointerType>(); 6111 const auto *T2PtrType = T2->getAs<PointerType>(); 6112 if (T1PtrType && T2PtrType) { 6113 T1 = T1PtrType->getPointeeType(); 6114 T2 = T2PtrType->getPointeeType(); 6115 return true; 6116 } 6117 6118 const auto *T1MPType = T1->getAs<MemberPointerType>(); 6119 const auto *T2MPType = T2->getAs<MemberPointerType>(); 6120 if (T1MPType && T2MPType && 6121 hasSameUnqualifiedType(QualType(T1MPType->getClass(), 0), 6122 QualType(T2MPType->getClass(), 0))) { 6123 T1 = T1MPType->getPointeeType(); 6124 T2 = T2MPType->getPointeeType(); 6125 return true; 6126 } 6127 6128 if (getLangOpts().ObjC) { 6129 const auto *T1OPType = T1->getAs<ObjCObjectPointerType>(); 6130 const auto *T2OPType = T2->getAs<ObjCObjectPointerType>(); 6131 if (T1OPType && T2OPType) { 6132 T1 = T1OPType->getPointeeType(); 6133 T2 = T2OPType->getPointeeType(); 6134 return true; 6135 } 6136 } 6137 6138 // FIXME: Block pointers, too? 6139 6140 return false; 6141 } 6142 6143 bool ASTContext::hasSimilarType(QualType T1, QualType T2) { 6144 while (true) { 6145 Qualifiers Quals; 6146 T1 = getUnqualifiedArrayType(T1, Quals); 6147 T2 = getUnqualifiedArrayType(T2, Quals); 6148 if (hasSameType(T1, T2)) 6149 return true; 6150 if (!UnwrapSimilarTypes(T1, T2)) 6151 return false; 6152 } 6153 } 6154 6155 bool ASTContext::hasCvrSimilarType(QualType T1, QualType T2) { 6156 while (true) { 6157 Qualifiers Quals1, Quals2; 6158 T1 = getUnqualifiedArrayType(T1, Quals1); 6159 T2 = getUnqualifiedArrayType(T2, Quals2); 6160 6161 Quals1.removeCVRQualifiers(); 6162 Quals2.removeCVRQualifiers(); 6163 if (Quals1 != Quals2) 6164 return false; 6165 6166 if (hasSameType(T1, T2)) 6167 return true; 6168 6169 if (!UnwrapSimilarTypes(T1, T2, /*AllowPiMismatch*/ false)) 6170 return false; 6171 } 6172 } 6173 6174 DeclarationNameInfo 6175 ASTContext::getNameForTemplate(TemplateName Name, 6176 SourceLocation NameLoc) const { 6177 switch (Name.getKind()) { 6178 case TemplateName::QualifiedTemplate: 6179 case TemplateName::Template: 6180 // DNInfo work in progress: CHECKME: what about DNLoc? 6181 return DeclarationNameInfo(Name.getAsTemplateDecl()->getDeclName(), 6182 NameLoc); 6183 6184 case TemplateName::OverloadedTemplate: { 6185 OverloadedTemplateStorage *Storage = Name.getAsOverloadedTemplate(); 6186 // DNInfo work in progress: CHECKME: what about DNLoc? 6187 return DeclarationNameInfo((*Storage->begin())->getDeclName(), NameLoc); 6188 } 6189 6190 case TemplateName::AssumedTemplate: { 6191 AssumedTemplateStorage *Storage = Name.getAsAssumedTemplateName(); 6192 return DeclarationNameInfo(Storage->getDeclName(), NameLoc); 6193 } 6194 6195 case TemplateName::DependentTemplate: { 6196 DependentTemplateName *DTN = Name.getAsDependentTemplateName(); 6197 DeclarationName DName; 6198 if (DTN->isIdentifier()) { 6199 DName = DeclarationNames.getIdentifier(DTN->getIdentifier()); 6200 return DeclarationNameInfo(DName, NameLoc); 6201 } else { 6202 DName = DeclarationNames.getCXXOperatorName(DTN->getOperator()); 6203 // DNInfo work in progress: FIXME: source locations? 
6204 DeclarationNameLoc DNLoc = 6205 DeclarationNameLoc::makeCXXOperatorNameLoc(SourceRange()); 6206 return DeclarationNameInfo(DName, NameLoc, DNLoc); 6207 } 6208 } 6209 6210 case TemplateName::SubstTemplateTemplateParm: { 6211 SubstTemplateTemplateParmStorage *subst 6212 = Name.getAsSubstTemplateTemplateParm(); 6213 return DeclarationNameInfo(subst->getParameter()->getDeclName(), 6214 NameLoc); 6215 } 6216 6217 case TemplateName::SubstTemplateTemplateParmPack: { 6218 SubstTemplateTemplateParmPackStorage *subst 6219 = Name.getAsSubstTemplateTemplateParmPack(); 6220 return DeclarationNameInfo(subst->getParameterPack()->getDeclName(), 6221 NameLoc); 6222 } 6223 case TemplateName::UsingTemplate: 6224 return DeclarationNameInfo(Name.getAsUsingShadowDecl()->getDeclName(), 6225 NameLoc); 6226 } 6227 6228 llvm_unreachable("bad template name kind!"); 6229 } 6230 6231 TemplateName 6232 ASTContext::getCanonicalTemplateName(const TemplateName &Name) const { 6233 switch (Name.getKind()) { 6234 case TemplateName::UsingTemplate: 6235 case TemplateName::QualifiedTemplate: 6236 case TemplateName::Template: { 6237 TemplateDecl *Template = Name.getAsTemplateDecl(); 6238 if (auto *TTP = dyn_cast<TemplateTemplateParmDecl>(Template)) 6239 Template = getCanonicalTemplateTemplateParmDecl(TTP); 6240 6241 // The canonical template name is the canonical template declaration. 6242 return TemplateName(cast<TemplateDecl>(Template->getCanonicalDecl())); 6243 } 6244 6245 case TemplateName::OverloadedTemplate: 6246 case TemplateName::AssumedTemplate: 6247 llvm_unreachable("cannot canonicalize unresolved template"); 6248 6249 case TemplateName::DependentTemplate: { 6250 DependentTemplateName *DTN = Name.getAsDependentTemplateName(); 6251 assert(DTN && "Non-dependent template names must refer to template decls."); 6252 return DTN->CanonicalTemplateName; 6253 } 6254 6255 case TemplateName::SubstTemplateTemplateParm: { 6256 SubstTemplateTemplateParmStorage *subst 6257 = Name.getAsSubstTemplateTemplateParm(); 6258 return getCanonicalTemplateName(subst->getReplacement()); 6259 } 6260 6261 case TemplateName::SubstTemplateTemplateParmPack: { 6262 SubstTemplateTemplateParmPackStorage *subst = 6263 Name.getAsSubstTemplateTemplateParmPack(); 6264 TemplateArgument canonArgPack = 6265 getCanonicalTemplateArgument(subst->getArgumentPack()); 6266 return getSubstTemplateTemplateParmPack( 6267 canonArgPack, subst->getAssociatedDecl()->getCanonicalDecl(), 6268 subst->getFinal(), subst->getIndex()); 6269 } 6270 } 6271 6272 llvm_unreachable("bad template name!"); 6273 } 6274 6275 bool ASTContext::hasSameTemplateName(const TemplateName &X, 6276 const TemplateName &Y) const { 6277 return getCanonicalTemplateName(X).getAsVoidPointer() == 6278 getCanonicalTemplateName(Y).getAsVoidPointer(); 6279 } 6280 6281 bool ASTContext::isSameConstraintExpr(const Expr *XCE, const Expr *YCE) const { 6282 if (!XCE != !YCE) 6283 return false; 6284 6285 if (!XCE) 6286 return true; 6287 6288 llvm::FoldingSetNodeID XCEID, YCEID; 6289 XCE->Profile(XCEID, *this, /*Canonical=*/true, /*ProfileLambdaExpr=*/true); 6290 YCE->Profile(YCEID, *this, /*Canonical=*/true, /*ProfileLambdaExpr=*/true); 6291 return XCEID == YCEID; 6292 } 6293 6294 bool ASTContext::isSameTypeConstraint(const TypeConstraint *XTC, 6295 const TypeConstraint *YTC) const { 6296 if (!XTC != !YTC) 6297 return false; 6298 6299 if (!XTC) 6300 return true; 6301 6302 auto *NCX = XTC->getNamedConcept(); 6303 auto *NCY = YTC->getNamedConcept(); 6304 if (!NCX || !NCY || !isSameEntity(NCX, NCY)) 6305 return 
false; 6306 if (XTC->getConceptReference()->hasExplicitTemplateArgs() != 6307 YTC->getConceptReference()->hasExplicitTemplateArgs()) 6308 return false; 6309 if (XTC->getConceptReference()->hasExplicitTemplateArgs()) 6310 if (XTC->getConceptReference() 6311 ->getTemplateArgsAsWritten() 6312 ->NumTemplateArgs != 6313 YTC->getConceptReference()->getTemplateArgsAsWritten()->NumTemplateArgs) 6314 return false; 6315 6316 // Compare slowly by profiling. 6317 // 6318 // We couldn't compare the profiling result for the template 6319 // args here. Consider the following example in different modules: 6320 // 6321 // template <__integer_like _Tp, C<_Tp> Sentinel> 6322 // constexpr _Tp operator()(_Tp &&__t, Sentinel &&last) const { 6323 // return __t; 6324 // } 6325 // 6326 // When we compare the profiling result for `C<_Tp>` in different 6327 // modules, it will compare the type of `_Tp` in different modules. 6328 // However, the type of `_Tp` in different modules refer to different 6329 // types here naturally. So we couldn't compare the profiling result 6330 // for the template args directly. 6331 return isSameConstraintExpr(XTC->getImmediatelyDeclaredConstraint(), 6332 YTC->getImmediatelyDeclaredConstraint()); 6333 } 6334 6335 bool ASTContext::isSameTemplateParameter(const NamedDecl *X, 6336 const NamedDecl *Y) const { 6337 if (X->getKind() != Y->getKind()) 6338 return false; 6339 6340 if (auto *TX = dyn_cast<TemplateTypeParmDecl>(X)) { 6341 auto *TY = cast<TemplateTypeParmDecl>(Y); 6342 if (TX->isParameterPack() != TY->isParameterPack()) 6343 return false; 6344 if (TX->hasTypeConstraint() != TY->hasTypeConstraint()) 6345 return false; 6346 return isSameTypeConstraint(TX->getTypeConstraint(), 6347 TY->getTypeConstraint()); 6348 } 6349 6350 if (auto *TX = dyn_cast<NonTypeTemplateParmDecl>(X)) { 6351 auto *TY = cast<NonTypeTemplateParmDecl>(Y); 6352 return TX->isParameterPack() == TY->isParameterPack() && 6353 TX->getASTContext().hasSameType(TX->getType(), TY->getType()) && 6354 isSameConstraintExpr(TX->getPlaceholderTypeConstraint(), 6355 TY->getPlaceholderTypeConstraint()); 6356 } 6357 6358 auto *TX = cast<TemplateTemplateParmDecl>(X); 6359 auto *TY = cast<TemplateTemplateParmDecl>(Y); 6360 return TX->isParameterPack() == TY->isParameterPack() && 6361 isSameTemplateParameterList(TX->getTemplateParameters(), 6362 TY->getTemplateParameters()); 6363 } 6364 6365 bool ASTContext::isSameTemplateParameterList( 6366 const TemplateParameterList *X, const TemplateParameterList *Y) const { 6367 if (X->size() != Y->size()) 6368 return false; 6369 6370 for (unsigned I = 0, N = X->size(); I != N; ++I) 6371 if (!isSameTemplateParameter(X->getParam(I), Y->getParam(I))) 6372 return false; 6373 6374 return isSameConstraintExpr(X->getRequiresClause(), Y->getRequiresClause()); 6375 } 6376 6377 bool ASTContext::isSameDefaultTemplateArgument(const NamedDecl *X, 6378 const NamedDecl *Y) const { 6379 // If the type parameter isn't the same already, we don't need to check the 6380 // default argument further. 
6381 if (!isSameTemplateParameter(X, Y)) 6382 return false; 6383 6384 if (auto *TTPX = dyn_cast<TemplateTypeParmDecl>(X)) { 6385 auto *TTPY = cast<TemplateTypeParmDecl>(Y); 6386 if (!TTPX->hasDefaultArgument() || !TTPY->hasDefaultArgument()) 6387 return false; 6388 6389 return hasSameType(TTPX->getDefaultArgument(), TTPY->getDefaultArgument()); 6390 } 6391 6392 if (auto *NTTPX = dyn_cast<NonTypeTemplateParmDecl>(X)) { 6393 auto *NTTPY = cast<NonTypeTemplateParmDecl>(Y); 6394 if (!NTTPX->hasDefaultArgument() || !NTTPY->hasDefaultArgument()) 6395 return false; 6396 6397 Expr *DefaultArgumentX = NTTPX->getDefaultArgument()->IgnoreImpCasts(); 6398 Expr *DefaultArgumentY = NTTPY->getDefaultArgument()->IgnoreImpCasts(); 6399 llvm::FoldingSetNodeID XID, YID; 6400 DefaultArgumentX->Profile(XID, *this, /*Canonical=*/true); 6401 DefaultArgumentY->Profile(YID, *this, /*Canonical=*/true); 6402 return XID == YID; 6403 } 6404 6405 auto *TTPX = cast<TemplateTemplateParmDecl>(X); 6406 auto *TTPY = cast<TemplateTemplateParmDecl>(Y); 6407 6408 if (!TTPX->hasDefaultArgument() || !TTPY->hasDefaultArgument()) 6409 return false; 6410 6411 const TemplateArgument &TAX = TTPX->getDefaultArgument().getArgument(); 6412 const TemplateArgument &TAY = TTPY->getDefaultArgument().getArgument(); 6413 return hasSameTemplateName(TAX.getAsTemplate(), TAY.getAsTemplate()); 6414 } 6415 6416 static NamespaceDecl *getNamespace(const NestedNameSpecifier *X) { 6417 if (auto *NS = X->getAsNamespace()) 6418 return NS; 6419 if (auto *NAS = X->getAsNamespaceAlias()) 6420 return NAS->getNamespace(); 6421 return nullptr; 6422 } 6423 6424 static bool isSameQualifier(const NestedNameSpecifier *X, 6425 const NestedNameSpecifier *Y) { 6426 if (auto *NSX = getNamespace(X)) { 6427 auto *NSY = getNamespace(Y); 6428 if (!NSY || NSX->getCanonicalDecl() != NSY->getCanonicalDecl()) 6429 return false; 6430 } else if (X->getKind() != Y->getKind()) 6431 return false; 6432 6433 // FIXME: For namespaces and types, we're permitted to check that the entity 6434 // is named via the same tokens. We should probably do so. 6435 switch (X->getKind()) { 6436 case NestedNameSpecifier::Identifier: 6437 if (X->getAsIdentifier() != Y->getAsIdentifier()) 6438 return false; 6439 break; 6440 case NestedNameSpecifier::Namespace: 6441 case NestedNameSpecifier::NamespaceAlias: 6442 // We've already checked that we named the same namespace. 6443 break; 6444 case NestedNameSpecifier::TypeSpec: 6445 case NestedNameSpecifier::TypeSpecWithTemplate: 6446 if (X->getAsType()->getCanonicalTypeInternal() != 6447 Y->getAsType()->getCanonicalTypeInternal()) 6448 return false; 6449 break; 6450 case NestedNameSpecifier::Global: 6451 case NestedNameSpecifier::Super: 6452 return true; 6453 } 6454 6455 // Recurse into earlier portion of NNS, if any. 6456 auto *PX = X->getPrefix(); 6457 auto *PY = Y->getPrefix(); 6458 if (PX && PY) 6459 return isSameQualifier(PX, PY); 6460 return !PX && !PY; 6461 } 6462 6463 /// Determine whether the attributes we can overload on are identical for A and 6464 /// B. Will ignore any overloadable attrs represented in the type of A and B. 6465 static bool hasSameOverloadableAttrs(const FunctionDecl *A, 6466 const FunctionDecl *B) { 6467 // Note that pass_object_size attributes are represented in the function's 6468 // ExtParameterInfo, so we don't need to check them here. 
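// Illustrative example (hypothetical declarations, not from this file):
// these two redeclarations compare equal here because their enable_if
// conditions profile identically:
//   void f(int n) __attribute__((enable_if(n > 0, "positive only")));
//   void f(int n) __attribute__((enable_if(n > 0, "positive only")));
// Writing 'n >= 1' on one of them instead would make this function return
// false.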
6469 6470 llvm::FoldingSetNodeID Cand1ID, Cand2ID; 6471 auto AEnableIfAttrs = A->specific_attrs<EnableIfAttr>(); 6472 auto BEnableIfAttrs = B->specific_attrs<EnableIfAttr>(); 6473 6474 for (auto Pair : zip_longest(AEnableIfAttrs, BEnableIfAttrs)) { 6475 std::optional<EnableIfAttr *> Cand1A = std::get<0>(Pair); 6476 std::optional<EnableIfAttr *> Cand2A = std::get<1>(Pair); 6477 6478 // Return false if the number of enable_if attributes is different. 6479 if (!Cand1A || !Cand2A) 6480 return false; 6481 6482 Cand1ID.clear(); 6483 Cand2ID.clear(); 6484 6485 (*Cand1A)->getCond()->Profile(Cand1ID, A->getASTContext(), true); 6486 (*Cand2A)->getCond()->Profile(Cand2ID, B->getASTContext(), true); 6487 6488 // Return false if any of the enable_if expressions of A and B are 6489 // different. 6490 if (Cand1ID != Cand2ID) 6491 return false; 6492 } 6493 return true; 6494 } 6495 6496 bool ASTContext::isSameEntity(const NamedDecl *X, const NamedDecl *Y) const { 6497 // Caution: this function is called by the AST reader during deserialization, 6498 // so it cannot rely on AST invariants being met. Non-trivial accessors 6499 // should be avoided, along with any traversal of redeclaration chains. 6500 6501 if (X == Y) 6502 return true; 6503 6504 if (X->getDeclName() != Y->getDeclName()) 6505 return false; 6506 6507 // Must be in the same context. 6508 // 6509 // Note that we can't use DeclContext::Equals here, because the DeclContexts 6510 // could be two different declarations of the same function. (We will fix the 6511 // semantic DC to refer to the primary definition after merging.) 6512 if (!declaresSameEntity(cast<Decl>(X->getDeclContext()->getRedeclContext()), 6513 cast<Decl>(Y->getDeclContext()->getRedeclContext()))) 6514 return false; 6515 6516 // Two typedefs refer to the same entity if they have the same underlying 6517 // type. 6518 if (const auto *TypedefX = dyn_cast<TypedefNameDecl>(X)) 6519 if (const auto *TypedefY = dyn_cast<TypedefNameDecl>(Y)) 6520 return hasSameType(TypedefX->getUnderlyingType(), 6521 TypedefY->getUnderlyingType()); 6522 6523 // Must have the same kind. 6524 if (X->getKind() != Y->getKind()) 6525 return false; 6526 6527 // Objective-C classes and protocols with the same name always match. 6528 if (isa<ObjCInterfaceDecl>(X) || isa<ObjCProtocolDecl>(X)) 6529 return true; 6530 6531 if (isa<ClassTemplateSpecializationDecl>(X)) { 6532 // No need to handle these here: we merge them when adding them to the 6533 // template. 6534 return false; 6535 } 6536 6537 // Compatible tags match. 6538 if (const auto *TagX = dyn_cast<TagDecl>(X)) { 6539 const auto *TagY = cast<TagDecl>(Y); 6540 return (TagX->getTagKind() == TagY->getTagKind()) || 6541 ((TagX->getTagKind() == TagTypeKind::Struct || 6542 TagX->getTagKind() == TagTypeKind::Class || 6543 TagX->getTagKind() == TagTypeKind::Interface) && 6544 (TagY->getTagKind() == TagTypeKind::Struct || 6545 TagY->getTagKind() == TagTypeKind::Class || 6546 TagY->getTagKind() == TagTypeKind::Interface)); 6547 } 6548 6549 // Functions with the same type and linkage match. 6550 // FIXME: This needs to cope with merging of prototyped/non-prototyped 6551 // functions, etc. 
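// For example (illustrative): two declarations of 'extern int f(int);' seen
// in different modules are matched by the code below as long as their
// written types, linkage, constraints, and overloadable attributes all
// agree.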
6552 if (const auto *FuncX = dyn_cast<FunctionDecl>(X)) {
6553 const auto *FuncY = cast<FunctionDecl>(Y);
6554 if (const auto *CtorX = dyn_cast<CXXConstructorDecl>(X)) {
6555 const auto *CtorY = cast<CXXConstructorDecl>(Y);
6556 if (CtorX->getInheritedConstructor() &&
6557 !isSameEntity(CtorX->getInheritedConstructor().getConstructor(),
6558 CtorY->getInheritedConstructor().getConstructor()))
6559 return false;
6560 }
6561
6562 if (FuncX->isMultiVersion() != FuncY->isMultiVersion())
6563 return false;
6564
6565 // Multiversioned functions with different feature strings are represented
6566 // as separate declarations.
6567 if (FuncX->isMultiVersion()) {
6568 const auto *TAX = FuncX->getAttr<TargetAttr>();
6569 const auto *TAY = FuncY->getAttr<TargetAttr>();
6570 assert(TAX && TAY && "Multiversion Function without target attribute");
6571
6572 if (TAX->getFeaturesStr() != TAY->getFeaturesStr())
6573 return false;
6574 }
6575
6576 // Per C++20 [temp.over.link]/4, friends in different classes are sometimes
6577 // not the same entity if they are constrained.
6578 if ((FuncX->isMemberLikeConstrainedFriend() ||
6579 FuncY->isMemberLikeConstrainedFriend()) &&
6580 !FuncX->getLexicalDeclContext()->Equals(
6581 FuncY->getLexicalDeclContext())) {
6582 return false;
6583 }
6584
6585 if (!isSameConstraintExpr(FuncX->getTrailingRequiresClause(),
6586 FuncY->getTrailingRequiresClause()))
6587 return false;
6588
6589 auto GetTypeAsWritten = [](const FunctionDecl *FD) {
6590 // Map to the first declaration that we've already merged into this one.
6591 // The TSI of redeclarations might not match (due to calling conventions
6592 // being inherited onto the type but not the TSI), but the TSI type of
6593 // the first declaration of the function should match across modules.
6594 FD = FD->getCanonicalDecl();
6595 return FD->getTypeSourceInfo() ? FD->getTypeSourceInfo()->getType()
6596 : FD->getType();
6597 };
6598 QualType XT = GetTypeAsWritten(FuncX), YT = GetTypeAsWritten(FuncY);
6599 if (!hasSameType(XT, YT)) {
6600 // We can get functions with different types on the redecl chain in C++17
6601 // if they have differing exception specifications and at least one of
6602 // the exception specs is unresolved.
6603 auto *XFPT = XT->getAs<FunctionProtoType>();
6604 auto *YFPT = YT->getAs<FunctionProtoType>();
6605 if (getLangOpts().CPlusPlus17 && XFPT && YFPT &&
6606 (isUnresolvedExceptionSpec(XFPT->getExceptionSpecType()) ||
6607 isUnresolvedExceptionSpec(YFPT->getExceptionSpecType())) &&
6608 hasSameFunctionTypeIgnoringExceptionSpec(XT, YT))
6609 return true;
6610 return false;
6611 }
6612
6613 return FuncX->getLinkageInternal() == FuncY->getLinkageInternal() &&
6614 hasSameOverloadableAttrs(FuncX, FuncY);
6615 }
6616
6617 // Variables with the same type and linkage match.
6618 if (const auto *VarX = dyn_cast<VarDecl>(X)) {
6619 const auto *VarY = cast<VarDecl>(Y);
6620 if (VarX->getLinkageInternal() == VarY->getLinkageInternal()) {
6621 // During deserialization, we might compare variables before we load
6622 // their types. Assume the types will end up being the same.
6623 if (VarX->getType().isNull() || VarY->getType().isNull())
6624 return true;
6625
6626 if (hasSameType(VarX->getType(), VarY->getType()))
6627 return true;
6628
6629 // We can get decls with different types on the redecl chain. Eg.
6630 // template <typename T> struct S { static T Var[]; }; // #1
6631 // template <typename T> T S<T>::Var[sizeof(T)]; // #2
6632 // This only happens when completing an incomplete array type.
In this case 6633 // when comparing #1 and #2 we should go through their element type. 6634 const ArrayType *VarXTy = getAsArrayType(VarX->getType()); 6635 const ArrayType *VarYTy = getAsArrayType(VarY->getType()); 6636 if (!VarXTy || !VarYTy) 6637 return false; 6638 if (VarXTy->isIncompleteArrayType() || VarYTy->isIncompleteArrayType()) 6639 return hasSameType(VarXTy->getElementType(), VarYTy->getElementType()); 6640 } 6641 return false; 6642 } 6643 6644 // Namespaces with the same name and inlinedness match. 6645 if (const auto *NamespaceX = dyn_cast<NamespaceDecl>(X)) { 6646 const auto *NamespaceY = cast<NamespaceDecl>(Y); 6647 return NamespaceX->isInline() == NamespaceY->isInline(); 6648 } 6649 6650 // Identical template names and kinds match if their template parameter lists 6651 // and patterns match. 6652 if (const auto *TemplateX = dyn_cast<TemplateDecl>(X)) { 6653 const auto *TemplateY = cast<TemplateDecl>(Y); 6654 6655 // ConceptDecl wouldn't be the same if their constraint expression differs. 6656 if (const auto *ConceptX = dyn_cast<ConceptDecl>(X)) { 6657 const auto *ConceptY = cast<ConceptDecl>(Y); 6658 if (!isSameConstraintExpr(ConceptX->getConstraintExpr(), 6659 ConceptY->getConstraintExpr())) 6660 return false; 6661 } 6662 6663 return isSameEntity(TemplateX->getTemplatedDecl(), 6664 TemplateY->getTemplatedDecl()) && 6665 isSameTemplateParameterList(TemplateX->getTemplateParameters(), 6666 TemplateY->getTemplateParameters()); 6667 } 6668 6669 // Fields with the same name and the same type match. 6670 if (const auto *FDX = dyn_cast<FieldDecl>(X)) { 6671 const auto *FDY = cast<FieldDecl>(Y); 6672 // FIXME: Also check the bitwidth is odr-equivalent, if any. 6673 return hasSameType(FDX->getType(), FDY->getType()); 6674 } 6675 6676 // Indirect fields with the same target field match. 6677 if (const auto *IFDX = dyn_cast<IndirectFieldDecl>(X)) { 6678 const auto *IFDY = cast<IndirectFieldDecl>(Y); 6679 return IFDX->getAnonField()->getCanonicalDecl() == 6680 IFDY->getAnonField()->getCanonicalDecl(); 6681 } 6682 6683 // Enumerators with the same name match. 6684 if (isa<EnumConstantDecl>(X)) 6685 // FIXME: Also check the value is odr-equivalent. 6686 return true; 6687 6688 // Using shadow declarations with the same target match. 6689 if (const auto *USX = dyn_cast<UsingShadowDecl>(X)) { 6690 const auto *USY = cast<UsingShadowDecl>(Y); 6691 return USX->getTargetDecl() == USY->getTargetDecl(); 6692 } 6693 6694 // Using declarations with the same qualifier match. (We already know that 6695 // the name matches.) 6696 if (const auto *UX = dyn_cast<UsingDecl>(X)) { 6697 const auto *UY = cast<UsingDecl>(Y); 6698 return isSameQualifier(UX->getQualifier(), UY->getQualifier()) && 6699 UX->hasTypename() == UY->hasTypename() && 6700 UX->isAccessDeclaration() == UY->isAccessDeclaration(); 6701 } 6702 if (const auto *UX = dyn_cast<UnresolvedUsingValueDecl>(X)) { 6703 const auto *UY = cast<UnresolvedUsingValueDecl>(Y); 6704 return isSameQualifier(UX->getQualifier(), UY->getQualifier()) && 6705 UX->isAccessDeclaration() == UY->isAccessDeclaration(); 6706 } 6707 if (const auto *UX = dyn_cast<UnresolvedUsingTypenameDecl>(X)) { 6708 return isSameQualifier( 6709 UX->getQualifier(), 6710 cast<UnresolvedUsingTypenameDecl>(Y)->getQualifier()); 6711 } 6712 6713 // Using-pack declarations are only created by instantiation, and match if 6714 // they're instantiated from matching UnresolvedUsing...Decls. 
6715 if (const auto *UX = dyn_cast<UsingPackDecl>(X)) { 6716 return declaresSameEntity( 6717 UX->getInstantiatedFromUsingDecl(), 6718 cast<UsingPackDecl>(Y)->getInstantiatedFromUsingDecl()); 6719 } 6720 6721 // Namespace alias definitions with the same target match. 6722 if (const auto *NAX = dyn_cast<NamespaceAliasDecl>(X)) { 6723 const auto *NAY = cast<NamespaceAliasDecl>(Y); 6724 return NAX->getNamespace()->Equals(NAY->getNamespace()); 6725 } 6726 6727 return false; 6728 } 6729 6730 TemplateArgument 6731 ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) const { 6732 switch (Arg.getKind()) { 6733 case TemplateArgument::Null: 6734 return Arg; 6735 6736 case TemplateArgument::Expression: 6737 return Arg; 6738 6739 case TemplateArgument::Declaration: { 6740 auto *D = cast<ValueDecl>(Arg.getAsDecl()->getCanonicalDecl()); 6741 return TemplateArgument(D, getCanonicalType(Arg.getParamTypeForDecl()), 6742 Arg.getIsDefaulted()); 6743 } 6744 6745 case TemplateArgument::NullPtr: 6746 return TemplateArgument(getCanonicalType(Arg.getNullPtrType()), 6747 /*isNullPtr*/ true, Arg.getIsDefaulted()); 6748 6749 case TemplateArgument::Template: 6750 return TemplateArgument(getCanonicalTemplateName(Arg.getAsTemplate()), 6751 Arg.getIsDefaulted()); 6752 6753 case TemplateArgument::TemplateExpansion: 6754 return TemplateArgument( 6755 getCanonicalTemplateName(Arg.getAsTemplateOrTemplatePattern()), 6756 Arg.getNumTemplateExpansions(), Arg.getIsDefaulted()); 6757 6758 case TemplateArgument::Integral: 6759 return TemplateArgument(Arg, getCanonicalType(Arg.getIntegralType())); 6760 6761 case TemplateArgument::StructuralValue: 6762 return TemplateArgument(*this, 6763 getCanonicalType(Arg.getStructuralValueType()), 6764 Arg.getAsStructuralValue()); 6765 6766 case TemplateArgument::Type: 6767 return TemplateArgument(getCanonicalType(Arg.getAsType()), 6768 /*isNullPtr*/ false, Arg.getIsDefaulted()); 6769 6770 case TemplateArgument::Pack: { 6771 bool AnyNonCanonArgs = false; 6772 auto CanonArgs = ::getCanonicalTemplateArguments( 6773 *this, Arg.pack_elements(), AnyNonCanonArgs); 6774 if (!AnyNonCanonArgs) 6775 return Arg; 6776 return TemplateArgument::CreatePackCopy(const_cast<ASTContext &>(*this), 6777 CanonArgs); 6778 } 6779 } 6780 6781 // Silence GCC warning 6782 llvm_unreachable("Unhandled template argument kind"); 6783 } 6784 6785 NestedNameSpecifier * 6786 ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const { 6787 if (!NNS) 6788 return nullptr; 6789 6790 switch (NNS->getKind()) { 6791 case NestedNameSpecifier::Identifier: 6792 // Canonicalize the prefix but keep the identifier the same. 6793 return NestedNameSpecifier::Create(*this, 6794 getCanonicalNestedNameSpecifier(NNS->getPrefix()), 6795 NNS->getAsIdentifier()); 6796 6797 case NestedNameSpecifier::Namespace: 6798 // A namespace is canonical; build a nested-name-specifier with 6799 // this namespace and no prefix. 6800 return NestedNameSpecifier::Create(*this, nullptr, 6801 NNS->getAsNamespace()->getOriginalNamespace()); 6802 6803 case NestedNameSpecifier::NamespaceAlias: 6804 // A namespace is canonical; build a nested-name-specifier with 6805 // this namespace and no prefix. 6806 return NestedNameSpecifier::Create(*this, nullptr, 6807 NNS->getAsNamespaceAlias()->getNamespace() 6808 ->getOriginalNamespace()); 6809 6810 // The difference between TypeSpec and TypeSpecWithTemplate is that the 6811 // latter will have the 'template' keyword when printed. 
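// For instance (illustrative): in 'typename T::type::value_type' the
// 'T::type::' component is a TypeSpec, while in
// 'typename T::template apply<U>::type' the 'T::template apply<U>::'
// component is a TypeSpecWithTemplate.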
6812 case NestedNameSpecifier::TypeSpec:
6813 case NestedNameSpecifier::TypeSpecWithTemplate: {
6814 const Type *T = getCanonicalType(NNS->getAsType());
6815
6816 // If we have some kind of dependent-named type (e.g., "typename T::type"),
6817 // break it apart into its prefix and identifier, then reconstitute those
6818 // as the canonical nested-name-specifier. This is required to canonicalize
6819 // a dependent nested-name-specifier involving typedefs of dependent-name
6820 // types, e.g.,
6821 // typedef typename T::type T1;
6822 // typedef typename T1::type T2;
6823 if (const auto *DNT = T->getAs<DependentNameType>())
6824 return NestedNameSpecifier::Create(
6825 *this, DNT->getQualifier(),
6826 const_cast<IdentifierInfo *>(DNT->getIdentifier()));
6827 if (const auto *DTST = T->getAs<DependentTemplateSpecializationType>())
6828 return NestedNameSpecifier::Create(*this, DTST->getQualifier(), true,
6829 const_cast<Type *>(T));
6830
6831 // TODO: Set 'Template' parameter to true for other template types.
6832 return NestedNameSpecifier::Create(*this, nullptr, false,
6833 const_cast<Type *>(T));
6834 }
6835
6836 case NestedNameSpecifier::Global:
6837 case NestedNameSpecifier::Super:
6838 // The global specifier and __super specifier are canonical and unique.
6839 return NNS;
6840 }
6841
6842 llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
6843 }
6844
6845 const ArrayType *ASTContext::getAsArrayType(QualType T) const {
6846 // Handle the non-qualified case efficiently.
6847 if (!T.hasLocalQualifiers()) {
6848 // Handle the common positive case fast.
6849 if (const auto *AT = dyn_cast<ArrayType>(T))
6850 return AT;
6851 }
6852
6853 // Handle the common negative case fast.
6854 if (!isa<ArrayType>(T.getCanonicalType()))
6855 return nullptr;
6856
6857 // Apply any qualifiers from the array type to the element type. This
6858 // implements C99 6.7.3p8: "If the specification of an array type includes
6859 // any type qualifiers, the element type is so qualified, not the array type."
6860
6861 // If we get here, we either have type qualifiers on the type, or we have
6862 // sugar such as a typedef in the way. If we have type qualifiers on the type
6863 // we must propagate them down into the element type.
6864
6865 SplitQualType split = T.getSplitDesugaredType();
6866 Qualifiers qs = split.Quals;
6867
6868 // If we have a simple case, just return now.
6869 const auto *ATy = dyn_cast<ArrayType>(split.Ty);
6870 if (!ATy || qs.empty())
6871 return ATy;
6872
6873 // Otherwise, we have an array and we have qualifiers on it. Push the
6874 // qualifiers into the array element type and return a new array type.
6875 QualType NewEltTy = getQualifiedType(ATy->getElementType(), qs); 6876 6877 if (const auto *CAT = dyn_cast<ConstantArrayType>(ATy)) 6878 return cast<ArrayType>(getConstantArrayType(NewEltTy, CAT->getSize(), 6879 CAT->getSizeExpr(), 6880 CAT->getSizeModifier(), 6881 CAT->getIndexTypeCVRQualifiers())); 6882 if (const auto *IAT = dyn_cast<IncompleteArrayType>(ATy)) 6883 return cast<ArrayType>(getIncompleteArrayType(NewEltTy, 6884 IAT->getSizeModifier(), 6885 IAT->getIndexTypeCVRQualifiers())); 6886 6887 if (const auto *DSAT = dyn_cast<DependentSizedArrayType>(ATy)) 6888 return cast<ArrayType>( 6889 getDependentSizedArrayType(NewEltTy, 6890 DSAT->getSizeExpr(), 6891 DSAT->getSizeModifier(), 6892 DSAT->getIndexTypeCVRQualifiers(), 6893 DSAT->getBracketsRange())); 6894 6895 const auto *VAT = cast<VariableArrayType>(ATy); 6896 return cast<ArrayType>(getVariableArrayType(NewEltTy, 6897 VAT->getSizeExpr(), 6898 VAT->getSizeModifier(), 6899 VAT->getIndexTypeCVRQualifiers(), 6900 VAT->getBracketsRange())); 6901 } 6902 6903 QualType ASTContext::getAdjustedParameterType(QualType T) const { 6904 if (T->isArrayType() || T->isFunctionType()) 6905 return getDecayedType(T); 6906 return T; 6907 } 6908 6909 QualType ASTContext::getSignatureParameterType(QualType T) const { 6910 T = getVariableArrayDecayedType(T); 6911 T = getAdjustedParameterType(T); 6912 return T.getUnqualifiedType(); 6913 } 6914 6915 QualType ASTContext::getExceptionObjectType(QualType T) const { 6916 // C++ [except.throw]p3: 6917 // A throw-expression initializes a temporary object, called the exception 6918 // object, the type of which is determined by removing any top-level 6919 // cv-qualifiers from the static type of the operand of throw and adjusting 6920 // the type from "array of T" or "function returning T" to "pointer to T" 6921 // or "pointer to function returning T", [...] 6922 T = getVariableArrayDecayedType(T); 6923 if (T->isArrayType() || T->isFunctionType()) 6924 T = getDecayedType(T); 6925 return T.getUnqualifiedType(); 6926 } 6927 6928 /// getArrayDecayedType - Return the properly qualified result of decaying the 6929 /// specified array type to a pointer. This operation is non-trivial when 6930 /// handling typedefs etc. The canonical type of "T" must be an array type, 6931 /// this returns a pointer to a properly qualified element of the array. 6932 /// 6933 /// See C99 6.7.5.3p7 and C99 6.3.2.1p3. 6934 QualType ASTContext::getArrayDecayedType(QualType Ty) const { 6935 // Get the element type with 'getAsArrayType' so that we don't lose any 6936 // typedefs in the element type of the array. This also handles propagation 6937 // of type qualifiers from the array type into the element type if present 6938 // (C99 6.7.3p8). 
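// Illustrative example (hypothetical code, assuming C semantics): given
//   typedef int A[4];
//   const A x;              // 'x' has type 'const A', i.e. 'const int[4]'
// getAsArrayType keeps the typedef sugar but pushes 'const' onto the element
// type, so the decayed type computed below is 'const int *' rather than
// 'int *'.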
6939 const ArrayType *PrettyArrayType = getAsArrayType(Ty); 6940 assert(PrettyArrayType && "Not an array type!"); 6941 6942 QualType PtrTy = getPointerType(PrettyArrayType->getElementType()); 6943 6944 // int x[restrict 4] -> int *restrict 6945 QualType Result = getQualifiedType(PtrTy, 6946 PrettyArrayType->getIndexTypeQualifiers()); 6947 6948 // int x[_Nullable] -> int * _Nullable 6949 if (auto Nullability = Ty->getNullability()) { 6950 Result = const_cast<ASTContext *>(this)->getAttributedType( 6951 AttributedType::getNullabilityAttrKind(*Nullability), Result, Result); 6952 } 6953 return Result; 6954 } 6955 6956 QualType ASTContext::getBaseElementType(const ArrayType *array) const { 6957 return getBaseElementType(array->getElementType()); 6958 } 6959 6960 QualType ASTContext::getBaseElementType(QualType type) const { 6961 Qualifiers qs; 6962 while (true) { 6963 SplitQualType split = type.getSplitDesugaredType(); 6964 const ArrayType *array = split.Ty->getAsArrayTypeUnsafe(); 6965 if (!array) break; 6966 6967 type = array->getElementType(); 6968 qs.addConsistentQualifiers(split.Quals); 6969 } 6970 6971 return getQualifiedType(type, qs); 6972 } 6973 6974 /// getConstantArrayElementCount - Returns number of constant array elements. 6975 uint64_t 6976 ASTContext::getConstantArrayElementCount(const ConstantArrayType *CA) const { 6977 uint64_t ElementCount = 1; 6978 do { 6979 ElementCount *= CA->getSize().getZExtValue(); 6980 CA = dyn_cast_or_null<ConstantArrayType>( 6981 CA->getElementType()->getAsArrayTypeUnsafe()); 6982 } while (CA); 6983 return ElementCount; 6984 } 6985 6986 uint64_t ASTContext::getArrayInitLoopExprElementCount( 6987 const ArrayInitLoopExpr *AILE) const { 6988 if (!AILE) 6989 return 0; 6990 6991 uint64_t ElementCount = 1; 6992 6993 do { 6994 ElementCount *= AILE->getArraySize().getZExtValue(); 6995 AILE = dyn_cast<ArrayInitLoopExpr>(AILE->getSubExpr()); 6996 } while (AILE); 6997 6998 return ElementCount; 6999 } 7000 7001 /// getFloatingRank - Return a relative rank for floating point types. 7002 /// This routine will assert if passed a built-in type that isn't a float. 7003 static FloatingRank getFloatingRank(QualType T) { 7004 if (const auto *CT = T->getAs<ComplexType>()) 7005 return getFloatingRank(CT->getElementType()); 7006 7007 switch (T->castAs<BuiltinType>()->getKind()) { 7008 default: llvm_unreachable("getFloatingRank(): not a floating type"); 7009 case BuiltinType::Float16: return Float16Rank; 7010 case BuiltinType::Half: return HalfRank; 7011 case BuiltinType::Float: return FloatRank; 7012 case BuiltinType::Double: return DoubleRank; 7013 case BuiltinType::LongDouble: return LongDoubleRank; 7014 case BuiltinType::Float128: return Float128Rank; 7015 case BuiltinType::BFloat16: return BFloat16Rank; 7016 case BuiltinType::Ibm128: return Ibm128Rank; 7017 } 7018 } 7019 7020 /// getFloatingTypeOrder - Compare the rank of the two specified floating 7021 /// point types, ignoring the domain of the type (i.e. 'double' == 7022 /// '_Complex double'). If LHS > RHS, return 1. If LHS == RHS, return 0. If 7023 /// LHS < RHS, return -1. 
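///
/// For example (illustrative): getFloatingTypeOrder(DoubleTy, FloatTy) == 1,
/// getFloatingTypeOrder(FloatTy, LongDoubleTy) == -1, and the complex domain
/// is ignored, so 'double' and '_Complex double' compare equal.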
7024 int ASTContext::getFloatingTypeOrder(QualType LHS, QualType RHS) const { 7025 FloatingRank LHSR = getFloatingRank(LHS); 7026 FloatingRank RHSR = getFloatingRank(RHS); 7027 7028 if (LHSR == RHSR) 7029 return 0; 7030 if (LHSR > RHSR) 7031 return 1; 7032 return -1; 7033 } 7034 7035 int ASTContext::getFloatingTypeSemanticOrder(QualType LHS, QualType RHS) const { 7036 if (&getFloatTypeSemantics(LHS) == &getFloatTypeSemantics(RHS)) 7037 return 0; 7038 return getFloatingTypeOrder(LHS, RHS); 7039 } 7040 7041 /// getIntegerRank - Return an integer conversion rank (C99 6.3.1.1p1). This 7042 /// routine will assert if passed a built-in type that isn't an integer or enum, 7043 /// or if it is not canonicalized. 7044 unsigned ASTContext::getIntegerRank(const Type *T) const { 7045 assert(T->isCanonicalUnqualified() && "T should be canonicalized"); 7046 7047 // Results in this 'losing' to any type of the same size, but winning if 7048 // larger. 7049 if (const auto *EIT = dyn_cast<BitIntType>(T)) 7050 return 0 + (EIT->getNumBits() << 3); 7051 7052 switch (cast<BuiltinType>(T)->getKind()) { 7053 default: llvm_unreachable("getIntegerRank(): not a built-in integer"); 7054 case BuiltinType::Bool: 7055 return 1 + (getIntWidth(BoolTy) << 3); 7056 case BuiltinType::Char_S: 7057 case BuiltinType::Char_U: 7058 case BuiltinType::SChar: 7059 case BuiltinType::UChar: 7060 return 2 + (getIntWidth(CharTy) << 3); 7061 case BuiltinType::Short: 7062 case BuiltinType::UShort: 7063 return 3 + (getIntWidth(ShortTy) << 3); 7064 case BuiltinType::Int: 7065 case BuiltinType::UInt: 7066 return 4 + (getIntWidth(IntTy) << 3); 7067 case BuiltinType::Long: 7068 case BuiltinType::ULong: 7069 return 5 + (getIntWidth(LongTy) << 3); 7070 case BuiltinType::LongLong: 7071 case BuiltinType::ULongLong: 7072 return 6 + (getIntWidth(LongLongTy) << 3); 7073 case BuiltinType::Int128: 7074 case BuiltinType::UInt128: 7075 return 7 + (getIntWidth(Int128Ty) << 3); 7076 7077 // "The ranks of char8_t, char16_t, char32_t, and wchar_t equal the ranks of 7078 // their underlying types" [c++20 conv.rank] 7079 case BuiltinType::Char8: 7080 return getIntegerRank(UnsignedCharTy.getTypePtr()); 7081 case BuiltinType::Char16: 7082 return getIntegerRank( 7083 getFromTargetType(Target->getChar16Type()).getTypePtr()); 7084 case BuiltinType::Char32: 7085 return getIntegerRank( 7086 getFromTargetType(Target->getChar32Type()).getTypePtr()); 7087 case BuiltinType::WChar_S: 7088 case BuiltinType::WChar_U: 7089 return getIntegerRank( 7090 getFromTargetType(Target->getWCharType()).getTypePtr()); 7091 } 7092 } 7093 7094 /// Whether this is a promotable bitfield reference according 7095 /// to C99 6.3.1.1p2, bullet 2 (and GCC extensions). 7096 /// 7097 /// \returns the type this bit-field will promote to, or NULL if no 7098 /// promotion occurs. 7099 QualType ASTContext::isPromotableBitField(Expr *E) const { 7100 if (E->isTypeDependent() || E->isValueDependent()) 7101 return {}; 7102 7103 // C++ [conv.prom]p5: 7104 // If the bit-field has an enumerated type, it is treated as any other 7105 // value of that type for promotion purposes. 7106 if (getLangOpts().CPlusPlus && E->getType()->isEnumeralType()) 7107 return {}; 7108 7109 // FIXME: We should not do this unless E->refersToBitField() is true. This 7110 // matters in C where getSourceBitField() will find bit-fields for various 7111 // cases where the source expression is not a bit-field designator. 7112 7113 FieldDecl *Field = E->getSourceBitField(); // FIXME: conditional bit-fields? 
7114 if (!Field) 7115 return {}; 7116 7117 QualType FT = Field->getType(); 7118 7119 uint64_t BitWidth = Field->getBitWidthValue(*this); 7120 uint64_t IntSize = getTypeSize(IntTy); 7121 // C++ [conv.prom]p5: 7122 // A prvalue for an integral bit-field can be converted to a prvalue of type 7123 // int if int can represent all the values of the bit-field; otherwise, it 7124 // can be converted to unsigned int if unsigned int can represent all the 7125 // values of the bit-field. If the bit-field is larger yet, no integral 7126 // promotion applies to it. 7127 // C11 6.3.1.1/2: 7128 // [For a bit-field of type _Bool, int, signed int, or unsigned int:] 7129 // If an int can represent all values of the original type (as restricted by 7130 // the width, for a bit-field), the value is converted to an int; otherwise, 7131 // it is converted to an unsigned int. 7132 // 7133 // FIXME: C does not permit promotion of a 'long : 3' bitfield to int. 7134 // We perform that promotion here to match GCC and C++. 7135 // FIXME: C does not permit promotion of an enum bit-field whose rank is 7136 // greater than that of 'int'. We perform that promotion to match GCC. 7137 if (BitWidth < IntSize) 7138 return IntTy; 7139 7140 if (BitWidth == IntSize) 7141 return FT->isSignedIntegerType() ? IntTy : UnsignedIntTy; 7142 7143 // Bit-fields wider than int are not subject to promotions, and therefore act 7144 // like the base type. GCC has some weird bugs in this area that we 7145 // deliberately do not follow (GCC follows a pre-standard resolution to 7146 // C's DR315 which treats bit-width as being part of the type, and this leaks 7147 // into their semantics in some cases). 7148 return {}; 7149 } 7150 7151 /// getPromotedIntegerType - Returns the type that Promotable will 7152 /// promote to: C99 6.3.1.1p2, assuming that Promotable is a promotable 7153 /// integer type. 7154 QualType ASTContext::getPromotedIntegerType(QualType Promotable) const { 7155 assert(!Promotable.isNull()); 7156 assert(isPromotableIntegerType(Promotable)); 7157 if (const auto *ET = Promotable->getAs<EnumType>()) 7158 return ET->getDecl()->getPromotionType(); 7159 7160 if (const auto *BT = Promotable->getAs<BuiltinType>()) { 7161 // C++ [conv.prom]: A prvalue of type char16_t, char32_t, or wchar_t 7162 // (3.9.1) can be converted to a prvalue of the first of the following 7163 // types that can represent all the values of its underlying type: 7164 // int, unsigned int, long int, unsigned long int, long long int, or 7165 // unsigned long long int [...] 7166 // FIXME: Is there some better way to compute this? 7167 if (BT->getKind() == BuiltinType::WChar_S || 7168 BT->getKind() == BuiltinType::WChar_U || 7169 BT->getKind() == BuiltinType::Char8 || 7170 BT->getKind() == BuiltinType::Char16 || 7171 BT->getKind() == BuiltinType::Char32) { 7172 bool FromIsSigned = BT->getKind() == BuiltinType::WChar_S; 7173 uint64_t FromSize = getTypeSize(BT); 7174 QualType PromoteTypes[] = { IntTy, UnsignedIntTy, LongTy, UnsignedLongTy, 7175 LongLongTy, UnsignedLongLongTy }; 7176 for (const auto &PT : PromoteTypes) { 7177 uint64_t ToSize = getTypeSize(PT); 7178 if (FromSize < ToSize || 7179 (FromSize == ToSize && FromIsSigned == PT->isSignedIntegerType())) 7180 return PT; 7181 } 7182 llvm_unreachable("char type should fit into long long"); 7183 } 7184 } 7185 7186 // At this point, we should have a signed or unsigned integer type. 
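// For example (illustrative, assuming 16-bit 'short' and 32-bit 'int'):
// both 'short' and 'unsigned short' promote to 'int' below, while a
// promotable unsigned type as wide as 'int' maps to 'unsigned int'.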
7187 if (Promotable->isSignedIntegerType()) 7188 return IntTy; 7189 uint64_t PromotableSize = getIntWidth(Promotable); 7190 uint64_t IntSize = getIntWidth(IntTy); 7191 assert(Promotable->isUnsignedIntegerType() && PromotableSize <= IntSize); 7192 return (PromotableSize != IntSize) ? IntTy : UnsignedIntTy; 7193 } 7194 7195 /// Recurses in pointer/array types until it finds an objc retainable 7196 /// type and returns its ownership. 7197 Qualifiers::ObjCLifetime ASTContext::getInnerObjCOwnership(QualType T) const { 7198 while (!T.isNull()) { 7199 if (T.getObjCLifetime() != Qualifiers::OCL_None) 7200 return T.getObjCLifetime(); 7201 if (T->isArrayType()) 7202 T = getBaseElementType(T); 7203 else if (const auto *PT = T->getAs<PointerType>()) 7204 T = PT->getPointeeType(); 7205 else if (const auto *RT = T->getAs<ReferenceType>()) 7206 T = RT->getPointeeType(); 7207 else 7208 break; 7209 } 7210 7211 return Qualifiers::OCL_None; 7212 } 7213 7214 static const Type *getIntegerTypeForEnum(const EnumType *ET) { 7215 // Incomplete enum types are not treated as integer types. 7216 // FIXME: In C++, enum types are never integer types. 7217 if (ET->getDecl()->isComplete() && !ET->getDecl()->isScoped()) 7218 return ET->getDecl()->getIntegerType().getTypePtr(); 7219 return nullptr; 7220 } 7221 7222 /// getIntegerTypeOrder - Returns the highest ranked integer type: 7223 /// C99 6.3.1.8p1. If LHS > RHS, return 1. If LHS == RHS, return 0. If 7224 /// LHS < RHS, return -1. 7225 int ASTContext::getIntegerTypeOrder(QualType LHS, QualType RHS) const { 7226 const Type *LHSC = getCanonicalType(LHS).getTypePtr(); 7227 const Type *RHSC = getCanonicalType(RHS).getTypePtr(); 7228 7229 // Unwrap enums to their underlying type. 7230 if (const auto *ET = dyn_cast<EnumType>(LHSC)) 7231 LHSC = getIntegerTypeForEnum(ET); 7232 if (const auto *ET = dyn_cast<EnumType>(RHSC)) 7233 RHSC = getIntegerTypeForEnum(ET); 7234 7235 if (LHSC == RHSC) return 0; 7236 7237 bool LHSUnsigned = LHSC->isUnsignedIntegerType(); 7238 bool RHSUnsigned = RHSC->isUnsignedIntegerType(); 7239 7240 unsigned LHSRank = getIntegerRank(LHSC); 7241 unsigned RHSRank = getIntegerRank(RHSC); 7242 7243 if (LHSUnsigned == RHSUnsigned) { // Both signed or both unsigned. 7244 if (LHSRank == RHSRank) return 0; 7245 return LHSRank > RHSRank ? 1 : -1; 7246 } 7247 7248 // Otherwise, the LHS is signed and the RHS is unsigned or visa versa. 7249 if (LHSUnsigned) { 7250 // If the unsigned [LHS] type is larger, return it. 7251 if (LHSRank >= RHSRank) 7252 return 1; 7253 7254 // If the signed type can represent all values of the unsigned type, it 7255 // wins. Because we are dealing with 2's complement and types that are 7256 // powers of two larger than each other, this is always safe. 7257 return -1; 7258 } 7259 7260 // If the unsigned [RHS] type is larger, return it. 7261 if (RHSRank >= LHSRank) 7262 return -1; 7263 7264 // If the signed type can represent all values of the unsigned type, it 7265 // wins. Because we are dealing with 2's complement and types that are 7266 // powers of two larger than each other, this is always safe. 
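// Example (illustrative, LP64): getIntegerTypeOrder(LongTy, UnsignedIntTy)
// reaches this point and returns 1, because 64-bit 'long' can represent
// every 'unsigned int' value.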
7267 return 1; 7268 } 7269 7270 TypedefDecl *ASTContext::getCFConstantStringDecl() const { 7271 if (CFConstantStringTypeDecl) 7272 return CFConstantStringTypeDecl; 7273 7274 assert(!CFConstantStringTagDecl && 7275 "tag and typedef should be initialized together"); 7276 CFConstantStringTagDecl = buildImplicitRecord("__NSConstantString_tag"); 7277 CFConstantStringTagDecl->startDefinition(); 7278 7279 struct { 7280 QualType Type; 7281 const char *Name; 7282 } Fields[5]; 7283 unsigned Count = 0; 7284 7285 /// Objective-C ABI 7286 /// 7287 /// typedef struct __NSConstantString_tag { 7288 /// const int *isa; 7289 /// int flags; 7290 /// const char *str; 7291 /// long length; 7292 /// } __NSConstantString; 7293 /// 7294 /// Swift ABI (4.1, 4.2) 7295 /// 7296 /// typedef struct __NSConstantString_tag { 7297 /// uintptr_t _cfisa; 7298 /// uintptr_t _swift_rc; 7299 /// _Atomic(uint64_t) _cfinfoa; 7300 /// const char *_ptr; 7301 /// uint32_t _length; 7302 /// } __NSConstantString; 7303 /// 7304 /// Swift ABI (5.0) 7305 /// 7306 /// typedef struct __NSConstantString_tag { 7307 /// uintptr_t _cfisa; 7308 /// uintptr_t _swift_rc; 7309 /// _Atomic(uint64_t) _cfinfoa; 7310 /// const char *_ptr; 7311 /// uintptr_t _length; 7312 /// } __NSConstantString; 7313 7314 const auto CFRuntime = getLangOpts().CFRuntime; 7315 if (static_cast<unsigned>(CFRuntime) < 7316 static_cast<unsigned>(LangOptions::CoreFoundationABI::Swift)) { 7317 Fields[Count++] = { getPointerType(IntTy.withConst()), "isa" }; 7318 Fields[Count++] = { IntTy, "flags" }; 7319 Fields[Count++] = { getPointerType(CharTy.withConst()), "str" }; 7320 Fields[Count++] = { LongTy, "length" }; 7321 } else { 7322 Fields[Count++] = { getUIntPtrType(), "_cfisa" }; 7323 Fields[Count++] = { getUIntPtrType(), "_swift_rc" }; 7324 Fields[Count++] = { getFromTargetType(Target->getUInt64Type()), "_swift_rc" }; 7325 Fields[Count++] = { getPointerType(CharTy.withConst()), "_ptr" }; 7326 if (CFRuntime == LangOptions::CoreFoundationABI::Swift4_1 || 7327 CFRuntime == LangOptions::CoreFoundationABI::Swift4_2) 7328 Fields[Count++] = { IntTy, "_ptr" }; 7329 else 7330 Fields[Count++] = { getUIntPtrType(), "_ptr" }; 7331 } 7332 7333 // Create fields 7334 for (unsigned i = 0; i < Count; ++i) { 7335 FieldDecl *Field = 7336 FieldDecl::Create(*this, CFConstantStringTagDecl, SourceLocation(), 7337 SourceLocation(), &Idents.get(Fields[i].Name), 7338 Fields[i].Type, /*TInfo=*/nullptr, 7339 /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit); 7340 Field->setAccess(AS_public); 7341 CFConstantStringTagDecl->addDecl(Field); 7342 } 7343 7344 CFConstantStringTagDecl->completeDefinition(); 7345 // This type is designed to be compatible with NSConstantString, but cannot 7346 // use the same name, since NSConstantString is an interface. 7347 auto tagType = getTagDeclType(CFConstantStringTagDecl); 7348 CFConstantStringTypeDecl = 7349 buildImplicitTypedef(tagType, "__NSConstantString"); 7350 7351 return CFConstantStringTypeDecl; 7352 } 7353 7354 RecordDecl *ASTContext::getCFConstantStringTagDecl() const { 7355 if (!CFConstantStringTagDecl) 7356 getCFConstantStringDecl(); // Build the tag and the typedef. 7357 return CFConstantStringTagDecl; 7358 } 7359 7360 // getCFConstantStringType - Return the type used for constant CFStrings. 
7361 QualType ASTContext::getCFConstantStringType() const { 7362 return getTypedefType(getCFConstantStringDecl()); 7363 } 7364 7365 QualType ASTContext::getObjCSuperType() const { 7366 if (ObjCSuperType.isNull()) { 7367 RecordDecl *ObjCSuperTypeDecl = buildImplicitRecord("objc_super"); 7368 getTranslationUnitDecl()->addDecl(ObjCSuperTypeDecl); 7369 ObjCSuperType = getTagDeclType(ObjCSuperTypeDecl); 7370 } 7371 return ObjCSuperType; 7372 } 7373 7374 void ASTContext::setCFConstantStringType(QualType T) { 7375 const auto *TD = T->castAs<TypedefType>(); 7376 CFConstantStringTypeDecl = cast<TypedefDecl>(TD->getDecl()); 7377 const auto *TagType = 7378 CFConstantStringTypeDecl->getUnderlyingType()->castAs<RecordType>(); 7379 CFConstantStringTagDecl = TagType->getDecl(); 7380 } 7381 7382 QualType ASTContext::getBlockDescriptorType() const { 7383 if (BlockDescriptorType) 7384 return getTagDeclType(BlockDescriptorType); 7385 7386 RecordDecl *RD; 7387 // FIXME: Needs the FlagAppleBlock bit. 7388 RD = buildImplicitRecord("__block_descriptor"); 7389 RD->startDefinition(); 7390 7391 QualType FieldTypes[] = { 7392 UnsignedLongTy, 7393 UnsignedLongTy, 7394 }; 7395 7396 static const char *const FieldNames[] = { 7397 "reserved", 7398 "Size" 7399 }; 7400 7401 for (size_t i = 0; i < 2; ++i) { 7402 FieldDecl *Field = FieldDecl::Create( 7403 *this, RD, SourceLocation(), SourceLocation(), 7404 &Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/nullptr, 7405 /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit); 7406 Field->setAccess(AS_public); 7407 RD->addDecl(Field); 7408 } 7409 7410 RD->completeDefinition(); 7411 7412 BlockDescriptorType = RD; 7413 7414 return getTagDeclType(BlockDescriptorType); 7415 } 7416 7417 QualType ASTContext::getBlockDescriptorExtendedType() const { 7418 if (BlockDescriptorExtendedType) 7419 return getTagDeclType(BlockDescriptorExtendedType); 7420 7421 RecordDecl *RD; 7422 // FIXME: Needs the FlagAppleBlock bit. 
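// The record built below corresponds roughly to the following layout
// (illustrative sketch only; the copy/dispose members are modelled here as
// 'void **' rather than as real function pointer types):
//   struct __block_descriptor_withcopydispose {
//     unsigned long reserved;
//     unsigned long Size;
//     void **CopyFuncPtr;
//     void **DestroyFuncPtr;
//   };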
7423 RD = buildImplicitRecord("__block_descriptor_withcopydispose"); 7424 RD->startDefinition(); 7425 7426 QualType FieldTypes[] = { 7427 UnsignedLongTy, 7428 UnsignedLongTy, 7429 getPointerType(VoidPtrTy), 7430 getPointerType(VoidPtrTy) 7431 }; 7432 7433 static const char *const FieldNames[] = { 7434 "reserved", 7435 "Size", 7436 "CopyFuncPtr", 7437 "DestroyFuncPtr" 7438 }; 7439 7440 for (size_t i = 0; i < 4; ++i) { 7441 FieldDecl *Field = FieldDecl::Create( 7442 *this, RD, SourceLocation(), SourceLocation(), 7443 &Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/nullptr, 7444 /*BitWidth=*/nullptr, 7445 /*Mutable=*/false, ICIS_NoInit); 7446 Field->setAccess(AS_public); 7447 RD->addDecl(Field); 7448 } 7449 7450 RD->completeDefinition(); 7451 7452 BlockDescriptorExtendedType = RD; 7453 return getTagDeclType(BlockDescriptorExtendedType); 7454 } 7455 7456 OpenCLTypeKind ASTContext::getOpenCLTypeKind(const Type *T) const { 7457 const auto *BT = dyn_cast<BuiltinType>(T); 7458 7459 if (!BT) { 7460 if (isa<PipeType>(T)) 7461 return OCLTK_Pipe; 7462 7463 return OCLTK_Default; 7464 } 7465 7466 switch (BT->getKind()) { 7467 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ 7468 case BuiltinType::Id: \ 7469 return OCLTK_Image; 7470 #include "clang/Basic/OpenCLImageTypes.def" 7471 7472 case BuiltinType::OCLClkEvent: 7473 return OCLTK_ClkEvent; 7474 7475 case BuiltinType::OCLEvent: 7476 return OCLTK_Event; 7477 7478 case BuiltinType::OCLQueue: 7479 return OCLTK_Queue; 7480 7481 case BuiltinType::OCLReserveID: 7482 return OCLTK_ReserveID; 7483 7484 case BuiltinType::OCLSampler: 7485 return OCLTK_Sampler; 7486 7487 default: 7488 return OCLTK_Default; 7489 } 7490 } 7491 7492 LangAS ASTContext::getOpenCLTypeAddrSpace(const Type *T) const { 7493 return Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T)); 7494 } 7495 7496 /// BlockRequiresCopying - Returns true if byref variable "D" of type "Ty" 7497 /// requires copy/dispose. Note that this must match the logic 7498 /// in buildByrefHelpers. 7499 bool ASTContext::BlockRequiresCopying(QualType Ty, 7500 const VarDecl *D) { 7501 if (const CXXRecordDecl *record = Ty->getAsCXXRecordDecl()) { 7502 const Expr *copyExpr = getBlockVarCopyInit(D).getCopyExpr(); 7503 if (!copyExpr && record->hasTrivialDestructor()) return false; 7504 7505 return true; 7506 } 7507 7508 // The block needs copy/destroy helpers if Ty is non-trivial to destructively 7509 // move or destroy. 7510 if (Ty.isNonTrivialToPrimitiveDestructiveMove() || Ty.isDestructedType()) 7511 return true; 7512 7513 if (!Ty->isObjCRetainableType()) return false; 7514 7515 Qualifiers qs = Ty.getQualifiers(); 7516 7517 // If we have lifetime, that dominates. 7518 if (Qualifiers::ObjCLifetime lifetime = qs.getObjCLifetime()) { 7519 switch (lifetime) { 7520 case Qualifiers::OCL_None: llvm_unreachable("impossible"); 7521 7522 // These are just bits as far as the runtime is concerned. 7523 case Qualifiers::OCL_ExplicitNone: 7524 case Qualifiers::OCL_Autoreleasing: 7525 return false; 7526 7527 // These cases should have been taken care of when checking the type's 7528 // non-triviality. 
7529 case Qualifiers::OCL_Weak: 7530 case Qualifiers::OCL_Strong: 7531 llvm_unreachable("impossible"); 7532 } 7533 llvm_unreachable("fell out of lifetime switch!"); 7534 } 7535 return (Ty->isBlockPointerType() || isObjCNSObjectType(Ty) || 7536 Ty->isObjCObjectPointerType()); 7537 } 7538 7539 bool ASTContext::getByrefLifetime(QualType Ty, 7540 Qualifiers::ObjCLifetime &LifeTime, 7541 bool &HasByrefExtendedLayout) const { 7542 if (!getLangOpts().ObjC || 7543 getLangOpts().getGC() != LangOptions::NonGC) 7544 return false; 7545 7546 HasByrefExtendedLayout = false; 7547 if (Ty->isRecordType()) { 7548 HasByrefExtendedLayout = true; 7549 LifeTime = Qualifiers::OCL_None; 7550 } else if ((LifeTime = Ty.getObjCLifetime())) { 7551 // Honor the ARC qualifiers. 7552 } else if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType()) { 7553 // The MRR rule. 7554 LifeTime = Qualifiers::OCL_ExplicitNone; 7555 } else { 7556 LifeTime = Qualifiers::OCL_None; 7557 } 7558 return true; 7559 } 7560 7561 CanQualType ASTContext::getNSUIntegerType() const { 7562 assert(Target && "Expected target to be initialized"); 7563 const llvm::Triple &T = Target->getTriple(); 7564 // Windows is LLP64 rather than LP64 7565 if (T.isOSWindows() && T.isArch64Bit()) 7566 return UnsignedLongLongTy; 7567 return UnsignedLongTy; 7568 } 7569 7570 CanQualType ASTContext::getNSIntegerType() const { 7571 assert(Target && "Expected target to be initialized"); 7572 const llvm::Triple &T = Target->getTriple(); 7573 // Windows is LLP64 rather than LP64 7574 if (T.isOSWindows() && T.isArch64Bit()) 7575 return LongLongTy; 7576 return LongTy; 7577 } 7578 7579 TypedefDecl *ASTContext::getObjCInstanceTypeDecl() { 7580 if (!ObjCInstanceTypeDecl) 7581 ObjCInstanceTypeDecl = 7582 buildImplicitTypedef(getObjCIdType(), "instancetype"); 7583 return ObjCInstanceTypeDecl; 7584 } 7585 7586 // This returns true if a type has been typedefed to BOOL: 7587 // typedef <type> BOOL; 7588 static bool isTypeTypedefedAsBOOL(QualType T) { 7589 if (const auto *TT = dyn_cast<TypedefType>(T)) 7590 if (IdentifierInfo *II = TT->getDecl()->getIdentifier()) 7591 return II->isStr("BOOL"); 7592 7593 return false; 7594 } 7595 7596 /// getObjCEncodingTypeSize returns size of type for objective-c encoding 7597 /// purpose. 7598 CharUnits ASTContext::getObjCEncodingTypeSize(QualType type) const { 7599 if (!type->isIncompleteArrayType() && type->isIncompleteType()) 7600 return CharUnits::Zero(); 7601 7602 CharUnits sz = getTypeSizeInChars(type); 7603 7604 // Make all integer and enum types at least as large as an int 7605 if (sz.isPositive() && type->isIntegralOrEnumerationType()) 7606 sz = std::max(sz, getTypeSizeInChars(IntTy)); 7607 // Treat arrays as pointers, since that's how they're passed in. 7608 else if (type->isArrayType()) 7609 sz = getTypeSizeInChars(VoidPtrTy); 7610 return sz; 7611 } 7612 7613 bool ASTContext::isMSStaticDataMemberInlineDefinition(const VarDecl *VD) const { 7614 return getTargetInfo().getCXXABI().isMicrosoft() && 7615 VD->isStaticDataMember() && 7616 VD->getType()->isIntegralOrEnumerationType() && 7617 !VD->getFirstDecl()->isOutOfLine() && VD->getFirstDecl()->hasInit(); 7618 } 7619 7620 ASTContext::InlineVariableDefinitionKind 7621 ASTContext::getInlineVariableDefinitionKind(const VarDecl *VD) const { 7622 if (!VD->isInline()) 7623 return InlineVariableDefinitionKind::None; 7624 7625 // In almost all cases, it's a weak definition. 
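// For example (illustrative): a namespace-scope 'inline int X = 1;' is a
// weak definition, whereas an in-class 'static constexpr int N = 1;' that is
// redeclared out of line as 'constexpr int S::N;' hits the Strong case
// below.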
7626 auto *First = VD->getFirstDecl(); 7627 if (First->isInlineSpecified() || !First->isStaticDataMember()) 7628 return InlineVariableDefinitionKind::Weak; 7629 7630 // If there's a file-context declaration in this translation unit, it's a 7631 // non-discardable definition. 7632 for (auto *D : VD->redecls()) 7633 if (D->getLexicalDeclContext()->isFileContext() && 7634 !D->isInlineSpecified() && (D->isConstexpr() || First->isConstexpr())) 7635 return InlineVariableDefinitionKind::Strong; 7636 7637 // If we've not seen one yet, we don't know. 7638 return InlineVariableDefinitionKind::WeakUnknown; 7639 } 7640 7641 static std::string charUnitsToString(const CharUnits &CU) { 7642 return llvm::itostr(CU.getQuantity()); 7643 } 7644 7645 /// getObjCEncodingForBlock - Return the encoded type for this block 7646 /// declaration. 7647 std::string ASTContext::getObjCEncodingForBlock(const BlockExpr *Expr) const { 7648 std::string S; 7649 7650 const BlockDecl *Decl = Expr->getBlockDecl(); 7651 QualType BlockTy = 7652 Expr->getType()->castAs<BlockPointerType>()->getPointeeType(); 7653 QualType BlockReturnTy = BlockTy->castAs<FunctionType>()->getReturnType(); 7654 // Encode result type. 7655 if (getLangOpts().EncodeExtendedBlockSig) 7656 getObjCEncodingForMethodParameter(Decl::OBJC_TQ_None, BlockReturnTy, S, 7657 true /*Extended*/); 7658 else 7659 getObjCEncodingForType(BlockReturnTy, S); 7660 // Compute size of all parameters. 7661 // Start with computing size of a pointer in number of bytes. 7662 // FIXME: There might(should) be a better way of doing this computation! 7663 CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy); 7664 CharUnits ParmOffset = PtrSize; 7665 for (auto *PI : Decl->parameters()) { 7666 QualType PType = PI->getType(); 7667 CharUnits sz = getObjCEncodingTypeSize(PType); 7668 if (sz.isZero()) 7669 continue; 7670 assert(sz.isPositive() && "BlockExpr - Incomplete param type"); 7671 ParmOffset += sz; 7672 } 7673 // Size of the argument frame 7674 S += charUnitsToString(ParmOffset); 7675 // Block pointer and offset. 7676 S += "@?0"; 7677 7678 // Argument types. 7679 ParmOffset = PtrSize; 7680 for (auto *PVDecl : Decl->parameters()) { 7681 QualType PType = PVDecl->getOriginalType(); 7682 if (const auto *AT = 7683 dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) { 7684 // Use array's original type only if it has known number of 7685 // elements. 7686 if (!isa<ConstantArrayType>(AT)) 7687 PType = PVDecl->getType(); 7688 } else if (PType->isFunctionType()) 7689 PType = PVDecl->getType(); 7690 if (getLangOpts().EncodeExtendedBlockSig) 7691 getObjCEncodingForMethodParameter(Decl::OBJC_TQ_None, PType, 7692 S, true /*Extended*/); 7693 else 7694 getObjCEncodingForType(PType, S); 7695 S += charUnitsToString(ParmOffset); 7696 ParmOffset += getObjCEncodingTypeSize(PType); 7697 } 7698 7699 return S; 7700 } 7701 7702 std::string 7703 ASTContext::getObjCEncodingForFunctionDecl(const FunctionDecl *Decl) const { 7704 std::string S; 7705 // Encode result type. 7706 getObjCEncodingForType(Decl->getReturnType(), S); 7707 CharUnits ParmOffset; 7708 // Compute size of all parameters. 7709 for (auto *PI : Decl->parameters()) { 7710 QualType PType = PI->getType(); 7711 CharUnits sz = getObjCEncodingTypeSize(PType); 7712 if (sz.isZero()) 7713 continue; 7714 7715 assert(sz.isPositive() && 7716 "getObjCEncodingForFunctionDecl - Incomplete param type"); 7717 ParmOffset += sz; 7718 } 7719 S += charUnitsToString(ParmOffset); 7720 ParmOffset = CharUnits::Zero(); 7721 7722 // Argument types. 
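  // Each parameter is emitted as its type encoding followed by its byte
  // offset within the argument frame, after the return type and total frame
  // size emitted above. As a rough example, on a target with 4-byte int and
  // float, `void f(int, float)` would encode as "v8i0f4".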
7723 for (auto *PVDecl : Decl->parameters()) { 7724 QualType PType = PVDecl->getOriginalType(); 7725 if (const auto *AT = 7726 dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) { 7727 // Use array's original type only if it has known number of 7728 // elements. 7729 if (!isa<ConstantArrayType>(AT)) 7730 PType = PVDecl->getType(); 7731 } else if (PType->isFunctionType()) 7732 PType = PVDecl->getType(); 7733 getObjCEncodingForType(PType, S); 7734 S += charUnitsToString(ParmOffset); 7735 ParmOffset += getObjCEncodingTypeSize(PType); 7736 } 7737 7738 return S; 7739 } 7740 7741 /// getObjCEncodingForMethodParameter - Return the encoded type for a single 7742 /// method parameter or return type. If Extended, include class names and 7743 /// block object types. 7744 void ASTContext::getObjCEncodingForMethodParameter(Decl::ObjCDeclQualifier QT, 7745 QualType T, std::string& S, 7746 bool Extended) const { 7747 // Encode type qualifier, 'in', 'inout', etc. for the parameter. 7748 getObjCEncodingForTypeQualifier(QT, S); 7749 // Encode parameter type. 7750 ObjCEncOptions Options = ObjCEncOptions() 7751 .setExpandPointedToStructures() 7752 .setExpandStructures() 7753 .setIsOutermostType(); 7754 if (Extended) 7755 Options.setEncodeBlockParameters().setEncodeClassNames(); 7756 getObjCEncodingForTypeImpl(T, S, Options, /*Field=*/nullptr); 7757 } 7758 7759 /// getObjCEncodingForMethodDecl - Return the encoded type for this method 7760 /// declaration. 7761 std::string ASTContext::getObjCEncodingForMethodDecl(const ObjCMethodDecl *Decl, 7762 bool Extended) const { 7763 // FIXME: This is not very efficient. 7764 // Encode return type. 7765 std::string S; 7766 getObjCEncodingForMethodParameter(Decl->getObjCDeclQualifier(), 7767 Decl->getReturnType(), S, Extended); 7768 // Compute size of all parameters. 7769 // Start with computing size of a pointer in number of bytes. 7770 // FIXME: There might(should) be a better way of doing this computation! 7771 CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy); 7772 // The first two arguments (self and _cmd) are pointers; account for 7773 // their size. 7774 CharUnits ParmOffset = 2 * PtrSize; 7775 for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(), 7776 E = Decl->sel_param_end(); PI != E; ++PI) { 7777 QualType PType = (*PI)->getType(); 7778 CharUnits sz = getObjCEncodingTypeSize(PType); 7779 if (sz.isZero()) 7780 continue; 7781 7782 assert(sz.isPositive() && 7783 "getObjCEncodingForMethodDecl - Incomplete param type"); 7784 ParmOffset += sz; 7785 } 7786 S += charUnitsToString(ParmOffset); 7787 S += "@0:"; 7788 S += charUnitsToString(PtrSize); 7789 7790 // Argument types. 7791 ParmOffset = 2 * PtrSize; 7792 for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(), 7793 E = Decl->sel_param_end(); PI != E; ++PI) { 7794 const ParmVarDecl *PVDecl = *PI; 7795 QualType PType = PVDecl->getOriginalType(); 7796 if (const auto *AT = 7797 dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) { 7798 // Use array's original type only if it has known number of 7799 // elements. 
7800 if (!isa<ConstantArrayType>(AT)) 7801 PType = PVDecl->getType(); 7802 } else if (PType->isFunctionType()) 7803 PType = PVDecl->getType(); 7804 getObjCEncodingForMethodParameter(PVDecl->getObjCDeclQualifier(), 7805 PType, S, Extended); 7806 S += charUnitsToString(ParmOffset); 7807 ParmOffset += getObjCEncodingTypeSize(PType); 7808 } 7809 7810 return S; 7811 } 7812 7813 ObjCPropertyImplDecl * 7814 ASTContext::getObjCPropertyImplDeclForPropertyDecl( 7815 const ObjCPropertyDecl *PD, 7816 const Decl *Container) const { 7817 if (!Container) 7818 return nullptr; 7819 if (const auto *CID = dyn_cast<ObjCCategoryImplDecl>(Container)) { 7820 for (auto *PID : CID->property_impls()) 7821 if (PID->getPropertyDecl() == PD) 7822 return PID; 7823 } else { 7824 const auto *OID = cast<ObjCImplementationDecl>(Container); 7825 for (auto *PID : OID->property_impls()) 7826 if (PID->getPropertyDecl() == PD) 7827 return PID; 7828 } 7829 return nullptr; 7830 } 7831 7832 /// getObjCEncodingForPropertyDecl - Return the encoded type for this 7833 /// property declaration. If non-NULL, Container must be either an 7834 /// ObjCCategoryImplDecl or ObjCImplementationDecl; it should only be 7835 /// NULL when getting encodings for protocol properties. 7836 /// Property attributes are stored as a comma-delimited C string. The simple 7837 /// attributes readonly and bycopy are encoded as single characters. The 7838 /// parametrized attributes, getter=name, setter=name, and ivar=name, are 7839 /// encoded as single characters, followed by an identifier. Property types 7840 /// are also encoded as a parametrized attribute. The characters used to encode 7841 /// these attributes are defined by the following enumeration: 7842 /// @code 7843 /// enum PropertyAttributes { 7844 /// kPropertyReadOnly = 'R', // property is read-only. 7845 /// kPropertyBycopy = 'C', // property is a copy of the value last assigned 7846 /// kPropertyByref = '&', // property is a reference to the value last assigned 7847 /// kPropertyDynamic = 'D', // property is dynamic 7848 /// kPropertyGetter = 'G', // followed by getter selector name 7849 /// kPropertySetter = 'S', // followed by setter selector name 7850 /// kPropertyInstanceVariable = 'V' // followed by instance variable name 7851 /// kPropertyType = 'T' // followed by old-style type encoding. 7852 /// kPropertyWeak = 'W' // 'weak' property 7853 /// kPropertyStrong = 'P' // property GC'able 7854 /// kPropertyNonAtomic = 'N' // property non-atomic 7855 /// kPropertyOptional = '?' // property optional 7856 /// }; 7857 /// @endcode 7858 std::string 7859 ASTContext::getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD, 7860 const Decl *Container) const { 7861 // Collect information from the property implementation decl(s). 7862 bool Dynamic = false; 7863 ObjCPropertyImplDecl *SynthesizePID = nullptr; 7864 7865 if (ObjCPropertyImplDecl *PropertyImpDecl = 7866 getObjCPropertyImplDeclForPropertyDecl(PD, Container)) { 7867 if (PropertyImpDecl->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic) 7868 Dynamic = true; 7869 else 7870 SynthesizePID = PropertyImpDecl; 7871 } 7872 7873 // FIXME: This is not very efficient. 7874 std::string S = "T"; 7875 7876 // Encode result type. 7877 // GCC has some special rules regarding encoding of properties which 7878 // closely resembles encoding of ivars. 
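  // As a rough example of the final form, a property declared as
  //   @property (nonatomic, copy) NSString *name;
  // backed by a synthesized ivar `_name` would produce something like
  //   T@"NSString",C,N,V_name
  // i.e. the 'T' type attribute first, then the comma-separated attributes.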
7879 getObjCEncodingForPropertyType(PD->getType(), S); 7880 7881 if (PD->isOptional()) 7882 S += ",?"; 7883 7884 if (PD->isReadOnly()) { 7885 S += ",R"; 7886 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_copy) 7887 S += ",C"; 7888 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_retain) 7889 S += ",&"; 7890 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_weak) 7891 S += ",W"; 7892 } else { 7893 switch (PD->getSetterKind()) { 7894 case ObjCPropertyDecl::Assign: break; 7895 case ObjCPropertyDecl::Copy: S += ",C"; break; 7896 case ObjCPropertyDecl::Retain: S += ",&"; break; 7897 case ObjCPropertyDecl::Weak: S += ",W"; break; 7898 } 7899 } 7900 7901 // It really isn't clear at all what this means, since properties 7902 // are "dynamic by default". 7903 if (Dynamic) 7904 S += ",D"; 7905 7906 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_nonatomic) 7907 S += ",N"; 7908 7909 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_getter) { 7910 S += ",G"; 7911 S += PD->getGetterName().getAsString(); 7912 } 7913 7914 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_setter) { 7915 S += ",S"; 7916 S += PD->getSetterName().getAsString(); 7917 } 7918 7919 if (SynthesizePID) { 7920 const ObjCIvarDecl *OID = SynthesizePID->getPropertyIvarDecl(); 7921 S += ",V"; 7922 S += OID->getNameAsString(); 7923 } 7924 7925 // FIXME: OBJCGC: weak & strong 7926 return S; 7927 } 7928 7929 /// getLegacyIntegralTypeEncoding - 7930 /// Another legacy compatibility encoding: 32-bit longs are encoded as 7931 /// 'l' or 'L' , but not always. For typedefs, we need to use 7932 /// 'i' or 'I' instead if encoding a struct field, or a pointer! 7933 void ASTContext::getLegacyIntegralTypeEncoding (QualType &PointeeTy) const { 7934 if (PointeeTy->getAs<TypedefType>()) { 7935 if (const auto *BT = PointeeTy->getAs<BuiltinType>()) { 7936 if (BT->getKind() == BuiltinType::ULong && getIntWidth(PointeeTy) == 32) 7937 PointeeTy = UnsignedIntTy; 7938 else 7939 if (BT->getKind() == BuiltinType::Long && getIntWidth(PointeeTy) == 32) 7940 PointeeTy = IntTy; 7941 } 7942 } 7943 } 7944 7945 void ASTContext::getObjCEncodingForType(QualType T, std::string& S, 7946 const FieldDecl *Field, 7947 QualType *NotEncodedT) const { 7948 // We follow the behavior of gcc, expanding structures which are 7949 // directly pointed to, and expanding embedded structures. Note that 7950 // these rules are sufficient to prevent recursive encoding of the 7951 // same type. 7952 getObjCEncodingForTypeImpl(T, S, 7953 ObjCEncOptions() 7954 .setExpandPointedToStructures() 7955 .setExpandStructures() 7956 .setIsOutermostType(), 7957 Field, NotEncodedT); 7958 } 7959 7960 void ASTContext::getObjCEncodingForPropertyType(QualType T, 7961 std::string& S) const { 7962 // Encode result type. 7963 // GCC has some special rules regarding encoding of properties which 7964 // closely resembles encoding of ivars. 
7965 getObjCEncodingForTypeImpl(T, S, 7966 ObjCEncOptions() 7967 .setExpandPointedToStructures() 7968 .setExpandStructures() 7969 .setIsOutermostType() 7970 .setEncodingProperty(), 7971 /*Field=*/nullptr); 7972 } 7973 7974 static char getObjCEncodingForPrimitiveType(const ASTContext *C, 7975 const BuiltinType *BT) { 7976 BuiltinType::Kind kind = BT->getKind(); 7977 switch (kind) { 7978 case BuiltinType::Void: return 'v'; 7979 case BuiltinType::Bool: return 'B'; 7980 case BuiltinType::Char8: 7981 case BuiltinType::Char_U: 7982 case BuiltinType::UChar: return 'C'; 7983 case BuiltinType::Char16: 7984 case BuiltinType::UShort: return 'S'; 7985 case BuiltinType::Char32: 7986 case BuiltinType::UInt: return 'I'; 7987 case BuiltinType::ULong: 7988 return C->getTargetInfo().getLongWidth() == 32 ? 'L' : 'Q'; 7989 case BuiltinType::UInt128: return 'T'; 7990 case BuiltinType::ULongLong: return 'Q'; 7991 case BuiltinType::Char_S: 7992 case BuiltinType::SChar: return 'c'; 7993 case BuiltinType::Short: return 's'; 7994 case BuiltinType::WChar_S: 7995 case BuiltinType::WChar_U: 7996 case BuiltinType::Int: return 'i'; 7997 case BuiltinType::Long: 7998 return C->getTargetInfo().getLongWidth() == 32 ? 'l' : 'q'; 7999 case BuiltinType::LongLong: return 'q'; 8000 case BuiltinType::Int128: return 't'; 8001 case BuiltinType::Float: return 'f'; 8002 case BuiltinType::Double: return 'd'; 8003 case BuiltinType::LongDouble: return 'D'; 8004 case BuiltinType::NullPtr: return '*'; // like char* 8005 8006 case BuiltinType::BFloat16: 8007 case BuiltinType::Float16: 8008 case BuiltinType::Float128: 8009 case BuiltinType::Ibm128: 8010 case BuiltinType::Half: 8011 case BuiltinType::ShortAccum: 8012 case BuiltinType::Accum: 8013 case BuiltinType::LongAccum: 8014 case BuiltinType::UShortAccum: 8015 case BuiltinType::UAccum: 8016 case BuiltinType::ULongAccum: 8017 case BuiltinType::ShortFract: 8018 case BuiltinType::Fract: 8019 case BuiltinType::LongFract: 8020 case BuiltinType::UShortFract: 8021 case BuiltinType::UFract: 8022 case BuiltinType::ULongFract: 8023 case BuiltinType::SatShortAccum: 8024 case BuiltinType::SatAccum: 8025 case BuiltinType::SatLongAccum: 8026 case BuiltinType::SatUShortAccum: 8027 case BuiltinType::SatUAccum: 8028 case BuiltinType::SatULongAccum: 8029 case BuiltinType::SatShortFract: 8030 case BuiltinType::SatFract: 8031 case BuiltinType::SatLongFract: 8032 case BuiltinType::SatUShortFract: 8033 case BuiltinType::SatUFract: 8034 case BuiltinType::SatULongFract: 8035 // FIXME: potentially need @encodes for these! 8036 return ' '; 8037 8038 #define SVE_TYPE(Name, Id, SingletonId) \ 8039 case BuiltinType::Id: 8040 #include "clang/Basic/AArch64SVEACLETypes.def" 8041 #define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id: 8042 #include "clang/Basic/RISCVVTypes.def" 8043 #define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id: 8044 #include "clang/Basic/WebAssemblyReferenceTypes.def" 8045 { 8046 DiagnosticsEngine &Diags = C->getDiagnostics(); 8047 unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, 8048 "cannot yet @encode type %0"); 8049 Diags.Report(DiagID) << BT->getName(C->getPrintingPolicy()); 8050 return ' '; 8051 } 8052 8053 case BuiltinType::ObjCId: 8054 case BuiltinType::ObjCClass: 8055 case BuiltinType::ObjCSel: 8056 llvm_unreachable("@encoding ObjC primitive type"); 8057 8058 // OpenCL and placeholder types don't need @encodings. 
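  // (They can never appear in an Objective-C @encode context; if one reaches
  // this point it is a frontend bug, hence the llvm_unreachable below.)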
8059 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ 8060 case BuiltinType::Id: 8061 #include "clang/Basic/OpenCLImageTypes.def" 8062 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ 8063 case BuiltinType::Id: 8064 #include "clang/Basic/OpenCLExtensionTypes.def" 8065 case BuiltinType::OCLEvent: 8066 case BuiltinType::OCLClkEvent: 8067 case BuiltinType::OCLQueue: 8068 case BuiltinType::OCLReserveID: 8069 case BuiltinType::OCLSampler: 8070 case BuiltinType::Dependent: 8071 #define PPC_VECTOR_TYPE(Name, Id, Size) \ 8072 case BuiltinType::Id: 8073 #include "clang/Basic/PPCTypes.def" 8074 #define BUILTIN_TYPE(KIND, ID) 8075 #define PLACEHOLDER_TYPE(KIND, ID) \ 8076 case BuiltinType::KIND: 8077 #include "clang/AST/BuiltinTypes.def" 8078 llvm_unreachable("invalid builtin type for @encode"); 8079 } 8080 llvm_unreachable("invalid BuiltinType::Kind value"); 8081 } 8082 8083 static char ObjCEncodingForEnumType(const ASTContext *C, const EnumType *ET) { 8084 EnumDecl *Enum = ET->getDecl(); 8085 8086 // The encoding of an non-fixed enum type is always 'i', regardless of size. 8087 if (!Enum->isFixed()) 8088 return 'i'; 8089 8090 // The encoding of a fixed enum type matches its fixed underlying type. 8091 const auto *BT = Enum->getIntegerType()->castAs<BuiltinType>(); 8092 return getObjCEncodingForPrimitiveType(C, BT); 8093 } 8094 8095 static void EncodeBitField(const ASTContext *Ctx, std::string& S, 8096 QualType T, const FieldDecl *FD) { 8097 assert(FD->isBitField() && "not a bitfield - getObjCEncodingForTypeImpl"); 8098 S += 'b'; 8099 // The NeXT runtime encodes bit fields as b followed by the number of bits. 8100 // The GNU runtime requires more information; bitfields are encoded as b, 8101 // then the offset (in bits) of the first element, then the type of the 8102 // bitfield, then the size in bits. For example, in this structure: 8103 // 8104 // struct 8105 // { 8106 // int integer; 8107 // int flags:2; 8108 // }; 8109 // On a 32-bit system, the encoding for flags would be b2 for the NeXT 8110 // runtime, but b32i2 for the GNU runtime. The reason for this extra 8111 // information is not especially sensible, but we're stuck with it for 8112 // compatibility with GCC, although providing it breaks anything that 8113 // actually uses runtime introspection and wants to work on both runtimes... 8114 if (Ctx->getLangOpts().ObjCRuntime.isGNUFamily()) { 8115 uint64_t Offset; 8116 8117 if (const auto *IVD = dyn_cast<ObjCIvarDecl>(FD)) { 8118 Offset = Ctx->lookupFieldBitOffset(IVD->getContainingInterface(), nullptr, 8119 IVD); 8120 } else { 8121 const RecordDecl *RD = FD->getParent(); 8122 const ASTRecordLayout &RL = Ctx->getASTRecordLayout(RD); 8123 Offset = RL.getFieldOffset(FD->getFieldIndex()); 8124 } 8125 8126 S += llvm::utostr(Offset); 8127 8128 if (const auto *ET = T->getAs<EnumType>()) 8129 S += ObjCEncodingForEnumType(Ctx, ET); 8130 else { 8131 const auto *BT = T->castAs<BuiltinType>(); 8132 S += getObjCEncodingForPrimitiveType(Ctx, BT); 8133 } 8134 } 8135 S += llvm::utostr(FD->getBitWidthValue(*Ctx)); 8136 } 8137 8138 // Helper function for determining whether the encoded type string would include 8139 // a template specialization type. 
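// For instance, a class that contains a `std::vector<int>` member, or that
// derives from a class template specialization, would be flagged; callers use
// this to emit "^v" for pointers to such classes instead of expanding the
// (template-containing) name into the encoding.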
8140 static bool hasTemplateSpecializationInEncodedString(const Type *T, 8141 bool VisitBasesAndFields) { 8142 T = T->getBaseElementTypeUnsafe(); 8143 8144 if (auto *PT = T->getAs<PointerType>()) 8145 return hasTemplateSpecializationInEncodedString( 8146 PT->getPointeeType().getTypePtr(), false); 8147 8148 auto *CXXRD = T->getAsCXXRecordDecl(); 8149 8150 if (!CXXRD) 8151 return false; 8152 8153 if (isa<ClassTemplateSpecializationDecl>(CXXRD)) 8154 return true; 8155 8156 if (!CXXRD->hasDefinition() || !VisitBasesAndFields) 8157 return false; 8158 8159 for (const auto &B : CXXRD->bases()) 8160 if (hasTemplateSpecializationInEncodedString(B.getType().getTypePtr(), 8161 true)) 8162 return true; 8163 8164 for (auto *FD : CXXRD->fields()) 8165 if (hasTemplateSpecializationInEncodedString(FD->getType().getTypePtr(), 8166 true)) 8167 return true; 8168 8169 return false; 8170 } 8171 8172 // FIXME: Use SmallString for accumulating string. 8173 void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S, 8174 const ObjCEncOptions Options, 8175 const FieldDecl *FD, 8176 QualType *NotEncodedT) const { 8177 CanQualType CT = getCanonicalType(T); 8178 switch (CT->getTypeClass()) { 8179 case Type::Builtin: 8180 case Type::Enum: 8181 if (FD && FD->isBitField()) 8182 return EncodeBitField(this, S, T, FD); 8183 if (const auto *BT = dyn_cast<BuiltinType>(CT)) 8184 S += getObjCEncodingForPrimitiveType(this, BT); 8185 else 8186 S += ObjCEncodingForEnumType(this, cast<EnumType>(CT)); 8187 return; 8188 8189 case Type::Complex: 8190 S += 'j'; 8191 getObjCEncodingForTypeImpl(T->castAs<ComplexType>()->getElementType(), S, 8192 ObjCEncOptions(), 8193 /*Field=*/nullptr); 8194 return; 8195 8196 case Type::Atomic: 8197 S += 'A'; 8198 getObjCEncodingForTypeImpl(T->castAs<AtomicType>()->getValueType(), S, 8199 ObjCEncOptions(), 8200 /*Field=*/nullptr); 8201 return; 8202 8203 // encoding for pointer or reference types. 8204 case Type::Pointer: 8205 case Type::LValueReference: 8206 case Type::RValueReference: { 8207 QualType PointeeTy; 8208 if (isa<PointerType>(CT)) { 8209 const auto *PT = T->castAs<PointerType>(); 8210 if (PT->isObjCSelType()) { 8211 S += ':'; 8212 return; 8213 } 8214 PointeeTy = PT->getPointeeType(); 8215 } else { 8216 PointeeTy = T->castAs<ReferenceType>()->getPointeeType(); 8217 } 8218 8219 bool isReadOnly = false; 8220 // For historical/compatibility reasons, the read-only qualifier of the 8221 // pointee gets emitted _before_ the '^'. The read-only qualifier of 8222 // the pointer itself gets ignored, _unless_ we are looking at a typedef! 8223 // Also, do not emit the 'r' for anything but the outermost type! 8224 if (T->getAs<TypedefType>()) { 8225 if (Options.IsOutermostType() && T.isConstQualified()) { 8226 isReadOnly = true; 8227 S += 'r'; 8228 } 8229 } else if (Options.IsOutermostType()) { 8230 QualType P = PointeeTy; 8231 while (auto PT = P->getAs<PointerType>()) 8232 P = PT->getPointeeType(); 8233 if (P.isConstQualified()) { 8234 isReadOnly = true; 8235 S += 'r'; 8236 } 8237 } 8238 if (isReadOnly) { 8239 // Another legacy compatibility encoding. Some ObjC qualifier and type 8240 // combinations need to be rearranged. 8241 // Rewrite "in const" from "nr" to "rn" 8242 if (StringRef(S).ends_with("nr")) 8243 S.replace(S.end()-2, S.end(), "rn"); 8244 } 8245 8246 if (PointeeTy->isCharType()) { 8247 // char pointer types should be encoded as '*' unless it is a 8248 // type that has been typedef'd to 'BOOL'. 
8249 if (!isTypeTypedefedAsBOOL(PointeeTy)) { 8250 S += '*'; 8251 return; 8252 } 8253 } else if (const auto *RTy = PointeeTy->getAs<RecordType>()) { 8254 // GCC binary compat: Need to convert "struct objc_class *" to "#". 8255 if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_class")) { 8256 S += '#'; 8257 return; 8258 } 8259 // GCC binary compat: Need to convert "struct objc_object *" to "@". 8260 if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_object")) { 8261 S += '@'; 8262 return; 8263 } 8264 // If the encoded string for the class includes template names, just emit 8265 // "^v" for pointers to the class. 8266 if (getLangOpts().CPlusPlus && 8267 (!getLangOpts().EncodeCXXClassTemplateSpec && 8268 hasTemplateSpecializationInEncodedString( 8269 RTy, Options.ExpandPointedToStructures()))) { 8270 S += "^v"; 8271 return; 8272 } 8273 // fall through... 8274 } 8275 S += '^'; 8276 getLegacyIntegralTypeEncoding(PointeeTy); 8277 8278 ObjCEncOptions NewOptions; 8279 if (Options.ExpandPointedToStructures()) 8280 NewOptions.setExpandStructures(); 8281 getObjCEncodingForTypeImpl(PointeeTy, S, NewOptions, 8282 /*Field=*/nullptr, NotEncodedT); 8283 return; 8284 } 8285 8286 case Type::ConstantArray: 8287 case Type::IncompleteArray: 8288 case Type::VariableArray: { 8289 const auto *AT = cast<ArrayType>(CT); 8290 8291 if (isa<IncompleteArrayType>(AT) && !Options.IsStructField()) { 8292 // Incomplete arrays are encoded as a pointer to the array element. 8293 S += '^'; 8294 8295 getObjCEncodingForTypeImpl( 8296 AT->getElementType(), S, 8297 Options.keepingOnly(ObjCEncOptions().setExpandStructures()), FD); 8298 } else { 8299 S += '['; 8300 8301 if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) 8302 S += llvm::utostr(CAT->getSize().getZExtValue()); 8303 else { 8304 //Variable length arrays are encoded as a regular array with 0 elements. 8305 assert((isa<VariableArrayType>(AT) || isa<IncompleteArrayType>(AT)) && 8306 "Unknown array type!"); 8307 S += '0'; 8308 } 8309 8310 getObjCEncodingForTypeImpl( 8311 AT->getElementType(), S, 8312 Options.keepingOnly(ObjCEncOptions().setExpandStructures()), FD, 8313 NotEncodedT); 8314 S += ']'; 8315 } 8316 return; 8317 } 8318 8319 case Type::FunctionNoProto: 8320 case Type::FunctionProto: 8321 S += '?'; 8322 return; 8323 8324 case Type::Record: { 8325 RecordDecl *RDecl = cast<RecordType>(CT)->getDecl(); 8326 S += RDecl->isUnion() ? '(' : '{'; 8327 // Anonymous structures print as '?' 8328 if (const IdentifierInfo *II = RDecl->getIdentifier()) { 8329 S += II->getName(); 8330 if (const auto *Spec = dyn_cast<ClassTemplateSpecializationDecl>(RDecl)) { 8331 const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs(); 8332 llvm::raw_string_ostream OS(S); 8333 printTemplateArgumentList(OS, TemplateArgs.asArray(), 8334 getPrintingPolicy()); 8335 } 8336 } else { 8337 S += '?'; 8338 } 8339 if (Options.ExpandStructures()) { 8340 S += '='; 8341 if (!RDecl->isUnion()) { 8342 getObjCEncodingForStructureImpl(RDecl, S, FD, true, NotEncodedT); 8343 } else { 8344 for (const auto *Field : RDecl->fields()) { 8345 if (FD) { 8346 S += '"'; 8347 S += Field->getNameAsString(); 8348 S += '"'; 8349 } 8350 8351 // Special case bit-fields. 
8352 if (Field->isBitField()) { 8353 getObjCEncodingForTypeImpl(Field->getType(), S, 8354 ObjCEncOptions().setExpandStructures(), 8355 Field); 8356 } else { 8357 QualType qt = Field->getType(); 8358 getLegacyIntegralTypeEncoding(qt); 8359 getObjCEncodingForTypeImpl( 8360 qt, S, 8361 ObjCEncOptions().setExpandStructures().setIsStructField(), FD, 8362 NotEncodedT); 8363 } 8364 } 8365 } 8366 } 8367 S += RDecl->isUnion() ? ')' : '}'; 8368 return; 8369 } 8370 8371 case Type::BlockPointer: { 8372 const auto *BT = T->castAs<BlockPointerType>(); 8373 S += "@?"; // Unlike a pointer-to-function, which is "^?". 8374 if (Options.EncodeBlockParameters()) { 8375 const auto *FT = BT->getPointeeType()->castAs<FunctionType>(); 8376 8377 S += '<'; 8378 // Block return type 8379 getObjCEncodingForTypeImpl(FT->getReturnType(), S, 8380 Options.forComponentType(), FD, NotEncodedT); 8381 // Block self 8382 S += "@?"; 8383 // Block parameters 8384 if (const auto *FPT = dyn_cast<FunctionProtoType>(FT)) { 8385 for (const auto &I : FPT->param_types()) 8386 getObjCEncodingForTypeImpl(I, S, Options.forComponentType(), FD, 8387 NotEncodedT); 8388 } 8389 S += '>'; 8390 } 8391 return; 8392 } 8393 8394 case Type::ObjCObject: { 8395 // hack to match legacy encoding of *id and *Class 8396 QualType Ty = getObjCObjectPointerType(CT); 8397 if (Ty->isObjCIdType()) { 8398 S += "{objc_object=}"; 8399 return; 8400 } 8401 else if (Ty->isObjCClassType()) { 8402 S += "{objc_class=}"; 8403 return; 8404 } 8405 // TODO: Double check to make sure this intentionally falls through. 8406 [[fallthrough]]; 8407 } 8408 8409 case Type::ObjCInterface: { 8410 // Ignore protocol qualifiers when mangling at this level. 8411 // @encode(class_name) 8412 ObjCInterfaceDecl *OI = T->castAs<ObjCObjectType>()->getInterface(); 8413 S += '{'; 8414 S += OI->getObjCRuntimeNameAsString(); 8415 if (Options.ExpandStructures()) { 8416 S += '='; 8417 SmallVector<const ObjCIvarDecl*, 32> Ivars; 8418 DeepCollectObjCIvars(OI, true, Ivars); 8419 for (unsigned i = 0, e = Ivars.size(); i != e; ++i) { 8420 const FieldDecl *Field = Ivars[i]; 8421 if (Field->isBitField()) 8422 getObjCEncodingForTypeImpl(Field->getType(), S, 8423 ObjCEncOptions().setExpandStructures(), 8424 Field); 8425 else 8426 getObjCEncodingForTypeImpl(Field->getType(), S, 8427 ObjCEncOptions().setExpandStructures(), FD, 8428 NotEncodedT); 8429 } 8430 } 8431 S += '}'; 8432 return; 8433 } 8434 8435 case Type::ObjCObjectPointer: { 8436 const auto *OPT = T->castAs<ObjCObjectPointerType>(); 8437 if (OPT->isObjCIdType()) { 8438 S += '@'; 8439 return; 8440 } 8441 8442 if (OPT->isObjCClassType() || OPT->isObjCQualifiedClassType()) { 8443 // FIXME: Consider if we need to output qualifiers for 'Class<p>'. 8444 // Since this is a binary compatibility issue, need to consult with 8445 // runtime folks. Fortunately, this is a *very* obscure construct. 8446 S += '#'; 8447 return; 8448 } 8449 8450 if (OPT->isObjCQualifiedIdType()) { 8451 getObjCEncodingForTypeImpl( 8452 getObjCIdType(), S, 8453 Options.keepingOnly(ObjCEncOptions() 8454 .setExpandPointedToStructures() 8455 .setExpandStructures()), 8456 FD); 8457 if (FD || Options.EncodingProperty() || Options.EncodeClassNames()) { 8458 // Note that we do extended encoding of protocol qualifier list 8459 // Only when doing ivar or property encoding. 
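      // As a rough example, an ivar of type `id<NSCopying>` is encoded as
      // @"<NSCopying>" rather than plain '@'.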
8460 S += '"'; 8461 for (const auto *I : OPT->quals()) { 8462 S += '<'; 8463 S += I->getObjCRuntimeNameAsString(); 8464 S += '>'; 8465 } 8466 S += '"'; 8467 } 8468 return; 8469 } 8470 8471 S += '@'; 8472 if (OPT->getInterfaceDecl() && 8473 (FD || Options.EncodingProperty() || Options.EncodeClassNames())) { 8474 S += '"'; 8475 S += OPT->getInterfaceDecl()->getObjCRuntimeNameAsString(); 8476 for (const auto *I : OPT->quals()) { 8477 S += '<'; 8478 S += I->getObjCRuntimeNameAsString(); 8479 S += '>'; 8480 } 8481 S += '"'; 8482 } 8483 return; 8484 } 8485 8486 // gcc just blithely ignores member pointers. 8487 // FIXME: we should do better than that. 'M' is available. 8488 case Type::MemberPointer: 8489 // This matches gcc's encoding, even though technically it is insufficient. 8490 //FIXME. We should do a better job than gcc. 8491 case Type::Vector: 8492 case Type::ExtVector: 8493 // Until we have a coherent encoding of these three types, issue warning. 8494 if (NotEncodedT) 8495 *NotEncodedT = T; 8496 return; 8497 8498 case Type::ConstantMatrix: 8499 if (NotEncodedT) 8500 *NotEncodedT = T; 8501 return; 8502 8503 case Type::BitInt: 8504 if (NotEncodedT) 8505 *NotEncodedT = T; 8506 return; 8507 8508 // We could see an undeduced auto type here during error recovery. 8509 // Just ignore it. 8510 case Type::Auto: 8511 case Type::DeducedTemplateSpecialization: 8512 return; 8513 8514 case Type::Pipe: 8515 #define ABSTRACT_TYPE(KIND, BASE) 8516 #define TYPE(KIND, BASE) 8517 #define DEPENDENT_TYPE(KIND, BASE) \ 8518 case Type::KIND: 8519 #define NON_CANONICAL_TYPE(KIND, BASE) \ 8520 case Type::KIND: 8521 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(KIND, BASE) \ 8522 case Type::KIND: 8523 #include "clang/AST/TypeNodes.inc" 8524 llvm_unreachable("@encode for dependent type!"); 8525 } 8526 llvm_unreachable("bad type kind!"); 8527 } 8528 8529 void ASTContext::getObjCEncodingForStructureImpl(RecordDecl *RDecl, 8530 std::string &S, 8531 const FieldDecl *FD, 8532 bool includeVBases, 8533 QualType *NotEncodedT) const { 8534 assert(RDecl && "Expected non-null RecordDecl"); 8535 assert(!RDecl->isUnion() && "Should not be called for unions"); 8536 if (!RDecl->getDefinition() || RDecl->getDefinition()->isInvalidDecl()) 8537 return; 8538 8539 const auto *CXXRec = dyn_cast<CXXRecordDecl>(RDecl); 8540 std::multimap<uint64_t, NamedDecl *> FieldOrBaseOffsets; 8541 const ASTRecordLayout &layout = getASTRecordLayout(RDecl); 8542 8543 if (CXXRec) { 8544 for (const auto &BI : CXXRec->bases()) { 8545 if (!BI.isVirtual()) { 8546 CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl(); 8547 if (base->isEmpty()) 8548 continue; 8549 uint64_t offs = toBits(layout.getBaseClassOffset(base)); 8550 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs), 8551 std::make_pair(offs, base)); 8552 } 8553 } 8554 } 8555 8556 for (FieldDecl *Field : RDecl->fields()) { 8557 if (!Field->isZeroLengthBitField(*this) && Field->isZeroSize(*this)) 8558 continue; 8559 uint64_t offs = layout.getFieldOffset(Field->getFieldIndex()); 8560 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs), 8561 std::make_pair(offs, Field)); 8562 } 8563 8564 if (CXXRec && includeVBases) { 8565 for (const auto &BI : CXXRec->vbases()) { 8566 CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl(); 8567 if (base->isEmpty()) 8568 continue; 8569 uint64_t offs = toBits(layout.getVBaseClassOffset(base)); 8570 if (offs >= uint64_t(toBits(layout.getNonVirtualSize())) && 8571 FieldOrBaseOffsets.find(offs) == FieldOrBaseOffsets.end()) 8572 
FieldOrBaseOffsets.insert(FieldOrBaseOffsets.end(), 8573 std::make_pair(offs, base)); 8574 } 8575 } 8576 8577 CharUnits size; 8578 if (CXXRec) { 8579 size = includeVBases ? layout.getSize() : layout.getNonVirtualSize(); 8580 } else { 8581 size = layout.getSize(); 8582 } 8583 8584 #ifndef NDEBUG 8585 uint64_t CurOffs = 0; 8586 #endif 8587 std::multimap<uint64_t, NamedDecl *>::iterator 8588 CurLayObj = FieldOrBaseOffsets.begin(); 8589 8590 if (CXXRec && CXXRec->isDynamicClass() && 8591 (CurLayObj == FieldOrBaseOffsets.end() || CurLayObj->first != 0)) { 8592 if (FD) { 8593 S += "\"_vptr$"; 8594 std::string recname = CXXRec->getNameAsString(); 8595 if (recname.empty()) recname = "?"; 8596 S += recname; 8597 S += '"'; 8598 } 8599 S += "^^?"; 8600 #ifndef NDEBUG 8601 CurOffs += getTypeSize(VoidPtrTy); 8602 #endif 8603 } 8604 8605 if (!RDecl->hasFlexibleArrayMember()) { 8606 // Mark the end of the structure. 8607 uint64_t offs = toBits(size); 8608 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs), 8609 std::make_pair(offs, nullptr)); 8610 } 8611 8612 for (; CurLayObj != FieldOrBaseOffsets.end(); ++CurLayObj) { 8613 #ifndef NDEBUG 8614 assert(CurOffs <= CurLayObj->first); 8615 if (CurOffs < CurLayObj->first) { 8616 uint64_t padding = CurLayObj->first - CurOffs; 8617 // FIXME: There doesn't seem to be a way to indicate in the encoding that 8618 // packing/alignment of members is different that normal, in which case 8619 // the encoding will be out-of-sync with the real layout. 8620 // If the runtime switches to just consider the size of types without 8621 // taking into account alignment, we could make padding explicit in the 8622 // encoding (e.g. using arrays of chars). The encoding strings would be 8623 // longer then though. 8624 CurOffs += padding; 8625 } 8626 #endif 8627 8628 NamedDecl *dcl = CurLayObj->second; 8629 if (!dcl) 8630 break; // reached end of structure. 8631 8632 if (auto *base = dyn_cast<CXXRecordDecl>(dcl)) { 8633 // We expand the bases without their virtual bases since those are going 8634 // in the initial structure. Note that this differs from gcc which 8635 // expands virtual bases each time one is encountered in the hierarchy, 8636 // making the encoding type bigger than it really is. 
8637 getObjCEncodingForStructureImpl(base, S, FD, /*includeVBases*/false, 8638 NotEncodedT); 8639 assert(!base->isEmpty()); 8640 #ifndef NDEBUG 8641 CurOffs += toBits(getASTRecordLayout(base).getNonVirtualSize()); 8642 #endif 8643 } else { 8644 const auto *field = cast<FieldDecl>(dcl); 8645 if (FD) { 8646 S += '"'; 8647 S += field->getNameAsString(); 8648 S += '"'; 8649 } 8650 8651 if (field->isBitField()) { 8652 EncodeBitField(this, S, field->getType(), field); 8653 #ifndef NDEBUG 8654 CurOffs += field->getBitWidthValue(*this); 8655 #endif 8656 } else { 8657 QualType qt = field->getType(); 8658 getLegacyIntegralTypeEncoding(qt); 8659 getObjCEncodingForTypeImpl( 8660 qt, S, ObjCEncOptions().setExpandStructures().setIsStructField(), 8661 FD, NotEncodedT); 8662 #ifndef NDEBUG 8663 CurOffs += getTypeSize(field->getType()); 8664 #endif 8665 } 8666 } 8667 } 8668 } 8669 8670 void ASTContext::getObjCEncodingForTypeQualifier(Decl::ObjCDeclQualifier QT, 8671 std::string& S) const { 8672 if (QT & Decl::OBJC_TQ_In) 8673 S += 'n'; 8674 if (QT & Decl::OBJC_TQ_Inout) 8675 S += 'N'; 8676 if (QT & Decl::OBJC_TQ_Out) 8677 S += 'o'; 8678 if (QT & Decl::OBJC_TQ_Bycopy) 8679 S += 'O'; 8680 if (QT & Decl::OBJC_TQ_Byref) 8681 S += 'R'; 8682 if (QT & Decl::OBJC_TQ_Oneway) 8683 S += 'V'; 8684 } 8685 8686 TypedefDecl *ASTContext::getObjCIdDecl() const { 8687 if (!ObjCIdDecl) { 8688 QualType T = getObjCObjectType(ObjCBuiltinIdTy, {}, {}); 8689 T = getObjCObjectPointerType(T); 8690 ObjCIdDecl = buildImplicitTypedef(T, "id"); 8691 } 8692 return ObjCIdDecl; 8693 } 8694 8695 TypedefDecl *ASTContext::getObjCSelDecl() const { 8696 if (!ObjCSelDecl) { 8697 QualType T = getPointerType(ObjCBuiltinSelTy); 8698 ObjCSelDecl = buildImplicitTypedef(T, "SEL"); 8699 } 8700 return ObjCSelDecl; 8701 } 8702 8703 TypedefDecl *ASTContext::getObjCClassDecl() const { 8704 if (!ObjCClassDecl) { 8705 QualType T = getObjCObjectType(ObjCBuiltinClassTy, {}, {}); 8706 T = getObjCObjectPointerType(T); 8707 ObjCClassDecl = buildImplicitTypedef(T, "Class"); 8708 } 8709 return ObjCClassDecl; 8710 } 8711 8712 ObjCInterfaceDecl *ASTContext::getObjCProtocolDecl() const { 8713 if (!ObjCProtocolClassDecl) { 8714 ObjCProtocolClassDecl 8715 = ObjCInterfaceDecl::Create(*this, getTranslationUnitDecl(), 8716 SourceLocation(), 8717 &Idents.get("Protocol"), 8718 /*typeParamList=*/nullptr, 8719 /*PrevDecl=*/nullptr, 8720 SourceLocation(), true); 8721 } 8722 8723 return ObjCProtocolClassDecl; 8724 } 8725 8726 //===----------------------------------------------------------------------===// 8727 // __builtin_va_list Construction Functions 8728 //===----------------------------------------------------------------------===// 8729 8730 static TypedefDecl *CreateCharPtrNamedVaListDecl(const ASTContext *Context, 8731 StringRef Name) { 8732 // typedef char* __builtin[_ms]_va_list; 8733 QualType T = Context->getPointerType(Context->CharTy); 8734 return Context->buildImplicitTypedef(T, Name); 8735 } 8736 8737 static TypedefDecl *CreateMSVaListDecl(const ASTContext *Context) { 8738 return CreateCharPtrNamedVaListDecl(Context, "__builtin_ms_va_list"); 8739 } 8740 8741 static TypedefDecl *CreateCharPtrBuiltinVaListDecl(const ASTContext *Context) { 8742 return CreateCharPtrNamedVaListDecl(Context, "__builtin_va_list"); 8743 } 8744 8745 static TypedefDecl *CreateVoidPtrBuiltinVaListDecl(const ASTContext *Context) { 8746 // typedef void* __builtin_va_list; 8747 QualType T = Context->getPointerType(Context->VoidTy); 8748 return Context->buildImplicitTypedef(T, 
"__builtin_va_list"); 8749 } 8750 8751 static TypedefDecl * 8752 CreateAArch64ABIBuiltinVaListDecl(const ASTContext *Context) { 8753 // struct __va_list 8754 RecordDecl *VaListTagDecl = Context->buildImplicitRecord("__va_list"); 8755 if (Context->getLangOpts().CPlusPlus) { 8756 // namespace std { struct __va_list { 8757 auto *NS = NamespaceDecl::Create( 8758 const_cast<ASTContext &>(*Context), Context->getTranslationUnitDecl(), 8759 /*Inline=*/false, SourceLocation(), SourceLocation(), 8760 &Context->Idents.get("std"), 8761 /*PrevDecl=*/nullptr, /*Nested=*/false); 8762 NS->setImplicit(); 8763 VaListTagDecl->setDeclContext(NS); 8764 } 8765 8766 VaListTagDecl->startDefinition(); 8767 8768 const size_t NumFields = 5; 8769 QualType FieldTypes[NumFields]; 8770 const char *FieldNames[NumFields]; 8771 8772 // void *__stack; 8773 FieldTypes[0] = Context->getPointerType(Context->VoidTy); 8774 FieldNames[0] = "__stack"; 8775 8776 // void *__gr_top; 8777 FieldTypes[1] = Context->getPointerType(Context->VoidTy); 8778 FieldNames[1] = "__gr_top"; 8779 8780 // void *__vr_top; 8781 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 8782 FieldNames[2] = "__vr_top"; 8783 8784 // int __gr_offs; 8785 FieldTypes[3] = Context->IntTy; 8786 FieldNames[3] = "__gr_offs"; 8787 8788 // int __vr_offs; 8789 FieldTypes[4] = Context->IntTy; 8790 FieldNames[4] = "__vr_offs"; 8791 8792 // Create fields 8793 for (unsigned i = 0; i < NumFields; ++i) { 8794 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 8795 VaListTagDecl, 8796 SourceLocation(), 8797 SourceLocation(), 8798 &Context->Idents.get(FieldNames[i]), 8799 FieldTypes[i], /*TInfo=*/nullptr, 8800 /*BitWidth=*/nullptr, 8801 /*Mutable=*/false, 8802 ICIS_NoInit); 8803 Field->setAccess(AS_public); 8804 VaListTagDecl->addDecl(Field); 8805 } 8806 VaListTagDecl->completeDefinition(); 8807 Context->VaListTagDecl = VaListTagDecl; 8808 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 8809 8810 // } __builtin_va_list; 8811 return Context->buildImplicitTypedef(VaListTagType, "__builtin_va_list"); 8812 } 8813 8814 static TypedefDecl *CreatePowerABIBuiltinVaListDecl(const ASTContext *Context) { 8815 // typedef struct __va_list_tag { 8816 RecordDecl *VaListTagDecl; 8817 8818 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 8819 VaListTagDecl->startDefinition(); 8820 8821 const size_t NumFields = 5; 8822 QualType FieldTypes[NumFields]; 8823 const char *FieldNames[NumFields]; 8824 8825 // unsigned char gpr; 8826 FieldTypes[0] = Context->UnsignedCharTy; 8827 FieldNames[0] = "gpr"; 8828 8829 // unsigned char fpr; 8830 FieldTypes[1] = Context->UnsignedCharTy; 8831 FieldNames[1] = "fpr"; 8832 8833 // unsigned short reserved; 8834 FieldTypes[2] = Context->UnsignedShortTy; 8835 FieldNames[2] = "reserved"; 8836 8837 // void* overflow_arg_area; 8838 FieldTypes[3] = Context->getPointerType(Context->VoidTy); 8839 FieldNames[3] = "overflow_arg_area"; 8840 8841 // void* reg_save_area; 8842 FieldTypes[4] = Context->getPointerType(Context->VoidTy); 8843 FieldNames[4] = "reg_save_area"; 8844 8845 // Create fields 8846 for (unsigned i = 0; i < NumFields; ++i) { 8847 FieldDecl *Field = FieldDecl::Create(*Context, VaListTagDecl, 8848 SourceLocation(), 8849 SourceLocation(), 8850 &Context->Idents.get(FieldNames[i]), 8851 FieldTypes[i], /*TInfo=*/nullptr, 8852 /*BitWidth=*/nullptr, 8853 /*Mutable=*/false, 8854 ICIS_NoInit); 8855 Field->setAccess(AS_public); 8856 VaListTagDecl->addDecl(Field); 8857 } 8858 VaListTagDecl->completeDefinition(); 8859 
Context->VaListTagDecl = VaListTagDecl; 8860 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 8861 8862 // } __va_list_tag; 8863 TypedefDecl *VaListTagTypedefDecl = 8864 Context->buildImplicitTypedef(VaListTagType, "__va_list_tag"); 8865 8866 QualType VaListTagTypedefType = 8867 Context->getTypedefType(VaListTagTypedefDecl); 8868 8869 // typedef __va_list_tag __builtin_va_list[1]; 8870 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 8871 QualType VaListTagArrayType = Context->getConstantArrayType( 8872 VaListTagTypedefType, Size, nullptr, ArraySizeModifier::Normal, 0); 8873 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 8874 } 8875 8876 static TypedefDecl * 8877 CreateX86_64ABIBuiltinVaListDecl(const ASTContext *Context) { 8878 // struct __va_list_tag { 8879 RecordDecl *VaListTagDecl; 8880 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 8881 VaListTagDecl->startDefinition(); 8882 8883 const size_t NumFields = 4; 8884 QualType FieldTypes[NumFields]; 8885 const char *FieldNames[NumFields]; 8886 8887 // unsigned gp_offset; 8888 FieldTypes[0] = Context->UnsignedIntTy; 8889 FieldNames[0] = "gp_offset"; 8890 8891 // unsigned fp_offset; 8892 FieldTypes[1] = Context->UnsignedIntTy; 8893 FieldNames[1] = "fp_offset"; 8894 8895 // void* overflow_arg_area; 8896 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 8897 FieldNames[2] = "overflow_arg_area"; 8898 8899 // void* reg_save_area; 8900 FieldTypes[3] = Context->getPointerType(Context->VoidTy); 8901 FieldNames[3] = "reg_save_area"; 8902 8903 // Create fields 8904 for (unsigned i = 0; i < NumFields; ++i) { 8905 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 8906 VaListTagDecl, 8907 SourceLocation(), 8908 SourceLocation(), 8909 &Context->Idents.get(FieldNames[i]), 8910 FieldTypes[i], /*TInfo=*/nullptr, 8911 /*BitWidth=*/nullptr, 8912 /*Mutable=*/false, 8913 ICIS_NoInit); 8914 Field->setAccess(AS_public); 8915 VaListTagDecl->addDecl(Field); 8916 } 8917 VaListTagDecl->completeDefinition(); 8918 Context->VaListTagDecl = VaListTagDecl; 8919 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 8920 8921 // }; 8922 8923 // typedef struct __va_list_tag __builtin_va_list[1]; 8924 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 8925 QualType VaListTagArrayType = Context->getConstantArrayType( 8926 VaListTagType, Size, nullptr, ArraySizeModifier::Normal, 0); 8927 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 8928 } 8929 8930 static TypedefDecl *CreatePNaClABIBuiltinVaListDecl(const ASTContext *Context) { 8931 // typedef int __builtin_va_list[4]; 8932 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 4); 8933 QualType IntArrayType = Context->getConstantArrayType( 8934 Context->IntTy, Size, nullptr, ArraySizeModifier::Normal, 0); 8935 return Context->buildImplicitTypedef(IntArrayType, "__builtin_va_list"); 8936 } 8937 8938 static TypedefDecl * 8939 CreateAAPCSABIBuiltinVaListDecl(const ASTContext *Context) { 8940 // struct __va_list 8941 RecordDecl *VaListDecl = Context->buildImplicitRecord("__va_list"); 8942 if (Context->getLangOpts().CPlusPlus) { 8943 // namespace std { struct __va_list { 8944 NamespaceDecl *NS; 8945 NS = NamespaceDecl::Create(const_cast<ASTContext &>(*Context), 8946 Context->getTranslationUnitDecl(), 8947 /*Inline=*/false, SourceLocation(), 8948 SourceLocation(), &Context->Idents.get("std"), 8949 /*PrevDecl=*/nullptr, /*Nested=*/false); 8950 
NS->setImplicit(); 8951 VaListDecl->setDeclContext(NS); 8952 } 8953 8954 VaListDecl->startDefinition(); 8955 8956 // void * __ap; 8957 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 8958 VaListDecl, 8959 SourceLocation(), 8960 SourceLocation(), 8961 &Context->Idents.get("__ap"), 8962 Context->getPointerType(Context->VoidTy), 8963 /*TInfo=*/nullptr, 8964 /*BitWidth=*/nullptr, 8965 /*Mutable=*/false, 8966 ICIS_NoInit); 8967 Field->setAccess(AS_public); 8968 VaListDecl->addDecl(Field); 8969 8970 // }; 8971 VaListDecl->completeDefinition(); 8972 Context->VaListTagDecl = VaListDecl; 8973 8974 // typedef struct __va_list __builtin_va_list; 8975 QualType T = Context->getRecordType(VaListDecl); 8976 return Context->buildImplicitTypedef(T, "__builtin_va_list"); 8977 } 8978 8979 static TypedefDecl * 8980 CreateSystemZBuiltinVaListDecl(const ASTContext *Context) { 8981 // struct __va_list_tag { 8982 RecordDecl *VaListTagDecl; 8983 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 8984 VaListTagDecl->startDefinition(); 8985 8986 const size_t NumFields = 4; 8987 QualType FieldTypes[NumFields]; 8988 const char *FieldNames[NumFields]; 8989 8990 // long __gpr; 8991 FieldTypes[0] = Context->LongTy; 8992 FieldNames[0] = "__gpr"; 8993 8994 // long __fpr; 8995 FieldTypes[1] = Context->LongTy; 8996 FieldNames[1] = "__fpr"; 8997 8998 // void *__overflow_arg_area; 8999 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 9000 FieldNames[2] = "__overflow_arg_area"; 9001 9002 // void *__reg_save_area; 9003 FieldTypes[3] = Context->getPointerType(Context->VoidTy); 9004 FieldNames[3] = "__reg_save_area"; 9005 9006 // Create fields 9007 for (unsigned i = 0; i < NumFields; ++i) { 9008 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 9009 VaListTagDecl, 9010 SourceLocation(), 9011 SourceLocation(), 9012 &Context->Idents.get(FieldNames[i]), 9013 FieldTypes[i], /*TInfo=*/nullptr, 9014 /*BitWidth=*/nullptr, 9015 /*Mutable=*/false, 9016 ICIS_NoInit); 9017 Field->setAccess(AS_public); 9018 VaListTagDecl->addDecl(Field); 9019 } 9020 VaListTagDecl->completeDefinition(); 9021 Context->VaListTagDecl = VaListTagDecl; 9022 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 9023 9024 // }; 9025 9026 // typedef __va_list_tag __builtin_va_list[1]; 9027 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 9028 QualType VaListTagArrayType = Context->getConstantArrayType( 9029 VaListTagType, Size, nullptr, ArraySizeModifier::Normal, 0); 9030 9031 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 9032 } 9033 9034 static TypedefDecl *CreateHexagonBuiltinVaListDecl(const ASTContext *Context) { 9035 // typedef struct __va_list_tag { 9036 RecordDecl *VaListTagDecl; 9037 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 9038 VaListTagDecl->startDefinition(); 9039 9040 const size_t NumFields = 3; 9041 QualType FieldTypes[NumFields]; 9042 const char *FieldNames[NumFields]; 9043 9044 // void *CurrentSavedRegisterArea; 9045 FieldTypes[0] = Context->getPointerType(Context->VoidTy); 9046 FieldNames[0] = "__current_saved_reg_area_pointer"; 9047 9048 // void *SavedRegAreaEnd; 9049 FieldTypes[1] = Context->getPointerType(Context->VoidTy); 9050 FieldNames[1] = "__saved_reg_area_end_pointer"; 9051 9052 // void *OverflowArea; 9053 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 9054 FieldNames[2] = "__overflow_area_pointer"; 9055 9056 // Create fields 9057 for (unsigned i = 0; i < NumFields; ++i) { 9058 
FieldDecl *Field = FieldDecl::Create( 9059 const_cast<ASTContext &>(*Context), VaListTagDecl, SourceLocation(), 9060 SourceLocation(), &Context->Idents.get(FieldNames[i]), FieldTypes[i], 9061 /*TInfo=*/nullptr, 9062 /*BitWidth=*/nullptr, 9063 /*Mutable=*/false, ICIS_NoInit); 9064 Field->setAccess(AS_public); 9065 VaListTagDecl->addDecl(Field); 9066 } 9067 VaListTagDecl->completeDefinition(); 9068 Context->VaListTagDecl = VaListTagDecl; 9069 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 9070 9071 // } __va_list_tag; 9072 TypedefDecl *VaListTagTypedefDecl = 9073 Context->buildImplicitTypedef(VaListTagType, "__va_list_tag"); 9074 9075 QualType VaListTagTypedefType = Context->getTypedefType(VaListTagTypedefDecl); 9076 9077 // typedef __va_list_tag __builtin_va_list[1]; 9078 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 9079 QualType VaListTagArrayType = Context->getConstantArrayType( 9080 VaListTagTypedefType, Size, nullptr, ArraySizeModifier::Normal, 0); 9081 9082 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 9083 } 9084 9085 static TypedefDecl *CreateVaListDecl(const ASTContext *Context, 9086 TargetInfo::BuiltinVaListKind Kind) { 9087 switch (Kind) { 9088 case TargetInfo::CharPtrBuiltinVaList: 9089 return CreateCharPtrBuiltinVaListDecl(Context); 9090 case TargetInfo::VoidPtrBuiltinVaList: 9091 return CreateVoidPtrBuiltinVaListDecl(Context); 9092 case TargetInfo::AArch64ABIBuiltinVaList: 9093 return CreateAArch64ABIBuiltinVaListDecl(Context); 9094 case TargetInfo::PowerABIBuiltinVaList: 9095 return CreatePowerABIBuiltinVaListDecl(Context); 9096 case TargetInfo::X86_64ABIBuiltinVaList: 9097 return CreateX86_64ABIBuiltinVaListDecl(Context); 9098 case TargetInfo::PNaClABIBuiltinVaList: 9099 return CreatePNaClABIBuiltinVaListDecl(Context); 9100 case TargetInfo::AAPCSABIBuiltinVaList: 9101 return CreateAAPCSABIBuiltinVaListDecl(Context); 9102 case TargetInfo::SystemZBuiltinVaList: 9103 return CreateSystemZBuiltinVaListDecl(Context); 9104 case TargetInfo::HexagonBuiltinVaList: 9105 return CreateHexagonBuiltinVaListDecl(Context); 9106 } 9107 9108 llvm_unreachable("Unhandled __builtin_va_list type kind"); 9109 } 9110 9111 TypedefDecl *ASTContext::getBuiltinVaListDecl() const { 9112 if (!BuiltinVaListDecl) { 9113 BuiltinVaListDecl = CreateVaListDecl(this, Target->getBuiltinVaListKind()); 9114 assert(BuiltinVaListDecl->isImplicit()); 9115 } 9116 9117 return BuiltinVaListDecl; 9118 } 9119 9120 Decl *ASTContext::getVaListTagDecl() const { 9121 // Force the creation of VaListTagDecl by building the __builtin_va_list 9122 // declaration. 9123 if (!VaListTagDecl) 9124 (void)getBuiltinVaListDecl(); 9125 9126 return VaListTagDecl; 9127 } 9128 9129 TypedefDecl *ASTContext::getBuiltinMSVaListDecl() const { 9130 if (!BuiltinMSVaListDecl) 9131 BuiltinMSVaListDecl = CreateMSVaListDecl(this); 9132 9133 return BuiltinMSVaListDecl; 9134 } 9135 9136 bool ASTContext::canBuiltinBeRedeclared(const FunctionDecl *FD) const { 9137 // Allow redecl custom type checking builtin for HLSL. 
9138 if (LangOpts.HLSL && FD->getBuiltinID() != Builtin::NotBuiltin && 9139 BuiltinInfo.hasCustomTypechecking(FD->getBuiltinID())) 9140 return true; 9141 return BuiltinInfo.canBeRedeclared(FD->getBuiltinID()); 9142 } 9143 9144 void ASTContext::setObjCConstantStringInterface(ObjCInterfaceDecl *Decl) { 9145 assert(ObjCConstantStringType.isNull() && 9146 "'NSConstantString' type already set!"); 9147 9148 ObjCConstantStringType = getObjCInterfaceType(Decl); 9149 } 9150 9151 /// Retrieve the template name that corresponds to a non-empty 9152 /// lookup. 9153 TemplateName 9154 ASTContext::getOverloadedTemplateName(UnresolvedSetIterator Begin, 9155 UnresolvedSetIterator End) const { 9156 unsigned size = End - Begin; 9157 assert(size > 1 && "set is not overloaded!"); 9158 9159 void *memory = Allocate(sizeof(OverloadedTemplateStorage) + 9160 size * sizeof(FunctionTemplateDecl*)); 9161 auto *OT = new (memory) OverloadedTemplateStorage(size); 9162 9163 NamedDecl **Storage = OT->getStorage(); 9164 for (UnresolvedSetIterator I = Begin; I != End; ++I) { 9165 NamedDecl *D = *I; 9166 assert(isa<FunctionTemplateDecl>(D) || 9167 isa<UnresolvedUsingValueDecl>(D) || 9168 (isa<UsingShadowDecl>(D) && 9169 isa<FunctionTemplateDecl>(D->getUnderlyingDecl()))); 9170 *Storage++ = D; 9171 } 9172 9173 return TemplateName(OT); 9174 } 9175 9176 /// Retrieve a template name representing an unqualified-id that has been 9177 /// assumed to name a template for ADL purposes. 9178 TemplateName ASTContext::getAssumedTemplateName(DeclarationName Name) const { 9179 auto *OT = new (*this) AssumedTemplateStorage(Name); 9180 return TemplateName(OT); 9181 } 9182 9183 /// Retrieve the template name that represents a qualified 9184 /// template name such as \c std::vector. 9185 TemplateName ASTContext::getQualifiedTemplateName(NestedNameSpecifier *NNS, 9186 bool TemplateKeyword, 9187 TemplateName Template) const { 9188 assert(NNS && "Missing nested-name-specifier in qualified template name"); 9189 9190 // FIXME: Canonicalization? 9191 llvm::FoldingSetNodeID ID; 9192 QualifiedTemplateName::Profile(ID, NNS, TemplateKeyword, Template); 9193 9194 void *InsertPos = nullptr; 9195 QualifiedTemplateName *QTN = 9196 QualifiedTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 9197 if (!QTN) { 9198 QTN = new (*this, alignof(QualifiedTemplateName)) 9199 QualifiedTemplateName(NNS, TemplateKeyword, Template); 9200 QualifiedTemplateNames.InsertNode(QTN, InsertPos); 9201 } 9202 9203 return TemplateName(QTN); 9204 } 9205 9206 /// Retrieve the template name that represents a dependent 9207 /// template name such as \c MetaFun::template apply. 
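/// The returned name is uniqued in DependentTemplateNames; when the
/// nested-name-specifier is not already canonical, a canonical form built
/// from the canonical nested-name-specifier is computed first and stored
/// alongside it.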
9208 TemplateName 9209 ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS, 9210 const IdentifierInfo *Name) const { 9211 assert((!NNS || NNS->isDependent()) && 9212 "Nested name specifier must be dependent"); 9213 9214 llvm::FoldingSetNodeID ID; 9215 DependentTemplateName::Profile(ID, NNS, Name); 9216 9217 void *InsertPos = nullptr; 9218 DependentTemplateName *QTN = 9219 DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 9220 9221 if (QTN) 9222 return TemplateName(QTN); 9223 9224 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 9225 if (CanonNNS == NNS) { 9226 QTN = new (*this, alignof(DependentTemplateName)) 9227 DependentTemplateName(NNS, Name); 9228 } else { 9229 TemplateName Canon = getDependentTemplateName(CanonNNS, Name); 9230 QTN = new (*this, alignof(DependentTemplateName)) 9231 DependentTemplateName(NNS, Name, Canon); 9232 DependentTemplateName *CheckQTN = 9233 DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 9234 assert(!CheckQTN && "Dependent type name canonicalization broken"); 9235 (void)CheckQTN; 9236 } 9237 9238 DependentTemplateNames.InsertNode(QTN, InsertPos); 9239 return TemplateName(QTN); 9240 } 9241 9242 /// Retrieve the template name that represents a dependent 9243 /// template name such as \c MetaFun::template operator+. 9244 TemplateName 9245 ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS, 9246 OverloadedOperatorKind Operator) const { 9247 assert((!NNS || NNS->isDependent()) && 9248 "Nested name specifier must be dependent"); 9249 9250 llvm::FoldingSetNodeID ID; 9251 DependentTemplateName::Profile(ID, NNS, Operator); 9252 9253 void *InsertPos = nullptr; 9254 DependentTemplateName *QTN 9255 = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 9256 9257 if (QTN) 9258 return TemplateName(QTN); 9259 9260 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 9261 if (CanonNNS == NNS) { 9262 QTN = new (*this, alignof(DependentTemplateName)) 9263 DependentTemplateName(NNS, Operator); 9264 } else { 9265 TemplateName Canon = getDependentTemplateName(CanonNNS, Operator); 9266 QTN = new (*this, alignof(DependentTemplateName)) 9267 DependentTemplateName(NNS, Operator, Canon); 9268 9269 DependentTemplateName *CheckQTN 9270 = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 9271 assert(!CheckQTN && "Dependent template name canonicalization broken"); 9272 (void)CheckQTN; 9273 } 9274 9275 DependentTemplateNames.InsertNode(QTN, InsertPos); 9276 return TemplateName(QTN); 9277 } 9278 9279 TemplateName ASTContext::getSubstTemplateTemplateParm( 9280 TemplateName Replacement, Decl *AssociatedDecl, unsigned Index, 9281 std::optional<unsigned> PackIndex) const { 9282 llvm::FoldingSetNodeID ID; 9283 SubstTemplateTemplateParmStorage::Profile(ID, Replacement, AssociatedDecl, 9284 Index, PackIndex); 9285 9286 void *insertPos = nullptr; 9287 SubstTemplateTemplateParmStorage *subst 9288 = SubstTemplateTemplateParms.FindNodeOrInsertPos(ID, insertPos); 9289 9290 if (!subst) { 9291 subst = new (*this) SubstTemplateTemplateParmStorage( 9292 Replacement, AssociatedDecl, Index, PackIndex); 9293 SubstTemplateTemplateParms.InsertNode(subst, insertPos); 9294 } 9295 9296 return TemplateName(subst); 9297 } 9298 9299 TemplateName 9300 ASTContext::getSubstTemplateTemplateParmPack(const TemplateArgument &ArgPack, 9301 Decl *AssociatedDecl, 9302 unsigned Index, bool Final) const { 9303 auto &Self = const_cast<ASTContext &>(*this); 9304 llvm::FoldingSetNodeID ID; 9305 
SubstTemplateTemplateParmPackStorage::Profile(ID, Self, ArgPack,
9306 AssociatedDecl, Index, Final);
9307
9308 void *InsertPos = nullptr;
9309 SubstTemplateTemplateParmPackStorage *Subst
9310 = SubstTemplateTemplateParmPacks.FindNodeOrInsertPos(ID, InsertPos);
9311
9312 if (!Subst) {
9313 Subst = new (*this) SubstTemplateTemplateParmPackStorage(
9314 ArgPack.pack_elements(), AssociatedDecl, Index, Final);
9315 SubstTemplateTemplateParmPacks.InsertNode(Subst, InsertPos);
9316 }
9317
9318 return TemplateName(Subst);
9319 }
9320
9321 /// getFromTargetType - Given one of the integer types provided by
9322 /// TargetInfo, produce the corresponding type. The unsigned @p Type
9323 /// is actually a value of type @c TargetInfo::IntType.
9324 CanQualType ASTContext::getFromTargetType(unsigned Type) const {
9325 switch (Type) {
9326 case TargetInfo::NoInt: return {};
9327 case TargetInfo::SignedChar: return SignedCharTy;
9328 case TargetInfo::UnsignedChar: return UnsignedCharTy;
9329 case TargetInfo::SignedShort: return ShortTy;
9330 case TargetInfo::UnsignedShort: return UnsignedShortTy;
9331 case TargetInfo::SignedInt: return IntTy;
9332 case TargetInfo::UnsignedInt: return UnsignedIntTy;
9333 case TargetInfo::SignedLong: return LongTy;
9334 case TargetInfo::UnsignedLong: return UnsignedLongTy;
9335 case TargetInfo::SignedLongLong: return LongLongTy;
9336 case TargetInfo::UnsignedLongLong: return UnsignedLongLongTy;
9337 }
9338
9339 llvm_unreachable("Unhandled TargetInfo::IntType value");
9340 }
9341
9342 //===----------------------------------------------------------------------===//
9343 // Type Predicates.
9344 //===----------------------------------------------------------------------===//
9345
9346 /// getObjCGCAttrKind - Returns one of GCNone, Weak or Strong, the Objective-C
9347 /// garbage collection attribute of the given type.
9348 ///
9349 Qualifiers::GC ASTContext::getObjCGCAttrKind(QualType Ty) const {
9350 if (getLangOpts().getGC() == LangOptions::NonGC)
9351 return Qualifiers::GCNone;
9352
9353 assert(getLangOpts().ObjC);
9354 Qualifiers::GC GCAttrs = Ty.getObjCGCAttr();
9355
9356 // Default behaviour under Objective-C's GC is for ObjC pointers
9357 // (or pointers to them) to be treated as though they were declared
9358 // as __strong.
9359 if (GCAttrs == Qualifiers::GCNone) {
9360 if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType())
9361 return Qualifiers::Strong;
9362 else if (Ty->isPointerType())
9363 return getObjCGCAttrKind(Ty->castAs<PointerType>()->getPointeeType());
9364 } else {
9365 // It's not valid to set GC attributes on anything that isn't a
9366 // pointer.
9367 #ifndef NDEBUG
9368 QualType CT = Ty->getCanonicalTypeInternal();
9369 while (const auto *AT = dyn_cast<ArrayType>(CT))
9370 CT = AT->getElementType();
9371 assert(CT->isAnyPointerType() || CT->isBlockPointerType());
9372 #endif
9373 }
9374 return GCAttrs;
9375 }
9376
9377 //===----------------------------------------------------------------------===//
9378 // Type Compatibility Testing
9379 //===----------------------------------------------------------------------===//
9380
9381 /// areCompatVectorTypes - Return true if the two specified vector types are
9382 /// compatible.
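///
/// Illustrative sketch (the typedef names are assumptions, not from this
/// file):
/// \code
///   typedef int v4si __attribute__((vector_size(16)));
///   typedef int int4 __attribute__((vector_size(16)));
/// \endcode
/// Both canonicalize to a 4 x int vector, so they compare as compatible here;
/// a 4 x float or an 8 x int vector would not.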
9383 static bool areCompatVectorTypes(const VectorType *LHS, 9384 const VectorType *RHS) { 9385 assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified()); 9386 return LHS->getElementType() == RHS->getElementType() && 9387 LHS->getNumElements() == RHS->getNumElements(); 9388 } 9389 9390 /// areCompatMatrixTypes - Return true if the two specified matrix types are 9391 /// compatible. 9392 static bool areCompatMatrixTypes(const ConstantMatrixType *LHS, 9393 const ConstantMatrixType *RHS) { 9394 assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified()); 9395 return LHS->getElementType() == RHS->getElementType() && 9396 LHS->getNumRows() == RHS->getNumRows() && 9397 LHS->getNumColumns() == RHS->getNumColumns(); 9398 } 9399 9400 bool ASTContext::areCompatibleVectorTypes(QualType FirstVec, 9401 QualType SecondVec) { 9402 assert(FirstVec->isVectorType() && "FirstVec should be a vector type"); 9403 assert(SecondVec->isVectorType() && "SecondVec should be a vector type"); 9404 9405 if (hasSameUnqualifiedType(FirstVec, SecondVec)) 9406 return true; 9407 9408 // Treat Neon vector types and most AltiVec vector types as if they are the 9409 // equivalent GCC vector types. 9410 const auto *First = FirstVec->castAs<VectorType>(); 9411 const auto *Second = SecondVec->castAs<VectorType>(); 9412 if (First->getNumElements() == Second->getNumElements() && 9413 hasSameType(First->getElementType(), Second->getElementType()) && 9414 First->getVectorKind() != VectorKind::AltiVecPixel && 9415 First->getVectorKind() != VectorKind::AltiVecBool && 9416 Second->getVectorKind() != VectorKind::AltiVecPixel && 9417 Second->getVectorKind() != VectorKind::AltiVecBool && 9418 First->getVectorKind() != VectorKind::SveFixedLengthData && 9419 First->getVectorKind() != VectorKind::SveFixedLengthPredicate && 9420 Second->getVectorKind() != VectorKind::SveFixedLengthData && 9421 Second->getVectorKind() != VectorKind::SveFixedLengthPredicate && 9422 First->getVectorKind() != VectorKind::RVVFixedLengthData && 9423 Second->getVectorKind() != VectorKind::RVVFixedLengthData) 9424 return true; 9425 9426 return false; 9427 } 9428 9429 /// getSVETypeSize - Return SVE vector or predicate register size. 9430 static uint64_t getSVETypeSize(ASTContext &Context, const BuiltinType *Ty) { 9431 assert(Ty->isSveVLSBuiltinType() && "Invalid SVE Type"); 9432 if (Ty->getKind() == BuiltinType::SveBool || 9433 Ty->getKind() == BuiltinType::SveCount) 9434 return (Context.getLangOpts().VScaleMin * 128) / Context.getCharWidth(); 9435 return Context.getLangOpts().VScaleMin * 128; 9436 } 9437 9438 bool ASTContext::areCompatibleSveTypes(QualType FirstType, 9439 QualType SecondType) { 9440 assert( 9441 ((FirstType->isSVESizelessBuiltinType() && SecondType->isVectorType()) || 9442 (FirstType->isVectorType() && SecondType->isSVESizelessBuiltinType())) && 9443 "Expected SVE builtin type and vector type!"); 9444 9445 auto IsValidCast = [this](QualType FirstType, QualType SecondType) { 9446 if (const auto *BT = FirstType->getAs<BuiltinType>()) { 9447 if (const auto *VT = SecondType->getAs<VectorType>()) { 9448 // Predicates have the same representation as uint8 so we also have to 9449 // check the kind to make these types incompatible. 
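// Below: a fixed-length predicate vector only matches svbool_t; a
// fixed-length data vector must use the scalable type's element type; and a
// plain GNU vector must additionally match the size returned by
// getSVETypeSize and share the builtin's element type.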
9450 if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) 9451 return BT->getKind() == BuiltinType::SveBool; 9452 else if (VT->getVectorKind() == VectorKind::SveFixedLengthData) 9453 return VT->getElementType().getCanonicalType() == 9454 FirstType->getSveEltType(*this); 9455 else if (VT->getVectorKind() == VectorKind::Generic) 9456 return getTypeSize(SecondType) == getSVETypeSize(*this, BT) && 9457 hasSameType(VT->getElementType(), 9458 getBuiltinVectorTypeInfo(BT).ElementType); 9459 } 9460 } 9461 return false; 9462 }; 9463 9464 return IsValidCast(FirstType, SecondType) || 9465 IsValidCast(SecondType, FirstType); 9466 } 9467 9468 bool ASTContext::areLaxCompatibleSveTypes(QualType FirstType, 9469 QualType SecondType) { 9470 assert( 9471 ((FirstType->isSVESizelessBuiltinType() && SecondType->isVectorType()) || 9472 (FirstType->isVectorType() && SecondType->isSVESizelessBuiltinType())) && 9473 "Expected SVE builtin type and vector type!"); 9474 9475 auto IsLaxCompatible = [this](QualType FirstType, QualType SecondType) { 9476 const auto *BT = FirstType->getAs<BuiltinType>(); 9477 if (!BT) 9478 return false; 9479 9480 const auto *VecTy = SecondType->getAs<VectorType>(); 9481 if (VecTy && (VecTy->getVectorKind() == VectorKind::SveFixedLengthData || 9482 VecTy->getVectorKind() == VectorKind::Generic)) { 9483 const LangOptions::LaxVectorConversionKind LVCKind = 9484 getLangOpts().getLaxVectorConversions(); 9485 9486 // Can not convert between sve predicates and sve vectors because of 9487 // different size. 9488 if (BT->getKind() == BuiltinType::SveBool && 9489 VecTy->getVectorKind() == VectorKind::SveFixedLengthData) 9490 return false; 9491 9492 // If __ARM_FEATURE_SVE_BITS != N do not allow GNU vector lax conversion. 9493 // "Whenever __ARM_FEATURE_SVE_BITS==N, GNUT implicitly 9494 // converts to VLAT and VLAT implicitly converts to GNUT." 9495 // ACLE Spec Version 00bet6, 3.7.3.2. Behavior common to vectors and 9496 // predicates. 9497 if (VecTy->getVectorKind() == VectorKind::Generic && 9498 getTypeSize(SecondType) != getSVETypeSize(*this, BT)) 9499 return false; 9500 9501 // If -flax-vector-conversions=all is specified, the types are 9502 // certainly compatible. 9503 if (LVCKind == LangOptions::LaxVectorConversionKind::All) 9504 return true; 9505 9506 // If -flax-vector-conversions=integer is specified, the types are 9507 // compatible if the elements are integer types. 9508 if (LVCKind == LangOptions::LaxVectorConversionKind::Integer) 9509 return VecTy->getElementType().getCanonicalType()->isIntegerType() && 9510 FirstType->getSveEltType(*this)->isIntegerType(); 9511 } 9512 9513 return false; 9514 }; 9515 9516 return IsLaxCompatible(FirstType, SecondType) || 9517 IsLaxCompatible(SecondType, FirstType); 9518 } 9519 9520 /// getRVVTypeSize - Return RVV vector register size. 
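/// The result is VScaleMin * KnownMinElements * ElementSize in bits, with
/// VScaleMin taken from the target's vscale range. Illustrative sketch (the
/// concrete numbers are an assumption): a scalable type with a known minimum
/// of 2 x 32-bit elements and a minimum vscale of 2 yields 2 * 2 * 32 = 128
/// bits.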
9521 static uint64_t getRVVTypeSize(ASTContext &Context, const BuiltinType *Ty) { 9522 assert(Ty->isRVVVLSBuiltinType() && "Invalid RVV Type"); 9523 auto VScale = Context.getTargetInfo().getVScaleRange(Context.getLangOpts()); 9524 if (!VScale) 9525 return 0; 9526 9527 ASTContext::BuiltinVectorTypeInfo Info = Context.getBuiltinVectorTypeInfo(Ty); 9528 9529 uint64_t EltSize = Context.getTypeSize(Info.ElementType); 9530 uint64_t MinElts = Info.EC.getKnownMinValue(); 9531 return VScale->first * MinElts * EltSize; 9532 } 9533 9534 bool ASTContext::areCompatibleRVVTypes(QualType FirstType, 9535 QualType SecondType) { 9536 assert( 9537 ((FirstType->isRVVSizelessBuiltinType() && SecondType->isVectorType()) || 9538 (FirstType->isVectorType() && SecondType->isRVVSizelessBuiltinType())) && 9539 "Expected RVV builtin type and vector type!"); 9540 9541 auto IsValidCast = [this](QualType FirstType, QualType SecondType) { 9542 if (const auto *BT = FirstType->getAs<BuiltinType>()) { 9543 if (const auto *VT = SecondType->getAs<VectorType>()) { 9544 if (VT->getVectorKind() == VectorKind::RVVFixedLengthData || 9545 VT->getVectorKind() == VectorKind::Generic) 9546 return FirstType->isRVVVLSBuiltinType() && 9547 getTypeSize(SecondType) == getRVVTypeSize(*this, BT) && 9548 hasSameType(VT->getElementType(), 9549 getBuiltinVectorTypeInfo(BT).ElementType); 9550 } 9551 } 9552 return false; 9553 }; 9554 9555 return IsValidCast(FirstType, SecondType) || 9556 IsValidCast(SecondType, FirstType); 9557 } 9558 9559 bool ASTContext::areLaxCompatibleRVVTypes(QualType FirstType, 9560 QualType SecondType) { 9561 assert( 9562 ((FirstType->isRVVSizelessBuiltinType() && SecondType->isVectorType()) || 9563 (FirstType->isVectorType() && SecondType->isRVVSizelessBuiltinType())) && 9564 "Expected RVV builtin type and vector type!"); 9565 9566 auto IsLaxCompatible = [this](QualType FirstType, QualType SecondType) { 9567 const auto *BT = FirstType->getAs<BuiltinType>(); 9568 if (!BT) 9569 return false; 9570 9571 if (!BT->isRVVVLSBuiltinType()) 9572 return false; 9573 9574 const auto *VecTy = SecondType->getAs<VectorType>(); 9575 if (VecTy && VecTy->getVectorKind() == VectorKind::Generic) { 9576 const LangOptions::LaxVectorConversionKind LVCKind = 9577 getLangOpts().getLaxVectorConversions(); 9578 9579 // If __riscv_v_fixed_vlen != N do not allow vector lax conversion. 9580 if (getTypeSize(SecondType) != getRVVTypeSize(*this, BT)) 9581 return false; 9582 9583 // If -flax-vector-conversions=all is specified, the types are 9584 // certainly compatible. 9585 if (LVCKind == LangOptions::LaxVectorConversionKind::All) 9586 return true; 9587 9588 // If -flax-vector-conversions=integer is specified, the types are 9589 // compatible if the elements are integer types. 9590 if (LVCKind == LangOptions::LaxVectorConversionKind::Integer) 9591 return VecTy->getElementType().getCanonicalType()->isIntegerType() && 9592 FirstType->getRVVEltType(*this)->isIntegerType(); 9593 } 9594 9595 return false; 9596 }; 9597 9598 return IsLaxCompatible(FirstType, SecondType) || 9599 IsLaxCompatible(SecondType, FirstType); 9600 } 9601 9602 bool ASTContext::hasDirectOwnershipQualifier(QualType Ty) const { 9603 while (true) { 9604 // __strong id 9605 if (const AttributedType *Attr = dyn_cast<AttributedType>(Ty)) { 9606 if (Attr->getAttrKind() == attr::ObjCOwnership) 9607 return true; 9608 9609 Ty = Attr->getModifiedType(); 9610 9611 // X *__strong (...) 
9612 } else if (const ParenType *Paren = dyn_cast<ParenType>(Ty)) { 9613 Ty = Paren->getInnerType(); 9614 9615 // We do not want to look through typedefs, typeof(expr), 9616 // typeof(type), or any other way that the type is somehow 9617 // abstracted. 9618 } else { 9619 return false; 9620 } 9621 } 9622 } 9623 9624 //===----------------------------------------------------------------------===// 9625 // ObjCQualifiedIdTypesAreCompatible - Compatibility testing for qualified id's. 9626 //===----------------------------------------------------------------------===// 9627 9628 /// ProtocolCompatibleWithProtocol - return 'true' if 'lProto' is in the 9629 /// inheritance hierarchy of 'rProto'. 9630 bool 9631 ASTContext::ProtocolCompatibleWithProtocol(ObjCProtocolDecl *lProto, 9632 ObjCProtocolDecl *rProto) const { 9633 if (declaresSameEntity(lProto, rProto)) 9634 return true; 9635 for (auto *PI : rProto->protocols()) 9636 if (ProtocolCompatibleWithProtocol(lProto, PI)) 9637 return true; 9638 return false; 9639 } 9640 9641 /// ObjCQualifiedClassTypesAreCompatible - compare Class<pr,...> and 9642 /// Class<pr1, ...>. 9643 bool ASTContext::ObjCQualifiedClassTypesAreCompatible( 9644 const ObjCObjectPointerType *lhs, const ObjCObjectPointerType *rhs) { 9645 for (auto *lhsProto : lhs->quals()) { 9646 bool match = false; 9647 for (auto *rhsProto : rhs->quals()) { 9648 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto)) { 9649 match = true; 9650 break; 9651 } 9652 } 9653 if (!match) 9654 return false; 9655 } 9656 return true; 9657 } 9658 9659 /// ObjCQualifiedIdTypesAreCompatible - We know that one of lhs/rhs is an 9660 /// ObjCQualifiedIDType. 9661 bool ASTContext::ObjCQualifiedIdTypesAreCompatible( 9662 const ObjCObjectPointerType *lhs, const ObjCObjectPointerType *rhs, 9663 bool compare) { 9664 // Allow id<P..> and an 'id' in all cases. 9665 if (lhs->isObjCIdType() || rhs->isObjCIdType()) 9666 return true; 9667 9668 // Don't allow id<P..> to convert to Class or Class<P..> in either direction. 9669 if (lhs->isObjCClassType() || lhs->isObjCQualifiedClassType() || 9670 rhs->isObjCClassType() || rhs->isObjCQualifiedClassType()) 9671 return false; 9672 9673 if (lhs->isObjCQualifiedIdType()) { 9674 if (rhs->qual_empty()) { 9675 // If the RHS is a unqualified interface pointer "NSString*", 9676 // make sure we check the class hierarchy. 9677 if (ObjCInterfaceDecl *rhsID = rhs->getInterfaceDecl()) { 9678 for (auto *I : lhs->quals()) { 9679 // when comparing an id<P> on lhs with a static type on rhs, 9680 // see if static class implements all of id's protocols, directly or 9681 // through its super class and categories. 9682 if (!rhsID->ClassImplementsProtocol(I, true)) 9683 return false; 9684 } 9685 } 9686 // If there are no qualifiers and no interface, we have an 'id'. 9687 return true; 9688 } 9689 // Both the right and left sides have qualifiers. 9690 for (auto *lhsProto : lhs->quals()) { 9691 bool match = false; 9692 9693 // when comparing an id<P> on lhs with a static type on rhs, 9694 // see if static class implements all of id's protocols, directly or 9695 // through its super class and categories. 9696 for (auto *rhsProto : rhs->quals()) { 9697 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || 9698 (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { 9699 match = true; 9700 break; 9701 } 9702 } 9703 // If the RHS is a qualified interface pointer "NSString<P>*", 9704 // make sure we check the class hierarchy. 
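// Even without a direct qualifier match, the requirement can still be met
// through the RHS interface itself: ClassImplementsProtocol reports
// protocols adopted by the interface, its superclasses, or its categories.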
9705 if (ObjCInterfaceDecl *rhsID = rhs->getInterfaceDecl()) { 9706 for (auto *I : lhs->quals()) { 9707 // when comparing an id<P> on lhs with a static type on rhs, 9708 // see if static class implements all of id's protocols, directly or 9709 // through its super class and categories. 9710 if (rhsID->ClassImplementsProtocol(I, true)) { 9711 match = true; 9712 break; 9713 } 9714 } 9715 } 9716 if (!match) 9717 return false; 9718 } 9719 9720 return true; 9721 } 9722 9723 assert(rhs->isObjCQualifiedIdType() && "One of the LHS/RHS should be id<x>"); 9724 9725 if (lhs->getInterfaceType()) { 9726 // If both the right and left sides have qualifiers. 9727 for (auto *lhsProto : lhs->quals()) { 9728 bool match = false; 9729 9730 // when comparing an id<P> on rhs with a static type on lhs, 9731 // see if static class implements all of id's protocols, directly or 9732 // through its super class and categories. 9733 // First, lhs protocols in the qualifier list must be found, direct 9734 // or indirect in rhs's qualifier list or it is a mismatch. 9735 for (auto *rhsProto : rhs->quals()) { 9736 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || 9737 (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { 9738 match = true; 9739 break; 9740 } 9741 } 9742 if (!match) 9743 return false; 9744 } 9745 9746 // Static class's protocols, or its super class or category protocols 9747 // must be found, direct or indirect in rhs's qualifier list or it is a mismatch. 9748 if (ObjCInterfaceDecl *lhsID = lhs->getInterfaceDecl()) { 9749 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSInheritedProtocols; 9750 CollectInheritedProtocols(lhsID, LHSInheritedProtocols); 9751 // This is rather dubious but matches gcc's behavior. If lhs has 9752 // no type qualifier and its class has no static protocol(s) 9753 // assume that it is mismatch. 9754 if (LHSInheritedProtocols.empty() && lhs->qual_empty()) 9755 return false; 9756 for (auto *lhsProto : LHSInheritedProtocols) { 9757 bool match = false; 9758 for (auto *rhsProto : rhs->quals()) { 9759 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || 9760 (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { 9761 match = true; 9762 break; 9763 } 9764 } 9765 if (!match) 9766 return false; 9767 } 9768 } 9769 return true; 9770 } 9771 return false; 9772 } 9773 9774 /// canAssignObjCInterfaces - Return true if the two interface types are 9775 /// compatible for assignment from RHS to LHS. This handles validation of any 9776 /// protocol qualifiers on the LHS or RHS. 9777 bool ASTContext::canAssignObjCInterfaces(const ObjCObjectPointerType *LHSOPT, 9778 const ObjCObjectPointerType *RHSOPT) { 9779 const ObjCObjectType* LHS = LHSOPT->getObjectType(); 9780 const ObjCObjectType* RHS = RHSOPT->getObjectType(); 9781 9782 // If either type represents the built-in 'id' type, return true. 9783 if (LHS->isObjCUnqualifiedId() || RHS->isObjCUnqualifiedId()) 9784 return true; 9785 9786 // Function object that propagates a successful result or handles 9787 // __kindof types. 9788 auto finish = [&](bool succeeded) -> bool { 9789 if (succeeded) 9790 return true; 9791 9792 if (!RHS->isKindOfType()) 9793 return false; 9794 9795 // Strip off __kindof and protocol qualifiers, then check whether 9796 // we can assign the other way. 9797 return canAssignObjCInterfaces(RHSOPT->stripObjCKindOfTypeAndQuals(*this), 9798 LHSOPT->stripObjCKindOfTypeAndQuals(*this)); 9799 }; 9800 9801 // Casts from or to id<P> are allowed when the other side has compatible 9802 // protocols. 
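// Illustrative sketch (the Cocoa declarations are assumptions, not from this
// file):
//   id<NSCopying> c = someString;   // OK when the static type adopts
//                                   // NSCopying.
//   id<NSCopying> c = someObject;   // Rejected when its class hierarchy and
//                                   // qualifiers provide no such protocol.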
9803 if (LHS->isObjCQualifiedId() || RHS->isObjCQualifiedId()) { 9804 return finish(ObjCQualifiedIdTypesAreCompatible(LHSOPT, RHSOPT, false)); 9805 } 9806 9807 // Verify protocol compatibility for casts from Class<P1> to Class<P2>. 9808 if (LHS->isObjCQualifiedClass() && RHS->isObjCQualifiedClass()) { 9809 return finish(ObjCQualifiedClassTypesAreCompatible(LHSOPT, RHSOPT)); 9810 } 9811 9812 // Casts from Class to Class<Foo>, or vice-versa, are allowed. 9813 if (LHS->isObjCClass() && RHS->isObjCClass()) { 9814 return true; 9815 } 9816 9817 // If we have 2 user-defined types, fall into that path. 9818 if (LHS->getInterface() && RHS->getInterface()) { 9819 return finish(canAssignObjCInterfaces(LHS, RHS)); 9820 } 9821 9822 return false; 9823 } 9824 9825 /// canAssignObjCInterfacesInBlockPointer - This routine is specifically written 9826 /// for providing type-safety for objective-c pointers used to pass/return 9827 /// arguments in block literals. When passed as arguments, passing 'A*' where 9828 /// 'id' is expected is not OK. Passing 'Sub *" where 'Super *" is expected is 9829 /// not OK. For the return type, the opposite is not OK. 9830 bool ASTContext::canAssignObjCInterfacesInBlockPointer( 9831 const ObjCObjectPointerType *LHSOPT, 9832 const ObjCObjectPointerType *RHSOPT, 9833 bool BlockReturnType) { 9834 9835 // Function object that propagates a successful result or handles 9836 // __kindof types. 9837 auto finish = [&](bool succeeded) -> bool { 9838 if (succeeded) 9839 return true; 9840 9841 const ObjCObjectPointerType *Expected = BlockReturnType ? RHSOPT : LHSOPT; 9842 if (!Expected->isKindOfType()) 9843 return false; 9844 9845 // Strip off __kindof and protocol qualifiers, then check whether 9846 // we can assign the other way. 9847 return canAssignObjCInterfacesInBlockPointer( 9848 RHSOPT->stripObjCKindOfTypeAndQuals(*this), 9849 LHSOPT->stripObjCKindOfTypeAndQuals(*this), 9850 BlockReturnType); 9851 }; 9852 9853 if (RHSOPT->isObjCBuiltinType() || LHSOPT->isObjCIdType()) 9854 return true; 9855 9856 if (LHSOPT->isObjCBuiltinType()) { 9857 return finish(RHSOPT->isObjCBuiltinType() || 9858 RHSOPT->isObjCQualifiedIdType()); 9859 } 9860 9861 if (LHSOPT->isObjCQualifiedIdType() || RHSOPT->isObjCQualifiedIdType()) { 9862 if (getLangOpts().CompatibilityQualifiedIdBlockParamTypeChecking) 9863 // Use for block parameters previous type checking for compatibility. 9864 return finish(ObjCQualifiedIdTypesAreCompatible(LHSOPT, RHSOPT, false) || 9865 // Or corrected type checking as in non-compat mode. 9866 (!BlockReturnType && 9867 ObjCQualifiedIdTypesAreCompatible(RHSOPT, LHSOPT, false))); 9868 else 9869 return finish(ObjCQualifiedIdTypesAreCompatible( 9870 (BlockReturnType ? LHSOPT : RHSOPT), 9871 (BlockReturnType ? RHSOPT : LHSOPT), false)); 9872 } 9873 9874 const ObjCInterfaceType* LHS = LHSOPT->getInterfaceType(); 9875 const ObjCInterfaceType* RHS = RHSOPT->getInterfaceType(); 9876 if (LHS && RHS) { // We have 2 user-defined types. 9877 if (LHS != RHS) { 9878 if (LHS->getDecl()->isSuperClassOf(RHS->getDecl())) 9879 return finish(BlockReturnType); 9880 if (RHS->getDecl()->isSuperClassOf(LHS->getDecl())) 9881 return finish(!BlockReturnType); 9882 } 9883 else 9884 return true; 9885 } 9886 return false; 9887 } 9888 9889 /// Comparison routine for Objective-C protocols to be used with 9890 /// llvm::array_pod_sort. 
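/// Sorting by name gives the intersected protocol list computed below a
/// deterministic order.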
9891 static int compareObjCProtocolsByName(ObjCProtocolDecl * const *lhs, 9892 ObjCProtocolDecl * const *rhs) { 9893 return (*lhs)->getName().compare((*rhs)->getName()); 9894 } 9895 9896 /// getIntersectionOfProtocols - This routine finds the intersection of set 9897 /// of protocols inherited from two distinct objective-c pointer objects with 9898 /// the given common base. 9899 /// It is used to build composite qualifier list of the composite type of 9900 /// the conditional expression involving two objective-c pointer objects. 9901 static 9902 void getIntersectionOfProtocols(ASTContext &Context, 9903 const ObjCInterfaceDecl *CommonBase, 9904 const ObjCObjectPointerType *LHSOPT, 9905 const ObjCObjectPointerType *RHSOPT, 9906 SmallVectorImpl<ObjCProtocolDecl *> &IntersectionSet) { 9907 9908 const ObjCObjectType* LHS = LHSOPT->getObjectType(); 9909 const ObjCObjectType* RHS = RHSOPT->getObjectType(); 9910 assert(LHS->getInterface() && "LHS must have an interface base"); 9911 assert(RHS->getInterface() && "RHS must have an interface base"); 9912 9913 // Add all of the protocols for the LHS. 9914 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSProtocolSet; 9915 9916 // Start with the protocol qualifiers. 9917 for (auto *proto : LHS->quals()) { 9918 Context.CollectInheritedProtocols(proto, LHSProtocolSet); 9919 } 9920 9921 // Also add the protocols associated with the LHS interface. 9922 Context.CollectInheritedProtocols(LHS->getInterface(), LHSProtocolSet); 9923 9924 // Add all of the protocols for the RHS. 9925 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> RHSProtocolSet; 9926 9927 // Start with the protocol qualifiers. 9928 for (auto *proto : RHS->quals()) { 9929 Context.CollectInheritedProtocols(proto, RHSProtocolSet); 9930 } 9931 9932 // Also add the protocols associated with the RHS interface. 9933 Context.CollectInheritedProtocols(RHS->getInterface(), RHSProtocolSet); 9934 9935 // Compute the intersection of the collected protocol sets. 9936 for (auto *proto : LHSProtocolSet) { 9937 if (RHSProtocolSet.count(proto)) 9938 IntersectionSet.push_back(proto); 9939 } 9940 9941 // Compute the set of protocols that is implied by either the common type or 9942 // the protocols within the intersection. 9943 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> ImpliedProtocols; 9944 Context.CollectInheritedProtocols(CommonBase, ImpliedProtocols); 9945 9946 // Remove any implied protocols from the list of inherited protocols. 9947 if (!ImpliedProtocols.empty()) { 9948 llvm::erase_if(IntersectionSet, [&](ObjCProtocolDecl *proto) -> bool { 9949 return ImpliedProtocols.contains(proto); 9950 }); 9951 } 9952 9953 // Sort the remaining protocols by name. 9954 llvm::array_pod_sort(IntersectionSet.begin(), IntersectionSet.end(), 9955 compareObjCProtocolsByName); 9956 } 9957 9958 /// Determine whether the first type is a subtype of the second. 9959 static bool canAssignObjCObjectTypes(ASTContext &ctx, QualType lhs, 9960 QualType rhs) { 9961 // Common case: two object pointers. 9962 const auto *lhsOPT = lhs->getAs<ObjCObjectPointerType>(); 9963 const auto *rhsOPT = rhs->getAs<ObjCObjectPointerType>(); 9964 if (lhsOPT && rhsOPT) 9965 return ctx.canAssignObjCInterfaces(lhsOPT, rhsOPT); 9966 9967 // Two block pointers. 9968 const auto *lhsBlock = lhs->getAs<BlockPointerType>(); 9969 const auto *rhsBlock = rhs->getAs<BlockPointerType>(); 9970 if (lhsBlock && rhsBlock) 9971 return ctx.typesAreBlockPointerCompatible(lhs, rhs); 9972 9973 // If either is an unqualified 'id' and the other is a block, it's 9974 // acceptable. 
9975 if ((lhsOPT && lhsOPT->isObjCIdType() && rhsBlock) || 9976 (rhsOPT && rhsOPT->isObjCIdType() && lhsBlock)) 9977 return true; 9978 9979 return false; 9980 } 9981 9982 // Check that the given Objective-C type argument lists are equivalent. 9983 static bool sameObjCTypeArgs(ASTContext &ctx, 9984 const ObjCInterfaceDecl *iface, 9985 ArrayRef<QualType> lhsArgs, 9986 ArrayRef<QualType> rhsArgs, 9987 bool stripKindOf) { 9988 if (lhsArgs.size() != rhsArgs.size()) 9989 return false; 9990 9991 ObjCTypeParamList *typeParams = iface->getTypeParamList(); 9992 if (!typeParams) 9993 return false; 9994 9995 for (unsigned i = 0, n = lhsArgs.size(); i != n; ++i) { 9996 if (ctx.hasSameType(lhsArgs[i], rhsArgs[i])) 9997 continue; 9998 9999 switch (typeParams->begin()[i]->getVariance()) { 10000 case ObjCTypeParamVariance::Invariant: 10001 if (!stripKindOf || 10002 !ctx.hasSameType(lhsArgs[i].stripObjCKindOfType(ctx), 10003 rhsArgs[i].stripObjCKindOfType(ctx))) { 10004 return false; 10005 } 10006 break; 10007 10008 case ObjCTypeParamVariance::Covariant: 10009 if (!canAssignObjCObjectTypes(ctx, lhsArgs[i], rhsArgs[i])) 10010 return false; 10011 break; 10012 10013 case ObjCTypeParamVariance::Contravariant: 10014 if (!canAssignObjCObjectTypes(ctx, rhsArgs[i], lhsArgs[i])) 10015 return false; 10016 break; 10017 } 10018 } 10019 10020 return true; 10021 } 10022 10023 QualType ASTContext::areCommonBaseCompatible( 10024 const ObjCObjectPointerType *Lptr, 10025 const ObjCObjectPointerType *Rptr) { 10026 const ObjCObjectType *LHS = Lptr->getObjectType(); 10027 const ObjCObjectType *RHS = Rptr->getObjectType(); 10028 const ObjCInterfaceDecl* LDecl = LHS->getInterface(); 10029 const ObjCInterfaceDecl* RDecl = RHS->getInterface(); 10030 10031 if (!LDecl || !RDecl) 10032 return {}; 10033 10034 // When either LHS or RHS is a kindof type, we should return a kindof type. 10035 // For example, for common base of kindof(ASub1) and kindof(ASub2), we return 10036 // kindof(A). 10037 bool anyKindOf = LHS->isKindOfType() || RHS->isKindOfType(); 10038 10039 // Follow the left-hand side up the class hierarchy until we either hit a 10040 // root or find the RHS. Record the ancestors in case we don't find it. 10041 llvm::SmallDenseMap<const ObjCInterfaceDecl *, const ObjCObjectType *, 4> 10042 LHSAncestors; 10043 while (true) { 10044 // Record this ancestor. We'll need this if the common type isn't in the 10045 // path from the LHS to the root. 10046 LHSAncestors[LHS->getInterface()->getCanonicalDecl()] = LHS; 10047 10048 if (declaresSameEntity(LHS->getInterface(), RDecl)) { 10049 // Get the type arguments. 10050 ArrayRef<QualType> LHSTypeArgs = LHS->getTypeArgsAsWritten(); 10051 bool anyChanges = false; 10052 if (LHS->isSpecialized() && RHS->isSpecialized()) { 10053 // Both have type arguments, compare them. 10054 if (!sameObjCTypeArgs(*this, LHS->getInterface(), 10055 LHS->getTypeArgs(), RHS->getTypeArgs(), 10056 /*stripKindOf=*/true)) 10057 return {}; 10058 } else if (LHS->isSpecialized() != RHS->isSpecialized()) { 10059 // If only one has type arguments, the result will not have type 10060 // arguments. 10061 LHSTypeArgs = {}; 10062 anyChanges = true; 10063 } 10064 10065 // Compute the intersection of protocols. 10066 SmallVector<ObjCProtocolDecl *, 8> Protocols; 10067 getIntersectionOfProtocols(*this, LHS->getInterface(), Lptr, Rptr, 10068 Protocols); 10069 if (!Protocols.empty()) 10070 anyChanges = true; 10071 10072 // If anything in the LHS will have changed, build a new result type. 
10073 // If we need to return a kindof type but LHS is not a kindof type, we 10074 // build a new result type. 10075 if (anyChanges || LHS->isKindOfType() != anyKindOf) { 10076 QualType Result = getObjCInterfaceType(LHS->getInterface()); 10077 Result = getObjCObjectType(Result, LHSTypeArgs, Protocols, 10078 anyKindOf || LHS->isKindOfType()); 10079 return getObjCObjectPointerType(Result); 10080 } 10081 10082 return getObjCObjectPointerType(QualType(LHS, 0)); 10083 } 10084 10085 // Find the superclass. 10086 QualType LHSSuperType = LHS->getSuperClassType(); 10087 if (LHSSuperType.isNull()) 10088 break; 10089 10090 LHS = LHSSuperType->castAs<ObjCObjectType>(); 10091 } 10092 10093 // We didn't find anything by following the LHS to its root; now check 10094 // the RHS against the cached set of ancestors. 10095 while (true) { 10096 auto KnownLHS = LHSAncestors.find(RHS->getInterface()->getCanonicalDecl()); 10097 if (KnownLHS != LHSAncestors.end()) { 10098 LHS = KnownLHS->second; 10099 10100 // Get the type arguments. 10101 ArrayRef<QualType> RHSTypeArgs = RHS->getTypeArgsAsWritten(); 10102 bool anyChanges = false; 10103 if (LHS->isSpecialized() && RHS->isSpecialized()) { 10104 // Both have type arguments, compare them. 10105 if (!sameObjCTypeArgs(*this, LHS->getInterface(), 10106 LHS->getTypeArgs(), RHS->getTypeArgs(), 10107 /*stripKindOf=*/true)) 10108 return {}; 10109 } else if (LHS->isSpecialized() != RHS->isSpecialized()) { 10110 // If only one has type arguments, the result will not have type 10111 // arguments. 10112 RHSTypeArgs = {}; 10113 anyChanges = true; 10114 } 10115 10116 // Compute the intersection of protocols. 10117 SmallVector<ObjCProtocolDecl *, 8> Protocols; 10118 getIntersectionOfProtocols(*this, RHS->getInterface(), Lptr, Rptr, 10119 Protocols); 10120 if (!Protocols.empty()) 10121 anyChanges = true; 10122 10123 // If we need to return a kindof type but RHS is not a kindof type, we 10124 // build a new result type. 10125 if (anyChanges || RHS->isKindOfType() != anyKindOf) { 10126 QualType Result = getObjCInterfaceType(RHS->getInterface()); 10127 Result = getObjCObjectType(Result, RHSTypeArgs, Protocols, 10128 anyKindOf || RHS->isKindOfType()); 10129 return getObjCObjectPointerType(Result); 10130 } 10131 10132 return getObjCObjectPointerType(QualType(RHS, 0)); 10133 } 10134 10135 // Find the superclass of the RHS. 10136 QualType RHSSuperType = RHS->getSuperClassType(); 10137 if (RHSSuperType.isNull()) 10138 break; 10139 10140 RHS = RHSSuperType->castAs<ObjCObjectType>(); 10141 } 10142 10143 return {}; 10144 } 10145 10146 bool ASTContext::canAssignObjCInterfaces(const ObjCObjectType *LHS, 10147 const ObjCObjectType *RHS) { 10148 assert(LHS->getInterface() && "LHS is not an interface type"); 10149 assert(RHS->getInterface() && "RHS is not an interface type"); 10150 10151 // Verify that the base decls are compatible: the RHS must be a subclass of 10152 // the LHS. 10153 ObjCInterfaceDecl *LHSInterface = LHS->getInterface(); 10154 bool IsSuperClass = LHSInterface->isSuperClassOf(RHS->getInterface()); 10155 if (!IsSuperClass) 10156 return false; 10157 10158 // If the LHS has protocol qualifiers, determine whether all of them are 10159 // satisfied by the RHS (i.e., the RHS has a superset of the protocols in the 10160 // LHS). 10161 if (LHS->getNumProtocols() > 0) { 10162 // OK if conversion of LHS to SuperClass results in narrowing of types 10163 // ; i.e., SuperClass may implement at least one of the protocols 10164 // in LHS's protocol list. 
Example, SuperObj<P1> = lhs<P1,P2> is ok.
10165 // But not SuperObj<P1,P2,P3> = lhs<P1,P2>.
10166 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> SuperClassInheritedProtocols;
10167 CollectInheritedProtocols(RHS->getInterface(), SuperClassInheritedProtocols);
10168 // Also, if RHS has explicit qualifiers, include them for comparing with LHS's
10169 // qualifiers.
10170 for (auto *RHSPI : RHS->quals())
10171 CollectInheritedProtocols(RHSPI, SuperClassInheritedProtocols);
10172 // If there are no protocols associated with RHS, it is not a match.
10173 if (SuperClassInheritedProtocols.empty())
10174 return false;
10175
10176 for (const auto *LHSProto : LHS->quals()) {
10177 bool SuperImplementsProtocol = false;
10178 for (auto *SuperClassProto : SuperClassInheritedProtocols)
10179 if (SuperClassProto->lookupProtocolNamed(LHSProto->getIdentifier())) {
10180 SuperImplementsProtocol = true;
10181 break;
10182 }
10183 if (!SuperImplementsProtocol)
10184 return false;
10185 }
10186 }
10187
10188 // If the LHS is specialized, we may need to check type arguments.
10189 if (LHS->isSpecialized()) {
10190 // Follow the superclass chain until we've matched the LHS class in the
10191 // hierarchy. This substitutes type arguments through.
10192 const ObjCObjectType *RHSSuper = RHS;
10193 while (!declaresSameEntity(RHSSuper->getInterface(), LHSInterface))
10194 RHSSuper = RHSSuper->getSuperClassType()->castAs<ObjCObjectType>();
10195
10196 // If the RHS is specialized, compare type arguments.
10197 if (RHSSuper->isSpecialized() &&
10198 !sameObjCTypeArgs(*this, LHS->getInterface(),
10199 LHS->getTypeArgs(), RHSSuper->getTypeArgs(),
10200 /*stripKindOf=*/true)) {
10201 return false;
10202 }
10203 }
10204
10205 return true;
10206 }
10207
10208 bool ASTContext::areComparableObjCPointerTypes(QualType LHS, QualType RHS) {
10209 // Get the "pointed to" types.
10210 const auto *LHSOPT = LHS->getAs<ObjCObjectPointerType>();
10211 const auto *RHSOPT = RHS->getAs<ObjCObjectPointerType>();
10212
10213 if (!LHSOPT || !RHSOPT)
10214 return false;
10215
10216 return canAssignObjCInterfaces(LHSOPT, RHSOPT) ||
10217 canAssignObjCInterfaces(RHSOPT, LHSOPT);
10218 }
10219
10220 bool ASTContext::canBindObjCObjectType(QualType To, QualType From) {
10221 return canAssignObjCInterfaces(
10222 getObjCObjectPointerType(To)->castAs<ObjCObjectPointerType>(),
10223 getObjCObjectPointerType(From)->castAs<ObjCObjectPointerType>());
10224 }
10225
10226 /// typesAreCompatible - C99 6.7.3p9: For two qualified types to be compatible,
10227 /// both shall have the identically qualified version of a compatible type.
10228 /// C99 6.2.7p1: Two types have compatible types if their types are the
10229 /// same. See 6.7.[2,3,5] for additional rules.
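///
/// Illustrative sketch (the declarations are assumptions, not from this
/// file): in C,
/// \code
///   int f(void);
///   int f();          // Compatible; the composite type is int(void).
///   extern int a[];
///   extern int a[10]; // Compatible; the composite type is int[10].
/// \endcode
/// In C++ the definition below simply returns hasSameType(LHS, RHS).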
10230 bool ASTContext::typesAreCompatible(QualType LHS, QualType RHS, 10231 bool CompareUnqualified) { 10232 if (getLangOpts().CPlusPlus) 10233 return hasSameType(LHS, RHS); 10234 10235 return !mergeTypes(LHS, RHS, false, CompareUnqualified).isNull(); 10236 } 10237 10238 bool ASTContext::propertyTypesAreCompatible(QualType LHS, QualType RHS) { 10239 return typesAreCompatible(LHS, RHS); 10240 } 10241 10242 bool ASTContext::typesAreBlockPointerCompatible(QualType LHS, QualType RHS) { 10243 return !mergeTypes(LHS, RHS, true).isNull(); 10244 } 10245 10246 /// mergeTransparentUnionType - if T is a transparent union type and a member 10247 /// of T is compatible with SubType, return the merged type, else return 10248 /// QualType() 10249 QualType ASTContext::mergeTransparentUnionType(QualType T, QualType SubType, 10250 bool OfBlockPointer, 10251 bool Unqualified) { 10252 if (const RecordType *UT = T->getAsUnionType()) { 10253 RecordDecl *UD = UT->getDecl(); 10254 if (UD->hasAttr<TransparentUnionAttr>()) { 10255 for (const auto *I : UD->fields()) { 10256 QualType ET = I->getType().getUnqualifiedType(); 10257 QualType MT = mergeTypes(ET, SubType, OfBlockPointer, Unqualified); 10258 if (!MT.isNull()) 10259 return MT; 10260 } 10261 } 10262 } 10263 10264 return {}; 10265 } 10266 10267 /// mergeFunctionParameterTypes - merge two types which appear as function 10268 /// parameter types 10269 QualType ASTContext::mergeFunctionParameterTypes(QualType lhs, QualType rhs, 10270 bool OfBlockPointer, 10271 bool Unqualified) { 10272 // GNU extension: two types are compatible if they appear as a function 10273 // argument, one of the types is a transparent union type and the other 10274 // type is compatible with a union member 10275 QualType lmerge = mergeTransparentUnionType(lhs, rhs, OfBlockPointer, 10276 Unqualified); 10277 if (!lmerge.isNull()) 10278 return lmerge; 10279 10280 QualType rmerge = mergeTransparentUnionType(rhs, lhs, OfBlockPointer, 10281 Unqualified); 10282 if (!rmerge.isNull()) 10283 return rmerge; 10284 10285 return mergeTypes(lhs, rhs, OfBlockPointer, Unqualified); 10286 } 10287 10288 QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs, 10289 bool OfBlockPointer, bool Unqualified, 10290 bool AllowCXX, 10291 bool IsConditionalOperator) { 10292 const auto *lbase = lhs->castAs<FunctionType>(); 10293 const auto *rbase = rhs->castAs<FunctionType>(); 10294 const auto *lproto = dyn_cast<FunctionProtoType>(lbase); 10295 const auto *rproto = dyn_cast<FunctionProtoType>(rbase); 10296 bool allLTypes = true; 10297 bool allRTypes = true; 10298 10299 // Check return type 10300 QualType retType; 10301 if (OfBlockPointer) { 10302 QualType RHS = rbase->getReturnType(); 10303 QualType LHS = lbase->getReturnType(); 10304 bool UnqualifiedResult = Unqualified; 10305 if (!UnqualifiedResult) 10306 UnqualifiedResult = (!RHS.hasQualifiers() && LHS.hasQualifiers()); 10307 retType = mergeTypes(LHS, RHS, true, UnqualifiedResult, true); 10308 } 10309 else 10310 retType = mergeTypes(lbase->getReturnType(), rbase->getReturnType(), false, 10311 Unqualified); 10312 if (retType.isNull()) 10313 return {}; 10314 10315 if (Unqualified) 10316 retType = retType.getUnqualifiedType(); 10317 10318 CanQualType LRetType = getCanonicalType(lbase->getReturnType()); 10319 CanQualType RRetType = getCanonicalType(rbase->getReturnType()); 10320 if (Unqualified) { 10321 LRetType = LRetType.getUnqualifiedType(); 10322 RRetType = RRetType.getUnqualifiedType(); 10323 } 10324 10325 if (getCanonicalType(retType) != LRetType) 
10326 allLTypes = false; 10327 if (getCanonicalType(retType) != RRetType) 10328 allRTypes = false; 10329 10330 // FIXME: double check this 10331 // FIXME: should we error if lbase->getRegParmAttr() != 0 && 10332 // rbase->getRegParmAttr() != 0 && 10333 // lbase->getRegParmAttr() != rbase->getRegParmAttr()? 10334 FunctionType::ExtInfo lbaseInfo = lbase->getExtInfo(); 10335 FunctionType::ExtInfo rbaseInfo = rbase->getExtInfo(); 10336 10337 // Compatible functions must have compatible calling conventions 10338 if (lbaseInfo.getCC() != rbaseInfo.getCC()) 10339 return {}; 10340 10341 // Regparm is part of the calling convention. 10342 if (lbaseInfo.getHasRegParm() != rbaseInfo.getHasRegParm()) 10343 return {}; 10344 if (lbaseInfo.getRegParm() != rbaseInfo.getRegParm()) 10345 return {}; 10346 10347 if (lbaseInfo.getProducesResult() != rbaseInfo.getProducesResult()) 10348 return {}; 10349 if (lbaseInfo.getNoCallerSavedRegs() != rbaseInfo.getNoCallerSavedRegs()) 10350 return {}; 10351 if (lbaseInfo.getNoCfCheck() != rbaseInfo.getNoCfCheck()) 10352 return {}; 10353 10354 // When merging declarations, it's common for supplemental information like 10355 // attributes to only be present in one of the declarations, and we generally 10356 // want type merging to preserve the union of information. So a merged 10357 // function type should be noreturn if it was noreturn in *either* operand 10358 // type. 10359 // 10360 // But for the conditional operator, this is backwards. The result of the 10361 // operator could be either operand, and its type should conservatively 10362 // reflect that. So a function type in a composite type is noreturn only 10363 // if it's noreturn in *both* operand types. 10364 // 10365 // Arguably, noreturn is a kind of subtype, and the conditional operator 10366 // ought to produce the most specific common supertype of its operand types. 10367 // That would differ from this rule in contravariant positions. However, 10368 // neither C nor C++ generally uses this kind of subtype reasoning. Also, 10369 // as a practical matter, it would only affect C code that does abstraction of 10370 // higher-order functions (taking noreturn callbacks!), which is uncommon to 10371 // say the least. So we use the simpler rule. 10372 bool NoReturn = IsConditionalOperator 10373 ? 
lbaseInfo.getNoReturn() && rbaseInfo.getNoReturn() 10374 : lbaseInfo.getNoReturn() || rbaseInfo.getNoReturn(); 10375 if (lbaseInfo.getNoReturn() != NoReturn) 10376 allLTypes = false; 10377 if (rbaseInfo.getNoReturn() != NoReturn) 10378 allRTypes = false; 10379 10380 FunctionType::ExtInfo einfo = lbaseInfo.withNoReturn(NoReturn); 10381 10382 if (lproto && rproto) { // two C99 style function prototypes 10383 assert((AllowCXX || 10384 (!lproto->hasExceptionSpec() && !rproto->hasExceptionSpec())) && 10385 "C++ shouldn't be here"); 10386 // Compatible functions must have the same number of parameters 10387 if (lproto->getNumParams() != rproto->getNumParams()) 10388 return {}; 10389 10390 // Variadic and non-variadic functions aren't compatible 10391 if (lproto->isVariadic() != rproto->isVariadic()) 10392 return {}; 10393 10394 if (lproto->getMethodQuals() != rproto->getMethodQuals()) 10395 return {}; 10396 10397 SmallVector<FunctionProtoType::ExtParameterInfo, 4> newParamInfos; 10398 bool canUseLeft, canUseRight; 10399 if (!mergeExtParameterInfo(lproto, rproto, canUseLeft, canUseRight, 10400 newParamInfos)) 10401 return {}; 10402 10403 if (!canUseLeft) 10404 allLTypes = false; 10405 if (!canUseRight) 10406 allRTypes = false; 10407 10408 // Check parameter type compatibility 10409 SmallVector<QualType, 10> types; 10410 for (unsigned i = 0, n = lproto->getNumParams(); i < n; i++) { 10411 QualType lParamType = lproto->getParamType(i).getUnqualifiedType(); 10412 QualType rParamType = rproto->getParamType(i).getUnqualifiedType(); 10413 QualType paramType = mergeFunctionParameterTypes( 10414 lParamType, rParamType, OfBlockPointer, Unqualified); 10415 if (paramType.isNull()) 10416 return {}; 10417 10418 if (Unqualified) 10419 paramType = paramType.getUnqualifiedType(); 10420 10421 types.push_back(paramType); 10422 if (Unqualified) { 10423 lParamType = lParamType.getUnqualifiedType(); 10424 rParamType = rParamType.getUnqualifiedType(); 10425 } 10426 10427 if (getCanonicalType(paramType) != getCanonicalType(lParamType)) 10428 allLTypes = false; 10429 if (getCanonicalType(paramType) != getCanonicalType(rParamType)) 10430 allRTypes = false; 10431 } 10432 10433 if (allLTypes) return lhs; 10434 if (allRTypes) return rhs; 10435 10436 FunctionProtoType::ExtProtoInfo EPI = lproto->getExtProtoInfo(); 10437 EPI.ExtInfo = einfo; 10438 EPI.ExtParameterInfos = 10439 newParamInfos.empty() ? nullptr : newParamInfos.data(); 10440 return getFunctionType(retType, types, EPI); 10441 } 10442 10443 if (lproto) allRTypes = false; 10444 if (rproto) allLTypes = false; 10445 10446 const FunctionProtoType *proto = lproto ? lproto : rproto; 10447 if (proto) { 10448 assert((AllowCXX || !proto->hasExceptionSpec()) && "C++ shouldn't be here"); 10449 if (proto->isVariadic()) 10450 return {}; 10451 // Check that the types are compatible with the types that 10452 // would result from default argument promotions (C99 6.7.5.3p15). 10453 // The only types actually affected are promotable integer 10454 // types and floats, which would be passed as a different 10455 // type depending on whether the prototype is visible. 10456 for (unsigned i = 0, n = proto->getNumParams(); i < n; ++i) { 10457 QualType paramTy = proto->getParamType(i); 10458 10459 // Look at the converted type of enum types, since that is the type used 10460 // to pass enum values. 
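// i.e. an enum parameter is considered with its underlying integer type, and
// the merge is rejected below if that type (or any other parameter type)
// would change under the default argument promotions.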
10461 if (const auto *Enum = paramTy->getAs<EnumType>()) { 10462 paramTy = Enum->getDecl()->getIntegerType(); 10463 if (paramTy.isNull()) 10464 return {}; 10465 } 10466 10467 if (isPromotableIntegerType(paramTy) || 10468 getCanonicalType(paramTy).getUnqualifiedType() == FloatTy) 10469 return {}; 10470 } 10471 10472 if (allLTypes) return lhs; 10473 if (allRTypes) return rhs; 10474 10475 FunctionProtoType::ExtProtoInfo EPI = proto->getExtProtoInfo(); 10476 EPI.ExtInfo = einfo; 10477 return getFunctionType(retType, proto->getParamTypes(), EPI); 10478 } 10479 10480 if (allLTypes) return lhs; 10481 if (allRTypes) return rhs; 10482 return getFunctionNoProtoType(retType, einfo); 10483 } 10484 10485 /// Given that we have an enum type and a non-enum type, try to merge them. 10486 static QualType mergeEnumWithInteger(ASTContext &Context, const EnumType *ET, 10487 QualType other, bool isBlockReturnType) { 10488 // C99 6.7.2.2p4: Each enumerated type shall be compatible with char, 10489 // a signed integer type, or an unsigned integer type. 10490 // Compatibility is based on the underlying type, not the promotion 10491 // type. 10492 QualType underlyingType = ET->getDecl()->getIntegerType(); 10493 if (underlyingType.isNull()) 10494 return {}; 10495 if (Context.hasSameType(underlyingType, other)) 10496 return other; 10497 10498 // In block return types, we're more permissive and accept any 10499 // integral type of the same size. 10500 if (isBlockReturnType && other->isIntegerType() && 10501 Context.getTypeSize(underlyingType) == Context.getTypeSize(other)) 10502 return other; 10503 10504 return {}; 10505 } 10506 10507 QualType ASTContext::mergeTypes(QualType LHS, QualType RHS, bool OfBlockPointer, 10508 bool Unqualified, bool BlockReturnType, 10509 bool IsConditionalOperator) { 10510 // For C++ we will not reach this code with reference types (see below), 10511 // for OpenMP variant call overloading we might. 10512 // 10513 // C++ [expr]: If an expression initially has the type "reference to T", the 10514 // type is adjusted to "T" prior to any further analysis, the expression 10515 // designates the object or function denoted by the reference, and the 10516 // expression is an lvalue unless the reference is an rvalue reference and 10517 // the expression is a function call (possibly inside parentheses). 10518 auto *LHSRefTy = LHS->getAs<ReferenceType>(); 10519 auto *RHSRefTy = RHS->getAs<ReferenceType>(); 10520 if (LangOpts.OpenMP && LHSRefTy && RHSRefTy && 10521 LHS->getTypeClass() == RHS->getTypeClass()) 10522 return mergeTypes(LHSRefTy->getPointeeType(), RHSRefTy->getPointeeType(), 10523 OfBlockPointer, Unqualified, BlockReturnType); 10524 if (LHSRefTy || RHSRefTy) 10525 return {}; 10526 10527 if (Unqualified) { 10528 LHS = LHS.getUnqualifiedType(); 10529 RHS = RHS.getUnqualifiedType(); 10530 } 10531 10532 QualType LHSCan = getCanonicalType(LHS), 10533 RHSCan = getCanonicalType(RHS); 10534 10535 // If two types are identical, they are compatible. 10536 if (LHSCan == RHSCan) 10537 return LHS; 10538 10539 // If the qualifiers are different, the types aren't compatible... mostly. 10540 Qualifiers LQuals = LHSCan.getLocalQualifiers(); 10541 Qualifiers RQuals = RHSCan.getLocalQualifiers(); 10542 if (LQuals != RQuals) { 10543 // If any of these qualifiers are different, we have a type 10544 // mismatch. 
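// CVR qualifiers, address space, ObjC lifetime, and __unaligned must all
// agree; the only tolerated difference is the limited GC-qualifier case
// handled after this check.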
10545 if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() || 10546 LQuals.getAddressSpace() != RQuals.getAddressSpace() || 10547 LQuals.getObjCLifetime() != RQuals.getObjCLifetime() || 10548 LQuals.hasUnaligned() != RQuals.hasUnaligned()) 10549 return {}; 10550 10551 // Exactly one GC qualifier difference is allowed: __strong is 10552 // okay if the other type has no GC qualifier but is an Objective 10553 // C object pointer (i.e. implicitly strong by default). We fix 10554 // this by pretending that the unqualified type was actually 10555 // qualified __strong. 10556 Qualifiers::GC GC_L = LQuals.getObjCGCAttr(); 10557 Qualifiers::GC GC_R = RQuals.getObjCGCAttr(); 10558 assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements"); 10559 10560 if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak) 10561 return {}; 10562 10563 if (GC_L == Qualifiers::Strong && RHSCan->isObjCObjectPointerType()) { 10564 return mergeTypes(LHS, getObjCGCQualType(RHS, Qualifiers::Strong)); 10565 } 10566 if (GC_R == Qualifiers::Strong && LHSCan->isObjCObjectPointerType()) { 10567 return mergeTypes(getObjCGCQualType(LHS, Qualifiers::Strong), RHS); 10568 } 10569 return {}; 10570 } 10571 10572 // Okay, qualifiers are equal. 10573 10574 Type::TypeClass LHSClass = LHSCan->getTypeClass(); 10575 Type::TypeClass RHSClass = RHSCan->getTypeClass(); 10576 10577 // We want to consider the two function types to be the same for these 10578 // comparisons, just force one to the other. 10579 if (LHSClass == Type::FunctionProto) LHSClass = Type::FunctionNoProto; 10580 if (RHSClass == Type::FunctionProto) RHSClass = Type::FunctionNoProto; 10581 10582 // Same as above for arrays 10583 if (LHSClass == Type::VariableArray || LHSClass == Type::IncompleteArray) 10584 LHSClass = Type::ConstantArray; 10585 if (RHSClass == Type::VariableArray || RHSClass == Type::IncompleteArray) 10586 RHSClass = Type::ConstantArray; 10587 10588 // ObjCInterfaces are just specialized ObjCObjects. 10589 if (LHSClass == Type::ObjCInterface) LHSClass = Type::ObjCObject; 10590 if (RHSClass == Type::ObjCInterface) RHSClass = Type::ObjCObject; 10591 10592 // Canonicalize ExtVector -> Vector. 10593 if (LHSClass == Type::ExtVector) LHSClass = Type::Vector; 10594 if (RHSClass == Type::ExtVector) RHSClass = Type::Vector; 10595 10596 // If the canonical type classes don't match. 10597 if (LHSClass != RHSClass) { 10598 // Note that we only have special rules for turning block enum 10599 // returns into block int returns, not vice-versa. 10600 if (const auto *ETy = LHS->getAs<EnumType>()) { 10601 return mergeEnumWithInteger(*this, ETy, RHS, false); 10602 } 10603 if (const EnumType* ETy = RHS->getAs<EnumType>()) { 10604 return mergeEnumWithInteger(*this, ETy, LHS, BlockReturnType); 10605 } 10606 // allow block pointer type to match an 'id' type. 10607 if (OfBlockPointer && !BlockReturnType) { 10608 if (LHS->isObjCIdType() && RHS->isBlockPointerType()) 10609 return LHS; 10610 if (RHS->isObjCIdType() && LHS->isBlockPointerType()) 10611 return RHS; 10612 } 10613 // Allow __auto_type to match anything; it merges to the type with more 10614 // information. 10615 if (const auto *AT = LHS->getAs<AutoType>()) { 10616 if (!AT->isDeduced() && AT->isGNUAutoType()) 10617 return RHS; 10618 } 10619 if (const auto *AT = RHS->getAs<AutoType>()) { 10620 if (!AT->isDeduced() && AT->isGNUAutoType()) 10621 return LHS; 10622 } 10623 return {}; 10624 } 10625 10626 // The canonical type classes match. 
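// Each case below either returns one of the operands (when it already
// denotes the merged type), builds a new merged type, or returns QualType()
// to signal that the types are incompatible.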
10627 switch (LHSClass) { 10628 #define TYPE(Class, Base) 10629 #define ABSTRACT_TYPE(Class, Base) 10630 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class: 10631 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class: 10632 #define DEPENDENT_TYPE(Class, Base) case Type::Class: 10633 #include "clang/AST/TypeNodes.inc" 10634 llvm_unreachable("Non-canonical and dependent types shouldn't get here"); 10635 10636 case Type::Auto: 10637 case Type::DeducedTemplateSpecialization: 10638 case Type::LValueReference: 10639 case Type::RValueReference: 10640 case Type::MemberPointer: 10641 llvm_unreachable("C++ should never be in mergeTypes"); 10642 10643 case Type::ObjCInterface: 10644 case Type::IncompleteArray: 10645 case Type::VariableArray: 10646 case Type::FunctionProto: 10647 case Type::ExtVector: 10648 llvm_unreachable("Types are eliminated above"); 10649 10650 case Type::Pointer: 10651 { 10652 // Merge two pointer types, while trying to preserve typedef info 10653 QualType LHSPointee = LHS->castAs<PointerType>()->getPointeeType(); 10654 QualType RHSPointee = RHS->castAs<PointerType>()->getPointeeType(); 10655 if (Unqualified) { 10656 LHSPointee = LHSPointee.getUnqualifiedType(); 10657 RHSPointee = RHSPointee.getUnqualifiedType(); 10658 } 10659 QualType ResultType = mergeTypes(LHSPointee, RHSPointee, false, 10660 Unqualified); 10661 if (ResultType.isNull()) 10662 return {}; 10663 if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType)) 10664 return LHS; 10665 if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType)) 10666 return RHS; 10667 return getPointerType(ResultType); 10668 } 10669 case Type::BlockPointer: 10670 { 10671 // Merge two block pointer types, while trying to preserve typedef info 10672 QualType LHSPointee = LHS->castAs<BlockPointerType>()->getPointeeType(); 10673 QualType RHSPointee = RHS->castAs<BlockPointerType>()->getPointeeType(); 10674 if (Unqualified) { 10675 LHSPointee = LHSPointee.getUnqualifiedType(); 10676 RHSPointee = RHSPointee.getUnqualifiedType(); 10677 } 10678 if (getLangOpts().OpenCL) { 10679 Qualifiers LHSPteeQual = LHSPointee.getQualifiers(); 10680 Qualifiers RHSPteeQual = RHSPointee.getQualifiers(); 10681 // Blocks can't be an expression in a ternary operator (OpenCL v2.0 10682 // 6.12.5) thus the following check is asymmetric. 
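// Only require the LHS pointee's address space to cover the RHS pointee's;
// both address spaces are then stripped before the pointee types themselves
// are merged.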
10683 if (!LHSPteeQual.isAddressSpaceSupersetOf(RHSPteeQual)) 10684 return {}; 10685 LHSPteeQual.removeAddressSpace(); 10686 RHSPteeQual.removeAddressSpace(); 10687 LHSPointee = 10688 QualType(LHSPointee.getTypePtr(), LHSPteeQual.getAsOpaqueValue()); 10689 RHSPointee = 10690 QualType(RHSPointee.getTypePtr(), RHSPteeQual.getAsOpaqueValue()); 10691 } 10692 QualType ResultType = mergeTypes(LHSPointee, RHSPointee, OfBlockPointer, 10693 Unqualified); 10694 if (ResultType.isNull()) 10695 return {}; 10696 if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType)) 10697 return LHS; 10698 if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType)) 10699 return RHS; 10700 return getBlockPointerType(ResultType); 10701 } 10702 case Type::Atomic: 10703 { 10704 // Merge two pointer types, while trying to preserve typedef info 10705 QualType LHSValue = LHS->castAs<AtomicType>()->getValueType(); 10706 QualType RHSValue = RHS->castAs<AtomicType>()->getValueType(); 10707 if (Unqualified) { 10708 LHSValue = LHSValue.getUnqualifiedType(); 10709 RHSValue = RHSValue.getUnqualifiedType(); 10710 } 10711 QualType ResultType = mergeTypes(LHSValue, RHSValue, false, 10712 Unqualified); 10713 if (ResultType.isNull()) 10714 return {}; 10715 if (getCanonicalType(LHSValue) == getCanonicalType(ResultType)) 10716 return LHS; 10717 if (getCanonicalType(RHSValue) == getCanonicalType(ResultType)) 10718 return RHS; 10719 return getAtomicType(ResultType); 10720 } 10721 case Type::ConstantArray: 10722 { 10723 const ConstantArrayType* LCAT = getAsConstantArrayType(LHS); 10724 const ConstantArrayType* RCAT = getAsConstantArrayType(RHS); 10725 if (LCAT && RCAT && RCAT->getSize() != LCAT->getSize()) 10726 return {}; 10727 10728 QualType LHSElem = getAsArrayType(LHS)->getElementType(); 10729 QualType RHSElem = getAsArrayType(RHS)->getElementType(); 10730 if (Unqualified) { 10731 LHSElem = LHSElem.getUnqualifiedType(); 10732 RHSElem = RHSElem.getUnqualifiedType(); 10733 } 10734 10735 QualType ResultType = mergeTypes(LHSElem, RHSElem, false, Unqualified); 10736 if (ResultType.isNull()) 10737 return {}; 10738 10739 const VariableArrayType* LVAT = getAsVariableArrayType(LHS); 10740 const VariableArrayType* RVAT = getAsVariableArrayType(RHS); 10741 10742 // If either side is a variable array, and both are complete, check whether 10743 // the current dimension is definite. 
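// SizeFetch below yields a definite extent either from a constant array or
// from a VLA whose size expression folds to an integer constant; two definite
// but unequal extents make the arrays incompatible.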
10744 if (LVAT || RVAT) { 10745 auto SizeFetch = [this](const VariableArrayType* VAT, 10746 const ConstantArrayType* CAT) 10747 -> std::pair<bool,llvm::APInt> { 10748 if (VAT) { 10749 std::optional<llvm::APSInt> TheInt; 10750 Expr *E = VAT->getSizeExpr(); 10751 if (E && (TheInt = E->getIntegerConstantExpr(*this))) 10752 return std::make_pair(true, *TheInt); 10753 return std::make_pair(false, llvm::APSInt()); 10754 } 10755 if (CAT) 10756 return std::make_pair(true, CAT->getSize()); 10757 return std::make_pair(false, llvm::APInt()); 10758 }; 10759 10760 bool HaveLSize, HaveRSize; 10761 llvm::APInt LSize, RSize; 10762 std::tie(HaveLSize, LSize) = SizeFetch(LVAT, LCAT); 10763 std::tie(HaveRSize, RSize) = SizeFetch(RVAT, RCAT); 10764 if (HaveLSize && HaveRSize && !llvm::APInt::isSameValue(LSize, RSize)) 10765 return {}; // Definite, but unequal, array dimension 10766 } 10767 10768 if (LCAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType)) 10769 return LHS; 10770 if (RCAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType)) 10771 return RHS; 10772 if (LCAT) 10773 return getConstantArrayType(ResultType, LCAT->getSize(), 10774 LCAT->getSizeExpr(), ArraySizeModifier(), 0); 10775 if (RCAT) 10776 return getConstantArrayType(ResultType, RCAT->getSize(), 10777 RCAT->getSizeExpr(), ArraySizeModifier(), 0); 10778 if (LVAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType)) 10779 return LHS; 10780 if (RVAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType)) 10781 return RHS; 10782 if (LVAT) { 10783 // FIXME: This isn't correct! But tricky to implement because 10784 // the array's size has to be the size of LHS, but the type 10785 // has to be different. 10786 return LHS; 10787 } 10788 if (RVAT) { 10789 // FIXME: This isn't correct! But tricky to implement because 10790 // the array's size has to be the size of RHS, but the type 10791 // has to be different. 10792 return RHS; 10793 } 10794 if (getCanonicalType(LHSElem) == getCanonicalType(ResultType)) return LHS; 10795 if (getCanonicalType(RHSElem) == getCanonicalType(ResultType)) return RHS; 10796 return getIncompleteArrayType(ResultType, ArraySizeModifier(), 0); 10797 } 10798 case Type::FunctionNoProto: 10799 return mergeFunctionTypes(LHS, RHS, OfBlockPointer, Unqualified, 10800 /*AllowCXX=*/false, IsConditionalOperator); 10801 case Type::Record: 10802 case Type::Enum: 10803 return {}; 10804 case Type::Builtin: 10805 // Only exactly equal builtin types are compatible, which is tested above. 10806 return {}; 10807 case Type::Complex: 10808 // Distinct complex types are incompatible. 10809 return {}; 10810 case Type::Vector: 10811 // FIXME: The merged type should be an ExtVector! 10812 if (areCompatVectorTypes(LHSCan->castAs<VectorType>(), 10813 RHSCan->castAs<VectorType>())) 10814 return LHS; 10815 return {}; 10816 case Type::ConstantMatrix: 10817 if (areCompatMatrixTypes(LHSCan->castAs<ConstantMatrixType>(), 10818 RHSCan->castAs<ConstantMatrixType>())) 10819 return LHS; 10820 return {}; 10821 case Type::ObjCObject: { 10822 // Check if the types are assignment compatible. 10823 // FIXME: This should be type compatibility, e.g. whether 10824 // "LHS x; RHS x;" at global scope is legal. 
10825 if (canAssignObjCInterfaces(LHS->castAs<ObjCObjectType>(), 10826 RHS->castAs<ObjCObjectType>())) 10827 return LHS; 10828 return {}; 10829 } 10830 case Type::ObjCObjectPointer: 10831 if (OfBlockPointer) { 10832 if (canAssignObjCInterfacesInBlockPointer( 10833 LHS->castAs<ObjCObjectPointerType>(), 10834 RHS->castAs<ObjCObjectPointerType>(), BlockReturnType)) 10835 return LHS; 10836 return {}; 10837 } 10838 if (canAssignObjCInterfaces(LHS->castAs<ObjCObjectPointerType>(), 10839 RHS->castAs<ObjCObjectPointerType>())) 10840 return LHS; 10841 return {}; 10842 case Type::Pipe: 10843 assert(LHS != RHS && 10844 "Equivalent pipe types should have already been handled!"); 10845 return {}; 10846 case Type::BitInt: { 10847 // Merge two bit-precise int types, while trying to preserve typedef info. 10848 bool LHSUnsigned = LHS->castAs<BitIntType>()->isUnsigned(); 10849 bool RHSUnsigned = RHS->castAs<BitIntType>()->isUnsigned(); 10850 unsigned LHSBits = LHS->castAs<BitIntType>()->getNumBits(); 10851 unsigned RHSBits = RHS->castAs<BitIntType>()->getNumBits(); 10852 10853 // Like unsigned/int, shouldn't have a type if they don't match. 10854 if (LHSUnsigned != RHSUnsigned) 10855 return {}; 10856 10857 if (LHSBits != RHSBits) 10858 return {}; 10859 return LHS; 10860 } 10861 } 10862 10863 llvm_unreachable("Invalid Type::Class!"); 10864 } 10865 10866 bool ASTContext::mergeExtParameterInfo( 10867 const FunctionProtoType *FirstFnType, const FunctionProtoType *SecondFnType, 10868 bool &CanUseFirst, bool &CanUseSecond, 10869 SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &NewParamInfos) { 10870 assert(NewParamInfos.empty() && "param info list not empty"); 10871 CanUseFirst = CanUseSecond = true; 10872 bool FirstHasInfo = FirstFnType->hasExtParameterInfos(); 10873 bool SecondHasInfo = SecondFnType->hasExtParameterInfos(); 10874 10875 // Fast path: if the first type doesn't have ext parameter infos, 10876 // we match if and only if the second type also doesn't have them. 10877 if (!FirstHasInfo && !SecondHasInfo) 10878 return true; 10879 10880 bool NeedParamInfo = false; 10881 size_t E = FirstHasInfo ? FirstFnType->getExtParameterInfos().size() 10882 : SecondFnType->getExtParameterInfos().size(); 10883 10884 for (size_t I = 0; I < E; ++I) { 10885 FunctionProtoType::ExtParameterInfo FirstParam, SecondParam; 10886 if (FirstHasInfo) 10887 FirstParam = FirstFnType->getExtParameterInfo(I); 10888 if (SecondHasInfo) 10889 SecondParam = SecondFnType->getExtParameterInfo(I); 10890 10891 // Cannot merge unless everything except the noescape flag matches. 10892 if (FirstParam.withIsNoEscape(false) != SecondParam.withIsNoEscape(false)) 10893 return false; 10894 10895 bool FirstNoEscape = FirstParam.isNoEscape(); 10896 bool SecondNoEscape = SecondParam.isNoEscape(); 10897 bool IsNoEscape = FirstNoEscape && SecondNoEscape; 10898 NewParamInfos.push_back(FirstParam.withIsNoEscape(IsNoEscape)); 10899 if (NewParamInfos.back().getOpaqueValue()) 10900 NeedParamInfo = true; 10901 if (FirstNoEscape != IsNoEscape) 10902 CanUseFirst = false; 10903 if (SecondNoEscape != IsNoEscape) 10904 CanUseSecond = false; 10905 } 10906 10907 if (!NeedParamInfo) 10908 NewParamInfos.clear(); 10909 10910 return true; 10911 } 10912 10913 void ASTContext::ResetObjCLayout(const ObjCContainerDecl *CD) { 10914 ObjCLayouts[CD] = nullptr; 10915 } 10916 10917 /// mergeObjCGCQualifiers - This routine merges ObjC's GC attribute of 'LHS' and 10918 /// 'RHS' attributes and returns the merged version; including for function 10919 /// return types. 
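/// For example (an illustrative sketch; 'Ctx', 'StrongIdTy' and 'IdTy' are not
/// names defined in this file), merging '__strong id' with plain 'id' yields
/// the '__strong'-qualified side, since an unqualified Objective-C object
/// pointer is treated as implicitly strong:
/// \code
///   QualType Merged = Ctx.mergeObjCGCQualifiers(StrongIdTy, IdTy);
///   // Merged is StrongIdTy (non-null); a __weak vs. __strong mix would
///   // instead produce a null QualType.
/// \endcode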
10920 QualType ASTContext::mergeObjCGCQualifiers(QualType LHS, QualType RHS) { 10921 QualType LHSCan = getCanonicalType(LHS), 10922 RHSCan = getCanonicalType(RHS); 10923 // If two types are identical, they are compatible. 10924 if (LHSCan == RHSCan) 10925 return LHS; 10926 if (RHSCan->isFunctionType()) { 10927 if (!LHSCan->isFunctionType()) 10928 return {}; 10929 QualType OldReturnType = 10930 cast<FunctionType>(RHSCan.getTypePtr())->getReturnType(); 10931 QualType NewReturnType = 10932 cast<FunctionType>(LHSCan.getTypePtr())->getReturnType(); 10933 QualType ResReturnType = 10934 mergeObjCGCQualifiers(NewReturnType, OldReturnType); 10935 if (ResReturnType.isNull()) 10936 return {}; 10937 if (ResReturnType == NewReturnType || ResReturnType == OldReturnType) { 10938 // id foo(); ... __strong id foo(); or: __strong id foo(); ... id foo(); 10939 // In either case, use OldReturnType to build the new function type. 10940 const auto *F = LHS->castAs<FunctionType>(); 10941 if (const auto *FPT = cast<FunctionProtoType>(F)) { 10942 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); 10943 EPI.ExtInfo = getFunctionExtInfo(LHS); 10944 QualType ResultType = 10945 getFunctionType(OldReturnType, FPT->getParamTypes(), EPI); 10946 return ResultType; 10947 } 10948 } 10949 return {}; 10950 } 10951 10952 // If the qualifiers are different, the types can still be merged. 10953 Qualifiers LQuals = LHSCan.getLocalQualifiers(); 10954 Qualifiers RQuals = RHSCan.getLocalQualifiers(); 10955 if (LQuals != RQuals) { 10956 // If any of these qualifiers are different, we have a type mismatch. 10957 if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() || 10958 LQuals.getAddressSpace() != RQuals.getAddressSpace()) 10959 return {}; 10960 10961 // Exactly one GC qualifier difference is allowed: __strong is 10962 // okay if the other type has no GC qualifier but is an Objective 10963 // C object pointer (i.e. implicitly strong by default). We fix 10964 // this by pretending that the unqualified type was actually 10965 // qualified __strong. 
10966 Qualifiers::GC GC_L = LQuals.getObjCGCAttr(); 10967 Qualifiers::GC GC_R = RQuals.getObjCGCAttr(); 10968 assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements"); 10969 10970 if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak) 10971 return {}; 10972 10973 if (GC_L == Qualifiers::Strong) 10974 return LHS; 10975 if (GC_R == Qualifiers::Strong) 10976 return RHS; 10977 return {}; 10978 } 10979 10980 if (LHSCan->isObjCObjectPointerType() && RHSCan->isObjCObjectPointerType()) { 10981 QualType LHSBaseQT = LHS->castAs<ObjCObjectPointerType>()->getPointeeType(); 10982 QualType RHSBaseQT = RHS->castAs<ObjCObjectPointerType>()->getPointeeType(); 10983 QualType ResQT = mergeObjCGCQualifiers(LHSBaseQT, RHSBaseQT); 10984 if (ResQT == LHSBaseQT) 10985 return LHS; 10986 if (ResQT == RHSBaseQT) 10987 return RHS; 10988 } 10989 return {}; 10990 } 10991 10992 //===----------------------------------------------------------------------===// 10993 // Integer Predicates 10994 //===----------------------------------------------------------------------===// 10995 10996 unsigned ASTContext::getIntWidth(QualType T) const { 10997 if (const auto *ET = T->getAs<EnumType>()) 10998 T = ET->getDecl()->getIntegerType(); 10999 if (T->isBooleanType()) 11000 return 1; 11001 if (const auto *EIT = T->getAs<BitIntType>()) 11002 return EIT->getNumBits(); 11003 // For builtin types, just use the standard type sizing method 11004 return (unsigned)getTypeSize(T); 11005 } 11006 11007 QualType ASTContext::getCorrespondingUnsignedType(QualType T) const { 11008 assert((T->hasIntegerRepresentation() || T->isEnumeralType() || 11009 T->isFixedPointType()) && 11010 "Unexpected type"); 11011 11012 // Turn <4 x signed int> -> <4 x unsigned int> 11013 if (const auto *VTy = T->getAs<VectorType>()) 11014 return getVectorType(getCorrespondingUnsignedType(VTy->getElementType()), 11015 VTy->getNumElements(), VTy->getVectorKind()); 11016 11017 // For _BitInt, return an unsigned _BitInt with same width. 11018 if (const auto *EITy = T->getAs<BitIntType>()) 11019 return getBitIntType(/*Unsigned=*/true, EITy->getNumBits()); 11020 11021 // For enums, get the underlying integer type of the enum, and let the general 11022 // integer type signchanging code handle it. 11023 if (const auto *ETy = T->getAs<EnumType>()) 11024 T = ETy->getDecl()->getIntegerType(); 11025 11026 switch (T->castAs<BuiltinType>()->getKind()) { 11027 case BuiltinType::Char_U: 11028 // Plain `char` is mapped to `unsigned char` even if it's already unsigned 11029 case BuiltinType::Char_S: 11030 case BuiltinType::SChar: 11031 case BuiltinType::Char8: 11032 return UnsignedCharTy; 11033 case BuiltinType::Short: 11034 return UnsignedShortTy; 11035 case BuiltinType::Int: 11036 return UnsignedIntTy; 11037 case BuiltinType::Long: 11038 return UnsignedLongTy; 11039 case BuiltinType::LongLong: 11040 return UnsignedLongLongTy; 11041 case BuiltinType::Int128: 11042 return UnsignedInt128Ty; 11043 // wchar_t is special. It is either signed or not, but when it's signed, 11044 // there's no matching "unsigned wchar_t". Therefore we return the unsigned 11045 // version of its underlying type instead. 
11046 case BuiltinType::WChar_S: 11047 return getUnsignedWCharType(); 11048 11049 case BuiltinType::ShortAccum: 11050 return UnsignedShortAccumTy; 11051 case BuiltinType::Accum: 11052 return UnsignedAccumTy; 11053 case BuiltinType::LongAccum: 11054 return UnsignedLongAccumTy; 11055 case BuiltinType::SatShortAccum: 11056 return SatUnsignedShortAccumTy; 11057 case BuiltinType::SatAccum: 11058 return SatUnsignedAccumTy; 11059 case BuiltinType::SatLongAccum: 11060 return SatUnsignedLongAccumTy; 11061 case BuiltinType::ShortFract: 11062 return UnsignedShortFractTy; 11063 case BuiltinType::Fract: 11064 return UnsignedFractTy; 11065 case BuiltinType::LongFract: 11066 return UnsignedLongFractTy; 11067 case BuiltinType::SatShortFract: 11068 return SatUnsignedShortFractTy; 11069 case BuiltinType::SatFract: 11070 return SatUnsignedFractTy; 11071 case BuiltinType::SatLongFract: 11072 return SatUnsignedLongFractTy; 11073 default: 11074 assert((T->hasUnsignedIntegerRepresentation() || 11075 T->isUnsignedFixedPointType()) && 11076 "Unexpected signed integer or fixed point type"); 11077 return T; 11078 } 11079 } 11080 11081 QualType ASTContext::getCorrespondingSignedType(QualType T) const { 11082 assert((T->hasIntegerRepresentation() || T->isEnumeralType() || 11083 T->isFixedPointType()) && 11084 "Unexpected type"); 11085 11086 // Turn <4 x unsigned int> -> <4 x signed int> 11087 if (const auto *VTy = T->getAs<VectorType>()) 11088 return getVectorType(getCorrespondingSignedType(VTy->getElementType()), 11089 VTy->getNumElements(), VTy->getVectorKind()); 11090 11091 // For _BitInt, return a signed _BitInt with same width. 11092 if (const auto *EITy = T->getAs<BitIntType>()) 11093 return getBitIntType(/*Unsigned=*/false, EITy->getNumBits()); 11094 11095 // For enums, get the underlying integer type of the enum, and let the general 11096 // integer type signchanging code handle it. 11097 if (const auto *ETy = T->getAs<EnumType>()) 11098 T = ETy->getDecl()->getIntegerType(); 11099 11100 switch (T->castAs<BuiltinType>()->getKind()) { 11101 case BuiltinType::Char_S: 11102 // Plain `char` is mapped to `signed char` even if it's already signed 11103 case BuiltinType::Char_U: 11104 case BuiltinType::UChar: 11105 case BuiltinType::Char8: 11106 return SignedCharTy; 11107 case BuiltinType::UShort: 11108 return ShortTy; 11109 case BuiltinType::UInt: 11110 return IntTy; 11111 case BuiltinType::ULong: 11112 return LongTy; 11113 case BuiltinType::ULongLong: 11114 return LongLongTy; 11115 case BuiltinType::UInt128: 11116 return Int128Ty; 11117 // wchar_t is special. It is either unsigned or not, but when it's unsigned, 11118 // there's no matching "signed wchar_t". Therefore we return the signed 11119 // version of its underlying type instead. 
11120 case BuiltinType::WChar_U: 11121 return getSignedWCharType(); 11122 11123 case BuiltinType::UShortAccum: 11124 return ShortAccumTy; 11125 case BuiltinType::UAccum: 11126 return AccumTy; 11127 case BuiltinType::ULongAccum: 11128 return LongAccumTy; 11129 case BuiltinType::SatUShortAccum: 11130 return SatShortAccumTy; 11131 case BuiltinType::SatUAccum: 11132 return SatAccumTy; 11133 case BuiltinType::SatULongAccum: 11134 return SatLongAccumTy; 11135 case BuiltinType::UShortFract: 11136 return ShortFractTy; 11137 case BuiltinType::UFract: 11138 return FractTy; 11139 case BuiltinType::ULongFract: 11140 return LongFractTy; 11141 case BuiltinType::SatUShortFract: 11142 return SatShortFractTy; 11143 case BuiltinType::SatUFract: 11144 return SatFractTy; 11145 case BuiltinType::SatULongFract: 11146 return SatLongFractTy; 11147 default: 11148 assert( 11149 (T->hasSignedIntegerRepresentation() || T->isSignedFixedPointType()) && 11150 "Unexpected signed integer or fixed point type"); 11151 return T; 11152 } 11153 } 11154 11155 ASTMutationListener::~ASTMutationListener() = default; 11156 11157 void ASTMutationListener::DeducedReturnType(const FunctionDecl *FD, 11158 QualType ReturnType) {} 11159 11160 //===----------------------------------------------------------------------===// 11161 // Builtin Type Computation 11162 //===----------------------------------------------------------------------===// 11163 11164 /// DecodeTypeFromStr - This decodes one type descriptor from Str, advancing the 11165 /// pointer over the consumed characters. This returns the resultant type. If 11166 /// AllowTypeModifiers is false then modifier like * are not parsed, just basic 11167 /// types. This allows "v2i*" to be parsed as a pointer to a v2i instead of 11168 /// a vector of "i*". 11169 /// 11170 /// RequiresICE is filled in on return to indicate whether the value is required 11171 /// to be an Integer Constant Expression. 11172 static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context, 11173 ASTContext::GetBuiltinTypeError &Error, 11174 bool &RequiresICE, 11175 bool AllowTypeModifiers) { 11176 // Modifiers. 11177 int HowLong = 0; 11178 bool Signed = false, Unsigned = false; 11179 RequiresICE = false; 11180 11181 // Read the prefixed modifiers first. 11182 bool Done = false; 11183 #ifndef NDEBUG 11184 bool IsSpecial = false; 11185 #endif 11186 while (!Done) { 11187 switch (*Str++) { 11188 default: Done = true; --Str; break; 11189 case 'I': 11190 RequiresICE = true; 11191 break; 11192 case 'S': 11193 assert(!Unsigned && "Can't use both 'S' and 'U' modifiers!"); 11194 assert(!Signed && "Can't use 'S' modifier multiple times!"); 11195 Signed = true; 11196 break; 11197 case 'U': 11198 assert(!Signed && "Can't use both 'S' and 'U' modifiers!"); 11199 assert(!Unsigned && "Can't use 'U' modifier multiple times!"); 11200 Unsigned = true; 11201 break; 11202 case 'L': 11203 assert(!IsSpecial && "Can't use 'L' with 'W', 'N', 'Z' or 'O' modifiers"); 11204 assert(HowLong <= 2 && "Can't have LLLL modifier"); 11205 ++HowLong; 11206 break; 11207 case 'N': 11208 // 'N' behaves like 'L' for all non LP64 targets and 'int' otherwise. 11209 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); 11210 assert(HowLong == 0 && "Can't use both 'L' and 'N' modifiers!"); 11211 #ifndef NDEBUG 11212 IsSpecial = true; 11213 #endif 11214 if (Context.getTargetInfo().getLongWidth() == 32) 11215 ++HowLong; 11216 break; 11217 case 'W': 11218 // This modifier represents int64 type. 
11219 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); 11220 assert(HowLong == 0 && "Can't use both 'L' and 'W' modifiers!"); 11221 #ifndef NDEBUG 11222 IsSpecial = true; 11223 #endif 11224 switch (Context.getTargetInfo().getInt64Type()) { 11225 default: 11226 llvm_unreachable("Unexpected integer type"); 11227 case TargetInfo::SignedLong: 11228 HowLong = 1; 11229 break; 11230 case TargetInfo::SignedLongLong: 11231 HowLong = 2; 11232 break; 11233 } 11234 break; 11235 case 'Z': 11236 // This modifier represents int32 type. 11237 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); 11238 assert(HowLong == 0 && "Can't use both 'L' and 'Z' modifiers!"); 11239 #ifndef NDEBUG 11240 IsSpecial = true; 11241 #endif 11242 switch (Context.getTargetInfo().getIntTypeByWidth(32, true)) { 11243 default: 11244 llvm_unreachable("Unexpected integer type"); 11245 case TargetInfo::SignedInt: 11246 HowLong = 0; 11247 break; 11248 case TargetInfo::SignedLong: 11249 HowLong = 1; 11250 break; 11251 case TargetInfo::SignedLongLong: 11252 HowLong = 2; 11253 break; 11254 } 11255 break; 11256 case 'O': 11257 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); 11258 assert(HowLong == 0 && "Can't use both 'L' and 'O' modifiers!"); 11259 #ifndef NDEBUG 11260 IsSpecial = true; 11261 #endif 11262 if (Context.getLangOpts().OpenCL) 11263 HowLong = 1; 11264 else 11265 HowLong = 2; 11266 break; 11267 } 11268 } 11269 11270 QualType Type; 11271 11272 // Read the base type. 11273 switch (*Str++) { 11274 default: llvm_unreachable("Unknown builtin type letter!"); 11275 case 'x': 11276 assert(HowLong == 0 && !Signed && !Unsigned && 11277 "Bad modifiers used with 'x'!"); 11278 Type = Context.Float16Ty; 11279 break; 11280 case 'y': 11281 assert(HowLong == 0 && !Signed && !Unsigned && 11282 "Bad modifiers used with 'y'!"); 11283 Type = Context.BFloat16Ty; 11284 break; 11285 case 'v': 11286 assert(HowLong == 0 && !Signed && !Unsigned && 11287 "Bad modifiers used with 'v'!"); 11288 Type = Context.VoidTy; 11289 break; 11290 case 'h': 11291 assert(HowLong == 0 && !Signed && !Unsigned && 11292 "Bad modifiers used with 'h'!"); 11293 Type = Context.HalfTy; 11294 break; 11295 case 'f': 11296 assert(HowLong == 0 && !Signed && !Unsigned && 11297 "Bad modifiers used with 'f'!"); 11298 Type = Context.FloatTy; 11299 break; 11300 case 'd': 11301 assert(HowLong < 3 && !Signed && !Unsigned && 11302 "Bad modifiers used with 'd'!"); 11303 if (HowLong == 1) 11304 Type = Context.LongDoubleTy; 11305 else if (HowLong == 2) 11306 Type = Context.Float128Ty; 11307 else 11308 Type = Context.DoubleTy; 11309 break; 11310 case 's': 11311 assert(HowLong == 0 && "Bad modifiers used with 's'!"); 11312 if (Unsigned) 11313 Type = Context.UnsignedShortTy; 11314 else 11315 Type = Context.ShortTy; 11316 break; 11317 case 'i': 11318 if (HowLong == 3) 11319 Type = Unsigned ? Context.UnsignedInt128Ty : Context.Int128Ty; 11320 else if (HowLong == 2) 11321 Type = Unsigned ? Context.UnsignedLongLongTy : Context.LongLongTy; 11322 else if (HowLong == 1) 11323 Type = Unsigned ? Context.UnsignedLongTy : Context.LongTy; 11324 else 11325 Type = Unsigned ? 
Context.UnsignedIntTy : Context.IntTy; 11326 break; 11327 case 'c': 11328 assert(HowLong == 0 && "Bad modifiers used with 'c'!"); 11329 if (Signed) 11330 Type = Context.SignedCharTy; 11331 else if (Unsigned) 11332 Type = Context.UnsignedCharTy; 11333 else 11334 Type = Context.CharTy; 11335 break; 11336 case 'b': // boolean 11337 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'b'!"); 11338 Type = Context.BoolTy; 11339 break; 11340 case 'z': // size_t. 11341 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'z'!"); 11342 Type = Context.getSizeType(); 11343 break; 11344 case 'w': // wchar_t. 11345 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'w'!"); 11346 Type = Context.getWideCharType(); 11347 break; 11348 case 'F': 11349 Type = Context.getCFConstantStringType(); 11350 break; 11351 case 'G': 11352 Type = Context.getObjCIdType(); 11353 break; 11354 case 'H': 11355 Type = Context.getObjCSelType(); 11356 break; 11357 case 'M': 11358 Type = Context.getObjCSuperType(); 11359 break; 11360 case 'a': 11361 Type = Context.getBuiltinVaListType(); 11362 assert(!Type.isNull() && "builtin va list type not initialized!"); 11363 break; 11364 case 'A': 11365 // This is a "reference" to a va_list; however, what exactly 11366 // this means depends on how va_list is defined. There are two 11367 // different kinds of va_list: ones passed by value, and ones 11368 // passed by reference. An example of a by-value va_list is 11369 // x86, where va_list is a char*. An example of by-ref va_list 11370 // is x86-64, where va_list is a __va_list_tag[1]. For x86, 11371 // we want this argument to be a char*&; for x86-64, we want 11372 // it to be a __va_list_tag*. 11373 Type = Context.getBuiltinVaListType(); 11374 assert(!Type.isNull() && "builtin va list type not initialized!"); 11375 if (Type->isArrayType()) 11376 Type = Context.getArrayDecayedType(Type); 11377 else 11378 Type = Context.getLValueReferenceType(Type); 11379 break; 11380 case 'q': { 11381 char *End; 11382 unsigned NumElements = strtoul(Str, &End, 10); 11383 assert(End != Str && "Missing vector size"); 11384 Str = End; 11385 11386 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, 11387 RequiresICE, false); 11388 assert(!RequiresICE && "Can't require vector ICE"); 11389 11390 Type = Context.getScalableVectorType(ElementType, NumElements); 11391 break; 11392 } 11393 case 'Q': { 11394 switch (*Str++) { 11395 case 'a': { 11396 Type = Context.SveCountTy; 11397 break; 11398 } 11399 default: 11400 llvm_unreachable("Unexpected target builtin type"); 11401 } 11402 break; 11403 } 11404 case 'V': { 11405 char *End; 11406 unsigned NumElements = strtoul(Str, &End, 10); 11407 assert(End != Str && "Missing vector size"); 11408 Str = End; 11409 11410 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, 11411 RequiresICE, false); 11412 assert(!RequiresICE && "Can't require vector ICE"); 11413 11414 // TODO: No way to make AltiVec vectors in builtins yet. 
11415 Type = Context.getVectorType(ElementType, NumElements, VectorKind::Generic); 11416 break; 11417 } 11418 case 'E': { 11419 char *End; 11420 11421 unsigned NumElements = strtoul(Str, &End, 10); 11422 assert(End != Str && "Missing vector size"); 11423 11424 Str = End; 11425 11426 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE, 11427 false); 11428 Type = Context.getExtVectorType(ElementType, NumElements); 11429 break; 11430 } 11431 case 'X': { 11432 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE, 11433 false); 11434 assert(!RequiresICE && "Can't require complex ICE"); 11435 Type = Context.getComplexType(ElementType); 11436 break; 11437 } 11438 case 'Y': 11439 Type = Context.getPointerDiffType(); 11440 break; 11441 case 'P': 11442 Type = Context.getFILEType(); 11443 if (Type.isNull()) { 11444 Error = ASTContext::GE_Missing_stdio; 11445 return {}; 11446 } 11447 break; 11448 case 'J': 11449 if (Signed) 11450 Type = Context.getsigjmp_bufType(); 11451 else 11452 Type = Context.getjmp_bufType(); 11453 11454 if (Type.isNull()) { 11455 Error = ASTContext::GE_Missing_setjmp; 11456 return {}; 11457 } 11458 break; 11459 case 'K': 11460 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'K'!"); 11461 Type = Context.getucontext_tType(); 11462 11463 if (Type.isNull()) { 11464 Error = ASTContext::GE_Missing_ucontext; 11465 return {}; 11466 } 11467 break; 11468 case 'p': 11469 Type = Context.getProcessIDType(); 11470 break; 11471 } 11472 11473 // If there are modifiers and if we're allowed to parse them, go for it. 11474 Done = !AllowTypeModifiers; 11475 while (!Done) { 11476 switch (char c = *Str++) { 11477 default: Done = true; --Str; break; 11478 case '*': 11479 case '&': { 11480 // Both pointers and references can have their pointee types 11481 // qualified with an address space. 11482 char *End; 11483 unsigned AddrSpace = strtoul(Str, &End, 10); 11484 if (End != Str) { 11485 // Note AddrSpace == 0 is not the same as an unspecified address space. 11486 Type = Context.getAddrSpaceQualType( 11487 Type, 11488 Context.getLangASForBuiltinAddressSpace(AddrSpace)); 11489 Str = End; 11490 } 11491 if (c == '*') 11492 Type = Context.getPointerType(Type); 11493 else 11494 Type = Context.getLValueReferenceType(Type); 11495 break; 11496 } 11497 // FIXME: There's no way to have a built-in with an rvalue ref arg. 11498 case 'C': 11499 Type = Type.withConst(); 11500 break; 11501 case 'D': 11502 Type = Context.getVolatileType(Type); 11503 break; 11504 case 'R': 11505 Type = Type.withRestrict(); 11506 break; 11507 } 11508 } 11509 11510 assert((!RequiresICE || Type->isIntegralOrEnumerationType()) && 11511 "Integer constant 'I' type must be an integer"); 11512 11513 return Type; 11514 } 11515 11516 // On some targets such as PowerPC, some of the builtins are defined with custom 11517 // type descriptors for target-dependent types. These descriptors are decoded in 11518 // other functions, but it may be useful to be able to fall back to default 11519 // descriptor decoding to define builtins mixing target-dependent and target- 11520 // independent types. This function allows decoding one type descriptor with 11521 // default decoding. 
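// For example (an illustrative sketch; 'Ctx', 'Desc', 'Err' and 'RequiresICE'
// are not names from this file), default decoding turns the descriptor "V4i"
// into a generic vector of four ints:
//   const char *Desc = "V4i";
//   ASTContext::GetBuiltinTypeError Err = ASTContext::GE_None;
//   bool RequiresICE = false;
//   QualType VecTy =
//       Ctx.DecodeTypeStr(Desc, Ctx, Err, RequiresICE,
//                         /*AllowTypeModifiers=*/true);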
11522 QualType ASTContext::DecodeTypeStr(const char *&Str, const ASTContext &Context, 11523 GetBuiltinTypeError &Error, bool &RequireICE, 11524 bool AllowTypeModifiers) const { 11525 return DecodeTypeFromStr(Str, Context, Error, RequireICE, AllowTypeModifiers); 11526 } 11527 11528 /// GetBuiltinType - Return the type for the specified builtin. 11529 QualType ASTContext::GetBuiltinType(unsigned Id, 11530 GetBuiltinTypeError &Error, 11531 unsigned *IntegerConstantArgs) const { 11532 const char *TypeStr = BuiltinInfo.getTypeString(Id); 11533 if (TypeStr[0] == '\0') { 11534 Error = GE_Missing_type; 11535 return {}; 11536 } 11537 11538 SmallVector<QualType, 8> ArgTypes; 11539 11540 bool RequiresICE = false; 11541 Error = GE_None; 11542 QualType ResType = DecodeTypeFromStr(TypeStr, *this, Error, 11543 RequiresICE, true); 11544 if (Error != GE_None) 11545 return {}; 11546 11547 assert(!RequiresICE && "Result of intrinsic cannot be required to be an ICE"); 11548 11549 while (TypeStr[0] && TypeStr[0] != '.') { 11550 QualType Ty = DecodeTypeFromStr(TypeStr, *this, Error, RequiresICE, true); 11551 if (Error != GE_None) 11552 return {}; 11553 11554 // If this argument is required to be an IntegerConstantExpression and the 11555 // caller cares, fill in the bitmask we return. 11556 if (RequiresICE && IntegerConstantArgs) 11557 *IntegerConstantArgs |= 1 << ArgTypes.size(); 11558 11559 // Do array -> pointer decay. The builtin should use the decayed type. 11560 if (Ty->isArrayType()) 11561 Ty = getArrayDecayedType(Ty); 11562 11563 ArgTypes.push_back(Ty); 11564 } 11565 11566 if (Id == Builtin::BI__GetExceptionInfo) 11567 return {}; 11568 11569 assert((TypeStr[0] != '.' || TypeStr[1] == 0) && 11570 "'.' should only occur at end of builtin type list!"); 11571 11572 bool Variadic = (TypeStr[0] == '.'); 11573 11574 FunctionType::ExtInfo EI(getDefaultCallingConvention( 11575 Variadic, /*IsCXXMethod=*/false, /*IsBuiltin=*/true)); 11576 if (BuiltinInfo.isNoReturn(Id)) EI = EI.withNoReturn(true); 11577 11578 11579 // We really shouldn't be making a no-proto type here. 11580 if (ArgTypes.empty() && Variadic && !getLangOpts().requiresStrictPrototypes()) 11581 return getFunctionNoProtoType(ResType, EI); 11582 11583 FunctionProtoType::ExtProtoInfo EPI; 11584 EPI.ExtInfo = EI; 11585 EPI.Variadic = Variadic; 11586 if (getLangOpts().CPlusPlus && BuiltinInfo.isNoThrow(Id)) 11587 EPI.ExceptionSpec.Type = 11588 getLangOpts().CPlusPlus11 ? EST_BasicNoexcept : EST_DynamicNone; 11589 11590 return getFunctionType(ResType, ArgTypes, EPI); 11591 } 11592 11593 static GVALinkage basicGVALinkageForFunction(const ASTContext &Context, 11594 const FunctionDecl *FD) { 11595 if (!FD->isExternallyVisible()) 11596 return GVA_Internal; 11597 11598 // Non-user-provided functions get emitted as weak definitions with every 11599 // use, no matter whether they've been explicitly instantiated etc. 
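  // For example, an implicitly-defined copy constructor or a special member
  // defaulted on its first declaration is not user-provided, so every
  // translation unit that uses it gets its own discardable (typically
  // linkonce_odr) copy.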
11600 if (!FD->isUserProvided()) 11601 return GVA_DiscardableODR; 11602 11603 GVALinkage External; 11604 switch (FD->getTemplateSpecializationKind()) { 11605 case TSK_Undeclared: 11606 case TSK_ExplicitSpecialization: 11607 External = GVA_StrongExternal; 11608 break; 11609 11610 case TSK_ExplicitInstantiationDefinition: 11611 return GVA_StrongODR; 11612 11613 // C++11 [temp.explicit]p10: 11614 // [ Note: The intent is that an inline function that is the subject of 11615 // an explicit instantiation declaration will still be implicitly 11616 // instantiated when used so that the body can be considered for 11617 // inlining, but that no out-of-line copy of the inline function would be 11618 // generated in the translation unit. -- end note ] 11619 case TSK_ExplicitInstantiationDeclaration: 11620 return GVA_AvailableExternally; 11621 11622 case TSK_ImplicitInstantiation: 11623 External = GVA_DiscardableODR; 11624 break; 11625 } 11626 11627 if (!FD->isInlined()) 11628 return External; 11629 11630 if ((!Context.getLangOpts().CPlusPlus && 11631 !Context.getTargetInfo().getCXXABI().isMicrosoft() && 11632 !FD->hasAttr<DLLExportAttr>()) || 11633 FD->hasAttr<GNUInlineAttr>()) { 11634 // FIXME: This doesn't match gcc's behavior for dllexport inline functions. 11635 11636 // GNU or C99 inline semantics. Determine whether this symbol should be 11637 // externally visible. 11638 if (FD->isInlineDefinitionExternallyVisible()) 11639 return External; 11640 11641 // C99 inline semantics, where the symbol is not externally visible. 11642 return GVA_AvailableExternally; 11643 } 11644 11645 // Functions specified with extern and inline in -fms-compatibility mode 11646 // forcibly get emitted. While the body of the function cannot be later 11647 // replaced, the function definition cannot be discarded. 11648 if (FD->isMSExternInline()) 11649 return GVA_StrongODR; 11650 11651 if (Context.getTargetInfo().getCXXABI().isMicrosoft() && 11652 isa<CXXConstructorDecl>(FD) && 11653 cast<CXXConstructorDecl>(FD)->isInheritingConstructor()) 11654 // Our approach to inheriting constructors is fundamentally different from 11655 // that used by the MS ABI, so keep our inheriting constructor thunks 11656 // internal rather than trying to pick an unambiguous mangling for them. 11657 return GVA_Internal; 11658 11659 return GVA_DiscardableODR; 11660 } 11661 11662 static GVALinkage adjustGVALinkageForAttributes(const ASTContext &Context, 11663 const Decl *D, GVALinkage L) { 11664 // See http://msdn.microsoft.com/en-us/library/xa0d9ste.aspx 11665 // dllexport/dllimport on inline functions. 11666 if (D->hasAttr<DLLImportAttr>()) { 11667 if (L == GVA_DiscardableODR || L == GVA_StrongODR) 11668 return GVA_AvailableExternally; 11669 } else if (D->hasAttr<DLLExportAttr>()) { 11670 if (L == GVA_DiscardableODR) 11671 return GVA_StrongODR; 11672 } else if (Context.getLangOpts().CUDA && Context.getLangOpts().CUDAIsDevice) { 11673 // Device-side functions with __global__ attribute must always be 11674 // visible externally so they can be launched from host. 11675 if (D->hasAttr<CUDAGlobalAttr>() && 11676 (L == GVA_DiscardableODR || L == GVA_Internal)) 11677 return GVA_StrongODR; 11678 // Single source offloading languages like CUDA/HIP need to be able to 11679 // access static device variables from host code of the same compilation 11680 // unit. 
This is done by externalizing the static variable with a shared 11681 // name between the host and device compilation which is the same for the 11682 // same compilation unit whereas different among different compilation 11683 // units. 11684 if (Context.shouldExternalize(D)) 11685 return GVA_StrongExternal; 11686 } 11687 return L; 11688 } 11689 11690 /// Adjust the GVALinkage for a declaration based on what an external AST source 11691 /// knows about whether there can be other definitions of this declaration. 11692 static GVALinkage 11693 adjustGVALinkageForExternalDefinitionKind(const ASTContext &Ctx, const Decl *D, 11694 GVALinkage L) { 11695 ExternalASTSource *Source = Ctx.getExternalSource(); 11696 if (!Source) 11697 return L; 11698 11699 switch (Source->hasExternalDefinitions(D)) { 11700 case ExternalASTSource::EK_Never: 11701 // Other translation units rely on us to provide the definition. 11702 if (L == GVA_DiscardableODR) 11703 return GVA_StrongODR; 11704 break; 11705 11706 case ExternalASTSource::EK_Always: 11707 return GVA_AvailableExternally; 11708 11709 case ExternalASTSource::EK_ReplyHazy: 11710 break; 11711 } 11712 return L; 11713 } 11714 11715 GVALinkage ASTContext::GetGVALinkageForFunction(const FunctionDecl *FD) const { 11716 return adjustGVALinkageForExternalDefinitionKind(*this, FD, 11717 adjustGVALinkageForAttributes(*this, FD, 11718 basicGVALinkageForFunction(*this, FD))); 11719 } 11720 11721 static GVALinkage basicGVALinkageForVariable(const ASTContext &Context, 11722 const VarDecl *VD) { 11723 // As an extension for interactive REPLs, make sure constant variables are 11724 // only emitted once instead of LinkageComputer::getLVForNamespaceScopeDecl 11725 // marking them as internal. 11726 if (Context.getLangOpts().CPlusPlus && 11727 Context.getLangOpts().IncrementalExtensions && 11728 VD->getType().isConstQualified() && 11729 !VD->getType().isVolatileQualified() && !VD->isInline() && 11730 !isa<VarTemplateSpecializationDecl>(VD) && !VD->getDescribedVarTemplate()) 11731 return GVA_DiscardableODR; 11732 11733 if (!VD->isExternallyVisible()) 11734 return GVA_Internal; 11735 11736 if (VD->isStaticLocal()) { 11737 const DeclContext *LexicalContext = VD->getParentFunctionOrMethod(); 11738 while (LexicalContext && !isa<FunctionDecl>(LexicalContext)) 11739 LexicalContext = LexicalContext->getLexicalParent(); 11740 11741 // ObjC Blocks can create local variables that don't have a FunctionDecl 11742 // LexicalContext. 11743 if (!LexicalContext) 11744 return GVA_DiscardableODR; 11745 11746 // Otherwise, let the static local variable inherit its linkage from the 11747 // nearest enclosing function. 11748 auto StaticLocalLinkage = 11749 Context.GetGVALinkageForFunction(cast<FunctionDecl>(LexicalContext)); 11750 11751 // Itanium ABI 5.2.2: "Each COMDAT group [for a static local variable] must 11752 // be emitted in any object with references to the symbol for the object it 11753 // contains, whether inline or out-of-line." 11754 // Similar behavior is observed with MSVC. An alternative ABI could use 11755 // StrongODR/AvailableExternally to match the function, but none are 11756 // known/supported currently. 11757 if (StaticLocalLinkage == GVA_StrongODR || 11758 StaticLocalLinkage == GVA_AvailableExternally) 11759 return GVA_DiscardableODR; 11760 return StaticLocalLinkage; 11761 } 11762 11763 // MSVC treats in-class initialized static data members as definitions. 11764 // By giving them non-strong linkage, out-of-line definitions won't 11765 // cause link errors. 
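  // For example, under '-fms-extensions' a member like
  // 'struct S { static const int N = 3; };' already acts as a definition of
  // S::N, so the discardable linkage below keeps a later out-of-line
  // 'const int S::N;' from causing a link error.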
11766 if (Context.isMSStaticDataMemberInlineDefinition(VD)) 11767 return GVA_DiscardableODR; 11768 11769 // Most non-template variables have strong linkage; inline variables are 11770 // linkonce_odr or (occasionally, for compatibility) weak_odr. 11771 GVALinkage StrongLinkage; 11772 switch (Context.getInlineVariableDefinitionKind(VD)) { 11773 case ASTContext::InlineVariableDefinitionKind::None: 11774 StrongLinkage = GVA_StrongExternal; 11775 break; 11776 case ASTContext::InlineVariableDefinitionKind::Weak: 11777 case ASTContext::InlineVariableDefinitionKind::WeakUnknown: 11778 StrongLinkage = GVA_DiscardableODR; 11779 break; 11780 case ASTContext::InlineVariableDefinitionKind::Strong: 11781 StrongLinkage = GVA_StrongODR; 11782 break; 11783 } 11784 11785 switch (VD->getTemplateSpecializationKind()) { 11786 case TSK_Undeclared: 11787 return StrongLinkage; 11788 11789 case TSK_ExplicitSpecialization: 11790 return Context.getTargetInfo().getCXXABI().isMicrosoft() && 11791 VD->isStaticDataMember() 11792 ? GVA_StrongODR 11793 : StrongLinkage; 11794 11795 case TSK_ExplicitInstantiationDefinition: 11796 return GVA_StrongODR; 11797 11798 case TSK_ExplicitInstantiationDeclaration: 11799 return GVA_AvailableExternally; 11800 11801 case TSK_ImplicitInstantiation: 11802 return GVA_DiscardableODR; 11803 } 11804 11805 llvm_unreachable("Invalid Linkage!"); 11806 } 11807 11808 GVALinkage ASTContext::GetGVALinkageForVariable(const VarDecl *VD) const { 11809 return adjustGVALinkageForExternalDefinitionKind(*this, VD, 11810 adjustGVALinkageForAttributes(*this, VD, 11811 basicGVALinkageForVariable(*this, VD))); 11812 } 11813 11814 bool ASTContext::DeclMustBeEmitted(const Decl *D) { 11815 if (const auto *VD = dyn_cast<VarDecl>(D)) { 11816 if (!VD->isFileVarDecl()) 11817 return false; 11818 // Global named register variables (GNU extension) are never emitted. 11819 if (VD->getStorageClass() == SC_Register) 11820 return false; 11821 if (VD->getDescribedVarTemplate() || 11822 isa<VarTemplatePartialSpecializationDecl>(VD)) 11823 return false; 11824 } else if (const auto *FD = dyn_cast<FunctionDecl>(D)) { 11825 // We never need to emit an uninstantiated function template. 11826 if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate) 11827 return false; 11828 } else if (isa<PragmaCommentDecl>(D)) 11829 return true; 11830 else if (isa<PragmaDetectMismatchDecl>(D)) 11831 return true; 11832 else if (isa<OMPRequiresDecl>(D)) 11833 return true; 11834 else if (isa<OMPThreadPrivateDecl>(D)) 11835 return !D->getDeclContext()->isDependentContext(); 11836 else if (isa<OMPAllocateDecl>(D)) 11837 return !D->getDeclContext()->isDependentContext(); 11838 else if (isa<OMPDeclareReductionDecl>(D) || isa<OMPDeclareMapperDecl>(D)) 11839 return !D->getDeclContext()->isDependentContext(); 11840 else if (isa<ImportDecl>(D)) 11841 return true; 11842 else 11843 return false; 11844 11845 // If this is a member of a class template, we do not need to emit it. 11846 if (D->getDeclContext()->isDependentContext()) 11847 return false; 11848 11849 // Weak references don't produce any output by themselves. 11850 if (D->hasAttr<WeakRefAttr>()) 11851 return false; 11852 11853 // Aliases and used decls are required. 11854 if (D->hasAttr<AliasAttr>() || D->hasAttr<UsedAttr>()) 11855 return true; 11856 11857 if (const auto *FD = dyn_cast<FunctionDecl>(D)) { 11858 // Forward declarations aren't required. 
11859 if (!FD->doesThisDeclarationHaveABody()) 11860 return FD->doesDeclarationForceExternallyVisibleDefinition(); 11861 11862 // Constructors and destructors are required. 11863 if (FD->hasAttr<ConstructorAttr>() || FD->hasAttr<DestructorAttr>()) 11864 return true; 11865 11866 // The key function for a class is required. This rule only comes 11867 // into play when inline functions can be key functions, though. 11868 if (getTargetInfo().getCXXABI().canKeyFunctionBeInline()) { 11869 if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) { 11870 const CXXRecordDecl *RD = MD->getParent(); 11871 if (MD->isOutOfLine() && RD->isDynamicClass()) { 11872 const CXXMethodDecl *KeyFunc = getCurrentKeyFunction(RD); 11873 if (KeyFunc && KeyFunc->getCanonicalDecl() == MD->getCanonicalDecl()) 11874 return true; 11875 } 11876 } 11877 } 11878 11879 GVALinkage Linkage = GetGVALinkageForFunction(FD); 11880 11881 // static, static inline, always_inline, and extern inline functions can 11882 // always be deferred. Normal inline functions can be deferred in C99/C++. 11883 // Implicit template instantiations can also be deferred in C++. 11884 return !isDiscardableGVALinkage(Linkage); 11885 } 11886 11887 const auto *VD = cast<VarDecl>(D); 11888 assert(VD->isFileVarDecl() && "Expected file scoped var"); 11889 11890 // If the decl is marked as `declare target to`, it should be emitted for the 11891 // host and for the device. 11892 if (LangOpts.OpenMP && 11893 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD)) 11894 return true; 11895 11896 if (VD->isThisDeclarationADefinition() == VarDecl::DeclarationOnly && 11897 !isMSStaticDataMemberInlineDefinition(VD)) 11898 return false; 11899 11900 // Variables in other module units shouldn't be forced to be emitted. 11901 if (VD->isInAnotherModuleUnit()) 11902 return false; 11903 11904 // Variables that can be needed in other TUs are required. 11905 auto Linkage = GetGVALinkageForVariable(VD); 11906 if (!isDiscardableGVALinkage(Linkage)) 11907 return true; 11908 11909 // We never need to emit a variable that is available in another TU. 11910 if (Linkage == GVA_AvailableExternally) 11911 return false; 11912 11913 // Variables that have destruction with side-effects are required. 11914 if (VD->needsDestruction(*this)) 11915 return true; 11916 11917 // Variables that have initialization with side-effects are required. 11918 if (VD->getInit() && VD->getInit()->HasSideEffects(*this) && 11919 // We can get a value-dependent initializer during error recovery. 11920 (VD->getInit()->isValueDependent() || !VD->evaluateValue())) 11921 return true; 11922 11923 // Likewise, variables with tuple-like bindings are required if their 11924 // bindings have side-effects. 11925 if (const auto *DD = dyn_cast<DecompositionDecl>(VD)) 11926 for (const auto *BD : DD->bindings()) 11927 if (const auto *BindingVD = BD->getHoldingVar()) 11928 if (DeclMustBeEmitted(BindingVD)) 11929 return true; 11930 11931 return false; 11932 } 11933 11934 void ASTContext::forEachMultiversionedFunctionVersion( 11935 const FunctionDecl *FD, 11936 llvm::function_ref<void(FunctionDecl *)> Pred) const { 11937 assert(FD->isMultiVersion() && "Only valid for multiversioned functions"); 11938 llvm::SmallDenseSet<const FunctionDecl*, 4> SeenDecls; 11939 FD = FD->getMostRecentDecl(); 11940 // FIXME: The order of traversal here matters and depends on the order of 11941 // lookup results, which happens to be (mostly) oldest-to-newest, but we 11942 // shouldn't rely on that. 
11943 for (auto *CurDecl : 11944 FD->getDeclContext()->getRedeclContext()->lookup(FD->getDeclName())) { 11945 FunctionDecl *CurFD = CurDecl->getAsFunction()->getMostRecentDecl(); 11946 if (CurFD && hasSameType(CurFD->getType(), FD->getType()) && 11947 !SeenDecls.contains(CurFD)) { 11948 SeenDecls.insert(CurFD); 11949 Pred(CurFD); 11950 } 11951 } 11952 } 11953 11954 CallingConv ASTContext::getDefaultCallingConvention(bool IsVariadic, 11955 bool IsCXXMethod, 11956 bool IsBuiltin) const { 11957 // Pass through to the C++ ABI object 11958 if (IsCXXMethod) 11959 return ABI->getDefaultMethodCallConv(IsVariadic); 11960 11961 // Builtins ignore user-specified default calling convention and remain the 11962 // Target's default calling convention. 11963 if (!IsBuiltin) { 11964 switch (LangOpts.getDefaultCallingConv()) { 11965 case LangOptions::DCC_None: 11966 break; 11967 case LangOptions::DCC_CDecl: 11968 return CC_C; 11969 case LangOptions::DCC_FastCall: 11970 if (getTargetInfo().hasFeature("sse2") && !IsVariadic) 11971 return CC_X86FastCall; 11972 break; 11973 case LangOptions::DCC_StdCall: 11974 if (!IsVariadic) 11975 return CC_X86StdCall; 11976 break; 11977 case LangOptions::DCC_VectorCall: 11978 // __vectorcall cannot be applied to variadic functions. 11979 if (!IsVariadic) 11980 return CC_X86VectorCall; 11981 break; 11982 case LangOptions::DCC_RegCall: 11983 // __regcall cannot be applied to variadic functions. 11984 if (!IsVariadic) 11985 return CC_X86RegCall; 11986 break; 11987 case LangOptions::DCC_RtdCall: 11988 if (!IsVariadic) 11989 return CC_M68kRTD; 11990 break; 11991 } 11992 } 11993 return Target->getDefaultCallingConv(); 11994 } 11995 11996 bool ASTContext::isNearlyEmpty(const CXXRecordDecl *RD) const { 11997 // Pass through to the C++ ABI object 11998 return ABI->isNearlyEmpty(RD); 11999 } 12000 12001 VTableContextBase *ASTContext::getVTableContext() { 12002 if (!VTContext.get()) { 12003 auto ABI = Target->getCXXABI(); 12004 if (ABI.isMicrosoft()) 12005 VTContext.reset(new MicrosoftVTableContext(*this)); 12006 else { 12007 auto ComponentLayout = getLangOpts().RelativeCXXABIVTables 12008 ? 
ItaniumVTableContext::Relative 12009 : ItaniumVTableContext::Pointer; 12010 VTContext.reset(new ItaniumVTableContext(*this, ComponentLayout)); 12011 } 12012 } 12013 return VTContext.get(); 12014 } 12015 12016 MangleContext *ASTContext::createMangleContext(const TargetInfo *T) { 12017 if (!T) 12018 T = Target; 12019 switch (T->getCXXABI().getKind()) { 12020 case TargetCXXABI::AppleARM64: 12021 case TargetCXXABI::Fuchsia: 12022 case TargetCXXABI::GenericAArch64: 12023 case TargetCXXABI::GenericItanium: 12024 case TargetCXXABI::GenericARM: 12025 case TargetCXXABI::GenericMIPS: 12026 case TargetCXXABI::iOS: 12027 case TargetCXXABI::WebAssembly: 12028 case TargetCXXABI::WatchOS: 12029 case TargetCXXABI::XL: 12030 return ItaniumMangleContext::create(*this, getDiagnostics()); 12031 case TargetCXXABI::Microsoft: 12032 return MicrosoftMangleContext::create(*this, getDiagnostics()); 12033 } 12034 llvm_unreachable("Unsupported ABI"); 12035 } 12036 12037 MangleContext *ASTContext::createDeviceMangleContext(const TargetInfo &T) { 12038 assert(T.getCXXABI().getKind() != TargetCXXABI::Microsoft && 12039 "Device mangle context does not support Microsoft mangling."); 12040 switch (T.getCXXABI().getKind()) { 12041 case TargetCXXABI::AppleARM64: 12042 case TargetCXXABI::Fuchsia: 12043 case TargetCXXABI::GenericAArch64: 12044 case TargetCXXABI::GenericItanium: 12045 case TargetCXXABI::GenericARM: 12046 case TargetCXXABI::GenericMIPS: 12047 case TargetCXXABI::iOS: 12048 case TargetCXXABI::WebAssembly: 12049 case TargetCXXABI::WatchOS: 12050 case TargetCXXABI::XL: 12051 return ItaniumMangleContext::create( 12052 *this, getDiagnostics(), 12053 [](ASTContext &, const NamedDecl *ND) -> std::optional<unsigned> { 12054 if (const auto *RD = dyn_cast<CXXRecordDecl>(ND)) 12055 return RD->getDeviceLambdaManglingNumber(); 12056 return std::nullopt; 12057 }, 12058 /*IsAux=*/true); 12059 case TargetCXXABI::Microsoft: 12060 return MicrosoftMangleContext::create(*this, getDiagnostics(), 12061 /*IsAux=*/true); 12062 } 12063 llvm_unreachable("Unsupported ABI"); 12064 } 12065 12066 CXXABI::~CXXABI() = default; 12067 12068 size_t ASTContext::getSideTableAllocatedMemory() const { 12069 return ASTRecordLayouts.getMemorySize() + 12070 llvm::capacity_in_bytes(ObjCLayouts) + 12071 llvm::capacity_in_bytes(KeyFunctions) + 12072 llvm::capacity_in_bytes(ObjCImpls) + 12073 llvm::capacity_in_bytes(BlockVarCopyInits) + 12074 llvm::capacity_in_bytes(DeclAttrs) + 12075 llvm::capacity_in_bytes(TemplateOrInstantiation) + 12076 llvm::capacity_in_bytes(InstantiatedFromUsingDecl) + 12077 llvm::capacity_in_bytes(InstantiatedFromUsingShadowDecl) + 12078 llvm::capacity_in_bytes(InstantiatedFromUnnamedFieldDecl) + 12079 llvm::capacity_in_bytes(OverriddenMethods) + 12080 llvm::capacity_in_bytes(Types) + 12081 llvm::capacity_in_bytes(VariableArrayTypes); 12082 } 12083 12084 /// getIntTypeForBitwidth - 12085 /// sets integer QualTy according to specified details: 12086 /// bitwidth, signed/unsigned. 12087 /// Returns empty type if there is no appropriate target types. 12088 QualType ASTContext::getIntTypeForBitwidth(unsigned DestWidth, 12089 unsigned Signed) const { 12090 TargetInfo::IntType Ty = getTargetInfo().getIntTypeByWidth(DestWidth, Signed); 12091 CanQualType QualTy = getFromTargetType(Ty); 12092 if (!QualTy && DestWidth == 128) 12093 return Signed ? Int128Ty : UnsignedInt128Ty; 12094 return QualTy; 12095 } 12096 12097 /// getRealTypeForBitwidth - 12098 /// sets floating point QualTy according to specified bitwidth. 
12099 /// Returns empty type if there is no appropriate target types. 12100 QualType ASTContext::getRealTypeForBitwidth(unsigned DestWidth, 12101 FloatModeKind ExplicitType) const { 12102 FloatModeKind Ty = 12103 getTargetInfo().getRealTypeByWidth(DestWidth, ExplicitType); 12104 switch (Ty) { 12105 case FloatModeKind::Half: 12106 return HalfTy; 12107 case FloatModeKind::Float: 12108 return FloatTy; 12109 case FloatModeKind::Double: 12110 return DoubleTy; 12111 case FloatModeKind::LongDouble: 12112 return LongDoubleTy; 12113 case FloatModeKind::Float128: 12114 return Float128Ty; 12115 case FloatModeKind::Ibm128: 12116 return Ibm128Ty; 12117 case FloatModeKind::NoFloat: 12118 return {}; 12119 } 12120 12121 llvm_unreachable("Unhandled TargetInfo::RealType value"); 12122 } 12123 12124 void ASTContext::setManglingNumber(const NamedDecl *ND, unsigned Number) { 12125 if (Number > 1) 12126 MangleNumbers[ND] = Number; 12127 } 12128 12129 unsigned ASTContext::getManglingNumber(const NamedDecl *ND, 12130 bool ForAuxTarget) const { 12131 auto I = MangleNumbers.find(ND); 12132 unsigned Res = I != MangleNumbers.end() ? I->second : 1; 12133 // CUDA/HIP host compilation encodes host and device mangling numbers 12134 // as lower and upper half of 32 bit integer. 12135 if (LangOpts.CUDA && !LangOpts.CUDAIsDevice) { 12136 Res = ForAuxTarget ? Res >> 16 : Res & 0xFFFF; 12137 } else { 12138 assert(!ForAuxTarget && "Only CUDA/HIP host compilation supports mangling " 12139 "number for aux target"); 12140 } 12141 return Res > 1 ? Res : 1; 12142 } 12143 12144 void ASTContext::setStaticLocalNumber(const VarDecl *VD, unsigned Number) { 12145 if (Number > 1) 12146 StaticLocalNumbers[VD] = Number; 12147 } 12148 12149 unsigned ASTContext::getStaticLocalNumber(const VarDecl *VD) const { 12150 auto I = StaticLocalNumbers.find(VD); 12151 return I != StaticLocalNumbers.end() ? I->second : 1; 12152 } 12153 12154 MangleNumberingContext & 12155 ASTContext::getManglingNumberContext(const DeclContext *DC) { 12156 assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C. 12157 std::unique_ptr<MangleNumberingContext> &MCtx = MangleNumberingContexts[DC]; 12158 if (!MCtx) 12159 MCtx = createMangleNumberingContext(); 12160 return *MCtx; 12161 } 12162 12163 MangleNumberingContext & 12164 ASTContext::getManglingNumberContext(NeedExtraManglingDecl_t, const Decl *D) { 12165 assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C. 
12166 std::unique_ptr<MangleNumberingContext> &MCtx = 12167 ExtraMangleNumberingContexts[D]; 12168 if (!MCtx) 12169 MCtx = createMangleNumberingContext(); 12170 return *MCtx; 12171 } 12172 12173 std::unique_ptr<MangleNumberingContext> 12174 ASTContext::createMangleNumberingContext() const { 12175 return ABI->createMangleNumberingContext(); 12176 } 12177 12178 const CXXConstructorDecl * 12179 ASTContext::getCopyConstructorForExceptionObject(CXXRecordDecl *RD) { 12180 return ABI->getCopyConstructorForExceptionObject( 12181 cast<CXXRecordDecl>(RD->getFirstDecl())); 12182 } 12183 12184 void ASTContext::addCopyConstructorForExceptionObject(CXXRecordDecl *RD, 12185 CXXConstructorDecl *CD) { 12186 return ABI->addCopyConstructorForExceptionObject( 12187 cast<CXXRecordDecl>(RD->getFirstDecl()), 12188 cast<CXXConstructorDecl>(CD->getFirstDecl())); 12189 } 12190 12191 void ASTContext::addTypedefNameForUnnamedTagDecl(TagDecl *TD, 12192 TypedefNameDecl *DD) { 12193 return ABI->addTypedefNameForUnnamedTagDecl(TD, DD); 12194 } 12195 12196 TypedefNameDecl * 12197 ASTContext::getTypedefNameForUnnamedTagDecl(const TagDecl *TD) { 12198 return ABI->getTypedefNameForUnnamedTagDecl(TD); 12199 } 12200 12201 void ASTContext::addDeclaratorForUnnamedTagDecl(TagDecl *TD, 12202 DeclaratorDecl *DD) { 12203 return ABI->addDeclaratorForUnnamedTagDecl(TD, DD); 12204 } 12205 12206 DeclaratorDecl *ASTContext::getDeclaratorForUnnamedTagDecl(const TagDecl *TD) { 12207 return ABI->getDeclaratorForUnnamedTagDecl(TD); 12208 } 12209 12210 void ASTContext::setParameterIndex(const ParmVarDecl *D, unsigned int index) { 12211 ParamIndices[D] = index; 12212 } 12213 12214 unsigned ASTContext::getParameterIndex(const ParmVarDecl *D) const { 12215 ParameterIndexTable::const_iterator I = ParamIndices.find(D); 12216 assert(I != ParamIndices.end() && 12217 "ParmIndices lacks entry set by ParmVarDecl"); 12218 return I->second; 12219 } 12220 12221 QualType ASTContext::getStringLiteralArrayType(QualType EltTy, 12222 unsigned Length) const { 12223 // A C++ string literal has a const-qualified element type (C++ 2.13.4p1). 12224 if (getLangOpts().CPlusPlus || getLangOpts().ConstStrings) 12225 EltTy = EltTy.withConst(); 12226 12227 EltTy = adjustStringLiteralBaseType(EltTy); 12228 12229 // Get an array type for the string, according to C99 6.4.5. This includes 12230 // the null terminator character. 
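  // For example, the 5-character literal "hello" gets type 'char[6]' in C and
  // 'const char[6]' in C++ (Length + 1 elements, counting the terminating NUL).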
12231 return getConstantArrayType(EltTy, llvm::APInt(32, Length + 1), nullptr, 12232 ArraySizeModifier::Normal, /*IndexTypeQuals*/ 0); 12233 } 12234 12235 StringLiteral * 12236 ASTContext::getPredefinedStringLiteralFromCache(StringRef Key) const { 12237 StringLiteral *&Result = StringLiteralCache[Key]; 12238 if (!Result) 12239 Result = StringLiteral::Create( 12240 *this, Key, StringLiteralKind::Ordinary, 12241 /*Pascal*/ false, getStringLiteralArrayType(CharTy, Key.size()), 12242 SourceLocation()); 12243 return Result; 12244 } 12245 12246 MSGuidDecl * 12247 ASTContext::getMSGuidDecl(MSGuidDecl::Parts Parts) const { 12248 assert(MSGuidTagDecl && "building MS GUID without MS extensions?"); 12249 12250 llvm::FoldingSetNodeID ID; 12251 MSGuidDecl::Profile(ID, Parts); 12252 12253 void *InsertPos; 12254 if (MSGuidDecl *Existing = MSGuidDecls.FindNodeOrInsertPos(ID, InsertPos)) 12255 return Existing; 12256 12257 QualType GUIDType = getMSGuidType().withConst(); 12258 MSGuidDecl *New = MSGuidDecl::Create(*this, GUIDType, Parts); 12259 MSGuidDecls.InsertNode(New, InsertPos); 12260 return New; 12261 } 12262 12263 UnnamedGlobalConstantDecl * 12264 ASTContext::getUnnamedGlobalConstantDecl(QualType Ty, 12265 const APValue &APVal) const { 12266 llvm::FoldingSetNodeID ID; 12267 UnnamedGlobalConstantDecl::Profile(ID, Ty, APVal); 12268 12269 void *InsertPos; 12270 if (UnnamedGlobalConstantDecl *Existing = 12271 UnnamedGlobalConstantDecls.FindNodeOrInsertPos(ID, InsertPos)) 12272 return Existing; 12273 12274 UnnamedGlobalConstantDecl *New = 12275 UnnamedGlobalConstantDecl::Create(*this, Ty, APVal); 12276 UnnamedGlobalConstantDecls.InsertNode(New, InsertPos); 12277 return New; 12278 } 12279 12280 TemplateParamObjectDecl * 12281 ASTContext::getTemplateParamObjectDecl(QualType T, const APValue &V) const { 12282 assert(T->isRecordType() && "template param object of unexpected type"); 12283 12284 // C++ [temp.param]p8: 12285 // [...] a static storage duration object of type 'const T' [...] 12286 T.addConst(); 12287 12288 llvm::FoldingSetNodeID ID; 12289 TemplateParamObjectDecl::Profile(ID, T, V); 12290 12291 void *InsertPos; 12292 if (TemplateParamObjectDecl *Existing = 12293 TemplateParamObjectDecls.FindNodeOrInsertPos(ID, InsertPos)) 12294 return Existing; 12295 12296 TemplateParamObjectDecl *New = TemplateParamObjectDecl::Create(*this, T, V); 12297 TemplateParamObjectDecls.InsertNode(New, InsertPos); 12298 return New; 12299 } 12300 12301 bool ASTContext::AtomicUsesUnsupportedLibcall(const AtomicExpr *E) const { 12302 const llvm::Triple &T = getTargetInfo().getTriple(); 12303 if (!T.isOSDarwin()) 12304 return false; 12305 12306 if (!(T.isiOS() && T.isOSVersionLT(7)) && 12307 !(T.isMacOSX() && T.isOSVersionLT(10, 9))) 12308 return false; 12309 12310 QualType AtomicTy = E->getPtr()->getType()->getPointeeType(); 12311 CharUnits sizeChars = getTypeSizeInChars(AtomicTy); 12312 uint64_t Size = sizeChars.getQuantity(); 12313 CharUnits alignChars = getTypeAlignInChars(AtomicTy); 12314 unsigned Align = alignChars.getQuantity(); 12315 unsigned MaxInlineWidthInBits = getTargetInfo().getMaxAtomicInlineWidth(); 12316 return (Size != Align || toBits(sizeChars) > MaxInlineWidthInBits); 12317 } 12318 12319 bool 12320 ASTContext::ObjCMethodsAreEqual(const ObjCMethodDecl *MethodDecl, 12321 const ObjCMethodDecl *MethodImpl) { 12322 // No point trying to match an unavailable/deprecated method.
12323 if (MethodDecl->hasAttr<UnavailableAttr>() 12324 || MethodDecl->hasAttr<DeprecatedAttr>()) 12325 return false; 12326 if (MethodDecl->getObjCDeclQualifier() != 12327 MethodImpl->getObjCDeclQualifier()) 12328 return false; 12329 if (!hasSameType(MethodDecl->getReturnType(), MethodImpl->getReturnType())) 12330 return false; 12331 12332 if (MethodDecl->param_size() != MethodImpl->param_size()) 12333 return false; 12334 12335 for (ObjCMethodDecl::param_const_iterator IM = MethodImpl->param_begin(), 12336 IF = MethodDecl->param_begin(), EM = MethodImpl->param_end(), 12337 EF = MethodDecl->param_end(); 12338 IM != EM && IF != EF; ++IM, ++IF) { 12339 const ParmVarDecl *DeclVar = (*IF); 12340 const ParmVarDecl *ImplVar = (*IM); 12341 if (ImplVar->getObjCDeclQualifier() != DeclVar->getObjCDeclQualifier()) 12342 return false; 12343 if (!hasSameType(DeclVar->getType(), ImplVar->getType())) 12344 return false; 12345 } 12346 12347 return (MethodDecl->isVariadic() == MethodImpl->isVariadic()); 12348 } 12349 12350 uint64_t ASTContext::getTargetNullPointerValue(QualType QT) const { 12351 LangAS AS; 12352 if (QT->getUnqualifiedDesugaredType()->isNullPtrType()) 12353 AS = LangAS::Default; 12354 else 12355 AS = QT->getPointeeType().getAddressSpace(); 12356 12357 return getTargetInfo().getNullPointerValue(AS); 12358 } 12359 12360 unsigned ASTContext::getTargetAddressSpace(LangAS AS) const { 12361 return getTargetInfo().getTargetAddressSpace(AS); 12362 } 12363 12364 bool ASTContext::hasSameExpr(const Expr *X, const Expr *Y) const { 12365 if (X == Y) 12366 return true; 12367 if (!X || !Y) 12368 return false; 12369 llvm::FoldingSetNodeID IDX, IDY; 12370 X->Profile(IDX, *this, /*Canonical=*/true); 12371 Y->Profile(IDY, *this, /*Canonical=*/true); 12372 return IDX == IDY; 12373 } 12374 12375 // The getCommon* helpers return, for given 'same' X and Y entities given as 12376 // inputs, another entity which is also the 'same' as the inputs, but which 12377 // is closer to the canonical form of the inputs, each according to a given 12378 // criteria. 12379 // The getCommon*Checked variants are 'null inputs not-allowed' equivalents of 12380 // the regular ones. 12381 12382 static Decl *getCommonDecl(Decl *X, Decl *Y) { 12383 if (!declaresSameEntity(X, Y)) 12384 return nullptr; 12385 for (const Decl *DX : X->redecls()) { 12386 // If we reach Y before reaching the first decl, that means X is older. 12387 if (DX == Y) 12388 return X; 12389 // If we reach the first decl, then Y is older. 12390 if (DX->isFirstDecl()) 12391 return Y; 12392 } 12393 llvm_unreachable("Corrupt redecls chain"); 12394 } 12395 12396 template <class T, std::enable_if_t<std::is_base_of_v<Decl, T>, bool> = true> 12397 static T *getCommonDecl(T *X, T *Y) { 12398 return cast_or_null<T>( 12399 getCommonDecl(const_cast<Decl *>(cast_or_null<Decl>(X)), 12400 const_cast<Decl *>(cast_or_null<Decl>(Y)))); 12401 } 12402 12403 template <class T, std::enable_if_t<std::is_base_of_v<Decl, T>, bool> = true> 12404 static T *getCommonDeclChecked(T *X, T *Y) { 12405 return cast<T>(getCommonDecl(const_cast<Decl *>(cast<Decl>(X)), 12406 const_cast<Decl *>(cast<Decl>(Y)))); 12407 } 12408 12409 static TemplateName getCommonTemplateName(ASTContext &Ctx, TemplateName X, 12410 TemplateName Y) { 12411 if (X.getAsVoidPointer() == Y.getAsVoidPointer()) 12412 return X; 12413 // FIXME: There are cases here where we could find a common template name 12414 // with more sugar. For example one could be a SubstTemplateTemplate* 12415 // replacing the other. 
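// For now, fall back to the canonical template name; if even the canonical
// forms differ, return a null TemplateName so callers can treat the two
// names as divergent.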
12416 TemplateName CX = Ctx.getCanonicalTemplateName(X); 12417 if (CX.getAsVoidPointer() != 12418 Ctx.getCanonicalTemplateName(Y).getAsVoidPointer()) 12419 return TemplateName(); 12420 return CX; 12421 } 12422 12423 static TemplateName 12424 getCommonTemplateNameChecked(ASTContext &Ctx, TemplateName X, TemplateName Y) { 12425 TemplateName R = getCommonTemplateName(Ctx, X, Y); 12426 assert(R.getAsVoidPointer() != nullptr); 12427 return R; 12428 } 12429 12430 static auto getCommonTypes(ASTContext &Ctx, ArrayRef<QualType> Xs, 12431 ArrayRef<QualType> Ys, bool Unqualified = false) { 12432 assert(Xs.size() == Ys.size()); 12433 SmallVector<QualType, 8> Rs(Xs.size()); 12434 for (size_t I = 0; I < Rs.size(); ++I) 12435 Rs[I] = Ctx.getCommonSugaredType(Xs[I], Ys[I], Unqualified); 12436 return Rs; 12437 } 12438 12439 template <class T> 12440 static SourceLocation getCommonAttrLoc(const T *X, const T *Y) { 12441 return X->getAttributeLoc() == Y->getAttributeLoc() ? X->getAttributeLoc() 12442 : SourceLocation(); 12443 } 12444 12445 static TemplateArgument getCommonTemplateArgument(ASTContext &Ctx, 12446 const TemplateArgument &X, 12447 const TemplateArgument &Y) { 12448 if (X.getKind() != Y.getKind()) 12449 return TemplateArgument(); 12450 12451 switch (X.getKind()) { 12452 case TemplateArgument::ArgKind::Type: 12453 if (!Ctx.hasSameType(X.getAsType(), Y.getAsType())) 12454 return TemplateArgument(); 12455 return TemplateArgument( 12456 Ctx.getCommonSugaredType(X.getAsType(), Y.getAsType())); 12457 case TemplateArgument::ArgKind::NullPtr: 12458 if (!Ctx.hasSameType(X.getNullPtrType(), Y.getNullPtrType())) 12459 return TemplateArgument(); 12460 return TemplateArgument( 12461 Ctx.getCommonSugaredType(X.getNullPtrType(), Y.getNullPtrType()), 12462 /*Unqualified=*/true); 12463 case TemplateArgument::ArgKind::Expression: 12464 if (!Ctx.hasSameType(X.getAsExpr()->getType(), Y.getAsExpr()->getType())) 12465 return TemplateArgument(); 12466 // FIXME: Try to keep the common sugar. 12467 return X; 12468 case TemplateArgument::ArgKind::Template: { 12469 TemplateName TX = X.getAsTemplate(), TY = Y.getAsTemplate(); 12470 TemplateName CTN = ::getCommonTemplateName(Ctx, TX, TY); 12471 if (!CTN.getAsVoidPointer()) 12472 return TemplateArgument(); 12473 return TemplateArgument(CTN); 12474 } 12475 case TemplateArgument::ArgKind::TemplateExpansion: { 12476 TemplateName TX = X.getAsTemplateOrTemplatePattern(), 12477 TY = Y.getAsTemplateOrTemplatePattern(); 12478 TemplateName CTN = ::getCommonTemplateName(Ctx, TX, TY); 12479 if (!CTN.getAsVoidPointer()) 12480 return TemplateName(); 12481 auto NExpX = X.getNumTemplateExpansions(); 12482 assert(NExpX == Y.getNumTemplateExpansions()); 12483 return TemplateArgument(CTN, NExpX); 12484 } 12485 default: 12486 // FIXME: Handle the other argument kinds. 
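// For the kinds not handled above (e.g. integral or declaration arguments),
// X and Y are interchangeable for our purposes, so arbitrarily keep X as
// written.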
12487 return X; 12488 } 12489 } 12490 12491 static bool getCommonTemplateArguments(ASTContext &Ctx, 12492 SmallVectorImpl<TemplateArgument> &R, 12493 ArrayRef<TemplateArgument> Xs, 12494 ArrayRef<TemplateArgument> Ys) { 12495 if (Xs.size() != Ys.size()) 12496 return true; 12497 R.resize(Xs.size()); 12498 for (size_t I = 0; I < R.size(); ++I) { 12499 R[I] = getCommonTemplateArgument(Ctx, Xs[I], Ys[I]); 12500 if (R[I].isNull()) 12501 return true; 12502 } 12503 return false; 12504 } 12505 12506 static auto getCommonTemplateArguments(ASTContext &Ctx, 12507 ArrayRef<TemplateArgument> Xs, 12508 ArrayRef<TemplateArgument> Ys) { 12509 SmallVector<TemplateArgument, 8> R; 12510 bool Different = getCommonTemplateArguments(Ctx, R, Xs, Ys); 12511 assert(!Different); 12512 (void)Different; 12513 return R; 12514 } 12515 12516 template <class T> 12517 static ElaboratedTypeKeyword getCommonTypeKeyword(const T *X, const T *Y) { 12518 return X->getKeyword() == Y->getKeyword() ? X->getKeyword() 12519 : ElaboratedTypeKeyword::None; 12520 } 12521 12522 template <class T> 12523 static NestedNameSpecifier *getCommonNNS(ASTContext &Ctx, const T *X, 12524 const T *Y) { 12525 // FIXME: Try to keep the common NNS sugar. 12526 return X->getQualifier() == Y->getQualifier() 12527 ? X->getQualifier() 12528 : Ctx.getCanonicalNestedNameSpecifier(X->getQualifier()); 12529 } 12530 12531 template <class T> 12532 static QualType getCommonElementType(ASTContext &Ctx, const T *X, const T *Y) { 12533 return Ctx.getCommonSugaredType(X->getElementType(), Y->getElementType()); 12534 } 12535 12536 template <class T> 12537 static QualType getCommonArrayElementType(ASTContext &Ctx, const T *X, 12538 Qualifiers &QX, const T *Y, 12539 Qualifiers &QY) { 12540 QualType EX = X->getElementType(), EY = Y->getElementType(); 12541 QualType R = Ctx.getCommonSugaredType(EX, EY, 12542 /*Unqualified=*/true); 12543 Qualifiers RQ = R.getQualifiers(); 12544 QX += EX.getQualifiers() - RQ; 12545 QY += EY.getQualifiers() - RQ; 12546 return R; 12547 } 12548 12549 template <class T> 12550 static QualType getCommonPointeeType(ASTContext &Ctx, const T *X, const T *Y) { 12551 return Ctx.getCommonSugaredType(X->getPointeeType(), Y->getPointeeType()); 12552 } 12553 12554 template <class T> static auto *getCommonSizeExpr(ASTContext &Ctx, T *X, T *Y) { 12555 assert(Ctx.hasSameExpr(X->getSizeExpr(), Y->getSizeExpr())); 12556 return X->getSizeExpr(); 12557 } 12558 12559 static auto getCommonSizeModifier(const ArrayType *X, const ArrayType *Y) { 12560 assert(X->getSizeModifier() == Y->getSizeModifier()); 12561 return X->getSizeModifier(); 12562 } 12563 12564 static auto getCommonIndexTypeCVRQualifiers(const ArrayType *X, 12565 const ArrayType *Y) { 12566 assert(X->getIndexTypeCVRQualifiers() == Y->getIndexTypeCVRQualifiers()); 12567 return X->getIndexTypeCVRQualifiers(); 12568 } 12569 12570 // Merges two type lists such that the resulting vector will contain 12571 // each type (in a canonical sense) only once, in the order they appear 12572 // from X to Y. If they occur in both X and Y, the result will contain 12573 // the common sugared type between them. 
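// For example (illustrative, assuming size_t is a typedef of unsigned long):
// merging {size_t, float} with {unsigned long, double} yields
// {unsigned long, float, double}; the two spellings of the same canonical
// type collapse to their common sugared form, and first-seen order is kept.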
12574 static void mergeTypeLists(ASTContext &Ctx, SmallVectorImpl<QualType> &Out, 12575 ArrayRef<QualType> X, ArrayRef<QualType> Y) { 12576 llvm::DenseMap<QualType, unsigned> Found; 12577 for (auto Ts : {X, Y}) { 12578 for (QualType T : Ts) { 12579 auto Res = Found.try_emplace(Ctx.getCanonicalType(T), Out.size()); 12580 if (!Res.second) { 12581 QualType &U = Out[Res.first->second]; 12582 U = Ctx.getCommonSugaredType(U, T); 12583 } else { 12584 Out.emplace_back(T); 12585 } 12586 } 12587 } 12588 } 12589 12590 FunctionProtoType::ExceptionSpecInfo 12591 ASTContext::mergeExceptionSpecs(FunctionProtoType::ExceptionSpecInfo ESI1, 12592 FunctionProtoType::ExceptionSpecInfo ESI2, 12593 SmallVectorImpl<QualType> &ExceptionTypeStorage, 12594 bool AcceptDependent) { 12595 ExceptionSpecificationType EST1 = ESI1.Type, EST2 = ESI2.Type; 12596 12597 // If either of them can throw anything, that is the result. 12598 for (auto I : {EST_None, EST_MSAny, EST_NoexceptFalse}) { 12599 if (EST1 == I) 12600 return ESI1; 12601 if (EST2 == I) 12602 return ESI2; 12603 } 12604 12605 // If either of them is non-throwing, the result is the other. 12606 for (auto I : 12607 {EST_NoThrow, EST_DynamicNone, EST_BasicNoexcept, EST_NoexceptTrue}) { 12608 if (EST1 == I) 12609 return ESI2; 12610 if (EST2 == I) 12611 return ESI1; 12612 } 12613 12614 // If we're left with value-dependent computed noexcept expressions, we're 12615 // stuck. Before C++17, we can just drop the exception specification entirely, 12616 // since it's not actually part of the canonical type. And this should never 12617 // happen in C++17, because it would mean we were computing the composite 12618 // pointer type of dependent types, which should never happen. 12619 if (EST1 == EST_DependentNoexcept || EST2 == EST_DependentNoexcept) { 12620 assert(AcceptDependent && 12621 "computing composite pointer type of dependent types"); 12622 return FunctionProtoType::ExceptionSpecInfo(); 12623 } 12624 12625 // Switch over the possibilities so that people adding new values know to 12626 // update this function. 12627 switch (EST1) { 12628 case EST_None: 12629 case EST_DynamicNone: 12630 case EST_MSAny: 12631 case EST_BasicNoexcept: 12632 case EST_DependentNoexcept: 12633 case EST_NoexceptFalse: 12634 case EST_NoexceptTrue: 12635 case EST_NoThrow: 12636 llvm_unreachable("These ESTs should be handled above"); 12637 12638 case EST_Dynamic: { 12639 // This is the fun case: both exception specifications are dynamic. Form 12640 // the union of the two lists. 
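// E.g. (illustrative): merging 'throw(int, Base)' with 'throw(Base, Derived)'
// produces 'throw(int, Base, Derived)'.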
12641 assert(EST2 == EST_Dynamic && "other cases should already be handled"); 12642 mergeTypeLists(*this, ExceptionTypeStorage, ESI1.Exceptions, 12643 ESI2.Exceptions); 12644 FunctionProtoType::ExceptionSpecInfo Result(EST_Dynamic); 12645 Result.Exceptions = ExceptionTypeStorage; 12646 return Result; 12647 } 12648 12649 case EST_Unevaluated: 12650 case EST_Uninstantiated: 12651 case EST_Unparsed: 12652 llvm_unreachable("shouldn't see unresolved exception specifications here"); 12653 } 12654 12655 llvm_unreachable("invalid ExceptionSpecificationType"); 12656 } 12657 12658 static QualType getCommonNonSugarTypeNode(ASTContext &Ctx, const Type *X, 12659 Qualifiers &QX, const Type *Y, 12660 Qualifiers &QY) { 12661 Type::TypeClass TC = X->getTypeClass(); 12662 assert(TC == Y->getTypeClass()); 12663 switch (TC) { 12664 #define UNEXPECTED_TYPE(Class, Kind) \ 12665 case Type::Class: \ 12666 llvm_unreachable("Unexpected " Kind ": " #Class); 12667 12668 #define NON_CANONICAL_TYPE(Class, Base) UNEXPECTED_TYPE(Class, "non-canonical") 12669 #define TYPE(Class, Base) 12670 #include "clang/AST/TypeNodes.inc" 12671 12672 #define SUGAR_FREE_TYPE(Class) UNEXPECTED_TYPE(Class, "sugar-free") 12673 SUGAR_FREE_TYPE(Builtin) 12674 SUGAR_FREE_TYPE(DeducedTemplateSpecialization) 12675 SUGAR_FREE_TYPE(DependentBitInt) 12676 SUGAR_FREE_TYPE(Enum) 12677 SUGAR_FREE_TYPE(BitInt) 12678 SUGAR_FREE_TYPE(ObjCInterface) 12679 SUGAR_FREE_TYPE(Record) 12680 SUGAR_FREE_TYPE(SubstTemplateTypeParmPack) 12681 SUGAR_FREE_TYPE(UnresolvedUsing) 12682 #undef SUGAR_FREE_TYPE 12683 #define NON_UNIQUE_TYPE(Class) UNEXPECTED_TYPE(Class, "non-unique") 12684 NON_UNIQUE_TYPE(TypeOfExpr) 12685 NON_UNIQUE_TYPE(VariableArray) 12686 #undef NON_UNIQUE_TYPE 12687 12688 UNEXPECTED_TYPE(TypeOf, "sugar") 12689 12690 #undef UNEXPECTED_TYPE 12691 12692 case Type::Auto: { 12693 const auto *AX = cast<AutoType>(X), *AY = cast<AutoType>(Y); 12694 assert(AX->getDeducedType().isNull()); 12695 assert(AY->getDeducedType().isNull()); 12696 assert(AX->getKeyword() == AY->getKeyword()); 12697 assert(AX->isInstantiationDependentType() == 12698 AY->isInstantiationDependentType()); 12699 auto As = getCommonTemplateArguments(Ctx, AX->getTypeConstraintArguments(), 12700 AY->getTypeConstraintArguments()); 12701 return Ctx.getAutoType(QualType(), AX->getKeyword(), 12702 AX->isInstantiationDependentType(), 12703 AX->containsUnexpandedParameterPack(), 12704 getCommonDeclChecked(AX->getTypeConstraintConcept(), 12705 AY->getTypeConstraintConcept()), 12706 As); 12707 } 12708 case Type::IncompleteArray: { 12709 const auto *AX = cast<IncompleteArrayType>(X), 12710 *AY = cast<IncompleteArrayType>(Y); 12711 return Ctx.getIncompleteArrayType( 12712 getCommonArrayElementType(Ctx, AX, QX, AY, QY), 12713 getCommonSizeModifier(AX, AY), getCommonIndexTypeCVRQualifiers(AX, AY)); 12714 } 12715 case Type::DependentSizedArray: { 12716 const auto *AX = cast<DependentSizedArrayType>(X), 12717 *AY = cast<DependentSizedArrayType>(Y); 12718 return Ctx.getDependentSizedArrayType( 12719 getCommonArrayElementType(Ctx, AX, QX, AY, QY), 12720 getCommonSizeExpr(Ctx, AX, AY), getCommonSizeModifier(AX, AY), 12721 getCommonIndexTypeCVRQualifiers(AX, AY), 12722 AX->getBracketsRange() == AY->getBracketsRange() 12723 ? 
AX->getBracketsRange() 12724 : SourceRange()); 12725 } 12726 case Type::ConstantArray: { 12727 const auto *AX = cast<ConstantArrayType>(X), 12728 *AY = cast<ConstantArrayType>(Y); 12729 assert(AX->getSize() == AY->getSize()); 12730 const Expr *SizeExpr = Ctx.hasSameExpr(AX->getSizeExpr(), AY->getSizeExpr()) 12731 ? AX->getSizeExpr() 12732 : nullptr; 12733 return Ctx.getConstantArrayType( 12734 getCommonArrayElementType(Ctx, AX, QX, AY, QY), AX->getSize(), SizeExpr, 12735 getCommonSizeModifier(AX, AY), getCommonIndexTypeCVRQualifiers(AX, AY)); 12736 } 12737 case Type::Atomic: { 12738 const auto *AX = cast<AtomicType>(X), *AY = cast<AtomicType>(Y); 12739 return Ctx.getAtomicType( 12740 Ctx.getCommonSugaredType(AX->getValueType(), AY->getValueType())); 12741 } 12742 case Type::Complex: { 12743 const auto *CX = cast<ComplexType>(X), *CY = cast<ComplexType>(Y); 12744 return Ctx.getComplexType(getCommonArrayElementType(Ctx, CX, QX, CY, QY)); 12745 } 12746 case Type::Pointer: { 12747 const auto *PX = cast<PointerType>(X), *PY = cast<PointerType>(Y); 12748 return Ctx.getPointerType(getCommonPointeeType(Ctx, PX, PY)); 12749 } 12750 case Type::BlockPointer: { 12751 const auto *PX = cast<BlockPointerType>(X), *PY = cast<BlockPointerType>(Y); 12752 return Ctx.getBlockPointerType(getCommonPointeeType(Ctx, PX, PY)); 12753 } 12754 case Type::ObjCObjectPointer: { 12755 const auto *PX = cast<ObjCObjectPointerType>(X), 12756 *PY = cast<ObjCObjectPointerType>(Y); 12757 return Ctx.getObjCObjectPointerType(getCommonPointeeType(Ctx, PX, PY)); 12758 } 12759 case Type::MemberPointer: { 12760 const auto *PX = cast<MemberPointerType>(X), 12761 *PY = cast<MemberPointerType>(Y); 12762 return Ctx.getMemberPointerType( 12763 getCommonPointeeType(Ctx, PX, PY), 12764 Ctx.getCommonSugaredType(QualType(PX->getClass(), 0), 12765 QualType(PY->getClass(), 0)) 12766 .getTypePtr()); 12767 } 12768 case Type::LValueReference: { 12769 const auto *PX = cast<LValueReferenceType>(X), 12770 *PY = cast<LValueReferenceType>(Y); 12771 // FIXME: Preserve PointeeTypeAsWritten. 12772 return Ctx.getLValueReferenceType(getCommonPointeeType(Ctx, PX, PY), 12773 PX->isSpelledAsLValue() || 12774 PY->isSpelledAsLValue()); 12775 } 12776 case Type::RValueReference: { 12777 const auto *PX = cast<RValueReferenceType>(X), 12778 *PY = cast<RValueReferenceType>(Y); 12779 // FIXME: Preserve PointeeTypeAsWritten. 
12780 return Ctx.getRValueReferenceType(getCommonPointeeType(Ctx, PX, PY));
12781 }
12782 case Type::DependentAddressSpace: {
12783 const auto *PX = cast<DependentAddressSpaceType>(X),
12784 *PY = cast<DependentAddressSpaceType>(Y);
12785 assert(Ctx.hasSameExpr(PX->getAddrSpaceExpr(), PY->getAddrSpaceExpr()));
12786 return Ctx.getDependentAddressSpaceType(getCommonPointeeType(Ctx, PX, PY),
12787 PX->getAddrSpaceExpr(),
12788 getCommonAttrLoc(PX, PY));
12789 }
12790 case Type::FunctionNoProto: {
12791 const auto *FX = cast<FunctionNoProtoType>(X),
12792 *FY = cast<FunctionNoProtoType>(Y);
12793 assert(FX->getExtInfo() == FY->getExtInfo());
12794 return Ctx.getFunctionNoProtoType(
12795 Ctx.getCommonSugaredType(FX->getReturnType(), FY->getReturnType()),
12796 FX->getExtInfo());
12797 }
12798 case Type::FunctionProto: {
12799 const auto *FX = cast<FunctionProtoType>(X),
12800 *FY = cast<FunctionProtoType>(Y);
12801 FunctionProtoType::ExtProtoInfo EPIX = FX->getExtProtoInfo(),
12802 EPIY = FY->getExtProtoInfo();
12803 assert(EPIX.ExtInfo == EPIY.ExtInfo);
12804 assert(EPIX.ExtParameterInfos == EPIY.ExtParameterInfos);
12805 assert(EPIX.RefQualifier == EPIY.RefQualifier);
12806 assert(EPIX.TypeQuals == EPIY.TypeQuals);
12807 assert(EPIX.Variadic == EPIY.Variadic);
12808
12809 // FIXME: Can we handle an empty EllipsisLoc?
12810 // Use empty EllipsisLoc if X and Y differ.
12811
12812 EPIX.HasTrailingReturn = EPIX.HasTrailingReturn && EPIY.HasTrailingReturn;
12813
12814 QualType R =
12815 Ctx.getCommonSugaredType(FX->getReturnType(), FY->getReturnType());
12816 auto P = getCommonTypes(Ctx, FX->param_types(), FY->param_types(),
12817 /*Unqualified=*/true);
12818
12819 SmallVector<QualType, 8> Exceptions;
12820 EPIX.ExceptionSpec = Ctx.mergeExceptionSpecs(
12821 EPIX.ExceptionSpec, EPIY.ExceptionSpec, Exceptions, true);
12822 return Ctx.getFunctionType(R, P, EPIX);
12823 }
12824 case Type::ObjCObject: {
12825 const auto *OX = cast<ObjCObjectType>(X), *OY = cast<ObjCObjectType>(Y);
12826 assert(
12827 std::equal(OX->getProtocols().begin(), OX->getProtocols().end(),
12828 OY->getProtocols().begin(), OY->getProtocols().end(),
12829 [](const ObjCProtocolDecl *P0, const ObjCProtocolDecl *P1) {
12830 return P0->getCanonicalDecl() == P1->getCanonicalDecl();
12831 }) &&
12832 "protocol lists must be the same");
12833 auto TAs = getCommonTypes(Ctx, OX->getTypeArgsAsWritten(),
12834 OY->getTypeArgsAsWritten());
12835 return Ctx.getObjCObjectType(
12836 Ctx.getCommonSugaredType(OX->getBaseType(), OY->getBaseType()), TAs,
12837 OX->getProtocols(),
12838 OX->isKindOfTypeAsWritten() && OY->isKindOfTypeAsWritten());
12839 }
12840 case Type::ConstantMatrix: {
12841 const auto *MX = cast<ConstantMatrixType>(X),
12842 *MY = cast<ConstantMatrixType>(Y);
12843 assert(MX->getNumRows() == MY->getNumRows());
12844 assert(MX->getNumColumns() == MY->getNumColumns());
12845 return Ctx.getConstantMatrixType(getCommonElementType(Ctx, MX, MY),
12846 MX->getNumRows(), MX->getNumColumns());
12847 }
12848 case Type::DependentSizedMatrix: {
12849 const auto *MX = cast<DependentSizedMatrixType>(X),
12850 *MY = cast<DependentSizedMatrixType>(Y);
12851 assert(Ctx.hasSameExpr(MX->getRowExpr(), MY->getRowExpr()));
12852 assert(Ctx.hasSameExpr(MX->getColumnExpr(), MY->getColumnExpr()));
12853 return Ctx.getDependentSizedMatrixType(
12854 getCommonElementType(Ctx, MX, MY), MX->getRowExpr(),
12855 MX->getColumnExpr(), getCommonAttrLoc(MX, MY));
12856 }
12857 case Type::Vector: {
12858 const auto *VX = cast<VectorType>(X), *VY =
cast<VectorType>(Y); 12859 assert(VX->getNumElements() == VY->getNumElements()); 12860 assert(VX->getVectorKind() == VY->getVectorKind()); 12861 return Ctx.getVectorType(getCommonElementType(Ctx, VX, VY), 12862 VX->getNumElements(), VX->getVectorKind()); 12863 } 12864 case Type::ExtVector: { 12865 const auto *VX = cast<ExtVectorType>(X), *VY = cast<ExtVectorType>(Y); 12866 assert(VX->getNumElements() == VY->getNumElements()); 12867 return Ctx.getExtVectorType(getCommonElementType(Ctx, VX, VY), 12868 VX->getNumElements()); 12869 } 12870 case Type::DependentSizedExtVector: { 12871 const auto *VX = cast<DependentSizedExtVectorType>(X), 12872 *VY = cast<DependentSizedExtVectorType>(Y); 12873 return Ctx.getDependentSizedExtVectorType(getCommonElementType(Ctx, VX, VY), 12874 getCommonSizeExpr(Ctx, VX, VY), 12875 getCommonAttrLoc(VX, VY)); 12876 } 12877 case Type::DependentVector: { 12878 const auto *VX = cast<DependentVectorType>(X), 12879 *VY = cast<DependentVectorType>(Y); 12880 assert(VX->getVectorKind() == VY->getVectorKind()); 12881 return Ctx.getDependentVectorType( 12882 getCommonElementType(Ctx, VX, VY), getCommonSizeExpr(Ctx, VX, VY), 12883 getCommonAttrLoc(VX, VY), VX->getVectorKind()); 12884 } 12885 case Type::InjectedClassName: { 12886 const auto *IX = cast<InjectedClassNameType>(X), 12887 *IY = cast<InjectedClassNameType>(Y); 12888 return Ctx.getInjectedClassNameType( 12889 getCommonDeclChecked(IX->getDecl(), IY->getDecl()), 12890 Ctx.getCommonSugaredType(IX->getInjectedSpecializationType(), 12891 IY->getInjectedSpecializationType())); 12892 } 12893 case Type::TemplateSpecialization: { 12894 const auto *TX = cast<TemplateSpecializationType>(X), 12895 *TY = cast<TemplateSpecializationType>(Y); 12896 auto As = getCommonTemplateArguments(Ctx, TX->template_arguments(), 12897 TY->template_arguments()); 12898 return Ctx.getTemplateSpecializationType( 12899 ::getCommonTemplateNameChecked(Ctx, TX->getTemplateName(), 12900 TY->getTemplateName()), 12901 As, X->getCanonicalTypeInternal()); 12902 } 12903 case Type::Decltype: { 12904 const auto *DX = cast<DecltypeType>(X); 12905 [[maybe_unused]] const auto *DY = cast<DecltypeType>(Y); 12906 assert(DX->isDependentType()); 12907 assert(DY->isDependentType()); 12908 assert(Ctx.hasSameExpr(DX->getUnderlyingExpr(), DY->getUnderlyingExpr())); 12909 // As Decltype is not uniqued, building a common type would be wasteful. 
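// The assert above guarantees X and Y wrap the same expression, so returning
// either one unchanged is equivalent; keep X.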
12910 return QualType(DX, 0); 12911 } 12912 case Type::DependentName: { 12913 const auto *NX = cast<DependentNameType>(X), 12914 *NY = cast<DependentNameType>(Y); 12915 assert(NX->getIdentifier() == NY->getIdentifier()); 12916 return Ctx.getDependentNameType( 12917 getCommonTypeKeyword(NX, NY), getCommonNNS(Ctx, NX, NY), 12918 NX->getIdentifier(), NX->getCanonicalTypeInternal()); 12919 } 12920 case Type::DependentTemplateSpecialization: { 12921 const auto *TX = cast<DependentTemplateSpecializationType>(X), 12922 *TY = cast<DependentTemplateSpecializationType>(Y); 12923 assert(TX->getIdentifier() == TY->getIdentifier()); 12924 auto As = getCommonTemplateArguments(Ctx, TX->template_arguments(), 12925 TY->template_arguments()); 12926 return Ctx.getDependentTemplateSpecializationType( 12927 getCommonTypeKeyword(TX, TY), getCommonNNS(Ctx, TX, TY), 12928 TX->getIdentifier(), As); 12929 } 12930 case Type::UnaryTransform: { 12931 const auto *TX = cast<UnaryTransformType>(X), 12932 *TY = cast<UnaryTransformType>(Y); 12933 assert(TX->getUTTKind() == TY->getUTTKind()); 12934 return Ctx.getUnaryTransformType( 12935 Ctx.getCommonSugaredType(TX->getBaseType(), TY->getBaseType()), 12936 Ctx.getCommonSugaredType(TX->getUnderlyingType(), 12937 TY->getUnderlyingType()), 12938 TX->getUTTKind()); 12939 } 12940 case Type::PackExpansion: { 12941 const auto *PX = cast<PackExpansionType>(X), 12942 *PY = cast<PackExpansionType>(Y); 12943 assert(PX->getNumExpansions() == PY->getNumExpansions()); 12944 return Ctx.getPackExpansionType( 12945 Ctx.getCommonSugaredType(PX->getPattern(), PY->getPattern()), 12946 PX->getNumExpansions(), false); 12947 } 12948 case Type::Pipe: { 12949 const auto *PX = cast<PipeType>(X), *PY = cast<PipeType>(Y); 12950 assert(PX->isReadOnly() == PY->isReadOnly()); 12951 auto MP = PX->isReadOnly() ? 
&ASTContext::getReadPipeType 12952 : &ASTContext::getWritePipeType; 12953 return (Ctx.*MP)(getCommonElementType(Ctx, PX, PY)); 12954 } 12955 case Type::TemplateTypeParm: { 12956 const auto *TX = cast<TemplateTypeParmType>(X), 12957 *TY = cast<TemplateTypeParmType>(Y); 12958 assert(TX->getDepth() == TY->getDepth()); 12959 assert(TX->getIndex() == TY->getIndex()); 12960 assert(TX->isParameterPack() == TY->isParameterPack()); 12961 return Ctx.getTemplateTypeParmType( 12962 TX->getDepth(), TX->getIndex(), TX->isParameterPack(), 12963 getCommonDecl(TX->getDecl(), TY->getDecl())); 12964 } 12965 } 12966 llvm_unreachable("Unknown Type Class"); 12967 } 12968 12969 static QualType getCommonSugarTypeNode(ASTContext &Ctx, const Type *X, 12970 const Type *Y, 12971 SplitQualType Underlying) { 12972 Type::TypeClass TC = X->getTypeClass(); 12973 if (TC != Y->getTypeClass()) 12974 return QualType(); 12975 switch (TC) { 12976 #define UNEXPECTED_TYPE(Class, Kind) \ 12977 case Type::Class: \ 12978 llvm_unreachable("Unexpected " Kind ": " #Class); 12979 #define TYPE(Class, Base) 12980 #define DEPENDENT_TYPE(Class, Base) UNEXPECTED_TYPE(Class, "dependent") 12981 #include "clang/AST/TypeNodes.inc" 12982 12983 #define CANONICAL_TYPE(Class) UNEXPECTED_TYPE(Class, "canonical") 12984 CANONICAL_TYPE(Atomic) 12985 CANONICAL_TYPE(BitInt) 12986 CANONICAL_TYPE(BlockPointer) 12987 CANONICAL_TYPE(Builtin) 12988 CANONICAL_TYPE(Complex) 12989 CANONICAL_TYPE(ConstantArray) 12990 CANONICAL_TYPE(ConstantMatrix) 12991 CANONICAL_TYPE(Enum) 12992 CANONICAL_TYPE(ExtVector) 12993 CANONICAL_TYPE(FunctionNoProto) 12994 CANONICAL_TYPE(FunctionProto) 12995 CANONICAL_TYPE(IncompleteArray) 12996 CANONICAL_TYPE(LValueReference) 12997 CANONICAL_TYPE(MemberPointer) 12998 CANONICAL_TYPE(ObjCInterface) 12999 CANONICAL_TYPE(ObjCObject) 13000 CANONICAL_TYPE(ObjCObjectPointer) 13001 CANONICAL_TYPE(Pipe) 13002 CANONICAL_TYPE(Pointer) 13003 CANONICAL_TYPE(Record) 13004 CANONICAL_TYPE(RValueReference) 13005 CANONICAL_TYPE(VariableArray) 13006 CANONICAL_TYPE(Vector) 13007 #undef CANONICAL_TYPE 13008 13009 #undef UNEXPECTED_TYPE 13010 13011 case Type::Adjusted: { 13012 const auto *AX = cast<AdjustedType>(X), *AY = cast<AdjustedType>(Y); 13013 QualType OX = AX->getOriginalType(), OY = AY->getOriginalType(); 13014 if (!Ctx.hasSameType(OX, OY)) 13015 return QualType(); 13016 // FIXME: It's inefficient to have to unify the original types. 13017 return Ctx.getAdjustedType(Ctx.getCommonSugaredType(OX, OY), 13018 Ctx.getQualifiedType(Underlying)); 13019 } 13020 case Type::Decayed: { 13021 const auto *DX = cast<DecayedType>(X), *DY = cast<DecayedType>(Y); 13022 QualType OX = DX->getOriginalType(), OY = DY->getOriginalType(); 13023 if (!Ctx.hasSameType(OX, OY)) 13024 return QualType(); 13025 // FIXME: It's inefficient to have to unify the original types. 13026 return Ctx.getDecayedType(Ctx.getCommonSugaredType(OX, OY), 13027 Ctx.getQualifiedType(Underlying)); 13028 } 13029 case Type::Attributed: { 13030 const auto *AX = cast<AttributedType>(X), *AY = cast<AttributedType>(Y); 13031 AttributedType::Kind Kind = AX->getAttrKind(); 13032 if (Kind != AY->getAttrKind()) 13033 return QualType(); 13034 QualType MX = AX->getModifiedType(), MY = AY->getModifiedType(); 13035 if (!Ctx.hasSameType(MX, MY)) 13036 return QualType(); 13037 // FIXME: It's inefficient to have to unify the modified types. 
13038 return Ctx.getAttributedType(Kind, Ctx.getCommonSugaredType(MX, MY), 13039 Ctx.getQualifiedType(Underlying)); 13040 } 13041 case Type::BTFTagAttributed: { 13042 const auto *BX = cast<BTFTagAttributedType>(X); 13043 const BTFTypeTagAttr *AX = BX->getAttr(); 13044 // The attribute is not uniqued, so just compare the tag. 13045 if (AX->getBTFTypeTag() != 13046 cast<BTFTagAttributedType>(Y)->getAttr()->getBTFTypeTag()) 13047 return QualType(); 13048 return Ctx.getBTFTagAttributedType(AX, Ctx.getQualifiedType(Underlying)); 13049 } 13050 case Type::Auto: { 13051 const auto *AX = cast<AutoType>(X), *AY = cast<AutoType>(Y); 13052 13053 AutoTypeKeyword KW = AX->getKeyword(); 13054 if (KW != AY->getKeyword()) 13055 return QualType(); 13056 13057 ConceptDecl *CD = ::getCommonDecl(AX->getTypeConstraintConcept(), 13058 AY->getTypeConstraintConcept()); 13059 SmallVector<TemplateArgument, 8> As; 13060 if (CD && 13061 getCommonTemplateArguments(Ctx, As, AX->getTypeConstraintArguments(), 13062 AY->getTypeConstraintArguments())) { 13063 CD = nullptr; // The arguments differ, so make it unconstrained. 13064 As.clear(); 13065 } 13066 13067 // Both auto types can't be dependent, otherwise they wouldn't have been 13068 // sugar. This implies they can't contain unexpanded packs either. 13069 return Ctx.getAutoType(Ctx.getQualifiedType(Underlying), AX->getKeyword(), 13070 /*IsDependent=*/false, /*IsPack=*/false, CD, As); 13071 } 13072 case Type::Decltype: 13073 return QualType(); 13074 case Type::DeducedTemplateSpecialization: 13075 // FIXME: Try to merge these. 13076 return QualType(); 13077 13078 case Type::Elaborated: { 13079 const auto *EX = cast<ElaboratedType>(X), *EY = cast<ElaboratedType>(Y); 13080 return Ctx.getElaboratedType( 13081 ::getCommonTypeKeyword(EX, EY), ::getCommonNNS(Ctx, EX, EY), 13082 Ctx.getQualifiedType(Underlying), 13083 ::getCommonDecl(EX->getOwnedTagDecl(), EY->getOwnedTagDecl())); 13084 } 13085 case Type::MacroQualified: { 13086 const auto *MX = cast<MacroQualifiedType>(X), 13087 *MY = cast<MacroQualifiedType>(Y); 13088 const IdentifierInfo *IX = MX->getMacroIdentifier(); 13089 if (IX != MY->getMacroIdentifier()) 13090 return QualType(); 13091 return Ctx.getMacroQualifiedType(Ctx.getQualifiedType(Underlying), IX); 13092 } 13093 case Type::SubstTemplateTypeParm: { 13094 const auto *SX = cast<SubstTemplateTypeParmType>(X), 13095 *SY = cast<SubstTemplateTypeParmType>(Y); 13096 Decl *CD = 13097 ::getCommonDecl(SX->getAssociatedDecl(), SY->getAssociatedDecl()); 13098 if (!CD) 13099 return QualType(); 13100 unsigned Index = SX->getIndex(); 13101 if (Index != SY->getIndex()) 13102 return QualType(); 13103 auto PackIndex = SX->getPackIndex(); 13104 if (PackIndex != SY->getPackIndex()) 13105 return QualType(); 13106 return Ctx.getSubstTemplateTypeParmType(Ctx.getQualifiedType(Underlying), 13107 CD, Index, PackIndex); 13108 } 13109 case Type::ObjCTypeParam: 13110 // FIXME: Try to merge these. 
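// Returning a null QualType signals that this sugar node could not be
// unified; getCommonSugaredType() then stops and keeps the underlying type
// instead.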
13111 return QualType(); 13112 case Type::Paren: 13113 return Ctx.getParenType(Ctx.getQualifiedType(Underlying)); 13114 13115 case Type::TemplateSpecialization: { 13116 const auto *TX = cast<TemplateSpecializationType>(X), 13117 *TY = cast<TemplateSpecializationType>(Y); 13118 TemplateName CTN = ::getCommonTemplateName(Ctx, TX->getTemplateName(), 13119 TY->getTemplateName()); 13120 if (!CTN.getAsVoidPointer()) 13121 return QualType(); 13122 SmallVector<TemplateArgument, 8> Args; 13123 if (getCommonTemplateArguments(Ctx, Args, TX->template_arguments(), 13124 TY->template_arguments())) 13125 return QualType(); 13126 return Ctx.getTemplateSpecializationType(CTN, Args, 13127 Ctx.getQualifiedType(Underlying)); 13128 } 13129 case Type::Typedef: { 13130 const auto *TX = cast<TypedefType>(X), *TY = cast<TypedefType>(Y); 13131 const TypedefNameDecl *CD = ::getCommonDecl(TX->getDecl(), TY->getDecl()); 13132 if (!CD) 13133 return QualType(); 13134 return Ctx.getTypedefType(CD, Ctx.getQualifiedType(Underlying)); 13135 } 13136 case Type::TypeOf: { 13137 // The common sugar between two typeof expressions, where one is 13138 // potentially a typeof_unqual and the other is not, we unify to the 13139 // qualified type as that retains the most information along with the type. 13140 // We only return a typeof_unqual type when both types are unqual types. 13141 TypeOfKind Kind = TypeOfKind::Qualified; 13142 if (cast<TypeOfType>(X)->getKind() == cast<TypeOfType>(Y)->getKind() && 13143 cast<TypeOfType>(X)->getKind() == TypeOfKind::Unqualified) 13144 Kind = TypeOfKind::Unqualified; 13145 return Ctx.getTypeOfType(Ctx.getQualifiedType(Underlying), Kind); 13146 } 13147 case Type::TypeOfExpr: 13148 return QualType(); 13149 13150 case Type::UnaryTransform: { 13151 const auto *UX = cast<UnaryTransformType>(X), 13152 *UY = cast<UnaryTransformType>(Y); 13153 UnaryTransformType::UTTKind KX = UX->getUTTKind(); 13154 if (KX != UY->getUTTKind()) 13155 return QualType(); 13156 QualType BX = UX->getBaseType(), BY = UY->getBaseType(); 13157 if (!Ctx.hasSameType(BX, BY)) 13158 return QualType(); 13159 // FIXME: It's inefficient to have to unify the base types. 13160 return Ctx.getUnaryTransformType(Ctx.getCommonSugaredType(BX, BY), 13161 Ctx.getQualifiedType(Underlying), KX); 13162 } 13163 case Type::Using: { 13164 const auto *UX = cast<UsingType>(X), *UY = cast<UsingType>(Y); 13165 const UsingShadowDecl *CD = 13166 ::getCommonDecl(UX->getFoundDecl(), UY->getFoundDecl()); 13167 if (!CD) 13168 return QualType(); 13169 return Ctx.getUsingType(CD, Ctx.getQualifiedType(Underlying)); 13170 } 13171 } 13172 llvm_unreachable("Unhandled Type Class"); 13173 } 13174 13175 static auto unwrapSugar(SplitQualType &T, Qualifiers &QTotal) { 13176 SmallVector<SplitQualType, 8> R; 13177 while (true) { 13178 QTotal.addConsistentQualifiers(T.Quals); 13179 QualType NT = T.Ty->getLocallyUnqualifiedSingleStepDesugaredType(); 13180 if (NT == QualType(T.Ty, 0)) 13181 break; 13182 R.push_back(T); 13183 T = NT.split(); 13184 } 13185 return R; 13186 } 13187 13188 QualType ASTContext::getCommonSugaredType(QualType X, QualType Y, 13189 bool Unqualified) { 13190 assert(Unqualified ? 
hasSameUnqualifiedType(X, Y) : hasSameType(X, Y)); 13191 if (X == Y) 13192 return X; 13193 if (!Unqualified) { 13194 if (X.isCanonical()) 13195 return X; 13196 if (Y.isCanonical()) 13197 return Y; 13198 } 13199 13200 SplitQualType SX = X.split(), SY = Y.split(); 13201 Qualifiers QX, QY; 13202 // Desugar SX and SY, setting the sugar and qualifiers aside into Xs and Ys, 13203 // until we reach their underlying "canonical nodes". Note these are not 13204 // necessarily canonical types, as they may still have sugared properties. 13205 // QX and QY will store the sum of all qualifiers in Xs and Ys respectively. 13206 auto Xs = ::unwrapSugar(SX, QX), Ys = ::unwrapSugar(SY, QY); 13207 if (SX.Ty != SY.Ty) { 13208 // The canonical nodes differ. Build a common canonical node out of the two, 13209 // unifying their sugar. This may recurse back here. 13210 SX.Ty = 13211 ::getCommonNonSugarTypeNode(*this, SX.Ty, QX, SY.Ty, QY).getTypePtr(); 13212 } else { 13213 // The canonical nodes were identical: We may have desugared too much. 13214 // Add any common sugar back in. 13215 while (!Xs.empty() && !Ys.empty() && Xs.back().Ty == Ys.back().Ty) { 13216 QX -= SX.Quals; 13217 QY -= SY.Quals; 13218 SX = Xs.pop_back_val(); 13219 SY = Ys.pop_back_val(); 13220 } 13221 } 13222 if (Unqualified) 13223 QX = Qualifiers::removeCommonQualifiers(QX, QY); 13224 else 13225 assert(QX == QY); 13226 13227 // Even though the remaining sugar nodes in Xs and Ys differ, some may be 13228 // related. Walk up these nodes, unifying them and adding the result. 13229 while (!Xs.empty() && !Ys.empty()) { 13230 auto Underlying = SplitQualType( 13231 SX.Ty, Qualifiers::removeCommonQualifiers(SX.Quals, SY.Quals)); 13232 SX = Xs.pop_back_val(); 13233 SY = Ys.pop_back_val(); 13234 SX.Ty = ::getCommonSugarTypeNode(*this, SX.Ty, SY.Ty, Underlying) 13235 .getTypePtrOrNull(); 13236 // Stop at the first pair which is unrelated. 13237 if (!SX.Ty) { 13238 SX.Ty = Underlying.Ty; 13239 break; 13240 } 13241 QX -= Underlying.Quals; 13242 }; 13243 13244 // Add back the missing accumulated qualifiers, which were stripped off 13245 // with the sugar nodes we could not unify. 13246 QualType R = getQualifiedType(SX.Ty, QX); 13247 assert(Unqualified ? 
hasSameUnqualifiedType(R, X) : hasSameType(R, X)); 13248 return R; 13249 } 13250 13251 QualType ASTContext::getCorrespondingSaturatedType(QualType Ty) const { 13252 assert(Ty->isFixedPointType()); 13253 13254 if (Ty->isSaturatedFixedPointType()) return Ty; 13255 13256 switch (Ty->castAs<BuiltinType>()->getKind()) { 13257 default: 13258 llvm_unreachable("Not a fixed point type!"); 13259 case BuiltinType::ShortAccum: 13260 return SatShortAccumTy; 13261 case BuiltinType::Accum: 13262 return SatAccumTy; 13263 case BuiltinType::LongAccum: 13264 return SatLongAccumTy; 13265 case BuiltinType::UShortAccum: 13266 return SatUnsignedShortAccumTy; 13267 case BuiltinType::UAccum: 13268 return SatUnsignedAccumTy; 13269 case BuiltinType::ULongAccum: 13270 return SatUnsignedLongAccumTy; 13271 case BuiltinType::ShortFract: 13272 return SatShortFractTy; 13273 case BuiltinType::Fract: 13274 return SatFractTy; 13275 case BuiltinType::LongFract: 13276 return SatLongFractTy; 13277 case BuiltinType::UShortFract: 13278 return SatUnsignedShortFractTy; 13279 case BuiltinType::UFract: 13280 return SatUnsignedFractTy; 13281 case BuiltinType::ULongFract: 13282 return SatUnsignedLongFractTy; 13283 } 13284 } 13285 13286 LangAS ASTContext::getLangASForBuiltinAddressSpace(unsigned AS) const { 13287 if (LangOpts.OpenCL) 13288 return getTargetInfo().getOpenCLBuiltinAddressSpace(AS); 13289 13290 if (LangOpts.CUDA) 13291 return getTargetInfo().getCUDABuiltinAddressSpace(AS); 13292 13293 return getLangASFromTargetAS(AS); 13294 } 13295 13296 // Explicitly instantiate this in case a Redeclarable<T> is used from a TU that 13297 // doesn't include ASTContext.h 13298 template 13299 clang::LazyGenerationalUpdatePtr< 13300 const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::ValueType 13301 clang::LazyGenerationalUpdatePtr< 13302 const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::makeValue( 13303 const clang::ASTContext &Ctx, Decl *Value); 13304 13305 unsigned char ASTContext::getFixedPointScale(QualType Ty) const { 13306 assert(Ty->isFixedPointType()); 13307 13308 const TargetInfo &Target = getTargetInfo(); 13309 switch (Ty->castAs<BuiltinType>()->getKind()) { 13310 default: 13311 llvm_unreachable("Not a fixed point type!"); 13312 case BuiltinType::ShortAccum: 13313 case BuiltinType::SatShortAccum: 13314 return Target.getShortAccumScale(); 13315 case BuiltinType::Accum: 13316 case BuiltinType::SatAccum: 13317 return Target.getAccumScale(); 13318 case BuiltinType::LongAccum: 13319 case BuiltinType::SatLongAccum: 13320 return Target.getLongAccumScale(); 13321 case BuiltinType::UShortAccum: 13322 case BuiltinType::SatUShortAccum: 13323 return Target.getUnsignedShortAccumScale(); 13324 case BuiltinType::UAccum: 13325 case BuiltinType::SatUAccum: 13326 return Target.getUnsignedAccumScale(); 13327 case BuiltinType::ULongAccum: 13328 case BuiltinType::SatULongAccum: 13329 return Target.getUnsignedLongAccumScale(); 13330 case BuiltinType::ShortFract: 13331 case BuiltinType::SatShortFract: 13332 return Target.getShortFractScale(); 13333 case BuiltinType::Fract: 13334 case BuiltinType::SatFract: 13335 return Target.getFractScale(); 13336 case BuiltinType::LongFract: 13337 case BuiltinType::SatLongFract: 13338 return Target.getLongFractScale(); 13339 case BuiltinType::UShortFract: 13340 case BuiltinType::SatUShortFract: 13341 return Target.getUnsignedShortFractScale(); 13342 case BuiltinType::UFract: 13343 case BuiltinType::SatUFract: 13344 return Target.getUnsignedFractScale(); 13345 case BuiltinType::ULongFract: 13346 
case BuiltinType::SatULongFract: 13347 return Target.getUnsignedLongFractScale(); 13348 } 13349 } 13350 13351 unsigned char ASTContext::getFixedPointIBits(QualType Ty) const { 13352 assert(Ty->isFixedPointType()); 13353 13354 const TargetInfo &Target = getTargetInfo(); 13355 switch (Ty->castAs<BuiltinType>()->getKind()) { 13356 default: 13357 llvm_unreachable("Not a fixed point type!"); 13358 case BuiltinType::ShortAccum: 13359 case BuiltinType::SatShortAccum: 13360 return Target.getShortAccumIBits(); 13361 case BuiltinType::Accum: 13362 case BuiltinType::SatAccum: 13363 return Target.getAccumIBits(); 13364 case BuiltinType::LongAccum: 13365 case BuiltinType::SatLongAccum: 13366 return Target.getLongAccumIBits(); 13367 case BuiltinType::UShortAccum: 13368 case BuiltinType::SatUShortAccum: 13369 return Target.getUnsignedShortAccumIBits(); 13370 case BuiltinType::UAccum: 13371 case BuiltinType::SatUAccum: 13372 return Target.getUnsignedAccumIBits(); 13373 case BuiltinType::ULongAccum: 13374 case BuiltinType::SatULongAccum: 13375 return Target.getUnsignedLongAccumIBits(); 13376 case BuiltinType::ShortFract: 13377 case BuiltinType::SatShortFract: 13378 case BuiltinType::Fract: 13379 case BuiltinType::SatFract: 13380 case BuiltinType::LongFract: 13381 case BuiltinType::SatLongFract: 13382 case BuiltinType::UShortFract: 13383 case BuiltinType::SatUShortFract: 13384 case BuiltinType::UFract: 13385 case BuiltinType::SatUFract: 13386 case BuiltinType::ULongFract: 13387 case BuiltinType::SatULongFract: 13388 return 0; 13389 } 13390 } 13391 13392 llvm::FixedPointSemantics 13393 ASTContext::getFixedPointSemantics(QualType Ty) const { 13394 assert((Ty->isFixedPointType() || Ty->isIntegerType()) && 13395 "Can only get the fixed point semantics for a " 13396 "fixed point or integer type."); 13397 if (Ty->isIntegerType()) 13398 return llvm::FixedPointSemantics::GetIntegerSemantics( 13399 getIntWidth(Ty), Ty->isSignedIntegerType()); 13400 13401 bool isSigned = Ty->isSignedFixedPointType(); 13402 return llvm::FixedPointSemantics( 13403 static_cast<unsigned>(getTypeSize(Ty)), getFixedPointScale(Ty), isSigned, 13404 Ty->isSaturatedFixedPointType(), 13405 !isSigned && getTargetInfo().doUnsignedFixedPointTypesHavePadding()); 13406 } 13407 13408 llvm::APFixedPoint ASTContext::getFixedPointMax(QualType Ty) const { 13409 assert(Ty->isFixedPointType()); 13410 return llvm::APFixedPoint::getMax(getFixedPointSemantics(Ty)); 13411 } 13412 13413 llvm::APFixedPoint ASTContext::getFixedPointMin(QualType Ty) const { 13414 assert(Ty->isFixedPointType()); 13415 return llvm::APFixedPoint::getMin(getFixedPointSemantics(Ty)); 13416 } 13417 13418 QualType ASTContext::getCorrespondingSignedFixedPointType(QualType Ty) const { 13419 assert(Ty->isUnsignedFixedPointType() && 13420 "Expected unsigned fixed point type"); 13421 13422 switch (Ty->castAs<BuiltinType>()->getKind()) { 13423 case BuiltinType::UShortAccum: 13424 return ShortAccumTy; 13425 case BuiltinType::UAccum: 13426 return AccumTy; 13427 case BuiltinType::ULongAccum: 13428 return LongAccumTy; 13429 case BuiltinType::SatUShortAccum: 13430 return SatShortAccumTy; 13431 case BuiltinType::SatUAccum: 13432 return SatAccumTy; 13433 case BuiltinType::SatULongAccum: 13434 return SatLongAccumTy; 13435 case BuiltinType::UShortFract: 13436 return ShortFractTy; 13437 case BuiltinType::UFract: 13438 return FractTy; 13439 case BuiltinType::ULongFract: 13440 return LongFractTy; 13441 case BuiltinType::SatUShortFract: 13442 return SatShortFractTy; 13443 case BuiltinType::SatUFract: 13444 
return SatFractTy; 13445 case BuiltinType::SatULongFract: 13446 return SatLongFractTy; 13447 default: 13448 llvm_unreachable("Unexpected unsigned fixed point type"); 13449 } 13450 } 13451 13452 std::vector<std::string> ASTContext::filterFunctionTargetVersionAttrs( 13453 const TargetVersionAttr *TV) const { 13454 assert(TV != nullptr); 13455 llvm::SmallVector<StringRef, 8> Feats; 13456 std::vector<std::string> ResFeats; 13457 TV->getFeatures(Feats); 13458 for (auto &Feature : Feats) 13459 if (Target->validateCpuSupports(Feature.str())) 13460 // Use '?' to mark features that came from TargetVersion. 13461 ResFeats.push_back("?" + Feature.str()); 13462 return ResFeats; 13463 } 13464 13465 ParsedTargetAttr 13466 ASTContext::filterFunctionTargetAttrs(const TargetAttr *TD) const { 13467 assert(TD != nullptr); 13468 ParsedTargetAttr ParsedAttr = Target->parseTargetAttr(TD->getFeaturesStr()); 13469 13470 llvm::erase_if(ParsedAttr.Features, [&](const std::string &Feat) { 13471 return !Target->isValidFeatureName(StringRef{Feat}.substr(1)); 13472 }); 13473 return ParsedAttr; 13474 } 13475 13476 void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap, 13477 const FunctionDecl *FD) const { 13478 if (FD) 13479 getFunctionFeatureMap(FeatureMap, GlobalDecl().getWithDecl(FD)); 13480 else 13481 Target->initFeatureMap(FeatureMap, getDiagnostics(), 13482 Target->getTargetOpts().CPU, 13483 Target->getTargetOpts().Features); 13484 } 13485 13486 // Fills in the supplied string map with the set of target features for the 13487 // passed in function. 13488 void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap, 13489 GlobalDecl GD) const { 13490 StringRef TargetCPU = Target->getTargetOpts().CPU; 13491 const FunctionDecl *FD = GD.getDecl()->getAsFunction(); 13492 if (const auto *TD = FD->getAttr<TargetAttr>()) { 13493 ParsedTargetAttr ParsedAttr = filterFunctionTargetAttrs(TD); 13494 13495 // Make a copy of the features as passed on the command line into the 13496 // beginning of the additional features from the function to override. 13497 ParsedAttr.Features.insert( 13498 ParsedAttr.Features.begin(), 13499 Target->getTargetOpts().FeaturesAsWritten.begin(), 13500 Target->getTargetOpts().FeaturesAsWritten.end()); 13501 13502 if (ParsedAttr.CPU != "" && Target->isValidCPUName(ParsedAttr.CPU)) 13503 TargetCPU = ParsedAttr.CPU; 13504 13505 // Now populate the feature map, first with the TargetCPU which is either 13506 // the default or a new one from the target attribute string. Then we'll use 13507 // the passed in features (FeaturesAsWritten) along with the new ones from 13508 // the attribute. 
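// E.g. (illustrative): if the command line enabled '+sse4.2' and the function
// carries __attribute__((target("avx"))), both features end up enabled in the
// map, with the attribute's features taking precedence on any conflict
// because they appear later in the list.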
13509 Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU,
13510 ParsedAttr.Features);
13511 } else if (const auto *SD = FD->getAttr<CPUSpecificAttr>()) {
13512 llvm::SmallVector<StringRef, 32> FeaturesTmp;
13513 Target->getCPUSpecificCPUDispatchFeatures(
13514 SD->getCPUName(GD.getMultiVersionIndex())->getName(), FeaturesTmp);
13515 std::vector<std::string> Features(FeaturesTmp.begin(), FeaturesTmp.end());
13516 Features.insert(Features.begin(),
13517 Target->getTargetOpts().FeaturesAsWritten.begin(),
13518 Target->getTargetOpts().FeaturesAsWritten.end());
13519 Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features);
13520 } else if (const auto *TC = FD->getAttr<TargetClonesAttr>()) {
13521 std::vector<std::string> Features;
13522 StringRef VersionStr = TC->getFeatureStr(GD.getMultiVersionIndex());
13523 if (Target->getTriple().isAArch64()) {
13524 // TargetClones for AArch64
13525 if (VersionStr != "default") {
13526 SmallVector<StringRef, 1> VersionFeatures;
13527 VersionStr.split(VersionFeatures, "+");
13528 for (auto &VFeature : VersionFeatures) {
13529 VFeature = VFeature.trim();
13530 // Use '?' to mark features that came from AArch64 TargetClones.
13531 Features.push_back((StringRef{"?"} + VFeature).str());
13532 }
13533 }
13534 Features.insert(Features.begin(),
13535 Target->getTargetOpts().FeaturesAsWritten.begin(),
13536 Target->getTargetOpts().FeaturesAsWritten.end());
13537 } else {
13538 if (VersionStr.starts_with("arch="))
13539 TargetCPU = VersionStr.drop_front(sizeof("arch=") - 1);
13540 else if (VersionStr != "default")
13541 Features.push_back((StringRef{"+"} + VersionStr).str());
13542 }
13543 Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features);
13544 } else if (const auto *TV = FD->getAttr<TargetVersionAttr>()) {
13545 std::vector<std::string> Feats = filterFunctionTargetVersionAttrs(TV);
13546 Feats.insert(Feats.begin(),
13547 Target->getTargetOpts().FeaturesAsWritten.begin(),
13548 Target->getTargetOpts().FeaturesAsWritten.end());
13549 Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Feats);
13550 } else {
13551 FeatureMap = Target->getTargetOpts().FeatureMap;
13552 }
13553 }
13554
13555 OMPTraitInfo &ASTContext::getNewOMPTraitInfo() {
13556 OMPTraitInfoVector.emplace_back(new OMPTraitInfo());
13557 return *OMPTraitInfoVector.back();
13558 }
13559
13560 const StreamingDiagnostic &clang::
13561 operator<<(const StreamingDiagnostic &DB,
13562 const ASTContext::SectionInfo &Section) {
13563 if (Section.Decl)
13564 return DB << Section.Decl;
13565 return DB << "a prior #pragma section";
13566 }
13567
13568 bool ASTContext::mayExternalize(const Decl *D) const {
13569 bool IsInternalVar =
13570 isa<VarDecl>(D) &&
13571 basicGVALinkageForVariable(*this, cast<VarDecl>(D)) == GVA_Internal;
13572 bool IsExplicitDeviceVar = (D->hasAttr<CUDADeviceAttr>() &&
13573 !D->getAttr<CUDADeviceAttr>()->isImplicit()) ||
13574 (D->hasAttr<CUDAConstantAttr>() &&
13575 !D->getAttr<CUDAConstantAttr>()->isImplicit());
13576 // CUDA/HIP: managed variables need to be externalized since they are
13577 // declarations in IR and therefore cannot have internal linkage. Kernels in
13578 // an anonymous namespace need to be externalized to avoid duplicate symbols.
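// In other words: internal-linkage device/constant/managed variables and
// internal-linkage kernels are candidates here; shouldExternalize() below
// makes the final call.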
13579 return (IsInternalVar && 13580 (D->hasAttr<HIPManagedAttr>() || IsExplicitDeviceVar)) || 13581 (D->hasAttr<CUDAGlobalAttr>() && 13582 basicGVALinkageForFunction(*this, cast<FunctionDecl>(D)) == 13583 GVA_Internal); 13584 } 13585 13586 bool ASTContext::shouldExternalize(const Decl *D) const { 13587 return mayExternalize(D) && 13588 (D->hasAttr<HIPManagedAttr>() || D->hasAttr<CUDAGlobalAttr>() || 13589 CUDADeviceVarODRUsedByHost.count(cast<VarDecl>(D))); 13590 } 13591 13592 StringRef ASTContext::getCUIDHash() const { 13593 if (!CUIDHash.empty()) 13594 return CUIDHash; 13595 if (LangOpts.CUID.empty()) 13596 return StringRef(); 13597 CUIDHash = llvm::utohexstr(llvm::MD5Hash(LangOpts.CUID), /*LowerCase=*/true); 13598 return CUIDHash; 13599 } 13600